author     Yangbo Lu <yangbo.lu@nxp.com>                     2020-05-22 15:08:54 +0800
committer  Adrian Schmutzler <freifunk@adrianschmutzler.de>  2020-05-25 11:48:54 +0200
commit     4bbc0e735754acfd7c37eb87c2f2ceb46bafdb16 (patch)
tree       c56af9680a429b9c6ee740a94cb0dc7e305467e4 /target
parent     b1604b744b716a592841757c43bc5156c7772aca (diff)
layerscape: remove support for kernel 4.14
Remove support for kernel 4.14. The NXP Layerscape SDK has also dropped kernel 4.14 support since LSDK-20.04.

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
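For reference, a quick way to see which kernel versions the layerscape target still carries after a removal like this is to list the remaining per-subtarget config files, patch directories, and the default KERNEL_PATCHVER. This is a hedged sketch assuming an OpenWrt source checkout; the paths mirror the layout shown in the diffstat below.

    # list remaining per-subtarget kernel configs (e.g. config-4.19)
    ls target/linux/layerscape/*/config-*
    # list remaining kernel patch directories
    ls -d target/linux/layerscape/patches-*
    # show the kernel version the target builds by default
    grep KERNEL_PATCHVER target/linux/layerscape/Makefile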
Diffstat (limited to 'target')
-rw-r--r--  target/linux/layerscape/armv7/config-4.14 | 779
-rw-r--r--  target/linux/layerscape/armv8_64b/config-4.14 | 983
-rw-r--r--  target/linux/layerscape/patches-4.14/201-config-support-layerscape.patch | 340
-rw-r--r--  target/linux/layerscape/patches-4.14/202-core-linux-support-layerscape.patch | 1056
-rw-r--r--  target/linux/layerscape/patches-4.14/301-arch-support-layerscape.patch | 467
-rw-r--r--  target/linux/layerscape/patches-4.14/302-dts-support-layerscape.patch | 10909
-rw-r--r--  target/linux/layerscape/patches-4.14/701-dpaa2-dpio-support-layerscape.patch | 2992
-rw-r--r--  target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch | 9130
-rw-r--r--  target/linux/layerscape/patches-4.14/703-dpaa2-l2switch-support-layerscape.patch | 4040
-rw-r--r--  target/linux/layerscape/patches-4.14/704-dpaa2-mac-phy-support-layerscape.patch | 2164
-rw-r--r--  target/linux/layerscape/patches-4.14/705-dpaa2-rtc-support-layerscape.patch | 1386
-rw-r--r--  target/linux/layerscape/patches-4.14/706-dpaa2-virtualbridge-support-layerscape.patch | 3256
-rw-r--r--  target/linux/layerscape/patches-4.14/707-dpaa-ethernet-support-layerscape.patch | 156554
-rw-r--r--  target/linux/layerscape/patches-4.14/708-mc-bus-support-layerscape.patch | 12074
-rw-r--r--  target/linux/layerscape/patches-4.14/709-mdio-phy-support-layerscape.patch | 3729
-rw-r--r--  target/linux/layerscape/patches-4.14/710-pfe-eth-support-layerscape.patch | 11028
-rw-r--r--  target/linux/layerscape/patches-4.14/711-dpaa-bqman-support-layerscape.patch | 923
-rw-r--r--  target/linux/layerscape/patches-4.14/712-etsec-support-layerscape.patch | 77
-rw-r--r--  target/linux/layerscape/patches-4.14/713-sdk_qbman-Fix-error-in-IP-revision-comparison.patch | 28
-rw-r--r--  target/linux/layerscape/patches-4.14/801-sata-support-layerscape.patch | 289
-rw-r--r--  target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch | 4263
-rw-r--r--  target/linux/layerscape/patches-4.14/803-flextimer-support-layerscape.patch | 457
-rw-r--r--  target/linux/layerscape/patches-4.14/804-i2c-support-layerscape.patch | 478
-rw-r--r--  target/linux/layerscape/patches-4.14/805-qe-support-layerscape.patch | 1961
-rw-r--r--  target/linux/layerscape/patches-4.14/806-rtc-support-layerscape.patch | 776
-rw-r--r--  target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch | 1588
-rw-r--r--  target/linux/layerscape/patches-4.14/808-vfio-support-layerscape.patch | 1093
-rw-r--r--  target/linux/layerscape/patches-4.14/809-flexcan-support-layerscape.patch | 596
-rw-r--r--  target/linux/layerscape/patches-4.14/810-kvm-support-layerscape.patch | 208
-rw-r--r--  target/linux/layerscape/patches-4.14/811-clock-support-layerscape.patch | 95
-rw-r--r--  target/linux/layerscape/patches-4.14/812-flexspi-support-layerscape.patch | 1576
-rw-r--r--  target/linux/layerscape/patches-4.14/813-ifc-nor-nand-support-layerscape.patch | 356
-rw-r--r--  target/linux/layerscape/patches-4.14/814-ls2-console-support-layerscape.patch | 316
-rw-r--r--  target/linux/layerscape/patches-4.14/815-msi-support-layerscape.patch | 33
-rw-r--r--  target/linux/layerscape/patches-4.14/816-pcie-support-layerscape.patch | 5977
-rw-r--r--  target/linux/layerscape/patches-4.14/817-platform-security-support-layerscape.patch | 1443
-rw-r--r--  target/linux/layerscape/patches-4.14/818-qspi-support-layerscape.patch | 745
-rw-r--r--  target/linux/layerscape/patches-4.14/819-sdhc-support-layerscape.patch | 572
-rw-r--r--  target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch | 15294
-rw-r--r--  target/linux/layerscape/patches-4.14/821-smmu-support-layerscape.patch | 506
-rw-r--r--  target/linux/layerscape/patches-4.14/822-uart-support-layerscape.patch | 52
-rw-r--r--  target/linux/layerscape/patches-4.14/823-pm-support-layerscape.patch | 631
-rw-r--r--  target/linux/layerscape/patches-4.14/824-ptp-support-layerscape.patch | 1399
-rw-r--r--  target/linux/layerscape/patches-4.14/825-tmu-support-layerscape.patch | 188
44 files changed, 0 insertions, 262807 deletions
diff --git a/target/linux/layerscape/armv7/config-4.14 b/target/linux/layerscape/armv7/config-4.14
deleted file mode 100644
index 2128e27b8f..0000000000
--- a/target/linux/layerscape/armv7/config-4.14
+++ /dev/null
@@ -1,779 +0,0 @@
-CONFIG_AD525X_DPOT=y
-CONFIG_AD525X_DPOT_I2C=y
-# CONFIG_AD525X_DPOT_SPI is not set
-CONFIG_ALIGNMENT_TRAP=y
-CONFIG_APDS9802ALS=y
-CONFIG_AQUANTIA_PHY=y
-# CONFIG_ARCH_AXXIA is not set
-CONFIG_ARCH_CLOCKSOURCE_DATA=y
-CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
-CONFIG_ARCH_HAS_RESET_CONTROLLER=y
-CONFIG_ARCH_HAS_SET_MEMORY=y
-CONFIG_ARCH_HAS_SG_CHAIN=y
-CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
-CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
-CONFIG_ARCH_HAS_TICK_BROADCAST=y
-CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
-CONFIG_ARCH_MULTIPLATFORM=y
-# CONFIG_ARCH_MULTI_CPU_AUTO is not set
-CONFIG_ARCH_MULTI_V6_V7=y
-CONFIG_ARCH_MULTI_V7=y
-CONFIG_ARCH_MXC=y
-CONFIG_ARCH_NR_GPIO=0
-CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y
-CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y
-CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
-# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
-# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
-CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
-CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y
-CONFIG_ARCH_SUPPORTS_UPROBES=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_USE_BUILTIN_BSWAP=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
-CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
-CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
-CONFIG_ARM=y
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_ARM_ARCH_TIMER=y
-CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
-CONFIG_ARM_ATAG_DTB_COMPAT=y
-# CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND is not set
-CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
-CONFIG_ARM_CPUIDLE=y
-CONFIG_ARM_CPU_SUSPEND=y
-CONFIG_ARM_ERRATA_430973=y
-CONFIG_ARM_ERRATA_643719=y
-CONFIG_ARM_ERRATA_720789=y
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_ARM_ERRATA_754327=y
-CONFIG_ARM_ERRATA_764369=y
-CONFIG_ARM_ERRATA_775420=y
-CONFIG_ARM_ERRATA_798181=y
-CONFIG_ARM_GIC=y
-CONFIG_ARM_HAS_SG_CHAIN=y
-CONFIG_ARM_HEAVY_MB=y
-# CONFIG_ARM_HIGHBANK_CPUIDLE is not set
-CONFIG_ARM_L1_CACHE_SHIFT=6
-CONFIG_ARM_L1_CACHE_SHIFT_6=y
-CONFIG_ARM_LPAE=y
-CONFIG_ARM_PATCH_IDIV=y
-CONFIG_ARM_PATCH_PHYS_VIRT=y
-CONFIG_ARM_PMU=y
-CONFIG_ARM_PSCI=y
-CONFIG_ARM_PSCI_FW=y
-# CONFIG_ARM_SMMU is not set
-CONFIG_ARM_THUMB=y
-CONFIG_ARM_THUMBEE=y
-CONFIG_ARM_TIMER_SP804=y
-CONFIG_ARM_UNWIND=y
-CONFIG_ARM_VIRT_EXT=y
-CONFIG_AT803X_PHY=y
-# CONFIG_AT803X_PHY_SMART_EEE is not set
-CONFIG_ATAGS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_AUTO_ZRELADDR=y
-CONFIG_BATTERY_SBS=y
-CONFIG_BCM_NET_PHYLIB=y
-CONFIG_BINARY_PRINTF=y
-CONFIG_BLK_CMDLINE_PARSER=y
-CONFIG_BLK_DEV_BSG=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=262144
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-# CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_MQ_VIRTIO=y
-CONFIG_BLK_SCSI_REQUEST=y
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
-CONFIG_BOUNCE=y
-# CONFIG_BPF_SYSCALL is not set
-CONFIG_BRCMSTB_GISB_ARB=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_BUILD_BIN2C=y
-CONFIG_CACHE_L2X0=y
-# CONFIG_CACHE_L2X0_PMU is not set
-CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_CLKSRC_IMX_GPT=y
-CONFIG_CLKSRC_MMIO=y
-CONFIG_CLK_QORIQ=y
-CONFIG_CLONE_BACKWARDS=y
-CONFIG_CMA=y
-CONFIG_CMA_ALIGNMENT=8
-CONFIG_CMA_AREAS=7
-# CONFIG_CMA_DEBUG is not set
-# CONFIG_CMA_DEBUGFS is not set
-CONFIG_CMA_SIZE_MBYTES=64
-# CONFIG_CMA_SIZE_SEL_MAX is not set
-CONFIG_CMA_SIZE_SEL_MBYTES=y
-# CONFIG_CMA_SIZE_SEL_MIN is not set
-# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
-CONFIG_CMDLINE_PARTITION=y
-CONFIG_COMMON_CLK=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
-CONFIG_COREDUMP=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_CPUFREQ_DT_PLATDEV=y
-CONFIG_CPU_32v6K=y
-CONFIG_CPU_32v7=y
-CONFIG_CPU_ABRT_EV7=y
-# CONFIG_CPU_BIG_ENDIAN is not set
-# CONFIG_CPU_BPREDICT_DISABLE is not set
-CONFIG_CPU_CACHE_V7=y
-CONFIG_CPU_CACHE_VIPT=y
-CONFIG_CPU_COPY_V6=y
-CONFIG_CPU_CP15=y
-CONFIG_CPU_CP15_MMU=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
-# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
-CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_HAS_ASID=y
-# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
-# CONFIG_CPU_ICACHE_DISABLE is not set
-CONFIG_CPU_IDLE=y
-# CONFIG_CPU_IDLE_GOV_LADDER is not set
-CONFIG_CPU_IDLE_GOV_MENU=y
-CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
-CONFIG_CPU_PABRT_V7=y
-CONFIG_CPU_PM=y
-CONFIG_CPU_RMAP=y
-CONFIG_CPU_SPECTRE=y
-CONFIG_CPU_THERMAL=y
-CONFIG_CPU_THUMB_CAPABLE=y
-CONFIG_CPU_TLB_V7=y
-CONFIG_CPU_V7=y
-CONFIG_CRASH_CORE=y
-CONFIG_CRC16=y
-# CONFIG_CRC32_SARWATE is not set
-CONFIG_CRC32_SLICEBY8=y
-CONFIG_CROSS_MEMORY_ATTACH=y
-CONFIG_CRYPTO_ACOMP2=y
-CONFIG_CRYPTO_AEAD=y
-CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_NULL2=y
-CONFIG_CRYPTO_RNG2=y
-# CONFIG_CRYPTO_TLS is not set
-CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_DEBUG_ALIGN_RODATA=y
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_IMX_UART_PORT=1
-CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S"
-# CONFIG_DEBUG_UART_8250 is not set
-# CONFIG_DEBUG_USER is not set
-CONFIG_DECOMPRESS_BZIP2=y
-CONFIG_DECOMPRESS_GZIP=y
-CONFIG_DECOMPRESS_LZMA=y
-CONFIG_DECOMPRESS_LZO=y
-CONFIG_DECOMPRESS_XZ=y
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
-CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEVFREQ_GOV_PASSIVE is not set
-# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set
-# CONFIG_DEVFREQ_GOV_POWERSAVE is not set
-# CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND is not set
-# CONFIG_DEVFREQ_GOV_USERSPACE is not set
-# CONFIG_DEVFREQ_THERMAL is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMADEVICES=y
-CONFIG_DMA_CMA=y
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_OF=y
-CONFIG_DMA_SHARED_BUFFER=y
-CONFIG_DMA_VIRTUAL_CHANNELS=y
-CONFIG_DMI=y
-CONFIG_DMIID=y
-# CONFIG_DMI_SYSFS is not set
-CONFIG_DNOTIFY=y
-CONFIG_DTC=y
-CONFIG_DT_IDLE_STATES=y
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_DW_DMAC=y
-CONFIG_DW_DMAC_CORE=y
-CONFIG_DW_WATCHDOG=y
-CONFIG_EDAC_ATOMIC_SCRUB=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EEPROM_93CX6=y
-CONFIG_EEPROM_AT24=y
-CONFIG_EFI=y
-# CONFIG_EFIVAR_FS is not set
-CONFIG_EFI_ARMSTUB=y
-# CONFIG_EFI_CAPSULE_LOADER is not set
-CONFIG_EFI_ESRT=y
-CONFIG_EFI_PARAMS_FROM_FDT=y
-CONFIG_EFI_RUNTIME_WRAPPERS=y
-CONFIG_EFI_STUB=y
-# CONFIG_EFI_TEST is not set
-# CONFIG_EFI_VARS is not set
-CONFIG_ELF_CORE=y
-# CONFIG_ENABLE_DEFAULT_TRACERS is not set
-CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_EVENT_TRACING=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_FAT_FS=y
-# CONFIG_FEC is not set
-CONFIG_FHANDLE=y
-CONFIG_FIRMWARE_IN_KERNEL=y
-CONFIG_FIXED_PHY=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_FORCE_MAX_ZONEORDER=12
-CONFIG_FREEZER=y
-# CONFIG_FSL_DPAA2_ETH_CEETM is not set
-CONFIG_FSL_EDMA=y
-CONFIG_FSL_GUTS=y
-CONFIG_FSL_IFC=y
-# CONFIG_FSL_PPFE is not set
-CONFIG_FSL_PQ_MDIO=y
-# CONFIG_FSL_QDMA is not set
-# CONFIG_FSL_QIXIS is not set
-# CONFIG_FSL_SDK_DPA is not set
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_FS_MBCACHE=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_FTM_ALARM=y
-CONFIG_FTRACE=y
-# CONFIG_FTRACE_SYSCALLS is not set
-CONFIG_FUSE_FS=y
-# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_GENERIC_ARCH_TOPOLOGY=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_GENERIC_IDLE_POLL_SETUP=y
-CONFIG_GENERIC_IO=y
-CONFIG_GENERIC_IRQ_CHIP=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_IRQ_MIGRATION=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_PINCONF=y
-CONFIG_GENERIC_PINCTRL_GROUPS=y
-CONFIG_GENERIC_PINMUX_FUNCTIONS=y
-CONFIG_GENERIC_SCHED_CLOCK=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GIANFAR=y
-CONFIG_GLOB=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_GENERIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_GPIO_MXC=y
-# CONFIG_GRO_CELLS is not set
-CONFIG_HANDLE_DOMAIN_IRQ=y
-CONFIG_HARDEN_BRANCH_PREDICTOR=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_HAS_DMA=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT_MAP=y
-# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
-CONFIG_HAVE_ARCH_AUDITSYSCALL=y
-CONFIG_HAVE_ARCH_BITREVERSE=y
-CONFIG_HAVE_ARCH_JUMP_LABEL=y
-CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_HAVE_ARCH_PFN_VALID=y
-CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
-CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-CONFIG_HAVE_ARM_ARCH_TIMER=y
-CONFIG_HAVE_ARM_SMCCC=y
-# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
-CONFIG_HAVE_CC_STACKPROTECTOR=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-CONFIG_HAVE_DMA_API_DEBUG=y
-CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_HAVE_EBPF_JIT=y
-CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_GENERIC_DMA_COHERENT=y
-CONFIG_HAVE_GENERIC_GUP=y
-CONFIG_HAVE_HW_BREAKPOINT=y
-CONFIG_HAVE_IDE=y
-CONFIG_HAVE_IMX_SRC=y
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
-CONFIG_HAVE_MEMBLOCK=y
-CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
-CONFIG_HAVE_NET_DSA=y
-CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_OPTPROBES=y
-CONFIG_HAVE_PERF_EVENTS=y
-CONFIG_HAVE_PERF_REGS=y
-CONFIG_HAVE_PERF_USER_STACK_DUMP=y
-CONFIG_HAVE_PROC_CPU=y
-CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
-CONFIG_HAVE_SMP=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_UID16=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
-CONFIG_HID=y
-CONFIG_HID_GENERIC=y
-CONFIG_HIGHMEM=y
-CONFIG_HIGHPTE=y
-CONFIG_HOTPLUG_CPU=y
-# CONFIG_HUGETLBFS is not set
-CONFIG_HVC_DRIVER=y
-CONFIG_HW_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_IMX_RNGC=y
-CONFIG_HZ_FIXED=0
-CONFIG_I2C=y
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_COMPAT=y
-CONFIG_I2C_DEMUX_PINCTRL=y
-CONFIG_I2C_DESIGNWARE_CORE=y
-CONFIG_I2C_DESIGNWARE_PLATFORM=y
-# CONFIG_I2C_DESIGNWARE_SLAVE is not set
-CONFIG_I2C_HELPER_AUTO=y
-CONFIG_I2C_IMX=y
-# CONFIG_I2C_IMX_LPI2C is not set
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_I2C_MUX_PINCTRL=y
-CONFIG_I2C_RK3X=y
-CONFIG_I2C_SLAVE=y
-CONFIG_I2C_SLAVE_EEPROM=y
-CONFIG_I2C_XILINX=y
-CONFIG_ICPLUS_PHY=y
-CONFIG_ICS932S401=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_IMX2_WDT=y
-CONFIG_IMX_DMA=y
-CONFIG_IMX_SDMA=y
-# CONFIG_IMX_WEIM is not set
-CONFIG_INITRAMFS_SOURCE=""
-# CONFIG_INPHI_PHY is not set
-CONFIG_INPUT=y
-# CONFIG_INPUT_MISC is not set
-CONFIG_IOMMU_HELPER=y
-# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
-# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
-CONFIG_IOMMU_SUPPORT=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_IPC_NS=y
-CONFIG_IRQCHIP=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_IRQ_WORK=y
-# CONFIG_ISDN is not set
-CONFIG_ISL29003=y
-CONFIG_JBD2=y
-CONFIG_KALLSYMS=y
-CONFIG_KERNEL_GZIP=y
-# CONFIG_KERNEL_XZ is not set
-CONFIG_KEXEC=y
-CONFIG_KEXEC_CORE=y
-CONFIG_LIBFDT=y
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_LS_SCFG_MSI=y
-CONFIG_LS_SOC_DRIVERS=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MANDATORY_FILE_LOCKING=y
-CONFIG_MARVELL_PHY=y
-CONFIG_MCPM=y
-CONFIG_MDIO_BITBANG=y
-CONFIG_MDIO_BUS=y
-# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
-CONFIG_MDIO_DEVICE=y
-# CONFIG_MDIO_FSL_BACKPLANE is not set
-# CONFIG_MDIO_GPIO is not set
-CONFIG_MEMORY=y
-CONFIG_MEMORY_ISOLATION=y
-CONFIG_MFD_SYSCON=y
-# CONFIG_MFD_VEXPRESS_SYSREG is not set
-CONFIG_MICREL_PHY=y
-CONFIG_MIGHT_HAVE_CACHE_L2X0=y
-CONFIG_MIGHT_HAVE_PCI=y
-CONFIG_MIGRATION=y
-CONFIG_MMC=y
-CONFIG_MMC_BLOCK=y
-CONFIG_MMC_BLOCK_MINORS=16
-# CONFIG_MMC_MXC is not set
-CONFIG_MMC_SDHCI=y
-# CONFIG_MMC_SDHCI_ESDHC_IMX is not set
-CONFIG_MMC_SDHCI_IO_ACCESSORS=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-# CONFIG_MMC_SDHCI_PCI is not set
-CONFIG_MMC_SDHCI_PLTFM=y
-# CONFIG_MMC_TIFM_SD is not set
-CONFIG_MODULES_TREE_LOOKUP=y
-CONFIG_MODULES_USE_ELF_REL=y
-CONFIG_MSDOS_FS=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_CMDLINE_PARTS=y
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-CONFIG_MTD_DATAFLASH=y
-# CONFIG_MTD_DATAFLASH_OTP is not set
-# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
-CONFIG_MTD_SST25L=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_BEB_LIMIT=20
-# CONFIG_MTD_UBI_BLOCK is not set
-# CONFIG_MTD_UBI_FASTMAP is not set
-# CONFIG_MTD_UBI_GLUEBI is not set
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MULTI_IRQ_HANDLER=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_MX3_IPU=y
-CONFIG_MX3_IPU_IRQS=4
-# CONFIG_MXS_DMA is not set
-CONFIG_NAMESPACES=y
-CONFIG_NATIONAL_PHY=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NEON=y
-# CONFIG_NET_CADENCE is not set
-CONFIG_NET_FLOW_LIMIT=y
-CONFIG_NET_NS=y
-CONFIG_NET_PTP_CLASSIFY=y
-CONFIG_NET_SWITCHDEV=y
-CONFIG_NLS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=y
-CONFIG_NOP_TRACER=y
-CONFIG_NO_BOOTMEM=y
-CONFIG_NO_HZ=y
-CONFIG_NO_HZ_COMMON=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NR_CPUS=16
-CONFIG_NTFS_FS=y
-CONFIG_NVMEM=y
-# CONFIG_NVMEM_IMX_IIM is not set
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
-CONFIG_OF_ADDRESS_PCI=y
-CONFIG_OF_DYNAMIC=y
-CONFIG_OF_EARLY_FLATTREE=y
-CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_MDIO=y
-CONFIG_OF_NET=y
-CONFIG_OF_PCI=y
-CONFIG_OF_PCI_IRQ=y
-CONFIG_OF_RESERVED_MEM=y
-CONFIG_OLD_SIGACTION=y
-CONFIG_OLD_SIGSUSPEND3=y
-CONFIG_OUTER_CACHE=y
-CONFIG_OUTER_CACHE_SYNC=y
-CONFIG_PACKET_DIAG=y
-CONFIG_PADATA=y
-CONFIG_PAGE_OFFSET=0xC0000000
-# CONFIG_PANIC_ON_OOPS is not set
-CONFIG_PANIC_ON_OOPS_VALUE=0
-CONFIG_PANIC_TIMEOUT=0
-CONFIG_PCI=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCIE_DW=y
-CONFIG_PCIE_DW_HOST=y
-# CONFIG_PCIE_DW_PLAT_HOST is not set
-CONFIG_PCIE_PME=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_DOMAINS_GENERIC=y
-CONFIG_PCI_ECAM=y
-CONFIG_PCI_HOST_COMMON=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_LABEL=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-CONFIG_PERF_EVENTS=y
-CONFIG_PERF_USE_VMALLOC=y
-CONFIG_PGTABLE_LEVELS=3
-CONFIG_PHYLIB=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_PID_NS=y
-CONFIG_PINCTRL=y
-CONFIG_PL310_ERRATA_588369=y
-CONFIG_PL310_ERRATA_727915=y
-CONFIG_PL310_ERRATA_753970=y
-CONFIG_PL310_ERRATA_769419=y
-CONFIG_PM=y
-CONFIG_PM_CLK=y
-# CONFIG_PM_DEBUG is not set
-CONFIG_PM_DEVFREQ=y
-# CONFIG_PM_DEVFREQ_EVENT is not set
-CONFIG_PM_OPP=y
-CONFIG_PM_SLEEP=y
-CONFIG_PM_SLEEP_SMP=y
-CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_BRCMKONA=y
-CONFIG_POWER_RESET_BRCMSTB=y
-CONFIG_POWER_RESET_GPIO=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-CONFIG_POWER_RESET_SYSCON=y
-CONFIG_POWER_RESET_SYSCON_POWEROFF=y
-CONFIG_POWER_RESET_VEXPRESS=y
-CONFIG_POWER_SUPPLY=y
-CONFIG_PPS=y
-CONFIG_PRINTK_TIME=y
-CONFIG_PROBE_EVENTS=y
-CONFIG_PROC_CHILDREN=y
-CONFIG_PROC_PAGE_MONITOR=y
-CONFIG_PSTORE=y
-CONFIG_PSTORE_CONSOLE=y
-# CONFIG_PSTORE_LZ4_COMPRESS is not set
-# CONFIG_PSTORE_LZO_COMPRESS is not set
-CONFIG_PSTORE_PMSG=y
-CONFIG_PSTORE_RAM=y
-CONFIG_PSTORE_ZLIB_COMPRESS=y
-CONFIG_PTP_1588_CLOCK=y
-CONFIG_PTP_1588_CLOCK_QORIQ=y
-CONFIG_QORIQ_CPUFREQ=y
-# CONFIG_QUICC_ENGINE is not set
-CONFIG_RAS=y
-CONFIG_RATIONAL=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=21
-# CONFIG_RCU_EXPERT is not set
-CONFIG_RCU_NEED_SEGCBLIST=y
-CONFIG_RCU_STALL_COMMON=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_GZIP=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_LZO=y
-CONFIG_RD_XZ=y
-CONFIG_REALTEK_PHY=y
-CONFIG_REED_SOLOMON=y
-CONFIG_REED_SOLOMON_DEC8=y
-CONFIG_REED_SOLOMON_ENC8=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_I2C=y
-CONFIG_REGMAP_MMIO=y
-CONFIG_REGMAP_SPI=y
-# CONFIG_RESET_ATTACK_MITIGATION is not set
-CONFIG_RESET_CONTROLLER=y
-CONFIG_RFS_ACCEL=y
-CONFIG_RING_BUFFER=y
-CONFIG_RPS=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_DRV_CMOS is not set
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS3232=y
-# CONFIG_RTC_DRV_EFI is not set
-CONFIG_RTC_DRV_EM3027=y
-# CONFIG_RTC_DRV_IMXDI is not set
-# CONFIG_RTC_DRV_MXC is not set
-CONFIG_RTC_DRV_PCF2127=y
-CONFIG_RTC_DRV_PCF85263=y
-CONFIG_RTC_I2C_AND_SPI=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHED_INFO is not set
-CONFIG_SCSI=y
-CONFIG_SECCOMP=y
-CONFIG_SECCOMP_FILTER=y
-# CONFIG_SECURITY_DMESG_RESTRICT is not set
-CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
-CONFIG_SERIAL_8250_DW=y
-CONFIG_SERIAL_8250_EM=y
-CONFIG_SERIAL_8250_FSL=y
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_BCM63XX=y
-CONFIG_SERIAL_BCM63XX_CONSOLE=y
-CONFIG_SERIAL_CONEXANT_DIGICOLOR=y
-CONFIG_SERIAL_CONEXANT_DIGICOLOR_CONSOLE=y
-CONFIG_SERIAL_FSL_LPUART=y
-CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
-CONFIG_SERIAL_IMX=y
-CONFIG_SERIAL_IMX_CONSOLE=y
-CONFIG_SERIAL_MCTRL_GPIO=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_ST_ASC=y
-CONFIG_SERIAL_ST_ASC_CONSOLE=y
-CONFIG_SERIAL_XILINX_PS_UART=y
-CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
-CONFIG_SG_POOL=y
-CONFIG_SLUB_DEBUG=y
-CONFIG_SMP=y
-CONFIG_SMP_ON_UP=y
-CONFIG_SMSC_PHY=y
-CONFIG_SOCK_DIAG=y
-CONFIG_SOC_BRCMSTB=y
-CONFIG_SOC_BUS=y
-# CONFIG_SOC_IMX50 is not set
-# CONFIG_SOC_IMX51 is not set
-# CONFIG_SOC_IMX53 is not set
-# CONFIG_SOC_IMX6Q is not set
-# CONFIG_SOC_IMX6SL is not set
-# CONFIG_SOC_IMX6SX is not set
-# CONFIG_SOC_IMX6UL is not set
-# CONFIG_SOC_IMX7D is not set
-CONFIG_SOC_LS1021A=y
-# CONFIG_SOC_VF610 is not set
-CONFIG_SPARSE_IRQ=y
-CONFIG_SPI=y
-CONFIG_SPI_BITBANG=y
-CONFIG_SPI_CADENCE=y
-# CONFIG_SPI_FSL_LPSPI is not set
-# CONFIG_SPI_FSL_QUADSPI is not set
-# CONFIG_SPI_IMX is not set
-CONFIG_SPI_MASTER=y
-# CONFIG_SPI_NXP_FLEXSPI is not set
-CONFIG_SPI_SPIDEV=y
-CONFIG_SPI_XILINX=y
-CONFIG_SPMI=y
-# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
-CONFIG_SQUASHFS_DECOMP_SINGLE=y
-CONFIG_SQUASHFS_FILE_CACHE=y
-# CONFIG_SQUASHFS_FILE_DIRECT is not set
-CONFIG_SQUASHFS_LZO=y
-CONFIG_SQUASHFS_ZLIB=y
-CONFIG_SRAM=y
-CONFIG_SRAM_EXEC=y
-CONFIG_SRCU=y
-CONFIG_STACKTRACE=y
-CONFIG_STAGING_BOARD=y
-# CONFIG_STRIP_ASM_SYMS is not set
-CONFIG_SUSPEND=y
-CONFIG_SUSPEND_FREEZER=y
-CONFIG_SWIOTLB=y
-CONFIG_SWPHY=y
-CONFIG_SWP_EMULATE=y
-CONFIG_SYNC_FILE=y
-CONFIG_SYSFS_SYSCALL=y
-CONFIG_SYS_SUPPORTS_APM_EMULATION=y
-CONFIG_SYS_SUPPORTS_HUGETLBFS=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_THERMAL=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_OF=y
-# CONFIG_THUMB2_KERNEL is not set
-CONFIG_TICK_CPU_ACCOUNTING=y
-CONFIG_TIMER_OF=y
-CONFIG_TIMER_PROBE=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TRACEPOINTS=y
-CONFIG_TRACE_CLOCK=y
-CONFIG_TRACING=y
-CONFIG_TRACING_EVENTS_GPIO=y
-CONFIG_TREE_RCU=y
-CONFIG_TREE_SRCU=y
-CONFIG_UBIFS_FS=y
-# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
-CONFIG_UCS2_STRING=y
-CONFIG_UEVENT_HELPER_PATH=""
-CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h"
-CONFIG_UNIX_DIAG=y
-CONFIG_UPROBES=y
-CONFIG_UPROBE_EVENTS=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USER_NS=y
-CONFIG_USE_OF=y
-CONFIG_UTS_NS=y
-CONFIG_VDSO=y
-CONFIG_VECTORS_BASE=0xffff0000
-CONFIG_VEXPRESS_CONFIG=y
-CONFIG_VEXPRESS_SYSCFG=y
-CONFIG_VFAT_FS=y
-CONFIG_VFP=y
-CONFIG_VFPv3=y
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_CONSOLE=y
-CONFIG_VIRTIO_MMIO=y
-# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
-CONFIG_VIRTIO_NET=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_VT_CONSOLE_SLEEP=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_WATCHDOG_CORE=y
-# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
-CONFIG_XILINX_WATCHDOG=y
-CONFIG_XPS=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_BCJ=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_SPARC=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_ZBOOT_ROM_BSS=0
-CONFIG_ZBOOT_ROM_TEXT=0
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_ZLIB_INFLATE=y
diff --git a/target/linux/layerscape/armv8_64b/config-4.14 b/target/linux/layerscape/armv8_64b/config-4.14
deleted file mode 100644
index c1bd718366..0000000000
--- a/target/linux/layerscape/armv8_64b/config-4.14
+++ /dev/null
@@ -1,983 +0,0 @@
-CONFIG_64BIT=y
-# CONFIG_ACPI is not set
-# CONFIG_AHCI_XGENE is not set
-CONFIG_AQUANTIA_PHY=y
-CONFIG_ARCH_CLOCKSOURCE_DATA=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
-CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
-CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
-CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
-CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
-CONFIG_ARCH_HAS_KCOV=y
-CONFIG_ARCH_HAS_SET_MEMORY=y
-CONFIG_ARCH_HAS_SG_CHAIN=y
-CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
-CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
-CONFIG_ARCH_HAS_TICK_BROADCAST=y
-CONFIG_ARCH_HIBERNATION_HEADER=y
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_LAYERSCAPE=y
-CONFIG_ARCH_MMAP_RND_BITS=18
-CONFIG_ARCH_MMAP_RND_BITS_MAX=33
-CONFIG_ARCH_MMAP_RND_BITS_MIN=18
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
-# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set
-# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
-CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
-CONFIG_ARCH_PROC_KCORE_TEXT=y
-CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_SPARSEMEM_DEFAULT=y
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
-CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
-CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
-CONFIG_ARCH_SUPPORTS_UPROBES=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
-CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
-CONFIG_ARCH_WANT_FRAME_POINTERS=y
-CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
-CONFIG_ARM64=y
-# CONFIG_ARM64_16K_PAGES is not set
-CONFIG_ARM64_4K_PAGES=y
-# CONFIG_ARM64_64K_PAGES is not set
-CONFIG_ARM64_CONT_SHIFT=4
-# CONFIG_ARM64_CRYPTO is not set
-CONFIG_ARM64_ERRATUM_819472=y
-CONFIG_ARM64_ERRATUM_824069=y
-CONFIG_ARM64_ERRATUM_826319=y
-CONFIG_ARM64_ERRATUM_827319=y
-CONFIG_ARM64_ERRATUM_832075=y
-CONFIG_ARM64_ERRATUM_843419=y
-CONFIG_ARM64_HW_AFDBM=y
-# CONFIG_ARM64_LSE_ATOMICS is not set
-CONFIG_ARM64_MODULE_CMODEL_LARGE=y
-CONFIG_ARM64_PAGE_SHIFT=12
-CONFIG_ARM64_PAN=y
-# CONFIG_ARM64_PMEM is not set
-# CONFIG_ARM64_PTDUMP_CORE is not set
-# CONFIG_ARM64_PTDUMP_DEBUGFS is not set
-# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
-CONFIG_ARM64_SSBD=y
-CONFIG_ARM64_UAO=y
-CONFIG_ARM64_VA_BITS=48
-# CONFIG_ARM64_VA_BITS_39 is not set
-CONFIG_ARM64_VA_BITS_48=y
-CONFIG_ARM64_VHE=y
-CONFIG_ARM_AMBA=y
-CONFIG_ARM_ARCH_TIMER=y
-CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
-CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y
-CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
-CONFIG_ARM_CPUIDLE=y
-# CONFIG_ARM_DT_BL_CPUFREQ is not set
-CONFIG_ARM_GIC=y
-CONFIG_ARM_GIC_V2M=y
-CONFIG_ARM_GIC_V3=y
-CONFIG_ARM_GIC_V3_ITS=y
-CONFIG_ARM_GIC_V3_ITS_FSL_MC=y
-# CONFIG_ARM_PL172_MPMC is not set
-CONFIG_ARM_PMU=y
-CONFIG_ARM_PSCI_FW=y
-CONFIG_ARM_SMMU=y
-CONFIG_ARM_SMMU_V3=y
-CONFIG_ARM_SP805_WATCHDOG=y
-CONFIG_ARM_TIMER_SP804=y
-CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y
-CONFIG_ATA=y
-CONFIG_AUDIT=y
-CONFIG_AUDITSYSCALL=y
-CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
-CONFIG_AUDIT_GENERIC=y
-CONFIG_AUDIT_TREE=y
-CONFIG_AUDIT_WATCH=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BALLOON_COMPACTION=y
-CONFIG_BATTERY_BQ27XXX=y
-# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
-CONFIG_BATTERY_BQ27XXX_I2C=y
-CONFIG_BLK_DEV_BSG=y
-CONFIG_BLK_DEV_BSGLIB=y
-CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=262144
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_MQ_VIRTIO=y
-CONFIG_BLK_SCSI_REQUEST=y
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
-CONFIG_BOUNCE=y
-# CONFIG_BPF_SYSCALL is not set
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_BTRFS_FS=y
-# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
-CONFIG_BTRFS_FS_POSIX_ACL=y
-CONFIG_BUILD_BIN2C=y
-CONFIG_CAVIUM_ERRATUM_22375=y
-CONFIG_CAVIUM_ERRATUM_23144=y
-CONFIG_CAVIUM_ERRATUM_23154=y
-CONFIG_CAVIUM_ERRATUM_27456=y
-CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_CHROME_PLATFORMS=y
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_CLKSRC_MMIO=y
-CONFIG_CLK_QORIQ=y
-CONFIG_CLK_SP810=y
-CONFIG_CLK_VEXPRESS_OSC=y
-CONFIG_CLONE_BACKWARDS=y
-CONFIG_CMA=y
-CONFIG_CMA_ALIGNMENT=8
-CONFIG_CMA_AREAS=7
-# CONFIG_CMA_DEBUG is not set
-# CONFIG_CMA_DEBUGFS is not set
-CONFIG_CMA_SIZE_MBYTES=16
-# CONFIG_CMA_SIZE_SEL_MAX is not set
-CONFIG_CMA_SIZE_SEL_MBYTES=y
-# CONFIG_CMA_SIZE_SEL_MIN is not set
-# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
-CONFIG_COMMON_CLK=y
-CONFIG_COMMON_CLK_CS2000_CP=y
-CONFIG_COMMON_CLK_VERSATILE=y
-CONFIG_COMMON_CLK_XGENE=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_COREDUMP=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_CPUFREQ_DT_PLATDEV=y
-# CONFIG_CPU_BIG_ENDIAN is not set
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_STAT=y
-# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
-CONFIG_CPU_IDLE=y
-# CONFIG_CPU_IDLE_GOV_LADDER is not set
-CONFIG_CPU_IDLE_GOV_MENU=y
-CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
-CONFIG_CPU_PM=y
-CONFIG_CPU_RMAP=y
-CONFIG_CPU_THERMAL=y
-CONFIG_CRASH_CORE=y
-CONFIG_CRC16=y
-# CONFIG_CRC32_SARWATE is not set
-CONFIG_CRC32_SLICEBY8=y
-CONFIG_CRC7=y
-CONFIG_CRC_ITU_T=y
-CONFIG_CRC_T10DIF=y
-CONFIG_CROSS_MEMORY_ATTACH=y
-CONFIG_CRYPTO_ACOMP2=y
-CONFIG_CRYPTO_AEAD=y
-CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_CRCT10DIF=y
-CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM is not set
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_HW=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_NULL2=y
-CONFIG_CRYPTO_RNG2=y
-# CONFIG_CRYPTO_TLS is not set
-CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_DEBUG_BUGVERBOSE=y
-# CONFIG_DEBUG_EFI is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_DEBUG_INFO_REDUCED is not set
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DECOMPRESS_BZIP2=y
-CONFIG_DECOMPRESS_GZIP=y
-CONFIG_DECOMPRESS_LZMA=y
-CONFIG_DECOMPRESS_LZO=y
-CONFIG_DECOMPRESS_XZ=y
-CONFIG_DEFAULT_CFQ=y
-CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
-CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMADEVICES=y
-CONFIG_DMATEST=y
-CONFIG_DMA_CMA=y
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_ENGINE_RAID=y
-CONFIG_DMA_OF=y
-CONFIG_DMA_SHARED_BUFFER=y
-CONFIG_DMA_VIRTUAL_CHANNELS=y
-CONFIG_DMI=y
-CONFIG_DMIID=y
-# CONFIG_DMI_SYSFS is not set
-CONFIG_DNOTIFY=y
-CONFIG_DTC=y
-CONFIG_DT_IDLE_STATES=y
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EEPROM_AT24=y
-CONFIG_EFI=y
-CONFIG_EFIVAR_FS=y
-CONFIG_EFI_ARMSTUB=y
-# CONFIG_EFI_CAPSULE_LOADER is not set
-CONFIG_EFI_ESRT=y
-CONFIG_EFI_PARAMS_FROM_FDT=y
-CONFIG_EFI_RUNTIME_WRAPPERS=y
-CONFIG_EFI_STUB=y
-# CONFIG_EFI_TEST is not set
-# CONFIG_EFI_VARS is not set
-CONFIG_ELF_CORE=y
-# CONFIG_EMBEDDED is not set
-CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-# CONFIG_EXT2_FS_SECURITY is not set
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-# CONFIG_EXT3_FS_SECURITY is not set
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXTCON=y
-CONFIG_EXTCON_USB_GPIO=y
-CONFIG_FANOTIFY=y
-CONFIG_FAT_FS=y
-CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_CMDLINE=y
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_EFI is not set
-CONFIG_FB_MODE_HELPERS=y
-# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set
-CONFIG_FB_SYS_COPYAREA=y
-CONFIG_FB_SYS_FILLRECT=y
-CONFIG_FB_SYS_FOPS=y
-CONFIG_FB_SYS_IMAGEBLIT=y
-CONFIG_FHANDLE=y
-CONFIG_FIRMWARE_IN_KERNEL=y
-CONFIG_FIXED_PHY=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_FMAN_ARM=y
-# CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN is not set
-# CONFIG_FMAN_P1023 is not set
-# CONFIG_FMAN_P3040_P4080_P5020 is not set
-# CONFIG_FMAN_PFC is not set
-# CONFIG_FMAN_V3H is not set
-# CONFIG_FMAN_V3L is not set
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x16=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_SUPPORT=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-CONFIG_FRAME_POINTER=y
-CONFIG_FRAME_WARN=2048
-CONFIG_FREEZER=y
-CONFIG_FSL_BMAN_CONFIG=y
-CONFIG_FSL_BMAN_DEBUGFS=y
-# CONFIG_FSL_BMAN_TEST is not set
-# CONFIG_FSL_DPAA is not set
-CONFIG_FSL_DPAA2=y
-CONFIG_FSL_DPAA2_ETH=y
-CONFIG_FSL_DPAA2_ETHSW=y
-# CONFIG_FSL_DPAA2_ETH_CEETM is not set
-# CONFIG_FSL_DPAA2_ETH_DEBUGFS is not set
-# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set
-CONFIG_FSL_DPAA2_EVB=y
-CONFIG_FSL_DPAA2_MAC=y
-# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set
-CONFIG_FSL_DPAA2_PTP_CLOCK=y
-# CONFIG_FSL_DPAA2_QDMA is not set
-# CONFIG_FSL_DPAA_1588 is not set
-CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y
-# CONFIG_FSL_DPAA_CEETM is not set
-CONFIG_FSL_DPAA_CS_THRESHOLD_10G=0x10000000
-CONFIG_FSL_DPAA_CS_THRESHOLD_1G=0x06000000
-# CONFIG_FSL_DPAA_DBG_LOOP is not set
-# CONFIG_FSL_DPAA_ETH_DEBUG is not set
-CONFIG_FSL_DPAA_ETH_DEBUGFS=y
-# CONFIG_FSL_DPAA_ETH_JUMBO_FRAME is not set
-CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128
-CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80
-# CONFIG_FSL_DPAA_HOOKS is not set
-CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000
-CONFIG_FSL_DPAA_OFFLINE_PORTS=y
-# CONFIG_FSL_DPAA_TS is not set
-CONFIG_FSL_DPA_CAN_WAIT=y
-CONFIG_FSL_DPA_CAN_WAIT_SYNC=y
-# CONFIG_FSL_DPA_CHECKING is not set
-CONFIG_FSL_DPA_PIRQ_FAST=y
-CONFIG_FSL_DPA_PIRQ_SLOW=y
-CONFIG_FSL_DPA_PORTAL_SHARE=y
-CONFIG_FSL_EDMA=y
-CONFIG_FSL_ERRATUM_A008585=y
-# CONFIG_FSL_FMAN is not set
-CONFIG_FSL_FM_MAX_FRAME_SIZE=1522
-CONFIG_FSL_FM_RX_EXTRA_HEADROOM=64
-CONFIG_FSL_GUTS=y
-CONFIG_FSL_IFC=y
-CONFIG_FSL_LS2_CONSOLE=y
-CONFIG_FSL_MC_BUS=y
-CONFIG_FSL_MC_DPIO=y
-CONFIG_FSL_MC_RESTOOL=y
-CONFIG_FSL_PPFE=y
-CONFIG_FSL_PPFE_UTIL_DISABLED=y
-# CONFIG_FSL_QBMAN_DEBUG is not set
-# CONFIG_FSL_QDMA is not set
-CONFIG_FSL_QIXIS=y
-CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W=2
-CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W=2
-CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV=4
-CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W=3
-CONFIG_FSL_QMAN_CONFIG=y
-CONFIG_FSL_QMAN_DEBUGFS=y
-CONFIG_FSL_QMAN_FQD_SZ=10
-CONFIG_FSL_QMAN_FQ_LOOKUP=y
-CONFIG_FSL_QMAN_INIT_TIMEOUT=10
-CONFIG_FSL_QMAN_PFDR_SZ=13
-CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH=12
-CONFIG_FSL_QMAN_PIRQ_IPERIOD=100
-CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH=4
-CONFIG_FSL_QMAN_POLL_LIMIT=32
-# CONFIG_FSL_QMAN_TEST is not set
-CONFIG_FSL_SDK_BMAN=y
-CONFIG_FSL_SDK_DPA=y
-CONFIG_FSL_SDK_DPAA_ETH=y
-CONFIG_FSL_SDK_FMAN=y
-# CONFIG_FSL_SDK_FMAN_TEST is not set
-CONFIG_FSL_SDK_QMAN=y
-CONFIG_FSL_USDPAA=y
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_FS_IOMAP=y
-CONFIG_FS_MBCACHE=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_FTM_ALARM=y
-CONFIG_FUSE_FS=y
-# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
-CONFIG_GARP=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_GENERIC_ARCH_TOPOLOGY=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CSUM=y
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_GENERIC_IDLE_POLL_SETUP=y
-CONFIG_GENERIC_IO=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_IRQ_MIGRATION=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_SCHED_CLOCK=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-# CONFIG_GIANFAR is not set
-CONFIG_GLOB=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_GENERIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_GPIO_SYSFS=y
-# CONFIG_GRO_CELLS is not set
-CONFIG_HANDLE_DOMAIN_IRQ=y
-CONFIG_HARDEN_BRANCH_PREDICTOR=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_HAS_DMA=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT_MAP=y
-# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
-CONFIG_HAVE_ARCH_AUDITSYSCALL=y
-CONFIG_HAVE_ARCH_BITREVERSE=y
-CONFIG_HAVE_ARCH_HUGE_VMAP=y
-CONFIG_HAVE_ARCH_JUMP_LABEL=y
-CONFIG_HAVE_ARCH_KASAN=y
-CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_HAVE_ARCH_PFN_VALID=y
-CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
-CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-CONFIG_HAVE_ARCH_VMAP_STACK=y
-CONFIG_HAVE_ARM_SMCCC=y
-# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
-CONFIG_HAVE_CC_STACKPROTECTOR=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_HAVE_CMPXCHG_DOUBLE=y
-CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_HAVE_DEBUG_BUGVERBOSE=y
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-CONFIG_HAVE_DMA_API_DEBUG=y
-CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_EBPF_JIT=y
-CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_GENERIC_DMA_COHERENT=y
-CONFIG_HAVE_GENERIC_GUP=y
-CONFIG_HAVE_HW_BREAKPOINT=y
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
-CONFIG_HAVE_MEMBLOCK=y
-CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
-CONFIG_HAVE_MEMORY_PRESENT=y
-CONFIG_HAVE_NET_DSA=y
-CONFIG_HAVE_PATA_PLATFORM=y
-CONFIG_HAVE_PERF_EVENTS=y
-CONFIG_HAVE_PERF_REGS=y
-CONFIG_HAVE_PERF_USER_STACK_DUMP=y
-CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
-CONFIG_HIBERNATE_CALLBACKS=y
-CONFIG_HIBERNATION=y
-CONFIG_HID=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GENERIC=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HOLES_IN_ZONE=y
-CONFIG_HOTPLUG_CPU=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_HVC_DRIVER=y
-CONFIG_HVC_IRQ=y
-CONFIG_HVC_XEN=y
-CONFIG_HVC_XEN_FRONTEND=y
-CONFIG_HW_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_HZ=250
-# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
-CONFIG_I2C=y
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_COMPAT=y
-CONFIG_I2C_DESIGNWARE_CORE=y
-CONFIG_I2C_DESIGNWARE_PLATFORM=y
-# CONFIG_I2C_DESIGNWARE_SLAVE is not set
-CONFIG_I2C_HELPER_AUTO=y
-CONFIG_I2C_IMX=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_I2C_RK3X=y
-CONFIG_I2C_SLAVE=y
-# CONFIG_I2C_SLAVE_EEPROM is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
-CONFIG_IMX2_WDT=y
-CONFIG_INET_DIAG=y
-# CONFIG_INET_DIAG_DESTROY is not set
-# CONFIG_INET_RAW_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_INPHI_PHY=y
-CONFIG_INPUT=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_INPUT_MOUSE=y
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_DMA=y
-CONFIG_IOMMU_HELPER=y
-CONFIG_IOMMU_IOVA=y
-CONFIG_IOMMU_IO_PGTABLE=y
-# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
-CONFIG_IOMMU_IO_PGTABLE_LPAE=y
-# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
-CONFIG_IOMMU_SUPPORT=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_IPC_NS=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IRQCHIP=y
-CONFIG_IRQ_BYPASS_MANAGER=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_IRQ_WORK=y
-# CONFIG_ISDN is not set
-CONFIG_JBD2=y
-CONFIG_JUMP_LABEL=y
-CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_KEXEC=y
-CONFIG_KEXEC_CORE=y
-CONFIG_KEYBOARD_ATKBD=y
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_KSM=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_LIBCRC32C=y
-CONFIG_LIBFDT=y
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_CLUT224=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_LS_SCFG_MSI=y
-CONFIG_LS_SOC_DRIVERS=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MANDATORY_FILE_LOCKING=y
-CONFIG_MDIO_BITBANG=y
-CONFIG_MDIO_BUS=y
-CONFIG_MDIO_BUS_MUX=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
-CONFIG_MDIO_BUS_MUX_MULTIPLEXER=y
-CONFIG_MDIO_DEVICE=y
-# CONFIG_MDIO_FSL_BACKPLANE is not set
-# CONFIG_MDIO_GPIO is not set
-CONFIG_MEMORY=y
-CONFIG_MEMORY_BALLOON=y
-CONFIG_MEMORY_ISOLATION=y
-CONFIG_MEMTEST=y
-CONFIG_MFD_CORE=y
-CONFIG_MFD_SYSCON=y
-# CONFIG_MFD_VEXPRESS_SYSREG is not set
-CONFIG_MICREL_PHY=y
-CONFIG_MIGRATION=y
-CONFIG_MMC=y
-CONFIG_MMC_BLOCK=y
-CONFIG_MMC_BLOCK_MINORS=32
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_IO_ACCESSORS=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-# CONFIG_MMC_SDHCI_PCI is not set
-CONFIG_MMC_SDHCI_PLTFM=y
-# CONFIG_MMC_TIFM_SD is not set
-CONFIG_MMU_NOTIFIER=y
-CONFIG_MODULES_TREE_LOOKUP=y
-CONFIG_MODULES_USE_ELF_RELA=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_MOUSE_BCM5974 is not set
-# CONFIG_MOUSE_CYAPA is not set
-CONFIG_MOUSE_PS2=y
-CONFIG_MOUSE_PS2_ALPS=y
-CONFIG_MOUSE_PS2_BYD=y
-CONFIG_MOUSE_PS2_CYPRESS=y
-# CONFIG_MOUSE_PS2_ELANTECH is not set
-CONFIG_MOUSE_PS2_FOCALTECH=y
-CONFIG_MOUSE_PS2_LOGIPS2PP=y
-CONFIG_MOUSE_PS2_SMBUS=y
-CONFIG_MOUSE_PS2_SYNAPTICS=y
-CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-CONFIG_MOUSE_PS2_TRACKPOINT=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-CONFIG_MRP=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-# CONFIG_MTD_CFI_GEOMETRY is not set
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_CMDLINE_PARTS=y
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-CONFIG_MTD_DATAFLASH=y
-# CONFIG_MTD_DATAFLASH_OTP is not set
-# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
-CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=16384
-CONFIG_MTD_SPLIT_FIRMWARE=y
-CONFIG_MTD_SPLIT_FIT_FW=y
-CONFIG_MTD_SST25L=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_BEB_LIMIT=20
-# CONFIG_MTD_UBI_BLOCK is not set
-# CONFIG_MTD_UBI_FASTMAP is not set
-# CONFIG_MTD_UBI_GLUEBI is not set
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MULTIPLEXER=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-# CONFIG_MUX_ADG792A is not set
-# CONFIG_MUX_GPIO is not set
-CONFIG_MUX_MMIO=y
-CONFIG_MV_XOR_V2=y
-CONFIG_NAMESPACES=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NEED_MULTIPLE_NODES=y
-CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-# CONFIG_NET_CADENCE is not set
-CONFIG_NET_FLOW_LIMIT=y
-CONFIG_NET_NS=y
-CONFIG_NET_PTP_CLASSIFY=y
-CONFIG_NET_SWITCHDEV=y
-CONFIG_NLS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NODES_SHIFT=2
-CONFIG_NO_BOOTMEM=y
-CONFIG_NO_HZ_COMMON=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NR_CPUS=64
-CONFIG_NUMA=y
-CONFIG_NUMA_BALANCING=y
-CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
-CONFIG_NVMEM=y
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
-CONFIG_OF_ADDRESS_PCI=y
-CONFIG_OF_EARLY_FLATTREE=y
-CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
-CONFIG_OF_IOMMU=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_MDIO=y
-CONFIG_OF_NET=y
-CONFIG_OF_NUMA=y
-CONFIG_OF_PCI=y
-CONFIG_OF_PCI_IRQ=y
-CONFIG_OF_RESERVED_MEM=y
-CONFIG_PACKET_DIAG=y
-CONFIG_PADATA=y
-# CONFIG_PANIC_ON_OOPS is not set
-CONFIG_PANIC_ON_OOPS_VALUE=0
-CONFIG_PANIC_TIMEOUT=0
-CONFIG_PARAVIRT=y
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_PARTITION_PERCPU=y
-CONFIG_PCI=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCIE_DW=y
-CONFIG_PCIE_DW_HOST=y
-# CONFIG_PCIE_DW_PLAT_HOST is not set
-CONFIG_PCIE_MOBIVEIL=y
-CONFIG_PCIE_MOBIVEIL_HOST=y
-CONFIG_PCIE_PME=y
-CONFIG_PCI_ATS=y
-CONFIG_PCI_BUS_ADDR_T_64BIT=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_DOMAINS_GENERIC=y
-CONFIG_PCI_ECAM=y
-CONFIG_PCI_HISI=y
-CONFIG_PCI_HOST_COMMON=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_IOV=y
-CONFIG_PCI_LABEL=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_LAYERSCAPE_GEN4=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-CONFIG_PERF_EVENTS=y
-CONFIG_PGTABLE_LEVELS=4
-CONFIG_PHYLIB=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_PHY_XGENE=y
-CONFIG_PID_IN_CONTEXTIDR=y
-CONFIG_PID_NS=y
-CONFIG_PL330_DMA=y
-CONFIG_PM=y
-CONFIG_PM_CLK=y
-# CONFIG_PM_DEBUG is not set
-CONFIG_PM_OPP=y
-CONFIG_PM_SLEEP=y
-CONFIG_PM_SLEEP_SMP=y
-CONFIG_PM_STD_PARTITION=""
-CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_SYSCON=y
-CONFIG_POWER_RESET_VEXPRESS=y
-CONFIG_POWER_RESET_XGENE=y
-CONFIG_POWER_SUPPLY=y
-CONFIG_PPS=y
-CONFIG_PREEMPT=y
-CONFIG_PREEMPT_COUNT=y
-# CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_RCU=y
-CONFIG_PRINTK_TIME=y
-CONFIG_PRINT_QUOTA_WARNING=y
-CONFIG_PROC_CHILDREN=y
-CONFIG_PROFILING=y
-CONFIG_PTP_1588_CLOCK=y
-CONFIG_PTP_1588_CLOCK_QORIQ=y
-CONFIG_QCOM_HIDMA=y
-CONFIG_QCOM_HIDMA_MGMT=y
-CONFIG_QCOM_QDF2400_ERRATUM_0065=y
-# CONFIG_QFMT_V1 is not set
-# CONFIG_QFMT_V2 is not set
-CONFIG_QMAN_CEETM_UPDATE_PERIOD=1000
-CONFIG_QORIQ_CPUFREQ=y
-# CONFIG_QUICC_ENGINE is not set
-CONFIG_QUOTA=y
-CONFIG_QUOTACTL=y
-# CONFIG_QUOTA_NETLINK_INTERFACE is not set
-CONFIG_RADIX_TREE_MULTIORDER=y
-CONFIG_RAID6_PQ=y
-# CONFIG_RANDOMIZE_BASE is not set
-CONFIG_RAS=y
-CONFIG_RATIONAL=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=21
-# CONFIG_RCU_EXPERT is not set
-CONFIG_RCU_NEED_SEGCBLIST=y
-CONFIG_RCU_STALL_COMMON=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_GZIP=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_LZO=y
-CONFIG_RD_XZ=y
-CONFIG_REALTEK_PHY=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_I2C=y
-CONFIG_REGMAP_MMIO=y
-CONFIG_REGMAP_SPI=y
-# CONFIG_RESET_ATTACK_MITIGATION is not set
-CONFIG_RESET_CONTROLLER=y
-CONFIG_RFS_ACCEL=y
-CONFIG_RPS=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_RTC_DRV_EFI=y
-CONFIG_RTC_DRV_PCF2127=y
-CONFIG_RTC_DRV_PCF85263=y
-CONFIG_RTC_DRV_PL031=y
-CONFIG_RTC_I2C_AND_SPI=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_SCHED_INFO=y
-CONFIG_SCHED_MC=y
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-# CONFIG_SCSI_SAS_ATA is not set
-CONFIG_SCSI_SAS_ATTRS=y
-CONFIG_SCSI_SAS_HOST_SMP=y
-CONFIG_SCSI_SAS_LIBSAS=y
-CONFIG_SECCOMP=y
-CONFIG_SECCOMP_FILTER=y
-CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
-CONFIG_SERIAL_8250_DW=y
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_FSL=y
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_AMBA_PL011=y
-CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_SERIAL_FSL_LPUART=y
-CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_XILINX_PS_UART=y
-CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
-CONFIG_SERIO=y
-CONFIG_SERIO_AMBAKMI=y
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SG_POOL=y
-CONFIG_SLAB=y
-# CONFIG_SLUB is not set
-CONFIG_SMP=y
-CONFIG_SOCK_DIAG=y
-CONFIG_SOC_BUS=y
-CONFIG_SPARSEMEM=y
-CONFIG_SPARSEMEM_EXTREME=y
-CONFIG_SPARSEMEM_MANUAL=y
-CONFIG_SPARSEMEM_VMEMMAP=y
-CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_SPI=y
-CONFIG_SPI_FSL_DSPI=y
-CONFIG_SPI_FSL_QUADSPI=y
-CONFIG_SPI_MASTER=y
-CONFIG_SPI_NXP_FLEXSPI=y
-CONFIG_SPI_PL022=y
-CONFIG_SPMI=y
-# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
-CONFIG_SQUASHFS_DECOMP_SINGLE=y
-CONFIG_SQUASHFS_FILE_CACHE=y
-# CONFIG_SQUASHFS_FILE_DIRECT is not set
-# CONFIG_SQUASHFS_XZ is not set
-CONFIG_SQUASHFS_ZLIB=y
-CONFIG_SRAM=y
-CONFIG_SRCU=y
-# CONFIG_STRIP_ASM_SYMS is not set
-CONFIG_SUSPEND=y
-CONFIG_SUSPEND_FREEZER=y
-CONFIG_SWIOTLB=y
-CONFIG_SWIOTLB_XEN=y
-CONFIG_SWPHY=y
-CONFIG_SYNC_FILE=y
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-CONFIG_SYSFS_SYSCALL=y
-CONFIG_SYS_HYPERVISOR=y
-CONFIG_SYS_SUPPORTS_HUGETLBFS=y
-CONFIG_TASKSTATS=y
-CONFIG_TASKS_RCU=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_TASK_XACCT=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_THERMAL=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_EMULATION=y
-CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
-CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_OF=y
-CONFIG_THREAD_INFO_IN_TASK=y
-CONFIG_TICK_CPU_ACCOUNTING=y
-CONFIG_TIMER_OF=y
-CONFIG_TIMER_PROBE=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
-# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
-CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
-CONFIG_TREE_SRCU=y
-CONFIG_UBIFS_FS=y
-# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
-CONFIG_UCS2_STRING=y
-CONFIG_UIO=y
-CONFIG_UIO_AEC=y
-CONFIG_UIO_CIF=y
-CONFIG_UIO_DMEM_GENIRQ=y
-CONFIG_UIO_MF624=y
-CONFIG_UIO_NETX=y
-CONFIG_UIO_PCI_GENERIC=y
-CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_PRUSS is not set
-CONFIG_UIO_SERCOS3=y
-CONFIG_UNINLINE_SPIN_UNLOCK=y
-CONFIG_UNIX_DIAG=y
-CONFIG_UNMAP_KERNEL_AT_EL0=y
-CONFIG_USB_SUPPORT=y
-# CONFIG_USERIO is not set
-CONFIG_USER_NS=y
-CONFIG_USE_PERCPU_NUMA_NODE_ID=y
-CONFIG_UTS_NS=y
-CONFIG_VEXPRESS_CONFIG=y
-CONFIG_VEXPRESS_SYSCFG=y
-CONFIG_VFAT_FS=y
-CONFIG_VFIO=y
-CONFIG_VFIO_FSL_MC=y
-CONFIG_VFIO_IOMMU_TYPE1=y
-# CONFIG_VFIO_MDEV is not set
-# CONFIG_VFIO_NOIOMMU is not set
-CONFIG_VFIO_PCI=y
-CONFIG_VFIO_PCI_INTX=y
-CONFIG_VFIO_PCI_MMAP=y
-# CONFIG_VFIO_PLATFORM is not set
-CONFIG_VFIO_VIRQFD=y
-CONFIG_VGA_ARB=y
-CONFIG_VGA_ARB_MAX_GPUS=16
-CONFIG_VIDEOMODE_HELPERS=y
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_CONSOLE=y
-CONFIG_VIRTIO_MMIO=y
-# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
-CONFIG_VIRTIO_NET=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_VLAN_8021Q_MVRP=y
-CONFIG_VMAP_STACK=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_VT_CONSOLE_SLEEP=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_WATCHDOG_CORE=y
-# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
-CONFIG_XEN=y
-CONFIG_XENFS=y
-CONFIG_XEN_AUTO_XLATE=y
-CONFIG_XEN_BACKEND=y
-CONFIG_XEN_BALLOON=y
-# CONFIG_XEN_BLKDEV_BACKEND is not set
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_XEN_COMPAT_XENFS=y
-CONFIG_XEN_DEV_EVTCHN=y
-CONFIG_XEN_DOM0=y
-CONFIG_XEN_EFI=y
-CONFIG_XEN_FBDEV_FRONTEND=y
-CONFIG_XEN_GNTDEV=y
-CONFIG_XEN_GRANT_DEV_ALLOC=y
-# CONFIG_XEN_NETDEV_BACKEND is not set
-CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_PRIVCMD=y
-# CONFIG_XEN_PVCALLS_BACKEND is not set
-CONFIG_XEN_SCRUB_PAGES=y
-# CONFIG_XEN_SCSI_FRONTEND is not set
-CONFIG_XEN_SYS_HYPERVISOR=y
-# CONFIG_XEN_WDT is not set
-CONFIG_XEN_XENBUS_FRONTEND=y
-CONFIG_XFS_FS=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-CONFIG_XOR_BLOCKS=y
-CONFIG_XPS=y
-CONFIG_XXHASH=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_BCJ=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_SPARC=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZSTD_COMPRESS=y
-CONFIG_ZSTD_DECOMPRESS=y
diff --git a/target/linux/layerscape/patches-4.14/201-config-support-layerscape.patch b/target/linux/layerscape/patches-4.14/201-config-support-layerscape.patch
deleted file mode 100644
index 7974a5051a..0000000000
--- a/target/linux/layerscape/patches-4.14/201-config-support-layerscape.patch
+++ /dev/null
@@ -1,340 +0,0 @@
-From 2a1351617985ef47581de825ae1bbf1d42bf3200 Mon Sep 17 00:00:00 2001
-From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Mon, 6 May 2019 17:29:32 +0800
-Subject: [PATCH] config: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch of config for layerscape
-
-Signed-off-by: Alison Wang <alison.wang@nxp.com>
-Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
-Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
-Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
-Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
-Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
-Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: Li Yang <leoyang.li@nxp.com>
-Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
-Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
-Signed-off-by: Pankit Garg <pankit.garg@nxp.com>
-Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
-Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
-Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
-Signed-off-by: Shengzhou Liu <Shengzhou.Liu@nxp.com>
-Signed-off-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
-Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
----
- drivers/Makefile | 2 ++
- drivers/irqchip/Makefile | 1 +
- drivers/net/ethernet/freescale/Kconfig | 14 +++++----
- drivers/net/ethernet/freescale/Makefile | 3 ++
- drivers/ptp/Kconfig | 16 +++++-----
- drivers/soc/Kconfig | 1 +
- drivers/soc/fsl/Kconfig | 22 +++++++++++++
- drivers/soc/fsl/Kconfig.arm | 16 ++++++++++
- drivers/soc/fsl/Makefile | 5 +++
- drivers/soc/fsl/layerscape/Kconfig | 10 ++++++
- drivers/soc/fsl/layerscape/Makefile | 1 +
- drivers/staging/Kconfig | 4 +++
- drivers/staging/Makefile | 2 ++
- drivers/staging/fsl-dpaa2/Kconfig | 56 ++++++++++++++++++++++++++++++++-
- drivers/staging/fsl-dpaa2/Makefile | 4 +++
- 15 files changed, 142 insertions(+), 15 deletions(-)
- create mode 100644 drivers/soc/fsl/Kconfig.arm
- create mode 100644 drivers/soc/fsl/layerscape/Kconfig
- create mode 100644 drivers/soc/fsl/layerscape/Makefile
-
---- a/drivers/Makefile
-+++ b/drivers/Makefile
-@@ -20,6 +20,8 @@ obj-$(CONFIG_PCI) += pci/
- obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/
- # PCI dwc controller drivers
- obj-y += pci/dwc/
-+# PCI mobiveil controller drivers
-+obj-y += pci/mobiveil/
-
- obj-$(CONFIG_PARISC) += parisc/
- obj-$(CONFIG_RAPIDIO) += rapidio/
---- a/drivers/irqchip/Makefile
-+++ b/drivers/irqchip/Makefile
-@@ -80,3 +80,4 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed
- obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
- obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
- obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
-+obj-$(CONFIG_QUICC_ENGINE) += irq-qeic.o
---- a/drivers/net/ethernet/freescale/Kconfig
-+++ b/drivers/net/ethernet/freescale/Kconfig
-@@ -5,10 +5,11 @@
- config NET_VENDOR_FREESCALE
- bool "Freescale devices"
- default y
-- depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
-- M523x || M527x || M5272 || M528x || M520x || M532x || \
-- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
-- ARCH_LAYERSCAPE || COMPILE_TEST
-+ depends on FSL_SOC || (QUICC_ENGINE && PPC32) || CPM1 || CPM2 || \
-+ PPC_MPC512x || M523x || M527x || M5272 || M528x || M520x || \
-+ M532x || ARCH_MXC || ARCH_MXS || \
-+ (PPC_MPC52xx && PPC_BESTCOMM) || ARCH_LAYERSCAPE || \
-+ COMPILE_TEST
- ---help---
- If you have a network (Ethernet) card belonging to this class, say Y.
-
-@@ -73,7 +74,7 @@ config FSL_XGMAC_MDIO
-
- config UCC_GETH
- tristate "Freescale QE Gigabit Ethernet"
-- depends on QUICC_ENGINE
-+ depends on QUICC_ENGINE && FSL_SOC && PPC32
- select FSL_PQ_MDIO
- select PHYLIB
- ---help---
-@@ -94,7 +95,8 @@ config GIANFAR
- This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
- and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
- on the 8540.
--
-+source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
-+source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
- source "drivers/net/ethernet/freescale/dpaa/Kconfig"
-
- endif # NET_VENDOR_FREESCALE
---- a/drivers/net/ethernet/freescale/Makefile
-+++ b/drivers/net/ethernet/freescale/Makefile
-@@ -20,5 +20,8 @@ gianfar_driver-objs := gianfar.o \
- obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
- ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
-
-+obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
-+obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
-+
- obj-$(CONFIG_FSL_FMAN) += fman/
- obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
---- a/drivers/ptp/Kconfig
-+++ b/drivers/ptp/Kconfig
-@@ -41,19 +41,19 @@ config PTP_1588_CLOCK_DTE
- To compile this driver as a module, choose M here: the module
- will be called ptp_dte.
-
--config PTP_1588_CLOCK_GIANFAR
-- tristate "Freescale eTSEC as PTP clock"
-- depends on GIANFAR
-+config PTP_1588_CLOCK_QORIQ
-+ tristate "Freescale QorIQ 1588 timer as PTP clock"
-+ depends on GIANFAR || FSL_DPAA_ETH || FSL_SDK_DPAA_ETH
- depends on PTP_1588_CLOCK
- default y
- help
-- This driver adds support for using the eTSEC as a PTP
-- clock. This clock is only useful if your PTP programs are
-- getting hardware time stamps on the PTP Ethernet packets
-- using the SO_TIMESTAMPING API.
-+ This driver adds support for using the Freescale QorIQ 1588
-+ timer as a PTP clock. This clock is only useful if your PTP
-+ programs are getting hardware time stamps on the PTP Ethernet
-+ packets using the SO_TIMESTAMPING API.
-
- To compile this driver as a module, choose M here: the module
-- will be called gianfar_ptp.
-+ will be called ptp_qoriq.
-
- config PTP_1588_CLOCK_IXP46X
- tristate "Intel IXP46x as PTP clock"
---- a/drivers/soc/Kconfig
-+++ b/drivers/soc/Kconfig
-@@ -5,6 +5,7 @@ source "drivers/soc/amlogic/Kconfig"
- source "drivers/soc/atmel/Kconfig"
- source "drivers/soc/bcm/Kconfig"
- source "drivers/soc/fsl/Kconfig"
-+source "drivers/soc/fsl/ls2-console/Kconfig"
- source "drivers/soc/imx/Kconfig"
- source "drivers/soc/mediatek/Kconfig"
- source "drivers/soc/qcom/Kconfig"
---- a/drivers/soc/fsl/Kconfig
-+++ b/drivers/soc/fsl/Kconfig
-@@ -16,3 +16,25 @@ config FSL_GUTS
- Initially only reading SVR and registering soc device are supported.
- Other guts accesses, such as reading RCW, should eventually be moved
- into this driver as well.
-+
-+config FSL_QIXIS
-+ tristate "QIXIS system controller driver"
-+ depends on OF
-+ select REGMAP_I2C
-+ select REGMAP_MMIO
-+ select MFD_CORE
-+ default n
-+ help
-+ Say y here to enable QIXIS system controller api. The qixis driver
-+ provides FPGA functions to control system.
-+
-+config FSL_SLEEP_FSM
-+ bool
-+ help
-+ This driver configures a hardware FSM (Finite State Machine) for deep sleep.
-+ The FSM is used to finish clean-ups at the last stage of system entering deep
-+ sleep, and also wakes up system when a wake up event happens.
-+
-+if ARM || ARM64
-+source "drivers/soc/fsl/Kconfig.arm"
-+endif
---- /dev/null
-+++ b/drivers/soc/fsl/Kconfig.arm
-@@ -0,0 +1,16 @@
-+#
-+# Freescale ARM SOC Drivers
-+#
-+
-+config LS_SOC_DRIVERS
-+ bool "Layerscape Soc Drivers"
-+ depends on ARCH_LAYERSCAPE || SOC_LS1021A
-+ default n
-+ help
-+ Say y here to enable Freescale Layerscape Soc Device Drivers support.
-+ The Soc Drivers provides the device driver that is a specific block
-+ or feature on Layerscape platform.
-+
-+if LS_SOC_DRIVERS
-+ source "drivers/soc/fsl/layerscape/Kconfig"
-+endif
---- a/drivers/soc/fsl/Makefile
-+++ b/drivers/soc/fsl/Makefile
-@@ -5,4 +5,9 @@
- obj-$(CONFIG_FSL_DPAA) += qbman/
- obj-$(CONFIG_QUICC_ENGINE) += qe/
- obj-$(CONFIG_CPM) += qe/
-+obj-$(CONFIG_FSL_QIXIS) += qixis_ctrl.o
- obj-$(CONFIG_FSL_GUTS) += guts.o
-+obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console/
-+obj-$(CONFIG_SUSPEND) += rcpm.o
-+obj-$(CONFIG_LS_SOC_DRIVERS) += layerscape/
-+obj-$(CONFIG_FSL_SLEEP_FSM) += sleep_fsm.o
---- /dev/null
-+++ b/drivers/soc/fsl/layerscape/Kconfig
-@@ -0,0 +1,10 @@
-+#
-+# Layerscape Soc drivers
-+#
-+config FTM_ALARM
-+ bool "FTM alarm driver"
-+ default n
-+ help
-+ Say y here to enable FTM alarm support. The FTM alarm provides
-+ alarm functions for wakeup system from deep sleep. There is only
-+ one FTM can be used in ALARM(FTM 0).
---- /dev/null
-+++ b/drivers/soc/fsl/layerscape/Makefile
-@@ -0,0 +1 @@
-+obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o
---- a/drivers/staging/Kconfig
-+++ b/drivers/staging/Kconfig
-@@ -118,4 +118,8 @@ source "drivers/staging/vboxvideo/Kconfi
-
- source "drivers/staging/pi433/Kconfig"
-
-+source "drivers/staging/fsl_qbman/Kconfig"
-+
-+source "drivers/staging/fsl_ppfe/Kconfig"
-+
- endif # STAGING
---- a/drivers/staging/Makefile
-+++ b/drivers/staging/Makefile
-@@ -50,3 +50,5 @@ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_serv
- obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
- obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
- obj-$(CONFIG_PI433) += pi433/
-+obj-$(CONFIG_FSL_SDK_DPA) += fsl_qbman/
-+obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/
---- a/drivers/staging/fsl-dpaa2/Kconfig
-+++ b/drivers/staging/fsl-dpaa2/Kconfig
-@@ -4,7 +4,7 @@
-
- config FSL_DPAA2
- bool "Freescale DPAA2 devices"
-- depends on FSL_MC_BUS && ARCH_LAYERSCAPE
-+ depends on FSL_MC_BUS
- ---help---
- Build drivers for Freescale DataPath Acceleration
- Architecture (DPAA2) family of SoCs.
-@@ -16,3 +16,57 @@ config FSL_DPAA2_ETH
- ---help---
- Ethernet driver for Freescale DPAA2 SoCs, using the
- Freescale MC bus driver
-+
-+if FSL_DPAA2_ETH
-+config FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ bool "Enable Rx error queue"
-+ default n
-+ ---help---
-+ Allow Rx error frames to be enqueued on an error queue
-+ and processed by the driver (by default they are dropped
-+ in hardware).
-+ This may impact performance, recommended for debugging
-+ purposes only.
-+
-+# QBMAN_DEBUG requires some additional DPIO APIs
-+config FSL_DPAA2_ETH_DEBUGFS
-+ depends on DEBUG_FS
-+ bool "Enable debugfs support"
-+ default y
-+ ---help---
-+ Enable advanced statistics through debugfs interface.
-+
-+config FSL_DPAA2_ETH_DCB
-+ bool "Data Center Bridging (DCB) Support"
-+ default n
-+ depends on DCB
-+ ---help---
-+ Say Y here if you want to use Data Center Bridging (DCB) features
-+ (PFC) in the driver.
-+
-+ If unsure, say N.
-+
-+config FSL_DPAA2_PTP_CLOCK
-+ tristate "Freescale DPAA2 as PTP clock"
-+ select PTP_1588_CLOCK
-+ default y
-+ help
-+ This driver adds support for using the DPAA2 1588 timer module
-+ as a PTP clock. This clock is only useful if your PTP programs are
-+ getting hardware time stamps on the PTP Ethernet packets
-+ using the SO_TIMESTAMPING API.
-+
-+ To compile this driver as a module, choose M here: the module
-+ will be called dpaa2-rtc.
-+endif
-+
-+source "drivers/staging/fsl-dpaa2/mac/Kconfig"
-+source "drivers/staging/fsl-dpaa2/evb/Kconfig"
-+
-+config FSL_DPAA2_ETHSW
-+ tristate "Freescale DPAA2 Ethernet Switch"
-+ depends on FSL_DPAA2
-+ depends on NET_SWITCHDEV
-+ ---help---
-+ Driver for Freescale DPAA2 Ethernet Switch. Select
-+ BRIDGE to have support for bridge tools.
---- a/drivers/staging/fsl-dpaa2/Makefile
-+++ b/drivers/staging/fsl-dpaa2/Makefile
-@@ -3,3 +3,7 @@
- #
-
- obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
-+obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
-+obj-$(CONFIG_FSL_DPAA2_EVB) += evb/
-+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += rtc/
-+obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
diff --git a/target/linux/layerscape/patches-4.14/202-core-linux-support-layerscape.patch b/target/linux/layerscape/patches-4.14/202-core-linux-support-layerscape.patch
deleted file mode 100644
index 07fc71f0d4..0000000000
--- a/target/linux/layerscape/patches-4.14/202-core-linux-support-layerscape.patch
+++ /dev/null
@@ -1,1056 +0,0 @@
-From d2ef9f2f6d16d34d7eee74cb8efd269341fec5a1 Mon Sep 17 00:00:00 2001
-From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Mon, 6 May 2019 16:54:17 +0800
-Subject: [PATCH] core-linux: support layerscape
-
-This is an integrated patch of core-linux for layerscape
-
-Signed-off-by: Aaron Lu <aaron.lu@intel.com>
-Signed-off-by: Abhijit Ayarekar <abhijit.ayarekar@caviumnetworks.com>
-Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Signed-off-by: David Ahern <dsahern@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Jiri Pirko <jiri@mellanox.com>
-Signed-off-by: Joel Fernandes <joelaf@google.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Li Yang <leoyang.li@nxp.com>
-Signed-off-by: Lukas Wunner <lukas@wunner.de>
-Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
-Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
-Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
-Signed-off-by: pascal paillet <p.paillet@st.com>
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
-Signed-off-by: Robin Murphy <robin.murphy@arm.com>
-Signed-off-by: Suresh Gupta <suresh.gupta@freescale.com>
-Signed-off-by: Vivek Gautam <vivek.gautam@codeaurora.org>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/base/core.c | 122 ++++++++++++++++++++++++++----
- drivers/base/dma-mapping.c | 7 ++
- drivers/gpu/ipu-v3/ipu-pre.c | 3 +-
- drivers/gpu/ipu-v3/ipu-prg.c | 3 +-
- drivers/iommu/dma-iommu.c | 3 +
- drivers/mux/Kconfig | 12 +--
- drivers/mux/mmio.c | 6 +-
- drivers/of/device.c | 14 +++-
- drivers/soc/imx/gpc.c | 2 +-
- include/linux/device.h | 20 +++--
- include/linux/fsl_devices.h | 2 +
- include/linux/netdevice.h | 10 ++-
- include/linux/skbuff.h | 2 +
- lib/dma-noop.c | 19 +++++
- mm/page_alloc.c | 10 ++-
- net/core/dev.c | 81 ++++++++++++--------
- net/core/skbuff.c | 29 ++++++-
- samples/bpf/Makefile | 12 ++-
- samples/bpf/map_perf_test_kern.c | 2 +-
- samples/bpf/map_perf_test_user.c | 2 +-
- tools/testing/selftests/bpf/bpf_helpers.h | 56 ++++++++++++--
- 21 files changed, 337 insertions(+), 80 deletions(-)
-
---- a/drivers/base/core.c
-+++ b/drivers/base/core.c
-@@ -162,10 +162,10 @@ static int device_reorder_to_tail(struct
- * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
- * ignored.
- *
-- * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
-- * when the consumer device driver unbinds from it. The combination of both
-- * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
-- * to be returned.
-+ * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
-+ * automatically when the consumer device driver unbinds from it.
-+ * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
-+ * set is invalid and will cause NULL to be returned.
- *
- * A side effect of the link creation is re-ordering of dpm_list and the
- * devices_kset list by moving the consumer device and all devices depending
-@@ -183,7 +183,8 @@ struct device_link *device_link_add(stru
- bool rpm_put_supplier = false;
-
- if (!consumer || !supplier ||
-- ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
-+ ((flags & DL_FLAG_STATELESS) &&
-+ (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
- return NULL;
-
- if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
-@@ -209,8 +210,10 @@ struct device_link *device_link_add(stru
- }
-
- list_for_each_entry(link, &supplier->links.consumers, s_node)
-- if (link->consumer == consumer)
-+ if (link->consumer == consumer) {
-+ kref_get(&link->kref);
- goto out;
-+ }
-
- link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link)
-@@ -237,6 +240,7 @@ struct device_link *device_link_add(stru
- link->consumer = consumer;
- INIT_LIST_HEAD(&link->c_node);
- link->flags = flags;
-+ kref_init(&link->kref);
-
- /* Determine the initial link state. */
- if (flags & DL_FLAG_STATELESS) {
-@@ -311,8 +315,10 @@ static void __device_link_free_srcu(stru
- device_link_free(container_of(rhead, struct device_link, rcu_head));
- }
-
--static void __device_link_del(struct device_link *link)
-+static void __device_link_del(struct kref *kref)
- {
-+ struct device_link *link = container_of(kref, struct device_link, kref);
-+
- dev_info(link->consumer, "Dropping the link to %s\n",
- dev_name(link->supplier));
-
-@@ -324,8 +330,10 @@ static void __device_link_del(struct dev
- call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
- }
- #else /* !CONFIG_SRCU */
--static void __device_link_del(struct device_link *link)
-+static void __device_link_del(struct kref *kref)
- {
-+ struct device_link *link = container_of(kref, struct device_link, kref);
-+
- dev_info(link->consumer, "Dropping the link to %s\n",
- dev_name(link->supplier));
-
-@@ -343,18 +351,50 @@ static void __device_link_del(struct dev
- * @link: Device link to delete.
- *
- * The caller must ensure proper synchronization of this function with runtime
-- * PM.
-+ * PM. If the link was added multiple times, it needs to be deleted as often.
-+ * Care is required for hotplugged devices: Their links are purged on removal
-+ * and calling device_link_del() is then no longer allowed.
- */
- void device_link_del(struct device_link *link)
- {
- device_links_write_lock();
- device_pm_lock();
-- __device_link_del(link);
-+ kref_put(&link->kref, __device_link_del);
- device_pm_unlock();
- device_links_write_unlock();
- }
- EXPORT_SYMBOL_GPL(device_link_del);
-
-+/**
-+ * device_link_remove - remove a link between two devices.
-+ * @consumer: Consumer end of the link.
-+ * @supplier: Supplier end of the link.
-+ *
-+ * The caller must ensure proper synchronization of this function with runtime
-+ * PM.
-+ */
-+void device_link_remove(void *consumer, struct device *supplier)
-+{
-+ struct device_link *link;
-+
-+ if (WARN_ON(consumer == supplier))
-+ return;
-+
-+ device_links_write_lock();
-+ device_pm_lock();
-+
-+ list_for_each_entry(link, &supplier->links.consumers, s_node) {
-+ if (link->consumer == consumer) {
-+ kref_put(&link->kref, __device_link_del);
-+ break;
-+ }
-+ }
-+
-+ device_pm_unlock();
-+ device_links_write_unlock();
-+}
-+EXPORT_SYMBOL_GPL(device_link_remove);
-+
- static void device_links_missing_supplier(struct device *dev)
- {
- struct device_link *link;
-@@ -462,8 +502,8 @@ static void __device_links_no_driver(str
- if (link->flags & DL_FLAG_STATELESS)
- continue;
-
-- if (link->flags & DL_FLAG_AUTOREMOVE)
-- __device_link_del(link);
-+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
-+ kref_put(&link->kref, __device_link_del);
- else if (link->status != DL_STATE_SUPPLIER_UNBIND)
- WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
- }
-@@ -498,8 +538,18 @@ void device_links_driver_cleanup(struct
- if (link->flags & DL_FLAG_STATELESS)
- continue;
-
-- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
-+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
- WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
-+
-+ /*
-+ * autoremove the links between this @dev and its consumer
-+ * devices that are not active, i.e. where the link state
-+ * has moved to DL_STATE_SUPPLIER_UNBIND.
-+ */
-+ if (link->status == DL_STATE_SUPPLIER_UNBIND &&
-+ link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
-+ kref_put(&link->kref, __device_link_del);
-+
- WRITE_ONCE(link->status, DL_STATE_DORMANT);
- }
-
-@@ -616,13 +666,13 @@ static void device_links_purge(struct de
-
- list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
- WARN_ON(link->status == DL_STATE_ACTIVE);
-- __device_link_del(link);
-+ __device_link_del(&link->kref);
- }
-
- list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
- WARN_ON(link->status != DL_STATE_DORMANT &&
- link->status != DL_STATE_NONE);
-- __device_link_del(link);
-+ __device_link_del(&link->kref);
- }
-
- device_links_write_unlock();
-@@ -1044,6 +1094,34 @@ static ssize_t online_store(struct devic
- }
- static DEVICE_ATTR_RW(online);
-
-+static ssize_t suppliers_show(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct device_link *link;
-+ size_t count = 0;
-+
-+ list_for_each_entry(link, &dev->links.suppliers, c_node)
-+ count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
-+ dev_name(link->supplier));
-+
-+ return count;
-+}
-+static DEVICE_ATTR_RO(suppliers);
-+
-+static ssize_t consumers_show(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct device_link *link;
-+ size_t count = 0;
-+
-+ list_for_each_entry(link, &dev->links.consumers, s_node)
-+ count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
-+ dev_name(link->consumer));
-+
-+ return count;
-+}
-+static DEVICE_ATTR_RO(consumers);
-+
- int device_add_groups(struct device *dev, const struct attribute_group **groups)
- {
- return sysfs_create_groups(&dev->kobj, groups);
-@@ -1215,8 +1293,20 @@ static int device_add_attrs(struct devic
- goto err_remove_dev_groups;
- }
-
-+ error = device_create_file(dev, &dev_attr_suppliers);
-+ if (error)
-+ goto err_remove_online;
-+
-+ error = device_create_file(dev, &dev_attr_consumers);
-+ if (error)
-+ goto err_remove_suppliers;
-+
- return 0;
-
-+ err_remove_suppliers:
-+ device_remove_file(dev, &dev_attr_suppliers);
-+ err_remove_online:
-+ device_remove_file(dev, &dev_attr_online);
- err_remove_dev_groups:
- device_remove_groups(dev, dev->groups);
- err_remove_type_groups:
-@@ -1234,6 +1324,8 @@ static void device_remove_attrs(struct d
- struct class *class = dev->class;
- const struct device_type *type = dev->type;
-
-+ device_remove_file(dev, &dev_attr_consumers);
-+ device_remove_file(dev, &dev_attr_suppliers);
- device_remove_file(dev, &dev_attr_online);
- device_remove_groups(dev, dev->groups);
-
---- a/drivers/base/dma-mapping.c
-+++ b/drivers/base/dma-mapping.c
-@@ -335,6 +335,7 @@ void dma_common_free_remap(void *cpu_add
- * Common configuration to enable DMA API use for a device
- */
- #include <linux/pci.h>
-+#include <linux/fsl/mc.h>
-
- int dma_configure(struct device *dev)
- {
-@@ -350,6 +351,12 @@ int dma_configure(struct device *dev)
- dma_dev = dma_dev->parent;
- }
-
-+ if (dev_is_fsl_mc(dev)) {
-+ dma_dev = dev;
-+ while (dev_is_fsl_mc(dma_dev))
-+ dma_dev = dma_dev->parent;
-+ }
-+
- if (dma_dev->of_node) {
- ret = of_dma_configure(dev, dma_dev->of_node);
- } else if (has_acpi_companion(dma_dev)) {
---- a/drivers/gpu/ipu-v3/ipu-pre.c
-+++ b/drivers/gpu/ipu-v3/ipu-pre.c
-@@ -125,7 +125,8 @@ ipu_pre_lookup_by_phandle(struct device
- list_for_each_entry(pre, &ipu_pre_list, list) {
- if (pre_node == pre->dev->of_node) {
- mutex_unlock(&ipu_pre_list_mutex);
-- device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
-+ device_link_add(dev, pre->dev,
-+ DL_FLAG_AUTOREMOVE_CONSUMER);
- of_node_put(pre_node);
- return pre;
- }
---- a/drivers/gpu/ipu-v3/ipu-prg.c
-+++ b/drivers/gpu/ipu-v3/ipu-prg.c
-@@ -99,7 +99,8 @@ ipu_prg_lookup_by_phandle(struct device
- list_for_each_entry(prg, &ipu_prg_list, list) {
- if (prg_node == prg->dev->of_node) {
- mutex_unlock(&ipu_prg_list_mutex);
-- device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
-+ device_link_add(dev, prg->dev,
-+ DL_FLAG_AUTOREMOVE_CONSUMER);
- prg->id = ipu_id;
- of_node_put(prg_node);
- return prg;
---- a/drivers/iommu/dma-iommu.c
-+++ b/drivers/iommu/dma-iommu.c
-@@ -381,6 +381,9 @@ static dma_addr_t iommu_dma_alloc_iova(s
- if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
- iova_len = roundup_pow_of_two(iova_len);
-
-+ if (dev->bus_dma_mask)
-+ dma_limit &= dev->bus_dma_mask;
-+
- if (domain->geometry.force_aperture)
- dma_limit = min(dma_limit, domain->geometry.aperture_end);
-
---- a/drivers/mux/Kconfig
-+++ b/drivers/mux/Kconfig
-@@ -35,14 +35,14 @@ config MUX_GPIO
- be called mux-gpio.
-
- config MUX_MMIO
-- tristate "MMIO register bitfield-controlled Multiplexer"
-- depends on (OF && MFD_SYSCON) || COMPILE_TEST
-+ tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
-+ depends on OF || COMPILE_TEST
- help
-- MMIO register bitfield-controlled Multiplexer controller.
-+ MMIO/Regmap register bitfield-controlled Multiplexer controller.
-
-- The driver builds multiplexer controllers for bitfields in a syscon
-- register. For N bit wide bitfields, there will be 2^N possible
-- multiplexer states.
-+ The driver builds multiplexer controllers for bitfields in either
-+ a syscon register or a driver regmap register. For N bit wide
-+ bitfields, there will be 2^N possible multiplexer states.
-
- To compile the driver as a module, choose M here: the module will
- be called mux-mmio.
---- a/drivers/mux/mmio.c
-+++ b/drivers/mux/mmio.c
-@@ -31,6 +31,7 @@ static const struct mux_control_ops mux_
-
- static const struct of_device_id mux_mmio_dt_ids[] = {
- { .compatible = "mmio-mux", },
-+ { .compatible = "reg-mux", },
- { /* sentinel */ }
- };
- MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
-@@ -46,7 +47,10 @@ static int mux_mmio_probe(struct platfor
- int ret;
- int i;
-
-- regmap = syscon_node_to_regmap(np->parent);
-+ if (of_device_is_compatible(np, "mmio-mux"))
-+ regmap = syscon_node_to_regmap(np->parent);
-+ else
-+ regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- dev_err(dev, "failed to get regmap: %d\n", ret);
---- a/drivers/of/device.c
-+++ b/drivers/of/device.c
-@@ -15,6 +15,9 @@
-
- #include <asm/errno.h>
- #include "of_private.h"
-+#ifdef CONFIG_FSL_MC_BUS
-+#include <linux/fsl/mc.h>
-+#endif
-
- /**
- * of_match_device - Tell if a struct device matches an of_device_id list
-@@ -105,6 +108,9 @@ int of_dma_configure(struct device *dev,
- #ifdef CONFIG_ARM_AMBA
- dev->bus != &amba_bustype &&
- #endif
-+#ifdef CONFIG_FSL_MC_BUS
-+ dev->bus != &fsl_mc_bus_type &&
-+#endif
- dev->bus != &platform_bus_type)
- return ret == -ENODEV ? 0 : ret;
-
-@@ -152,10 +158,16 @@ int of_dma_configure(struct device *dev,
- * set by the driver.
- */
- mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
-+ dev->bus_dma_mask = mask;
- dev->coherent_dma_mask &= mask;
- *dev->dma_mask &= mask;
-
-- coherent = of_dma_is_coherent(np);
-+#ifdef CONFIG_FSL_MC_BUS
-+ if (dev_is_fsl_mc(dev))
-+ coherent = fsl_mc_is_dev_coherent(dev);
-+ else
-+#endif
-+ coherent = of_dma_is_coherent(np);
- dev_dbg(dev, "device is%sdma coherent\n",
- coherent ? " " : " not ");
-
---- a/drivers/soc/imx/gpc.c
-+++ b/drivers/soc/imx/gpc.c
-@@ -210,7 +210,7 @@ static int imx_pgc_power_domain_probe(st
- goto genpd_err;
- }
-
-- device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE);
-+ device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
-
- return 0;
-
---- a/include/linux/device.h
-+++ b/include/linux/device.h
-@@ -55,6 +55,8 @@ struct bus_attribute {
- struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
- #define BUS_ATTR_RO(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
-+#define BUS_ATTR_WO(_name) \
-+ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
-
- extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-@@ -751,14 +753,16 @@ enum device_link_state {
- * Device link flags.
- *
- * STATELESS: The core won't track the presence of supplier/consumer drivers.
-- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
-+ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
- * PM_RUNTIME: If set, the runtime PM framework will use this link.
- * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
-+ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
- */
--#define DL_FLAG_STATELESS BIT(0)
--#define DL_FLAG_AUTOREMOVE BIT(1)
--#define DL_FLAG_PM_RUNTIME BIT(2)
--#define DL_FLAG_RPM_ACTIVE BIT(3)
-+#define DL_FLAG_STATELESS BIT(0)
-+#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
-+#define DL_FLAG_PM_RUNTIME BIT(2)
-+#define DL_FLAG_RPM_ACTIVE BIT(3)
-+#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
-
- /**
- * struct device_link - Device link representation.
-@@ -769,6 +773,7 @@ enum device_link_state {
- * @status: The state of the link (with respect to the presence of drivers).
- * @flags: Link flags.
- * @rpm_active: Whether or not the consumer device is runtime-PM-active.
-+ * @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
- */
- struct device_link {
-@@ -779,6 +784,7 @@ struct device_link {
- enum device_link_state status;
- u32 flags;
- bool rpm_active;
-+ struct kref kref;
- #ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
- #endif
-@@ -851,6 +857,8 @@ struct dev_links_info {
- * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
- * hardware supports 64-bit addresses for consistent allocations
- * such descriptors.
-+ * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
-+ * limit than the device itself supports.
- * @dma_pfn_offset: offset of DMA memory range relatively of RAM
- * @dma_parms: A low level driver may set these to teach IOMMU code about
- * segment limitations.
-@@ -930,6 +938,7 @@ struct device {
- not all hardware supports
- 64 bit addresses for consistent
- allocations such descriptors. */
-+ u64 bus_dma_mask; /* upstream dma_mask constraint */
- unsigned long dma_pfn_offset;
-
- struct device_dma_parameters *dma_parms;
-@@ -1268,6 +1277,7 @@ extern const char *dev_driver_string(con
- struct device_link *device_link_add(struct device *consumer,
- struct device *supplier, u32 flags);
- void device_link_del(struct device_link *link);
-+void device_link_remove(void *consumer, struct device *supplier);
-
- #ifdef CONFIG_PRINTK
-
---- a/include/linux/fsl_devices.h
-+++ b/include/linux/fsl_devices.h
-@@ -99,7 +99,9 @@ struct fsl_usb2_platform_data {
- unsigned suspended:1;
- unsigned already_suspended:1;
- unsigned has_fsl_erratum_a007792:1;
-+ unsigned has_fsl_erratum_14:1;
- unsigned has_fsl_erratum_a005275:1;
-+ unsigned has_fsl_erratum_a006918:1;
- unsigned has_fsl_erratum_a005697:1;
- unsigned check_phy_clk_valid:1;
-
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -2349,7 +2349,8 @@ int register_netdevice_notifier(struct n
- int unregister_netdevice_notifier(struct notifier_block *nb);
-
- struct netdev_notifier_info {
-- struct net_device *dev;
-+ struct net_device *dev;
-+ struct netlink_ext_ack *extack;
- };
-
- struct netdev_notifier_info_ext {
-@@ -2381,6 +2382,7 @@ static inline void netdev_notifier_info_
- struct net_device *dev)
- {
- info->dev = dev;
-+ info->extack = NULL;
- }
-
- static inline struct net_device *
-@@ -2389,6 +2391,12 @@ netdev_notifier_info_to_dev(const struct
- return info->dev;
- }
-
-+static inline struct netlink_ext_ack *
-+netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
-+{
-+ return info->extack;
-+}
-+
- int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
-
-
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -964,6 +964,7 @@ void kfree_skb_list(struct sk_buff *segs
- void skb_tx_error(struct sk_buff *skb);
- void consume_skb(struct sk_buff *skb);
- void __consume_stateless_skb(struct sk_buff *skb);
-+void skb_recycle(struct sk_buff *skb);
- void __kfree_skb(struct sk_buff *skb);
- extern struct kmem_cache *skbuff_head_cache;
-
-@@ -3315,6 +3316,7 @@ static inline void skb_free_datagram_loc
- }
- int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
- int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
-+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
- int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
- __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
- int len, __wsum csum);
---- a/lib/dma-noop.c
-+++ b/lib/dma-noop.c
-@@ -58,11 +58,30 @@ static int dma_noop_map_sg(struct device
- return nents;
- }
-
-+static int dma_noop_supported(struct device *dev, u64 mask)
-+{
-+#ifdef CONFIG_ZONE_DMA
-+ if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-+ return 0;
-+#else
-+ /*
-+ * Because 32-bit DMA masks are so common we expect every architecture
-+ * to be able to satisfy them - either by not supporting more physical
-+ * memory, or by providing a ZONE_DMA32. If neither is the case, the
-+ * architecture needs to use an IOMMU instead of the direct mapping.
-+ */
-+ if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
-+ return 0;
-+#endif
-+ return 1;
-+}
-+
- const struct dma_map_ops dma_noop_ops = {
- .alloc = dma_noop_alloc,
- .free = dma_noop_free,
- .map_page = dma_noop_map_page,
- .map_sg = dma_noop_map_sg,
-+ dma_supported = dma_noop_supported
- };
-
- EXPORT_SYMBOL(dma_noop_ops);
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -4366,8 +4366,14 @@ void page_frag_free(void *addr)
- {
- struct page *page = virt_to_head_page(addr);
-
-- if (unlikely(put_page_testzero(page)))
-- __free_pages_ok(page, compound_order(page));
-+ if (unlikely(put_page_testzero(page))) {
-+ unsigned int order = compound_order(page);
-+
-+ if (order == 0) /* Via pcp? */
-+ free_hot_cold_page(page, false);
-+ else
-+ __free_pages_ok(page, order);
-+ }
- }
- EXPORT_SYMBOL(page_frag_free);
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -162,7 +162,6 @@ static struct list_head offload_base __r
-
- static int netif_rx_internal(struct sk_buff *skb);
- static int call_netdevice_notifiers_info(unsigned long val,
-- struct net_device *dev,
- struct netdev_notifier_info *info);
- static struct napi_struct *napi_by_id(unsigned int napi_id);
-
-@@ -1312,10 +1311,11 @@ EXPORT_SYMBOL(netdev_features_change);
- void netdev_state_change(struct net_device *dev)
- {
- if (dev->flags & IFF_UP) {
-- struct netdev_notifier_change_info change_info;
-+ struct netdev_notifier_change_info change_info = {
-+ .info.dev = dev,
-+ };
-
-- change_info.flags_changed = 0;
-- call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
-+ call_netdevice_notifiers_info(NETDEV_CHANGE,
- &change_info.info);
- rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
- }
-@@ -1536,9 +1536,10 @@ EXPORT_SYMBOL(dev_disable_lro);
- static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
- struct net_device *dev)
- {
-- struct netdev_notifier_info info;
-+ struct netdev_notifier_info info = {
-+ .dev = dev,
-+ };
-
-- netdev_notifier_info_init(&info, dev);
- return nb->notifier_call(nb, val, &info);
- }
-
-@@ -1663,11 +1664,9 @@ EXPORT_SYMBOL(unregister_netdevice_notif
- */
-
- static int call_netdevice_notifiers_info(unsigned long val,
-- struct net_device *dev,
- struct netdev_notifier_info *info)
- {
- ASSERT_RTNL();
-- netdev_notifier_info_init(info, dev);
- return raw_notifier_call_chain(&netdev_chain, val, info);
- }
-
-@@ -1682,9 +1681,11 @@ static int call_netdevice_notifiers_info
-
- int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
- {
-- struct netdev_notifier_info info;
-+ struct netdev_notifier_info info = {
-+ .dev = dev,
-+ };
-
-- return call_netdevice_notifiers_info(val, dev, &info);
-+ return call_netdevice_notifiers_info(val, &info);
- }
- EXPORT_SYMBOL(call_netdevice_notifiers);
-
-@@ -1707,7 +1708,7 @@ static int call_netdevice_notifiers_mtu(
-
- BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
-
-- return call_netdevice_notifiers_info(val, dev, &info.info);
-+ return call_netdevice_notifiers_info(val, &info.info);
- }
-
- #ifdef CONFIG_NET_INGRESS
-@@ -6342,7 +6343,15 @@ static int __netdev_upper_dev_link(struc
- struct net_device *upper_dev, bool master,
- void *upper_priv, void *upper_info)
- {
-- struct netdev_notifier_changeupper_info changeupper_info;
-+ struct netdev_notifier_changeupper_info changeupper_info = {
-+ .info = {
-+ .dev = dev,
-+ },
-+ .upper_dev = upper_dev,
-+ .master = master,
-+ .linking = true,
-+ .upper_info = upper_info,
-+ };
- int ret = 0;
-
- ASSERT_RTNL();
-@@ -6360,12 +6369,7 @@ static int __netdev_upper_dev_link(struc
- if (master && netdev_master_upper_dev_get(dev))
- return -EBUSY;
-
-- changeupper_info.upper_dev = upper_dev;
-- changeupper_info.master = master;
-- changeupper_info.linking = true;
-- changeupper_info.upper_info = upper_info;
--
-- ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
-+ ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
- &changeupper_info.info);
- ret = notifier_to_errno(ret);
- if (ret)
-@@ -6377,7 +6381,7 @@ static int __netdev_upper_dev_link(struc
- return ret;
-
- netdev_update_addr_mask(dev);
-- ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
-+ ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
- &changeupper_info.info);
- ret = notifier_to_errno(ret);
- if (ret)
-@@ -6441,21 +6445,25 @@ EXPORT_SYMBOL(netdev_master_upper_dev_li
- void netdev_upper_dev_unlink(struct net_device *dev,
- struct net_device *upper_dev)
- {
-- struct netdev_notifier_changeupper_info changeupper_info;
-+ struct netdev_notifier_changeupper_info changeupper_info = {
-+ .info = {
-+ .dev = dev,
-+ },
-+ .upper_dev = upper_dev,
-+ .linking = false,
-+ };
-
- ASSERT_RTNL();
-
-- changeupper_info.upper_dev = upper_dev;
- changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
-- changeupper_info.linking = false;
-
-- call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
-+ call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
- &changeupper_info.info);
-
- __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
-
- netdev_update_addr_mask(dev);
-- call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
-+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
- &changeupper_info.info);
- }
- EXPORT_SYMBOL(netdev_upper_dev_unlink);
-@@ -6471,11 +6479,13 @@ EXPORT_SYMBOL(netdev_upper_dev_unlink);
- void netdev_bonding_info_change(struct net_device *dev,
- struct netdev_bonding_info *bonding_info)
- {
-- struct netdev_notifier_bonding_info info;
-+ struct netdev_notifier_bonding_info info = {
-+ .info.dev = dev,
-+ };
-
- memcpy(&info.bonding_info, bonding_info,
- sizeof(struct netdev_bonding_info));
-- call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
-+ call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
- &info.info);
- }
- EXPORT_SYMBOL(netdev_bonding_info_change);
-@@ -6601,11 +6611,13 @@ EXPORT_SYMBOL(dev_get_nest_level);
- void netdev_lower_state_changed(struct net_device *lower_dev,
- void *lower_state_info)
- {
-- struct netdev_notifier_changelowerstate_info changelowerstate_info;
-+ struct netdev_notifier_changelowerstate_info changelowerstate_info = {
-+ .info.dev = lower_dev,
-+ };
-
- ASSERT_RTNL();
- changelowerstate_info.lower_state_info = lower_state_info;
-- call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
-+ call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
- &changelowerstate_info.info);
- }
- EXPORT_SYMBOL(netdev_lower_state_changed);
-@@ -6896,11 +6908,14 @@ void __dev_notify_flags(struct net_devic
-
- if (dev->flags & IFF_UP &&
- (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
-- struct netdev_notifier_change_info change_info;
-+ struct netdev_notifier_change_info change_info = {
-+ .info = {
-+ .dev = dev,
-+ },
-+ .flags_changed = changes,
-+ };
-
-- change_info.flags_changed = changes;
-- call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
-- &change_info.info);
-+ call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
- }
- }
-
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -803,6 +803,32 @@ void napi_consume_skb(struct sk_buff *sk
- }
- EXPORT_SYMBOL(napi_consume_skb);
-
-+/**
-+ * skb_recycle - clean up an skb for reuse
-+ * @skb: buffer
-+ *
-+ * Recycles the skb to be reused as a receive buffer. This
-+ * function does any necessary reference count dropping, and
-+ * cleans up the skbuff as if it just came from __alloc_skb().
-+ */
-+void skb_recycle(struct sk_buff *skb)
-+{
-+ struct skb_shared_info *shinfo;
-+ u8 head_frag = skb->head_frag;
-+
-+ skb_release_head_state(skb);
-+
-+ shinfo = skb_shinfo(skb);
-+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-+ atomic_set(&shinfo->dataref, 1);
-+
-+ memset(skb, 0, offsetof(struct sk_buff, tail));
-+ skb->data = skb->head + NET_SKB_PAD;
-+ skb->head_frag = head_frag;
-+ skb_reset_tail_pointer(skb);
-+}
-+EXPORT_SYMBOL(skb_recycle);
-+
- /* Make sure a field is enclosed inside headers_start/headers_end section */
- #define CHECK_SKB_FIELD(field) \
- BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
-@@ -1322,7 +1348,7 @@ static void skb_headers_offset_update(st
- skb->inner_mac_header += off;
- }
-
--static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
-+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
- {
- __copy_skb_header(new, old);
-
-@@ -1330,6 +1356,7 @@ static void copy_skb_header(struct sk_bu
- skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
- skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
- }
-+EXPORT_SYMBOL(copy_skb_header);
-
- static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
- {
---- a/samples/bpf/Makefile
-+++ b/samples/bpf/Makefile
-@@ -178,6 +178,12 @@ HOSTLOADLIBES_syscall_tp += -lelf
- LLC ?= llc
- CLANG ?= clang
-
-+# Detect that we're cross compiling and use the cross compiler
-+ifdef CROSS_COMPILE
-+HOSTCC = $(CROSS_COMPILE)gcc
-+CLANG_ARCH_ARGS = -target $(ARCH)
-+endif
-+
- # Trick to allow make to be run from this directory
- all: $(LIBBPF)
- $(MAKE) -C ../../ $(CURDIR)/
-@@ -228,9 +234,9 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nr
- $(obj)/%.o: $(src)/%.c
- $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
- -I$(srctree)/tools/testing/selftests/bpf/ \
-- -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-- -Wno-compare-distinct-pointer-types \
-+ -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
-+ -D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \
- -Wno-gnu-variable-sized-type-not-at-end \
- -Wno-address-of-packed-member -Wno-tautological-compare \
-- -Wno-unknown-warning-option \
-+ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
- -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
---- a/samples/bpf/map_perf_test_kern.c
-+++ b/samples/bpf/map_perf_test_kern.c
-@@ -266,7 +266,7 @@ int stress_hash_map_lookup(struct pt_reg
- return 0;
- }
-
--SEC("kprobe/sys_getpgrp")
-+SEC("kprobe/sys_getppid")
- int stress_array_map_lookup(struct pt_regs *ctx)
- {
- u32 key = 1, i;
---- a/samples/bpf/map_perf_test_user.c
-+++ b/samples/bpf/map_perf_test_user.c
-@@ -282,7 +282,7 @@ static void test_array_lookup(int cpu)
-
- start_time = time_get_ns();
- for (i = 0; i < max_cnt; i++)
-- syscall(__NR_getpgrp, 0);
-+ syscall(__NR_getppid, 0);
- printf("%d:array_lookup %lld lookups per sec\n",
- cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
- }
---- a/tools/testing/selftests/bpf/bpf_helpers.h
-+++ b/tools/testing/selftests/bpf/bpf_helpers.h
-@@ -110,7 +110,47 @@ static int (*bpf_skb_under_cgroup)(void
- static int (*bpf_skb_change_head)(void *, int len, int flags) =
- (void *) BPF_FUNC_skb_change_head;
-
-+/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
-+#if defined(__TARGET_ARCH_x86)
-+ #define bpf_target_x86
-+ #define bpf_target_defined
-+#elif defined(__TARGET_ARCH_s930x)
-+ #define bpf_target_s930x
-+ #define bpf_target_defined
-+#elif defined(__TARGET_ARCH_arm64)
-+ #define bpf_target_arm64
-+ #define bpf_target_defined
-+#elif defined(__TARGET_ARCH_mips)
-+ #define bpf_target_mips
-+ #define bpf_target_defined
-+#elif defined(__TARGET_ARCH_powerpc)
-+ #define bpf_target_powerpc
-+ #define bpf_target_defined
-+#elif defined(__TARGET_ARCH_sparc)
-+ #define bpf_target_sparc
-+ #define bpf_target_defined
-+#else
-+ #undef bpf_target_defined
-+#endif
-+
-+/* Fall back to what the compiler says */
-+#ifndef bpf_target_defined
- #if defined(__x86_64__)
-+ #define bpf_target_x86
-+#elif defined(__s390x__)
-+ #define bpf_target_s930x
-+#elif defined(__aarch64__)
-+ #define bpf_target_arm64
-+#elif defined(__mips__)
-+ #define bpf_target_mips
-+#elif defined(__powerpc__)
-+ #define bpf_target_powerpc
-+#elif defined(__sparc__)
-+ #define bpf_target_sparc
-+#endif
-+#endif
-+
-+#if defined(bpf_target_x86)
-
- #define PT_REGS_PARM1(x) ((x)->di)
- #define PT_REGS_PARM2(x) ((x)->si)
-@@ -123,7 +163,7 @@ static int (*bpf_skb_change_head)(void *
- #define PT_REGS_SP(x) ((x)->sp)
- #define PT_REGS_IP(x) ((x)->ip)
-
--#elif defined(__s390x__)
-+#elif defined(bpf_target_s390x)
-
- #define PT_REGS_PARM1(x) ((x)->gprs[2])
- #define PT_REGS_PARM2(x) ((x)->gprs[3])
-@@ -136,7 +176,7 @@ static int (*bpf_skb_change_head)(void *
- #define PT_REGS_SP(x) ((x)->gprs[15])
- #define PT_REGS_IP(x) ((x)->psw.addr)
-
--#elif defined(__aarch64__)
-+#elif defined(bpf_target_arm64)
-
- #define PT_REGS_PARM1(x) ((x)->regs[0])
- #define PT_REGS_PARM2(x) ((x)->regs[1])
-@@ -149,7 +189,7 @@ static int (*bpf_skb_change_head)(void *
- #define PT_REGS_SP(x) ((x)->sp)
- #define PT_REGS_IP(x) ((x)->pc)
-
--#elif defined(__mips__)
-+#elif defined(bpf_target_mips)
-
- #define PT_REGS_PARM1(x) ((x)->regs[4])
- #define PT_REGS_PARM2(x) ((x)->regs[5])
-@@ -162,7 +202,7 @@ static int (*bpf_skb_change_head)(void *
- #define PT_REGS_SP(x) ((x)->regs[29])
- #define PT_REGS_IP(x) ((x)->cp0_epc)
-
--#elif defined(__powerpc__)
-+#elif defined(bpf_target_powerpc)
-
- #define PT_REGS_PARM1(x) ((x)->gpr[3])
- #define PT_REGS_PARM2(x) ((x)->gpr[4])
-@@ -173,7 +213,7 @@ static int (*bpf_skb_change_head)(void *
- #define PT_REGS_SP(x) ((x)->sp)
- #define PT_REGS_IP(x) ((x)->nip)
-
--#elif defined(__sparc__)
-+#elif defined(bpf_target_sparc)
-
- #define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
- #define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
-@@ -183,6 +223,8 @@ static int (*bpf_skb_change_head)(void *
- #define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
- #define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
- #define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
-+
-+/* Should this also be a bpf_target check for the sparc case? */
- #if defined(__arch64__)
- #define PT_REGS_IP(x) ((x)->tpc)
- #else
-@@ -191,10 +233,10 @@ static int (*bpf_skb_change_head)(void *
-
- #endif
-
--#ifdef __powerpc__
-+#ifdef bpf_target_powerpc
- #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
- #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
--#elif defined(__sparc__)
-+#elif bpf_target_sparc
- #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
- #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
- #else
diff --git a/target/linux/layerscape/patches-4.14/301-arch-support-layerscape.patch b/target/linux/layerscape/patches-4.14/301-arch-support-layerscape.patch
deleted file mode 100644
index 59ed130567..0000000000
--- a/target/linux/layerscape/patches-4.14/301-arch-support-layerscape.patch
+++ /dev/null
@@ -1,467 +0,0 @@
-From f29db0048a07384ee4cd962c676b750e13e5d6b0 Mon Sep 17 00:00:00 2001
-From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Mon, 6 May 2019 17:17:58 +0800
-Subject: [PATCH] arch: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch of arch for layerscape
-
-Signed-off-by: Abhimanyu Saini <abhimanyu.saini@nxp.com>
-Signed-off-by: Alison Wang <alison.wang@freescale.com>
-Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
-Signed-off-by: Dave Liu <daveliu@freescale.com>
-Signed-off-by: Guanhua <guanhua.gao@nxp.com>
-Signed-off-by: Haiying Wang <Haiying.wang@freescale.com>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Jerry Huang <Chang-Ming.Huang@freescale.com>
-Signed-off-by: Jianhua Xie <jianhua.xie@nxp.com>
-Signed-off-by: Jin Qing <b24347@freescale.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Li Yang <leoli@freescale.com>
-Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
-Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
-Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
-Signed-off-by: Poonam Aggrwal <poonam.aggrwal@nxp.com>
-Signed-off-by: Rajesh Bhagat <rajesh.bhagat@nxp.com>
-Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
-Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Shengzhou Liu <Shengzhou.Liu@freescale.com>
-Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
-Signed-off-by: Vabhav Sharma <vabhav.sharma@nxp.com>
-Signed-off-by: Wang Dongsheng <dongsheng.wang@freescale.com>
-Signed-off-by: Xie Xiaobo <X.Xie@freescale.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
-Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
----
- arch/arm/include/asm/delay.h | 16 ++++++
- arch/arm/include/asm/io.h | 31 ++++++++++
- arch/arm/include/asm/mach/map.h | 4 +-
- arch/arm/include/asm/pgtable.h | 7 +++
- arch/arm/kernel/time.c | 3 +
- arch/arm/mm/dma-mapping.c | 1 +
- arch/arm/mm/ioremap.c | 7 +++
- arch/arm/mm/mmu.c | 9 +++
- arch/arm64/include/asm/cache.h | 2 +-
- arch/arm64/include/asm/io.h | 1 +
- arch/arm64/include/asm/pgtable-prot.h | 3 +
- arch/arm64/include/asm/pgtable.h | 5 ++
- arch/arm64/mm/dma-mapping.c | 1 +
- arch/arm64/mm/init.c | 12 ++--
- drivers/soc/fsl/guts.c | 9 +++
- drivers/soc/fsl/qixis_ctrl.c | 105 ++++++++++++++++++++++++++++++++++
- 16 files changed, 209 insertions(+), 7 deletions(-)
- create mode 100644 drivers/soc/fsl/qixis_ctrl.c
-
---- a/arch/arm/include/asm/delay.h
-+++ b/arch/arm/include/asm/delay.h
-@@ -85,6 +85,22 @@ extern void __bad_udelay(void);
- __const_udelay((n) * UDELAY_MULT)) : \
- __udelay(n))
-
-+#define spin_event_timeout(condition, timeout, delay) \
-+({ \
-+ typeof(condition) __ret; \
-+ int i = 0; \
-+ while (!(__ret = (condition)) && (i++ < timeout)) { \
-+ if (delay) \
-+ udelay(delay); \
-+ else \
-+ cpu_relax(); \
-+ udelay(1); \
-+ } \
-+ if (!__ret) \
-+ __ret = (condition); \
-+ __ret; \
-+})
-+
- /* Loop-based definitions for assembly code. */
- extern void __loop_delay(unsigned long loops);
- extern void __loop_udelay(unsigned long usecs);
---- a/arch/arm/include/asm/io.h
-+++ b/arch/arm/include/asm/io.h
-@@ -128,6 +128,7 @@ static inline u32 __raw_readl(const vola
- #define MT_DEVICE_NONSHARED 1
- #define MT_DEVICE_CACHED 2
- #define MT_DEVICE_WC 3
-+#define MT_MEMORY_RW_NS 4
- /*
- * types 4 onwards can be found in asm/mach/map.h and are undefined
- * for ioremap
-@@ -229,6 +230,34 @@ void __iomem *pci_remap_cfgspace(resourc
- #endif
- #endif
-
-+/* access ports */
-+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
-+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
-+
-+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
-+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
-+
-+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
-+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
-+
-+/* Clear and set bits in one shot. These macros can be used to clear and
-+ * set multiple bits in a register using a single read-modify-write. These
-+ * macros can also be used to set a multiple-bit bit pattern using a mask,
-+ * by specifying the mask in the 'clear' parameter and the new bit pattern
-+ * in the 'set' parameter.
-+ */
-+
-+#define clrsetbits_be32(addr, clear, set) \
-+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_le32(addr, clear, set) \
-+ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_be16(addr, clear, set) \
-+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_le16(addr, clear, set) \
-+ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_8(addr, clear, set) \
-+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
-+
- /*
- * IO port access primitives
- * -------------------------
-@@ -417,6 +446,8 @@ void __iomem *ioremap_wc(resource_size_t
- #define ioremap_wc ioremap_wc
- #define ioremap_wt ioremap_wc
-
-+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
-+
- void iounmap(volatile void __iomem *iomem_cookie);
- #define iounmap iounmap
-
---- a/arch/arm/include/asm/mach/map.h
-+++ b/arch/arm/include/asm/mach/map.h
-@@ -21,9 +21,9 @@ struct map_desc {
- unsigned int type;
- };
-
--/* types 0-3 are defined in asm/io.h */
-+/* types 0-4 are defined in asm/io.h */
- enum {
-- MT_UNCACHED = 4,
-+ MT_UNCACHED = 5,
- MT_CACHECLEAN,
- MT_MINICLEAN,
- MT_LOW_VECTORS,
---- a/arch/arm/include/asm/pgtable.h
-+++ b/arch/arm/include/asm/pgtable.h
-@@ -119,6 +119,13 @@ extern pgprot_t pgprot_s2_device;
- #define pgprot_noncached(prot) \
- __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
-
-+#define pgprot_cached(prot) \
-+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
-+
-+#define pgprot_cached_ns(prot) \
-+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
-+ L_PTE_MT_DEV_NONSHARED)
-+
- #define pgprot_writecombine(prot) \
- __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
-
---- a/arch/arm/kernel/time.c
-+++ b/arch/arm/kernel/time.c
-@@ -12,6 +12,7 @@
- * reading the RTC at bootup, etc...
- */
- #include <linux/clk-provider.h>
-+#include <linux/clockchips.h>
- #include <linux/clocksource.h>
- #include <linux/errno.h>
- #include <linux/export.h>
-@@ -121,5 +122,7 @@ void __init time_init(void)
- of_clk_init(NULL);
- #endif
- timer_probe();
-+
-+ tick_setup_hrtimer_broadcast();
- }
- }
---- a/arch/arm/mm/dma-mapping.c
-+++ b/arch/arm/mm/dma-mapping.c
-@@ -2416,6 +2416,7 @@ void arch_setup_dma_ops(struct device *d
- #endif
- dev->archdata.dma_ops_setup = true;
- }
-+EXPORT_SYMBOL(arch_setup_dma_ops);
-
- void arch_teardown_dma_ops(struct device *dev)
- {
---- a/arch/arm/mm/ioremap.c
-+++ b/arch/arm/mm/ioremap.c
-@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t
- }
- EXPORT_SYMBOL(ioremap_wc);
-
-+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
-+{
-+ return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
-+ __builtin_return_address(0));
-+}
-+EXPORT_SYMBOL(ioremap_cache_ns);
-+
- /*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space as memory. Needed when the kernel wants to execute
---- a/arch/arm/mm/mmu.c
-+++ b/arch/arm/mm/mmu.c
-@@ -315,6 +315,13 @@ static struct mem_type mem_types[] __ro_
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
- .domain = DOMAIN_KERNEL,
- },
-+ [MT_MEMORY_RW_NS] = {
-+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-+ L_PTE_XN,
-+ .prot_l1 = PMD_TYPE_TABLE,
-+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
-+ .domain = DOMAIN_KERNEL,
-+ },
- [MT_ROM] = {
- .prot_sect = PMD_TYPE_SECT,
- .domain = DOMAIN_KERNEL,
-@@ -651,6 +658,7 @@ static void __init build_mem_type_table(
- }
- kern_pgprot |= PTE_EXT_AF;
- vecs_pgprot |= PTE_EXT_AF;
-+ mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
-
- /*
- * Set PXN for user mappings
-@@ -679,6 +687,7 @@ static void __init build_mem_type_table(
- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
- mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
- mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
-+ mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
- mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
- mem_types[MT_ROM].prot_sect |= cp->pmd;
---- a/arch/arm64/include/asm/cache.h
-+++ b/arch/arm64/include/asm/cache.h
-@@ -34,7 +34,7 @@
- #define ICACHE_POLICY_VIPT 2
- #define ICACHE_POLICY_PIPT 3
-
--#define L1_CACHE_SHIFT 7
-+#define L1_CACHE_SHIFT 6
- #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
- /*
---- a/arch/arm64/include/asm/io.h
-+++ b/arch/arm64/include/asm/io.h
-@@ -186,6 +186,7 @@ extern void __iomem *ioremap_cache(phys_
- #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
- #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
- #define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS))
- #define iounmap __iounmap
-
- /*
---- a/arch/arm64/include/asm/pgtable-prot.h
-+++ b/arch/arm64/include/asm/pgtable-prot.h
-@@ -48,6 +48,7 @@
- #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
- #define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
- #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
-+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
-
- #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
- #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
-@@ -68,6 +69,7 @@
- #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
-
- #define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-+#define PAGE_S2_NS __pgprot(PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDWR | PTE_TYPE_PAGE | PTE_AF)
- #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-
- #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
---- a/arch/arm64/include/asm/pgtable.h
-+++ b/arch/arm64/include/asm/pgtable.h
-@@ -357,6 +357,11 @@ static inline int pmd_protnone(pmd_t pmd
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
- #define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
-+#define pgprot_cached(prot) \
-+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
-+ PTE_PXN | PTE_UXN)
-+#define pgprot_cached_ns(prot) \
-+ __pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
- #define pgprot_device(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
- #define __HAVE_PHYS_MEM_ACCESS_PROT
---- a/arch/arm64/mm/dma-mapping.c
-+++ b/arch/arm64/mm/dma-mapping.c
-@@ -947,3 +947,4 @@ void arch_setup_dma_ops(struct device *d
- }
- #endif
- }
-+EXPORT_SYMBOL(arch_setup_dma_ops);
---- a/arch/arm64/mm/init.c
-+++ b/arch/arm64/mm/init.c
-@@ -457,6 +457,14 @@ void __init arm64_memblock_init(void)
- * Register the kernel text, kernel data, initrd, and initial
- * pagetables with memblock.
- */
-+
-+ /* make this the first reservation so that there are no chances of
-+ * overlap
-+ */
-+ reserve_elfcorehdr();
-+
-+ reserve_crashkernel();
-+
- memblock_reserve(__pa_symbol(_text), _end - _text);
- #ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
-@@ -476,10 +484,6 @@ void __init arm64_memblock_init(void)
- else
- arm64_dma_phys_limit = PHYS_MASK + 1;
-
-- reserve_crashkernel();
--
-- reserve_elfcorehdr();
--
- high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
- dma_contiguous_reserve(arm64_dma_phys_limit);
---- a/drivers/soc/fsl/guts.c
-+++ b/drivers/soc/fsl/guts.c
-@@ -100,6 +100,11 @@ static const struct fsl_soc_die_attr fsl
- .svr = 0x87000000,
- .mask = 0xfff70000,
- },
-+ /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
-+ { .die = "LX2160A",
-+ .svr = 0x87360000,
-+ .mask = 0xff3f0000,
-+ },
- { },
- };
-
-@@ -213,6 +218,10 @@ static const struct of_device_id fsl_gut
- { .compatible = "fsl,ls1021a-dcfg", },
- { .compatible = "fsl,ls1043a-dcfg", },
- { .compatible = "fsl,ls2080a-dcfg", },
-+ { .compatible = "fsl,ls1088a-dcfg", },
-+ { .compatible = "fsl,ls1012a-dcfg", },
-+ { .compatible = "fsl,ls1046a-dcfg", },
-+ { .compatible = "fsl,lx2160a-dcfg", },
- {}
- };
- MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
---- /dev/null
-+++ b/drivers/soc/fsl/qixis_ctrl.c
-@@ -0,0 +1,105 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+
-+/* Freescale QIXIS system controller driver.
-+ *
-+ * Copyright 2015 Freescale Semiconductor, Inc.
-+ * Copyright 2018-2019 NXP
-+ */
-+
-+#include <linux/err.h>
-+#include <linux/i2c.h>
-+#include <linux/module.h>
-+#include <linux/mfd/core.h>
-+#include <linux/of.h>
-+#include <linux/regmap.h>
-+
-+/* QIXIS MAP */
-+struct fsl_qixis_regs {
-+ u8 id; /* Identification Registers */
-+ u8 version; /* Version Register */
-+ u8 qixis_ver; /* QIXIS Version Register */
-+ u8 reserved1[0x1f];
-+};
-+
-+struct qixis_priv {
-+ struct regmap *regmap;
-+};
-+
-+static struct regmap_config qixis_regmap_config = {
-+ .reg_bits = 8,
-+ .val_bits = 8,
-+};
-+
-+static const struct mfd_cell fsl_qixis_devs[] = {
-+ {
-+ .name = "reg-mux",
-+ .of_compatible = "reg-mux",
-+ },
-+};
-+
-+static int fsl_qixis_i2c_probe(struct i2c_client *client)
-+{
-+ struct qixis_priv *priv;
-+ int ret = 0;
-+ u32 qver;
-+
-+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-+ return -EOPNOTSUPP;
-+
-+ priv = devm_kzalloc(&client->dev, sizeof(struct qixis_priv),
-+ GFP_KERNEL);
-+ if (!priv)
-+ return -ENOMEM;
-+
-+ priv->regmap = regmap_init_i2c(client, &qixis_regmap_config);
-+ regmap_read(priv->regmap, offsetof(struct fsl_qixis_regs, qixis_ver),
-+ &qver);
-+ pr_info("Freescale QIXIS Version: 0x%08x\n", qver);
-+
-+ i2c_set_clientdata(client, priv);
-+
-+ if (of_device_is_compatible(client->dev.of_node, "simple-mfd"))
-+ ret = devm_mfd_add_devices(&client->dev, -1, fsl_qixis_devs,
-+ ARRAY_SIZE(fsl_qixis_devs), NULL, 0,
-+ NULL);
-+ if (ret)
-+ goto error;
-+
-+ return ret;
-+error:
-+ regmap_exit(priv->regmap);
-+
-+ return ret;
-+}
-+
-+static int fsl_qixis_i2c_remove(struct i2c_client *client)
-+{
-+ struct qixis_priv *priv;
-+
-+ priv = i2c_get_clientdata(client);
-+ regmap_exit(priv->regmap);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id fsl_qixis_i2c_of_match[] = {
-+ { .compatible = "fsl,fpga-qixis-i2c" },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, fsl_qixis_i2c_of_match);
-+
-+static struct i2c_driver fsl_qixis_i2c_driver = {
-+ .driver = {
-+ .name = "qixis_ctrl_i2c",
-+ .owner = THIS_MODULE,
-+ .of_match_table = of_match_ptr(fsl_qixis_i2c_of_match),
-+ },
-+ .probe_new = fsl_qixis_i2c_probe,
-+ .remove = fsl_qixis_i2c_remove,
-+};
-+module_i2c_driver(fsl_qixis_i2c_driver);
-+
-+MODULE_AUTHOR("Wang Dongsheng <dongsheng.wang@freescale.com>");
-+MODULE_DESCRIPTION("Freescale QIXIS system controller driver");
-+MODULE_LICENSE("GPL");
-+
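The qixis_ctrl.c driver removed above probes I2C nodes matching "fsl,fpga-qixis-i2c" and, when the node is also "simple-mfd" compatible, registers a "reg-mux" MFD child for the board-control mux. A minimal device-tree sketch of the binding it expects is shown below; the parent I2C bus, the 0x66 unit address and the register/mask values are illustrative assumptions, not values taken from the removed patches.

	&i2c0 {
		board-control@66 {
			compatible = "fsl,fpga-qixis-i2c", "simple-mfd";
			reg = <0x66>;	/* assumed QIXIS I2C address */

			mux: mux-controller {
				compatible = "reg-mux";
				#mux-control-cells = <1>;
				/* assumed QIXIS register offset and bit mask driving the mux */
				mux-reg-masks = <0x54 0xf0>;
			};
		};
	};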
diff --git a/target/linux/layerscape/patches-4.14/302-dts-support-layerscape.patch b/target/linux/layerscape/patches-4.14/302-dts-support-layerscape.patch
deleted file mode 100644
index 919bb30cfc..0000000000
--- a/target/linux/layerscape/patches-4.14/302-dts-support-layerscape.patch
+++ /dev/null
@@ -1,10909 +0,0 @@
-From cc1d1d1b68d18a31aeb8a572ca6b3929b083855c Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:33 +0800
-Subject: [PATCH] dts: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch of dts for layerscape
-
-Signed-off-by: Abhimanyu Saini <abhimanyu.saini@nxp.com>
-Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
-Signed-off-by: Alan Wang <alan.wang@nxp.com>
-Signed-off-by: Alison Wang <alison.wang@nxp.com>
-Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
-Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
-Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
-Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
-Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
-Signed-off-by: Bhupesh Sharma <bhupesh.sharma@freescale.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
-Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
-Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
-Signed-off-by: Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
-Signed-off-by: Changming Huang <jerry.huang@nxp.com>
-Signed-off-by: Chuanhua Han <chuanhua.han@nxp.com>
-Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
-Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Honghua Yin <Hong-Hua.Yin@freescale.com>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: Iordache Florinel-R70177 <florinel.iordache@nxp.com>
-Signed-off-by: Jagdish Gediya <jagdish.gediya@nxp.com>
-Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Li Yang <leoyang.li@nxp.com>
-Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
-Signed-off-by: Mathew McBride <matt@traverse.com.au>
-Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
-Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
-Signed-off-by: Pankaj Gupta <pankaj.gupta@nxp.com>
-Signed-off-by: Peng Ma <peng.ma@nxp.com>
-Signed-off-by: Po Liu <po.liu@nxp.com>
-Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
-Signed-off-by: Pratiyush Mohan Srivastava <pratiyush.srivastava@nxp.com>
-Signed-off-by: Priyanka Jain <priyanka.jain@nxp.com>
-Signed-off-by: Raghav Dogra <raghav.dogra@nxp.com>
-Signed-off-by: Rai Harninder <harninder.rai@nxp.com>
-Signed-off-by: Ramneek Mehresh <ramneek.mehresh@nxp.com>
-Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Sakar Arora <Sakar.Arora@freescale.com>
-Signed-off-by: Santan Kumar <santan.kumar@nxp.com>
-Signed-off-by: Scott Wood <oss@buserror.net>
-Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
-Signed-off-by: Shawn Guo <shawnguo@kernel.org>
-Signed-off-by: Sriram Dash <sriram.dash@nxp.com>
-Signed-off-by: Sumit Garg <sumit.garg@nxp.com>
-Signed-off-by: Suresh Gupta <suresh.gupta@nxp.com>
-Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
-Signed-off-by: Tao Yang <b31903@freescale.com>
-Signed-off-by: Vabhav Sharma <vabhav.sharma@nxp.com>
-Signed-off-by: Vicentiu Galanopulo <vicentiu.galanopulo@nxp.com>
-Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
-Signed-off-by: Wasim Khan <wasim.khan@nxp.com>
-Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
-Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
-Signed-off-by: Yuantian Tang <andy.tang@nxp.com>
-Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
-Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
-Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
----
- arch/arm/boot/dts/Makefile | 3 +-
- arch/arm/boot/dts/imx25.dtsi | 4 +-
- arch/arm/boot/dts/imx28.dtsi | 4 +-
- arch/arm/boot/dts/imx35.dtsi | 4 +-
- arch/arm/boot/dts/imx53.dtsi | 4 +-
- arch/arm/boot/dts/ls1021a-iot.dts | 262 ++++
- arch/arm/boot/dts/ls1021a-qds.dts | 32 +
- arch/arm/boot/dts/ls1021a-twr.dts | 27 +
- arch/arm/boot/dts/ls1021a.dtsi | 111 +-
- arch/arm64/boot/dts/freescale/Makefile | 16 +-
- .../boot/dts/freescale/fsl-ls1012a-2g5rdb.dts | 126 ++
- .../boot/dts/freescale/fsl-ls1012a-frdm.dts | 97 +-
- .../boot/dts/freescale/fsl-ls1012a-frwy.dts | 179 +++
- .../boot/dts/freescale/fsl-ls1012a-qds.dts | 136 +-
- .../boot/dts/freescale/fsl-ls1012a-rdb.dts | 100 +-
- .../arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 210 ++-
- .../boot/dts/freescale/fsl-ls1043-post.dtsi | 3 +-
- .../dts/freescale/fsl-ls1043a-qds-sdk.dts | 263 ++++
- .../boot/dts/freescale/fsl-ls1043a-qds.dts | 206 ++-
- .../dts/freescale/fsl-ls1043a-rdb-sdk.dts | 262 ++++
- .../dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 140 ++
- .../boot/dts/freescale/fsl-ls1043a-rdb.dts | 76 +-
- .../arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 382 +++--
- .../boot/dts/freescale/fsl-ls1046-post.dtsi | 2 +-
- .../dts/freescale/fsl-ls1046a-qds-sdk.dts | 268 ++++
- .../boot/dts/freescale/fsl-ls1046a-qds.dts | 194 ++-
- .../dts/freescale/fsl-ls1046a-rdb-sdk.dts | 307 ++++
- .../dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 133 ++
- .../boot/dts/freescale/fsl-ls1046a-rdb.dts | 48 +-
- .../arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 386 +++--
- .../boot/dts/freescale/fsl-ls1088a-qds.dts | 88 +-
- .../boot/dts/freescale/fsl-ls1088a-rdb.dts | 150 +-
- .../arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 546 ++++++-
- .../boot/dts/freescale/fsl-ls2080a-qds.dts | 100 +-
- .../boot/dts/freescale/fsl-ls2080a-rdb.dts | 118 +-
- .../boot/dts/freescale/fsl-ls2080a-simu.dts | 38 +-
- .../arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 50 +-
- .../boot/dts/freescale/fsl-ls2081a-rdb.dts | 163 ++
- .../boot/dts/freescale/fsl-ls2088a-qds.dts | 158 +-
- .../boot/dts/freescale/fsl-ls2088a-rdb.dts | 118 +-
- .../arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 52 +-
- .../boot/dts/freescale/fsl-ls208xa-qds.dtsi | 43 +-
- .../boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 60 +-
- .../arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 299 ++--
- .../boot/dts/freescale/fsl-lx2160a-qds.dts | 353 +++++
- .../boot/dts/freescale/fsl-lx2160a-rdb.dts | 241 +++
- .../arm64/boot/dts/freescale/fsl-lx2160a.dtsi | 1318 +++++++++++++++++
- .../boot/dts/freescale/fsl-tmu-map1.dtsi | 99 ++
- .../boot/dts/freescale/fsl-tmu-map2.dtsi | 99 ++
- .../boot/dts/freescale/fsl-tmu-map3.dtsi | 99 ++
- arch/arm64/boot/dts/freescale/fsl-tmu.dtsi | 251 ++++
- .../dts/freescale/qoriq-bman-portals-sdk.dtsi | 55 +
- .../dts/freescale/qoriq-bman-portals.dtsi | 8 +-
- .../boot/dts/freescale/qoriq-dpaa-eth.dtsi | 97 ++
- .../dts/freescale/qoriq-fman3-0-10g-0.dtsi | 11 +-
- .../dts/freescale/qoriq-fman3-0-10g-1.dtsi | 11 +-
- .../dts/freescale/qoriq-fman3-0-1g-0.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-1.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-2.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-3.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-4.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-5.dtsi | 7 +-
- .../boot/dts/freescale/qoriq-fman3-0-6oh.dtsi | 47 +
- .../boot/dts/freescale/qoriq-fman3-0.dtsi | 67 +-
- .../dts/freescale/qoriq-qman-portals-sdk.dtsi | 38 +
- .../dts/freescale/qoriq-qman-portals.dtsi | 9 +-
- .../boot/dts/freescale/traverse-ls1043s.dts | 29 +
- .../boot/dts/freescale/traverse-ls1043v.dts | 29 +
- 68 files changed, 7660 insertions(+), 1211 deletions(-)
- create mode 100644 arch/arm/boot/dts/ls1021a-iot.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-2g5rdb.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu-map1.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu-map2.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu-map3.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman-portals-sdk.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman-portals-sdk.dtsi
-
---- a/arch/arm/boot/dts/Makefile
-+++ b/arch/arm/boot/dts/Makefile
-@@ -496,7 +496,8 @@ dtb-$(CONFIG_SOC_IMX7D) += \
- imx7s-warp.dtb
- dtb-$(CONFIG_SOC_LS1021A) += \
- ls1021a-qds.dtb \
-- ls1021a-twr.dtb
-+ ls1021a-twr.dtb \
-+ ls1021a-iot.dtb
- dtb-$(CONFIG_SOC_VF610) += \
- vf500-colibri-eval-v3.dtb \
- vf610-colibri-eval-v3.dtb \
---- a/arch/arm/boot/dts/imx25.dtsi
-+++ b/arch/arm/boot/dts/imx25.dtsi
-@@ -122,7 +122,7 @@
- };
-
- can1: can@43f88000 {
-- compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx25-flexcan";
- reg = <0x43f88000 0x4000>;
- interrupts = <43>;
- clocks = <&clks 75>, <&clks 75>;
-@@ -131,7 +131,7 @@
- };
-
- can2: can@43f8c000 {
-- compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx25-flexcan";
- reg = <0x43f8c000 0x4000>;
- interrupts = <44>;
- clocks = <&clks 76>, <&clks 76>;
---- a/arch/arm/boot/dts/imx28.dtsi
-+++ b/arch/arm/boot/dts/imx28.dtsi
-@@ -1038,7 +1038,7 @@
- };
-
- can0: can@80032000 {
-- compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx28-flexcan";
- reg = <0x80032000 0x2000>;
- interrupts = <8>;
- clocks = <&clks 58>, <&clks 58>;
-@@ -1047,7 +1047,7 @@
- };
-
- can1: can@80034000 {
-- compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx28-flexcan";
- reg = <0x80034000 0x2000>;
- interrupts = <9>;
- clocks = <&clks 59>, <&clks 59>;
---- a/arch/arm/boot/dts/imx35.dtsi
-+++ b/arch/arm/boot/dts/imx35.dtsi
-@@ -303,7 +303,7 @@
- };
-
- can1: can@53fe4000 {
-- compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx35-flexcan";
- reg = <0x53fe4000 0x1000>;
- clocks = <&clks 33>, <&clks 33>;
- clock-names = "ipg", "per";
-@@ -312,7 +312,7 @@
- };
-
- can2: can@53fe8000 {
-- compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx35-flexcan";
- reg = <0x53fe8000 0x1000>;
- clocks = <&clks 34>, <&clks 34>;
- clock-names = "ipg", "per";
---- a/arch/arm/boot/dts/imx53.dtsi
-+++ b/arch/arm/boot/dts/imx53.dtsi
-@@ -536,7 +536,7 @@
- };
-
- can1: can@53fc8000 {
-- compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx53-flexcan";
- reg = <0x53fc8000 0x4000>;
- interrupts = <82>;
- clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
-@@ -546,7 +546,7 @@
- };
-
- can2: can@53fcc000 {
-- compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
-+ compatible = "fsl,imx53-flexcan";
- reg = <0x53fcc000 0x4000>;
- interrupts = <83>;
- clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
---- /dev/null
-+++ b/arch/arm/boot/dts/ls1021a-iot.dts
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ */
-+
-+/dts-v1/;
-+#include "ls1021a.dtsi"
-+
-+/ {
-+ model = "LS1021A IOT Board";
-+
-+ sys_mclk: clock-mclk {
-+ compatible = "fixed-clock";
-+ #clock-cells = <0>;
-+ clock-frequency = <24576000>;
-+ };
-+
-+ regulators {
-+ compatible = "simple-bus";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ reg_3p3v: regulator@0 {
-+ compatible = "regulator-fixed";
-+ reg = <0>;
-+ regulator-name = "3P3V";
-+ regulator-min-microvolt = <3300000>;
-+ regulator-max-microvolt = <3300000>;
-+ regulator-always-on;
-+ };
-+
-+ reg_2p5v: regulator@1 {
-+ compatible = "regulator-fixed";
-+ reg = <1>;
-+ regulator-name = "2P5V";
-+ regulator-min-microvolt = <2500000>;
-+ regulator-max-microvolt = <2500000>;
-+ regulator-always-on;
-+ };
-+ };
-+
-+ sound {
-+ compatible = "simple-audio-card";
-+ simple-audio-card,format = "i2s";
-+ simple-audio-card,widgets =
-+ "Microphone", "Microphone Jack",
-+ "Headphone", "Headphone Jack",
-+ "Speaker", "Speaker Ext",
-+ "Line", "Line In Jack";
-+ simple-audio-card,routing =
-+ "MIC_IN", "Microphone Jack",
-+ "Microphone Jack", "Mic Bias",
-+ "LINE_IN", "Line In Jack",
-+ "Headphone Jack", "HP_OUT",
-+ "Speaker Ext", "LINE_OUT";
-+
-+ simple-audio-card,cpu {
-+ sound-dai = <&sai2>;
-+ frame-master;
-+ bitclock-master;
-+ };
-+
-+ simple-audio-card,codec {
-+ sound-dai = <&codec>;
-+ frame-master;
-+ bitclock-master;
-+ };
-+ };
-+
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
-+ };
-+ };
-+};
-+
-+&enet0 {
-+ tbi-handle = <&tbi1>;
-+ phy-handle = <&phy1>;
-+ phy-connection-type = "sgmii";
-+ status = "okay";
-+};
-+
-+&enet1 {
-+ tbi-handle = <&tbi1>;
-+ phy-handle = <&phy3>;
-+ phy-connection-type = "sgmii";
-+ status = "okay";
-+};
-+
-+&enet2 {
-+ fixed-link = <0 1 1000 0 0>;
-+ phy-connection-type = "rgmii-id";
-+ status = "okay";
-+};
-+
-+&can0{
-+ status = "disabled";
-+};
-+
-+&can1{
-+ status = "disabled";
-+};
-+
-+&can2{
-+ status = "disabled";
-+};
-+
-+&can3{
-+ status = "okay";
-+};
-+
-+&esdhc{
-+ status = "okay";
-+};
-+
-+&i2c0 {
-+ status = "okay";
-+
-+ max1239@35 {
-+ compatible = "maxim,max1239";
-+ reg = <0x35>;
-+ #io-channel-cells = <1>;
-+ };
-+
-+ codec: sgtl5000@2a {
-+ #sound-dai-cells=<0x0>;
-+ compatible = "fsl,sgtl5000";
-+ reg = <0x2a>;
-+ VDDA-supply = <&reg_3p3v>;
-+ VDDIO-supply = <&reg_2p5v>;
-+ clocks = <&sys_mclk 1>;
-+ };
-+
-+ pca9555: pca9555@23 {
-+ compatible = "nxp,pca9555";
-+ /*pinctrl-names = "default";*/
-+ /*interrupt-parent = <&gpio2>;
-+ interrupts = <19 0x2>;*/
-+ gpio-controller;
-+ #gpio-cells = <2>;
-+ interrupt-controller;
-+ #interrupt-cells = <2>;
-+ reg = <0x23>;
-+ };
-+
-+ ina220@44 {
-+ compatible = "ti,ina220";
-+ reg = <0x44>;
-+ shunt-resistor = <1000>;
-+ };
-+
-+ ina220@45 {
-+ compatible = "ti,ina220";
-+ reg = <0x45>;
-+ shunt-resistor = <1000>;
-+ };
-+
-+ lm75b@48 {
-+ compatible = "nxp,lm75a";
-+ reg = <0x48>;
-+ };
-+
-+ adt7461a@4c {
-+ compatible = "adt7461a";
-+ reg = <0x4c>;
-+ };
-+
-+ hdmi: sii9022a@39 {
-+ compatible = "fsl,sii902x";
-+ reg = <0x39>;
-+ interrupts = <GIC_SPI 163 IRQ_TYPE_EDGE_RISING>;
-+ };
-+};
-+
-+&i2c1 {
-+ status = "disabled";
-+};
-+
-+&ifc {
-+ status = "disabled";
-+};
-+
-+&lpuart0 {
-+ status = "okay";
-+};
-+
-+&mdio0 {
-+ phy0: ethernet-phy@0 {
-+ reg = <0x0>;
-+ };
-+ phy1: ethernet-phy@1 {
-+ reg = <0x1>;
-+ };
-+ phy2: ethernet-phy@2 {
-+ reg = <0x2>;
-+ };
-+ phy3: ethernet-phy@3 {
-+ reg = <0x3>;
-+ };
-+ tbi1: tbi-phy@1f {
-+ reg = <0x1f>;
-+ device_type = "tbi-phy";
-+ };
-+};
-+
-+&qspi {
-+ num-cs = <2>;
-+ status = "okay";
-+
-+ qflash0: s25fl128s@0 {
-+ compatible = "spansion,s25fl129p1";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ };
-+};
-+
-+&sai2 {
-+ status = "okay";
-+};
-+
-+&uart0 {
-+ status = "okay";
-+};
-+
-+&uart1 {
-+ status = "okay";
-+};
-+
-+&dcu {
-+ display = <&display>;
-+ status = "okay";
-+
-+ display: display@0 {
-+ bits-per-pixel = <24>;
-+
-+ display-timings {
-+ native-mode = <&timing0>;
-+
-+ timing0: mode0 {
-+ clock-frequency = <25000000>;
-+ hactive = <640>;
-+ vactive = <480>;
-+ hback-porch = <80>;
-+ hfront-porch = <80>;
-+ vback-porch = <16>;
-+ vfront-porch = <16>;
-+ hsync-len = <12>;
-+ vsync-len = <2>;
-+ hsync-active = <1>;
-+ vsync-active = <1>;
-+ };
-+ };
-+ };
-+};
---- a/arch/arm/boot/dts/ls1021a-qds.dts
-+++ b/arch/arm/boot/dts/ls1021a-qds.dts
-@@ -124,6 +124,21 @@
- };
- };
-
-+&qspi {
-+ num-cs = <2>;
-+ status = "okay";
-+
-+ qflash0: s25fl128s@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+};
-+
- &enet0 {
- tbi-handle = <&tbi0>;
- phy-handle = <&sgmii_phy1c>;
-@@ -239,6 +254,11 @@
- device-width = <1>;
- };
-
-+ nand@2,0 {
-+ compatible = "fsl,ifc-nand";
-+ reg = <0x2 0x0 0x10000>;
-+ };
-+
- fpga: board-control@3,0 {
- #address-cells = <1>;
- #size-cells = <1>;
-@@ -316,6 +336,10 @@
- };
- };
-
-+&esdhc {
-+ status = "okay";
-+};
-+
- &sai2 {
- status = "okay";
- };
-@@ -331,3 +355,11 @@
- &uart1 {
- status = "okay";
- };
-+
-+&can0 {
-+ status = "okay";
-+};
-+
-+&can1 {
-+ status = "okay";
-+};
---- a/arch/arm/boot/dts/ls1021a-twr.dts
-+++ b/arch/arm/boot/dts/ls1021a-twr.dts
-@@ -142,6 +142,21 @@
- };
- };
-
-+&qspi {
-+ num-cs = <2>;
-+ status = "okay";
-+
-+ qflash0: n25q128a13@0 {
-+ compatible = "n25q128a13", "jedec,spi-nor";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+};
-+
- &enet0 {
- tbi-handle = <&tbi0>;
- phy-handle = <&sgmii_phy2>;
-@@ -235,6 +250,10 @@
- };
- };
-
-+&esdhc {
-+ status = "okay";
-+};
-+
- &sai1 {
- status = "okay";
- };
-@@ -250,3 +269,11 @@
- &uart1 {
- status = "okay";
- };
-+
-+&can0 {
-+ status = "okay";
-+};
-+
-+&can1 {
-+ status = "okay";
-+};
---- a/arch/arm/boot/dts/ls1021a.dtsi
-+++ b/arch/arm/boot/dts/ls1021a.dtsi
-@@ -146,12 +146,13 @@
- ifc: ifc@1530000 {
- compatible = "fsl,ifc", "simple-bus";
- reg = <0x0 0x1530000 0x0 0x10000>;
-+ big-endian;
- interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- dcfg: dcfg@1ee0000 {
- compatible = "fsl,ls1021a-dcfg", "syscon";
-- reg = <0x0 0x1ee0000 0x0 0x10000>;
-+ reg = <0x0 0x1ee0000 0x0 0x1000>;
- big-endian;
- };
-
-@@ -334,25 +335,44 @@
- status = "disabled";
- };
-
-+ qspi: quadspi@1550000 {
-+ compatible = "fsl,ls1021a-qspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x1550000 0x0 0x10000>,
-+ <0x0 0x40000000 0x0 0x4000000>;
-+ reg-names = "QuadSPI", "QuadSPI-memory";
-+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "qspi_en", "qspi";
-+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
-+ big-endian;
-+ status = "disabled";
-+ };
-+
- i2c0: i2c@2180000 {
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls1021a-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2180000 0x0 0x10000>;
- interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
- clocks = <&clockgen 4 1>;
-+ dma-names = "tx", "rx";
-+ dmas = <&edma0 1 39>,
-+ <&edma0 1 38>;
-+ fsl-scl-gpio = <&gpio3 23 0>;
- status = "disabled";
- };
-
- i2c1: i2c@2190000 {
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls1021a-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2190000 0x0 0x10000>;
- interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c";
- clocks = <&clockgen 4 1>;
-+ fsl-scl-gpio = <&gpio3 23 0>;
- status = "disabled";
- };
-
-@@ -497,6 +517,17 @@
- status = "disabled";
- };
-
-+ ftm0: ftm0@29d0000 {
-+ compatible = "fsl,ls1021a-ftm-alarm";
-+ reg = <0x0 0x29d0000 0x0 0x10000>,
-+ <0x0 0x1ee2144 0x0 0x4>,
-+ <0x0 0x0157051c 0x0 0x4>;
-+ reg-names = "ftm", "pmctrl", "scrachpad";
-+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
-+ big-endian;
-+ status = "okay";
-+ };
-+
- wdog0: watchdog@2ad0000 {
- compatible = "fsl,imx21-wdt";
- reg = <0x0 0x2ad0000 0x0 0x10000>;
-@@ -550,6 +581,25 @@
- <&clockgen 4 1>;
- };
-
-+ qdma: qdma@8390000 {
-+ compatible = "fsl,ls1021a-qdma";
-+ reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */
-+ <0x0 0x8389000 0x0 0x1000>, /* Status regs */
-+ <0x0 0x838a000 0x0 0x2000>; /* Block regs */
-+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "qdma-error",
-+ "qdma-queue0", "qdma-queue1";
-+ channels = <8>;
-+ block-number = <2>;
-+ block-offset = <0x1000>;
-+ queues = <2>;
-+ status-sizes = <64>;
-+ queue-sizes = <64 64>;
-+ big-endian;
-+ };
-+
- dcu: dcu@2ce0000 {
- compatible = "fsl,ls1021a-dcu";
- reg = <0x0 0x2ce0000 0x0 0x10000>;
-@@ -693,6 +743,11 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ configure-gfladj;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
- };
-
- pcie@3400000 {
-@@ -700,7 +755,9 @@
- reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */
- 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
-- interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
-+ interrupt-names = "pme", "aer";
- fsl,pcie-scfg = <&scfg 0>;
- #address-cells = <3>;
- #size-cells = <2>;
-@@ -716,6 +773,7 @@
- <0000 0 0 2 &gic GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 3 &gic GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 4 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
- };
-
- pcie@3500000 {
-@@ -723,7 +781,9 @@
- reg = <0x00 0x03500000 0x0 0x00010000 /* controller registers */
- 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
-- interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
-+ interrupt-names = "pme", "aer";
- fsl,pcie-scfg = <&scfg 1>;
- #address-cells = <3>;
- #size-cells = <2>;
-@@ -739,6 +799,47 @@
- <0000 0 0 2 &gic GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ can0: can@2a70000 {
-+ compatible = "fsl,ls1021ar2-flexcan";
-+ reg = <0x0 0x2a70000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
-+ clock-names = "ipg", "per";
-+ big-endian;
-+ status = "disabled";
-+ };
-+
-+ can1: can@2a80000 {
-+ compatible = "fsl,ls1021ar2-flexcan";
-+ reg = <0x0 0x2a80000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
-+ clock-names = "ipg", "per";
-+ big-endian;
-+ status = "disabled";
-+ };
-+
-+ can2: can@2a90000 {
-+ compatible = "fsl,ls1021ar2-flexcan";
-+ reg = <0x0 0x2a90000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
-+ clock-names = "ipg", "per";
-+ big-endian;
-+ status = "disabled";
-+ };
-+
-+ can3: can@2aa0000 {
-+ compatible = "fsl,ls1021ar2-flexcan";
-+ reg = <0x0 0x2aa0000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
-+ clock-names = "ipg", "per";
-+ big-endian;
-+ status = "disabled";
- };
- };
- };
---- a/arch/arm64/boot/dts/freescale/Makefile
-+++ b/arch/arm64/boot/dts/freescale/Makefile
-@@ -1,19 +1,33 @@
- # SPDX-License-Identifier: GPL-2.0
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frwy.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-2g5rdb.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds-sdk.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb-sdk.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb-usdpaa.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds-sdk.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb-sdk.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb-usdpaa.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-qds.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2081a-rdb.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
--
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
-+
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043v.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043s.dtb
-+
- always := $(dtb-y)
- subdir-y := $(dts-dirs)
- clean-files := *.dtb
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-2g5rdb.dts
-@@ -0,0 +1,126 @@
-+/*
-+ * Device Tree file for NXP LS1012A 2G5RDB Board.
-+ *
-+ * Copyright 2017 NXP
-+ *
-+ * Bhaskar Upadhaya <bhaskar.upadhaya@nxp.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+/dts-v1/;
-+
-+#include "fsl-ls1012a.dtsi"
-+
-+/ {
-+ model = "LS1012A 2G5RDB Board";
-+ compatible = "fsl,ls1012a-rdb", "fsl,ls1012a";
-+
-+ aliases {
-+ ethernet0 = &pfe_mac0;
-+ ethernet1 = &pfe_mac1;
-+ };
-+};
-+
-+&duart0 {
-+ status = "okay";
-+};
-+
-+&i2c0 {
-+ status = "okay";
-+};
-+
-+&qspi {
-+ num-cs = <2>;
-+ bus-num = <0>;
-+ status = "okay";
-+
-+ qflash0: s25fs512s@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ m25p,fast-read;
-+ reg = <0>;
-+ };
-+};
-+
-+&sata {
-+ status = "okay";
-+};
-+
-+&pfe {
-+ status = "okay";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ pfe_mac0: ethernet@0 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii-2500";
-+ phy-handle = <&sgmii_phy1>;
-+ };
-+
-+ pfe_mac1: ethernet@1 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x1>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii-2500";
-+ phy-handle = <&sgmii_phy2>;
-+ };
-+
-+ mdio@0 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ sgmii_phy1: ethernet-phy@1 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x1>;
-+ };
-+
-+ sgmii_phy2: ethernet-phy@2 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x2>;
-+ };
-+ };
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
-@@ -1,45 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS1012A Freedom Board.
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
- /dts-v1/;
-
-@@ -49,6 +13,11 @@
- model = "LS1012A Freedom Board";
- compatible = "fsl,ls1012a-frdm", "fsl,ls1012a";
-
-+ aliases {
-+ ethernet0 = &pfe_mac0;
-+ ethernet1 = &pfe_mac1;
-+ };
-+
- sys_mclk: clock-mclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
-@@ -110,6 +79,45 @@
- };
- };
-
-+&pfe {
-+ status = "okay";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ pfe_mac0: ethernet@0 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii";
-+ phy-handle = <&sgmii_phy1>;
-+ };
-+
-+ pfe_mac1: ethernet@1 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x1>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii";
-+ phy-handle = <&sgmii_phy2>;
-+ };
-+
-+ mdio@0 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ sgmii_phy1: ethernet-phy@2 {
-+ reg = <0x2>;
-+ };
-+
-+ sgmii_phy2: ethernet-phy@1 {
-+ reg = <0x1>;
-+ };
-+ };
-+};
-+
- &sai2 {
- status = "okay";
- };
-@@ -117,3 +125,18 @@
- &sata {
- status = "okay";
- };
-+
-+&qspi {
-+ status = "okay";
-+ qflash0: s25fs512s@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ m25p,fast-read;
-+ reg = <0>;
-+ spi-rx-bus-width = <2>;
-+ spi-tx-bus-width = <2>;
-+ };
-+
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
-@@ -0,0 +1,179 @@
-+/*
-+ * Device Tree file for NXP LS1012A FRWY Board.
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+/dts-v1/;
-+
-+#include "fsl-ls1012a.dtsi"
-+
-+/ {
-+ model = "LS1012A FRWY Board";
-+ compatible = "fsl,ls1012a-frwy", "fsl,ls1012a";
-+
-+ aliases {
-+ ethernet0 = &pfe_mac0;
-+ ethernet1 = &pfe_mac1;
-+ };
-+
-+ sys_mclk: clock-mclk {
-+ compatible = "fixed-clock";
-+ #clock-cells = <0>;
-+ clock-frequency = <25000000>;
-+ };
-+
-+ reg_1p8v: regulator-1p8v {
-+ compatible = "regulator-fixed";
-+ regulator-name = "1P8V";
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
-+ regulator-always-on;
-+ };
-+
-+ sound {
-+ compatible = "simple-audio-card";
-+ simple-audio-card,format = "i2s";
-+ simple-audio-card,widgets =
-+ "Microphone", "Microphone Jack",
-+ "Headphone", "Headphone Jack",
-+ "Speaker", "Speaker Ext",
-+ "Line", "Line In Jack";
-+ simple-audio-card,routing =
-+ "MIC_IN", "Microphone Jack",
-+ "Microphone Jack", "Mic Bias",
-+ "LINE_IN", "Line In Jack",
-+ "Headphone Jack", "HP_OUT",
-+ "Speaker Ext", "LINE_OUT";
-+
-+ simple-audio-card,cpu {
-+ sound-dai = <&sai2>;
-+ frame-master;
-+ bitclock-master;
-+ };
-+
-+ simple-audio-card,codec {
-+ sound-dai = <&codec>;
-+ frame-master;
-+ bitclock-master;
-+ system-clock-frequency = <25000000>;
-+ };
-+ };
-+};
-+
-+&pcie {
-+ status = "okay";
-+};
-+
-+&duart0 {
-+ status = "okay";
-+};
-+
-+&i2c0 {
-+ status = "okay";
-+
-+ codec: sgtl5000@a {
-+ compatible = "fsl,sgtl5000";
-+ #sound-dai-cells = <0>;
-+ reg = <0xa>;
-+ VDDA-supply = <&reg_1p8v>;
-+ VDDIO-supply = <&reg_1p8v>;
-+ clocks = <&sys_mclk>;
-+ };
-+};
-+
-+&qspi {
-+ num-cs = <1>;
-+ bus-num = <0>;
-+ status = "okay";
-+
-+ qflash0: w25q16dw@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ m25p,fast-read;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ };
-+};
-+
-+&pfe {
-+ status = "okay";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ pfe_mac0: ethernet@0 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii";
-+ phy-handle = <&sgmii_phy1>;
-+ };
-+
-+ pfe_mac1: ethernet@1 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x1>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii";
-+ phy-handle = <&sgmii_phy2>;
-+ };
-+
-+ mdio@0 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ sgmii_phy1: ethernet-phy@2 {
-+ reg = <0x2>;
-+ };
-+
-+ sgmii_phy2: ethernet-phy@1 {
-+ reg = <0x1>;
-+ };
-+ };
-+};
-+
-+&sai2 {
-+ status = "okay";
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
-@@ -1,45 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS1012A QDS Board.
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
- /dts-v1/;
-
-@@ -49,6 +13,11 @@
- model = "LS1012A QDS Board";
- compatible = "fsl,ls1012a-qds", "fsl,ls1012a";
-
-+ aliases {
-+ ethernet0 = &pfe_mac0;
-+ ethernet1 = &pfe_mac1;
-+ };
-+
- sys_mclk: clock-mclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
-@@ -93,6 +62,43 @@
- };
- };
-
-+&pcie {
-+ status = "okay";
-+};
-+
-+&dspi {
-+ bus-num = <0>;
-+ status = "okay";
-+
-+ flash@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "n25q128a11", "jedec,spi-nor";
-+ reg = <0>;
-+ spi-max-frequency = <10000000>;
-+ };
-+
-+ flash@1 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "sst25wf040b", "jedec,spi-nor";
-+ spi-cpol;
-+ spi-cpha;
-+ reg = <1>;
-+ spi-max-frequency = <10000000>;
-+ };
-+
-+ flash@2 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "en25s64", "jedec,spi-nor";
-+ spi-cpol;
-+ spi-cpha;
-+ reg = <2>;
-+ spi-max-frequency = <10000000>;
-+ };
-+};
-+
- &duart0 {
- status = "okay";
- };
-@@ -131,6 +137,47 @@
- };
- };
-
-+&pfe {
-+ status = "okay";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ pfe_mac0: ethernet@0 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x2>;
-+ phy-mode = "sgmii-2500";
-+ phy-handle = <&sgmii_phy1>;
-+ };
-+
-+ pfe_mac1: ethernet@1 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x1>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x3>;
-+ phy-mode = "sgmii-2500";
-+ phy-handle = <&sgmii_phy2>;
-+ };
-+
-+ mdio@0 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ sgmii_phy1: ethernet-phy@1 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x1>;
-+ };
-+
-+ sgmii_phy2: ethernet-phy@2 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x2>;
-+ };
-+ };
-+};
-+
- &sai2 {
- status = "okay";
- };
-@@ -138,3 +185,18 @@
- &sata {
- status = "okay";
- };
-+
-+&qspi {
-+ status = "okay";
-+ qflash0: s25fs512s@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ m25p,fast-read;
-+ reg = <0>;
-+ spi-rx-bus-width = <2>;
-+ spi-tx-bus-width = <2>;
-+ };
-+
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
-@@ -1,45 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS1012A RDB Board.
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
- /dts-v1/;
-
-@@ -48,6 +12,15 @@
- / {
- model = "LS1012A RDB Board";
- compatible = "fsl,ls1012a-rdb", "fsl,ls1012a";
-+
-+ aliases {
-+ ethernet0 = &pfe_mac0;
-+ ethernet1 = &pfe_mac1;
-+ };
-+};
-+
-+&pcie {
-+ status = "okay";
- };
-
- &duart0 {
-@@ -74,3 +47,56 @@
- &sata {
- status = "okay";
- };
-+
-+&pfe {
-+ status = "okay";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ pfe_mac0: ethernet@0 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii";
-+ phy-handle = <&sgmii_phy>;
-+ };
-+
-+ pfe_mac1: ethernet@1 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x1>; /* GEM_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "rgmii-txid";
-+ phy-handle = <&rgmii_phy>;
-+ };
-+ mdio@0 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ sgmii_phy: ethernet-phy@2 {
-+ reg = <0x2>;
-+ };
-+
-+ rgmii_phy: ethernet-phy@1 {
-+ reg = <0x1>;
-+ };
-+ };
-+};
-+
-+&qspi {
-+ status = "okay";
-+ qflash0: s25fs512s@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ m25p,fast-read;
-+ reg = <0>;
-+ spi-rx-bus-width = <2>;
-+ spi-tx-bus-width = <2>;
-+ };
-+
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
-@@ -1,45 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-1012A family SoC.
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- #include <dt-bindings/interrupt-controller/arm-gic.h>
-@@ -64,12 +28,30 @@
- #address-cells = <1>;
- #size-cells = <0>;
-
-- cpu0: cpu@0 {
-+ cooling_map0: cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0>;
- clocks = <&clockgen 1 0>;
- #cooling-cells = <2>;
-+ cpu-idle-states = <&CPU_PH20>;
-+ };
-+ };
-+
-+ idle-states {
-+ /*
-+ * PSCI node is not added default, U-boot will add missing
-+ * parts if it determines to use PSCI.
-+ */
-+ entry-method = "arm,psci";
-+
-+ CPU_PH20: cpu-ph20 {
-+ compatible = "arm,idle-state";
-+ idle-state-name = "PH20";
-+ arm,psci-suspend-param = <0x0>;
-+ entry-latency-us = <1000>;
-+ exit-latency-us = <1000>;
-+ min-residency-us = <3000>;
- };
- };
-
-@@ -247,7 +229,7 @@
- dcfg: dcfg@1ee0000 {
- compatible = "fsl,ls1012a-dcfg",
- "syscon";
-- reg = <0x0 0x1ee0000 0x0 0x10000>;
-+ reg = <0x0 0x1ee0000 0x0 0x1000>;
- big-endian;
- };
-
-@@ -304,44 +286,25 @@
- #thermal-sensor-cells = <1>;
- };
-
-- thermal-zones {
-- cpu_thermal: cpu-thermal {
-- polling-delay-passive = <1000>;
-- polling-delay = <5000>;
-- thermal-sensors = <&tmu 0>;
--
-- trips {
-- cpu_alert: cpu-alert {
-- temperature = <85000>;
-- hysteresis = <2000>;
-- type = "passive";
-- };
--
-- cpu_crit: cpu-crit {
-- temperature = <95000>;
-- hysteresis = <2000>;
-- type = "critical";
-- };
-- };
-+ #include "fsl-tmu.dtsi"
-
-- cooling-maps {
-- map0 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu0 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- };
-- };
-+ ftm0: ftm0@29d0000 {
-+ compatible = "fsl,ls1012a-ftm-alarm";
-+ reg = <0x0 0x29d0000 0x0 0x10000>,
-+ <0x0 0x1ee2140 0x0 0x4>;
-+ reg-names = "ftm", "pmctrl";
-+ interrupts = <0 86 0x4>;
-+ big-endian;
- };
-
- i2c0: i2c@2180000 {
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls1012a-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2180000 0x0 0x10000>;
- interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
-- clocks = <&clockgen 4 0>;
-+ clocks = <&clockgen 4 3>;
-+ scl-gpios = <&gpio0 13 0>;
- status = "disabled";
- };
-
-@@ -351,7 +314,20 @@
- #size-cells = <0>;
- reg = <0x0 0x2190000 0x0 0x10000>;
- interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 3>;
-+ status = "disabled";
-+ };
-+
-+ dspi: dspi@2100000 {
-+ compatible = "fsl,ls1012a-dspi", "fsl,ls1021a-v1.0-dspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2100000 0x0 0x10000>;
-+ interrupts = <0 64 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "dspi";
- clocks = <&clockgen 4 0>;
-+ spi-num-chipselects = <5>;
-+ big-endian;
- status = "disabled";
- };
-
-@@ -400,6 +376,20 @@
- big-endian;
- };
-
-+ qspi: quadspi@1550000 {
-+ compatible = "fsl,ls1012a-qspi", "fsl,ls1021a-qspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x1550000 0x0 0x10000>,
-+ <0x0 0x40000000 0x0 0x10000000>;
-+ reg-names = "QuadSPI", "QuadSPI-memory";
-+ interrupts = <0 99 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "qspi_en", "qspi";
-+ clocks = <&clockgen 4 0>, <&clockgen 4 0>;
-+ big-endian;
-+ status = "disabled";
-+ };
-+
- sai1: sai@2b50000 {
- #sound-dai-cells = <0>;
- compatible = "fsl,vf610-sai";
-@@ -451,6 +441,8 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
- };
-
- sata: sata@3200000 {
-@@ -471,5 +463,85 @@
- dr_mode = "host";
- phy_type = "ulpi";
- };
-+
-+ msi: msi-controller1@1572000 {
-+ compatible = "fsl,ls1012a-msi";
-+ reg = <0x0 0x1572000 0x0 0x8>;
-+ msi-controller;
-+ interrupts = <0 126 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+
-+ pcie: pcie@3400000 {
-+ compatible = "fsl,ls1012a-pcie", "snps,dw-pcie";
-+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
-+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <0 118 0x4>, /* AER interrupt */
-+ <0 117 0x4>; /* PME interrupt */
-+ interrupt-names = "aer", "pme";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ num-lanes = <4>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&msi>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 110 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 111 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 112 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ rcpm: rcpm@1ee2000 {
-+ compatible = "fsl,ls1012a-rcpm", "fsl,qoriq-rcpm-2.1";
-+ reg = <0x0 0x1ee2000 0x0 0x1000>;
-+ fsl,#rcpm-wakeup-cells = <1>;
-+ };
-+ };
-+
-+ reserved-memory {
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ ranges;
-+
-+ pfe_reserved: packetbuffer@83400000 {
-+ reg = <0 0x83400000 0 0xc00000>;
-+ };
-+ };
-+
-+ pfe: pfe@04000000 {
-+ compatible = "fsl,pfe";
-+ reg = <0x0 0x04000000 0x0 0xc00000>, /* AXI 16M */
-+ <0x0 0x83400000 0x0 0xc00000>; /* PFE DDR 12M */
-+ reg-names = "pfe", "pfe-ddr";
-+ fsl,pfe-num-interfaces = <0x2>;
-+ interrupts = <0 172 0x4>, /* HIF interrupt */
-+ <0 173 0x4>, /*HIF_NOCPY interrupt */
-+ <0 174 0x4>; /* WoL interrupt */
-+ interrupt-names = "pfe_hif", "pfe_hif_nocpy", "pfe_wol";
-+ memory-region = <&pfe_reserved>;
-+ fsl,pfe-scfg = <&scfg 0>;
-+ fsl,rcpm-wakeup = <&rcpm 0xf0000020>;
-+ clocks = <&clockgen 4 0>;
-+ clock-names = "pfe";
-+
-+ status = "okay";
-+ };
-+
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
-+ };
-+ };
-+};
-+
-+&thermal_zones {
-+ thermal-zone0 {
-+ status = "okay";
- };
- };
---- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
-@@ -1,9 +1,8 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 device tree nodes for ls1043
- *
- * Copyright 2015-2016 Freescale Semiconductor Inc.
-- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- &soc {
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
-@@ -0,0 +1,263 @@
-+/*
-+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
-+ *
-+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
-+ *
-+ * Mingkai Hu <Mingkai.hu@freescale.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#include "fsl-ls1043a-qds.dts"
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+/delete-property/ dma-coherent;
-+
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+
-+pcie@3400000 {
-+ /delete-property/ iommu-map;
-+ dma-coherent;
-+};
-+
-+pcie@3500000 {
-+ /delete-property/ iommu-map;
-+ dma-coherent;
-+};
-+
-+pcie@3600000 {
-+ /delete-property/ iommu-map;
-+ dma-coherent;
-+};
-+
-+/delete-node/ iommu@9000000;
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+ dma-coherent;
-+};
-+
-+&clockgen {
-+ dma-coherent;
-+};
-+
-+&scfg {
-+ dma-coherent;
-+};
-+
-+&crypto {
-+ dma-coherent;
-+};
-+
-+&dcfg {
-+ dma-coherent;
-+};
-+
-+&ifc {
-+ dma-coherent;
-+};
-+
-+&qspi {
-+ dma-coherent;
-+};
-+
-+&esdhc {
-+ dma-coherent;
-+};
-+
-+&ddr {
-+ dma-coherent;
-+};
-+
-+&tmu {
-+ dma-coherent;
-+};
-+
-+&qman {
-+ dma-coherent;
-+};
-+
-+&bman {
-+ dma-coherent;
-+};
-+
-+&bportals {
-+ dma-coherent;
-+};
-+
-+&qportals {
-+ dma-coherent;
-+};
-+
-+&dspi0 {
-+ dma-coherent;
-+};
-+
-+&dspi1 {
-+ dma-coherent;
-+};
-+
-+&i2c0 {
-+ dma-coherent;
-+};
-+
-+&i2c1 {
-+ dma-coherent;
-+};
-+
-+&i2c2 {
-+ dma-coherent;
-+};
-+
-+&i2c3 {
-+ dma-coherent;
-+};
-+
-+&duart0 {
-+ dma-coherent;
-+};
-+
-+&duart1 {
-+ dma-coherent;
-+};
-+
-+&duart2 {
-+ dma-coherent;
-+};
-+
-+&duart3 {
-+ dma-coherent;
-+};
-+
-+&gpio1 {
-+ dma-coherent;
-+};
-+
-+&gpio2 {
-+ dma-coherent;
-+};
-+
-+&gpio3 {
-+ dma-coherent;
-+};
-+
-+&gpio4 {
-+ dma-coherent;
-+};
-+
-+&uqe {
-+ dma-coherent;
-+};
-+
-+&lpuart0 {
-+ dma-coherent;
-+};
-+
-+&lpuart1 {
-+ dma-coherent;
-+};
-+
-+&lpuart2 {
-+ dma-coherent;
-+};
-+
-+&lpuart3 {
-+ dma-coherent;
-+};
-+
-+&lpuart4 {
-+ dma-coherent;
-+};
-+
-+&lpuart5 {
-+ dma-coherent;
-+};
-+
-+&ftm0 {
-+ dma-coherent;
-+};
-+
-+&wdog0 {
-+ dma-coherent;
-+};
-+
-+&edma0 {
-+ dma-coherent;
-+};
-+
-+&qdma {
-+ dma-coherent;
-+};
-+
-+&msi1 {
-+ dma-coherent;
-+};
-+
-+&msi2 {
-+ dma-coherent;
-+};
-+
-+&msi3 {
-+ dma-coherent;
-+};
-+
-+&ptp_timer0 {
-+ dma-coherent;
-+};
-+
-+&fsldpaa {
-+ dma-coherent;
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
-@@ -1,47 +1,10 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-1043A family SoC.
- *
- * Copyright 2014-2015 Freescale Semiconductor, Inc.
- *
- * Mingkai Hu <Mingkai.hu@freescale.com>
-- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -60,6 +23,22 @@
- serial1 = &duart1;
- serial2 = &duart2;
- serial3 = &duart3;
-+ sgmii_riser_s1_p1 = &sgmii_phy_s1_p1;
-+ sgmii_riser_s2_p1 = &sgmii_phy_s2_p1;
-+ sgmii_riser_s3_p1 = &sgmii_phy_s3_p1;
-+ sgmii_riser_s4_p1 = &sgmii_phy_s4_p1;
-+ qsgmii_s1_p1 = &qsgmii_phy_s1_p1;
-+ qsgmii_s1_p2 = &qsgmii_phy_s1_p2;
-+ qsgmii_s1_p3 = &qsgmii_phy_s1_p3;
-+ qsgmii_s1_p4 = &qsgmii_phy_s1_p4;
-+ qsgmii_s2_p1 = &qsgmii_phy_s2_p1;
-+ qsgmii_s2_p2 = &qsgmii_phy_s2_p2;
-+ qsgmii_s2_p3 = &qsgmii_phy_s2_p3;
-+ qsgmii_s2_p4 = &qsgmii_phy_s2_p4;
-+ emi1_slot1 = &ls1043mdio_s1;
-+ emi1_slot2 = &ls1043mdio_s2;
-+ emi1_slot3 = &ls1043mdio_s3;
-+ emi1_slot4 = &ls1043mdio_s4;
- };
-
- chosen {
-@@ -97,8 +76,11 @@
- };
-
- fpga: board-control@2,0 {
-- compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis";
-+ compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis", "simple-bus";
- reg = <0x2 0x0 0x0000100>;
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ ranges = <0 2 0 0x100>;
- };
- };
-
-@@ -179,7 +161,153 @@
- #size-cells = <1>;
- spi-max-frequency = <20000000>;
- reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
- };
- };
-
- #include "fsl-ls1043-post.dtsi"
-+
-+&fman0 {
-+ ethernet@e0000 {
-+ phy-handle = <&qsgmii_phy_s2_p1>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@e2000 {
-+ phy-handle = <&qsgmii_phy_s2_p2>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@e4000 {
-+ phy-handle = <&rgmii_phy1>;
-+ phy-connection-type = "rgmii";
-+ };
-+
-+ ethernet@e6000 {
-+ phy-handle = <&rgmii_phy2>;
-+ phy-connection-type = "rgmii";
-+ };
-+
-+ ethernet@e8000 {
-+ phy-handle = <&qsgmii_phy_s2_p3>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@ea000 {
-+ phy-handle = <&qsgmii_phy_s2_p4>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@f0000 { /* DTSEC9/10GEC1 */
-+ fixed-link = <1 1 10000 0 0>;
-+ phy-connection-type = "xgmii";
-+ };
-+};
-+
-+&fpga {
-+ mdio-mux-emi1 {
-+ compatible = "mdio-mux-mmioreg", "mdio-mux";
-+ mdio-parent-bus = <&mdio0>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x54 1>; /* BRDCFG4 */
-+ mux-mask = <0xe0>; /* EMI1 */
-+
-+ /* On-board RGMII1 PHY */
-+ ls1043mdio0: mdio@0 {
-+ reg = <0>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ rgmii_phy1: ethernet-phy@1 { /* MAC3 */
-+ reg = <0x1>;
-+ };
-+ };
-+
-+ /* On-board RGMII2 PHY */
-+ ls1043mdio1: mdio@1 {
-+ reg = <0x20>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ rgmii_phy2: ethernet-phy@2 { /* MAC4 */
-+ reg = <0x2>;
-+ };
-+ };
-+
-+ /* Slot 1 */
-+ ls1043mdio_s1: mdio@2 {
-+ reg = <0x40>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ status = "disabled";
-+
-+ qsgmii_phy_s1_p1: ethernet-phy@4 {
-+ reg = <0x4>;
-+ };
-+ qsgmii_phy_s1_p2: ethernet-phy@5 {
-+ reg = <0x5>;
-+ };
-+ qsgmii_phy_s1_p3: ethernet-phy@6 {
-+ reg = <0x6>;
-+ };
-+ qsgmii_phy_s1_p4: ethernet-phy@7 {
-+ reg = <0x7>;
-+ };
-+
-+ sgmii_phy_s1_p1: ethernet-phy@1c {
-+ reg = <0x1c>;
-+ };
-+ };
-+
-+ /* Slot 2 */
-+ ls1043mdio_s2: mdio@3 {
-+ reg = <0x60>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ status = "disabled";
-+
-+ qsgmii_phy_s2_p1: ethernet-phy@8 {
-+ reg = <0x8>;
-+ };
-+ qsgmii_phy_s2_p2: ethernet-phy@9 {
-+ reg = <0x9>;
-+ };
-+ qsgmii_phy_s2_p3: ethernet-phy@a {
-+ reg = <0xa>;
-+ };
-+ qsgmii_phy_s2_p4: ethernet-phy@b {
-+ reg = <0xb>;
-+ };
-+
-+ sgmii_phy_s2_p1: ethernet-phy@1c {
-+ reg = <0x1c>;
-+ };
-+ };
-+
-+ /* Slot 3 */
-+ ls1043mdio_s3: mdio@4 {
-+ reg = <0x80>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ status = "disabled";
-+
-+ sgmii_phy_s3_p1: ethernet-phy@1c {
-+ reg = <0x1c>;
-+ };
-+ };
-+
-+ /* Slot 4 */
-+ ls1043mdio_s4: mdio@5 {
-+ reg = <0xa0>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ status = "disabled";
-+
-+ sgmii_phy_s4_p1: ethernet-phy@1c {
-+ reg = <0x1c>;
-+ };
-+ };
-+ };
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts
-@@ -0,0 +1,262 @@
-+/*
-+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
-+ *
-+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
-+ *
-+ * Mingkai Hu <Mingkai.hu@freescale.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#include "fsl-ls1043a-rdb.dts"
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+/delete-property/ dma-coherent;
-+
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+
-+pcie@3400000 {
-+ /delete-property/ iommu-map;
-+ dma-coherent;
-+};
-+
-+pcie@3500000 {
-+ /delete-property/ iommu-map;
-+ dma-coherent;
-+};
-+
-+pcie@3600000 {
-+ /delete-property/ iommu-map;
-+ dma-coherent;
-+};
-+
-+/delete-node/ iommu@9000000;
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
-+
-+&clockgen {
-+ dma-coherent;
-+};
-+
-+&scfg {
-+ dma-coherent;
-+};
-+
-+&crypto {
-+ dma-coherent;
-+};
-+
-+&dcfg {
-+ dma-coherent;
-+};
-+
-+&ifc {
-+ dma-coherent;
-+};
-+
-+&qspi {
-+ dma-coherent;
-+};
-+
-+&esdhc {
-+ dma-coherent;
-+};
-+
-+&ddr {
-+ dma-coherent;
-+};
-+
-+&tmu {
-+ dma-coherent;
-+};
-+
-+&qman {
-+ dma-coherent;
-+};
-+
-+&bman {
-+ dma-coherent;
-+};
-+
-+&bportals {
-+ dma-coherent;
-+};
-+
-+&qportals {
-+ dma-coherent;
-+};
-+
-+&dspi0 {
-+ dma-coherent;
-+};
-+
-+&dspi1 {
-+ dma-coherent;
-+};
-+
-+&i2c0 {
-+ dma-coherent;
-+};
-+
-+&i2c1 {
-+ dma-coherent;
-+};
-+
-+&i2c2 {
-+ dma-coherent;
-+};
-+
-+&i2c3 {
-+ dma-coherent;
-+};
-+
-+&duart0 {
-+ dma-coherent;
-+};
-+
-+&duart1 {
-+ dma-coherent;
-+};
-+
-+&duart2 {
-+ dma-coherent;
-+};
-+
-+&duart3 {
-+ dma-coherent;
-+};
-+
-+&gpio1 {
-+ dma-coherent;
-+};
-+
-+&gpio2 {
-+ dma-coherent;
-+};
-+
-+&gpio3 {
-+ dma-coherent;
-+};
-+
-+&gpio4 {
-+ dma-coherent;
-+};
-+
-+&lpuart0 {
-+ dma-coherent;
-+};
-+
-+&lpuart1 {
-+ dma-coherent;
-+};
-+
-+&lpuart2 {
-+ dma-coherent;
-+};
-+
-+&lpuart3 {
-+ dma-coherent;
-+};
-+
-+&lpuart4 {
-+ dma-coherent;
-+};
-+
-+&lpuart5 {
-+ dma-coherent;
-+};
-+
-+&ftm0 {
-+ dma-coherent;
-+};
-+
-+&wdog0 {
-+ dma-coherent;
-+};
-+
-+&edma0 {
-+ dma-coherent;
-+};
-+
-+&qdma {
-+ dma-coherent;
-+};
-+
-+&msi1 {
-+ dma-coherent;
-+};
-+
-+&msi2 {
-+ dma-coherent;
-+};
-+
-+&msi3 {
-+ dma-coherent;
-+};
-+
-+&fman0 {
-+ dma-coherent;
-+};
-+
-+&ptp_timer0 {
-+ dma-coherent;
-+};
-+
-+&fsldpaa {
-+ dma-coherent;
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts
-@@ -0,0 +1,140 @@
-+/*
-+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
-+ *
-+ * Copyright (C) 2014-2015, Freescale Semiconductor
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#include "fsl-ls1043a-rdb-sdk.dts"
-+
-+&soc {
-+ bp7: buffer-pool@7 {
-+ compatible = "fsl,p4080-bpool", "fsl,bpool";
-+ fsl,bpid = <7>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
-+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
-+ dma-coherent;
-+ };
-+
-+ bp8: buffer-pool@8 {
-+ compatible = "fsl,p4080-bpool", "fsl,bpool";
-+ fsl,bpid = <8>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
-+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
-+ dma-coherent;
-+ };
-+
-+ bp9: buffer-pool@9 {
-+ compatible = "fsl,p4080-bpool", "fsl,bpool";
-+ fsl,bpid = <9>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>;
-+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
-+ dma-coherent;
-+ };
-+
-+ fsl,dpaa {
-+ compatible = "fsl,ls1043a", "fsl,dpaa", "simple-bus";
-+ dma-coherent;
-+
-+ ethernet@0 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x50 1 0x51 1>;
-+ fsl,qman-frame-queues-tx = <0x70 1 0x71 1>;
-+ };
-+
-+ ethernet@1 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x52 1 0x53 1>;
-+ fsl,qman-frame-queues-tx = <0x72 1 0x73 1>;
-+ };
-+
-+ ethernet@2 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
-+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
-+ };
-+
-+ ethernet@3 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
-+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
-+ };
-+
-+ ethernet@4 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>;
-+ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>;
-+ };
-+
-+ ethernet@5 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x60 1 0x61 1>;
-+ fsl,qman-frame-queues-tx = <0x80 1 0x81 1>;
-+ };
-+
-+ ethernet@8 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
-+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
-+
-+ };
-+ dpa-fman0-oh@2 {
-+ compatible = "fsl,dpa-oh";
-+ /* Define frame queues for the OH port*/
-+ /* <OH Rx error, OH Rx default> */
-+ fsl,qman-frame-queues-oh = <0x5a 1 0x5b 1>;
-+ fsl,fman-oh-port = <&fman0_oh2>;
-+ };
-+ };
-+
-+ pcie@3400000 {
-+ /delete-property/ iommu-map;
-+ };
-+
-+ pcie@3500000 {
-+ /delete-property/ iommu-map;
-+ };
-+
-+ pcie@3600000 {
-+ /delete-property/ iommu-map;
-+ };
-+
-+ /delete-node/ iommu@9000000;
-+};
-+/ {
-+ reserved-memory {
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ ranges;
-+
-+ /* For legacy usdpaa based use-cases, update the size and
-+ alignment parameters. e.g. to allocate 256 MB memory:
-+ size = <0 0x10000000>;
-+ alignment = <0 0x10000000>;
-+ */
-+ usdpaa_mem: usdpaa_mem {
-+ compatible = "fsl,usdpaa-mem";
-+ alloc-ranges = <0 0 0x10000 0>;
-+ size = <0 0x1000>;
-+ alignment = <0 0x1000>;
-+ };
-+ };
-+};
-+
-+&fman0 {
-+ fman0_oh2: port@83000 {
-+ cell-index = <1>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x83000 0x1000>;
-+ };
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
-@@ -1,47 +1,10 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-1043A family SoC.
- *
- * Copyright 2014-2015 Freescale Semiconductor, Inc.
- *
- * Mingkai Hu <Mingkai.hu@freescale.com>
-- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -51,7 +14,6 @@
- model = "LS1043A RDB Board";
-
- aliases {
-- crypto = &crypto;
- serial0 = &duart0;
- serial1 = &duart1;
- serial2 = &duart2;
-@@ -86,6 +48,10 @@
- compatible = "pericom,pt7c4338";
- reg = <0x68>;
- };
-+ rtc@51 {
-+ compatible = "nxp,pcf85263";
-+ reg = <0x51>;
-+ };
- };
-
- &ifc {
-@@ -130,6 +96,38 @@
- reg = <0>;
- spi-max-frequency = <1000000>; /* input clock */
- };
-+
-+ slic@2 {
-+ compatible = "maxim,ds26522";
-+ reg = <2>;
-+ spi-max-frequency = <2000000>;
-+ fsl,spi-cs-sck-delay = <100>;
-+ fsl,spi-sck-cs-delay = <50>;
-+ };
-+
-+ slic@3 {
-+ compatible = "maxim,ds26522";
-+ reg = <3>;
-+ spi-max-frequency = <2000000>;
-+ fsl,spi-cs-sck-delay = <100>;
-+ fsl,spi-sck-cs-delay = <50>;
-+ };
-+};
-+
-+&uqe {
-+ ucc_hdlc: ucc@2000 {
-+ compatible = "fsl,ucc-hdlc";
-+ rx-clock-name = "clk8";
-+ tx-clock-name = "clk9";
-+ fsl,rx-sync-clock = "rsync_pin";
-+ fsl,tx-sync-clock = "tsync_pin";
-+ fsl,tx-timeslot-mask = <0xfffffffe>;
-+ fsl,rx-timeslot-mask = <0xfffffffe>;
-+ fsl,tdm-framer-type = "e1";
-+ fsl,tdm-id = <0>;
-+ fsl,siram-entry-id = <0>;
-+ fsl,tdm-interface;
-+ };
- };
-
- &duart0 {
---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
-@@ -1,47 +1,10 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-1043A family SoC.
- *
- * Copyright 2014-2015 Freescale Semiconductor, Inc.
- *
- * Mingkai Hu <Mingkai.hu@freescale.com>
-- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- #include <dt-bindings/thermal/thermal.h>
-@@ -54,6 +17,7 @@
- #size-cells = <2>;
-
- aliases {
-+ crypto = &crypto;
- fman0 = &fman0;
- ethernet0 = &enet0;
- ethernet1 = &enet1;
-@@ -74,13 +38,14 @@
- *
- * Currently supported enable-method is psci v0.2
- */
-- cpu0: cpu@0 {
-+ cooling_map0: cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0>;
- clocks = <&clockgen 1 0>;
- next-level-cache = <&l2>;
- #cooling-cells = <2>;
-+ cpu-idle-states = <&CPU_PH20>;
- };
-
- cpu1: cpu@1 {
-@@ -89,6 +54,7 @@
- reg = <0x1>;
- clocks = <&clockgen 1 0>;
- next-level-cache = <&l2>;
-+ cpu-idle-states = <&CPU_PH20>;
- };
-
- cpu2: cpu@2 {
-@@ -97,6 +63,7 @@
- reg = <0x2>;
- clocks = <&clockgen 1 0>;
- next-level-cache = <&l2>;
-+ cpu-idle-states = <&CPU_PH20>;
- };
-
- cpu3: cpu@3 {
-@@ -105,6 +72,7 @@
- reg = <0x3>;
- clocks = <&clockgen 1 0>;
- next-level-cache = <&l2>;
-+ cpu-idle-states = <&CPU_PH20>;
- };
-
- l2: l2-cache {
-@@ -112,6 +80,23 @@
- };
- };
-
-+ idle-states {
-+ /*
-+ * PSCI node is not added default, U-boot will add missing
-+ * parts if it determines to use PSCI.
-+ */
-+ entry-method = "arm,psci";
-+
-+ CPU_PH20: cpu-ph20 {
-+ compatible = "arm,idle-state";
-+ idle-state-name = "PH20";
-+ arm,psci-suspend-param = <0x0>;
-+ entry-latency-us = <1000>;
-+ exit-latency-us = <1000>;
-+ min-residency-us = <3000>;
-+ };
-+ };
-+
- memory@80000000 {
- device_type = "memory";
- reg = <0x0 0x80000000 0 0x80000000>;
-@@ -196,6 +181,8 @@
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
-+ dma-coherent;
-
- clockgen: clocking@1ee1000 {
- compatible = "fsl,ls1043a-clockgen";
-@@ -204,6 +191,49 @@
- clocks = <&sysclk>;
- };
-
-+ smmu: iommu@9000000 {
-+ compatible = "arm,mmu-500";
-+ reg = <0 0x9000000 0 0x400000>;
-+ dma-coherent;
-+ stream-match-mask = <0x7f00>;
-+ #global-interrupts = <2>;
-+ #iommu-cells = <1>;
-+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+
- scfg: scfg@1570000 {
- compatible = "fsl,ls1043a-scfg", "syscon";
- reg = <0x0 0x1570000 0x0 0x10000>;
-@@ -255,7 +285,7 @@
-
- dcfg: dcfg@1ee0000 {
- compatible = "fsl,ls1043a-dcfg", "syscon";
-- reg = <0x0 0x1ee0000 0x0 0x10000>;
-+ reg = <0x0 0x1ee0000 0x0 0x1000>;
- big-endian;
- };
-
-@@ -342,36 +372,7 @@
- #thermal-sensor-cells = <1>;
- };
-
-- thermal-zones {
-- cpu_thermal: cpu-thermal {
-- polling-delay-passive = <1000>;
-- polling-delay = <5000>;
--
-- thermal-sensors = <&tmu 3>;
--
-- trips {
-- cpu_alert: cpu-alert {
-- temperature = <85000>;
-- hysteresis = <2000>;
-- type = "passive";
-- };
-- cpu_crit: cpu-crit {
-- temperature = <95000>;
-- hysteresis = <2000>;
-- type = "critical";
-- };
-- };
--
-- cooling-maps {
-- map0 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu0 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- };
-- };
-- };
-+ #include "fsl-tmu.dtsi"
-
- qman: qman@1880000 {
- compatible = "fsl,qman";
-@@ -422,7 +423,7 @@
- };
-
- i2c0: i2c@2180000 {
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls1043a-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2180000 0x0 0x10000>;
-@@ -432,6 +433,7 @@
- dmas = <&edma0 1 39>,
- <&edma0 1 38>;
- dma-names = "tx", "rx";
-+ scl-gpios = <&gpio4 12 0>;
- status = "disabled";
- };
-
-@@ -536,6 +538,72 @@
- #interrupt-cells = <2>;
- };
-
-+ uqe: uqe@2400000 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ device_type = "qe";
-+ compatible = "fsl,qe", "simple-bus";
-+ ranges = <0x0 0x0 0x2400000 0x40000>;
-+ reg = <0x0 0x2400000 0x0 0x480>;
-+ brg-frequency = <100000000>;
-+ bus-frequency = <200000000>;
-+
-+ fsl,qe-num-riscs = <1>;
-+ fsl,qe-num-snums = <28>;
-+
-+ qeic: qeic@80 {
-+ compatible = "fsl,qe-ic";
-+ reg = <0x80 0x80>;
-+ #address-cells = <0>;
-+ interrupt-controller;
-+ #interrupt-cells = <1>;
-+ interrupts = <0 77 0x04 0 77 0x04>;
-+ };
-+
-+ si1: si@700 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ compatible = "fsl,ls1043-qe-si",
-+ "fsl,t1040-qe-si";
-+ reg = <0x700 0x80>;
-+ };
-+
-+ siram1: siram@1000 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "fsl,ls1043-qe-siram",
-+ "fsl,t1040-qe-siram";
-+ reg = <0x1000 0x800>;
-+ };
-+
-+ ucc@2000 {
-+ cell-index = <1>;
-+ reg = <0x2000 0x200>;
-+ interrupts = <32>;
-+ interrupt-parent = <&qeic>;
-+ };
-+
-+ ucc@2200 {
-+ cell-index = <3>;
-+ reg = <0x2200 0x200>;
-+ interrupts = <34>;
-+ interrupt-parent = <&qeic>;
-+ };
-+
-+ muram@10000 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "fsl,qe-muram", "fsl,cpm-muram";
-+ ranges = <0x0 0x10000 0x6000>;
-+
-+ data-only@0 {
-+ compatible = "fsl,qe-muram-data",
-+ "fsl,cpm-muram-data";
-+ reg = <0x0 0x6000>;
-+ };
-+ };
-+ };
-+
- lpuart0: serial@2950000 {
- compatible = "fsl,ls1021a-lpuart";
- reg = <0x0 0x2950000 0x0 0x1000>;
-@@ -590,6 +658,16 @@
- status = "disabled";
- };
-
-+ ftm0: ftm0@29d0000 {
-+ compatible = "fsl,ls1043a-ftm-alarm";
-+ reg = <0x0 0x29d0000 0x0 0x10000>,
-+ <0x0 0x1ee2140 0x0 0x4>;
-+ reg-names = "ftm", "pmctrl";
-+ interrupts = <0 86 0x4>;
-+ big-endian;
-+ status = "okay";
-+ };
-+
- wdog0: wdog@2ad0000 {
- compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt";
- reg = <0x0 0x2ad0000 0x0 0x10000>;
-@@ -615,41 +693,81 @@
- <&clockgen 4 0>;
- };
-
-- usb0: usb3@2f00000 {
-- compatible = "snps,dwc3";
-- reg = <0x0 0x2f00000 0x0 0x10000>;
-- interrupts = <0 60 0x4>;
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- };
--
-- usb1: usb3@3000000 {
-- compatible = "snps,dwc3";
-- reg = <0x0 0x3000000 0x0 0x10000>;
-- interrupts = <0 61 0x4>;
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- };
--
-- usb2: usb3@3100000 {
-- compatible = "snps,dwc3";
-- reg = <0x0 0x3100000 0x0 0x10000>;
-- interrupts = <0 63 0x4>;
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- };
--
-- sata: sata@3200000 {
-- compatible = "fsl,ls1043a-ahci";
-- reg = <0x0 0x3200000 0x0 0x10000>,
-- <0x0 0x20140520 0x0 0x4>;
-- reg-names = "ahci", "sata-ecc";
-- interrupts = <0 69 0x4>;
-- clocks = <&clockgen 4 0>;
-- dma-coherent;
-+ aux_bus: aux_bus {
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ compatible = "simple-bus";
-+ ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
-+
-+ usb0: usb3@2f00000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x2f00000 0x0 0x10000>;
-+ interrupts = <0 60 0x4>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ usb1: usb3@3000000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3000000 0x0 0x10000>;
-+ interrupts = <0 61 0x4>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ usb2: usb3@3100000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3100000 0x0 0x10000>;
-+ interrupts = <0 63 0x4>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ sata: sata@3200000 {
-+ compatible = "fsl,ls1043a-ahci";
-+ reg = <0x0 0x3200000 0x0 0x10000>,
-+ <0x0 0x20140520 0x0 0x4>;
-+ reg-names = "ahci", "sata-ecc";
-+ interrupts = <0 69 0x4>;
-+ clocks = <&clockgen 4 0>;
-+ };
-+ };
-+
-+ qdma: qdma@8380000 {
-+ compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma";
-+ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
-+ <0x0 0x8390000 0x0 0x10000>, /* Status regs */
-+ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */
-+ interrupts = <0 152 0x4>,
-+ <0 39 0x4>,
-+ <0 40 0x4>,
-+ <0 41 0x4>,
-+ <0 42 0x4>;
-+ interrupt-names = "qdma-error", "qdma-queue0",
-+ "qdma-queue1", "qdma-queue2", "qdma-queue3";
-+ channels = <8>;
-+ block-number = <1>;
-+ block-offset = <0x10000>;
-+ queues = <2>;
-+ status-sizes = <64>;
-+ queue-sizes = <64 64>;
-+ big-endian;
- };
-
- msi1: msi-controller1@1571000 {
-@@ -678,13 +796,13 @@
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
-- interrupts = <0 118 0x4>, /* controller interrupt */
-- <0 117 0x4>; /* PME interrupt */
-- interrupt-names = "intr", "pme";
-+ interrupts = <0 117 0x4>, /* PME interrupt */
-+ <0 118 0x4>; /* aer interrupt */
-+ interrupt-names = "pme", "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
-- dma-coherent;
-+ iommu-map = <0 &smmu 0 1>;
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
-@@ -696,6 +814,7 @@
- <0000 0 0 2 &gic 0 111 0x4>,
- <0000 0 0 3 &gic 0 112 0x4>,
- <0000 0 0 4 &gic 0 113 0x4>;
-+ status = "disabled";
- };
-
- pcie@3500000 {
-@@ -703,13 +822,13 @@
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
-- interrupts = <0 128 0x4>,
-- <0 127 0x4>;
-- interrupt-names = "intr", "pme";
-+ interrupts = <0 127 0x4>,
-+ <0 128 0x4>;
-+ interrupt-names = "pme", "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
-- dma-coherent;
-+ iommu-map = <0 &smmu 0 1>;
- num-lanes = <2>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
-@@ -721,6 +840,7 @@
- <0000 0 0 2 &gic 0 121 0x4>,
- <0000 0 0 3 &gic 0 122 0x4>,
- <0000 0 0 4 &gic 0 123 0x4>;
-+ status = "disabled";
- };
-
- pcie@3600000 {
-@@ -728,13 +848,13 @@
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x50 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
-- interrupts = <0 162 0x4>,
-- <0 161 0x4>;
-- interrupt-names = "intr", "pme";
-+ interrupts = <0 161 0x4>,
-+ <0 162 0x4>;
-+ interrupt-names = "pme", "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
-- dma-coherent;
-+ iommu-map = <0 &smmu 0 1>;
- num-lanes = <2>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
-@@ -746,6 +866,14 @@
- <0000 0 0 2 &gic 0 155 0x4>,
- <0000 0 0 3 &gic 0 156 0x4>,
- <0000 0 0 4 &gic 0 157 0x4>;
-+ status = "disabled";
-+ };
-+ };
-+
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
- };
- };
-
-@@ -753,3 +881,29 @@
-
- #include "qoriq-qman-portals.dtsi"
- #include "qoriq-bman-portals.dtsi"
-+
-+&thermal_zones {
-+ thermal-zone0 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone1 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone2 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone3 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone4 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone5 {
-+ status = "okay";
-+ };
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
-@@ -1,9 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 device tree nodes for ls1046
- *
- * Copyright 2015-2016 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- &soc {
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts
-@@ -0,0 +1,268 @@
-+/*
-+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
-+ *
-+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
-+ *
-+ * Mingkai Hu <Mingkai.hu@freescale.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#include "fsl-ls1046a-qds.dts"
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+/delete-property/ dma-coherent;
-+
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+
-+pcie@3400000 {
-+ /delete-property/ iommu-map;
-+};
-+
-+pcie@3500000 {
-+ /delete-property/ iommu-map;
-+};
-+
-+pcie@3600000 {
-+ /delete-property/ iommu-map;
-+};
-+
-+/delete-node/ iommu@9000000;
-+};
-+
-+&fsldpaa {
-+ ethernet@9 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet7>;
-+ dma-coherent;
-+ };
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+ dma-coherent;
-+};
-+
-+&clockgen {
-+ dma-coherent;
-+};
-+
-+&scfg {
-+ dma-coherent;
-+};
-+
-+&crypto {
-+ dma-coherent;
-+};
-+
-+&dcfg {
-+ dma-coherent;
-+};
-+
-+&ifc {
-+ dma-coherent;
-+};
-+
-+&qspi {
-+ dma-coherent;
-+};
-+
-+&esdhc {
-+ dma-coherent;
-+};
-+
-+&ddr {
-+ dma-coherent;
-+};
-+
-+&tmu {
-+ dma-coherent;
-+};
-+
-+&qman {
-+ dma-coherent;
-+};
-+
-+&bman {
-+ dma-coherent;
-+};
-+
-+&bportals {
-+ dma-coherent;
-+};
-+
-+&qportals {
-+ dma-coherent;
-+};
-+
-+&dspi {
-+ dma-coherent;
-+};
-+
-+&i2c0 {
-+ dma-coherent;
-+};
-+
-+&i2c1 {
-+ dma-coherent;
-+};
-+
-+&i2c2 {
-+ dma-coherent;
-+};
-+
-+&i2c3 {
-+ dma-coherent;
-+};
-+
-+&duart0 {
-+ dma-coherent;
-+};
-+
-+&duart1 {
-+ dma-coherent;
-+};
-+
-+&duart2 {
-+ dma-coherent;
-+};
-+
-+&duart3 {
-+ dma-coherent;
-+};
-+
-+&gpio0 {
-+ dma-coherent;
-+};
-+
-+&gpio1 {
-+ dma-coherent;
-+};
-+
-+&gpio2 {
-+ dma-coherent;
-+};
-+
-+&gpio3 {
-+ dma-coherent;
-+};
-+
-+&lpuart0 {
-+ dma-coherent;
-+};
-+
-+&lpuart1 {
-+ dma-coherent;
-+};
-+
-+&lpuart2 {
-+ dma-coherent;
-+};
-+
-+&lpuart3 {
-+ dma-coherent;
-+};
-+
-+&lpuart4 {
-+ dma-coherent;
-+};
-+
-+&lpuart5 {
-+ dma-coherent;
-+};
-+
-+&ftm0 {
-+ dma-coherent;
-+};
-+
-+&wdog0 {
-+ dma-coherent;
-+};
-+
-+&edma0 {
-+ dma-coherent;
-+};
-+
-+&sata {
-+ dma-coherent;
-+};
-+
-+&qdma {
-+ dma-coherent;
-+};
-+
-+&msi1 {
-+ dma-coherent;
-+};
-+
-+&msi2 {
-+ dma-coherent;
-+};
-+
-+&msi3 {
-+ dma-coherent;
-+};
-+
-+&ptp_timer0 {
-+ dma-coherent;
-+};
-+
-+&serdes1 {
-+ dma-coherent;
-+};
-+
-+&fsldpaa {
-+ dma-coherent;
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
-@@ -1,47 +1,10 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-1046A family SoC.
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * Shaohui Xie <Shaohui.Xie@nxp.com>
-- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -61,6 +24,20 @@
- serial1 = &duart1;
- serial2 = &duart2;
- serial3 = &duart3;
-+
-+ emi1_slot1 = &ls1046mdio_s1;
-+ emi1_slot2 = &ls1046mdio_s2;
-+ emi1_slot4 = &ls1046mdio_s4;
-+
-+ sgmii_s1_p1 = &sgmii_phy_s1_p1;
-+ sgmii_s1_p2 = &sgmii_phy_s1_p2;
-+ sgmii_s1_p3 = &sgmii_phy_s1_p3;
-+ sgmii_s1_p4 = &sgmii_phy_s1_p4;
-+ sgmii_s4_p1 = &sgmii_phy_s4_p1;
-+ qsgmii_s2_p1 = &qsgmii_phy_s2_p1;
-+ qsgmii_s2_p2 = &qsgmii_phy_s2_p2;
-+ qsgmii_s2_p3 = &qsgmii_phy_s2_p3;
-+ qsgmii_s2_p4 = &qsgmii_phy_s2_p4;
- };
-
- chosen {
-@@ -188,8 +165,9 @@
- };
-
- fpga: board-control@2,0 {
-- compatible = "fsl,ls1046aqds-fpga", "fsl,fpga-qixis";
-+ compatible = "fsl,ls1046aqds-fpga", "fsl,fpga-qixis", "simple-bus";
- reg = <0x2 0x0 0x0000100>;
-+ ranges = <0 2 0 0x100>;
- };
- };
-
-@@ -206,9 +184,145 @@
- compatible = "spansion,m25p80";
- #address-cells = <1>;
- #size-cells = <1>;
-- spi-max-frequency = <20000000>;
-+ spi-max-frequency = <50000000>;
- reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
- };
- };
-
- #include "fsl-ls1046-post.dtsi"
-+
-+&fman0 {
-+ ethernet@e0000 {
-+ phy-handle = <&qsgmii_phy_s2_p1>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@e2000 {
-+ phy-handle = <&sgmii_phy_s4_p1>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@e4000 {
-+ phy-handle = <&rgmii_phy1>;
-+ phy-connection-type = "rgmii";
-+ };
-+
-+ ethernet@e6000 {
-+ phy-handle = <&rgmii_phy2>;
-+ phy-connection-type = "rgmii";
-+ };
-+
-+ ethernet@e8000 {
-+ phy-handle = <&sgmii_phy_s1_p3>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@ea000 {
-+ phy-handle = <&sgmii_phy_s1_p4>;
-+ phy-connection-type = "sgmii";
-+ };
-+
-+ ethernet@f0000 { /* DTSEC9/10GEC1 */
-+ phy-handle = <&sgmii_phy_s1_p1>;
-+ phy-connection-type = "xgmii";
-+ };
-+
-+ ethernet@f2000 { /* DTSEC10/10GEC2 */
-+ phy-handle = <&sgmii_phy_s1_p2>;
-+ phy-connection-type = "xgmii";
-+ };
-+};
-+
-+&fpga {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ mdio-mux-emi1 {
-+ compatible = "mdio-mux-mmioreg", "mdio-mux";
-+ mdio-parent-bus = <&mdio0>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x54 1>; /* BRDCFG4 */
-+ mux-mask = <0xe0>; /* EMI1 */
-+
-+ /* On-board RGMII1 PHY */
-+ ls1046mdio0: mdio@0 {
-+ reg = <0>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ rgmii_phy1: ethernet-phy@1 { /* MAC3 */
-+ reg = <0x1>;
-+ };
-+ };
-+
-+ /* On-board RGMII2 PHY */
-+ ls1046mdio1: mdio@1 {
-+ reg = <0x20>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ rgmii_phy2: ethernet-phy@2 { /* MAC4 */
-+ reg = <0x2>;
-+ };
-+ };
-+
-+ /* Slot 1 */
-+ ls1046mdio_s1: mdio@2 {
-+ reg = <0x40>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ status = "disabled";
-+
-+ sgmii_phy_s1_p1: ethernet-phy@1c {
-+ reg = <0x1c>;
-+ };
-+
-+ sgmii_phy_s1_p2: ethernet-phy@1d {
-+ reg = <0x1d>;
-+ };
-+
-+ sgmii_phy_s1_p3: ethernet-phy@1e {
-+ reg = <0x1e>;
-+ };
-+
-+ sgmii_phy_s1_p4: ethernet-phy@1f {
-+ reg = <0x1f>;
-+ };
-+ };
-+
-+ /* Slot 2 */
-+ ls1046mdio_s2: mdio@3 {
-+ reg = <0x60>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ status = "disabled";
-+
-+ qsgmii_phy_s2_p1: ethernet-phy@8 {
-+ reg = <0x8>;
-+ };
-+ qsgmii_phy_s2_p2: ethernet-phy@9 {
-+ reg = <0x9>;
-+ };
-+ qsgmii_phy_s2_p3: ethernet-phy@a {
-+ reg = <0xa>;
-+ };
-+ qsgmii_phy_s2_p4: ethernet-phy@b {
-+ reg = <0xb>;
-+ };
-+ };
-+
-+ /* Slot 4 */
-+ ls1046mdio_s4: mdio@5 {
-+ reg = <0x80>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ status = "disabled";
-+
-+ sgmii_phy_s4_p1: ethernet-phy@1c {
-+ reg = <0x1c>;
-+ };
-+ };
-+ };
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
-@@ -0,0 +1,307 @@
-+/*
-+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
-+ *
-+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
-+ *
-+ * Mingkai Hu <Mingkai.hu@freescale.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#include "fsl-ls1046a-rdb.dts"
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+/delete-property/ dma-coherent;
-+
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+
-+pcie@3400000 {
-+ /delete-property/ iommu-map;
-+};
-+
-+pcie@3500000 {
-+ /delete-property/ iommu-map;
-+};
-+
-+pcie@3600000 {
-+ /delete-property/ iommu-map;
-+};
-+
-+/delete-node/ iommu@9000000;
-+};
-+
-+&fsldpaa {
-+ ethernet@0 {
-+ status = "disabled";
-+ };
-+ ethernet@1 {
-+ status = "disabled";
-+ };
-+ ethernet@9 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet7>;
-+ dma-coherent;
-+ };
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
-+
-+&mdio9 {
-+ pcsphy6: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x8C0 0x40>; /* lane D */
-+ };
-+};
-+
-+&mdio10 {
-+ pcsphy7: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x880 0x40>; /* lane C */
-+ };
-+};
-+
-+/* Update MAC connections to backplane PHYs
-+ * &mac9 {
-+ * phy-handle = <&pcsphy6>;
-+ *};
-+ *
-+ *&mac10 {
-+ * phy-handle = <&pcsphy7>;
-+ *};
-+*/
-+
-+&clockgen {
-+ dma-coherent;
-+};
-+
-+&scfg {
-+ dma-coherent;
-+};
-+
-+&crypto {
-+ dma-coherent;
-+};
-+
-+&dcfg {
-+ dma-coherent;
-+};
-+
-+&ifc {
-+ dma-coherent;
-+};
-+
-+&qspi {
-+ dma-coherent;
-+};
-+
-+&esdhc {
-+ dma-coherent;
-+};
-+
-+&ddr {
-+ dma-coherent;
-+};
-+
-+&tmu {
-+ dma-coherent;
-+};
-+
-+&qman {
-+ dma-coherent;
-+};
-+
-+&bman {
-+ dma-coherent;
-+};
-+
-+&bportals {
-+ dma-coherent;
-+};
-+
-+&qportals {
-+ dma-coherent;
-+};
-+
-+&dspi {
-+ dma-coherent;
-+};
-+
-+&i2c0 {
-+ dma-coherent;
-+};
-+
-+&i2c1 {
-+ dma-coherent;
-+};
-+
-+&i2c2 {
-+ dma-coherent;
-+};
-+
-+&i2c3 {
-+ dma-coherent;
-+};
-+
-+&duart0 {
-+ dma-coherent;
-+};
-+
-+&duart1 {
-+ dma-coherent;
-+};
-+
-+&duart2 {
-+ dma-coherent;
-+};
-+
-+&duart3 {
-+ dma-coherent;
-+};
-+
-+&gpio0 {
-+ dma-coherent;
-+};
-+
-+&gpio1 {
-+ dma-coherent;
-+};
-+
-+&gpio2 {
-+ dma-coherent;
-+};
-+
-+&gpio3 {
-+ dma-coherent;
-+};
-+
-+&lpuart0 {
-+ dma-coherent;
-+};
-+
-+&lpuart1 {
-+ dma-coherent;
-+};
-+
-+&lpuart2 {
-+ dma-coherent;
-+};
-+
-+&lpuart3 {
-+ dma-coherent;
-+};
-+
-+&lpuart4 {
-+ dma-coherent;
-+};
-+
-+&lpuart5 {
-+ dma-coherent;
-+};
-+
-+&ftm0 {
-+ dma-coherent;
-+};
-+
-+&wdog0 {
-+ dma-coherent;
-+};
-+
-+&edma0 {
-+ dma-coherent;
-+};
-+
-+&sata {
-+ dma-coherent;
-+};
-+
-+&qdma {
-+ dma-coherent;
-+};
-+
-+&msi1 {
-+ dma-coherent;
-+};
-+
-+&msi2 {
-+ dma-coherent;
-+};
-+
-+&msi3 {
-+ dma-coherent;
-+};
-+
-+&fman0 {
-+ dma-coherent;
-+};
-+
-+&ptp_timer0 {
-+ dma-coherent;
-+};
-+
-+&serdes1 {
-+ dma-coherent;
-+};
-+
-+&fsldpaa {
-+ dma-coherent;
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
-@@ -0,0 +1,133 @@
-+/*
-+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
-+ *
-+ * Copyright (C) 2016, Freescale Semiconductor
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#include "fsl-ls1046a-rdb-sdk.dts"
-+
-+&soc {
-+ bp7: buffer-pool@7 {
-+ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
-+ fsl,bpid = <7>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
-+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
-+ dma-coherent;
-+ };
-+
-+ bp8: buffer-pool@8 {
-+ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
-+ fsl,bpid = <8>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
-+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
-+ dma-coherent;
-+ };
-+
-+ bp9: buffer-pool@9 {
-+ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
-+ fsl,bpid = <9>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>;
-+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
-+ dma-coherent;
-+ };
-+
-+ fsl,dpaa {
-+ compatible = "fsl,ls1046a", "fsl,dpaa", "simple-bus";
-+ dma-coherent;
-+
-+ ethernet@2 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
-+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
-+ };
-+
-+ ethernet@3 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
-+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
-+ };
-+
-+ ethernet@4 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>;
-+ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>;
-+ };
-+
-+ ethernet@5 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x5a 1 0x5b 1>;
-+ fsl,qman-frame-queues-tx = <0x7a 1 0x7b 1>;
-+ };
-+
-+ ethernet@8 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
-+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
-+ };
-+
-+ ethernet@9 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
-+ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
-+ };
-+
-+ dpa-fman0-oh@2 {
-+ compatible = "fsl,dpa-oh";
-+ /* Define frame queues for the OH port*/
-+ /* <OH Rx error, OH Rx default> */
-+ fsl,qman-frame-queues-oh = <0x60 1 0x61 1>;
-+ fsl,fman-oh-port = <&fman0_oh2>;
-+ };
-+ };
-+
-+ pcie@3400000 {
-+ /delete-property/ iommu-map;
-+ };
-+
-+ pcie@3500000 {
-+ /delete-property/ iommu-map;
-+ };
-+
-+ pcie@3600000 {
-+ /delete-property/ iommu-map;
-+ };
-+
-+ /delete-node/ iommu@9000000;
-+};
-+/ {
-+ reserved-memory {
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ ranges;
-+
-+ /* For legacy usdpaa based use-cases, update the size and
-+ alignment parameters. e.g. to allocate 256 MB memory:
-+ size = <0 0x10000000>;
-+ alignment = <0 0x10000000>;
-+ */
-+ usdpaa_mem: usdpaa_mem {
-+ compatible = "fsl,usdpaa-mem";
-+ alloc-ranges = <0 0 0x10000 0>;
-+ size = <0 0x1000>;
-+ alignment = <0 0x1000>;
-+ };
-+ };
-+};
-+
-+&fman0 {
-+ fman0_oh2: port@83000 {
-+ cell-index = <1>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x83000 0x1000>;
-+ };
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
-@@ -1,47 +1,10 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-1046A family SoC.
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * Mingkai Hu <mingkai.hu@nxp.com>
-- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -139,21 +102,26 @@
- num-cs = <2>;
- bus-num = <0>;
- status = "okay";
-+ fsl,qspi-has-second-chip;
-
- qflash0: s25fs512s@0 {
- compatible = "spansion,m25p80";
- #address-cells = <1>;
- #size-cells = <1>;
-- spi-max-frequency = <20000000>;
-+ spi-max-frequency = <50000000>;
- reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
- };
-
- qflash1: s25fs512s@1 {
- compatible = "spansion,m25p80";
- #address-cells = <1>;
- #size-cells = <1>;
-- spi-max-frequency = <20000000>;
-+ spi-max-frequency = <50000000>;
- reg = <1>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
- };
- };
-
---- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
-@@ -1,47 +1,10 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-1046A family SoC.
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * Mingkai Hu <mingkai.hu@nxp.com>
-- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- #include <dt-bindings/interrupt-controller/arm-gic.h>
-@@ -70,7 +33,7 @@
- #address-cells = <1>;
- #size-cells = <0>;
-
-- cpu0: cpu@0 {
-+ cooling_map0: cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x0>;
-@@ -122,7 +85,7 @@
- CPU_PH20: cpu-ph20 {
- compatible = "arm,idle-state";
- idle-state-name = "PH20";
-- arm,psci-suspend-param = <0x00010000>;
-+ arm,psci-suspend-param = <0x0>;
- entry-latency-us = <1000>;
- exit-latency-us = <1000>;
- min-residency-us = <3000>;
-@@ -188,6 +151,8 @@
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
-+ dma-coherent;
-
- ddr: memory-controller@1080000 {
- compatible = "fsl,qoriq-memory-controller";
-@@ -214,7 +179,6 @@
- clock-names = "qspi_en", "qspi";
- clocks = <&clockgen 4 1>, <&clockgen 4 1>;
- big-endian;
-- fsl,qspi-has-second-chip;
- status = "disabled";
- };
-
-@@ -229,6 +193,49 @@
- bus-width = <4>;
- };
-
-+ smmu: iommu@9000000 {
-+ compatible = "arm,mmu-500";
-+ reg = <0 0x9000000 0 0x400000>;
-+ dma-coherent;
-+ stream-match-mask = <0x7f00>;
-+ #global-interrupts = <2>;
-+ #iommu-cells = <1>;
-+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+
- scfg: scfg@1570000 {
- compatible = "fsl,ls1046a-scfg", "syscon";
- reg = <0x0 0x1570000 0x0 0x10000>;
-@@ -304,7 +311,7 @@
-
- dcfg: dcfg@1ee0000 {
- compatible = "fsl,ls1046a-dcfg", "syscon";
-- reg = <0x0 0x1ee0000 0x0 0x10000>;
-+ reg = <0x0 0x1ee0000 0x0 0x1000>;
- big-endian;
- };
-
-@@ -362,36 +369,7 @@
- #thermal-sensor-cells = <1>;
- };
-
-- thermal-zones {
-- cpu_thermal: cpu-thermal {
-- polling-delay-passive = <1000>;
-- polling-delay = <5000>;
-- thermal-sensors = <&tmu 3>;
--
-- trips {
-- cpu_alert: cpu-alert {
-- temperature = <85000>;
-- hysteresis = <2000>;
-- type = "passive";
-- };
--
-- cpu_crit: cpu-crit {
-- temperature = <95000>;
-- hysteresis = <2000>;
-- type = "critical";
-- };
-- };
--
-- cooling-maps {
-- map0 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu0 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- };
-- };
-- };
-+ #include "fsl-tmu.dtsi"
-
- dspi: dspi@2100000 {
- compatible = "fsl,ls1021a-v1.0-dspi";
-@@ -407,7 +385,7 @@
- };
-
- i2c0: i2c@2180000 {
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls1046a-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2180000 0x0 0x10000>;
-@@ -416,6 +394,7 @@
- dmas = <&edma0 1 39>,
- <&edma0 1 38>;
- dma-names = "tx", "rx";
-+ scl-gpios = <&gpio3 12 0>;
- status = "disabled";
- };
-
-@@ -440,12 +419,13 @@
- };
-
- i2c3: i2c@21b0000 {
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls1046a-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x21b0000 0x0 0x10000>;
- interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
-+ scl-gpios = <&gpio3 12 0>;
- status = "disabled";
- };
-
-@@ -571,6 +551,15 @@
- status = "disabled";
- };
-
-+ ftm0: ftm0@29d0000 {
-+ compatible = "fsl,ls1046a-ftm-alarm";
-+ reg = <0x0 0x29d0000 0x0 0x10000>,
-+ <0x0 0x1ee2140 0x0 0x4>;
-+ reg-names = "ftm", "pmctrl";
-+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
-+ big-endian;
-+ };
-+
- wdog0: watchdog@2ad0000 {
- compatible = "fsl,imx21-wdt";
- reg = <0x0 0x2ad0000 0x0 0x10000>;
-@@ -595,40 +584,81 @@
- <&clockgen 4 1>;
- };
-
-- usb0: usb@2f00000 {
-- compatible = "snps,dwc3";
-- reg = <0x0 0x2f00000 0x0 0x10000>;
-- interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- };
--
-- usb1: usb@3000000 {
-- compatible = "snps,dwc3";
-- reg = <0x0 0x3000000 0x0 0x10000>;
-- interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- };
--
-- usb2: usb@3100000 {
-- compatible = "snps,dwc3";
-- reg = <0x0 0x3100000 0x0 0x10000>;
-- interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- };
--
-- sata: sata@3200000 {
-- compatible = "fsl,ls1046a-ahci";
-- reg = <0x0 0x3200000 0x0 0x10000>,
-- <0x0 0x20140520 0x0 0x4>;
-- reg-names = "ahci", "sata-ecc";
-- interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
-- clocks = <&clockgen 4 1>;
-+ aux_bus: aux_bus {
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ compatible = "simple-bus";
-+ ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
-+
-+ usb0: usb@2f00000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x2f00000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ usb1: usb@3000000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3000000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ usb2: usb@3100000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3100000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ sata: sata@3200000 {
-+ compatible = "fsl,ls1046a-ahci";
-+ reg = <0x0 0x3200000 0x0 0x10000>,
-+ <0x0 0x20140520 0x0 0x4>;
-+ reg-names = "ahci", "sata-ecc";
-+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 1>;
-+ };
-+ };
-+
-+ qdma: qdma@8380000 {
-+ compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma";
-+ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
-+ <0x0 0x8390000 0x0 0x10000>, /* Status regs */
-+ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */
-+ interrupts = <0 153 0x4>,
-+ <0 39 0x4>,
-+ <0 40 0x4>,
-+ <0 41 0x4>,
-+ <0 42 0x4>;
-+ interrupt-names = "qdma-error", "qdma-queue0",
-+ "qdma-queue1", "qdma-queue2", "qdma-queue3";
-+ channels = <8>;
-+ block-number = <1>;
-+ block-offset = <0x10000>;
-+ queues = <2>;
-+ status-sizes = <64>;
-+ queue-sizes = <64 64>;
-+ big-endian;
- };
-
- msi1: msi-controller@1580000 {
-@@ -661,6 +691,125 @@
- <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
- };
-
-+ pcie@3400000 {
-+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
-+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
-+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
-+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
-+ interrupt-names = "pme", "aer";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ iommu-map = <0 &smmu 0 1>;
-+ num-lanes = <4>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&msi1>, <&msi2>, <&msi3>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3400000 {
-+ compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
-+ reg = <0x00 0x03400000 0x0 0x00100000
-+ 0x40 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ib-windows = <6>;
-+ num-ob-windows = <8>;
-+ num-lanes = <2>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3500000 {
-+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
-+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
-+ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "pme", "aer";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ iommu-map = <0 &smmu 0 1>;
-+ num-lanes = <2>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&msi1>, <&msi2>, <&msi3>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3500000 {
-+ compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
-+ reg = <0x00 0x03500000 0x0 0x00100000
-+ 0x48 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ib-windows = <6>;
-+ num-ob-windows = <8>;
-+ num-lanes = <2>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3600000 {
-+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
-+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
-+ 0x50 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "pme", "aer";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ iommu-map = <0 &smmu 0 1>;
-+ num-lanes = <2>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&msi1>, <&msi2>, <&msi3>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3600000 {
-+ compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep";
-+ reg = <0x00 0x03600000 0x0 0x00100000
-+ 0x50 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ib-windows = <6>;
-+ num-ob-windows = <8>;
-+ num-lanes = <2>;
-+ status = "disabled";
-+ };
-+
-+ serdes1: serdes@1ea0000 {
-+ reg = <0x0 0x1ea0000 0 0x00002000>;
-+ compatible = "fsl,serdes-10g";
-+ };
-+
- };
-
- reserved-memory {
-@@ -689,7 +838,36 @@
- no-map;
- };
- };
-+
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
-+ };
-+ };
- };
-
- #include "qoriq-qman-portals.dtsi"
- #include "qoriq-bman-portals.dtsi"
-+
-+&thermal_zones {
-+ thermal-zone0 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone1 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone2 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone3 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone4 {
-+ status = "okay";
-+ };
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for NXP LS1088A QDS Board.
- *
-@@ -5,43 +6,6 @@
- *
- * Harninder Rai <harninder.rai@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -134,6 +98,30 @@
- };
- };
-
-+&qspi {
-+ status = "okay";
-+ fsl,qspi-has-second-chip;
-+ qflash0: s25fs512s@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+
-+ qflash1: s25fs512s@1 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <1>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+};
-+
- &duart0 {
- status = "okay";
- };
-@@ -149,3 +137,29 @@
- &sata {
- status = "okay";
- };
-+
-+&pcs_mdio1 {
-+ pcs_phy1: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x840 0x40>;/* lane B */
-+ };
-+};
-+
-+&pcs_mdio2 {
-+ pcs_phy2: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x800 0x40>;/* lane A */
-+ };
-+};
-+
-+/* Update DPMAC connections to backplane PHYs, under SerDes 0x1D_0xXX.
-+ * &dpmac1 {
-+ * phy-handle = <&pcs_phy1>;
-+ * };
-+ */
---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for NXP LS1088A RDB Board.
- *
-@@ -5,43 +6,6 @@
- *
- * Harninder Rai <harninder.rai@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -110,6 +74,31 @@
- };
- };
-
-+&qspi {
-+ status = "okay";
-+ fsl,qspi-has-second-chip;
-+ qflash0: s25fs512s@0 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+
-+ qflash1: s25fs512s@1 {
-+ compatible = "spansion,m25p80";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <1>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+
-+};
-+
- &duart0 {
- status = "okay";
- };
-@@ -118,6 +107,14 @@
- status = "okay";
- };
-
-+&usb0 {
-+ status = "okay";
-+};
-+
-+&usb1 {
-+ status = "okay";
-+};
-+
- &esdhc {
- status = "okay";
- };
-@@ -125,3 +122,82 @@
- &sata {
- status = "okay";
- };
-+
-+&emdio1 {
-+ /* Freescale F104 PHY1 */
-+ mdio1_phy1: emdio1_phy@1 {
-+ reg = <0x1c>;
-+ phy-connection-type = "qsgmii";
-+ };
-+ mdio1_phy2: emdio1_phy@2 {
-+ reg = <0x1d>;
-+ phy-connection-type = "qsgmii";
-+ };
-+ mdio1_phy3: emdio1_phy@3 {
-+ reg = <0x1e>;
-+ phy-connection-type = "qsgmii";
-+ };
-+ mdio1_phy4: emdio1_phy@4 {
-+ reg = <0x1f>;
-+ phy-connection-type = "qsgmii";
-+ };
-+ /* F104 PHY2 */
-+ mdio1_phy5: emdio1_phy@5 {
-+ reg = <0x0c>;
-+ phy-connection-type = "qsgmii";
-+ };
-+ mdio1_phy6: emdio1_phy@6 {
-+ reg = <0x0d>;
-+ phy-connection-type = "qsgmii";
-+ };
-+ mdio1_phy7: emdio1_phy@7 {
-+ reg = <0x0e>;
-+ phy-connection-type = "qsgmii";
-+ };
-+ mdio1_phy8: emdio1_phy@8 {
-+ reg = <0x0f>;
-+ phy-connection-type = "qsgmii";
-+ };
-+};
-+
-+&emdio2 {
-+ /* Aquantia AQR105 10G PHY */
-+ mdio2_phy1: emdio2_phy@1 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 2 0x4>;
-+ reg = <0x0>;
-+ phy-connection-type = "xfi";
-+ };
-+};
-+
-+/* DPMAC connections to external PHYs
-+ * based on LS1088A RM RevC - §24.1.2 SerDes Options
-+ */
-+/* DPMAC1 is 10G SFP+, fixed link */
-+&dpmac2 {
-+ phy-handle = <&mdio2_phy1>;
-+};
-+&dpmac3 {
-+ phy-handle = <&mdio1_phy5>;
-+};
-+&dpmac4 {
-+ phy-handle = <&mdio1_phy6>;
-+};
-+&dpmac5 {
-+ phy-handle = <&mdio1_phy7>;
-+};
-+&dpmac6 {
-+ phy-handle = <&mdio1_phy8>;
-+};
-+&dpmac7 {
-+ phy-handle = <&mdio1_phy1>;
-+};
-+&dpmac8 {
-+ phy-handle = <&mdio1_phy2>;
-+};
-+&dpmac9 {
-+ phy-handle = <&mdio1_phy3>;
-+};
-+&dpmac10 {
-+ phy-handle = <&mdio1_phy4>;
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for NXP Layerscape-1088A family SoC.
- *
-@@ -5,43 +6,6 @@
- *
- * Harninder Rai <harninder.rai@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/thermal/thermal.h>
-@@ -61,7 +25,7 @@
- #size-cells = <0>;
-
- /* We have 2 clusters having 4 Cortex-A53 cores each */
-- cpu0: cpu@0 {
-+ cooling_map0: cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0>;
-@@ -94,7 +58,7 @@
- cpu-idle-states = <&CPU_PH20>;
- };
-
-- cpu4: cpu@100 {
-+ cooling_map1: cpu4: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x100>;
-@@ -130,7 +94,7 @@
- CPU_PH20: cpu-ph20 {
- compatible = "arm,idle-state";
- idle-state-name = "PH20";
-- arm,psci-suspend-param = <0x00010000>;
-+ arm,psci-suspend-param = <0x0>;
- entry-latency-us = <1000>;
- exit-latency-us = <1000>;
- min-residency-us = <3000>;
-@@ -147,6 +111,15 @@
- <0x0 0x0c0d0000 0 0x1000>, /* GICH */
- <0x0 0x0c0e0000 0 0x20000>; /* GICV */
- interrupts = <1 9 IRQ_TYPE_LEVEL_HIGH>;
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ ranges;
-+
-+ its: gic-its@6020000 {
-+ compatible = "arm,gic-v3-its";
-+ msi-controller;
-+ reg = <0x0 0x6020000 0 0x20000>;
-+ };
- };
-
- timer {
-@@ -169,11 +142,31 @@
- clock-output-names = "sysclk";
- };
-
-+ dcfg: dcfg@1e00000 {
-+ compatible = "fsl,ls1088a-dcfg", "syscon";
-+ reg = <0x0 0x1e00000 0x0 0x10000>;
-+ little-endian;
-+ };
-+
-+ rstcr: syscon@1e60000 {
-+ compatible = "fsl,ls1088a-rstcr", "syscon";
-+ reg = <0x0 0x1e60000 0x0 0x4>;
-+ };
-+
-+ reboot {
-+ compatible = "syscon-reboot";
-+ regmap = <&rstcr>;
-+ offset = <0x0>;
-+ mask = <0x02>;
-+ };
-+
-+
- soc {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
-
- clockgen: clocking@1300000 {
- compatible = "fsl,ls1088a-clockgen";
-@@ -229,43 +222,7 @@
- #thermal-sensor-cells = <1>;
- };
-
-- thermal-zones {
-- cpu_thermal: cpu-thermal {
-- polling-delay-passive = <1000>;
-- polling-delay = <5000>;
-- thermal-sensors = <&tmu 0>;
--
-- trips {
-- cpu_alert: cpu-alert {
-- temperature = <85000>;
-- hysteresis = <2000>;
-- type = "passive";
-- };
--
-- cpu_crit: cpu-crit {
-- temperature = <95000>;
-- hysteresis = <2000>;
-- type = "critical";
-- };
-- };
--
-- cooling-maps {
-- map0 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu0 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
--
-- map1 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu4 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- };
-- };
-- };
-+ #include "fsl-tmu.dtsi"
-
- duart0: serial@21c0500 {
- compatible = "fsl,ns16550", "ns16550a";
-@@ -283,6 +240,62 @@
- status = "disabled";
- };
-
-+ cluster1_core0_watchdog: wdt@c000000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc000000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
-+ cluster1_core1_watchdog: wdt@c010000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc010000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
-+ cluster1_core2_watchdog: wdt@c020000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc020000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
-+ cluster1_core3_watchdog: wdt@c030000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc030000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
-+ cluster2_core0_watchdog: wdt@c100000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc100000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
-+ cluster2_core1_watchdog: wdt@c110000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc110000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
-+ cluster2_core2_watchdog: wdt@c120000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc120000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
-+ cluster2_core3_watchdog: wdt@c130000 {
-+ compatible = "arm,sp805-wdt", "arm,primecell";
-+ reg = <0x0 0xc130000 0x0 0x1000>;
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "apb_pclk", "wdog_clk";
-+ };
-+
- gpio0: gpio@2300000 {
- compatible = "fsl,qoriq-gpio";
- reg = <0x0 0x2300000 0x0 0x10000>;
-@@ -323,6 +336,72 @@
- #interrupt-cells = <2>;
- };
-
-+ /* TODO: WRIOP (CCSR?) */
-+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000,
-+ * E-MDIO1: 0x1_6000
-+ */
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8B96000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian; /* force the driver in LE mode */
-+
-+ /* Not necessary on the QDS, but needed on the RDB */
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000,
-+ * E-MDIO2: 0x1_7000
-+ */
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8B97000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian; /* force the driver in LE mode */
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio1: mdio@0x8c07000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c07000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio2: mdio@0x8c0b000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c0b000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio3: mdio@0x8c0f000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c0f000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio4: mdio@0x8c13000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c13000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
- ifc: ifc@2240000 {
- compatible = "fsl,ifc", "simple-bus";
- reg = <0x0 0x2240000 0x0 0x20000>;
-@@ -333,13 +412,22 @@
- status = "disabled";
- };
-
-+ ftm0: ftm0@2800000 {
-+ compatible = "fsl,ls1088a-ftm-alarm";
-+ reg = <0x0 0x2800000 0x0 0x10000>,
-+ <0x0 0x1e34050 0x0 0x4>;
-+ reg-names = "ftm", "pmctrl";
-+ interrupts = <0 44 4>;
-+ };
-+
- i2c0: i2c@2000000 {
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls1088a-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2000000 0x0 0x10000>;
- interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 7>;
-+ scl-gpios = <&gpio3 30 0>;
- status = "disabled";
- };
-
-@@ -349,7 +437,7 @@
- #size-cells = <0>;
- reg = <0x0 0x2010000 0x0 0x10000>;
- interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 7>;
- status = "disabled";
- };
-
-@@ -359,7 +447,7 @@
- #size-cells = <0>;
- reg = <0x0 0x2020000 0x0 0x10000>;
- interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 7>;
- status = "disabled";
- };
-
-@@ -369,7 +457,7 @@
- #size-cells = <0>;
- reg = <0x0 0x2030000 0x0 0x10000>;
- interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 7>;
- status = "disabled";
- };
-
-@@ -385,6 +473,28 @@
- status = "disabled";
- };
-
-+ usb0: usb3@3100000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3100000 0x0 0x10000>;
-+ interrupts = <0 80 0x4>; /* Level high type */
-+ dr_mode = "host";
-+ configure-gfladj;
-+ snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ usb1: usb3@3110000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3110000 0x0 0x10000>;
-+ interrupts = <0 81 0x4>; /* Level high type */
-+ dr_mode = "host";
-+ configure-gfladj;
-+ snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
- sata: sata@3200000 {
- compatible = "fsl,ls1088a-ahci";
- reg = <0x0 0x3200000 0x0 0x10000>,
-@@ -395,6 +505,17 @@
- dma-coherent;
- status = "disabled";
- };
-+ qspi: quadspi@20c0000 {
-+ compatible = "fsl,ls2080a-qspi", "fsl,ls1088a-qspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x20c0000 0x0 0x10000>,
-+ <0x0 0x20000000 0x0 0x10000000>;
-+ reg-names = "QuadSPI", "QuadSPI-memory";
-+ interrupts = <0 25 0x4>; /* Level high type */
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "qspi_en", "qspi";
-+ };
-
- crypto: crypto@8000000 {
- compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
-@@ -434,6 +555,267 @@
- interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
-+
-+ pcie@3400000 {
-+ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie";
-+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
-+ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
-+ interrupt-names = "aer";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ num-lanes = <4>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3500000 {
-+ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie";
-+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
-+ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
-+ interrupt-names = "aer";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ num-lanes = <4>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 0 115 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 0 116 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 0 117 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3600000 {
-+ compatible = "fsl,ls1088a-pcie", "snps,dw-pcie";
-+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
-+ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
-+ interrupt-names = "aer";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ num-lanes = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 0 120 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 0 121 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 0 122 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ fsl_mc: fsl-mc@80c000000 {
-+ compatible = "fsl,qoriq-mc";
-+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
-+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
-+ msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
-+ dma-coherent;
-+ #address-cells = <3>;
-+ #size-cells = <1>;
-+
-+ /*
-+ * Region type 0x0 - MC portals
-+ * Region type 0x1 - QBMAN portals
-+ */
-+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
-+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
-+
-+ dpmacs {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ dpmac1: dpmac@1 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <1>;
-+ };
-+
-+ dpmac2: dpmac@2 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <2>;
-+ };
-+
-+ dpmac3: dpmac@3 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <3>;
-+ };
-+
-+ dpmac4: dpmac@4 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <4>;
-+ };
-+
-+ dpmac5: dpmac@5 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <5>;
-+ };
-+
-+ dpmac6: dpmac@6 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <6>;
-+ };
-+
-+ dpmac7: dpmac@7 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <7>;
-+ };
-+
-+ dpmac8: dpmac@8 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <8>;
-+ };
-+
-+ dpmac9: dpmac@9 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <9>;
-+ };
-+
-+ dpmac10: dpmac@a {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xa>;
-+ };
-+ };
-+ };
-+
-+ smmu: iommu@5000000 {
-+ compatible = "arm,mmu-500";
-+ reg = <0 0x5000000 0 0x800000>;
-+ #global-interrupts = <12>;
-+ #iommu-cells = <1>;
-+ stream-match-mask = <0x7C00>;
-+ interrupts = <0 13 4>, /* global secure fault */
-+ <0 14 4>, /* combined secure interrupt */
-+ <0 15 4>, /* global non-secure fault */
-+ <0 16 4>, /* combined non-secure interrupt */
-+ /* performance counter interrupts 0-7 */
-+ <0 211 4>,
-+ <0 212 4>,
-+ <0 213 4>,
-+ <0 214 4>,
-+ <0 215 4>,
-+ <0 216 4>,
-+ <0 217 4>,
-+ <0 218 4>,
-+ /* per context interrupt, 64 interrupts */
-+ <0 146 4>,
-+ <0 147 4>,
-+ <0 148 4>,
-+ <0 149 4>,
-+ <0 150 4>,
-+ <0 151 4>,
-+ <0 152 4>,
-+ <0 153 4>,
-+ <0 154 4>,
-+ <0 155 4>,
-+ <0 156 4>,
-+ <0 157 4>,
-+ <0 158 4>,
-+ <0 159 4>,
-+ <0 160 4>,
-+ <0 161 4>,
-+ <0 162 4>,
-+ <0 163 4>,
-+ <0 164 4>,
-+ <0 165 4>,
-+ <0 166 4>,
-+ <0 167 4>,
-+ <0 168 4>,
-+ <0 169 4>,
-+ <0 170 4>,
-+ <0 171 4>,
-+ <0 172 4>,
-+ <0 173 4>,
-+ <0 174 4>,
-+ <0 175 4>,
-+ <0 176 4>,
-+ <0 177 4>,
-+ <0 178 4>,
-+ <0 179 4>,
-+ <0 180 4>,
-+ <0 181 4>,
-+ <0 182 4>,
-+ <0 183 4>,
-+ <0 184 4>,
-+ <0 185 4>,
-+ <0 186 4>,
-+ <0 187 4>,
-+ <0 188 4>,
-+ <0 189 4>,
-+ <0 190 4>,
-+ <0 191 4>,
-+ <0 192 4>,
-+ <0 193 4>,
-+ <0 194 4>,
-+ <0 195 4>,
-+ <0 196 4>,
-+ <0 197 4>,
-+ <0 198 4>,
-+ <0 199 4>,
-+ <0 200 4>,
-+ <0 201 4>,
-+ <0 202 4>,
-+ <0 203 4>,
-+ <0 204 4>,
-+ <0 205 4>,
-+ <0 206 4>,
-+ <0 207 4>,
-+ <0 208 4>,
-+ <0 209 4>;
-+ };
-+
-+ serdes1: serdes@1ea0000 {
-+ compatible = "fsl,serdes-10g";
-+ reg = <0x0 0x1ea0000 0 0x00002000>;
-+ little-endian;
-+ };
- };
-
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
-+ };
-+ };
-+};
-+
-+#include "fsl-tmu-map1.dtsi"
-+
-+&thermal_zones {
-+ thermal-zone0 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone1 {
-+ status = "okay";
-+ };
- };
---- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS2080a QDS Board.
- *
-@@ -7,43 +8,6 @@
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- * Bhupesh Sharma <bhupesh.sharma@freescale.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -59,3 +23,65 @@
- stdout-path = "serial0:115200n8";
- };
- };
-+
-+&ifc {
-+ boardctrl: board-control@3,0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus";
-+ reg = <3 0 0x300>; /* TODO check address */
-+ ranges = <0 3 0 0x300>;
-+
-+ mdio_mux_emi1 {
-+ compatible = "mdio-mux-mmioreg", "mdio-mux";
-+ mdio-parent-bus = <&emdio1>;
-+ reg = <0x54 1>; /* BRDCFG4 */
-+ mux-mask = <0xe0>; /* EMI1_MDIO */
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ /* Child MDIO buses, one for each riser card:
-+ * reg = 0x0, 0x20, 0x40, 0x60, 0x80, 0xa0.
-+ * VSC8234 PHYs on the riser cards.
-+ */
-+
-+ mdio_mux3: mdio@60 {
-+ reg = <0x60>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ mdio0_phy12: mdio_phy0@1c {
-+ reg = <0x1c>;
-+ phy-connection-type = "sgmii";
-+ };
-+ mdio0_phy13: mdio_phy1@1d {
-+ reg = <0x1d>;
-+ phy-connection-type = "sgmii";
-+ };
-+ mdio0_phy14: mdio_phy2@1e {
-+ reg = <0x1e>;
-+ phy-connection-type = "sgmii";
-+ };
-+ mdio0_phy15: mdio_phy3@1f {
-+ reg = <0x1f>;
-+ phy-connection-type = "sgmii";
-+ };
-+ };
-+ };
-+ };
-+};
-+
-+/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */
-+&dpmac9 {
-+ phy-handle = <&mdio0_phy12>;
-+};
-+&dpmac10 {
-+ phy-handle = <&mdio0_phy13>;
-+};
-+&dpmac11 {
-+ phy-handle = <&mdio0_phy14>;
-+};
-+&dpmac12 {
-+ phy-handle = <&mdio0_phy15>;
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS2080a RDB Board.
- *
-@@ -7,43 +8,6 @@
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- * Bhupesh Sharma <bhupesh.sharma@freescale.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -59,3 +23,83 @@
- stdout-path = "serial1:115200n8";
- };
- };
-+
-+&emdio1 {
-+ status = "disabled";
-+ /* CS4340 PHYs */
-+ mdio1_phy1: emdio1_phy@1 {
-+ reg = <0x10>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio1_phy2: emdio1_phy@2 {
-+ reg = <0x11>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio1_phy3: emdio1_phy@3 {
-+ reg = <0x12>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio1_phy4: emdio1_phy@4 {
-+ reg = <0x13>;
-+ phy-connection-type = "xfi";
-+ };
-+};
-+
-+&emdio2 {
-+ /* AQR405 PHYs */
-+ mdio2_phy1: emdio2_phy@1 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 1 0x4>; /* Level high type */
-+ reg = <0x0>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio2_phy2: emdio2_phy@2 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 2 0x4>; /* Level high type */
-+ reg = <0x1>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio2_phy3: emdio2_phy@3 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 4 0x4>; /* Level high type */
-+ reg = <0x2>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio2_phy4: emdio2_phy@4 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 5 0x4>; /* Level high type */
-+ reg = <0x3>;
-+ phy-connection-type = "xfi";
-+ };
-+};
-+
-+/* Update DPMAC connections to external PHYs, under the assumption of
-+ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board.
-+ */
-+/* Leave Cortina nodes commented out until driver is integrated
-+ *&dpmac1 {
-+ * phy-handle = <&mdio1_phy1>;
-+ *};
-+ *&dpmac2 {
-+ * phy-handle = <&mdio1_phy2>;
-+ *};
-+ *&dpmac3 {
-+ * phy-handle = <&mdio1_phy3>;
-+ *};
-+ *&dpmac4 {
-+ * phy-handle = <&mdio1_phy4>;
-+ *};
-+ */
-+
-+&dpmac5 {
-+ phy-handle = <&mdio2_phy1>;
-+};
-+&dpmac6 {
-+ phy-handle = <&mdio2_phy2>;
-+};
-+&dpmac7 {
-+ phy-handle = <&mdio2_phy3>;
-+};
-+&dpmac8 {
-+ phy-handle = <&mdio2_phy4>;
-+};
---- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS2080a software Simulator model
- *
-@@ -5,43 +6,6 @@
- *
- * Bhupesh Sharma <bhupesh.sharma@freescale.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPL or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
---- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-2080A family SoC.
- *
-@@ -6,49 +7,12 @@
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- * Bhupesh Sharma <bhupesh.sharma@freescale.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- #include "fsl-ls208xa.dtsi"
-
- &cpu {
-- cpu0: cpu@0 {
-+ cooling_map0: cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0>;
-@@ -67,7 +31,7 @@
- next-level-cache = <&cluster0_l2>;
- };
-
-- cpu2: cpu@100 {
-+ cooling_map1: cpu2: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x100>;
-@@ -86,7 +50,7 @@
- next-level-cache = <&cluster1_l2>;
- };
-
-- cpu4: cpu@200 {
-+ cooling_map2: cpu4: cpu@200 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x200>;
-@@ -105,7 +69,7 @@
- next-level-cache = <&cluster2_l2>;
- };
-
-- cpu6: cpu@300 {
-+ cooling_map3: cpu6: cpu@300 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x300>;
-@@ -150,6 +114,10 @@
- };
- };
-
-+&timer {
-+ fsl,erratum-a008585;
-+};
-+
- &pcie1 {
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x10 0x00000000 0x0 0x00002000>; /* configuration space */
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts
-@@ -0,0 +1,163 @@
-+/*
-+ * Device Tree file for NXP LS2081A RDB Board.
-+ *
-+ * Copyright 2017 NXP
-+ *
-+ * Priyanka Jain <priyanka.jain@nxp.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+/dts-v1/;
-+
-+#include "fsl-ls2088a.dtsi"
-+
-+/ {
-+ model = "NXP Layerscape 2081A RDB Board";
-+ compatible = "fsl,ls2081a-rdb", "fsl,ls2081a";
-+
-+ aliases {
-+ serial0 = &serial0;
-+ serial1 = &serial1;
-+ };
-+
-+ chosen {
-+ stdout-path = "serial1:115200n8";
-+ };
-+};
-+
-+&esdhc {
-+ status = "okay";
-+};
-+
-+&ifc {
-+ status = "disabled";
-+};
-+
-+&i2c0 {
-+ status = "okay";
-+ pca9547@75 {
-+ compatible = "nxp,pca9547";
-+ reg = <0x75>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ i2c@1 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x01>;
-+ rtc@51 {
-+ compatible = "nxp,pcf2129";
-+ reg = <0x51>;
-+ };
-+ };
-+
-+ i2c@2 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x02>;
-+
-+ ina220@40 {
-+ compatible = "ti,ina220";
-+ reg = <0x40>;
-+ shunt-resistor = <500>;
-+ };
-+ };
-+
-+ i2c@3 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x3>;
-+
-+ adt7481@4c {
-+ compatible = "adi,adt7461";
-+ reg = <0x4c>;
-+ };
-+ };
-+ };
-+};
-+
-+&dspi {
-+ status = "okay";
-+ dflash0: n25q512a {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "st,m25p80";
-+ spi-max-frequency = <3000000>;
-+ reg = <0>;
-+ };
-+};
-+
-+&qspi {
-+ status = "okay";
-+ fsl,qspi-has-second-chip;
-+ flash0: s25fs512s@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "spansion,m25p80";
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ };
-+ flash1: s25fs512s@1 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ compatible = "spansion,m25p80";
-+ spi-max-frequency = <20000000>;
-+ reg = <1>;
-+ };
-+};
-+
-+&sata0 {
-+ status = "okay";
-+};
-+
-+&sata1 {
-+ status = "okay";
-+};
-+
-+&usb0 {
-+ status = "okay";
-+};
-+
-+&usb1 {
-+ status = "okay";
-+};
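The new fsl-ls2081a-rdb.dts above follows the usual board/SoC split: it includes the SoC-level fsl-ls2088a.dtsi and then carries only board deltas, flipping peripherals on or off by referencing their labels (&esdhc, &ifc, &sata0, ...). A minimal sketch of that pattern, with an illustrative board name and node choice rather than anything taken from this patch:

/dts-v1/;
#include "fsl-ls2088a.dtsi"

/ {
	model = "Example LS2081A-based board";
	compatible = "fsl,ls2081a-rdb", "fsl,ls2081a";
};

&esdhc {
	status = "okay";	/* enable a node the SoC dtsi ships disabled */
};

&ifc {
	status = "disabled";	/* or switch one off for this board */
};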
---- a/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS2088A QDS Board.
- *
-@@ -6,43 +7,6 @@
- *
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -58,3 +22,123 @@
- stdout-path = "serial0:115200n8";
- };
- };
-+
-+&ifc {
-+ boardctrl: board-control@3,0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus";
-+ reg = <3 0 0x300>; /* TODO check address */
-+ ranges = <0 3 0 0x300>;
-+
-+ mdio_mux_emi1 {
-+ compatible = "mdio-mux-mmioreg", "mdio-mux";
-+ mdio-parent-bus = <&emdio1>;
-+ reg = <0x54 1>; /* BRDCFG4 */
-+ mux-mask = <0xe0>; /* EMI1_MDIO */
-+
-+ #address-cells=<1>;
-+ #size-cells = <0>;
-+
-+ /* Child MDIO buses, one for each riser card:
-+ * reg = 0x0, 0x20, 0x40, 0x60, 0x80, 0xa0.
-+ * VSC8234 PHYs on the riser cards.
-+ */
-+
-+ mdio_mux3: mdio@60 {
-+ reg = <0x60>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ mdio0_phy12: mdio_phy0@1c {
-+ reg = <0x1c>;
-+ phy-connection-type = "sgmii";
-+ };
-+ mdio0_phy13: mdio_phy1@1d {
-+ reg = <0x1d>;
-+ phy-connection-type = "sgmii";
-+ };
-+ mdio0_phy14: mdio_phy2@1e {
-+ reg = <0x1e>;
-+ phy-connection-type = "sgmii";
-+ };
-+ mdio0_phy15: mdio_phy3@1f {
-+ reg = <0x1f>;
-+ phy-connection-type = "sgmii";
-+ };
-+ };
-+ };
-+ };
-+};
-+
-+&pcs_mdio1 {
-+ pcs_phy1: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x9C0 0x40>;/* lane H */
-+ };
-+};
-+
-+&pcs_mdio2 {
-+ pcs_phy2: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x980 0x40>;/* lane G */
-+ };
-+};
-+
-+&pcs_mdio3 {
-+ pcs_phy3: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x940 0x40>;/* lane F */
-+ };
-+};
-+
-+&pcs_mdio4 {
-+ pcs_phy4: ethernet-phy@0 {
-+ backplane-mode = "10gbase-kr";
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0x900 0x40>;/* lane E */
-+ };
-+};
-+
-+/* Update DPMAC connections to backplane PHYs, under SerDes 0x2a_0xXX.
-+ * &dpmac1 {
-+ * phy-handle = <&pcs_phy1>;
-+ * };
-+ *
-+ * &dpmac2 {
-+ * phy-handle = <&pcs_phy2>;
-+ * };
-+ *
-+ * &dpmac3 {
-+ * phy-handle = <&pcs_phy3>;
-+ * };
-+ *
-+ * &dpmac4 {
-+ * phy-handle = <&pcs_phy4>;
-+ * };
-+ */
-+
-+/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */
-+&dpmac9 {
-+ phy-handle = <&mdio0_phy12>;
-+};
-+&dpmac10 {
-+ phy-handle = <&mdio0_phy13>;
-+};
-+&dpmac11 {
-+ phy-handle = <&mdio0_phy14>;
-+};
-+&dpmac12 {
-+ phy-handle = <&mdio0_phy15>;
-+};
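The board-control hunk above muxes the EMI1 MDIO bus between the riser-card slots through the QIXIS BRDCFG4 register. As I read the mdio-mux-mmioreg binding, the driver masks the register with mux-mask and activates whichever child bus has a matching reg, so mdio@60 is selected while (BRDCFG4 & 0xe0) == 0x60. A condensed sketch using only values already present in the hunk:

mdio_mux_emi1 {
	compatible = "mdio-mux-mmioreg", "mdio-mux";
	mdio-parent-bus = <&emdio1>;
	reg = <0x54 1>;		/* BRDCFG4, one byte wide */
	mux-mask = <0xe0>;	/* bits 7:5 pick the child bus */

	mdio@60 {		/* active when (BRDCFG4 & 0xe0) == 0x60 */
		reg = <0x60>;
		#address-cells = <1>;
		#size-cells = <0>;
	};
};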
---- a/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS2088A RDB Board.
- *
-@@ -6,43 +7,6 @@
- *
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- /dts-v1/;
-@@ -58,3 +22,83 @@
- stdout-path = "serial1:115200n8";
- };
- };
-+
-+&emdio1 {
-+ status = "disabled";
-+ /* CS4340 PHYs */
-+ mdio1_phy1: emdio1_phy@1 {
-+ reg = <0x10>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio1_phy2: emdio1_phy@2 {
-+ reg = <0x11>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio1_phy3: emdio1_phy@3 {
-+ reg = <0x12>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio1_phy4: emdio1_phy@4 {
-+ reg = <0x13>;
-+ phy-connection-type = "xfi";
-+ };
-+};
-+
-+&emdio2 {
-+ /* AQR405 PHYs */
-+ mdio2_phy1: emdio2_phy@1 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 1 0x4>; /* Level high type */
-+ reg = <0x0>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio2_phy2: emdio2_phy@2 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 2 0x4>; /* Level high type */
-+ reg = <0x1>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio2_phy3: emdio2_phy@3 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 4 0x4>; /* Level high type */
-+ reg = <0x2>;
-+ phy-connection-type = "xfi";
-+ };
-+ mdio2_phy4: emdio2_phy@4 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ interrupts = <0 5 0x4>; /* Level high type */
-+ reg = <0x3>;
-+ phy-connection-type = "xfi";
-+ };
-+};
-+
-+/* Update DPMAC connections to external PHYs, under the assumption of
-+ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board.
-+ */
-+/* Leave Cortina PHYs commented out until proper driver is integrated
-+ *&dpmac1 {
-+ * phy-handle = <&mdio1_phy1>;
-+ *};
-+ *&dpmac2 {
-+ * phy-handle = <&mdio1_phy2>;
-+ *};
-+ *&dpmac3 {
-+ * phy-handle = <&mdio1_phy3>;
-+ *};
-+ *&dpmac4 {
-+ * phy-handle = <&mdio1_phy4>;
-+ *};
-+ */
-+
-+&dpmac5 {
-+ phy-handle = <&mdio2_phy1>;
-+};
-+&dpmac6 {
-+ phy-handle = <&mdio2_phy2>;
-+};
-+&dpmac7 {
-+ phy-handle = <&mdio2_phy3>;
-+};
-+&dpmac8 {
-+ phy-handle = <&mdio2_phy4>;
-+};
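The AQR405 PHY nodes above use raw three-cell GIC specifiers such as interrupts = <0 1 0x4>: the first cell distinguishes SPI (0) from PPI (1), the second is the interrupt number, and the third is the trigger type (0x4 = level high), matching the /* Level high type */ comments. The lx2160a files later in this patch spell the same thing with dt-bindings macros; the raw form above is equivalent to:

#include <dt-bindings/interrupt-controller/arm-gic.h>

mdio2_phy1: emdio2_phy@1 {
	compatible = "ethernet-phy-ieee802.3-c45";
	interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;	/* same as <0 1 0x4> */
	reg = <0x0>;
	phy-connection-type = "xfi";
};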
---- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-2088A family SoC.
- *
-@@ -6,49 +7,12 @@
- *
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- #include "fsl-ls208xa.dtsi"
-
- &cpu {
-- cpu0: cpu@0 {
-+ cooling_map0: cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x0>;
-@@ -67,7 +31,7 @@
- next-level-cache = <&cluster0_l2>;
- };
-
-- cpu2: cpu@100 {
-+ cooling_map1: cpu2: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x100>;
-@@ -86,7 +50,7 @@
- next-level-cache = <&cluster1_l2>;
- };
-
-- cpu4: cpu@200 {
-+ cooling_map2: cpu4: cpu@200 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x200>;
-@@ -105,7 +69,7 @@
- next-level-cache = <&cluster2_l2>;
- };
-
-- cpu6: cpu@300 {
-+ cooling_map3: cpu6: cpu@300 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x300>;
-@@ -143,7 +107,7 @@
- CPU_PW20: cpu-pw20 {
- compatible = "arm,idle-state";
- idle-state-name = "PW20";
-- arm,psci-suspend-param = <0x00010000>;
-+ arm,psci-suspend-param = <0x0>;
- entry-latency-us = <2000>;
- exit-latency-us = <2000>;
- min-residency-us = <6000>;
-@@ -151,6 +115,7 @@
- };
-
- &pcie1 {
-+ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
-
-@@ -159,6 +124,7 @@
- };
-
- &pcie2 {
-+ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
-
-@@ -167,6 +133,7 @@
- };
-
- &pcie3 {
-+ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
-
-@@ -175,6 +142,7 @@
- };
-
- &pcie4 {
-+ compatible = "fsl,ls2088a-pcie", "snps,dw-pcie";
- reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
- 0x38 0x00000000 0x0 0x00002000>; /* configuration space */
-
---- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS2080A QDS Board.
- *
-@@ -6,43 +7,6 @@
- *
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- &esdhc {
-@@ -165,16 +129,21 @@
-
- &qspi {
- status = "okay";
-+ fsl,qspi-has-second-chip;
- flash0: s25fl256s1@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p80";
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
- spi-max-frequency = <20000000>;
- reg = <0>;
- };
- flash2: s25fl256s1@2 {
- #address-cells = <1>;
- #size-cells = <1>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
- compatible = "st,m25p80";
- spi-max-frequency = <20000000>;
- reg = <0>;
---- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree file for Freescale LS2080A RDB Board.
- *
-@@ -6,43 +7,6 @@
- *
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- &esdhc {
-@@ -85,6 +49,7 @@
- reg = <0x75>;
- #address-cells = <1>;
- #size-cells = <0>;
-+ i2c-mux-never-disable;
- i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
-@@ -95,6 +60,17 @@
- };
- };
-
-+ i2c@2 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x02>;
-+ ina220@40 {
-+ compatible = "ti,ina220";
-+ reg = <0x40>;
-+ shunt-resistor = <500>;
-+ };
-+ };
-+
- i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
-@@ -132,7 +108,15 @@
- };
-
- &qspi {
-- status = "disabled";
-+ status = "okay";
-+ flash0: s25fs512s@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "spansion,m25p80";
-+ m25p,fast-read;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ };
- };
-
- &sata0 {
---- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- /*
- * Device Tree Include file for Freescale Layerscape-2080A family SoC.
- *
-@@ -6,43 +7,6 @@
- *
- * Abhimanyu Saini <abhimanyu.saini@nxp.com>
- *
-- * This file is dual-licensed: you can use it either under the terms
-- * of the GPLv2 or the X11 license, at your option. Note that this dual
-- * licensing only applies to this file, and not this project as a
-- * whole.
-- *
-- * a) This library is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation; either version 2 of the
-- * License, or (at your option) any later version.
-- *
-- * This library is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * Or, alternatively,
-- *
-- * b) Permission is hereby granted, free of charge, to any person
-- * obtaining a copy of this software and associated documentation
-- * files (the "Software"), to deal in the Software without
-- * restriction, including without limitation the rights to use,
-- * copy, modify, merge, publish, distribute, sublicense, and/or
-- * sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following
-- * conditions:
-- *
-- * The above copyright notice and this permission notice shall be
-- * included in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
- #include <dt-bindings/thermal/thermal.h>
-@@ -111,13 +75,12 @@
- mask = <0x2>;
- };
-
-- timer {
-+ timer: timer {
- compatible = "arm,armv8-timer";
- interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
- <1 14 4>, /* Physical Non-Secure PPI, active-low */
- <1 11 4>, /* Virtual PPI, active-low */
- <1 10 4>; /* Hypervisor PPI, active-low */
-- fsl,erratum-a008585;
- };
-
- pmu {
-@@ -135,6 +98,7 @@
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
-
- clockgen: clocking@1300000 {
- compatible = "fsl,ls2080a-clockgen";
-@@ -194,54 +158,7 @@
- #thermal-sensor-cells = <1>;
- };
-
-- thermal-zones {
-- cpu_thermal: cpu-thermal {
-- polling-delay-passive = <1000>;
-- polling-delay = <5000>;
--
-- thermal-sensors = <&tmu 4>;
--
-- trips {
-- cpu_alert: cpu-alert {
-- temperature = <75000>;
-- hysteresis = <2000>;
-- type = "passive";
-- };
-- cpu_crit: cpu-crit {
-- temperature = <85000>;
-- hysteresis = <2000>;
-- type = "critical";
-- };
-- };
--
-- cooling-maps {
-- map0 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu0 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- map1 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu2 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- map2 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu4 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- map3 {
-- trip = <&cpu_alert>;
-- cooling-device =
-- <&cpu6 THERMAL_NO_LIMIT
-- THERMAL_NO_LIMIT>;
-- };
-- };
-- };
-- };
-+ #include "fsl-tmu.dtsi"
-
- serial0: serial@21c0500 {
- compatible = "fsl,ns16550", "ns16550a";
-@@ -357,6 +274,8 @@
- reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
- <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
- msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
-+ dma-coherent;
- #address-cells = <3>;
- #size-cells = <1>;
-
-@@ -460,6 +379,8 @@
- compatible = "arm,mmu-500";
- reg = <0 0x5000000 0 0x800000>;
- #global-interrupts = <12>;
-+ #iommu-cells = <1>;
-+ stream-match-mask = <0x7C00>;
- interrupts = <0 13 4>, /* global secure fault */
- <0 14 4>, /* combined secure interrupt */
- <0 15 4>, /* global non-secure fault */
-@@ -502,7 +423,6 @@
- <0 204 4>, <0 205 4>,
- <0 206 4>, <0 207 4>,
- <0 208 4>, <0 209 4>;
-- mmu-masters = <&fsl_mc 0x300 0>;
- };
-
- dspi: dspi@2100000 {
-@@ -574,15 +494,126 @@
- #interrupt-cells = <2>;
- };
-
-+ /* TODO: WRIOP (CCSR?) */
-+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000,
-+ * E-MDIO1: 0x1_6000
-+ */
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8B96000 0x0 0x1000>;
-+ device_type = "mdio"; /* TODO: is this necessary? */
-+ little-endian; /* force the driver in LE mode */
-+
-+ /* Not necessary on the QDS, but needed on the RDB */
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000,
-+ * E-MDIO2: 0x1_7000
-+ */
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8B97000 0x0 0x1000>;
-+ device_type = "mdio"; /* TODO: is this necessary? */
-+ little-endian; /* force the driver in LE mode */
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio1: mdio@0x8c07000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c07000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio2: mdio@0x8c0b000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c0b000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio3: mdio@0x8c0f000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c0f000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio4: mdio@0x8c13000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c13000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio5: mdio@0x8c17000 {
-+ status = "disabled";
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c17000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio6: mdio@0x8c1b000 {
-+ status = "disabled";
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c1b000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio7: mdio@0x8c1f000 {
-+ status = "disabled";
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c1f000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio8: mdio@0x8c23000 {
-+ status = "disabled";
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c23000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
- i2c0: i2c@2000000 {
- status = "disabled";
-- compatible = "fsl,vf610-i2c";
-+ compatible = "fsl,vf610-i2c", "fsl,ls208xa-vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2000000 0x0 0x10000>;
- interrupts = <0 34 0x4>; /* Level high type */
- clock-names = "i2c";
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 1>;
-+ scl-gpios = <&gpio3 10 0>;
- };
-
- i2c1: i2c@2010000 {
-@@ -593,7 +624,7 @@
- reg = <0x0 0x2010000 0x0 0x10000>;
- interrupts = <0 34 0x4>; /* Level high type */
- clock-names = "i2c";
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 1>;
- };
-
- i2c2: i2c@2020000 {
-@@ -604,7 +635,7 @@
- reg = <0x0 0x2020000 0x0 0x10000>;
- interrupts = <0 35 0x4>; /* Level high type */
- clock-names = "i2c";
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 1>;
- };
-
- i2c3: i2c@2030000 {
-@@ -615,7 +646,7 @@
- reg = <0x0 0x2030000 0x0 0x10000>;
- interrupts = <0 35 0x4>; /* Level high type */
- clock-names = "i2c";
-- clocks = <&clockgen 4 3>;
-+ clocks = <&clockgen 4 1>;
- };
-
- ifc: ifc@2240000 {
-@@ -648,8 +679,8 @@
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg-names = "regs", "config";
-- interrupts = <0 108 0x4>; /* Level high type */
-- interrupt-names = "intr";
-+ interrupts = <0 108 0x4>; /* aer interrupt */
-+ interrupt-names = "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
-@@ -657,20 +688,22 @@
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
- <0000 0 0 2 &gic 0 0 0 110 4>,
- <0000 0 0 3 &gic 0 0 0 111 4>,
- <0000 0 0 4 &gic 0 0 0 112 4>;
-+ status = "disabled";
- };
-
- pcie2: pcie@3500000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg-names = "regs", "config";
-- interrupts = <0 113 0x4>; /* Level high type */
-- interrupt-names = "intr";
-+ interrupts = <0 113 0x4>; /* aer interrupt */
-+ interrupt-names = "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
-@@ -678,20 +711,22 @@
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
- <0000 0 0 2 &gic 0 0 0 115 4>,
- <0000 0 0 3 &gic 0 0 0 116 4>,
- <0000 0 0 4 &gic 0 0 0 117 4>;
-+ status = "disabled";
- };
-
- pcie3: pcie@3600000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg-names = "regs", "config";
-- interrupts = <0 118 0x4>; /* Level high type */
-- interrupt-names = "intr";
-+ interrupts = <0 118 0x4>; /* aer interrupt */
-+ interrupt-names = "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
-@@ -699,20 +734,22 @@
- num-lanes = <8>;
- bus-range = <0x0 0xff>;
- msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
- <0000 0 0 2 &gic 0 0 0 120 4>,
- <0000 0 0 3 &gic 0 0 0 121 4>,
- <0000 0 0 4 &gic 0 0 0 122 4>;
-+ status = "disabled";
- };
-
- pcie4: pcie@3700000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg-names = "regs", "config";
-- interrupts = <0 123 0x4>; /* Level high type */
-- interrupt-names = "intr";
-+ interrupts = <0 123 0x4>; /* aer interrupt */
-+ interrupt-names = "aer";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
-@@ -720,12 +757,14 @@
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
- <0000 0 0 2 &gic 0 0 0 125 4>,
- <0000 0 0 3 &gic 0 0 0 126 4>,
- <0000 0 0 4 &gic 0 0 0 127 4>;
-+ status = "disabled";
- };
-
- sata0: sata@3200000 {
-@@ -754,6 +793,8 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
- };
-
- usb1: usb3@3110000 {
-@@ -764,6 +805,14 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ };
-+
-+ serdes1: serdes@1ea0000 {
-+ compatible = "fsl,serdes-10g";
-+ reg = <0x0 0x1ea0000 0 0x00002000>;
-+ little-endian;
- };
-
- ccn@4000000 {
-@@ -771,6 +820,14 @@
- reg = <0x0 0x04000000 0x0 0x01000000>;
- interrupts = <0 12 4>;
- };
-+
-+ ftm0: ftm0@2800000 {
-+ compatible = "fsl,ls208xa-ftm-alarm";
-+ reg = <0x0 0x2800000 0x0 0x10000>,
-+ <0x0 0x1e34050 0x0 0x4>;
-+ reg-names = "ftm", "pmctrl";
-+ interrupts = <0 44 4>;
-+ };
- };
-
- ddr1: memory-controller@1080000 {
-@@ -786,4 +843,44 @@
- interrupts = <0 18 0x4>;
- little-endian;
- };
-+
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
-+ };
-+ };
-+};
-+
-+#include "fsl-tmu-map1.dtsi"
-+#include "fsl-tmu-map2.dtsi"
-+#include "fsl-tmu-map3.dtsi"
-+&thermal_zones {
-+ thermal-zone1 {
-+ status = "okay";
-+ };
-+
-+ thermal-zone2{
-+ status = "okay";
-+ };
-+
-+ thermal-zone3{
-+ status = "okay";
-+ };
-+
-+ thermal-zone4{
-+ status = "okay";
-+ };
-+
-+ thermal-zone5{
-+ status = "okay";
-+ };
-+
-+ thermal-zone6{
-+ status = "okay";
-+ };
-+
-+ thermal-zone7 {
-+ status = "okay";
-+ };
- };
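The fsl-ls208xa.dtsi hunk above adds iommu-map entries to the fsl_mc and PCIe nodes and #iommu-cells/stream-match-mask to the SMMU, replacing the old mmu-masters link. An iommu-map entry has the shape <rid-base &iommu iommu-base length>; the <0 &smmu 0 0> and <0 &smmu 0 1> values here are placeholders that, per the comments, U-Boot rewrites with the real stream-ID ranges at boot. A sketch of a fully populated entry, where the 0x100 length is an illustrative value and not something from this patch:

pcie@3400000 {
	/* map PCI requester IDs 0x000-0x0ff 1:1 onto SMMU stream IDs */
	iommu-map = <0x0 &smmu 0x0 0x100>;
};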
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
-@@ -0,0 +1,353 @@
-+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-+//
-+// Device Tree file for LX2160AQDS
-+//
-+// Copyright 2018 NXP
-+
-+/dts-v1/;
-+
-+#include "fsl-lx2160a.dtsi"
-+
-+/ {
-+ model = "NXP Layerscape LX2160AQDS";
-+ compatible = "fsl,lx2160a-qds", "fsl,lx2160a";
-+
-+ aliases {
-+ crypto = &crypto;
-+ serial0 = &uart0;
-+ };
-+
-+ chosen {
-+ stdout-path = "serial0:115200n8";
-+ };
-+
-+ sb_3v3: regulator-sb3v3 {
-+ compatible = "regulator-fixed";
-+ regulator-name = "MC34717-3.3VSB";
-+ regulator-min-microvolt = <3300000>;
-+ regulator-max-microvolt = <3300000>;
-+ regulator-boot-on;
-+ regulator-always-on;
-+ };
-+
-+ mdio-mux-1 {
-+ compatible = "mdio-mux-multiplexer";
-+ mux-controls = <&mux 0>;
-+ mdio-parent-bus = <&emdio1>;
-+ #address-cells=<1>;
-+ #size-cells = <0>;
-+
-+ mdio@0 { /* On-board PHY #1 RGMI1*/
-+ reg = <0x00>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@8 { /* On-board PHY #2 RGMI2*/
-+ reg = <0x8>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@18 { /* Slot #1 */
-+ reg = <0x18>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@19 { /* Slot #2 */
-+ reg = <0x19>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@1a { /* Slot #3 */
-+ reg = <0x1a>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@1b { /* Slot #4 */
-+ reg = <0x1b>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@1c { /* Slot #5 */
-+ reg = <0x1c>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@1d { /* Slot #6 */
-+ reg = <0x1d>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@1e { /* Slot #7 */
-+ reg = <0x1e>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@1f { /* Slot #8 */
-+ reg = <0x1f>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+ };
-+
-+ mdio-mux-2 {
-+ compatible = "mdio-mux-multiplexer";
-+ mux-controls = <&mux 1>;
-+ mdio-parent-bus = <&emdio2>;
-+ #address-cells=<1>;
-+ #size-cells = <0>;
-+
-+ mdio@0 { /* Slot #1 (secondary EMI) */
-+ reg = <0x00>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@1 { /* Slot #2 (secondary EMI) */
-+ reg = <0x01>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@2 { /* Slot #3 (secondary EMI) */
-+ reg = <0x02>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@3 { /* Slot #4 (secondary EMI) */
-+ reg = <0x03>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@4 { /* Slot #5 (secondary EMI) */
-+ reg = <0x04>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@5 { /* Slot #6 (secondary EMI) */
-+ reg = <0x05>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@6 { /* Slot #7 (secondary EMI) */
-+ reg = <0x06>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ mdio@7 { /* Slot #8 (secondary EMI) */
-+ reg = <0x07>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+ };
-+};
-+
-+&crypto {
-+ status = "okay";
-+};
-+
-+&dspi0 {
-+ status = "okay";
-+
-+ dflash0: flash@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "jedec,spi-nor";
-+ reg = <0>;
-+ spi-max-frequency = <1000000>;
-+ };
-+};
-+
-+&dspi1 {
-+ status = "okay";
-+
-+ dflash1: flash@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "jedec,spi-nor";
-+ reg = <0>;
-+ spi-max-frequency = <1000000>;
-+ };
-+};
-+
-+&dspi2 {
-+ status = "okay";
-+
-+ dflash2: flash@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "jedec,spi-nor";
-+ reg = <0>;
-+ spi-max-frequency = <1000000>;
-+ };
-+};
-+
-+&emdio1 {
-+ status = "okay";
-+};
-+
-+&emdio2 {
-+ status = "okay";
-+};
-+
-+&esdhc0 {
-+ status = "okay";
-+};
-+
-+&esdhc1 {
-+ status = "okay";
-+};
-+
-+&i2c0 {
-+ status = "okay";
-+
-+ fpga@66 {
-+ compatible = "fsl,lx2160aqds-fpga", "fsl,fpga-qixis-i2c",
-+ "simple-mfd";
-+ reg = <0x66>;
-+
-+ mux: mux-controller {
-+ compatible = "reg-mux";
-+ #mux-control-cells = <1>;
-+ mux-reg-masks = <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */
-+ <0x54 0x07>; /* 1: reg 0x54, bit 2:0 */
-+ };
-+ };
-+
-+ i2c-mux@77 {
-+ compatible = "nxp,pca9547";
-+ reg = <0x77>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ i2c@2 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x2>;
-+
-+ power-monitor@40 {
-+ compatible = "ti,ina220";
-+ reg = <0x40>;
-+ shunt-resistor = <500>;
-+ };
-+
-+ power-monitor@41 {
-+ compatible = "ti,ina220";
-+ reg = <0x41>;
-+ shunt-resistor = <1000>;
-+ };
-+ };
-+
-+ i2c@3 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x3>;
-+
-+ temperature-sensor@4c {
-+ compatible = "nxp,sa56004";
-+ reg = <0x4c>;
-+ vcc-supply = <&sb_3v3>;
-+ };
-+
-+ temperature-sensor@4d {
-+ compatible = "nxp,sa56004";
-+ reg = <0x4d>;
-+ vcc-supply = <&sb_3v3>;
-+ };
-+
-+ rtc@51 {
-+ compatible = "nxp,pcf2129";
-+ reg = <0x51>;
-+ };
-+ };
-+ };
-+};
-+
-+&uart0 {
-+ status = "okay";
-+};
-+
-+&uart1 {
-+ status = "okay";
-+};
-+
-+&usb0 {
-+ status = "okay";
-+};
-+
-+&usb1 {
-+ status = "okay";
-+};
-+
-+&pcs_mdio1 {
-+ pcs_phy1: ethernet-phy@0 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ backplane-mode = "40gbase-kr";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0xF00 0xE00 0xD00 0xC00>; /* lanes H, G, F, E */
-+ };
-+};
-+
-+&pcs_mdio2 {
-+ pcs_phy2: ethernet-phy@0 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ backplane-mode = "40gbase-kr";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0xB00 0xA00 0x900 0x800>; /* lanes D, C, B, A */
-+ };
-+};
-+
-+&pcs_mdio3 {
-+ pcs_phy3: ethernet-phy@0 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ backplane-mode = "10gbase-kr";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0xF00 0x100>; /* lane H */
-+ };
-+};
-+
-+&pcs_mdio4 {
-+ pcs_phy4: ethernet-phy@0 {
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ backplane-mode = "10gbase-kr";
-+ reg = <0x0>;
-+ fsl,lane-handle = <&serdes1>;
-+ fsl,lane-reg = <0xE00 0x100>; /* lane G */
-+ };
-+};
-+
-+/* Update DPMAC connections to 40G backplane PHYs
-+ * &dpmac1 {
-+ * phy-handle = <&pcs_phy1>;
-+ * };
-+ *
-+ * &dpmac2 {
-+ * phy-handle = <&pcs_phy2>;
-+ * };
-+ */
-+
-+/* Update DPMAC connections to 10G backplane PHYs
-+ * &dpmac3 {
-+ * phy-handle = <&pcs_phy3>;
-+ * };
-+ *
-+ * &dpmac4 {
-+ * phy-handle = <&pcs_phy4>;
-+ * };
-+ */
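The LX2160AQDS dts above models the QIXIS FPGA as a reg-mux mux-controller exposing two fields of register 0x54, and each mdio-mux-multiplexer consumer picks one of them with mux-controls = <&mux N>. As I understand those bindings, a child bus's reg is the state programmed into the selected field while that bus is in use. A trimmed sketch of the pairing, using values from the hunk:

mux: mux-controller {
	compatible = "reg-mux";
	#mux-control-cells = <1>;
	mux-reg-masks = <0x54 0xf8>,	/* control 0: reg 0x54, bits 7:3 */
			<0x54 0x07>;	/* control 1: reg 0x54, bits 2:0 */
};

mdio-mux-1 {
	compatible = "mdio-mux-multiplexer";
	mux-controls = <&mux 0>;	/* consumes control 0 */
	mdio-parent-bus = <&emdio1>;
	#address-cells = <1>;
	#size-cells = <0>;

	mdio@18 {	/* Slot #1: field set to 0x18 while this bus is used */
		reg = <0x18>;
		#address-cells = <1>;
		#size-cells = <0>;
	};
};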
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
-@@ -0,0 +1,233 @@
-+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-+//
-+// Device Tree file for LX2160ARDB
-+//
-+// Copyright 2018 NXP
-+
-+/dts-v1/;
-+
-+#include "fsl-lx2160a.dtsi"
-+
-+/ {
-+ model = "NXP Layerscape LX2160ARDB";
-+ compatible = "fsl,lx2160a-rdb", "fsl,lx2160a";
-+
-+ aliases {
-+ crypto = &crypto;
-+ serial0 = &uart0;
-+ };
-+
-+ chosen {
-+ stdout-path = "serial0:115200n8";
-+ };
-+
-+ sb_3v3: regulator-sb3v3 {
-+ compatible = "regulator-fixed";
-+ regulator-name = "MC34717-3.3VSB";
-+ regulator-min-microvolt = <3300000>;
-+ regulator-max-microvolt = <3300000>;
-+ regulator-boot-on;
-+ regulator-always-on;
-+ };
-+};
-+
-+&crypto {
-+ status = "okay";
-+};
-+
-+&emdio1 {
-+ status = "okay";
-+};
-+
-+&emdio2 {
-+ status = "okay";
-+};
-+
-+&esdhc0 {
-+ sd-uhs-sdr104;
-+ sd-uhs-sdr50;
-+ sd-uhs-sdr25;
-+ sd-uhs-sdr12;
-+ status = "okay";
-+};
-+
-+&esdhc1 {
-+ mmc-hs200-1_8v;
-+ mmc-hs400-1_8v;
-+ bus-width = <8>;
-+ status = "okay";
-+};
-+
-+&i2c0 {
-+ status = "okay";
-+
-+ i2c-mux@77 {
-+ compatible = "nxp,pca9547";
-+ reg = <0x77>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ i2c@2 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x2>;
-+
-+ power-monitor@40 {
-+ compatible = "ti,ina220";
-+ reg = <0x40>;
-+ shunt-resistor = <1000>;
-+ };
-+ };
-+
-+ i2c@3 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x3>;
-+
-+ temperature-sensor@4c {
-+ compatible = "nxp,sa56004";
-+ reg = <0x4c>;
-+ vcc-supply = <&sb_3v3>;
-+ };
-+
-+ temperature-sensor@4d {
-+ compatible = "nxp,sa56004";
-+ reg = <0x4d>;
-+ vcc-supply = <&sb_3v3>;
-+ };
-+ };
-+ };
-+};
-+
-+&i2c4 {
-+ status = "okay";
-+
-+ rtc@51 {
-+ compatible = "nxp,pcf2129";
-+ reg = <0x51>;
-+ // IRQ10_B
-+ interrupts = <0 150 0x4>;
-+ };
-+};
-+
-+&fspi {
-+ status = "okay";
-+ nxp,fspi-has-second-chip;
-+ flash0: mt35xu512aba@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "micron,m25p80";
-+ m25p,fast-read;
-+ spi-max-frequency = <50000000>;
-+ reg = <0>;
-+ /* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
-+ spi-rx-bus-width = <8>;
-+ spi-tx-bus-width = <1>;
-+ };
-+
-+ flash1: mt35xu512aba@1 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ compatible = "micron,m25p80";
-+ m25p,fast-read;
-+ spi-max-frequency = <50000000>;
-+ reg = <1>;
-+ /* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
-+ spi-rx-bus-width = <8>;
-+ spi-tx-bus-width = <1>;
-+ };
-+};
-+
-+&uart0 {
-+ status = "okay";
-+};
-+
-+&uart1 {
-+ status = "okay";
-+};
-+
-+&usb0 {
-+ status = "okay";
-+};
-+
-+&usb1 {
-+ status = "okay";
-+};
-+
-+&emdio1 {
-+ rgmii_phy1: ethernet-phy@1 {
-+ /* AR8035 PHY - "compatible" property not strictly needed */
-+ compatible = "ethernet-phy-id004d.d072";
-+ reg = <0x1>;
-+ /* Poll mode - no "interrupts" property defined */
-+ };
-+ rgmii_phy2: ethernet-phy@2 {
-+ /* AR8035 PHY - "compatible" property not strictly needed */
-+ compatible = "ethernet-phy-id004d.d072";
-+ reg = <0x2>;
-+ /* Poll mode - no "interrupts" property defined */
-+ };
-+ aquantia_phy1: ethernet-phy@4 {
-+ /* AQR107 PHY - "compatible" property not strictly needed */
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x4>;
-+ /* Poll mode - no "interrupts" property defined */
-+ };
-+ aquantia_phy2: ethernet-phy@5 {
-+ /* AQR107 PHY - "compatible" property not strictly needed */
-+ compatible = "ethernet-phy-ieee802.3-c45";
-+ reg = <0x5>;
-+ /* Poll mode - no "interrupts" property defined */
-+ };
-+};
-+
-+&emdio2 {
-+ inphi_phy: ethernet-phy@0 {
-+ compatible = "ethernet-phy-id0210.7440";
-+ reg = <0x0>;
-+ };
-+};
-+
-+&dpmac3 {
-+ phy-handle = <&aquantia_phy1>;
-+ phy-connection-type = "xgmii";
-+};
-+
-+&dpmac4 {
-+ phy-handle = <&aquantia_phy2>;
-+ phy-connection-type = "xgmii";
-+};
-+
-+&dpmac5 {
-+ phy-handle = <&inphi_phy>;
-+};
-+
-+&dpmac6 {
-+ phy-handle = <&inphi_phy>;
-+};
-+
-+&dpmac17 {
-+ phy-handle = <&rgmii_phy1>;
-+ phy-connection-type = "rgmii-id";
-+};
-+
-+&dpmac18 {
-+ phy-handle = <&rgmii_phy2>;
-+ phy-connection-type = "rgmii-id";
-+};
-+
-+&sata0 {
-+ status = "okay";
-+};
-+
-+&sata1 {
-+ status = "okay";
-+};
-+
-+&sata2 {
-+ status = "okay";
-+};
-+
-+&sata3 {
-+ status = "okay";
-+};
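The FlexSPI flash nodes in the LX2160ARDB hunk above pair spi-tx-bus-width = <1> with spi-rx-bus-width = <8>; per the in-line comment this selects a 1-1-8 (CMD-ADDR-DATA) read mode, i.e. opcode and address go out on a single line while read data comes back on eight. A minimal sketch of just those properties, trimmed from the hunk:

flash0: mt35xu512aba@0 {
	compatible = "micron,m25p80";
	reg = <0>;
	spi-max-frequency = <50000000>;
	spi-tx-bus-width = <1>;	/* command/address phase on 1 line */
	spi-rx-bus-width = <8>;	/* data phase on 8 lines -> 1-1-8 */
};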
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
-@@ -0,0 +1,1318 @@
-+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-+//
-+// Device Tree Include file for Layerscape-LX2160A family SoC.
-+//
-+// Copyright 2018 NXP
-+
-+#include <dt-bindings/gpio/gpio.h>
-+#include <dt-bindings/interrupt-controller/arm-gic.h>
-+
-+/memreserve/ 0x80000000 0x00010000;
-+
-+/ {
-+ compatible = "fsl,lx2160a";
-+ interrupt-parent = <&gic>;
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+
-+ cpus {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ // 8 clusters having 2 Cortex-A72 cores each
-+ cpu@0 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x0>;
-+ clocks = <&clockgen 1 0>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster0_l2>;
-+ };
-+
-+ cpu@1 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x1>;
-+ clocks = <&clockgen 1 0>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster0_l2>;
-+ };
-+
-+ cpu@100 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x100>;
-+ clocks = <&clockgen 1 1>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster1_l2>;
-+ };
-+
-+ cpu@101 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x101>;
-+ clocks = <&clockgen 1 1>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster1_l2>;
-+ };
-+
-+ cpu@200 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x200>;
-+ clocks = <&clockgen 1 2>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster2_l2>;
-+ };
-+
-+ cpu@201 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x201>;
-+ clocks = <&clockgen 1 2>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster2_l2>;
-+ };
-+
-+ cpu@300 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x300>;
-+ clocks = <&clockgen 1 3>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster3_l2>;
-+ };
-+
-+ cpu@301 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x301>;
-+ clocks = <&clockgen 1 3>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster3_l2>;
-+ };
-+
-+ cpu@400 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x400>;
-+ clocks = <&clockgen 1 4>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster4_l2>;
-+ };
-+
-+ cpu@401 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x401>;
-+ clocks = <&clockgen 1 4>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster4_l2>;
-+ };
-+
-+ cpu@500 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x500>;
-+ clocks = <&clockgen 1 5>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster5_l2>;
-+ };
-+
-+ cpu@501 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x501>;
-+ clocks = <&clockgen 1 5>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster5_l2>;
-+ };
-+
-+ cpu@600 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x600>;
-+ clocks = <&clockgen 1 6>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster6_l2>;
-+ };
-+
-+ cpu@601 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x601>;
-+ clocks = <&clockgen 1 6>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster6_l2>;
-+ };
-+
-+ cpu@700 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x700>;
-+ clocks = <&clockgen 1 7>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster7_l2>;
-+ };
-+
-+ cpu@701 {
-+ device_type = "cpu";
-+ compatible = "arm,cortex-a72";
-+ enable-method = "psci";
-+ reg = <0x701>;
-+ clocks = <&clockgen 1 7>;
-+ d-cache-size = <0x8000>;
-+ d-cache-line-size = <64>;
-+ d-cache-sets = <128>;
-+ i-cache-size = <0xC000>;
-+ i-cache-line-size = <64>;
-+ i-cache-sets = <192>;
-+ next-level-cache = <&cluster7_l2>;
-+ };
-+
-+ cluster0_l2: l2-cache0 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+
-+ cluster1_l2: l2-cache1 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+
-+ cluster2_l2: l2-cache2 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+
-+ cluster3_l2: l2-cache3 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+
-+ cluster4_l2: l2-cache4 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+
-+ cluster5_l2: l2-cache5 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+
-+ cluster6_l2: l2-cache6 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+
-+ cluster7_l2: l2-cache7 {
-+ compatible = "cache";
-+ cache-size = <0x100000>;
-+ cache-line-size = <64>;
-+ cache-sets = <1024>;
-+ cache-level = <2>;
-+ };
-+ };
-+
-+ gic: interrupt-controller@6000000 {
-+ compatible = "arm,gic-v3";
-+ reg = <0x0 0x06000000 0 0x10000>, // GIC Dist
-+ <0x0 0x06200000 0 0x200000>, // GICR (RD_base +
-+ // SGI_base)
-+ <0x0 0x0c0c0000 0 0x2000>, // GICC
-+ <0x0 0x0c0d0000 0 0x1000>, // GICH
-+ <0x0 0x0c0e0000 0 0x20000>; // GICV
-+ #interrupt-cells = <3>;
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ ranges;
-+ interrupt-controller;
-+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
-+
-+ its: gic-its@6020000 {
-+ compatible = "arm,gic-v3-its";
-+ msi-controller;
-+ reg = <0x0 0x6020000 0 0x20000>;
-+ };
-+ };
-+
-+ timer {
-+ compatible = "arm,armv8-timer";
-+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+
-+ pmu {
-+ compatible = "arm,cortex-a72-pmu";
-+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
-+ };
-+
-+ psci {
-+ compatible = "arm,psci-0.2";
-+ method = "smc";
-+ };
-+
-+ memory@80000000 {
-+ // DRAM space - 1, size : 2 GB DRAM
-+ device_type = "memory";
-+ reg = <0x00000000 0x80000000 0 0x80000000>;
-+ };
-+
-+ ddr1: memory-controller@1080000 {
-+ compatible = "fsl,qoriq-memory-controller";
-+ reg = <0x0 0x1080000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-+ little-endian;
-+ };
-+
-+ ddr2: memory-controller@1090000 {
-+ compatible = "fsl,qoriq-memory-controller";
-+ reg = <0x0 0x1090000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
-+ little-endian;
-+ };
-+
-+ sysclk: sysclk {
-+ compatible = "fixed-clock";
-+ #clock-cells = <0>;
-+ clock-frequency = <100000000>;
-+ clock-output-names = "sysclk";
-+ };
-+
-+ soc {
-+ compatible = "simple-bus";
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
-+
-+ crypto: crypto@8000000 {
-+ compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
-+ fsl,sec-era = <10>;
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ ranges = <0x0 0x00 0x8000000 0x100000>;
-+ reg = <0x00 0x8000000 0x0 0x100000>;
-+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
-+ dma-coherent;
-+ status = "disabled";
-+
-+ sec_jr0: jr@10000 {
-+ compatible = "fsl,sec-v5.0-job-ring",
-+ "fsl,sec-v4.0-job-ring";
-+ reg = <0x10000 0x10000>;
-+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+
-+ sec_jr1: jr@20000 {
-+ compatible = "fsl,sec-v5.0-job-ring",
-+ "fsl,sec-v4.0-job-ring";
-+ reg = <0x20000 0x10000>;
-+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+
-+ sec_jr2: jr@30000 {
-+ compatible = "fsl,sec-v5.0-job-ring",
-+ "fsl,sec-v4.0-job-ring";
-+ reg = <0x30000 0x10000>;
-+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+
-+ sec_jr3: jr@40000 {
-+ compatible = "fsl,sec-v5.0-job-ring",
-+ "fsl,sec-v4.0-job-ring";
-+ reg = <0x40000 0x10000>;
-+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
-+ };
-+ };
-+
-+ clockgen: clock-controller@1300000 {
-+ compatible = "fsl,lx2160a-clockgen";
-+ reg = <0 0x1300000 0 0xa0000>;
-+ #clock-cells = <2>;
-+ clocks = <&sysclk>;
-+ };
-+
-+ dcfg: syscon@1e00000 {
-+ compatible = "fsl,lx2160a-dcfg", "syscon";
-+ reg = <0x0 0x1e00000 0x0 0x10000>;
-+ little-endian;
-+ };
-+
-+ /* WRIOP0: 0x8b8_0000, E-MDIO1: 0x1_6000 */
-+ emdio1: mdio@8b96000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8b96000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ little-endian; /* force the driver in LE mode */
-+ status = "disabled";
-+ };
-+
-+ /* WRIOP0: 0x8b8_0000, E-MDIO2: 0x1_7000 */
-+ emdio2: mdio@8b97000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8b97000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ little-endian; /* force the driver in LE mode */
-+ status = "disabled";
-+ };
-+
-+ pcs_mdio1: mdio@0x8c07000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c07000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio2: mdio@0x8c0b000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c0b000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio3: mdio@0x8c0f000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c0f000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio4: mdio@0x8c13000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c13000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio5: mdio@0x8c17000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c17000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio6: mdio@0x8c1b000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c1b000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio7: mdio@0x8c1f000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c1f000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ pcs_mdio8: mdio@0x8c23000 {
-+ compatible = "fsl,fman-memac-mdio";
-+ reg = <0x0 0x8c23000 0x0 0x1000>;
-+ device_type = "mdio";
-+ little-endian;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ };
-+
-+ serdes1: serdes@1ea0000 {
-+ compatible = "fsl,serdes-28g";
-+ reg = <0x0 0x1ea0000 0 0x00002000>;
-+ little-endian;
-+ };
-+
-+ i2c0: i2c@2000000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2000000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ i2c1: i2c@2010000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2010000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ status = "disabled";
-+ };
-+
-+ i2c2: i2c@2020000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2020000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ status = "disabled";
-+ };
-+
-+ i2c3: i2c@2030000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2030000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ status = "disabled";
-+ };
-+
-+ i2c4: i2c@2040000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2040000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ i2c5: i2c@2050000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2050000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ status = "disabled";
-+ };
-+
-+ i2c6: i2c@2060000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2060000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ status = "disabled";
-+ };
-+
-+ i2c7: i2c@2070000 {
-+ compatible = "fsl,vf610-i2c";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2070000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
-+ clock-names = "i2c";
-+ clocks = <&clockgen 4 7>;
-+ status = "disabled";
-+ };
-+
-+ dspi0: spi@2100000 {
-+ compatible = "fsl,lx2160a-dspi", "fsl,ls2085a-dspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2100000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 7>;
-+ clock-names = "dspi";
-+ spi-num-chipselects = <5>;
-+ bus-num = <0>;
-+ status = "disabled";
-+ };
-+
-+ dspi1: spi@2110000 {
-+ compatible = "fsl,lx2160a-dspi", "fsl,ls2085a-dspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2110000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 7>;
-+ clock-names = "dspi";
-+ spi-num-chipselects = <5>;
-+ bus-num = <1>;
-+ status = "disabled";
-+ };
-+
-+ dspi2: spi@2120000 {
-+ compatible = "fsl,lx2160a-dspi", "fsl,ls2085a-dspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x2120000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 7>;
-+ clock-names = "dspi";
-+ spi-num-chipselects = <5>;
-+ bus-num = <2>;
-+ status = "disabled";
-+ };
-+
-+ esdhc0: esdhc@2140000 {
-+ compatible = "fsl,esdhc";
-+ reg = <0x0 0x2140000 0x0 0x10000>;
-+ interrupts = <0 28 0x4>; /* Level high type */
-+ clocks = <&clockgen 4 1>;
-+ voltage-ranges = <1800 1800 3300 3300>;
-+ sdhci,auto-cmd12;
-+ little-endian;
-+ bus-width = <4>;
-+ status = "disabled";
-+ };
-+
-+ esdhc1: esdhc@2150000 {
-+ compatible = "fsl,esdhc";
-+ reg = <0x0 0x2150000 0x0 0x10000>;
-+ interrupts = <0 63 0x4>; /* Level high type */
-+ clocks = <&clockgen 4 1>;
-+ voltage-ranges = <1800 1800 3300 3300>;
-+ sdhci,auto-cmd12;
-+ broken-cd;
-+ little-endian;
-+ bus-width = <4>;
-+ status = "disabled";
-+ };
-+
-+ uart0: serial@21c0000 {
-+ compatible = "arm,sbsa-uart","arm,pl011";
-+ reg = <0x0 0x21c0000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
-+ current-speed = <115200>;
-+ status = "disabled";
-+ };
-+
-+ uart1: serial@21d0000 {
-+ compatible = "arm,sbsa-uart","arm,pl011";
-+ reg = <0x0 0x21d0000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
-+ current-speed = <115200>;
-+ status = "disabled";
-+ };
-+
-+ uart2: serial@21e0000 {
-+ compatible = "arm,sbsa-uart","arm,pl011";
-+ reg = <0x0 0x21e0000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
-+ current-speed = <115200>;
-+ status = "disabled";
-+ };
-+
-+ uart3: serial@21f0000 {
-+ compatible = "arm,sbsa-uart","arm,pl011";
-+ reg = <0x0 0x21f0000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
-+ current-speed = <115200>;
-+ status = "disabled";
-+ };
-+
-+ gpio0: gpio@2300000 {
-+ compatible = "fsl,qoriq-gpio";
-+ reg = <0x0 0x2300000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
-+ gpio-controller;
-+ little-endian;
-+ #gpio-cells = <2>;
-+ interrupt-controller;
-+ #interrupt-cells = <2>;
-+ };
-+
-+ gpio1: gpio@2310000 {
-+ compatible = "fsl,qoriq-gpio";
-+ reg = <0x0 0x2310000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
-+ gpio-controller;
-+ little-endian;
-+ #gpio-cells = <2>;
-+ interrupt-controller;
-+ #interrupt-cells = <2>;
-+ };
-+
-+ gpio2: gpio@2320000 {
-+ compatible = "fsl,qoriq-gpio";
-+ reg = <0x0 0x2320000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
-+ gpio-controller;
-+ little-endian;
-+ #gpio-cells = <2>;
-+ interrupt-controller;
-+ #interrupt-cells = <2>;
-+ };
-+
-+ gpio3: gpio@2330000 {
-+ compatible = "fsl,qoriq-gpio";
-+ reg = <0x0 0x2330000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
-+ gpio-controller;
-+ little-endian;
-+ #gpio-cells = <2>;
-+ interrupt-controller;
-+ #interrupt-cells = <2>;
-+ };
-+
-+ watchdog@23a0000 {
-+ compatible = "arm,sbsa-gwdt";
-+ reg = <0x0 0x23a0000 0 0x1000>,
-+ <0x0 0x2390000 0 0x1000>;
-+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
-+ timeout-sec = <30>;
-+ };
-+
-+ ftm0: ftm0@2800000 {
-+ compatible = "fsl,ftm-alarm", "fsl,lx2160a-ftm-alarm";
-+ reg = <0x0 0x2800000 0x0 0x10000>,
-+ <0x0 0x1e34050 0x0 0x4>;
-+ reg-names = "ftm", "FlexTimer1";
-+ interrupts = <0 44 0x4>;
-+ status = "okay";
-+ };
-+
-+ usb0: usb@3100000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3100000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ status = "disabled";
-+ };
-+
-+ usb1: usb@3110000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3110000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ snps,host-vbus-glitches;
-+ status = "disabled";
-+ };
-+
-+ smmu: iommu@5000000 {
-+ compatible = "arm,mmu-500";
-+ reg = <0 0x5000000 0 0x800000>;
-+ #iommu-cells = <1>;
-+ #global-interrupts = <14>;
-+ // global secure fault
-+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
-+ // combined secure
-+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
-+ // global non-secure fault
-+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
-+ // combined non-secure
-+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
-+ // performance counter interrupts 0-9
-+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
-+ // per context interrupt, 64 interrupts
-+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
-+ dma-coherent;
-+ };
-+
-+ fsl_mc: fsl-mc@80c000000 {
-+ compatible = "fsl,qoriq-mc";
-+ reg = <0x00000008 0x0c000000 0 0x40>,
-+ <0x00000000 0x08340000 0 0x40000>;
-+ msi-parent = <&its>;
-+ /* iommu-map property is fixed up by u-boot */
-+ iommu-map = <0 &smmu 0 0>;
-+ dma-coherent;
-+ #address-cells = <3>;
-+ #size-cells = <1>;
-+
-+ /*
-+ * Region type 0x0 - MC portals
-+ * Region type 0x1 - QBMAN portals
-+ */
-+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
-+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
-+
-+ /*
-+ * Define the maximum number of MACs present on the SoC.
-+ */
-+ dpmacs {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ dpmac1: dpmac@1 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x1>;
-+ };
-+
-+ dpmac2: dpmac@2 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x2>;
-+ };
-+
-+ dpmac3: dpmac@3 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x3>;
-+ };
-+
-+ dpmac4: dpmac@4 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x4>;
-+ };
-+
-+ dpmac5: dpmac@5 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x5>;
-+ };
-+
-+ dpmac6: dpmac@6 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x6>;
-+ };
-+
-+ dpmac7: dpmac@7 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x7>;
-+ };
-+
-+ dpmac8: dpmac@8 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x8>;
-+ };
-+
-+ dpmac9: dpmac@9 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x9>;
-+ };
-+
-+ dpmac10: dpmac@a {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xa>;
-+ };
-+
-+ dpmac11: dpmac@b {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xb>;
-+ };
-+
-+ dpmac12: dpmac@c {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xc>;
-+ };
-+
-+ dpmac13: dpmac@d {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xd>;
-+ };
-+
-+ dpmac14: dpmac@e {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xe>;
-+ };
-+
-+ dpmac15: dpmac@f {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xf>;
-+ };
-+
-+ dpmac16: dpmac@10 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x10>;
-+ };
-+
-+ dpmac17: dpmac@11 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x11>;
-+ };
-+
-+ dpmac18: dpmac@12 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0x12>;
-+ };
-+ };
-+ };
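The two-entry "ranges" property in the fsl-mc node above is easier to read once decoded. With #address-cells = <3> on the child side (region type plus a 64-bit offset), two parent address cells and one size cell, it describes two translation windows; the breakdown below is derived from the values shown and is added here purely as a reading aid, not as part of the removed patch:

/* child <type hi lo>   parent <hi lo>     size        meaning                        */
/* <0x0  0x0 0x0        0x8 0x0c000000     0x4000000>  MC portals,    64 MiB window   */
/* <0x1  0x0 0x0        0x8 0x18000000     0x8000000>  QBMan portals, 128 MiB window  */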
-+
-+ fspi: flexspi@20c0000 {
-+ status = "disabled";
-+ compatible = "nxp,lx2160a-fspi";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x20c0000 0x0 0x10000>,
-+ <0x0 0x20000000 0x0 0x10000000>;
-+ reg-names = "FSPI", "FSPI-memory";
-+ interrupts = <0 25 0x4>; /* Level high type */
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "fspi_en", "fspi";
-+ };
-+
-+ sata0: sata@3200000 {
-+ status = "disabled";
-+ compatible = "fsl,lx2160a-ahci";
-+ reg = <0x0 0x3200000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 3>;
-+ dma-coherent;
-+ };
-+
-+ sata1: sata@3210000 {
-+ status = "disabled";
-+ compatible = "fsl,lx2160a-ahci";
-+ reg = <0x0 0x3210000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 3>;
-+ dma-coherent;
-+ };
-+
-+ sata2: sata@3220000 {
-+ status = "disabled";
-+ compatible = "fsl,lx2160a-ahci";
-+ reg = <0x0 0x3220000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 3>;
-+ dma-coherent;
-+ };
-+
-+ sata3: sata@3230000 {
-+ status = "disabled";
-+ compatible = "fsl,lx2160a-ahci";
-+ reg = <0x0 0x3230000 0x0 0x10000>;
-+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 4 3>;
-+ dma-coherent;
-+ };
-+
-+ pcie@3400000 {
-+ compatible = "fsl,lx2160a-pcie";
-+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
-+ 0x80 0x00000000 0x0 0x00001000>; /* configuration space */
-+ reg-names = "csr_axi_slave", "config_axi_slave";
-+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
-+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
-+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-+ interrupt-names = "aer", "pme", "intr";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ apio-wins = <8>;
-+ ppio-wins = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3400000 {
-+ compatible = "fsl,lx2160a-pcie-ep";
-+ reg = <0x00 0x03400000 0x0 0x00100000
-+ 0x80 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ob-windows = <256>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3500000 {
-+ compatible = "fsl,lx2160a-pcie";
-+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
-+ 0x88 0x00000000 0x0 0x00001000>; /* configuration space */
-+ reg-names = "csr_axi_slave", "config_axi_slave";
-+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
-+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
-+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-+ interrupt-names = "aer", "pme", "intr";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ apio-wins = <8>;
-+ ppio-wins = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3500000 {
-+ compatible = "fsl,lx2160a-pcie-ep";
-+ reg = <0x00 0x03500000 0x0 0x00100000
-+ 0x88 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ob-windows = <256>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3600000 {
-+ compatible = "fsl,lx2160a-pcie";
-+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
-+ 0x90 0x00000000 0x0 0x00001000>; /* configuration space */
-+ reg-names = "csr_axi_slave", "config_axi_slave";
-+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
-+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
-+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-+ interrupt-names = "aer", "pme", "intr";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ apio-wins = <8>;
-+ ppio-wins = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x82000000 0x0 0x40000000 0x90 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3600000 {
-+ compatible = "fsl,lx2160a-pcie-ep";
-+ reg = <0x00 0x03600000 0x0 0x00100000
-+ 0x90 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ob-windows = <256>;
-+ max-functions = <2>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3700000 {
-+ compatible = "fsl,lx2160a-pcie";
-+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
-+ 0x98 0x00000000 0x0 0x00001000>; /* configuration space */
-+ reg-names = "csr_axi_slave", "config_axi_slave";
-+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
-+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
-+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-+ interrupt-names = "aer", "pme", "intr";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ apio-wins = <8>;
-+ ppio-wins = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x82000000 0x0 0x40000000 0x98 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3700000 {
-+ compatible = "fsl,lx2160a-pcie-ep";
-+ reg = <0x00 0x03700000 0x0 0x00100000
-+ 0x98 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ob-windows = <256>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3800000 {
-+ compatible = "fsl,lx2160a-pcie";
-+ reg = <0x00 0x03800000 0x0 0x00100000 /* controller registers */
-+ 0xa0 0x00000000 0x0 0x00001000>; /* configuration space */
-+ reg-names = "csr_axi_slave", "config_axi_slave";
-+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
-+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
-+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-+ interrupt-names = "aer", "pme", "intr";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ apio-wins = <8>;
-+ ppio-wins = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x82000000 0x0 0x40000000 0xa0 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3800000 {
-+ compatible = "fsl,lx2160a-pcie-ep";
-+ reg = <0x00 0x03800000 0x0 0x00100000
-+ 0xa0 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ob-windows = <256>;
-+ max-functions = <2>;
-+ status = "disabled";
-+ };
-+
-+ pcie@3900000 {
-+ compatible = "fsl,lx2160a-pcie";
-+ reg = <0x00 0x03900000 0x0 0x00100000 /* controller registers */
-+ 0xa8 0x00000000 0x0 0x00001000>; /* configuration space */
-+ reg-names = "csr_axi_slave", "config_axi_slave";
-+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
-+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
-+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-+ interrupt-names = "aer", "pme", "intr";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ apio-wins = <8>;
-+ ppio-wins = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x82000000 0x0 0x40000000 0xa8 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
-+ status = "disabled";
-+ };
-+
-+ pcie_ep@3900000 {
-+ compatible = "fsl,lx2160a-pcie-ep";
-+ reg = <0x00 0x03900000 0x0 0x00100000
-+ 0xa8 0x00000000 0x8 0x00000000>;
-+ reg-names = "regs", "addr_space";
-+ num-ob-windows = <256>;
-+ status = "disabled";
-+ };
-+
-+ };
-+
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
-+ };
-+ };
-+};
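The lx2160a SoC include above leaves most peripherals with status = "disabled"; a board-level .dts that includes it is expected to switch on the blocks it actually wires up through label references. A minimal illustrative fragment follows (not part of the removed patch; the node labels are taken from the .dtsi above):

/* hypothetical board .dts fragment */
&i2c0 {
	status = "okay";
};

&esdhc0 {
	status = "okay";
};

&uart0 {
	status = "okay";
};

Board-specific details (I2C child devices, card-detect polarity, and so on) would live in the same label-referenced fragments.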
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-tmu-map1.dtsi
-@@ -0,0 +1,99 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-+/*
-+ * Device Tree Include file for Thermal Monitor Unit.
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Tang Yuantian <andy.tang@nxp.com>
-+ *
-+ */
-+
-+&thermal_zones {
-+ thermal-zone0 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert0>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone1 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert1>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone2 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert2>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone3 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert3>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone4 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert4>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone5 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert5>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone6 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert6>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone7 {
-+ cooling-maps {
-+ map1 {
-+ trip = <&alert7>;
-+ cooling-device =
-+ <&cooling_map1 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-tmu-map2.dtsi
-@@ -0,0 +1,99 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-+/*
-+ * Device Tree Include file for Thermal Monitor Unit.
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Tang Yuantian <andy.tang@nxp.com>
-+ *
-+ */
-+
-+&thermal_zones {
-+ thermal-zone0 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert0>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone1 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert1>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone2 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert2>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone3 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert3>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone4 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert4>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone5 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert5>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone6 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert6>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone7 {
-+ cooling-maps {
-+ map2 {
-+ trip = <&alert7>;
-+ cooling-device =
-+ <&cooling_map2 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-tmu-map3.dtsi
-@@ -0,0 +1,99 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-+/*
-+ * Device Tree Include file for Thermal Monitor Unit.
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Tang Yuantian <andy.tang@nxp.com>
-+ *
-+ */
-+
-+&thermal_zones {
-+ thermal-zone0 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert0>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone1 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert1>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone2 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert2>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone3 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert3>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone4 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert4>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone5 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert5>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone6 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert6>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone7 {
-+ cooling-maps {
-+ map3 {
-+ trip = <&alert7>;
-+ cooling-device =
-+ <&cooling_map3 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-tmu.dtsi
-@@ -0,0 +1,251 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-+/*
-+ * Device Tree Include file for Thermal Monitor Unit.
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Tang Yuantian <andy.tang@nxp.com>
-+ *
-+ */
-+
-+thermal_zones: thermal-zones {
-+ thermal_zone0: thermal-zone0 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 0>;
-+ status = "disabled";
-+
-+ trips {
-+ alert0: alert0 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit0: crit0 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert0>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone1 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 1>;
-+ status = "disabled";
-+
-+ trips {
-+ alert1: alert1 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit1: crit1 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert1>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone2 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 2>;
-+ status = "disabled";
-+
-+ trips {
-+ alert2: alert2 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit2: crit2 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert2>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone3 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 3>;
-+ status = "disabled";
-+
-+ trips {
-+ alert3: alert3 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit3: crit3 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert3>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone4 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 4>;
-+ status = "disabled";
-+
-+ trips {
-+ alert4: alert4 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit4: crit4 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert4>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone5 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 5>;
-+ status = "disabled";
-+
-+ trips {
-+ alert5: alert5 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit5: crit5 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert5>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone6 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 6>;
-+ status = "disabled";
-+
-+ trips {
-+ alert6: alert6 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit6: crit6 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert6>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+
-+ thermal-zone7 {
-+ polling-delay-passive = <1000>;
-+ polling-delay = <5000>;
-+ thermal-sensors = <&tmu 7>;
-+ status = "disabled";
-+
-+ trips {
-+ alert7: alert7 {
-+ temperature = <75000>;
-+ hysteresis = <2000>;
-+ type = "passive";
-+ };
-+
-+ crit7: crit7 {
-+ temperature = <85000>;
-+ hysteresis = <2000>;
-+ type = "critical";
-+ };
-+ };
-+
-+ cooling-maps {
-+ map0 {
-+ trip = <&alert7>;
-+ cooling-device =
-+ <&cooling_map0 THERMAL_NO_LIMIT
-+ THERMAL_NO_LIMIT>;
-+ };
-+ };
-+ };
-+};
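fsl-tmu.dtsi above only declares the zones, trips and a map0 cooling entry; every zone starts out disabled, and the &cooling_map0 and &tmu labels are expected to come from the SoC .dtsi that includes it (typically a CPU node with #cooling-cells = <2> and the TMU block itself). A hedged sketch of how an including .dtsi would turn a zone on, assuming those labels exist:

/* illustrative only; cooling_map0 and tmu are assumed to be defined elsewhere */
&thermal_zone0 {
	status = "okay";
};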
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals-sdk.dtsi
-@@ -0,0 +1,55 @@
-+/*
-+ * QorIQ BMan SDK Portals device tree nodes
-+ *
-+ * Copyright 2011-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+ */
-+
-+&bportals {
-+ bman-portal@0 {
-+ cell-index = <0>;
-+ };
-+
-+ bman-portal@10000 {
-+ cell-index = <1>;
-+ };
-+
-+ bman-portal@20000 {
-+ cell-index = <2>;
-+ };
-+
-+ bman-portal@30000 {
-+ cell-index = <3>;
-+ };
-+
-+ bman-portal@40000 {
-+ cell-index = <4>;
-+ };
-+
-+ bman-portal@50000 {
-+ cell-index = <5>;
-+ };
-+
-+ bman-portal@60000 {
-+ cell-index = <6>;
-+ };
-+
-+ bman-portal@70000 {
-+ cell-index = <7>;
-+ };
-+
-+ bman-portal@80000 {
-+ cell-index = <8>;
-+ };
-+
-+ bman-portal@90000 {
-+ cell-index = <9>;
-+ };
-+
-+ bman-bpids@0 {
-+ compatible = "fsl,bpid-range";
-+ fsl,bpid-range = <32 32>;
-+ };
-+};
---- a/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
-@@ -1,9 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ BMan Portals device tree
- *
- * Copyright 2011-2016 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- &bportals {
-@@ -68,4 +68,10 @@
- reg = <0x80000 0x4000>, <0x4080000 0x4000>;
- interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
- };
-+
-+ bman-portal@90000 {
-+ compatible = "fsl,bman-portal";
-+ reg = <0x90000 0x4000>, <0x4090000 0x4000>;
-+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
-+ };
- };
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
-@@ -0,0 +1,97 @@
-+/*
-+ * QorIQ FMan v3 10g port #1 device tree stub [ controller @ offset 0x400000 ]
-+ *
-+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+fsldpaa: fsl,dpaa {
-+ compatible = "fsl,ls1043a-dpaa", "simple-bus", "fsl,dpaa";
-+ ethernet@0 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet0>;
-+ dma-coherent;
-+ };
-+ ethernet@1 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet1>;
-+ dma-coherent;
-+ };
-+ ethernet@2 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet2>;
-+ dma-coherent;
-+ };
-+ ethernet@3 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet3>;
-+ dma-coherent;
-+ };
-+ ethernet@4 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet4>;
-+ dma-coherent;
-+ };
-+ ethernet@5 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet5>;
-+ dma-coherent;
-+ };
-+ ethernet@8 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet6>;
-+ dma-coherent;
-+ };
-+ ethernet@6 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet2>;
-+ dma-coherent;
-+ fpmevt-sel = <0>;
-+ };
-+ ethernet@7 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet3>;
-+ dma-coherent;
-+ fpmevt-sel = <1>;
-+ };
-+ ethernet@10 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet4>;
-+ dma-coherent;
-+ fpmevt-sel = <2>;
-+ };
-+ ethernet@11 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet5>;
-+ dma-coherent;
-+ fpmevt-sel = <3>;
-+ };
-+};
-+
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
-@@ -1,27 +1,28 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 10g port #0 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x10: port@90000 {
- cell-index = <0x10>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
- reg = <0x90000 0x1000>;
- fsl,fman-10g-port;
- };
-
- fman0_tx_0x30: port@b0000 {
- cell-index = <0x30>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
- reg = <0xb0000 0x1000>;
- fsl,fman-10g-port;
-+ fsl,qman-channel-id = <0x800>;
- };
-
-- ethernet@f0000 {
-+ mac9: ethernet@f0000 {
- cell-index = <0x8>;
- compatible = "fsl,fman-memac";
- reg = <0xf0000 0x1000>;
-@@ -29,7 +30,7 @@ fman@1a00000 {
- pcsphy-handle = <&pcsphy6>;
- };
-
-- mdio@f1000 {
-+ mdio9: mdio@f1000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
-@@ -1,27 +1,28 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 10g port #1 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x11: port@91000 {
- cell-index = <0x11>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
- reg = <0x91000 0x1000>;
- fsl,fman-10g-port;
- };
-
- fman0_tx_0x31: port@b1000 {
- cell-index = <0x31>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
- reg = <0xb1000 0x1000>;
- fsl,fman-10g-port;
-+ fsl,qman-channel-id = <0x801>;
- };
-
-- ethernet@f2000 {
-+ mac10: ethernet@f2000 {
- cell-index = <0x9>;
- compatible = "fsl,fman-memac";
- reg = <0xf2000 0x1000>;
-@@ -29,7 +30,7 @@ fman@1a00000 {
- pcsphy-handle = <&pcsphy7>;
- };
-
-- mdio@f3000 {
-+ mdio10: mdio@f3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #0 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x08: port@88000 {
- cell-index = <0x8>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x88000 0x1000>;
- };
-
- fman0_tx_0x28: port@a8000 {
- cell-index = <0x28>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xa8000 0x1000>;
-+ fsl,qman-channel-id = <0x802>;
- };
-
- ethernet@e0000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #1 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x09: port@89000 {
- cell-index = <0x9>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x89000 0x1000>;
- };
-
- fman0_tx_0x29: port@a9000 {
- cell-index = <0x29>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xa9000 0x1000>;
-+ fsl,qman-channel-id = <0x803>;
- };
-
- ethernet@e2000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #2 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0a: port@8a000 {
- cell-index = <0xa>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8a000 0x1000>;
- };
-
- fman0_tx_0x2a: port@aa000 {
- cell-index = <0x2a>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xaa000 0x1000>;
-+ fsl,qman-channel-id = <0x804>;
- };
-
- ethernet@e4000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #3 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0b: port@8b000 {
- cell-index = <0xb>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8b000 0x1000>;
- };
-
- fman0_tx_0x2b: port@ab000 {
- cell-index = <0x2b>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xab000 0x1000>;
-+ fsl,qman-channel-id = <0x805>;
- };
-
- ethernet@e6000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #4 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0c: port@8c000 {
- cell-index = <0xc>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8c000 0x1000>;
- };
-
- fman0_tx_0x2c: port@ac000 {
- cell-index = <0x2c>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xac000 0x1000>;
-+ fsl,qman-channel-id = <0x806>;
- };
-
- ethernet@e8000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #5 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0d: port@8d000 {
- cell-index = <0xd>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8d000 0x1000>;
- };
-
- fman0_tx_0x2d: port@ad000 {
- cell-index = <0x2d>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xad000 0x1000>;
-+ fsl,qman-channel-id = <0x807>;
- };
-
- ethernet@ea000 {
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
-@@ -0,0 +1,47 @@
-+/*
-+ * QorIQ FMan v3 OH ports device tree
-+ *
-+ * Copyright 2012-2015 Freescale Semiconductor Inc.
-+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+ */
-+
-+fman@1a00000 {
-+
-+ fman0_oh1: port@82000 {
-+ cell-index = <0>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x82000 0x1000>;
-+ };
-+
-+ fman0_oh2: port@83000 {
-+ cell-index = <1>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x83000 0x1000>;
-+ };
-+
-+ fman0_oh3: port@84000 {
-+ cell-index = <2>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x84000 0x1000>;
-+ };
-+
-+ fman0_oh4: port@85000 {
-+ cell-index = <3>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x85000 0x1000>;
-+ };
-+
-+ fman0_oh5: port@86000 {
-+ cell-index = <4>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x86000 0x1000>;
-+ };
-+
-+ fman0_oh6: port@87000 {
-+ cell-index = <5>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x87000 0x1000>;
-+ };
-+
-+};
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
-@@ -1,9 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman0: fman@1a00000 {
-@@ -11,53 +11,104 @@ fman0: fman@1a00000 {
- #size-cells = <1>;
- cell-index = <0>;
- compatible = "fsl,fman";
-- ranges = <0x0 0x0 0x1a00000 0x100000>;
-- reg = <0x0 0x1a00000 0x0 0x100000>;
-+ ranges = <0x0 0x0 0x1a00000 0xfe000>;
-+ reg = <0x0 0x1a00000 0x0 0xfe000>;
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 3 0>;
- clock-names = "fmanclk";
- fsl,qman-channel-range = <0x800 0x10>;
-+ ptimer-handle = <&ptp_timer0>;
-+
-+ cc {
-+ compatible = "fsl,fman-cc";
-+ };
-
- muram@0 {
- compatible = "fsl,fman-muram";
- reg = <0x0 0x60000>;
- };
-
-+ bmi@80000 {
-+ compatible = "fsl,fman-bmi";
-+ reg = <0x80000 0x400>;
-+ };
-+
-+ qmi@80400 {
-+ compatible = "fsl,fman-qmi";
-+ reg = <0x80400 0x400>;
-+ };
-+
- fman0_oh_0x2: port@82000 {
- cell-index = <0x2>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x82000 0x1000>;
-+ fsl,qman-channel-id = <0x809>;
- };
-
- fman0_oh_0x3: port@83000 {
- cell-index = <0x3>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x83000 0x1000>;
-+ fsl,qman-channel-id = <0x80a>;
- };
-
- fman0_oh_0x4: port@84000 {
- cell-index = <0x4>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x84000 0x1000>;
-+ fsl,qman-channel-id = <0x80b>;
- };
-
- fman0_oh_0x5: port@85000 {
- cell-index = <0x5>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x85000 0x1000>;
-+ fsl,qman-channel-id = <0x80c>;
- };
-
- fman0_oh_0x6: port@86000 {
- cell-index = <0x6>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x86000 0x1000>;
-+ fsl,qman-channel-id = <0x80d>;
- };
-
- fman0_oh_0x7: port@87000 {
- cell-index = <0x7>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x87000 0x1000>;
-+ fsl,qman-channel-id = <0x80e>;
-+ };
-+
-+ policer@c0000 {
-+ compatible = "fsl,fman-policer";
-+ reg = <0xc0000 0x1000>;
-+ };
-+
-+ keygen@c1000 {
-+ compatible = "fsl,fman-keygen";
-+ reg = <0xc1000 0x1000>;
-+ };
-+
-+ dma@c2000 {
-+ compatible = "fsl,fman-dma";
-+ reg = <0xc2000 0x1000>;
-+ };
-+
-+ fpm@c3000 {
-+ compatible = "fsl,fman-fpm";
-+ reg = <0xc3000 0x1000>;
-+ };
-+
-+ parser@c7000 {
-+ compatible = "fsl,fman-parser";
-+ reg = <0xc7000 0x1000>;
-+ };
-+
-+ vsps@dc000 {
-+ compatible = "fsl,fman-vsps";
-+ reg = <0xdc000 0x1000>;
- };
-
- mdio0: mdio@fc000 {
-@@ -73,9 +124,11 @@ fman0: fman@1a00000 {
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
- reg = <0xfd000 0x1000>;
- };
-+};
-
-- ptp_timer0: ptp-timer@fe000 {
-- compatible = "fsl,fman-ptp-timer";
-- reg = <0xfe000 0x1000>;
-- };
-+ptp_timer0: ptp-timer@1afe000 {
-+ compatible = "fsl,fman-ptp-timer";
-+ reg = <0x0 0x1afe000 0x0 0x1000>;
-+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
-+ clocks = <&clockgen 3 0>;
- };
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals-sdk.dtsi
-@@ -0,0 +1,38 @@
-+/*
-+ * QorIQ QMan SDK Portals device tree nodes
-+ *
-+ * Copyright 2011-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+ */
-+
-+&qportals {
-+ qman-fqids@0 {
-+ compatible = "fsl,fqid-range";
-+ fsl,fqid-range = <256 256>;
-+ };
-+
-+ qman-fqids@1 {
-+ compatible = "fsl,fqid-range";
-+ fsl,fqid-range = <32768 32768>;
-+ };
-+
-+ qman-pools@0 {
-+ compatible = "fsl,pool-channel-range";
-+ fsl,pool-channel-range = <0x401 0xf>;
-+ };
-+
-+ qman-cgrids@0 {
-+ compatible = "fsl,cgrid-range";
-+ fsl,cgrid-range = <0 256>;
-+ };
-+
-+ qman-ceetm@0 {
-+ compatible = "fsl,qman-ceetm";
-+ fsl,ceetm-lfqid-range = <0xf00000 0x1000>;
-+ fsl,ceetm-sp-range = <0 16>;
-+ fsl,ceetm-lni-range = <0 8>;
-+ fsl,ceetm-channel-range = <0 32>;
-+ };
-+};
---- a/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
-@@ -1,9 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ QMan Portals device tree
- *
- * Copyright 2011-2016 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- &qportals {
-@@ -77,4 +77,11 @@
- interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
- cell-index = <8>;
- };
-+
-+ qportal9: qman-portal@90000 {
-+ compatible = "fsl,qman-portal";
-+ reg = <0x90000 0x4000>, <0x4090000 0x4000>;
-+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
-+ cell-index = <9>;
-+ };
- };
---- a/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
-+++ b/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
-@@ -330,3 +330,32 @@
- &sata {
- status = "disabled";
- };
-+
-+/* Additions for Layerscape SDK (4.4/4.9) Kernel only
-+ * These kernels need additional setup for FMan/QMan DMA shared memory
-+ */
-+
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
---- a/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
-+++ b/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
-@@ -251,3 +251,32 @@
- &sata {
- status = "disabled";
- };
-+
-+/* Additions for Layerscape SDK (4.4/4.9) Kernel only
-+ * These kernels need additional setup for FMan/QMan DMA shared memory
-+ */
-+
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
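The Traverse board additions above retarget &bman_fbpr, &qman_fqd and &qman_pfdr for the SDK QBMan drivers; those labels are assumed to resolve to reserved-memory carve-outs declared elsewhere in the ls1043a .dtsi. A minimal sketch of the expected shape (property values are illustrative, not taken from the patch):

/* hypothetical reserved-memory nodes backing the labels above */
reserved-memory {
	#address-cells = <2>;
	#size-cells = <2>;
	ranges;

	bman_fbpr: bman-fbpr {
		compatible = "shared-dma-pool";
		size = <0 0x1000000>;		/* illustrative */
		alignment = <0 0x1000000>;
		no-map;
	};

	qman_fqd: qman-fqd {
		compatible = "shared-dma-pool";
		size = <0 0x400000>;		/* illustrative */
		alignment = <0 0x400000>;
		no-map;
	};

	qman_pfdr: qman-pfdr {
		compatible = "shared-dma-pool";
		size = <0 0x2000000>;		/* illustrative */
		alignment = <0 0x2000000>;
		no-map;
	};
};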
diff --git a/target/linux/layerscape/patches-4.14/701-dpaa2-dpio-support-layerscape.patch b/target/linux/layerscape/patches-4.14/701-dpaa2-dpio-support-layerscape.patch
deleted file mode 100644
index ce5dfdbe86..0000000000
--- a/target/linux/layerscape/patches-4.14/701-dpaa2-dpio-support-layerscape.patch
+++ /dev/null
@@ -1,2992 +0,0 @@
-From 80df9e62536d7cac5c03a4fcb494c6ddf0723633 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:27 +0800
-Subject: [PATCH] dpaa2-dpio: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch of dpaa2-dpio for layerscape
-
-Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Haiying Wang <Haiying.Wang@nxp.com>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Li Yang <leoyang.li@nxp.com>
-Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Youri Querry <youri.querry_1@nxp.com>
----
- drivers/staging/fsl-mc/Kconfig | 1 +
- drivers/staging/fsl-mc/Makefile | 1 +
- drivers/staging/fsl-mc/bus/Kconfig | 5 +-
- drivers/staging/fsl-mc/bus/Makefile | 3 +-
- drivers/staging/fsl-mc/bus/dpbp-cmd.h | 28 +-
- drivers/staging/fsl-mc/bus/dpbp.c | 28 +-
- drivers/staging/fsl-mc/bus/dpcon-cmd.h | 28 +-
- drivers/staging/fsl-mc/bus/dpcon.c | 32 +-
- drivers/staging/fsl-mc/bus/dpio/Makefile | 3 +-
- drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h | 29 +-
- drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 99 ++--
- .../staging/fsl-mc/bus/dpio/dpio-service.c | 295 +++++++++---
- drivers/staging/fsl-mc/bus/dpio/dpio.c | 51 +--
- drivers/staging/fsl-mc/bus/dpio/dpio.h | 32 +-
- .../staging/fsl-mc/bus/dpio/qbman-portal.c | 421 ++++++++++++++----
- .../staging/fsl-mc/bus/dpio/qbman-portal.h | 134 ++++--
- drivers/staging/fsl-mc/bus/dpmcp.c | 28 +-
- drivers/staging/fsl-mc/bus/dprc-driver.c | 4 +-
- drivers/staging/fsl-mc/bus/dprc.c | 28 +-
- drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 4 +-
- drivers/staging/fsl-mc/bus/fsl-mc-bus.c | 4 +-
- drivers/staging/fsl-mc/bus/fsl-mc-msi.c | 4 +-
- drivers/staging/fsl-mc/bus/fsl-mc-private.h | 4 +-
- .../fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 4 +-
- drivers/staging/fsl-mc/bus/mc-io.c | 28 +-
- drivers/staging/fsl-mc/bus/mc-sys.c | 28 +-
- drivers/staging/fsl-mc/include/dpaa2-fd.h | 288 ++++++++++--
- drivers/staging/fsl-mc/include/dpaa2-global.h | 27 +-
- drivers/staging/fsl-mc/include/dpaa2-io.h | 110 +++--
- drivers/staging/fsl-mc/include/dpbp.h | 29 +-
- drivers/staging/fsl-mc/include/dpcon.h | 32 +-
- drivers/staging/fsl-mc/include/dpopr.h | 110 +++++
- drivers/staging/fsl-mc/include/mc.h | 4 +-
- 33 files changed, 1233 insertions(+), 693 deletions(-)
- create mode 100644 drivers/staging/fsl-mc/include/dpopr.h
-
---- a/drivers/staging/fsl-mc/Kconfig
-+++ b/drivers/staging/fsl-mc/Kconfig
-@@ -1 +1,2 @@
-+# SPDX-License-Identifier: GPL-2.0
- source "drivers/staging/fsl-mc/bus/Kconfig"
---- a/drivers/staging/fsl-mc/Makefile
-+++ b/drivers/staging/fsl-mc/Makefile
-@@ -1,2 +1,3 @@
-+# SPDX-License-Identifier: GPL-2.0
- # Freescale Management Complex (MC) bus drivers
- obj-$(CONFIG_FSL_MC_BUS) += bus/
---- a/drivers/staging/fsl-mc/bus/Kconfig
-+++ b/drivers/staging/fsl-mc/bus/Kconfig
-@@ -1,10 +1,9 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # DPAA2 fsl-mc bus
- #
- # Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- #
--# This file is released under the GPLv2
--#
-
- config FSL_MC_BUS
- bool "QorIQ DPAA2 fsl-mc bus driver"
-@@ -18,7 +17,7 @@ config FSL_MC_BUS
-
- config FSL_MC_DPIO
- tristate "QorIQ DPAA2 DPIO driver"
-- depends on FSL_MC_BUS && ARCH_LAYERSCAPE
-+ depends on FSL_MC_BUS
- help
- Driver for the DPAA2 DPIO object. A DPIO provides queue and
- buffer management facilities for software to interact with
---- a/drivers/staging/fsl-mc/bus/Makefile
-+++ b/drivers/staging/fsl-mc/bus/Makefile
-@@ -1,10 +1,9 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # Freescale Management Complex (MC) bus drivers
- #
- # Copyright (C) 2014 Freescale Semiconductor, Inc.
- #
--# This file is released under the GPLv2
--#
- obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
-
- mc-bus-driver-objs := fsl-mc-bus.o \
---- a/drivers/staging/fsl-mc/bus/dpbp-cmd.h
-+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
-@@ -1,33 +1,7 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef _FSL_DPBP_CMD_H
- #define _FSL_DPBP_CMD_H
---- a/drivers/staging/fsl-mc/bus/dpbp.c
-+++ b/drivers/staging/fsl-mc/bus/dpbp.c
-@@ -1,33 +1,7 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/kernel.h>
- #include "../include/mc.h"
---- a/drivers/staging/fsl-mc/bus/dpcon-cmd.h
-+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
-@@ -1,33 +1,7 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef _FSL_DPCON_CMD_H
- #define _FSL_DPCON_CMD_H
---- a/drivers/staging/fsl-mc/bus/dpcon.c
-+++ b/drivers/staging/fsl-mc/bus/dpcon.c
-@@ -1,33 +1,7 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/kernel.h>
- #include "../include/mc.h"
---- a/drivers/staging/fsl-mc/bus/dpio/Makefile
-+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
-@@ -1,9 +1,8 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # QorIQ DPAA2 DPIO driver
- #
-
--subdir-ccflags-y := -Werror
--
- obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
-
- fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
---- a/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
-@@ -1,34 +1,8 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef _FSL_DPIO_CMD_H
- #define _FSL_DPIO_CMD_H
-@@ -51,6 +25,7 @@
- #define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
- #define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
- #define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
-+#define DPIO_CMDID_RESET DPIO_CMD(0x005)
-
- struct dpio_cmd_open {
- __le32 dpio_id;
---- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
-@@ -1,33 +1,8 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
-- * Copyright NXP 2016
-+ * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
- #include <linux/types.h>
-@@ -38,6 +13,7 @@
- #include <linux/msi.h>
- #include <linux/dma-mapping.h>
- #include <linux/delay.h>
-+#include <linux/io.h>
-
- #include "../../include/mc.h"
- #include "../../include/dpaa2-io.h"
-@@ -54,6 +30,8 @@ struct dpio_priv {
- struct dpaa2_io *io;
- };
-
-+static cpumask_var_t cpus_unused_mask;
-+
- static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
- {
- struct device *dev = (struct device *)arg;
-@@ -113,7 +91,7 @@ static int dpaa2_dpio_probe(struct fsl_m
- struct dpio_priv *priv;
- int err = -ENOMEM;
- struct device *dev = &dpio_dev->dev;
-- static int next_cpu = -1;
-+ int possible_next_cpu;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
-@@ -135,6 +113,12 @@ static int dpaa2_dpio_probe(struct fsl_m
- goto err_open;
- }
-
-+ err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpio_reset() failed\n");
-+ goto err_reset;
-+ }
-+
- err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
- &dpio_attrs);
- if (err) {
-@@ -155,26 +139,35 @@ static int dpaa2_dpio_probe(struct fsl_m
- desc.dpio_id = dpio_dev->obj_desc.id;
-
- /* get the cpu to use for the affinity hint */
-- if (next_cpu == -1)
-- next_cpu = cpumask_first(cpu_online_mask);
-- else
-- next_cpu = cpumask_next(next_cpu, cpu_online_mask);
--
-- if (!cpu_possible(next_cpu)) {
-+ possible_next_cpu = cpumask_first(cpus_unused_mask);
-+ if (possible_next_cpu >= nr_cpu_ids) {
- dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
- err = -ERANGE;
- goto err_allocate_irqs;
- }
-- desc.cpu = next_cpu;
-+ desc.cpu = possible_next_cpu;
-+ cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
-
-- /*
-- * Set the CENA regs to be the cache inhibited area of the portal to
-- * avoid coherency issues if a user migrates to another core.
-- */
-- desc.regs_cena = ioremap_wc(dpio_dev->regions[1].start,
-- resource_size(&dpio_dev->regions[1]));
-- desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
-- resource_size(&dpio_dev->regions[1]));
-+ if (dpio_dev->obj_desc.region_count < 3) {
-+ /* No support for DDR backed portals, use classic mapping */
-+ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
-+ resource_size(&dpio_dev->regions[0]));
-+ } else {
-+ desc.regs_cena = memremap(dpio_dev->regions[2].start,
-+ resource_size(&dpio_dev->regions[2]),
-+ MEMREMAP_WB);
-+ }
-+ if (IS_ERR(desc.regs_cena)) {
-+ dev_err(dev, "ioremap_cache_ns failed\n");
-+ goto err_allocate_irqs;
-+ }
-+
-+ desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
-+ resource_size(&dpio_dev->regions[1]));
-+ if (!desc.regs_cinh) {
-+ dev_err(dev, "devm_ioremap failed\n");
-+ goto err_allocate_irqs;
-+ }
-
- err = fsl_mc_allocate_irqs(dpio_dev);
- if (err) {
-@@ -186,7 +179,7 @@ static int dpaa2_dpio_probe(struct fsl_m
- if (err)
- goto err_register_dpio_irq;
-
-- priv->io = dpaa2_io_create(&desc);
-+ priv->io = dpaa2_io_create(&desc, dev);
- if (!priv->io) {
- dev_err(dev, "dpaa2_io_create failed\n");
- goto err_dpaa2_io_create;
-@@ -196,7 +189,6 @@ static int dpaa2_dpio_probe(struct fsl_m
- dev_dbg(dev, " receives_notifications = %d\n",
- desc.receives_notifications);
- dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-- fsl_mc_portal_free(dpio_dev->mc_io);
-
- return 0;
-
-@@ -207,6 +199,7 @@ err_register_dpio_irq:
- err_allocate_irqs:
- dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
- err_get_attr:
-+err_reset:
- dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
- err_open:
- fsl_mc_portal_free(dpio_dev->mc_io);
-@@ -227,7 +220,7 @@ static int dpaa2_dpio_remove(struct fsl_
- {
- struct device *dev;
- struct dpio_priv *priv;
-- int err;
-+ int err = 0, cpu;
-
- dev = &dpio_dev->dev;
- priv = dev_get_drvdata(dev);
-@@ -236,11 +229,8 @@ static int dpaa2_dpio_remove(struct fsl_
-
- dpio_teardown_irqs(dpio_dev);
-
-- err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
-- if (err) {
-- dev_err(dev, "MC portal allocation failed\n");
-- goto err_mcportal;
-- }
-+ cpu = dpaa2_io_get_cpu(priv->io);
-+ cpumask_set_cpu(cpu, cpus_unused_mask);
-
- err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
- &dpio_dev->mc_handle);
-@@ -261,7 +251,7 @@ static int dpaa2_dpio_remove(struct fsl_
-
- err_open:
- fsl_mc_portal_free(dpio_dev->mc_io);
--err_mcportal:
-+
- return err;
- }
-
-@@ -285,11 +275,16 @@ static struct fsl_mc_driver dpaa2_dpio_d
-
- static int dpio_driver_init(void)
- {
-+ if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL))
-+ return -ENOMEM;
-+ cpumask_copy(cpus_unused_mask, cpu_online_mask);
-+
- return fsl_mc_driver_register(&dpaa2_dpio_driver);
- }
-
- static void dpio_driver_exit(void)
- {
-+ free_cpumask_var(cpus_unused_mask);
- fsl_mc_driver_unregister(&dpaa2_dpio_driver);
- }
- module_init(dpio_driver_init);
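(The probe/remove changes above replace the old static next_cpu counter with a cpumask of CPUs that do not yet own a DPIO portal. For orientation, a minimal sketch of that assignment logic, reduced to its core; pick_unused_cpu() is a hypothetical helper, not part of the patch — the real driver does this inline in dpaa2_dpio_probe() and gives the CPU back in dpaa2_dpio_remove() via dpaa2_io_get_cpu().)

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Allocated with zalloc_cpumask_var() and seeded from cpu_online_mask at init,
 * exactly as dpio_driver_init() does above.
 */
static cpumask_var_t cpus_unused_mask;

/* Hand out the first CPU that does not yet own a DPIO portal. */
static int pick_unused_cpu(void)
{
	int cpu = cpumask_first(cpus_unused_mask);

	if (cpu >= nr_cpu_ids)
		return -ERANGE;		/* more DPIO objects than CPUs */

	cpumask_clear_cpu(cpu, cpus_unused_mask);
	return cpu;
}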
---- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
-@@ -1,33 +1,8 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/types.h>
- #include "../../include/mc.h"
-@@ -43,7 +18,6 @@
- #include "qbman-portal.h"
-
- struct dpaa2_io {
-- atomic_t refs;
- struct dpaa2_io_desc dpio_desc;
- struct qbman_swp_desc swp_desc;
- struct qbman_swp *swp;
-@@ -53,6 +27,7 @@ struct dpaa2_io {
- /* protect notifications list */
- spinlock_t lock_notifications;
- struct list_head notifications;
-+ struct device *dev;
- };
-
- struct dpaa2_io_store {
-@@ -83,7 +58,7 @@ static inline struct dpaa2_io *service_s
- * If cpu == -1, choose the current cpu, with no guarantees about
- * potentially being migrated away.
- */
-- if (unlikely(cpu < 0))
-+ if (cpu < 0)
- cpu = smp_processor_id();
-
- /* If a specific cpu was requested, pick it up immediately */
-@@ -95,6 +70,10 @@ static inline struct dpaa2_io *service_s
- if (d)
- return d;
-
-+ d = service_select_by_cpu(d, -1);
-+ if (d)
-+ return d;
-+
- spin_lock(&dpio_list_lock);
- d = list_entry(dpio_list.next, struct dpaa2_io, node);
- list_del(&d->node);
-@@ -105,15 +84,34 @@ static inline struct dpaa2_io *service_s
- }
-
- /**
-+ * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
-+ * @cpu: the cpu id
-+ *
-+ * Return the affine dpaa2_io service, or NULL if there is no service affined
-+ * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
-+ * service.
-+ */
-+struct dpaa2_io *dpaa2_io_service_select(int cpu)
-+{
-+ if (cpu == DPAA2_IO_ANY_CPU)
-+ return service_select(NULL);
-+
-+ return service_select_by_cpu(NULL, cpu);
-+}
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
-+
-+/**
- * dpaa2_io_create() - create a dpaa2_io object.
- * @desc: the dpaa2_io descriptor
-+ * @dev: the actual DPIO device
- *
- * Activates a "struct dpaa2_io" corresponding to the given config of an actual
- * DPIO object.
- *
- * Return a valid dpaa2_io object for success, or NULL for failure.
- */
--struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
-+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
-+ struct device *dev)
- {
- struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
-
-@@ -126,7 +124,6 @@ struct dpaa2_io *dpaa2_io_create(const s
- return NULL;
- }
-
-- atomic_set(&obj->refs, 1);
- obj->dpio_desc = *desc;
- obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
- obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
-@@ -156,9 +153,10 @@ struct dpaa2_io *dpaa2_io_create(const s
- dpio_by_cpu[desc->cpu] = obj;
- spin_unlock(&dpio_list_lock);
-
-+ obj->dev = dev;
-+
- return obj;
- }
--EXPORT_SYMBOL(dpaa2_io_create);
-
- /**
- * dpaa2_io_down() - release the dpaa2_io object.
-@@ -171,11 +169,8 @@ EXPORT_SYMBOL(dpaa2_io_create);
- */
- void dpaa2_io_down(struct dpaa2_io *d)
- {
-- if (!atomic_dec_and_test(&d->refs))
-- return;
- kfree(d);
- }
--EXPORT_SYMBOL(dpaa2_io_down);
-
- #define DPAA_POLL_MAX 32
-
-@@ -206,7 +201,7 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io
- u64 q64;
-
- q64 = qbman_result_SCN_ctx(dq);
-- ctx = (void *)q64;
-+ ctx = (void *)(uintptr_t)q64;
- ctx->cb(ctx);
- } else {
- pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
-@@ -222,13 +217,19 @@ done:
- qbman_swp_interrupt_set_inhibit(swp, 0);
- return IRQ_HANDLED;
- }
--EXPORT_SYMBOL(dpaa2_io_irq);
-+
-+int dpaa2_io_get_cpu(struct dpaa2_io *d)
-+{
-+ return d->dpio_desc.cpu;
-+}
-+EXPORT_SYMBOL(dpaa2_io_get_cpu);
-
- /**
- * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
- * notifications on the given DPIO service.
- * @d: the given DPIO service.
- * @ctx: the notification context.
-+ * @dev: the device that requests the registration
- *
- * The caller should make the MC command to attach a DPAA2 object to
- * a DPIO after this function completes successfully. In that way:
-@@ -243,7 +244,8 @@ EXPORT_SYMBOL(dpaa2_io_irq);
- * Return 0 for success, or -ENODEV for failure.
- */
- int dpaa2_io_service_register(struct dpaa2_io *d,
-- struct dpaa2_io_notification_ctx *ctx)
-+ struct dpaa2_io_notification_ctx *ctx,
-+ struct device *dev)
- {
- unsigned long irqflags;
-
-@@ -251,8 +253,10 @@ int dpaa2_io_service_register(struct dpa
- if (!d)
- return -ENODEV;
-
-+ device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
-+
- ctx->dpio_id = d->dpio_desc.dpio_id;
-- ctx->qman64 = (u64)ctx;
-+ ctx->qman64 = (u64)(uintptr_t)ctx;
- ctx->dpio_private = d;
- spin_lock_irqsave(&d->lock_notifications, irqflags);
- list_add(&ctx->node, &d->notifications);
-@@ -263,20 +267,23 @@ int dpaa2_io_service_register(struct dpa
- return qbman_swp_CDAN_set_context_enable(d->swp,
- (u16)ctx->id,
- ctx->qman64);
-+
- return 0;
- }
--EXPORT_SYMBOL(dpaa2_io_service_register);
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
-
- /**
- * dpaa2_io_service_deregister - The opposite of 'register'.
- * @service: the given DPIO service.
- * @ctx: the notification context.
-+ * @dev: the device that requests to be deregistered
- *
- * This function should be called only after sending the MC command to
- * to detach the notification-producing device from the DPIO.
- */
- void dpaa2_io_service_deregister(struct dpaa2_io *service,
-- struct dpaa2_io_notification_ctx *ctx)
-+ struct dpaa2_io_notification_ctx *ctx,
-+ struct device *dev)
- {
- struct dpaa2_io *d = ctx->dpio_private;
- unsigned long irqflags;
-@@ -287,8 +294,10 @@ void dpaa2_io_service_deregister(struct
- spin_lock_irqsave(&d->lock_notifications, irqflags);
- list_del(&ctx->node);
- spin_unlock_irqrestore(&d->lock_notifications, irqflags);
-+
-+ device_link_remove(dev, d->dev);
- }
--EXPORT_SYMBOL(dpaa2_io_service_deregister);
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
-
- /**
- * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
-@@ -322,7 +331,7 @@ int dpaa2_io_service_rearm(struct dpaa2_
-
- return err;
- }
--EXPORT_SYMBOL(dpaa2_io_service_rearm);
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
-
- /**
- * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
-@@ -385,7 +394,7 @@ int dpaa2_io_service_pull_channel(struct
-
- return err;
- }
--EXPORT_SYMBOL(dpaa2_io_service_pull_channel);
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
-
- /**
- * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
-@@ -441,7 +450,7 @@ int dpaa2_io_service_enqueue_qd(struct d
-
- return qbman_swp_enqueue(d->swp, &ed, fd);
- }
--EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd);
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
-
- /**
- * dpaa2_io_service_release() - Release buffers to a buffer pool.
-@@ -453,7 +462,7 @@ EXPORT_SYMBOL(dpaa2_io_service_enqueue_q
- * Return 0 for success, and negative error code for failure.
- */
- int dpaa2_io_service_release(struct dpaa2_io *d,
-- u32 bpid,
-+ u16 bpid,
- const u64 *buffers,
- unsigned int num_buffers)
- {
-@@ -468,7 +477,7 @@ int dpaa2_io_service_release(struct dpaa
-
- return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
- }
--EXPORT_SYMBOL(dpaa2_io_service_release);
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
-
- /**
- * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
-@@ -482,7 +491,7 @@ EXPORT_SYMBOL(dpaa2_io_service_release);
- * Eg. if the buffer pool is empty, this will return zero.
- */
- int dpaa2_io_service_acquire(struct dpaa2_io *d,
-- u32 bpid,
-+ u16 bpid,
- u64 *buffers,
- unsigned int num_buffers)
- {
-@@ -499,7 +508,7 @@ int dpaa2_io_service_acquire(struct dpaa
-
- return err;
- }
--EXPORT_SYMBOL(dpaa2_io_service_acquire);
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
-
- /*
- * 'Stores' are reusable memory blocks for holding dequeue results, and to
-@@ -553,7 +562,7 @@ struct dpaa2_io_store *dpaa2_io_store_cr
-
- return ret;
- }
--EXPORT_SYMBOL(dpaa2_io_store_create);
-+EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
-
- /**
- * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
-@@ -567,7 +576,7 @@ void dpaa2_io_store_destroy(struct dpaa2
- kfree(s->alloced_addr);
- kfree(s);
- }
--EXPORT_SYMBOL(dpaa2_io_store_destroy);
-+EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
-
- /**
- * dpaa2_io_store_next() - Determine when the next dequeue result is available.
-@@ -610,9 +619,193 @@ struct dpaa2_dq *dpaa2_io_store_next(str
- if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
- ret = NULL;
- } else {
-+ prefetch(&s->vaddr[s->idx]);
- *is_last = 0;
- }
-
- return ret;
- }
--EXPORT_SYMBOL(dpaa2_io_store_next);
-+EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
-+
-+/**
-+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
-+ * @d: the given DPIO object.
-+ * @fqid: the id of frame queue to be queried.
-+ * @fcnt: the queried frame count.
-+ * @bcnt: the queried byte count.
-+ *
-+ * Knowing the FQ count at run-time can be useful in debugging situations.
-+ * The instantaneous frame and byte counts are returned.
-+ *
-+ * Return 0 for a successful query, and negative error code if query fails.
-+ */
-+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
-+ u32 *fcnt, u32 *bcnt)
-+{
-+ struct qbman_fq_query_np_rslt state;
-+ struct qbman_swp *swp;
-+ unsigned long irqflags;
-+ int ret;
-+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
-+
-+ swp = d->swp;
-+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
-+ ret = qbman_fq_query_state(swp, fqid, &state);
-+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
-+ if (ret)
-+ return ret;
-+ *fcnt = qbman_fq_state_frame_count(&state);
-+ *bcnt = qbman_fq_state_byte_count(&state);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
-+
-+/**
-+ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
-+ * buffer pool.
-+ * @d: the given DPIO object.
-+ * @bpid: the index of buffer pool to be queried.
-+ * @num: the queried number of buffers in the buffer pool.
-+ *
-+ * Return 0 for a successful query, and negative error code if query fails.
-+ */
-+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
-+{
-+ struct qbman_bp_query_rslt state;
-+ struct qbman_swp *swp;
-+ unsigned long irqflags;
-+ int ret;
-+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
-+
-+ swp = d->swp;
-+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
-+ ret = qbman_bp_query(swp, bpid, &state);
-+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
-+ if (ret)
-+ return ret;
-+ *num = qbman_bp_info_num_free_bufs(&state);
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
-+
-+/**
-+ * dpaa2_io_service_enqueue_orp_fq() - Enqueue a frame to a frame queue with
-+ * order restoration
-+ * @d: the given DPIO service.
-+ * @fqid: the given frame queue id.
-+ * @fd: the frame descriptor which is enqueued.
-+ * @orpid: the order restoration point ID
-+ * @seqnum: the order sequence number
-+ * @last: must be set for the final frame if seqnum is shared (split frame)
-+ *
-+ * Performs an enqueue to a frame queue using the specified order restoration
-+ * point. The QMan device will ensure that frames placed on the
-+ * queue are ordered as per the sequence number.
-+ *
-+ * In the case a frame is split, it is possible to enqueue using the same
-+ * sequence number more than once. The final frame in a shared sequence number
-+ * must be indicated by setting last = 1. For non-shared sequence numbers
-+ * last = 1 must always be set.
-+ *
-+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
-+ * or -ENODEV if there is no dpio service.
-+ */
-+int dpaa2_io_service_enqueue_orp_fq(struct dpaa2_io *d, u32 fqid,
-+ const struct dpaa2_fd *fd, u16 orpid,
-+ u16 seqnum, int last)
-+{
-+ struct qbman_eq_desc ed;
-+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
-+ qbman_eq_desc_clear(&ed);
-+ qbman_eq_desc_set_orp(&ed, 0, orpid, seqnum, !last);
-+ qbman_eq_desc_set_fq(&ed, fqid);
-+ return qbman_swp_enqueue(d->swp, &ed, fd);
-+}
-+EXPORT_SYMBOL(dpaa2_io_service_enqueue_orp_fq);
-+
-+/**
-+ * dpaa2_io_service_enqueue_orp_qd() - Enqueue a frame to a queueing destination
-+ * with order restoration
-+ * @d: the given DPIO service.
-+ * @qdid: the given queuing destination id.
-+ * @fd: the frame descriptor which is enqueued.
-+ * @orpid: the order restoration point ID
-+ * @seqnum: the order sequence number
-+ * @last: must be set for the final frame if seqnum is shared (split frame)
-+ *
-+ * Performs an enqueue to a queuing destination using the specified order
-+ * restoration point. The QMan device will ensure that frames placed on the
-+ * queue are ordered as per the sequence number.
-+ *
-+ * In the case a frame is split, it is possible to enqueue using the same
-+ * sequence number more than once. The final frame in a shared sequence number
-+ * must be indicated by setting last = 1. For non-shared sequence numbers
-+ * last = 1 must always be set.
-+ *
-+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
-+ * or -ENODEV if there is no dpio service.
-+ */
-+int dpaa2_io_service_enqueue_orp_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
-+ u16 qdbin, const struct dpaa2_fd *fd,
-+ u16 orpid, u16 seqnum, int last)
-+{
-+ struct qbman_eq_desc ed;
-+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
-+ qbman_eq_desc_clear(&ed);
-+ qbman_eq_desc_set_orp(&ed, 0, orpid, seqnum, !last);
-+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
-+ return qbman_swp_enqueue(d->swp, &ed, fd);
-+}
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_orp_qd);
-+
-+/**
-+ * dpaa2_io_service_orp_seqnum_drop() - Remove a sequence number from
-+ * an order restoration list
-+ * @d: the given DPIO service.
-+ * @orpid: Order restoration point to remove a sequence number from
-+ * @seqnum: Sequence number to remove
-+ *
-+ * Removes a frame's sequence number from an order restoration point without
-+ * enqueueing the frame. Used to indicate that the order restoration hardware
-+ * should not expect to see this sequence number. Typically used to indicate
-+ * a frame was terminated or dropped from a flow.
-+ *
-+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
-+ * or -ENODEV if there is no dpio service.
-+ */
-+int dpaa2_io_service_orp_seqnum_drop(struct dpaa2_io *d, u16 orpid, u16 seqnum)
-+{
-+ struct qbman_eq_desc ed;
-+ struct dpaa2_fd fd;
-+ unsigned long irqflags;
-+ int ret;
-+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
-+
-+ if ((d->swp->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
-+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
-+ ret = qbman_orp_drop(d->swp, orpid, seqnum);
-+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
-+ return ret;
-+ }
-+
-+ qbman_eq_desc_clear(&ed);
-+ qbman_eq_desc_set_orp_hole(&ed, orpid, seqnum);
-+ return qbman_swp_enqueue(d->swp, &ed, &fd);
-+}
-+EXPORT_SYMBOL_GPL(dpaa2_io_service_orp_seqnum_drop);
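(The order-restoration helpers added above — dpaa2_io_service_enqueue_orp_fq()/_qd() and dpaa2_io_service_orp_seqnum_drop() — are exported for consumer drivers. A hedged usage sketch follows; it is not part of the patch, the helper name is made up for illustration, and fqid/orpid/seqnum are assumed to come from the caller's own flow state. Real callers would normally retry on -EBUSY.)

#include <linux/errno.h>
#include <linux/types.h>
/* The dpaa2-io.h header from this patch is assumed to be on the include path. */

static int example_tx_ordered(u32 fqid, const struct dpaa2_fd *fd,
			      u16 orpid, u16 seqnum, bool drop)
{
	struct dpaa2_io *io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);

	if (!io)
		return -ENODEV;

	if (drop)
		/* Frame was discarded: tell ORP not to wait for this seqnum. */
		return dpaa2_io_service_orp_seqnum_drop(io, orpid, seqnum);

	/* Single (non-split) frame, so last = 1. */
	return dpaa2_io_service_enqueue_orp_fq(io, fqid, fd, orpid, seqnum, 1);
}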
---- a/drivers/staging/fsl-mc/bus/dpio/dpio.c
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
-@@ -1,34 +1,8 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/kernel.h>
- #include "../../include/mc.h"
-@@ -222,3 +196,26 @@ int dpio_get_api_version(struct fsl_mc_i
-
- return 0;
- }
-+
-+/**
-+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
---- a/drivers/staging/fsl-mc/bus/dpio/dpio.h
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
-@@ -1,34 +1,8 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPIO_H
- #define __FSL_DPIO_H
-@@ -106,4 +80,8 @@ int dpio_get_api_version(struct fsl_mc_i
- u16 *major_ver,
- u16 *minor_ver);
-
-+int dpio_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
- #endif /* __FSL_DPIO_H */
---- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
-@@ -1,33 +1,8 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
- #include <asm/cacheflush.h>
-@@ -37,23 +12,26 @@
-
- #include "qbman-portal.h"
-
--#define QMAN_REV_4000 0x04000000
--#define QMAN_REV_4100 0x04010000
--#define QMAN_REV_4101 0x04010001
--#define QMAN_REV_MASK 0xffff0000
--
- /* All QBMan command and result structures use this "valid bit" encoding */
- #define QB_VALID_BIT ((u32)0x80)
-
- /* QBMan portal management command codes */
- #define QBMAN_MC_ACQUIRE 0x30
- #define QBMAN_WQCHAN_CONFIGURE 0x46
-+#define QBMAN_MC_ORP 0x63
-
- /* CINH register offsets */
-+#define QBMAN_CINH_SWP_EQCR_PI 0x800
- #define QBMAN_CINH_SWP_EQAR 0x8c0
-+#define QBMAN_CINH_SWP_CR_RT 0x900
-+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
-+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
-+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
- #define QBMAN_CINH_SWP_DQPI 0xa00
- #define QBMAN_CINH_SWP_DCAP 0xac0
- #define QBMAN_CINH_SWP_SDQCR 0xb00
-+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
-+#define QBMAN_CINH_SWP_RCR_PI 0xc00
- #define QBMAN_CINH_SWP_RAR 0xcc0
- #define QBMAN_CINH_SWP_ISR 0xe00
- #define QBMAN_CINH_SWP_IER 0xe40
-@@ -68,6 +46,13 @@
- #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
- #define QBMAN_CENA_SWP_VDQCR 0x780
-
-+/* CENA register offsets in memory-backed mode */
-+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
-+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
-+#define QBMAN_CENA_SWP_CR_MEM 0x1600
-+#define QBMAN_CENA_SWP_RR_MEM 0x1680
-+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
-+
- /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
- #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
-
-@@ -99,6 +84,14 @@ enum qbman_sdqcr_fc {
- qbman_sdqcr_fc_up_to_3 = 1
- };
-
-+#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
-+#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
-+static inline void qbman_inval_prefetch(struct qbman_swp *p, uint32_t offset)
-+{
-+ dcivac(p->addr_cena + offset);
-+ prefetch(p->addr_cena + offset);
-+}
-+
- /* Portal Access */
-
- static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
-@@ -121,10 +114,13 @@ static inline void *qbman_get_cmd(struct
-
- #define SWP_CFG_DQRR_MF_SHIFT 20
- #define SWP_CFG_EST_SHIFT 16
-+#define SWP_CFG_CPBS_SHIFT 15
- #define SWP_CFG_WN_SHIFT 14
- #define SWP_CFG_RPM_SHIFT 12
- #define SWP_CFG_DCM_SHIFT 10
- #define SWP_CFG_EPM_SHIFT 8
-+#define SWP_CFG_VPM_SHIFT 7
-+#define SWP_CFG_CPM_SHIFT 6
- #define SWP_CFG_SD_SHIFT 5
- #define SWP_CFG_SP_SHIFT 4
- #define SWP_CFG_SE_SHIFT 3
-@@ -150,6 +146,8 @@ static inline u32 qbman_set_swp_cfg(u8 m
- ep << SWP_CFG_EP_SHIFT);
- }
-
-+#define QMAN_RT_MODE 0x00000100
-+
- /**
- * qbman_swp_init() - Create a functional object representing the given
- * QBMan portal descriptor.
-@@ -171,6 +169,8 @@ struct qbman_swp *qbman_swp_init(const s
- p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
- p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
- p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
-+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
-+ p->mr.valid_bit = QB_VALID_BIT;
-
- atomic_set(&p->vdq.available, 1);
- p->vdq.valid_bit = QB_VALID_BIT;
-@@ -188,8 +188,11 @@ struct qbman_swp *qbman_swp_init(const s
- p->addr_cena = d->cena_bar;
- p->addr_cinh = d->cinh_bar;
-
-+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
-+ memset(p->addr_cena, 0, 64 * 1024);
-+
- reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
-- 1, /* Writes Non-cacheable */
-+ 0, /* Writes cacheable */
- 0, /* EQCR_CI stashing threshold */
- 3, /* RPM: Valid bit mode, RCR in array mode */
- 2, /* DCM: Discrete consumption ack mode */
-@@ -200,6 +203,10 @@ struct qbman_swp *qbman_swp_init(const s
- 1, /* dequeue stashing priority == TRUE */
- 0, /* dequeue stashing enable == FALSE */
- 0); /* EQCR_CI stashing priority == FALSE */
-+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
-+ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
-+ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
-+ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
-
- qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
- reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
-@@ -208,6 +215,10 @@ struct qbman_swp *qbman_swp_init(const s
- return NULL;
- }
-
-+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
-+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
-+ qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
-+ }
- /*
- * SDQCR needs to be initialized to 0 when no channels are
- * being dequeued from or else the QMan HW will indicate an
-@@ -302,7 +313,10 @@ void qbman_swp_interrupt_set_inhibit(str
- */
- void *qbman_swp_mc_start(struct qbman_swp *p)
- {
-- return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
-+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
-+ else
-+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
- }
-
- /*
-@@ -313,8 +327,15 @@ void qbman_swp_mc_submit(struct qbman_sw
- {
- u8 *v = cmd;
-
-- dma_wmb();
-- *v = cmd_verb | p->mc.valid_bit;
-+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-+ dma_wmb();
-+ *v = cmd_verb | p->mc.valid_bit;
-+ dccvac(cmd);
-+ } else {
-+ *v = cmd_verb | p->mc.valid_bit;
-+ dma_wmb();
-+ qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
-+ }
- }
-
- /*
-@@ -325,13 +346,28 @@ void *qbman_swp_mc_result(struct qbman_s
- {
- u32 *ret, verb;
-
-- ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-+ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-+ /* Remove the valid-bit - command completed if the rest
-+ * is non-zero.
-+ */
-+ verb = ret[0] & ~QB_VALID_BIT;
-+ if (!verb)
-+ return NULL;
-+ p->mc.valid_bit ^= QB_VALID_BIT;
-+ } else {
-+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
-+ /* Command completed if the valid bit is toggled */
-+ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
-+ return NULL;
-+ /* Command completed if the rest is non-zero */
-+ verb = ret[0] & ~QB_VALID_BIT;
-+ if (!verb)
-+ return NULL;
-+ p->mr.valid_bit ^= QB_VALID_BIT;
-+ }
-
-- /* Remove the valid-bit - command completed if the rest is non-zero */
-- verb = ret[0] & ~QB_VALID_BIT;
-- if (!verb)
-- return NULL;
-- p->mc.valid_bit ^= QB_VALID_BIT;
- return ret;
- }
-
-@@ -370,6 +406,43 @@ void qbman_eq_desc_set_no_orp(struct qbm
- d->verb |= enqueue_rejects_to_fq;
- }
-
-+/**
-+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
-+ * @d: the enqueue descriptor.
-+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
-+ * @oprid: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ * @incomplete: indicates that more fragments using the same sequence
-+ * number will follow (i.e. this is not the last fragment).
-+ */
-+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
-+ u16 oprid, u16 seqnum, int incomplete)
-+{
-+ d->verb |= (1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
-+ if (respond_success)
-+ d->verb |= enqueue_response_always;
-+ else
-+ d->verb |= enqueue_rejects_to_fq;
-+ d->orpid = cpu_to_le16(oprid);
-+ d->seqnum = cpu_to_le16((!!incomplete << 14) | seqnum);
-+}
-+
-+/**
-+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
-+ * without any enqueue
-+ * @d: the enqueue descriptor.
-+ * @oprid: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ */
-+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, u16 oprid,
-+ u16 seqnum)
-+{
-+ d->verb |= (1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT) | enqueue_empty;
-+ d->orpid = cpu_to_le16(oprid);
-+ d->seqnum = cpu_to_le16(seqnum);
-+}
-+
- /*
- * Exactly one of the following descriptor "targets" should be set. (Calling any
- * one of these will replace the effect of any prior call to one of these.)
-@@ -408,6 +481,18 @@ void qbman_eq_desc_set_qd(struct qbman_e
- #define EQAR_VB(eqar) ((eqar) & 0x80)
- #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
-
-+static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
-+ u8 idx)
-+{
-+ if (idx < 16)
-+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
-+ QMAN_RT_MODE);
-+ else
-+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
-+ (idx - 16) * 4,
-+ QMAN_RT_MODE);
-+}
-+
- /**
- * qbman_swp_enqueue() - Issue an enqueue command
- * @s: the software portal used for enqueue
-@@ -429,12 +514,29 @@ int qbman_swp_enqueue(struct qbman_swp *
- return -EBUSY;
-
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
-- memcpy(&p->dca, &d->dca, 31);
-+ /* This is mapped as DEVICE type memory, writes are
-+ * with address alignment:
-+ * desc.dca address alignment = 1
-+ * desc.seqnum address alignment = 2
-+ * desc.orpid address alignment = 4
-+ * desc.tgtid address alignment = 8
-+ */
-+ p->dca = d->dca;
-+ p->seqnum = d->seqnum;
-+ p->orpid = d->orpid;
-+ memcpy(&p->tgtid, &d->tgtid, 24);
- memcpy(&p->fd, fd, sizeof(*fd));
-
-- /* Set the verb byte, have to substitute in the valid-bit */
-- dma_wmb();
-- p->verb = d->verb | EQAR_VB(eqar);
-+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-+ /* Set the verb byte, have to substitute in the valid-bit */
-+ dma_wmb();
-+ p->verb = d->verb | EQAR_VB(eqar);
-+ dccvac(p);
-+ } else {
-+ p->verb = d->verb | EQAR_VB(eqar);
-+ dma_wmb();
-+ qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
-+ }
-
- return 0;
- }
-@@ -522,7 +624,7 @@ void qbman_pull_desc_set_storage(struct
- int stash)
- {
- /* save the virtual address */
-- d->rsp_addr_virt = (u64)storage;
-+ d->rsp_addr_virt = (u64)(uintptr_t)storage;
-
- if (!storage) {
- d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
-@@ -615,18 +717,28 @@ int qbman_swp_pull(struct qbman_swp *s,
- atomic_inc(&s->vdq.available);
- return -EBUSY;
- }
-- s->vdq.storage = (void *)d->rsp_addr_virt;
-- p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
-+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
-+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
-+ else
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
- p->numf = d->numf;
- p->tok = QMAN_DQ_TOKEN_VALID;
- p->dq_src = d->dq_src;
- p->rsp_addr = d->rsp_addr;
- p->rsp_addr_virt = d->rsp_addr_virt;
-- dma_wmb();
--
-- /* Set the verb byte, have to substitute in the valid-bit */
-- p->verb = d->verb | s->vdq.valid_bit;
-- s->vdq.valid_bit ^= QB_VALID_BIT;
-+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-+ dma_wmb();
-+ /* Set the verb byte, have to substitute in the valid-bit */
-+ p->verb = d->verb | s->vdq.valid_bit;
-+ s->vdq.valid_bit ^= QB_VALID_BIT;
-+ dccvac(p);
-+ } else {
-+ p->verb = d->verb | s->vdq.valid_bit;
-+ s->vdq.valid_bit ^= QB_VALID_BIT;
-+ dma_wmb();
-+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
-+ }
-
- return 0;
- }
-@@ -680,11 +792,13 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
- s->dqrr.next_idx, pi);
- s->dqrr.reset_bug = 0;
- }
-- prefetch(qbman_get_cmd(s,
-- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
-+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
- }
-
-- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-+ else
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
- verb = p->dq.verb;
-
- /*
-@@ -696,8 +810,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
- * knew from reading PI.
- */
- if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
-- prefetch(qbman_get_cmd(s,
-- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
-+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
- return NULL;
- }
- /*
-@@ -720,7 +833,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
- (flags & DPAA2_DQ_STAT_EXPIRED))
- atomic_inc(&s->vdq.available);
-
-- prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
-+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-
- return p;
- }
-@@ -836,18 +949,29 @@ int qbman_swp_release(struct qbman_swp *
- return -EBUSY;
-
- /* Start the release command */
-- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
-+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
-+ else
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
- /* Copy the caller's buffer pointers to the command */
- for (i = 0; i < num_buffers; i++)
- p->buf[i] = cpu_to_le64(buffers[i]);
- p->bpid = d->bpid;
-
-- /*
-- * Set the verb byte, have to substitute in the valid-bit and the number
-- * of buffers.
-- */
-- dma_wmb();
-- p->verb = d->verb | RAR_VB(rar) | num_buffers;
-+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
-+ /*
-+ * Set the verb byte, have to substitute in the valid-bit
-+ * and the number of buffers.
-+ */
-+ dma_wmb();
-+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
-+ dccvac(p);
-+ } else {
-+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
-+ dma_wmb();
-+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
-+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
-+ }
-
- return 0;
- }
-@@ -855,7 +979,7 @@ int qbman_swp_release(struct qbman_swp *
- struct qbman_acquire_desc {
- u8 verb;
- u8 reserved;
-- u16 bpid;
-+ __le16 bpid;
- u8 num;
- u8 reserved2[59];
- };
-@@ -863,10 +987,10 @@ struct qbman_acquire_desc {
- struct qbman_acquire_rslt {
- u8 verb;
- u8 rslt;
-- u16 reserved;
-+ __le16 reserved;
- u8 num;
- u8 reserved2[3];
-- u64 buf[7];
-+ __le64 buf[7];
- };
-
- /**
-@@ -929,7 +1053,7 @@ int qbman_swp_acquire(struct qbman_swp *
- struct qbman_alt_fq_state_desc {
- u8 verb;
- u8 reserved[3];
-- u32 fqid;
-+ __le32 fqid;
- u8 reserved2[56];
- };
-
-@@ -952,7 +1076,7 @@ int qbman_swp_alt_fq_state(struct qbman_
- if (!p)
- return -EBUSY;
-
-- p->fqid = cpu_to_le32(fqid) & ALT_FQ_FQID_MASK;
-+ p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
-
- /* Complete the management command */
- r = qbman_swp_mc_complete(s, p, alt_fq_verb);
-@@ -978,11 +1102,11 @@ int qbman_swp_alt_fq_state(struct qbman_
- struct qbman_cdan_ctrl_desc {
- u8 verb;
- u8 reserved;
-- u16 ch;
-+ __le16 ch;
- u8 we;
- u8 ctrl;
-- u16 reserved2;
-- u64 cdan_ctx;
-+ __le16 reserved2;
-+ __le64 cdan_ctx;
- u8 reserved3[48];
-
- };
-@@ -990,7 +1114,7 @@ struct qbman_cdan_ctrl_desc {
- struct qbman_cdan_ctrl_rslt {
- u8 verb;
- u8 rslt;
-- u16 ch;
-+ __le16 ch;
- u8 reserved[60];
- };
-
-@@ -1031,5 +1155,152 @@ int qbman_swp_CDAN_set(struct qbman_swp
- return -EIO;
- }
-
-+ return 0;
-+}
-+
-+#define QBMAN_RESPONSE_VERB_MASK 0x7f
-+#define QBMAN_FQ_QUERY_NP 0x45
-+#define QBMAN_BP_QUERY 0x32
-+
-+struct qbman_fq_query_desc {
-+ u8 verb;
-+ u8 reserved[3];
-+ __le32 fqid;
-+ u8 reserved2[56];
-+};
-+
-+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
-+ struct qbman_fq_query_np_rslt *r)
-+{
-+ struct qbman_fq_query_desc *p;
-+ void *resp;
-+
-+ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ /* FQID is a 24 bit value */
-+ p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
-+ resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
-+ if (!resp) {
-+ pr_err("qbman: Query FQID %d NP fields failed, no response\n",
-+ fqid);
-+ return -EIO;
-+ }
-+ *r = *(struct qbman_fq_query_np_rslt *)resp;
-+ /* Decode the outcome */
-+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
-+
-+ /* Determine success or failure */
-+ if (r->rslt != QBMAN_MC_RSLT_OK) {
-+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
-+ fqid, r->rslt);
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+
-+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
-+{
-+ return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
-+}
-+
-+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
-+{
-+ return le32_to_cpu(r->byte_cnt);
-+}
-+
-+struct qbman_bp_query_desc {
-+ u8 verb;
-+ u8 reserved;
-+ __le16 bpid;
-+ u8 reserved2[60];
-+};
-+
-+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
-+ struct qbman_bp_query_rslt *r)
-+{
-+ struct qbman_bp_query_desc *p;
-+ void *resp;
-+
-+ p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ p->bpid = cpu_to_le16(bpid);
-+ resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
-+ if (!resp) {
-+ pr_err("qbman: Query BPID %d fields failed, no response\n",
-+ bpid);
-+ return -EIO;
-+ }
-+ *r = *(struct qbman_bp_query_rslt *)resp;
-+ /* Decode the outcome */
-+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
-+
-+ /* Determine success or failure */
-+ if (r->rslt != QBMAN_MC_RSLT_OK) {
-+ pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
-+ bpid, r->rslt);
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+
-+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
-+{
-+ return le32_to_cpu(a->fill);
-+}
-+
-+struct qbman_orp_cmd_desc {
-+ u8 verb;
-+ u8 reserved;
-+ u8 cid;
-+ u8 reserved2;
-+ u16 orpid;
-+ u16 seqnum;
-+ u8 reserved3[56];
-+};
-+
-+struct qbman_orp_cmd_rslt {
-+ u8 verb;
-+ u8 rslt;
-+ u8 cid;
-+ u8 reserved1[61];
-+};
-+
-+int qbman_orp_drop(struct qbman_swp *s, u16 orpid, u16 seqnum)
-+{
-+ struct qbman_orp_cmd_desc *p;
-+ struct qbman_orp_cmd_rslt *r;
-+ void *resp;
-+
-+ p = (struct qbman_orp_cmd_desc *)qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ p->cid = 0x7;
-+ p->orpid = cpu_to_le16(orpid);
-+ p->seqnum = cpu_to_le16(seqnum);
-+
-+ resp = qbman_swp_mc_complete(s, p, QBMAN_MC_ORP);
-+ if (!resp) {
-+ pr_err("qbman: Drop sequence num %d orpid 0x%x failed, no response\n",
-+ seqnum, orpid);
-+ return -EIO;
-+ }
-+ r = (struct qbman_orp_cmd_rslt *)resp;
-+ /* Decode the outcome */
-+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ORP);
-+
-+ /* Determine success or failure */
-+ if (r->rslt != QBMAN_MC_RSLT_OK) {
-+ pr_err("Drop seqnum %d of prpid 0x%x failed, code=0x%02x\n",
-+ seqnum, orpid, r->rslt);
-+ return -EIO;
-+ }
-+
- return 0;
- }
---- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
-@@ -1,46 +1,28 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-- * Copyright 2016 NXP
-+ * Copyright 2016-2019 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_QBMAN_PORTAL_H
- #define __FSL_QBMAN_PORTAL_H
-
- #include "../../include/dpaa2-fd.h"
-
-+#define QMAN_REV_4000 0x04000000
-+#define QMAN_REV_4100 0x04010000
-+#define QMAN_REV_4101 0x04010001
-+#define QMAN_REV_5000 0x05000000
-+
-+#define QMAN_REV_MASK 0xffff0000
-+
- struct dpaa2_dq;
- struct qbman_swp;
-
- /* qbman software portal descriptor structure */
- struct qbman_swp_desc {
- void *cena_bar; /* Cache-enabled portal base address */
-- void *cinh_bar; /* Cache-inhibited portal base address */
-+ void __iomem *cinh_bar; /* Cache-inhibited portal base address */
- u32 qman_version;
- };
-
-@@ -57,8 +39,8 @@ struct qbman_pull_desc {
- u8 numf;
- u8 tok;
- u8 reserved;
-- u32 dq_src;
-- u64 rsp_addr;
-+ __le32 dq_src;
-+ __le64 rsp_addr;
- u64 rsp_addr_virt;
- u8 padding[40];
- };
-@@ -95,17 +77,17 @@ enum qbman_pull_type_e {
- struct qbman_eq_desc {
- u8 verb;
- u8 dca;
-- u16 seqnum;
-- u16 orpid;
-- u16 reserved1;
-- u32 tgtid;
-- u32 tag;
-- u16 qdbin;
-+ __le16 seqnum;
-+ __le16 orpid;
-+ __le16 reserved1;
-+ __le32 tgtid;
-+ __le32 tag;
-+ __le16 qdbin;
- u8 qpri;
- u8 reserved[3];
- u8 wae;
- u8 rspid;
-- u64 rsp_addr;
-+ __le64 rsp_addr;
- u8 fd[32];
- };
-
-@@ -113,9 +95,9 @@ struct qbman_eq_desc {
- struct qbman_release_desc {
- u8 verb;
- u8 reserved;
-- u16 bpid;
-- u32 reserved2;
-- u64 buf[7];
-+ __le16 bpid;
-+ __le32 reserved2;
-+ __le64 buf[7];
- };
-
- /* Management command result codes */
-@@ -127,7 +109,7 @@ struct qbman_release_desc {
- /* portal data structure */
- struct qbman_swp {
- const struct qbman_swp_desc *desc;
-- void __iomem *addr_cena;
-+ void *addr_cena;
- void __iomem *addr_cinh;
-
- /* Management commands */
-@@ -135,6 +117,11 @@ struct qbman_swp {
- u32 valid_bit; /* 0x00 or 0x80 */
- } mc;
-
-+ /* Management response */
-+ struct {
-+ u32 valid_bit; /* 0x00 or 0x80 */
-+ } mr;
-+
- /* Push dequeues */
- u32 sdq;
-
-@@ -187,6 +174,9 @@ int qbman_result_has_new_result(struct q
-
- void qbman_eq_desc_clear(struct qbman_eq_desc *d);
- void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
-+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
-+ u16 oprid, u16 seqnum, int incomplete);
-+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, u16 oprid, u16 seqnum);
- void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
- void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
- void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
-@@ -195,6 +185,8 @@ void qbman_eq_desc_set_qd(struct qbman_e
- int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
- const struct dpaa2_fd *fd);
-
-+int qbman_orp_drop(struct qbman_swp *s, u16 orpid, u16 seqnum);
-+
- void qbman_release_desc_clear(struct qbman_release_desc *d);
- void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
- void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-@@ -453,7 +445,7 @@ static inline int qbman_swp_CDAN_set_con
- static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
- u8 cmd_verb)
- {
-- int loopvar = 1000;
-+ int loopvar = 2000;
-
- qbman_swp_mc_submit(swp, cmd, cmd_verb);
-
-@@ -466,4 +458,62 @@ static inline void *qbman_swp_mc_complet
- return cmd;
- }
-
-+/* Query APIs */
-+struct qbman_fq_query_np_rslt {
-+ u8 verb;
-+ u8 rslt;
-+ u8 st1;
-+ u8 st2;
-+ u8 reserved[2];
-+ __le16 od1_sfdr;
-+ __le16 od2_sfdr;
-+ __le16 od3_sfdr;
-+ __le16 ra1_sfdr;
-+ __le16 ra2_sfdr;
-+ __le32 pfdr_hptr;
-+ __le32 pfdr_tptr;
-+ __le32 frm_cnt;
-+ __le32 byte_cnt;
-+ __le16 ics_surp;
-+ u8 is;
-+ u8 reserved2[29];
-+};
-+
-+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
-+ struct qbman_fq_query_np_rslt *r);
-+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
-+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
-+
-+struct qbman_bp_query_rslt {
-+ u8 verb;
-+ u8 rslt;
-+ u8 reserved[4];
-+ u8 bdi;
-+ u8 state;
-+ __le32 fill;
-+ __le32 hdotr;
-+ __le16 swdet;
-+ __le16 swdxt;
-+ __le16 hwdet;
-+ __le16 hwdxt;
-+ __le16 swset;
-+ __le16 swsxt;
-+ __le16 vbpid;
-+ __le16 icid;
-+ __le64 bpscn_addr;
-+ __le64 bpscn_ctx;
-+ __le16 hw_targ;
-+ u8 dbe;
-+ u8 reserved2;
-+ u8 sdcnt;
-+ u8 hdcnt;
-+ u8 sscnt;
-+ u8 reserved3[9];
-+};
-+
-+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
-+ struct qbman_bp_query_rslt *r);
-+
-+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
-+
- #endif /* __FSL_QBMAN_PORTAL_H */
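As a reading aid for the query helpers declared above, here is a minimal sketch of how they fit together, assuming a valid struct qbman_swp portal obtained from the DPIO layer; the bpid and fqid values below are hypothetical placeholders.

    /* Sketch only: assumes "qbman-portal.h" is included and swp is a
     * portal handed out by the DPIO driver; bpid/fqid are placeholders.
     */
    static void example_query_backlog(struct qbman_swp *swp)
    {
            struct qbman_bp_query_rslt bp_rslt;
            struct qbman_fq_query_np_rslt fq_rslt;
            u16 bpid = 0;        /* hypothetical buffer pool ID */
            u32 fqid = 0x100;    /* hypothetical frame queue ID */

            if (!qbman_bp_query(swp, bpid, &bp_rslt))
                    pr_info("BP %u: %u free buffers\n", bpid,
                            qbman_bp_info_num_free_bufs(&bp_rslt));

            if (!qbman_fq_query_state(swp, fqid, &fq_rslt))
                    pr_info("FQ 0x%x: %u frames, %u bytes pending\n", fqid,
                            qbman_fq_state_frame_count(&fq_rslt),
                            qbman_fq_state_byte_count(&fq_rslt));
    }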
---- a/drivers/staging/fsl-mc/bus/dpmcp.c
-+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
-@@ -1,33 +1,7 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/kernel.h>
- #include "../include/mc.h"
---- a/drivers/staging/fsl-mc/bus/dprc-driver.c
-+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
-@@ -1,12 +1,10 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * Freescale data path resource container (DPRC) driver
- *
- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
- */
-
- #include <linux/module.h>
---- a/drivers/staging/fsl-mc/bus/dprc.c
-+++ b/drivers/staging/fsl-mc/bus/dprc.c
-@@ -1,33 +1,7 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/kernel.h>
- #include "../include/mc.h"
---- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
-@@ -1,11 +1,9 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * fsl-mc object allocator driver
- *
- * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
- */
-
- #include <linux/module.h>
---- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
-@@ -1,12 +1,10 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * Freescale Management Complex (MC) bus driver
- *
- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
- */
-
- #define pr_fmt(fmt) "fsl-mc: " fmt
---- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
-@@ -1,12 +1,10 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * Freescale Management Complex (MC) bus driver MSI support
- *
- * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
- */
-
- #include <linux/of_device.h>
---- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
-@@ -1,11 +1,9 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
- /*
- * Freescale Management Complex (MC) bus private declarations
- *
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
- */
- #ifndef _FSL_MC_PRIVATE_H_
- #define _FSL_MC_PRIVATE_H_
---- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
-+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
-@@ -1,12 +1,10 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * Freescale Management Complex (MC) bus driver MSI support
- *
- * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
- */
-
- #include <linux/of_device.h>
---- a/drivers/staging/fsl-mc/bus/mc-io.c
-+++ b/drivers/staging/fsl-mc/bus/mc-io.c
-@@ -1,33 +1,7 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
-
- #include <linux/io.h>
---- a/drivers/staging/fsl-mc/bus/mc-sys.c
-+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
-@@ -1,35 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * I/O services to send MC commands to the MC hardware
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
-
- #include <linux/delay.h>
---- a/drivers/staging/fsl-mc/include/dpaa2-fd.h
-+++ b/drivers/staging/fsl-mc/include/dpaa2-fd.h
-@@ -1,33 +1,8 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPAA2_FD_H
- #define __FSL_DPAA2_FD_H
-@@ -91,6 +66,27 @@ struct dpaa2_fd {
- #define SG_BPID_MASK 0x3FFF
- #define SG_FINAL_FLAG_MASK 0x1
- #define SG_FINAL_FLAG_SHIFT 15
-+#define FL_SHORT_LEN_FLAG_MASK 0x1
-+#define FL_SHORT_LEN_FLAG_SHIFT 14
-+#define FL_SHORT_LEN_MASK 0x3FFFF
-+#define FL_OFFSET_MASK 0x0FFF
-+#define FL_FORMAT_MASK 0x3
-+#define FL_FORMAT_SHIFT 12
-+#define FL_BPID_MASK 0x3FFF
-+#define FL_FINAL_FLAG_MASK 0x1
-+#define FL_FINAL_FLAG_SHIFT 15
-+
-+/* Error bits in FD CTRL */
-+#define FD_CTRL_ERR_MASK 0x000000FF
-+#define FD_CTRL_UFD 0x00000004
-+#define FD_CTRL_SBE 0x00000008
-+#define FD_CTRL_FLC 0x00000010
-+#define FD_CTRL_FSE 0x00000020
-+#define FD_CTRL_FAERR 0x00000040
-+
-+/* Annotation bits in FD CTRL */
-+#define FD_CTRL_PTA 0x00800000
-+#define FD_CTRL_PTV1 0x00400000
-
- enum dpaa2_fd_format {
- dpaa2_fd_single = 0,
-@@ -312,7 +308,7 @@ enum dpaa2_sg_format {
- */
- static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
- {
-- return le64_to_cpu((dma_addr_t)sg->addr);
-+ return (dma_addr_t)le64_to_cpu(sg->addr);
- }
-
- /**
-@@ -443,9 +439,243 @@ static inline bool dpaa2_sg_is_final(con
- */
- static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
- {
-- sg->format_offset &= cpu_to_le16(~(SG_FINAL_FLAG_MASK
-- << SG_FINAL_FLAG_SHIFT));
-+ sg->format_offset &= cpu_to_le16((~(SG_FINAL_FLAG_MASK
-+ << SG_FINAL_FLAG_SHIFT)) & 0xFFFF);
- sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
- }
-
-+/**
-+ * struct dpaa2_fl_entry - structure for frame list entry.
-+ * @addr: address in the FLE
-+ * @len: length in the FLE
-+ * @bpid: buffer pool ID
-+ * @format_offset: format, offset, and short-length fields
-+ * @frc: frame context
-+ * @ctrl: control bits...including pta, pvt1, pvt2, err, etc
-+ * @flc: flow context address
-+ */
-+struct dpaa2_fl_entry {
-+ __le64 addr;
-+ __le32 len;
-+ __le16 bpid;
-+ __le16 format_offset;
-+ __le32 frc;
-+ __le32 ctrl;
-+ __le64 flc;
-+};
-+
-+enum dpaa2_fl_format {
-+ dpaa2_fl_single = 0,
-+ dpaa2_fl_res,
-+ dpaa2_fl_sg
-+};
-+
-+/**
-+ * dpaa2_fl_get_addr() - get the addr field of FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return the address in the frame list entry.
-+ */
-+static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
-+{
-+ return (dma_addr_t)le64_to_cpu(fle->addr);
-+}
-+
-+/**
-+ * dpaa2_fl_set_addr() - Set the addr field of FLE
-+ * @fle: the given frame list entry
-+ * @addr: the address needs to be set in frame list entry
-+ */
-+static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
-+ dma_addr_t addr)
-+{
-+ fle->addr = cpu_to_le64(addr);
-+}
-+
-+/**
-+ * dpaa2_fl_get_frc() - Get the frame context in the FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return the frame context field in the frame list entry.
-+ */
-+static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
-+{
-+ return le32_to_cpu(fle->frc);
-+}
-+
-+/**
-+ * dpaa2_fl_set_frc() - Set the frame context in the FLE
-+ * @fle: the given frame list entry
-+ * @frc: the frame context needs to be set in frame list entry
-+ */
-+static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
-+{
-+ fle->frc = cpu_to_le32(frc);
-+}
-+
-+/**
-+ * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return the control bits field in the frame list entry.
-+ */
-+static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
-+{
-+ return le32_to_cpu(fle->ctrl);
-+}
-+
-+/**
-+ * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
-+ * @fle: the given frame list entry
-+ * @ctrl: the control bits to be set in the frame list entry
-+ */
-+static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
-+{
-+ fle->ctrl = cpu_to_le32(ctrl);
-+}
-+
-+/**
-+ * dpaa2_fl_get_flc() - Get the flow context in the FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return the flow context in the frame list entry.
-+ */
-+static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
-+{
-+ return (dma_addr_t)le64_to_cpu(fle->flc);
-+}
-+
-+/**
-+ * dpaa2_fl_set_flc() - Set the flow context field of FLE
-+ * @fle: the given frame list entry
-+ * @flc_addr: the flow context needs to be set in frame list entry
-+ */
-+static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
-+ dma_addr_t flc_addr)
-+{
-+ fle->flc = cpu_to_le64(flc_addr);
-+}
-+
-+static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
-+{
-+ return !!((le16_to_cpu(fle->format_offset) >>
-+ FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
-+}
-+
-+/**
-+ * dpaa2_fl_get_len() - Get the length in the FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return the length field in the frame list entry.
-+ */
-+static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
-+{
-+ if (dpaa2_fl_short_len(fle))
-+ return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK;
-+
-+ return le32_to_cpu(fle->len);
-+}
-+
-+/**
-+ * dpaa2_fl_set_len() - Set the length field of FLE
-+ * @fle: the given frame list entry
-+ * @len: the length needs to be set in frame list entry
-+ */
-+static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
-+{
-+ fle->len = cpu_to_le32(len);
-+}
-+
-+/**
-+ * dpaa2_fl_get_offset() - Get the offset field in the frame list entry
-+ * @fle: the given frame list entry
-+ *
-+ * Return the offset.
-+ */
-+static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
-+{
-+ return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK;
-+}
-+
-+/**
-+ * dpaa2_fl_set_offset() - Set the offset field of FLE
-+ * @fle: the given frame list entry
-+ * @offset: the offset needs to be set in frame list entry
-+ */
-+static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset)
-+{
-+ fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK);
-+ fle->format_offset |= cpu_to_le16(offset);
-+}
-+
-+/**
-+ * dpaa2_fl_get_format() - Get the format field in the FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return the format.
-+ */
-+static inline enum dpaa2_fl_format dpaa2_fl_get_format(
-+ const struct dpaa2_fl_entry *fle)
-+{
-+ return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >>
-+ FL_FORMAT_SHIFT) & FL_FORMAT_MASK);
-+}
-+
-+/**
-+ * dpaa2_fl_set_format() - Set the format field of FLE
-+ * @fle: the given frame list entry
-+ * @format: the format needs to be set in frame list entry
-+ */
-+static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
-+ enum dpaa2_fl_format format)
-+{
-+ fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT));
-+ fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT);
-+}
-+
-+/**
-+ * dpaa2_fl_get_bpid() - Get the bpid field in the FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return the buffer pool id.
-+ */
-+static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
-+{
-+ return le16_to_cpu(fle->bpid) & FL_BPID_MASK;
-+}
-+
-+/**
-+ * dpaa2_fl_set_bpid() - Set the bpid field of FLE
-+ * @fle: the given frame list entry
-+ * @bpid: buffer pool id to be set
-+ */
-+static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid)
-+{
-+ fle->bpid &= cpu_to_le16(~(FL_BPID_MASK));
-+ fle->bpid |= cpu_to_le16(bpid);
-+}
-+
-+/**
-+ * dpaa2_fl_is_final() - Check final bit in FLE
-+ * @fle: the given frame list entry
-+ *
-+ * Return bool.
-+ */
-+static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
-+{
-+ return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT);
-+}
-+
-+/**
-+ * dpaa2_fl_set_final() - Set the final bit in FLE
-+ * @fle: the given frame list entry
-+ * @final: the final boolean to be set
-+ */
-+static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
-+{
-+ fle->format_offset &= cpu_to_le16(~(FL_FINAL_FLAG_MASK <<
-+ FL_FINAL_FLAG_SHIFT));
-+ fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT);
-+}
-+
- #endif /* __FSL_DPAA2_FD_H */
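The frame list entry (FLE) accessors above pack the address, length, offset, format, buffer pool ID and final bit of one entry. A minimal sketch of filling a single-buffer FLE with them, assuming dpaa2-fd.h is included and the IOVA, length and bpid come from the caller:

    /* Sketch only: fle points to an entry owned by the caller */
    static void example_fill_fle(struct dpaa2_fl_entry *fle, dma_addr_t iova,
                                 u32 len, u16 bpid)
    {
            memset(fle, 0, sizeof(*fle));
            dpaa2_fl_set_addr(fle, iova);            /* 64-bit IOVA of the buffer */
            dpaa2_fl_set_len(fle, len);
            dpaa2_fl_set_offset(fle, 0);
            dpaa2_fl_set_format(fle, dpaa2_fl_single);
            dpaa2_fl_set_bpid(fle, bpid);
            dpaa2_fl_set_final(fle, true);           /* last entry of the frame list */
    }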
---- a/drivers/staging/fsl-mc/include/dpaa2-global.h
-+++ b/drivers/staging/fsl-mc/include/dpaa2-global.h
-@@ -1,33 +1,8 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPAA2_GLOBAL_H
- #define __FSL_DPAA2_GLOBAL_H
---- a/drivers/staging/fsl-mc/include/dpaa2-io.h
-+++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
-@@ -1,33 +1,8 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
-- * Copyright NXP
-+ * Copyright 2017 NXP
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPAA2_IO_H
- #define __FSL_DPAA2_IO_H
-@@ -77,17 +52,20 @@ struct dpaa2_io_desc {
- int has_8prio;
- int cpu;
- void *regs_cena;
-- void *regs_cinh;
-+ void __iomem *regs_cinh;
- int dpio_id;
- u32 qman_version;
- };
-
--struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
-+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
-+ struct device *dev);
-
- void dpaa2_io_down(struct dpaa2_io *d);
-
- irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
-
-+struct dpaa2_io *dpaa2_io_service_select(int cpu);
-+
- /**
- * struct dpaa2_io_notification_ctx - The DPIO notification context structure
- * @cb: The callback to be invoked when the notification arrives
-@@ -103,7 +81,7 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io
- * Used when a FQDAN/CDAN registration is made by drivers.
- */
- struct dpaa2_io_notification_ctx {
-- void (*cb)(struct dpaa2_io_notification_ctx *);
-+ void (*cb)(struct dpaa2_io_notification_ctx *ctx);
- int is_cdan;
- u32 id;
- int desired_cpu;
-@@ -113,10 +91,14 @@ struct dpaa2_io_notification_ctx {
- void *dpio_private;
- };
-
-+int dpaa2_io_get_cpu(struct dpaa2_io *d);
-+
- int dpaa2_io_service_register(struct dpaa2_io *service,
-- struct dpaa2_io_notification_ctx *ctx);
-+ struct dpaa2_io_notification_ctx *ctx,
-+ struct device *dev);
- void dpaa2_io_service_deregister(struct dpaa2_io *service,
-- struct dpaa2_io_notification_ctx *ctx);
-+ struct dpaa2_io_notification_ctx *ctx,
-+ struct device *dev);
- int dpaa2_io_service_rearm(struct dpaa2_io *service,
- struct dpaa2_io_notification_ctx *ctx);
-
-@@ -129,9 +111,9 @@ int dpaa2_io_service_enqueue_fq(struct d
- const struct dpaa2_fd *fd);
- int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
- u16 qdbin, const struct dpaa2_fd *fd);
--int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
-+int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,
- const u64 *buffers, unsigned int num_buffers);
--int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
-+int dpaa2_io_service_acquire(struct dpaa2_io *d, u16 bpid,
- u64 *buffers, unsigned int num_buffers);
-
- struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
-@@ -139,4 +121,64 @@ struct dpaa2_io_store *dpaa2_io_store_cr
- void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
- struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
-
-+/* Order Restoration Support */
-+int dpaa2_io_service_enqueue_orp_fq(struct dpaa2_io *d, u32 fqid,
-+ const struct dpaa2_fd *fd, u16 orpid,
-+ u16 seqnum, int last);
-+
-+int dpaa2_io_service_enqueue_orp_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
-+ u16 qdbin, const struct dpaa2_fd *fd,
-+ u16 orpid, u16 seqnum, int last);
-+
-+int dpaa2_io_service_orp_seqnum_drop(struct dpaa2_io *d, u16 orpid,
-+ u16 seqnum);
-+
-+/***************/
-+/* CSCN */
-+/***************/
-+
-+/**
-+ * struct dpaa2_cscn - The CSCN message format
-+ * @verb: identifies the type of message (should be 0x27).
-+ * @stat: status bits related to dequeuing response (not used)
-+ * @state: bit 0 is set when the congestion group is congested, clear otherwise
-+ * @reserved: reserved byte
-+ * @cgid: congestion group ID - only the first 16 bits are used
-+ * @ctx: context data
-+ *
-+ * Congestion management can be implemented in software through
-+ * the use of Congestion State Change Notifications (CSCN). These
-+ * are messages written by DPAA2 hardware to memory whenever the
-+ * instantaneous count (I_CNT field in the CG) exceeds the
-+ * Congestion State (CS) entrance threshold, signifying congestion
-+ * entrance, or when the instantaneous count returns below exit
-+ * threshold, signifying congestion exit. The format of the message
-+ * is given by the dpaa2_cscn structure. Bit 0 of the state field
-+ * represents congestion state written by the hardware.
-+ */
-+struct dpaa2_cscn {
-+ u8 verb;
-+ u8 stat;
-+ u8 state;
-+ u8 reserved;
-+ __le32 cgid;
-+ __le64 ctx;
-+};
-+
-+#define DPAA2_CSCN_SIZE 64
-+#define DPAA2_CSCN_ALIGN 16
-+
-+#define DPAA2_CSCN_STATE_MASK 0x1
-+#define DPAA2_CSCN_CONGESTED 1
-+
-+static inline bool dpaa2_cscn_state_congested(struct dpaa2_cscn *cscn)
-+{
-+ return ((cscn->state & DPAA2_CSCN_STATE_MASK) == DPAA2_CSCN_CONGESTED);
-+}
-+
-+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
-+ u32 *fcnt, u32 *bcnt);
-+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
-+ u32 *num);
-+
- #endif /* __FSL_DPAA2_IO_H */
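To make the CSCN message layout and the new query declarations above concrete, a small sketch of a congestion check; the dpaa2_io instance, the CSCN buffer written by hardware and the fqid are all assumed to be set up elsewhere by a driver:

    static void example_congestion_check(struct dpaa2_io *io, u32 fqid,
                                         struct dpaa2_cscn *cscn)
    {
            u32 fcnt = 0, bcnt = 0;

            /* State bit 0 of the CSCN reflects the congestion group state */
            if (dpaa2_cscn_state_congested(cscn))
                    pr_warn("congestion group %u entered congestion\n",
                            le32_to_cpu(cscn->cgid) & 0xffff);

            /* Instantaneous frame/byte backlog of one frame queue */
            if (!dpaa2_io_query_fq_count(io, fqid, &fcnt, &bcnt))
                    pr_info("FQ 0x%x: %u frames, %u bytes pending\n",
                            fqid, fcnt, bcnt);
    }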
---- a/drivers/staging/fsl-mc/include/dpbp.h
-+++ b/drivers/staging/fsl-mc/include/dpbp.h
-@@ -1,34 +1,7 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPBP_H
- #define __FSL_DPBP_H
---- a/drivers/staging/fsl-mc/include/dpcon.h
-+++ b/drivers/staging/fsl-mc/include/dpcon.h
-@@ -1,33 +1,7 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPCON_H
- #define __FSL_DPCON_H
---- /dev/null
-+++ b/drivers/staging/fsl-mc/include/dpopr.h
-@@ -0,0 +1,110 @@
-+/*
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPOPR_H_
-+#define __FSL_DPOPR_H_
-+
-+/* Data Path Order Restoration API
-+ * Contains the initialization and runtime APIs for Order Restoration
-+ */
-+
-+/** Order Restoration properties */
-+
-+/**
-+ * Create a new Order Point Record option
-+ */
-+#define OPR_OPT_CREATE 0x1
-+/**
-+ * Retire an existing Order Point Record option
-+ */
-+#define OPR_OPT_RETIRE 0x2
-+
-+/**
-+ * struct opr_cfg - Structure representing OPR configuration
-+ * @oprrws: Order point record (OPR) restoration window size (0 to 5)
-+ * 0 - Window size is 32 frames.
-+ * 1 - Window size is 64 frames.
-+ * 2 - Window size is 128 frames.
-+ * 3 - Window size is 256 frames.
-+ * 4 - Window size is 512 frames.
-+ * 5 - Window size is 1024 frames.
-+ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled)
-+ * @olws: OPR acceptable late arrival window size (0 to 3)
-+ * 0 - Disabled. Late arrivals are always rejected.
-+ * 1 - Window size is 32 frames.
-+ * 2 - Window size is the same as the OPR restoration
-+ * window size configured in the OPRRWS field.
-+ * 3 - Window size is 8192 frames. Late arrivals are
-+ * always accepted.
-+ * @oeane: Order restoration list (ORL) resource exhaustion
-+ * advance NESN enable (0 disabled, 1 enabled)
-+ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled)
-+ */
-+struct opr_cfg {
-+ u8 oprrws;
-+ u8 oa;
-+ u8 olws;
-+ u8 oeane;
-+ u8 oloe;
-+};
-+
-+/**
-+ * struct opr_qry - Structure representing OPR configuration
-+ * @enable: Enabled state
-+ * @rip: Retirement In Progress
-+ * @ndsn: Next dispensed sequence number
-+ * @nesn: Next expected sequence number
-+ * @ea_hseq: Early arrival head sequence number
-+ * @hseq_nlis: HSEQ not last in sequence
-+ * @ea_tseq: Early arrival tail sequence number
-+ * @tseq_nlis: TSEQ not last in sequence
-+ * @ea_tptr: Early arrival tail pointer
-+ * @ea_hptr: Early arrival head pointer
-+ * @opr_id: Order Point Record ID
-+ * @opr_vid: Order Point Record Virtual ID
-+ */
-+struct opr_qry {
-+ char enable;
-+ char rip;
-+ u16 ndsn;
-+ u16 nesn;
-+ u16 ea_hseq;
-+ char hseq_nlis;
-+ u16 ea_tseq;
-+ char tseq_nlis;
-+ u16 ea_tptr;
-+ u16 ea_hptr;
-+ u16 opr_id;
-+ u16 opr_vid;
-+};
-+
-+#endif /* __FSL_DPOPR_H_ */
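A short sketch of how the opr_cfg fields above translate into a concrete setup; the values are illustrative only (the restoration window is 32 << oprrws frames, so oprrws = 3 selects 256 frames):

    static void example_opr_cfg(struct opr_cfg *cfg)
    {
            memset(cfg, 0, sizeof(*cfg));
            cfg->oprrws = 3;   /* restoration window: 32 << 3 = 256 frames */
            cfg->oa     = 1;   /* auto advance NESN window enabled */
            cfg->olws   = 2;   /* late arrivals accepted within the restoration window */
            cfg->oeane  = 0;   /* do not advance NESN on ORL exhaustion */
            cfg->oloe   = 0;   /* loose ordering disabled */
    }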
---- a/drivers/staging/fsl-mc/include/mc.h
-+++ b/drivers/staging/fsl-mc/include/mc.h
-@@ -1,12 +1,10 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
- /*
- * Freescale Management Complex (MC) bus public interface
- *
- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
-- * This file is licensed under the terms of the GNU General Public
-- * License version 2. This program is licensed "as is" without any
-- * warranty of any kind, whether express or implied.
- */
- #ifndef _FSL_MC_H_
- #define _FSL_MC_H_
diff --git a/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch b/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch
deleted file mode 100644
index 1656ddf3e8..0000000000
--- a/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch
+++ /dev/null
@@ -1,9130 +0,0 @@
-From 90b3f1705785f0e30de6f41abc8764aae1391245 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:28 +0800
-Subject: [PATCH] dpaa2-ethernet: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch of dpaa2-ethernet for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
-Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
-Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/staging/fsl-dpaa2/Kconfig | 7 +
- drivers/staging/fsl-dpaa2/ethernet/Makefile | 3 +
- .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1187 ++++++++
- .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 183 ++
- .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 356 +++
- .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
- .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 29 +-
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2509 +++++++++++++----
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 394 ++-
- .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 716 ++++-
- drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 380 ++-
- drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 255 +-
- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 704 ++++-
- drivers/staging/fsl-dpaa2/ethernet/dpni.h | 401 ++-
- drivers/staging/fsl-dpaa2/ethernet/net.h | 30 +-
- 15 files changed, 6315 insertions(+), 899 deletions(-)
- create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
- create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
- create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
-
---- a/drivers/staging/fsl-dpaa2/Kconfig
-+++ b/drivers/staging/fsl-dpaa2/Kconfig
-@@ -17,6 +17,13 @@ config FSL_DPAA2_ETH
- Ethernet driver for Freescale DPAA2 SoCs, using the
- Freescale MC bus driver
-
-+config FSL_DPAA2_ETH_CEETM
-+ depends on NET_SCHED
-+ bool "DPAA2 Ethernet CEETM QoS"
-+ default n
-+ ---help---
-+ Enable QoS offloading support through the CEETM hardware block.
-+
- if FSL_DPAA2_ETH
- config FSL_DPAA2_ETH_USE_ERR_QUEUE
- bool "Enable Rx error queue"
---- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
-+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
-@@ -1,3 +1,4 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # Makefile for the Freescale DPAA2 Ethernet controller
- #
-@@ -5,6 +6,8 @@
- obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
-
- fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
-+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
-+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
-
- # Needed by the tracing framework
- CFLAGS_dpaa2-eth.o := -I$(src)
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
-@@ -0,0 +1,1187 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2017-2019 NXP
-+ *
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+
-+#include "dpaa2-eth-ceetm.h"
-+#include "dpaa2-eth.h"
-+
-+#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
-+/* Convert the Bps rate passed from userspace to Mbit: 1 Mbit here is 2^20 bits = 2^17 bytes, hence the shift by 17 */
-+#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
-+
-+static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
-+ [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
-+ [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
-+};
-+
-+struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
-+
-+static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
-+ struct dpni_tx_shaping_cfg *scfg,
-+ struct dpni_tx_shaping_cfg *ecfg,
-+ int coupled, int ch_id)
-+{
-+ int err = 0;
-+
-+ netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
-+ ch_id, scfg->rate_limit);
-+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
-+ ecfg, coupled);
-+ if (err)
-+ netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
-+
-+ return err;
-+}
-+
-+static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
-+ int ch_id)
-+{
-+ struct dpni_tx_shaping_cfg cfg = { 0 };
-+
-+ return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
-+}
-+
-+static inline int
-+dpaa2_eth_update_shaping_cfg(struct net_device *dev,
-+ struct dpaa2_ceetm_shaping_cfg cfg,
-+ struct dpni_tx_shaping_cfg *scfg,
-+ struct dpni_tx_shaping_cfg *ecfg)
-+{
-+ scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
-+ ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
-+
-+ if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
-+ netdev_err(dev, "Committed burst size must be under %d\n",
-+ DPAA2_ETH_MAX_BURST_SIZE);
-+ return -EINVAL;
-+ }
-+
-+ scfg->max_burst_size = cfg.cbs;
-+
-+ if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
-+ netdev_err(dev, "Excess burst size must be under %d\n",
-+ DPAA2_ETH_MAX_BURST_SIZE);
-+ return -EINVAL;
-+ }
-+
-+ ecfg->max_burst_size = cfg.ebs;
-+
-+ if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
-+ netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+enum update_tx_prio {
-+ DPAA2_ETH_ADD_CQ,
-+ DPAA2_ETH_DEL_CQ,
-+};
-+
-+/* Normalize weights based on max passed value */
-+static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
-+{
-+ struct dpni_tx_schedule_cfg *sched_cfg;
-+ struct dpaa2_ceetm_class *cl;
-+ u32 qpri;
-+ u16 weight_max = 0, increment;
-+ int i;
-+
-+ /* Check the boundaries of the provided values */
-+ for (i = 0; i < priv->clhash.hashsize; i++)
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
-+ weight_max = (weight_max == 0 ? cl->prio.weight :
-+ (weight_max < cl->prio.weight ?
-+ cl->prio.weight : weight_max));
-+
-+ /* If there are no elements, there's nothing to do */
-+ if (weight_max == 0)
-+ return 0;
-+
-+ increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
-+ weight_max;
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
-+ if (cl->prio.mode == STRICT_PRIORITY)
-+ continue;
-+
-+ qpri = cl->prio.qpri;
-+ sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
-+
-+ sched_cfg->delta_bandwidth =
-+ DPAA2_CEETM_MIN_WEIGHT +
-+ (cl->prio.weight * increment);
-+
-+ pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
-+ __func__, qpri, sched_cfg->delta_bandwidth);
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_ceetm_class *cl,
-+ enum update_tx_prio type)
-+{
-+ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
-+ struct dpni_tx_schedule_cfg *sched_cfg;
-+ struct dpni_taildrop td = {0};
-+ u8 ch_id = 0, tc_id = 0;
-+ u32 qpri = 0;
-+ int err = 0;
-+
-+ qpri = cl->prio.qpri;
-+ tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
-+
-+ switch (type) {
-+ case DPAA2_ETH_ADD_CQ:
-+ /* Enable taildrop */
-+ td.enable = 1;
-+ td.units = DPNI_CONGESTION_UNIT_FRAMES;
-+ td.threshold = DPAA2_CEETM_TD_THRESHOLD;
-+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-+ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
-+ 0, &td);
-+ if (err) {
-+ netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
-+ err);
-+ return err;
-+ }
-+ break;
-+ case DPAA2_ETH_DEL_CQ:
-+ /* Disable taildrop */
-+ td.enable = 0;
-+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-+ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
-+ 0, &td);
-+ if (err) {
-+ netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
-+ err);
-+ return err;
-+ }
-+ break;
-+ }
-+
-+ /* We can zero out the structure in the tx_prio_conf array */
-+ if (type == DPAA2_ETH_DEL_CQ) {
-+ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
-+ memset(sched_cfg, 0, sizeof(*sched_cfg));
-+ }
-+
-+ /* Normalize priorities */
-+ err = dpaa2_eth_normalize_tx_prio(sch);
-+
-+ /* Debug print goes here */
-+ print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
-+ &sch->prio.tx_prio_cfg,
-+ sizeof(sch->prio.tx_prio_cfg), 0);
-+
-+ /* Call dpni_set_tx_priorities for the entire prio qdisc */
-+ err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
-+ &sch->prio.tx_prio_cfg);
-+ if (err)
-+ netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
-+ err);
-+
-+ return err;
-+}
-+
-+static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
-+{
-+ priv->ceetm_en = true;
-+}
-+
-+static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
-+{
-+ priv->ceetm_en = false;
-+}
-+
-+/* Find class in qdisc hash table using given handle */
-+static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
-+ struct Qdisc *sch)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct Qdisc_class_common *clc;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
-+ __func__, handle, sch->handle);
-+
-+ clc = qdisc_class_find(&priv->clhash, handle);
-+ return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
-+}
-+
-+/* Insert a class in the qdisc's class hash */
-+static void dpaa2_ceetm_link_class(struct Qdisc *sch,
-+ struct Qdisc_class_hash *clhash,
-+ struct Qdisc_class_common *common)
-+{
-+ sch_tree_lock(sch);
-+ qdisc_class_hash_insert(clhash, common);
-+ sch_tree_unlock(sch);
-+ qdisc_class_hash_grow(sch, clhash);
-+}
-+
-+/* Destroy a ceetm class */
-+static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
-+ struct dpaa2_ceetm_class *cl)
-+{
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
-+
-+ if (!cl)
-+ return;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ /* Recurse into child first */
-+ if (cl->child) {
-+ qdisc_destroy(cl->child);
-+ cl->child = NULL;
-+ }
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
-+ netdev_err(dev, "Error resetting channel shaping\n");
-+
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
-+ netdev_err(dev, "Error resetting tx_priorities\n");
-+
-+ if (cl->prio.cstats)
-+ free_percpu(cl->prio.cstats);
-+
-+ break;
-+ }
-+
-+ tcf_block_put(cl->block);
-+ kfree(cl);
-+}
-+
-+/* Destroy a ceetm qdisc */
-+static void dpaa2_ceetm_destroy(struct Qdisc *sch)
-+{
-+ unsigned int i;
-+ struct hlist_node *next;
-+ struct dpaa2_ceetm_class *cl;
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
-+ __func__, sch->handle);
-+
-+ /* All filters need to be removed before destroying the classes */
-+ tcf_block_put(priv->block);
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
-+ tcf_block_put(cl->block);
-+ }
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
-+ common.hnode)
-+ dpaa2_ceetm_cls_destroy(sch, cl);
-+ }
-+
-+ qdisc_class_hash_destroy(&priv->clhash);
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ dpaa2_eth_ceetm_disable(priv_eth);
-+
-+ if (priv->root.qstats)
-+ free_percpu(priv->root.qstats);
-+
-+ if (!priv->root.qdiscs)
-+ break;
-+
-+ /* Destroy the pfifo qdiscs in case they haven't been attached
-+ * to the netdev queues yet.
-+ */
-+ for (i = 0; i < dev->num_tx_queues; i++)
-+ if (priv->root.qdiscs[i])
-+ qdisc_destroy(priv->root.qdiscs[i]);
-+
-+ kfree(priv->root.qdiscs);
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (priv->prio.parent)
-+ priv->prio.parent->child = NULL;
-+ break;
-+ }
-+}
-+
-+static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
-+{
-+ struct Qdisc *qdisc;
-+ unsigned int ntx, i;
-+ struct nlattr *nest;
-+ struct dpaa2_ceetm_tc_qopt qopt;
-+ struct dpaa2_ceetm_qdisc_stats *qstats;
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ sch_tree_lock(sch);
-+ memset(&qopt, 0, sizeof(qopt));
-+ qopt.type = priv->type;
-+ qopt.shaped = priv->shaped;
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ /* Gather statistics from the underlying pfifo qdiscs */
-+ sch->q.qlen = 0;
-+ memset(&sch->bstats, 0, sizeof(sch->bstats));
-+ memset(&sch->qstats, 0, sizeof(sch->qstats));
-+
-+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-+ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
-+ sch->q.qlen += qdisc->q.qlen;
-+ sch->bstats.bytes += qdisc->bstats.bytes;
-+ sch->bstats.packets += qdisc->bstats.packets;
-+ sch->qstats.qlen += qdisc->qstats.qlen;
-+ sch->qstats.backlog += qdisc->qstats.backlog;
-+ sch->qstats.drops += qdisc->qstats.drops;
-+ sch->qstats.requeues += qdisc->qstats.requeues;
-+ sch->qstats.overlimits += qdisc->qstats.overlimits;
-+ }
-+
-+ for_each_online_cpu(i) {
-+ qstats = per_cpu_ptr(priv->root.qstats, i);
-+ sch->qstats.drops += qstats->drops;
-+ }
-+
-+ break;
-+
-+ case CEETM_PRIO:
-+ qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
-+ qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
-+ qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
-+ break;
-+
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ sch_tree_unlock(sch);
-+ return -EINVAL;
-+ }
-+
-+ nest = nla_nest_start(skb, TCA_OPTIONS);
-+ if (!nest)
-+ goto nla_put_failure;
-+ if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
-+ goto nla_put_failure;
-+ nla_nest_end(skb, nest);
-+
-+ sch_tree_unlock(sch);
-+ return skb->len;
-+
-+nla_put_failure:
-+ sch_tree_unlock(sch);
-+ nla_nest_cancel(skb, nest);
-+ return -EMSGSIZE;
-+}
-+
-+static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
-+ struct dpaa2_ceetm_qdisc *priv,
-+ struct dpaa2_ceetm_tc_qopt *qopt)
-+{
-+ /* TODO: Once LX2 support is added */
-+ /* priv->shaped = parent_cl->shaped; */
-+ priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
-+ priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
-+ priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
-+
-+ return 0;
-+}
-+
-+/* Edit a ceetm qdisc */
-+static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
-+ struct dpaa2_ceetm_tc_qopt *qopt;
-+ int err;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
-+ dpaa2_ceetm_policy, NULL);
-+ if (err < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
-+ "nla_parse_nested");
-+ return err;
-+ }
-+
-+ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
-+ "tb");
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(sch->handle)) {
-+ pr_err("CEETM: a qdisc should not have a minor\n");
-+ return -EINVAL;
-+ }
-+
-+ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
-+
-+ if (priv->type != qopt->type) {
-+ pr_err("CEETM: qdisc %X is not of the provided type\n",
-+ sch->handle);
-+ return -EINVAL;
-+ }
-+
-+ switch (priv->type) {
-+ case CEETM_PRIO:
-+ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
-+ break;
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ err = -EINVAL;
-+ }
-+
-+ return err;
-+}
-+
-+/* Configure a root ceetm qdisc */
-+static int dpaa2_ceetm_init_root(struct Qdisc *sch,
-+ struct dpaa2_ceetm_qdisc *priv,
-+ struct dpaa2_ceetm_tc_qopt *qopt)
-+{
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
-+ struct netdev_queue *dev_queue;
-+ unsigned int i, parent_id;
-+ struct Qdisc *qdisc;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ /* Validate inputs */
-+ if (sch->parent != TC_H_ROOT) {
-+ pr_err("CEETM: a root ceetm qdisc must be root\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Pre-allocate underlying pfifo qdiscs.
-+ *
-+ * We want to offload shaping and scheduling decisions to the hardware.
-+ * The pfifo qdiscs will be attached to the netdev queues and will
-+ * guide the traffic from the IP stack down to the driver with minimum
-+ * interference.
-+ *
-+ * The CEETM qdiscs and classes will be crossed when the traffic
-+ * reaches the driver.
-+ */
-+ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
-+ sizeof(priv->root.qdiscs[0]),
-+ GFP_KERNEL);
-+ if (!priv->root.qdiscs)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < dev->num_tx_queues; i++) {
-+ dev_queue = netdev_get_tx_queue(dev, i);
-+ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
-+ TC_H_MIN(i + PFIFO_MIN_OFFSET));
-+
-+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
-+ parent_id);
-+ if (!qdisc)
-+ return -ENOMEM;
-+
-+ priv->root.qdiscs[i] = qdisc;
-+ qdisc->flags |= TCQ_F_ONETXQUEUE;
-+ }
-+
-+ sch->flags |= TCQ_F_MQROOT;
-+
-+ priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
-+ if (!priv->root.qstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ return -ENOMEM;
-+ }
-+
-+ dpaa2_eth_ceetm_enable(priv_eth);
-+ return 0;
-+}
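For reference, the parent_id computed for each pre-allocated pfifo follows the handle scheme described in dpaa2-eth-ceetm.h: pfifo handles start at 1:21 so that 1:1 through 1:20 stay free for CEETM channel classes. A minimal sketch of that handle arithmetic, assuming the ceetm qdisc was created with handle 1: and reproducing the standard TC_H_* macro definitions:

#include <stdio.h>

/* same definitions as the TC_H_* macros in <linux/pkt_sched.h> */
#define TC_H_MAJ_MASK	0xFFFF0000U
#define TC_H_MIN_MASK	0x0000FFFFU
#define TC_H_MAJ(h)	((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)	((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

#define PFIFO_MIN_OFFSET 0x21

int main(void)
{
	unsigned int handle = 0x00010000;	/* qdisc "1:", assumed */
	unsigned int i, parent;

	/* parent ids handed to qdisc_create_dflt() for the first few queues */
	for (i = 0; i < 4; i++) {
		parent = TC_H_MAKE(TC_H_MAJ(handle),
				   TC_H_MIN(i + PFIFO_MIN_OFFSET));
		printf("txq %u -> pfifo parent %x:%x\n",
		       i, TC_H_MAJ(parent) >> 16, TC_H_MIN(parent));
	}

	return 0;
}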
-+
-+/* Configure a prio ceetm qdisc */
-+static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
-+ struct dpaa2_ceetm_qdisc *priv,
-+ struct dpaa2_ceetm_tc_qopt *qopt)
-+{
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_ceetm_class *parent_cl;
-+ struct Qdisc *parent_qdisc;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (sch->parent == TC_H_ROOT) {
-+ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
-+ return -EINVAL;
-+ }
-+
-+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
-+ if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Obtain the parent root ceetm_class */
-+ parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
-+
-+ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
-+ pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
-+ return -EINVAL;
-+ }
-+
-+ priv->prio.parent = parent_cl;
-+ parent_cl->child = sch;
-+
-+ return dpaa2_ceetm_change_prio(sch, priv, qopt);
-+}
-+
-+/* Configure a generic ceetm qdisc */
-+static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
-+ struct dpaa2_ceetm_tc_qopt *qopt;
-+ int err;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (!netif_is_multiqueue(dev))
-+ return -EOPNOTSUPP;
-+
-+ err = tcf_block_get(&priv->block, &priv->filter_list);
-+ if (err) {
-+ pr_err("CEETM: unable to get tcf_block\n");
-+ return err;
-+ }
-+
-+ if (!opt) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
-+ __func__);
-+ return -EINVAL;
-+ }
-+
-+ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
-+ dpaa2_ceetm_policy, NULL);
-+ if (err < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
-+ "nla_parse_nested");
-+ return err;
-+ }
-+
-+ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
-+ "tb");
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(sch->handle)) {
-+ pr_err("CEETM: a qdisc should not have a minor\n");
-+ return -EINVAL;
-+ }
-+
-+ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
-+
-+ /* Initialize the class hash list. Each qdisc has its own class hash */
-+ err = qdisc_class_hash_init(&priv->clhash);
-+ if (err < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ priv->type = qopt->type;
-+ priv->shaped = qopt->shaped;
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ err = dpaa2_ceetm_init_root(sch, priv, qopt);
-+ break;
-+ case CEETM_PRIO:
-+ err = dpaa2_ceetm_init_prio(sch, priv, qopt);
-+ break;
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ /* Note: dpaa2_ceetm_destroy() will be called by our caller */
-+ err = -EINVAL;
-+ }
-+
-+ return err;
-+}
-+
-+/* Attach the underlying pfifo qdiscs */
-+static void dpaa2_ceetm_attach(struct Qdisc *sch)
-+{
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct Qdisc *qdisc, *old_qdisc;
-+ unsigned int i;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ for (i = 0; i < dev->num_tx_queues; i++) {
-+ qdisc = priv->root.qdiscs[i];
-+ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
-+ if (old_qdisc)
-+ qdisc_destroy(old_qdisc);
-+ }
-+
-+ /* Remove the references to the pfifo qdiscs since the kernel will
-+ * destroy them when needed. No cleanup on our part is required from
-+ * this point on.
-+ */
-+ kfree(priv->root.qdiscs);
-+ priv->root.qdiscs = NULL;
-+}
-+
-+static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid)
-+{
-+ struct dpaa2_ceetm_class *cl;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
-+ __func__, classid, sch->handle);
-+ cl = dpaa2_ceetm_find(classid, sch);
-+
-+ return (unsigned long)cl;
-+}
-+
-+static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
-+ struct dpaa2_ceetm_tc_copt *copt,
-+ struct net_device *dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
-+ struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
-+ int err = 0;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
-+ cl->common.classid);
-+
-+ if (!cl->shaped)
-+ return 0;
-+
-+ if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
-+ &scfg, &ecfg))
-+ return -EINVAL;
-+
-+ err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
-+ copt->shaping_cfg.coupled,
-+ cl->root.ch_id);
-+ if (err)
-+ return err;
-+
-+ memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
-+ sizeof(struct dpaa2_ceetm_shaping_cfg));
-+
-+ return err;
-+}
-+
-+static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
-+ struct dpaa2_ceetm_tc_copt *copt,
-+ struct net_device *dev)
-+{
-+ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
-+ struct dpni_tx_schedule_cfg *sched_cfg;
-+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
-+ int err;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
-+ __func__, cl->common.classid, copt->mode, copt->weight);
-+
-+ if (!cl->prio.cstats) {
-+ cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
-+ if (!cl->prio.cstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ return -ENOMEM;
-+ }
-+ }
-+
-+ cl->prio.mode = copt->mode;
-+ cl->prio.weight = copt->weight;
-+
-+ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
-+
-+ switch (copt->mode) {
-+ case STRICT_PRIORITY:
-+ sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
-+ break;
-+ case WEIGHTED_A:
-+ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
-+ break;
-+ case WEIGHTED_B:
-+ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
-+ break;
-+ }
-+
-+ err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
-+
-+ return err;
-+}
-+
-+/* Add a new ceetm class */
-+static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
-+ struct dpaa2_ceetm_tc_copt *copt,
-+ unsigned long *arg)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
-+ struct dpaa2_ceetm_class *cl;
-+ int err;
-+
-+ if (copt->type == CEETM_ROOT &&
-+ priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
-+ pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
-+ dpaa2_eth_ch_count(priv_eth),
-+ dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
-+ return -EINVAL;
-+ }
-+
-+ if (copt->type == CEETM_PRIO &&
-+ priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
-+ pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
-+ dpaa2_eth_tc_count(priv_eth),
-+ dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
-+ return -EINVAL;
-+ }
-+
-+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
-+ if (!cl)
-+ return -ENOMEM;
-+
-+ err = tcf_block_get(&cl->block, &cl->filter_list);
-+ if (err) {
-+ pr_err("%s: Unable to set new root class\n", __func__);
-+ goto out_free;
-+ }
-+
-+ cl->common.classid = classid;
-+ cl->parent = sch;
-+ cl->child = NULL;
-+
-+ /* Add class handle in Qdisc */
-+ dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
-+
-+ cl->shaped = copt->shaped;
-+ cl->type = copt->type;
-+
-+ /* Claim a CEETM channel / tc - DPAA2 will assume the transition from
-+ * classid to qdid/qpri, starting from qdid / qpri 0
-+ */
-+ switch (copt->type) {
-+ case CEETM_ROOT:
-+ cl->root.ch_id = classid - sch->handle - 1;
-+ err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
-+ break;
-+ case CEETM_PRIO:
-+ cl->prio.qpri = classid - sch->handle - 1;
-+ err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
-+ break;
-+ }
-+
-+ if (err) {
-+ pr_err("%s: Unable to set new %s class\n", __func__,
-+ (copt->type == CEETM_ROOT ? "root" : "prio"));
-+ goto out_free;
-+ }
-+
-+ switch (copt->type) {
-+ case CEETM_ROOT:
-+ pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
-+ __func__, classid, cl->root.ch_id);
-+ break;
-+ case CEETM_PRIO:
-+ pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
-+ __func__, classid, cl->prio.qpri);
-+ break;
-+ }
-+
-+ *arg = (unsigned long)cl;
-+ return 0;
-+
-+out_free:
-+ kfree(cl);
-+ return err;
-+}
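The classid-to-hardware mapping noted in the comment above is a plain offset: class 1:1 under qdisc 1: claims channel (or queue) 0, class 1:2 claims 1, and so on. A tiny standalone sketch of that arithmetic, assuming a qdisc handle of 1: :

#include <stdio.h>

int main(void)
{
	unsigned int qdisc_handle = 0x00010000;	/* "1:", assumed */
	unsigned int classid;

	/* same arithmetic dpaa2_ceetm_cls_add() uses for ch_id / qpri */
	for (classid = 0x00010001; classid <= 0x00010004; classid++)
		printf("classid 1:%x -> ch_id/qpri %u\n",
		       classid & 0xFFFF, classid - qdisc_handle - 1);

	return 0;
}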
-+
-+/* Add or configure a ceetm class */
-+static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
-+ struct nlattr **tca, unsigned long *arg)
-+{
-+ struct dpaa2_ceetm_qdisc *priv;
-+ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
-+ struct nlattr *opt = tca[TCA_OPTIONS];
-+ struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
-+ struct dpaa2_ceetm_tc_copt *copt;
-+ struct net_device *dev = qdisc_dev(sch);
-+ int err;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
-+ __func__, classid, sch->handle);
-+
-+ if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
-+ return -EINVAL;
-+ }
-+
-+ priv = qdisc_priv(sch);
-+
-+ if (!opt) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
-+ dpaa2_ceetm_policy, NULL);
-+ if (err < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
-+ "nla_parse_nested");
-+ return -EINVAL;
-+ }
-+
-+ if (!tb[DPAA2_CEETM_TCA_COPT]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
-+ "tb");
-+ return -EINVAL;
-+ }
-+
-+ copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
-+
-+ /* Configure an existing ceetm class */
-+ if (cl) {
-+ if (copt->type != cl->type) {
-+ pr_err("CEETM: class %X is not of the provided type\n",
-+ cl->common.classid);
-+ return -EINVAL;
-+ }
-+
-+ switch (copt->type) {
-+ case CEETM_ROOT:
-+ return dpaa2_ceetm_cls_change_root(cl, copt, dev);
-+ case CEETM_PRIO:
-+ return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
-+
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid class\n",
-+ __func__);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
-+}
-+
-+static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct dpaa2_ceetm_class *cl;
-+ unsigned int i;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (arg->stop)
-+ return;
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
-+ if (arg->count < arg->skip) {
-+ arg->count++;
-+ continue;
-+ }
-+ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
-+ arg->stop = 1;
-+ return;
-+ }
-+ arg->count++;
-+ }
-+ }
-+}
-+
-+static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
-+ struct sk_buff *skb, struct tcmsg *tcm)
-+{
-+ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
-+ struct nlattr *nest;
-+ struct dpaa2_ceetm_tc_copt copt;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ sch_tree_lock(sch);
-+
-+ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
-+ tcm->tcm_handle = cl->common.classid;
-+
-+ memset(&copt, 0, sizeof(copt));
-+
-+ copt.shaped = cl->shaped;
-+ copt.type = cl->type;
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (cl->child)
-+ tcm->tcm_info = cl->child->handle;
-+
-+ memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
-+ sizeof(struct dpaa2_ceetm_shaping_cfg));
-+
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (cl->child)
-+ tcm->tcm_info = cl->child->handle;
-+
-+ copt.mode = cl->prio.mode;
-+ copt.weight = cl->prio.weight;
-+
-+ break;
-+ }
-+
-+ nest = nla_nest_start(skb, TCA_OPTIONS);
-+ if (!nest)
-+ goto nla_put_failure;
-+ if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
-+ goto nla_put_failure;
-+ nla_nest_end(skb, nest);
-+ sch_tree_unlock(sch);
-+ return skb->len;
-+
-+nla_put_failure:
-+ sch_tree_unlock(sch);
-+ nla_nest_cancel(skb, nest);
-+ return -EMSGSIZE;
-+}
-+
-+static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ sch_tree_lock(sch);
-+ qdisc_class_hash_remove(&priv->clhash, &cl->common);
-+ sch_tree_unlock(sch);
-+ return 0;
-+}
-+
-+/* Get the class' child qdisc, if any */
-+static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ case CEETM_PRIO:
-+ return cl->child;
-+ }
-+
-+ return NULL;
-+}
-+
-+static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
-+ struct Qdisc *new, struct Qdisc **old)
-+{
-+ if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
-+ struct gnet_dump *d)
-+{
-+ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
-+ struct gnet_stats_basic_packed tmp_bstats;
-+ struct dpaa2_ceetm_tc_xstats xstats;
-+ union dpni_statistics dpni_stats;
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
-+ u8 ch_id = 0;
-+ int err;
-+
-+ memset(&xstats, 0, sizeof(xstats));
-+ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
-+
-+ if (cl->type == CEETM_ROOT)
-+ return 0;
-+
-+ err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
-+ DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
-+ &dpni_stats);
-+ if (err)
-+ netdev_warn(dev, "dpni_get_stats(%d) failed - %d\n", 3, err);
-+
-+ xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
-+ xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
-+ xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
-+ xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
-+
-+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
-+}
-+
-+static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch,
-+ unsigned long arg)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+ return cl ? cl->block : priv->block;
-+}
-+
-+static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
-+ unsigned long parent,
-+ u32 classid)
-+{
-+ struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+ return (unsigned long)cl;
-+}
-+
-+static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+}
-+
-+const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
-+ .graft = dpaa2_ceetm_cls_graft,
-+ .leaf = dpaa2_ceetm_cls_leaf,
-+ .find = dpaa2_ceetm_cls_find,
-+ .change = dpaa2_ceetm_cls_change,
-+ .delete = dpaa2_ceetm_cls_delete,
-+ .walk = dpaa2_ceetm_cls_walk,
-+ .tcf_block = dpaa2_ceetm_tcf_block,
-+ .bind_tcf = dpaa2_ceetm_tcf_bind,
-+ .unbind_tcf = dpaa2_ceetm_tcf_unbind,
-+ .dump = dpaa2_ceetm_cls_dump,
-+ .dump_stats = dpaa2_ceetm_cls_dump_stats,
-+};
-+
-+struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
-+ .id = "ceetm",
-+ .priv_size = sizeof(struct dpaa2_ceetm_qdisc),
-+ .cl_ops = &dpaa2_ceetm_cls_ops,
-+ .init = dpaa2_ceetm_init,
-+ .destroy = dpaa2_ceetm_destroy,
-+ .change = dpaa2_ceetm_change,
-+ .dump = dpaa2_ceetm_dump,
-+ .attach = dpaa2_ceetm_attach,
-+ .owner = THIS_MODULE,
-+};
-+
-+/* Run the filters and classifiers attached to the qdisc on the provided skb */
-+int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
-+ int *qdid, u8 *qpri)
-+{
-+ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct dpaa2_ceetm_class *cl = NULL;
-+ struct tcf_result res;
-+ struct tcf_proto *tcf;
-+ int result;
-+
-+ tcf = rcu_dereference_bh(priv->filter_list);
-+ while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
-+#ifdef CONFIG_NET_CLS_ACT
-+ switch (result) {
-+ case TC_ACT_QUEUED:
-+ case TC_ACT_STOLEN:
-+ case TC_ACT_SHOT:
-+ /* No valid class found due to action */
-+ return -1;
-+ }
-+#endif
-+ cl = (void *)res.class;
-+ if (!cl) {
-+ /* The filter leads to the qdisc */
-+ if (res.classid == sch->handle)
-+ return 0;
-+
-+ cl = dpaa2_ceetm_find(res.classid, sch);
-+ /* The filter leads to an invalid class */
-+ if (!cl)
-+ break;
-+ }
-+
-+ /* The class might have its own filters attached */
-+ tcf = rcu_dereference_bh(cl->filter_list);
-+ }
-+
-+ /* No valid class found */
-+ if (!cl)
-+ return 0;
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ *qdid = cl->root.ch_id;
-+
-+ /* The root class does not have a child prio qdisc */
-+ if (!cl->child)
-+ return 0;
-+
-+ /* Run the prio qdisc classifiers */
-+ return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
-+
-+ case CEETM_PRIO:
-+ *qpri = cl->prio.qpri;
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+int __init dpaa2_ceetm_register(void)
-+{
-+ int err = 0;
-+
-+ pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
-+
-+ err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
-+ if (unlikely(err))
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): register_qdisc() = %d\n",
-+ KBUILD_BASENAME ".c", __LINE__, __func__, err);
-+
-+ return err;
-+}
-+
-+void __exit dpaa2_ceetm_unregister(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME ".c", __func__);
-+
-+ unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
-@@ -0,0 +1,183 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2017 NXP
-+ *
-+ */
-+
-+#ifndef __DPAA2_ETH_CEETM_H
-+#define __DPAA2_ETH_CEETM_H
-+
-+#include <net/pkt_sched.h>
-+#include <net/pkt_cls.h>
-+#include <net/netlink.h>
-+
-+#include "dpaa2-eth.h"
-+
-+/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
-+ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
-+ * are reserved for the maximum 32 CEETM channels (majors and minors are in
-+ * hex).
-+ */
-+#define PFIFO_MIN_OFFSET 0x21
-+
-+#define DPAA2_CEETM_MIN_WEIGHT 100
-+#define DPAA2_CEETM_MAX_WEIGHT 24800
-+
-+#define DPAA2_CEETM_TD_THRESHOLD 1000
-+
-+enum wbfs_group_type {
-+ WBFS_GRP_A,
-+ WBFS_GRP_B,
-+ WBFS_GRP_LARGE
-+};
-+
-+enum {
-+ DPAA2_CEETM_TCA_UNSPEC,
-+ DPAA2_CEETM_TCA_COPT,
-+ DPAA2_CEETM_TCA_QOPS,
-+ DPAA2_CEETM_TCA_MAX,
-+};
-+
-+/* CEETM configuration types */
-+enum dpaa2_ceetm_type {
-+ CEETM_ROOT = 1,
-+ CEETM_PRIO,
-+};
-+
-+enum {
-+ STRICT_PRIORITY = 0,
-+ WEIGHTED_A,
-+ WEIGHTED_B,
-+};
-+
-+struct dpaa2_ceetm_shaping_cfg {
-+ __u64 cir; /* committed information rate */
-+ __u64 eir; /* excess information rate */
-+ __u16 cbs; /* committed burst size */
-+ __u16 ebs; /* excess burst size */
-+ __u8 coupled; /* shaper coupling */
-+};
-+
-+extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
-+
-+struct dpaa2_ceetm_class;
-+struct dpaa2_ceetm_qdisc_stats;
-+struct dpaa2_ceetm_class_stats;
-+
-+/* corresponds to CEETM shaping at LNI level */
-+struct dpaa2_root_q {
-+ struct Qdisc **qdiscs;
-+ struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
-+};
-+
-+/* corresponds to the number of priorities a channel serves */
-+struct dpaa2_prio_q {
-+ struct dpaa2_ceetm_class *parent;
-+ struct dpni_tx_priorities_cfg tx_prio_cfg;
-+};
-+
-+struct dpaa2_ceetm_qdisc {
-+ struct Qdisc_class_hash clhash;
-+ struct tcf_proto *filter_list; /* qdisc attached filters */
-+ struct tcf_block *block;
-+
-+ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
-+ bool shaped;
-+ union {
-+ struct dpaa2_root_q root;
-+ struct dpaa2_prio_q prio;
-+ };
-+};
-+
-+/* CEETM Qdisc configuration parameters */
-+struct dpaa2_ceetm_tc_qopt {
-+ enum dpaa2_ceetm_type type;
-+ __u16 shaped;
-+ __u8 prio_group_A;
-+ __u8 prio_group_B;
-+ __u8 separate_groups;
-+};
-+
-+/* root class - corresponds to a channel */
-+struct dpaa2_root_c {
-+ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
-+ u32 ch_id;
-+};
-+
-+/* prio class - corresponds to a strict priority queue (group) */
-+struct dpaa2_prio_c {
-+ struct dpaa2_ceetm_class_stats __percpu *cstats;
-+ u32 qpri;
-+ u8 mode;
-+ u16 weight;
-+};
-+
-+struct dpaa2_ceetm_class {
-+ struct Qdisc_class_common common;
-+ struct tcf_proto *filter_list; /* class attached filters */
-+ struct tcf_block *block;
-+ struct Qdisc *parent;
-+ struct Qdisc *child;
-+
-+ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
-+ bool shaped;
-+ union {
-+ struct dpaa2_root_c root;
-+ struct dpaa2_prio_c prio;
-+ };
-+};
-+
-+/* CEETM Class configuration parameters */
-+struct dpaa2_ceetm_tc_copt {
-+ enum dpaa2_ceetm_type type;
-+ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
-+ __u16 shaped;
-+ __u8 mode;
-+ __u16 weight;
-+};
-+
-+/* CEETM stats */
-+struct dpaa2_ceetm_qdisc_stats {
-+ __u32 drops;
-+};
-+
-+struct dpaa2_ceetm_class_stats {
-+ /* Software counters */
-+ struct gnet_stats_basic_packed bstats;
-+ __u32 ern_drop_count;
-+ __u32 congested_count;
-+};
-+
-+struct dpaa2_ceetm_tc_xstats {
-+ __u64 ceetm_dequeue_bytes;
-+ __u64 ceetm_dequeue_frames;
-+ __u64 ceetm_reject_bytes;
-+ __u64 ceetm_reject_frames;
-+};
-+
-+#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
-+int __init dpaa2_ceetm_register(void);
-+void __exit dpaa2_ceetm_unregister(void);
-+int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
-+ int *qdid, u8 *qpri);
-+#else
-+static inline int dpaa2_ceetm_register(void)
-+{
-+ return 0;
-+}
-+
-+static inline void dpaa2_ceetm_unregister(void) {}
-+
-+static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
-+ int *qdid, u8 *qpri)
-+{
-+ return 0;
-+}
-+#endif
-+
-+static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
-+{
-+ return priv->ceetm_en;
-+}
-+
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
-@@ -0,0 +1,356 @@
-+
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/debugfs.h>
-+#include "dpaa2-eth.h"
-+#include "dpaa2-eth-debugfs.h"
-+
-+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
-+
-+static struct dentry *dpaa2_dbg_root;
-+
-+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct rtnl_link_stats64 *stats;
-+ struct dpaa2_eth_drv_stats *extras;
-+ int i;
-+
-+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
-+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
-+ "Tx SG", "Tx realloc", "Enq busy");
-+
-+ for_each_online_cpu(i) {
-+ stats = per_cpu_ptr(priv->percpu_stats, i);
-+ extras = per_cpu_ptr(priv->percpu_extras, i);
-+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
-+ i,
-+ stats->rx_packets,
-+ stats->rx_errors,
-+ extras->rx_sg_frames,
-+ stats->tx_packets,
-+ stats->tx_errors,
-+ extras->tx_conf_frames,
-+ extras->tx_sg_frames,
-+ extras->tx_reallocs,
-+ extras->tx_portal_busy);
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
-+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-+
-+ err = single_open(file, dpaa2_dbg_cpu_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
-+
-+ return err;
-+}
-+
-+static const struct file_operations dpaa2_dbg_cpu_ops = {
-+ .open = dpaa2_dbg_cpu_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
-+{
-+ switch (fq->type) {
-+ case DPAA2_RX_FQ:
-+ return "Rx";
-+ case DPAA2_TX_CONF_FQ:
-+ return "Tx conf";
-+ case DPAA2_RX_ERR_FQ:
-+ return "Rx err";
-+ default:
-+ return "N/A";
-+ }
-+}
-+
-+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct dpaa2_eth_fq *fq;
-+ u32 fcnt, bcnt;
-+ int i, err;
-+
-+ seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
-+ "VFQID", "CPU", "Traffic Class", "Type", "Frames",
-+ "Pending frames");
-+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
-+ if (err)
-+ fcnt = 0;
-+
-+ /* A lot of queues, no use displaying zero traffic ones */
-+ if (!fq->stats.frames && !fcnt)
-+ continue;
-+
-+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
-+ fq->fqid,
-+ fq->target_cpu,
-+ fq->tc,
-+ fq_type_to_str(fq),
-+ fq->stats.frames,
-+ fcnt);
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
-+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-+
-+ err = single_open(file, dpaa2_dbg_fqs_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
-+
-+ return err;
-+}
-+
-+static const struct file_operations dpaa2_dbg_fq_ops = {
-+ .open = dpaa2_dbg_fqs_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct dpaa2_eth_channel *ch;
-+ int i;
-+
-+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
-+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
-+ "Avg frm/CDAN", "Buf count");
-+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
-+ ch->ch_id,
-+ ch->nctx.desired_cpu,
-+ ch->stats.dequeue_portal_busy,
-+ ch->stats.frames,
-+ ch->stats.cdan,
-+ ch->stats.frames / ch->stats.cdan,
-+ ch->buf_count);
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
-+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-+
-+ err = single_open(file, dpaa2_dbg_ch_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
-+
-+ return err;
-+}
-+
-+static const struct file_operations dpaa2_dbg_ch_ops = {
-+ .open = dpaa2_dbg_ch_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
-+ size_t count, loff_t *offset)
-+{
-+ struct dpaa2_eth_priv *priv = file->private_data;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct dpaa2_eth_fq *fq;
-+ struct dpaa2_eth_channel *ch;
-+ int i;
-+
-+ for_each_online_cpu(i) {
-+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
-+ memset(percpu_stats, 0, sizeof(*percpu_stats));
-+
-+ percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
-+ memset(percpu_extras, 0, sizeof(*percpu_extras));
-+ }
-+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ memset(&fq->stats, 0, sizeof(fq->stats));
-+ }
-+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ memset(&ch->stats, 0, sizeof(ch->stats));
-+ }
-+
-+ return count;
-+}
-+
-+static const struct file_operations dpaa2_dbg_reset_ops = {
-+ .open = simple_open,
-+ .write = dpaa2_dbg_reset_write,
-+};
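Any write to this file clears the software counters (per-CPU, per-FQ and per-channel); the written bytes themselves are ignored. A hedged userspace example, assuming debugfs is mounted at /sys/kernel/debug and an interface named eth0 (the directory name follows the netdev name, so eth0 is only a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "eth0" is hypothetical; one directory is created per interface */
	const char *path = "/sys/kernel/debug/dpaa2-eth/eth0/reset_stats";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* the content is irrelevant; the write itself triggers the reset */
	if (write(fd, "1", 1) < 0)
		perror("write");

	close(fd);
	return 0;
}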
-+
-+static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
-+ const char __user *buf,
-+ size_t count, loff_t *offset)
-+{
-+ struct dpaa2_eth_priv *priv = file->private_data;
-+ int err;
-+
-+ err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
-+ if (err)
-+ netdev_err(priv->net_dev,
-+ "dpni_reset_statistics() failed %d\n", err);
-+
-+ return count;
-+}
-+
-+static const struct file_operations dpaa2_dbg_reset_mc_ops = {
-+ .open = simple_open,
-+ .write = dpaa2_dbg_reset_mc_write,
-+};
-+
-+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
-+{
-+ if (!dpaa2_dbg_root)
-+ return;
-+
-+ /* Create a directory for the interface */
-+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
-+ dpaa2_dbg_root);
-+ if (!priv->dbg.dir) {
-+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
-+ return;
-+ }
-+
-+ /* per-cpu stats file */
-+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_cpu_ops);
-+ if (!priv->dbg.cpu_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_cpu_stats;
-+ }
-+
-+ /* per-fq stats file */
-+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_fq_ops);
-+ if (!priv->dbg.fq_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_fq_stats;
-+ }
-+
-+ /* per-channel stats file */
-+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_ch_ops);
-+ if (!priv->dbg.ch_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_ch_stats;
-+ }
-+
-+ /* reset stats */
-+ priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_reset_ops);
-+ if (!priv->dbg.reset_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_reset_stats;
-+ }
-+
-+ /* reset MC stats */
-+ priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
-+ 0222, priv->dbg.dir, priv,
-+ &dpaa2_dbg_reset_mc_ops);
-+ if (!priv->dbg.reset_mc_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_reset_mc_stats;
-+ }
-+
-+ return;
-+
-+err_reset_mc_stats:
-+ debugfs_remove(priv->dbg.reset_stats);
-+err_reset_stats:
-+ debugfs_remove(priv->dbg.ch_stats);
-+err_ch_stats:
-+ debugfs_remove(priv->dbg.fq_stats);
-+err_fq_stats:
-+ debugfs_remove(priv->dbg.cpu_stats);
-+err_cpu_stats:
-+ debugfs_remove(priv->dbg.dir);
-+}
-+
-+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
-+{
-+ debugfs_remove(priv->dbg.reset_mc_stats);
-+ debugfs_remove(priv->dbg.reset_stats);
-+ debugfs_remove(priv->dbg.fq_stats);
-+ debugfs_remove(priv->dbg.ch_stats);
-+ debugfs_remove(priv->dbg.cpu_stats);
-+ debugfs_remove(priv->dbg.dir);
-+}
-+
-+void dpaa2_eth_dbg_init(void)
-+{
-+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
-+ if (!dpaa2_dbg_root) {
-+ pr_err("DPAA2-ETH: debugfs create failed\n");
-+ return;
-+ }
-+
-+ pr_info("DPAA2-ETH: debugfs created\n");
-+}
-+
-+void __exit dpaa2_eth_dbg_exit(void)
-+{
-+ debugfs_remove(dpaa2_dbg_root);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
-@@ -0,0 +1,60 @@
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPAA2_ETH_DEBUGFS_H
-+#define DPAA2_ETH_DEBUGFS_H
-+
-+#include <linux/dcache.h>
-+
-+struct dpaa2_eth_priv;
-+
-+struct dpaa2_debugfs {
-+ struct dentry *dir;
-+ struct dentry *fq_stats;
-+ struct dentry *ch_stats;
-+ struct dentry *cpu_stats;
-+ struct dentry *reset_stats;
-+ struct dentry *reset_mc_stats;
-+};
-+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+void dpaa2_eth_dbg_init(void);
-+void dpaa2_eth_dbg_exit(void);
-+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
-+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
-+#else
-+static inline void dpaa2_eth_dbg_init(void) {}
-+static inline void dpaa2_eth_dbg_exit(void) {}
-+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
-+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
-+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
-+
-+#endif /* DPAA2_ETH_DEBUGFS_H */
---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
-@@ -1,32 +1,5 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /* Copyright 2014-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
- #undef TRACE_SYSTEM
---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-@@ -1,33 +1,6 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/init.h>
- #include <linux/module.h>
-@@ -38,9 +11,14 @@
- #include <linux/msi.h>
- #include <linux/kthread.h>
- #include <linux/iommu.h>
--
-+#include <linux/net_tstamp.h>
-+#include <linux/bpf.h>
-+#include <linux/filter.h>
-+#include <linux/atomic.h>
-+#include <net/sock.h>
- #include "../../fsl-mc/include/mc.h"
- #include "dpaa2-eth.h"
-+#include "dpaa2-eth-ceetm.h"
-
- /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
- * using trace events only need to #include <trace/events/sched.h>
-@@ -52,8 +30,6 @@ MODULE_LICENSE("Dual BSD/GPL");
- MODULE_AUTHOR("Freescale Semiconductor, Inc");
- MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
-
--const char dpaa2_eth_drv_version[] = "0.1";
--
- static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
- dma_addr_t iova_addr)
- {
-@@ -104,26 +80,27 @@ static void free_rx_fd(struct dpaa2_eth_
- /* We don't support any other format */
- return;
-
-- /* For S/G frames, we first need to free all SG entries */
-+ /* For S/G frames, we first need to free all SG entries
-+ * except the first one, which was taken care of already
-+ */
- sgt = vaddr + dpaa2_fd_get_offset(fd);
-- for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
-+ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
- addr = dpaa2_sg_get_addr(&sgt[i]);
- sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-- DMA_FROM_DEVICE);
-+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-
-- skb_free_frag(sg_vaddr);
-+ free_pages((unsigned long)sg_vaddr, 0);
- if (dpaa2_sg_is_final(&sgt[i]))
- break;
- }
-
- free_buf:
-- skb_free_frag(vaddr);
-+ free_pages((unsigned long)vaddr, 0);
- }
-
- /* Build a linear skb based on a single-buffer frame descriptor */
--static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
-- struct dpaa2_eth_channel *ch,
-+static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- void *fd_vaddr)
- {
-@@ -133,8 +110,7 @@ static struct sk_buff *build_linear_skb(
-
- ch->buf_count--;
-
-- skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
-- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
- if (unlikely(!skb))
- return NULL;
-
-@@ -169,16 +145,20 @@ static struct sk_buff *build_frag_skb(st
- /* Get the address and length from the S/G entry */
- sg_addr = dpaa2_sg_get_addr(sge);
- sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
-- dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-- DMA_FROM_DEVICE);
-+ dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-
- sg_length = dpaa2_sg_get_len(sge);
-
- if (i == 0) {
- /* We build the skb around the first data buffer */
-- skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
-- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
- if (unlikely(!skb)) {
-+ /* Free the first SG entry now, since we already
-+ * unmapped it and obtained the virtual address
-+ */
-+ free_pages((unsigned long)sg_vaddr, 0);
-+
- /* We still need to subtract the buffers used
- * by this FD from our software counter
- */
-@@ -213,17 +193,172 @@ static struct sk_buff *build_frag_skb(st
- break;
- }
-
-+ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
-+
- /* Count all data buffers + SG table buffer */
- ch->buf_count -= i + 2;
-
- return skb;
- }
-
-+static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_fd *fd,
-+ void *buf_start,
-+ u16 queue_id)
-+{
-+ struct dpaa2_eth_fq *fq;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct dpaa2_faead *faead;
-+ u32 ctrl, frc;
-+ int i, err;
-+
-+ /* Mark the egress frame annotation area as valid */
-+ frc = dpaa2_fd_get_frc(fd);
-+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
-+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
-+
-+ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
-+ faead = dpaa2_get_faead(buf_start, false);
-+ faead->ctrl = cpu_to_le32(ctrl);
-+ faead->conf_fqid = 0;
-+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
-+
-+ fq = &priv->fq[queue_id];
-+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-+ err = priv->enqueue(priv, fq, fd, 0);
-+ if (err != -EBUSY)
-+ break;
-+ }
-+
-+ percpu_extras->tx_portal_busy += i;
-+ if (unlikely(err)) {
-+ percpu_stats->tx_errors++;
-+ } else {
-+ percpu_stats->tx_packets++;
-+ percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
-+ }
-+
-+ return err;
-+}
-+
-+/* Free buffers acquired from the buffer pool or which were meant to
-+ * be released in the pool
-+ */
-+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ void *vaddr;
-+ int i;
-+
-+ for (i = 0; i < count; i++) {
-+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-+ dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-+ free_pages((unsigned long)vaddr, 0);
-+ }
-+}
-+
-+static void release_fd_buf(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ dma_addr_t addr)
-+{
-+ int err;
-+
-+ ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
-+ if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
-+ return;
-+
-+ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
-+ ch->rel_buf_array,
-+ ch->rel_buf_cnt)) == -EBUSY)
-+ cpu_relax();
-+
-+ if (err)
-+ free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
-+
-+ ch->rel_buf_cnt = 0;
-+}
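release_fd_buf() above batches buffer addresses and only issues a release command once DPAA2_ETH_BUFS_PER_CMD of them have accumulated, retrying while the portal reports busy. A standalone sketch of that batch-and-flush pattern; the batch size and the always-succeeding hw_release() stand-in are illustrative assumptions, not driver values:

#include <errno.h>
#include <stdio.h>

#define BUFS_PER_CMD 7	/* illustrative batch size, not the driver constant */

static unsigned long rel_buf_array[BUFS_PER_CMD];
static int rel_buf_cnt;

/* stand-in for dpaa2_io_service_release(); here it always succeeds */
static int hw_release(const unsigned long *bufs, int cnt)
{
	printf("releasing %d buffers starting at %#lx\n", cnt, bufs[0]);
	return 0;
}

/* same batching idea as release_fd_buf(): accumulate addresses and only
 * talk to the hardware once a full command's worth is available
 */
static void release_buf(unsigned long addr)
{
	rel_buf_array[rel_buf_cnt++] = addr;
	if (rel_buf_cnt < BUFS_PER_CMD)
		return;

	while (hw_release(rel_buf_array, rel_buf_cnt) == -EBUSY)
		;	/* the driver calls cpu_relax() here instead */

	rel_buf_cnt = 0;
}

int main(void)
{
	unsigned long addr;

	for (addr = 0; addr < 20; addr++)
		release_buf(addr);

	return 0;
}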
-+
-+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ struct dpaa2_fd *fd,
-+ u16 queue_id,
-+ void *vaddr)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct bpf_prog *xdp_prog;
-+ struct xdp_buff xdp;
-+ u32 xdp_act = XDP_PASS;
-+
-+ xdp_prog = READ_ONCE(ch->xdp_prog);
-+ if (!xdp_prog)
-+ return xdp_act;
-+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+
-+ xdp.data = vaddr + dpaa2_fd_get_offset(fd);
-+ xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
-+ /* Allow the XDP program to use the specially reserved headroom */
-+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
-+
-+ rcu_read_lock();
-+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
-+
-+ /* xdp.data pointer may have changed */
-+ dpaa2_fd_set_offset(fd, xdp.data - vaddr);
-+ dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
-+
-+ switch (xdp_act) {
-+ case XDP_PASS:
-+ break;
-+ default:
-+ bpf_warn_invalid_xdp_action(xdp_act);
-+ case XDP_ABORTED:
-+ case XDP_DROP:
-+ /* This is our buffer, so we can release it back to hardware */
-+ release_fd_buf(priv, ch, addr);
-+ percpu_stats->rx_dropped++;
-+ break;
-+ case XDP_TX:
-+ if (dpaa2_eth_xdp_tx(priv, fd, vaddr, queue_id)) {
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-+ free_rx_fd(priv, fd, vaddr);
-+ ch->buf_count--;
-+ }
-+ break;
-+ case XDP_REDIRECT:
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-+ ch->buf_count--;
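-+ /* Instruct the NAPI poll routine to flush queued XDP_REDIRECT
-+ * frames (xdp_do_flush_map) once the store has been consumed
-+ */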
-+ ch->flush = true;
-+ /* Mark the actual start of the data buffer */
-+ xdp.data_hard_start = vaddr;
-+ if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog))
-+ free_rx_fd(priv, fd, vaddr);
-+ break;
-+ }
-+
-+ if (xdp_act == XDP_TX || xdp_act == XDP_REDIRECT) {
-+ percpu_stats->rx_packets++;
-+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
-+ }
-+
-+ rcu_read_unlock();
-+
-+ return xdp_act;
-+}
-+
- /* Main Rx frame processing routine */
- static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
-- struct napi_struct *napi)
-+ struct dpaa2_eth_fq *fq)
- {
- dma_addr_t addr = dpaa2_fd_get_addr(fd);
- u8 fd_format = dpaa2_fd_get_format(fd);
-@@ -235,14 +370,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
- struct dpaa2_fas *fas;
- void *buf_data;
- u32 status = 0;
-+ u32 xdp_act;
-
- /* Tracing point */
- trace_dpaa2_rx_fd(priv->net_dev, fd);
-
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
-+ dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-
-- fas = dpaa2_get_fas(vaddr);
-+ fas = dpaa2_get_fas(vaddr, false);
- prefetch(fas);
- buf_data = vaddr + dpaa2_fd_get_offset(fd);
- prefetch(buf_data);
-@@ -251,22 +388,43 @@ static void dpaa2_eth_rx(struct dpaa2_et
- percpu_extras = this_cpu_ptr(priv->percpu_extras);
-
- if (fd_format == dpaa2_fd_single) {
-- skb = build_linear_skb(priv, ch, fd, vaddr);
-+ xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd,
-+ fq->flowid, vaddr);
-+ if (xdp_act != XDP_PASS)
-+ return;
-+
-+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-+ skb = build_linear_skb(ch, fd, vaddr);
- } else if (fd_format == dpaa2_fd_sg) {
-+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
- skb = build_frag_skb(priv, ch, buf_data);
-- skb_free_frag(vaddr);
-+ free_pages((unsigned long)vaddr, 0);
- percpu_extras->rx_sg_frames++;
- percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
- } else {
- /* We don't support any other format */
-- goto err_frame_format;
-+ goto drop_cnt;
- }
-
- if (unlikely(!skb))
-- goto err_build_skb;
-+ goto drop_fd;
-
- prefetch(skb->data);
-
-+ /* Get the timestamp value */
-+ if (priv->ts_rx_en) {
-+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-+ __le64 *ts = dpaa2_get_ts(vaddr, false);
-+ u64 ns;
-+
-+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-+
-+ ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
-+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
-+ }
-+
- /* Check if we need to validate the L4 csum */
- if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
- status = le32_to_cpu(fas->status);
-@@ -274,30 +432,80 @@ static void dpaa2_eth_rx(struct dpaa2_et
- }
-
- skb->protocol = eth_type_trans(skb, priv->net_dev);
-+ skb_record_rx_queue(skb, fq->flowid);
-
- percpu_stats->rx_packets++;
- percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
-
-- napi_gro_receive(napi, skb);
-+ napi_gro_receive(&ch->napi, skb);
-
- return;
-
--err_build_skb:
-+drop_fd:
- free_rx_fd(priv, fd, vaddr);
--err_frame_format:
-+drop_cnt:
- percpu_stats->rx_dropped++;
- }
-
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+/* Processing of Rx frames received on the error FQ
-+ * We check and print the error bits and then free the frame
-+ */
-+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct napi_struct *napi __always_unused,
-+ u16 queue_id __always_unused)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ void *vaddr;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_fas *fas;
-+ u32 status = 0;
-+ u32 fd_errors;
-+ bool has_fas_errors = false;
-+
-+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
-+
-+ /* check frame errors in the FD field */
-+ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
-+ if (likely(fd_errors)) {
-+ has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
-+ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
-+ fd_errors);
-+ }
-+
-+ /* check frame errors in the FAS field */
-+ if (has_fas_errors) {
-+ fas = dpaa2_get_fas(vaddr, false);
-+ status = le32_to_cpu(fas->status);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
-+ status & DPAA2_FAS_RX_ERR_MASK);
-+ }
-+ free_rx_fd(priv, fd, vaddr);
-+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_stats->rx_errors++;
-+ ch->buf_count--;
-+}
-+#endif
-+
- /* Consume all frames pull-dequeued into the store. This is the simplest way to
- * make sure we don't accidentally issue another volatile dequeue which would
- * overwrite (leak) frames already in the store.
- *
- * Observance of NAPI budget is not our concern, leaving that to the caller.
- */
--static int consume_frames(struct dpaa2_eth_channel *ch)
-+static int consume_frames(struct dpaa2_eth_channel *ch,
-+ struct dpaa2_eth_fq **src)
- {
- struct dpaa2_eth_priv *priv = ch->priv;
-- struct dpaa2_eth_fq *fq;
-+ struct dpaa2_eth_fq *fq = NULL;
- struct dpaa2_dq *dq;
- const struct dpaa2_fd *fd;
- int cleaned = 0;
-@@ -315,16 +523,51 @@ static int consume_frames(struct dpaa2_e
- }
-
- fd = dpaa2_dq_fd(dq);
-+ prefetch(fd);
-+
- fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
-- fq->stats.frames++;
-
-- fq->consume(priv, ch, fd, &ch->napi);
-+ fq->consume(priv, ch, fd, fq);
- cleaned++;
- } while (!is_last);
-
-+ if (!cleaned)
-+ return 0;
-+
-+ fq->stats.frames += cleaned;
-+ ch->stats.frames += cleaned;
-+
-+ /* A dequeue operation only pulls frames from a single queue
-+ * into the store. Return the frame queue as an out param.
-+ */
-+ if (src)
-+ *src = fq;
-+
- return cleaned;
- }
-
-+/* Configure the egress frame annotation for timestamp update */
-+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
-+{
-+ struct dpaa2_faead *faead;
-+ u32 ctrl, frc;
-+
-+ /* Mark the egress frame annotation area as valid */
-+ frc = dpaa2_fd_get_frc(fd);
-+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
-+
-+ /* Set hardware annotation size */
-+ ctrl = dpaa2_fd_get_ctrl(fd);
-+ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
-+
-+ /* Enable the UPD (update prepended data) bit in the FAEAD field
-+ * of the hardware frame annotation area
-+ */
-+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
-+ faead = dpaa2_get_faead(buf_start, true);
-+ faead->ctrl = cpu_to_le32(ctrl);
-+}
-+
- /* Create a frame descriptor based on a fragmented skb */
- static int build_sg_fd(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
-@@ -341,7 +584,6 @@ static int build_sg_fd(struct dpaa2_eth_
- int num_sg;
- int num_dma_bufs;
- struct dpaa2_eth_swa *swa;
-- struct dpaa2_fas *fas;
-
- /* Create and map scatterlist.
- * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
-@@ -365,21 +607,14 @@ static int build_sg_fd(struct dpaa2_eth_
-
- /* Prepare the HW SGT structure */
- sgt_buf_size = priv->tx_data_offset +
-- sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-- sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
-+ sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
-+ sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
- if (unlikely(!sgt_buf)) {
- err = -ENOMEM;
- goto sgt_buf_alloc_failed;
- }
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
--
-- /* PTA from egress side is passed as is to the confirmation side so
-- * we need to clear some fields here in order to find consistent values
-- * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-- * field from the hardware annotation area
-- */
-- fas = dpaa2_get_fas(sgt_buf);
-- memset(fas, 0, DPAA2_FAS_SIZE);
-+ memset(sgt_buf, 0, sgt_buf_size);
-
- sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
-
-@@ -402,10 +637,11 @@ static int build_sg_fd(struct dpaa2_eth_
- * all of them on Tx Conf.
- */
- swa = (struct dpaa2_eth_swa *)sgt_buf;
-- swa->skb = skb;
-- swa->scl = scl;
-- swa->num_sg = num_sg;
-- swa->num_dma_bufs = num_dma_bufs;
-+ swa->type = DPAA2_ETH_SWA_SG;
-+ swa->sg.skb = skb;
-+ swa->sg.scl = scl;
-+ swa->sg.num_sg = num_sg;
-+ swa->sg.sgt_size = sgt_buf_size;
-
- /* Separately map the SGT buffer */
- addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
-@@ -417,13 +653,15 @@ static int build_sg_fd(struct dpaa2_eth_
- dpaa2_fd_set_format(fd, dpaa2_fd_sg);
- dpaa2_fd_set_addr(fd, addr);
- dpaa2_fd_set_len(fd, skb->len);
-- dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
-- DPAA2_FD_CTRL_PTV1);
-+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
-+
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-+ enable_tx_tstamp(fd, sgt_buf);
-
- return 0;
-
- dma_map_single_failed:
-- kfree(sgt_buf);
-+ skb_free_frag(sgt_buf);
- sgt_buf_alloc_failed:
- dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
- dma_map_sg_failed:
-@@ -437,29 +675,27 @@ static int build_single_fd(struct dpaa2_
- struct dpaa2_fd *fd)
- {
- struct device *dev = priv->net_dev->dev.parent;
-- u8 *buffer_start;
-- struct dpaa2_fas *fas;
-- struct sk_buff **skbh;
-+ u8 *buffer_start, *aligned_start;
-+ struct dpaa2_eth_swa *swa;
- dma_addr_t addr;
-
-- buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
-- DPAA2_ETH_TX_BUF_ALIGN,
-- DPAA2_ETH_TX_BUF_ALIGN);
--
-- /* PTA from egress side is passed as is to the confirmation side so
-- * we need to clear some fields here in order to find consistent values
-- * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-- * field from the hardware annotation area
-+ buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
-+
-+ /* If there's enough room to align the FD address, do it.
-+ * It will help hardware optimize accesses.
- */
-- fas = dpaa2_get_fas(buffer_start);
-- memset(fas, 0, DPAA2_FAS_SIZE);
-+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
-+ DPAA2_ETH_TX_BUF_ALIGN);
-+ if (aligned_start >= skb->head)
-+ buffer_start = aligned_start;
-
- /* Store a backpointer to the skb at the beginning of the buffer
- * (in the private data area) such that we can release it
- * on Tx confirm
- */
-- skbh = (struct sk_buff **)buffer_start;
-- *skbh = skb;
-+ swa = (struct dpaa2_eth_swa *)buffer_start;
-+ swa->type = DPAA2_ETH_SWA_SINGLE;
-+ swa->single.skb = skb;
-
- addr = dma_map_single(dev, buffer_start,
- skb_tail_pointer(skb) - buffer_start,
-@@ -471,8 +707,10 @@ static int build_single_fd(struct dpaa2_
- dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
- dpaa2_fd_set_len(fd, skb->len);
- dpaa2_fd_set_format(fd, dpaa2_fd_single);
-- dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
-- DPAA2_FD_CTRL_PTV1);
-+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
-+
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-+ enable_tx_tstamp(fd, buffer_start);
-
- return 0;
- }
-@@ -483,72 +721,75 @@ static int build_single_fd(struct dpaa2_
- * back-pointed to is also freed.
- * This can be called either from dpaa2_eth_tx_conf() or on the error path of
- * dpaa2_eth_tx().
-- * Optionally, return the frame annotation status word (FAS), which needs
-- * to be checked if we're on the confirmation path.
- */
- static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-- const struct dpaa2_fd *fd,
-- u32 *status)
-+ const struct dpaa2_fd *fd, bool in_napi)
- {
- struct device *dev = priv->net_dev->dev.parent;
- dma_addr_t fd_addr;
-- struct sk_buff **skbh, *skb;
-+ struct sk_buff *skb = NULL;
- unsigned char *buffer_start;
-- int unmap_size;
-- struct scatterlist *scl;
-- int num_sg, num_dma_bufs;
- struct dpaa2_eth_swa *swa;
- u8 fd_format = dpaa2_fd_get_format(fd);
-- struct dpaa2_fas *fas;
-
- fd_addr = dpaa2_fd_get_addr(fd);
-- skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
-- fas = dpaa2_get_fas(skbh);
-+ buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
-+ swa = (struct dpaa2_eth_swa *)buffer_start;
-
- if (fd_format == dpaa2_fd_single) {
-- skb = *skbh;
-- buffer_start = (unsigned char *)skbh;
-- /* Accessing the skb buffer is safe before dma unmap, because
-- * we didn't map the actual skb shell.
-- */
-- dma_unmap_single(dev, fd_addr,
-- skb_tail_pointer(skb) - buffer_start,
-- DMA_BIDIRECTIONAL);
-+ if (swa->type == DPAA2_ETH_SWA_SINGLE) {
-+ skb = swa->single.skb;
-+ /* Accessing the skb buffer is safe before dma unmap,
-+ * because we didn't map the actual skb shell.
-+ */
-+ dma_unmap_single(dev, fd_addr,
-+ skb_tail_pointer(skb) - buffer_start,
-+ DMA_BIDIRECTIONAL);
-+ } else {
-+ WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP,
-+ "Wrong SWA type");
-+ dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
-+ DMA_BIDIRECTIONAL);
-+ }
- } else if (fd_format == dpaa2_fd_sg) {
-- swa = (struct dpaa2_eth_swa *)skbh;
-- skb = swa->skb;
-- scl = swa->scl;
-- num_sg = swa->num_sg;
-- num_dma_bufs = swa->num_dma_bufs;
-+ skb = swa->sg.skb;
-
- /* Unmap the scatterlist */
-- dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
-- kfree(scl);
-+ dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
-+ kfree(swa->sg.scl);
-
- /* Unmap the SGT buffer */
-- unmap_size = priv->tx_data_offset +
-- sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-- dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
-+ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
-+ DMA_BIDIRECTIONAL);
- } else {
-- /* Unsupported format, mark it as errored and give up */
-- if (status)
-- *status = ~0;
-+ netdev_dbg(priv->net_dev, "Invalid FD format\n");
- return;
- }
-
-- /* Read the status from the Frame Annotation after we unmap the first
-- * buffer but before we free it. The caller function is responsible
-- * for checking the status value.
-- */
-- if (status)
-- *status = le32_to_cpu(fas->status);
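-+ /* XDP frames have no skb to release; just free the buffer */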
-+ if (swa->type == DPAA2_ETH_SWA_XDP) {
-+ page_frag_free(buffer_start);
-+ return;
-+ }
-+
-+ /* Get the timestamp value */
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-+ struct skb_shared_hwtstamps shhwtstamps;
-+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
-+ u64 ns;
-+
-+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-+
-+ ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
-+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ }
-
-- /* Free SGT buffer kmalloc'ed on tx */
-+ /* Free SGT buffer allocated on tx */
- if (fd_format != dpaa2_fd_single)
-- kfree(skbh);
-+ skb_free_frag(buffer_start);
-
- /* Move on with skb release */
-- dev_kfree_skb(skb);
-+ napi_consume_skb(skb, in_napi);
- }
-
- static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
-@@ -558,20 +799,41 @@ static netdev_tx_t dpaa2_eth_tx(struct s
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa2_eth_drv_stats *percpu_extras;
- struct dpaa2_eth_fq *fq;
-+ struct netdev_queue *nq;
- u16 queue_mapping;
-- int err, i;
-+ unsigned int needed_headroom;
-+ u32 fd_len;
-+ u8 prio;
-+ int err, i, ch_id = 0;
-+
-+ queue_mapping = skb_get_queue_mapping(skb);
-+ prio = netdev_txq_to_tc(net_dev, queue_mapping);
-+ /* Hardware interprets priority level 0 as being the highest,
-+ * so we need to do a reverse mapping to the netdev tc index
-+ */
-+ if (net_dev->num_tc)
-+ prio = net_dev->num_tc - prio - 1;
-+
-+ queue_mapping %= dpaa2_eth_queue_count(priv);
-+ fq = &priv->fq[queue_mapping];
-
- percpu_stats = this_cpu_ptr(priv->percpu_stats);
- percpu_extras = this_cpu_ptr(priv->percpu_extras);
-
-- if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
-+ needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
-+ if (skb_headroom(skb) < needed_headroom) {
- struct sk_buff *ns;
-
-- ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
-+ ns = skb_realloc_headroom(skb, needed_headroom);
- if (unlikely(!ns)) {
- percpu_stats->tx_dropped++;
- goto err_alloc_headroom;
- }
-+ percpu_extras->tx_reallocs++;
-+
-+ if (skb->sk)
-+ skb_set_owner_w(ns, skb->sk);
-+
- dev_kfree_skb(skb);
- skb = ns;
- }
-@@ -602,17 +864,24 @@ static netdev_tx_t dpaa2_eth_tx(struct s
- goto err_build_fd;
- }
-
-+ if (dpaa2_eth_ceetm_is_enabled(priv)) {
-+ err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio);
-+ if (err)
-+ goto err_ceetm_classify;
-+ }
-+
- /* Tracing point */
- trace_dpaa2_tx_fd(net_dev, &fd);
-
-- /* TxConf FQ selection primarily based on cpu affinity; this is
-- * non-migratable context, so it's safe to call smp_processor_id().
-+ fd_len = dpaa2_fd_get_len(&fd);
-+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
-+ netdev_tx_sent_queue(nq, fd_len);
-+
-+ /* Everything that happens after this enqueue might race with
-+ * the Tx confirmation callback for this frame
- */
-- queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
-- fq = &priv->fq[queue_mapping];
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-- err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
-- fq->tx_qdbin, &fd);
-+ err = priv->enqueue(priv, fq, &fd, 0);
- if (err != -EBUSY)
- break;
- }
-@@ -620,14 +889,17 @@ static netdev_tx_t dpaa2_eth_tx(struct s
- if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
- /* Clean up everything, including freeing the skb */
-- free_tx_fd(priv, &fd, NULL);
-+ free_tx_fd(priv, &fd, false);
-+ netdev_tx_completed_queue(nq, 1, fd_len);
- } else {
- percpu_stats->tx_packets++;
-- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
-+ percpu_stats->tx_bytes += fd_len;
- }
-
- return NETDEV_TX_OK;
-
-+err_ceetm_classify:
-+ free_tx_fd(priv, &fd, false);
- err_build_fd:
- err_alloc_headroom:
- dev_kfree_skb(skb);
-@@ -637,48 +909,39 @@ err_alloc_headroom:
-
- /* Tx confirmation frame processing routine */
- static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
-- struct dpaa2_eth_channel *ch,
-+ struct dpaa2_eth_channel *ch __always_unused,
- const struct dpaa2_fd *fd,
-- struct napi_struct *napi __always_unused)
-+ struct dpaa2_eth_fq *fq)
- {
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa2_eth_drv_stats *percpu_extras;
-- u32 status = 0;
-+ u32 fd_len = dpaa2_fd_get_len(fd);
- u32 fd_errors;
-- bool has_fas_errors = false;
-
- /* Tracing point */
- trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
-
- percpu_extras = this_cpu_ptr(priv->percpu_extras);
- percpu_extras->tx_conf_frames++;
-- percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
-+ percpu_extras->tx_conf_bytes += fd_len;
-+
-+ fq->dq_frames++;
-+ fq->dq_bytes += fd_len;
-
- /* Check frame errors in the FD field */
- fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-- if (unlikely(fd_errors)) {
-- /* We only check error bits in the FAS field if corresponding
-- * FAERR bit is set in FD and the FAS field is marked as valid
-- */
-- has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
-- !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-- if (net_ratelimit())
-- netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
-- fd_errors);
-- }
--
-- free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
-+ free_tx_fd(priv, fd, true);
-
- if (likely(!fd_errors))
- return;
-
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
-+ fd_errors);
-+
- percpu_stats = this_cpu_ptr(priv->percpu_stats);
- /* Tx-conf logically pertains to the egress path. */
- percpu_stats->tx_errors++;
--
-- if (has_fas_errors && net_ratelimit())
-- netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n",
-- status & DPAA2_FAS_TX_ERR_MASK);
- }
-
- static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
-@@ -728,26 +991,29 @@ static int set_tx_csum(struct dpaa2_eth_
- /* Perform a single release command to add buffers
- * to the specified buffer pool
- */
--static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
-+static int add_bufs(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch, u16 bpid)
- {
- struct device *dev = priv->net_dev->dev.parent;
- u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-- void *buf;
-+ struct page *page;
- dma_addr_t addr;
-- int i;
-+ int i, err;
-
- for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
- /* Allocate buffer visible to WRIOP + skb shared info +
- * alignment padding
- */
-- buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
-- if (unlikely(!buf))
-+ /* allocate one page for each Rx buffer. WRIOP sees
-+ * the entire page except for a tailroom reserved for
-+ * skb shared info
-+ */
-+ page = dev_alloc_pages(0);
-+ if (!page)
- goto err_alloc;
-
-- buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
--
-- addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-- DMA_FROM_DEVICE);
-+ addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr)))
- goto err_map;
-
-@@ -755,28 +1021,33 @@ static int add_bufs(struct dpaa2_eth_pri
-
- /* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
-- buf, DPAA2_ETH_BUF_RAW_SIZE,
-+ page, DPAA2_ETH_RX_BUF_RAW_SIZE,
- addr, DPAA2_ETH_RX_BUF_SIZE,
- bpid);
- }
-
- release_bufs:
-- /* In case the portal is busy, retry until successful.
-- * The buffer release function would only fail if the QBMan portal
-- * was busy, which implies portal contention (i.e. more CPUs than
-- * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
-- * there is little we can realistically do, short of giving up -
-- * in which case we'd risk depleting the buffer pool and never again
-- * receiving the Rx interrupt which would kick-start the refill logic.
-- * So just keep retrying, at the risk of being moved to ksoftirqd.
-- */
-- while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
-+ /* In case the portal is busy, retry until successful */
-+ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
-+ buf_array, i)) == -EBUSY)
- cpu_relax();
-+
-+ /* If release command failed, clean up and bail out;
-+ * not much else we can do about it
-+ */
-+ if (err) {
-+ free_bufs(priv, buf_array, i);
-+ return 0;
-+ }
-+
- return i;
-
- err_map:
-- skb_free_frag(buf);
-+ __free_pages(page, 0);
- err_alloc:
-+ /* If we managed to allocate at least some buffers,
-+ * release them to hardware
-+ */
- if (i)
- goto release_bufs;
-
-@@ -796,9 +1067,10 @@ static int seed_pool(struct dpaa2_eth_pr
- */
- preempt_disable();
- for (j = 0; j < priv->num_channels; j++) {
-- for (i = 0; i < DPAA2_ETH_NUM_BUFS;
-+ priv->channel[j]->buf_count = 0;
-+ for (i = 0; i < priv->max_bufs_per_ch;
- i += DPAA2_ETH_BUFS_PER_CMD) {
-- new_count = add_bufs(priv, bpid);
-+ new_count = add_bufs(priv, priv->channel[j], bpid);
- priv->channel[j]->buf_count += new_count;
-
- if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-@@ -818,10 +1090,8 @@ static int seed_pool(struct dpaa2_eth_pr
- */
- static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
- {
-- struct device *dev = priv->net_dev->dev.parent;
- u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-- void *vaddr;
-- int ret, i;
-+ int ret;
-
- do {
- ret = dpaa2_io_service_acquire(NULL, priv->bpid,
-@@ -830,27 +1100,16 @@ static void drain_bufs(struct dpaa2_eth_
- netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
- return;
- }
-- for (i = 0; i < ret; i++) {
-- /* Same logic as on regular Rx path */
-- vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
-- buf_array[i]);
-- dma_unmap_single(dev, buf_array[i],
-- DPAA2_ETH_RX_BUF_SIZE,
-- DMA_FROM_DEVICE);
-- skb_free_frag(vaddr);
-- }
-+ free_bufs(priv, buf_array, ret);
- } while (ret);
- }
-
- static void drain_pool(struct dpaa2_eth_priv *priv)
- {
-- int i;
--
-+ preempt_disable();
- drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
- drain_bufs(priv, 1);
--
-- for (i = 0; i < priv->num_channels; i++)
-- priv->channel[i]->buf_count = 0;
-+ preempt_enable();
- }
-
- /* Function is called from softirq context only, so we don't need to guard
-@@ -862,19 +1121,19 @@ static int refill_pool(struct dpaa2_eth_
- {
- int new_count;
-
-- if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
-+ if (likely(ch->buf_count >= priv->refill_thresh))
- return 0;
-
- do {
-- new_count = add_bufs(priv, bpid);
-+ new_count = add_bufs(priv, ch, bpid);
- if (unlikely(!new_count)) {
- /* Out of memory; abort for now, we'll try later on */
- break;
- }
- ch->buf_count += new_count;
-- } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
-+ } while (ch->buf_count < priv->max_bufs_per_ch);
-
-- if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
-+ if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
- return -ENOMEM;
-
- return 0;
-@@ -887,7 +1146,8 @@ static int pull_channel(struct dpaa2_eth
-
- /* Retry while portal is busy */
- do {
-- err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
-+ err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
-+ ch->store);
- dequeues++;
- cpu_relax();
- } while (err == -EBUSY);
-@@ -908,14 +1168,17 @@ static int pull_channel(struct dpaa2_eth
- static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
- {
- struct dpaa2_eth_channel *ch;
-- int cleaned = 0, store_cleaned;
- struct dpaa2_eth_priv *priv;
-+ int rx_cleaned = 0, txconf_cleaned = 0;
-+ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
-+ struct netdev_queue *nq;
-+ int store_cleaned, work_done;
- int err;
-
- ch = container_of(napi, struct dpaa2_eth_channel, napi);
- priv = ch->priv;
-
-- while (cleaned < budget) {
-+ do {
- err = pull_channel(ch);
- if (unlikely(err))
- break;
-@@ -923,29 +1186,56 @@ static int dpaa2_eth_poll(struct napi_st
- /* Refill pool if appropriate */
- refill_pool(priv, ch, priv->bpid);
-
-- store_cleaned = consume_frames(ch);
-- cleaned += store_cleaned;
-+ store_cleaned = consume_frames(ch, &fq);
-+ if (!store_cleaned)
-+ break;
-+ if (fq->type == DPAA2_RX_FQ) {
-+ rx_cleaned += store_cleaned;
-+ /* If these are XDP_REDIRECT frames, flush them now */
-+ /* TODO: Do we need this? */
-+ if (ch->flush) {
-+ xdp_do_flush_map();
-+ ch->flush = false;
-+ }
-+ } else {
-+ txconf_cleaned += store_cleaned;
-+ /* We have a single Tx conf FQ on this channel */
-+ txc_fq = fq;
-+ }
-
-- /* If we have enough budget left for a full store,
-- * try a new pull dequeue, otherwise we're done here
-+ /* If we either consumed the whole NAPI budget with Rx frames
-+ * or we reached the Tx confirmations threshold, we're done.
- */
-- if (store_cleaned == 0 ||
-- cleaned > budget - DPAA2_ETH_STORE_SIZE)
-- break;
-- }
-+ if (rx_cleaned >= budget ||
-+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
-+ work_done = budget;
-+ goto out;
-+ }
-+ } while (store_cleaned);
-
-- if (cleaned < budget) {
-- napi_complete_done(napi, cleaned);
-- /* Re-enable data available notifications */
-- do {
-- err = dpaa2_io_service_rearm(NULL, &ch->nctx);
-- cpu_relax();
-- } while (err == -EBUSY);
-- }
-+ /* We didn't consume the entire budget, so finish napi and
-+ * re-enable data availability notifications
-+ */
-+ napi_complete_done(napi, rx_cleaned);
-+ do {
-+ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
-+ cpu_relax();
-+ } while (err == -EBUSY);
-+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
-+ ch->nctx.desired_cpu);
-
-- ch->stats.frames += cleaned;
-+ work_done = max(rx_cleaned, 1);
-
-- return cleaned;
-+out:
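-+ /* Report the Tx confirmations processed in this NAPI cycle to
-+ * the netdev Tx queue (byte queue limit accounting)
-+ */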
-+ if (txc_fq) {
-+ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
-+ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
-+ txc_fq->dq_bytes);
-+ txc_fq->dq_frames = 0;
-+ txc_fq->dq_bytes = 0;
-+ }
-+
-+ return work_done;
- }
-
- static void enable_ch_napi(struct dpaa2_eth_priv *priv)
-@@ -970,9 +1260,23 @@ static void disable_ch_napi(struct dpaa2
- }
- }
-
-+static void update_tx_fqids(struct dpaa2_eth_priv *priv);
-+
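-+/* Rx taildrop configuration depends on the Tx pause frame setting, so
-+ * it is updated whenever the link state reports a change in pause mode
-+ */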
-+static void update_pf(struct dpaa2_eth_priv *priv,
-+ struct dpni_link_state *state)
-+{
-+ bool pause_frames;
-+
-+ pause_frames = !!(state->options & DPNI_LINK_OPT_PAUSE);
-+ if (priv->tx_pause_frames != pause_frames) {
-+ priv->tx_pause_frames = pause_frames;
-+ set_rx_taildrop(priv);
-+ }
-+}
-+
- static int link_state_update(struct dpaa2_eth_priv *priv)
- {
-- struct dpni_link_state state;
-+ struct dpni_link_state state = {0};
- int err;
-
- err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-@@ -988,6 +1292,8 @@ static int link_state_update(struct dpaa
-
- priv->link_state = state;
- if (state.up) {
-+ update_tx_fqids(priv);
-+ update_pf(priv, &state);
- netif_carrier_on(priv->net_dev);
- netif_tx_start_all_queues(priv->net_dev);
- } else {
-@@ -1006,28 +1312,30 @@ static int dpaa2_eth_open(struct net_dev
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int err;
-
-- err = seed_pool(priv, priv->bpid);
-- if (err) {
-- /* Not much to do; the buffer pool, though not filled up,
-- * may still contain some buffers which would enable us
-- * to limp on.
-- */
-- netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-- priv->dpbp_dev->obj_desc.id, priv->bpid);
-- }
--
- /* We'll only start the txqs when the link is actually ready; make sure
- * we don't race against the link up notification, which may come
- * immediately after dpni_enable();
- */
- netif_tx_stop_all_queues(net_dev);
-- enable_ch_napi(priv);
-+
- /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
- * return true and cause 'ip link show' to report the LOWER_UP flag,
- * even though the link notification wasn't even received.
- */
- netif_carrier_off(net_dev);
-
-+ err = seed_pool(priv, priv->bpid);
-+ if (err) {
-+ /* Not much to do; the buffer pool, though not filled up,
-+ * may still contain some buffers which would enable us
-+ * to limp on.
-+ */
-+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-+ priv->dpbp_dev->obj_desc.id, priv->bpid);
-+ }
-+
-+ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
-+
- err = dpni_enable(priv->mc_io, 0, priv->mc_token);
- if (err < 0) {
- netdev_err(net_dev, "dpni_enable() failed\n");
-@@ -1047,48 +1355,17 @@ static int dpaa2_eth_open(struct net_dev
-
- link_state_err:
- enable_err:
-- disable_ch_napi(priv);
-+ priv->refill_thresh = 0;
- drain_pool(priv);
- return err;
- }
-
--/* The DPIO store must be empty when we call this,
-- * at the end of every NAPI cycle.
-- */
--static u32 drain_channel(struct dpaa2_eth_priv *priv,
-- struct dpaa2_eth_channel *ch)
--{
-- u32 drained = 0, total = 0;
--
-- do {
-- pull_channel(ch);
-- drained = consume_frames(ch);
-- total += drained;
-- } while (drained);
--
-- return total;
--}
--
--static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
--{
-- struct dpaa2_eth_channel *ch;
-- int i;
-- u32 drained = 0;
--
-- for (i = 0; i < priv->num_channels; i++) {
-- ch = priv->channel[i];
-- drained += drain_channel(priv, ch);
-- }
--
-- return drained;
--}
--
- static int dpaa2_eth_stop(struct net_device *net_dev)
- {
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-- int dpni_enabled;
-- int retries = 10;
-- u32 drained;
-+ int dpni_enabled = 0;
-+ int retries = 10, i;
-+ int err = 0;
-
- netif_tx_stop_all_queues(net_dev);
- netif_carrier_off(net_dev);
-@@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev
- } while (dpni_enabled && --retries);
- if (!retries) {
- netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
-- /* Must go on and disable NAPI nonetheless, so we don't crash at
-- * the next "ifconfig up"
-+ /* Must go on and finish processing pending frames, so we don't
-+ * crash at the next "ifconfig up"
- */
-+ err = -ETIMEDOUT;
- }
-
-- /* Wait for NAPI to complete on every core and disable it.
-- * In particular, this will also prevent NAPI from being rescheduled if
-- * a new CDAN is serviced, effectively discarding the CDAN. We therefore
-- * don't even need to disarm the channels, except perhaps for the case
-- * of a huge coalescing value.
-- */
-- disable_ch_napi(priv);
-+ priv->refill_thresh = 0;
-
-- /* Manually drain the Rx and TxConf queues */
-- drained = drain_ingress_frames(priv);
-- if (drained)
-- netdev_dbg(net_dev, "Drained %d frames.\n", drained);
-+ /* Wait for all running napi poll routines to finish, so that no
-+ * new refill operations are started
-+ */
-+ for (i = 0; i < priv->num_channels; i++)
-+ napi_synchronize(&priv->channel[i]->napi);
-
- /* Empty the buffer pool */
- drain_pool(priv);
-
-- return 0;
--}
--
--static int dpaa2_eth_init(struct net_device *net_dev)
--{
-- u64 supported = 0;
-- u64 not_supported = 0;
-- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-- u32 options = priv->dpni_attrs.options;
--
-- /* Capabilities listing */
-- supported |= IFF_LIVE_ADDR_CHANGE;
--
-- if (options & DPNI_OPT_NO_MAC_FILTER)
-- not_supported |= IFF_UNICAST_FLT;
-- else
-- supported |= IFF_UNICAST_FLT;
--
-- net_dev->priv_flags |= supported;
-- net_dev->priv_flags &= ~not_supported;
--
-- /* Features */
-- net_dev->features = NETIF_F_RXCSUM |
-- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-- NETIF_F_SG | NETIF_F_HIGHDMA |
-- NETIF_F_LLTX;
-- net_dev->hw_features = net_dev->features;
--
-- return 0;
-+ return err;
- }
-
- static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
-@@ -1200,25 +1445,6 @@ static void dpaa2_eth_get_stats(struct n
- }
- }
-
--static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
--{
-- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-- int err;
--
-- /* Set the maximum Rx frame length to match the transmit side;
-- * account for L2 headers when computing the MFL
-- */
-- err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-- (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
-- if (err) {
-- netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
-- return err;
-- }
--
-- net_dev->mtu = mtu;
-- return 0;
--}
--
- /* Copy mac unicast addresses from @net_dev to @priv.
- * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
- */
-@@ -1380,16 +1606,430 @@ static int dpaa2_eth_set_features(struct
- return 0;
- }
-
-+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
-+ struct hwtstamp_config config;
-+
-+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
-+ return -EFAULT;
-+
-+ switch (config.tx_type) {
-+ case HWTSTAMP_TX_OFF:
-+ priv->ts_tx_en = false;
-+ break;
-+ case HWTSTAMP_TX_ON:
-+ priv->ts_tx_en = true;
-+ break;
-+ default:
-+ return -ERANGE;
-+ }
-+
-+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
-+ priv->ts_rx_en = false;
-+ } else {
-+ priv->ts_rx_en = true;
-+ /* TS is set for all frame types, not only those requested */
-+ config.rx_filter = HWTSTAMP_FILTER_ALL;
-+ }
-+
-+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-+ -EFAULT : 0;
-+}
-+
-+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ if (cmd == SIOCSHWTSTAMP)
-+ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
-+
-+ return -EINVAL;
-+}
-+
-+static int set_buffer_layout(struct dpaa2_eth_priv *priv)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_buffer_layout buf_layout = {0};
-+ u16 rx_buf_align;
-+ int err;
-+
-+ /* We need to check for WRIOP version 1.0.0, but depending on the MC
-+ * version, this number is not always provided correctly on rev1.
-+ * We need to check for both alternatives in this situation.
-+ */
-+ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
-+ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
-+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
-+ else
-+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
-+
-+ /* tx buffer */
-+ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-+ buf_layout.pass_timestamp = true;
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, &buf_layout);
-+ if (err) {
-+ dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
-+ return err;
-+ }
-+
-+ /* tx-confirm buffer */
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
-+ if (err) {
-+ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
-+ return err;
-+ }
-+
-+ /* Now that we've set our tx buffer layout, retrieve the minimum
-+ * required tx data offset.
-+ */
-+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
-+ &priv->tx_data_offset);
-+ if (err) {
-+ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
-+ return err;
-+ }
-+
-+ if ((priv->tx_data_offset % 64) != 0)
-+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
-+ priv->tx_data_offset);
-+
-+ /* rx buffer */
-+ buf_layout.pass_frame_status = true;
-+ buf_layout.pass_parser_result = true;
-+ buf_layout.data_align = rx_buf_align;
-+ buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
-+ buf_layout.private_data_size = 0;
-+ /* If XDP program is attached, reserve extra space for
-+ * potential header expansions
-+ */
-+ if (priv->has_xdp_prog)
-+ buf_layout.data_head_room += XDP_PACKET_HEADROOM;
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
-+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, &buf_layout);
-+ if (err) {
-+ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
-+#define DPNI_ENQUEUE_FQID_VER_MINOR 9
-+
-+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq,
-+ struct dpaa2_fd *fd, u8 prio)
-+{
-+ return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-+ priv->tx_qdid, prio,
-+ fq->tx_qdbin, fd);
-+}
-+
-+static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq,
-+ struct dpaa2_fd *fd,
-+ u8 prio __always_unused)
-+{
-+ return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
-+ fq->tx_fqid, fd);
-+}
-+
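-+/* FQID-based enqueue is only supported starting with DPNI version 7.9;
-+ * older DPNI objects fall back to QDID-based enqueue
-+ */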
-+static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
-+{
-+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
-+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
-+ priv->enqueue = dpaa2_eth_enqueue_qd;
-+ else
-+ priv->enqueue = dpaa2_eth_enqueue_fq;
-+}
-+
-+static void update_tx_fqids(struct dpaa2_eth_priv *priv)
-+{
-+ struct dpaa2_eth_fq *fq;
-+ struct dpni_queue queue;
-+ struct dpni_queue_id qid = {0};
-+ int i, err;
-+
-+ /* We only use Tx FQIDs for FQID-based enqueue, so check
-+ * if DPNI version supports it before updating FQIDs
-+ */
-+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
-+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
-+ return;
-+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ if (fq->type != DPAA2_TX_CONF_FQ)
-+ continue;
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0, fq->flowid,
-+ &queue, &qid);
-+ if (err)
-+ goto out_err;
-+
-+ fq->tx_fqid = qid.fqid;
-+ if (fq->tx_fqid == 0)
-+ goto out_err;
-+ }
-+
-+ return;
-+
-+out_err:
-+ netdev_info(priv->net_dev,
-+ "Error reading Tx FQID, fallback to QDID-based enqueue");
-+ priv->enqueue = dpaa2_eth_enqueue_qd;
-+}
-+
-+static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpaa2_eth_channel *ch;
-+ struct bpf_prog *old_prog = NULL;
-+ int i, err;
-+
-+ /* No support for SG frames */
-+ if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
-+ return -EINVAL;
-+
-+ if (netif_running(net_dev)) {
-+ err = dpaa2_eth_stop(net_dev);
-+ if (err)
-+ return err;
-+ }
-+
-+ if (prog) {
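-+ /* We need one prog reference per channel; the caller already
-+ * holds one, so take num_channels - 1 more
-+ */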
-+ prog = bpf_prog_add(prog, priv->num_channels - 1);
-+ if (IS_ERR(prog))
-+ return PTR_ERR(prog);
-+ }
-+
-+ priv->has_xdp_prog = !!prog;
-+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ old_prog = xchg(&ch->xdp_prog, prog);
-+ if (old_prog)
-+ bpf_prog_put(old_prog);
-+ }
-+
-+ /* When turning XDP on/off we need to do some reconfiguring
-+ * of the Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
-+ * so we are sure no old format buffers will be used from now on
-+ */
-+ if (priv->has_xdp_prog != !!old_prog)
-+ set_buffer_layout(priv);
-+
-+ if (netif_running(net_dev)) {
-+ err = dpaa2_eth_open(net_dev);
-+ if (err)
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
-+
-+ switch (xdp->command) {
-+ case XDP_SETUP_PROG:
-+ return dpaa2_eth_set_xdp(dev, xdp->prog);
-+ case XDP_QUERY_PROG:
-+ xdp->prog_attached = priv->has_xdp_prog;
-+ return 0;
-+ default:
-+ return -EINVAL;
-+ }
-+}
-+
-+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, struct xdp_buff *xdp)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ unsigned int needed_headroom;
-+ struct dpaa2_eth_swa *swa;
-+ struct dpaa2_eth_fq *fq;
-+ struct dpaa2_fd fd;
-+ void *buffer_start, *aligned_start;
-+ dma_addr_t addr;
-+ int err, i;
-+
-+ if (!netif_running(net_dev))
-+ return -ENETDOWN;
-+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
-+
-+ /* We require a minimum headroom to be able to transmit the frame.
-+ * Otherwise return an error and let the original net_device handle it
-+ */
-+ /* TODO: Do we update i/f counters here or just on the Rx device? */
-+ needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
-+ if (xdp->data < xdp->data_hard_start ||
-+ xdp->data - xdp->data_hard_start < needed_headroom) {
-+ percpu_stats->tx_dropped++;
-+ return -EINVAL;
-+ }
-+
-+ /* Setup the FD fields */
-+ memset(&fd, 0, sizeof(fd));
-+
-+ /* Align FD address, if possible */
-+ buffer_start = xdp->data - needed_headroom;
-+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
-+ DPAA2_ETH_TX_BUF_ALIGN);
-+ if (aligned_start >= xdp->data_hard_start)
-+ buffer_start = aligned_start;
-+
-+ swa = (struct dpaa2_eth_swa *)buffer_start;
-+ /* fill in necessary fields here */
-+ swa->type = DPAA2_ETH_SWA_XDP;
-+ swa->xdp.dma_size = xdp->data_end - buffer_start;
-+
-+ addr = dma_map_single(dev, buffer_start,
-+ xdp->data_end - buffer_start,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dev, addr))) {
-+ percpu_stats->tx_dropped++;
-+ return -ENOMEM;
-+ }
-+
-+ dpaa2_fd_set_addr(&fd, addr);
-+ dpaa2_fd_set_offset(&fd, xdp->data - buffer_start);
-+ dpaa2_fd_set_len(&fd, xdp->data_end - xdp->data);
-+ dpaa2_fd_set_format(&fd, dpaa2_fd_single);
-+ dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
-+
-+ fq = &priv->fq[smp_processor_id()];
-+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
-+ fq->tx_qdbin, &fd);
-+ if (err != -EBUSY)
-+ break;
-+ }
-+ percpu_extras->tx_portal_busy += i;
-+ if (unlikely(err < 0)) {
-+ percpu_stats->tx_errors++;
-+ /* let the Rx device handle the cleanup */
-+ return err;
-+ }
-+
-+ percpu_stats->tx_packets++;
-+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
-+
-+ return 0;
-+}
-+
-+static void dpaa2_eth_xdp_flush(struct net_device *net_dev)
-+{
-+ /* We don't have hardware support for Tx batching,
-+ * so we do the actual frame enqueue in ndo_xdp_xmit
-+ */
-+}
-+
-+static int dpaa2_eth_update_xps(struct dpaa2_eth_priv *priv)
-+{
-+ struct net_device *net_dev = priv->net_dev;
-+ unsigned int i, num_queues;
-+ struct cpumask xps_mask;
-+ struct dpaa2_eth_fq *fq;
-+ int err = 0;
-+
-+ num_queues = (net_dev->num_tc ? : 1) * dpaa2_eth_queue_count(priv);
-+ for (i = 0; i < num_queues; i++) {
-+ fq = &priv->fq[i % dpaa2_eth_queue_count(priv)];
-+ cpumask_clear(&xps_mask);
-+ cpumask_set_cpu(fq->target_cpu, &xps_mask);
-+ err = netif_set_xps_queue(net_dev, &xps_mask, i);
-+ if (err) {
-+ dev_info_once(net_dev->dev.parent,
-+ "Error setting XPS queue\n");
-+ break;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
-+ enum tc_setup_type type,
-+ void *type_data)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct tc_mqprio_qopt *mqprio = (struct tc_mqprio_qopt *)type_data;
-+ int i, err = 0;
-+
-+ if (type != TC_SETUP_MQPRIO)
-+ return -EINVAL;
-+
-+ if (mqprio->num_tc > dpaa2_eth_tc_count(priv)) {
-+ netdev_err(net_dev, "Max %d traffic classes supported\n",
-+ dpaa2_eth_tc_count(priv));
-+ return -EINVAL;
-+ }
-+
-+ if (mqprio->num_tc == net_dev->num_tc)
-+ return 0;
-+
-+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
-+
-+ if (!mqprio->num_tc) {
-+ netdev_reset_tc(net_dev);
-+ err = netif_set_real_num_tx_queues(net_dev,
-+ dpaa2_eth_queue_count(priv));
-+ if (err)
-+ return err;
-+
-+ goto update_xps;
-+ }
-+
-+ err = netdev_set_num_tc(net_dev, mqprio->num_tc);
-+ if (err)
-+ return err;
-+
-+ err = netif_set_real_num_tx_queues(net_dev, mqprio->num_tc *
-+ dpaa2_eth_queue_count(priv));
-+ if (err)
-+ return err;
-+
-+ for (i = 0; i < mqprio->num_tc; i++) {
-+ err = netdev_set_tc_queue(net_dev, i,
-+ dpaa2_eth_queue_count(priv),
-+ i * dpaa2_eth_queue_count(priv));
-+ if (err)
-+ return err;
-+ }
-+
-+update_xps:
-+ err = dpaa2_eth_update_xps(priv);
-+ return err;
-+}
-+
- static const struct net_device_ops dpaa2_eth_ops = {
- .ndo_open = dpaa2_eth_open,
- .ndo_start_xmit = dpaa2_eth_tx,
- .ndo_stop = dpaa2_eth_stop,
-- .ndo_init = dpaa2_eth_init,
- .ndo_set_mac_address = dpaa2_eth_set_addr,
- .ndo_get_stats64 = dpaa2_eth_get_stats,
-- .ndo_change_mtu = dpaa2_eth_change_mtu,
- .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
- .ndo_set_features = dpaa2_eth_set_features,
-+ .ndo_do_ioctl = dpaa2_eth_ioctl,
-+ .ndo_xdp = dpaa2_eth_xdp,
-+ .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
-+ .ndo_xdp_flush = dpaa2_eth_xdp_flush,
-+ .ndo_setup_tc = dpaa2_eth_setup_tc,
- };
-
- static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
-@@ -1422,34 +2062,32 @@ static struct fsl_mc_device *setup_dpcon
- err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
- if (err) {
- dev_err(dev, "dpcon_open() failed\n");
-- goto err_open;
-+ goto free;
- }
-
- err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
- if (err) {
- dev_err(dev, "dpcon_reset() failed\n");
-- goto err_reset;
-+ goto close;
- }
-
- err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
- if (err) {
- dev_err(dev, "dpcon_get_attributes() failed\n");
-- goto err_get_attr;
-+ goto close;
- }
-
- err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
- if (err) {
- dev_err(dev, "dpcon_enable() failed\n");
-- goto err_enable;
-+ goto close;
- }
-
- return dpcon;
-
--err_enable:
--err_get_attr:
--err_reset:
-+close:
- dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
--err_open:
-+free:
- fsl_mc_object_free(dpcon);
-
- return NULL;
-@@ -1502,7 +2140,14 @@ err_setup:
- static void free_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *channel)
- {
-+ struct bpf_prog *prog;
-+
- free_dpcon(priv, channel->dpcon);
-+
-+ prog = READ_ONCE(channel->xdp_prog);
-+ if (prog)
-+ bpf_prog_put(prog);
-+
- kfree(channel);
- }
-
-@@ -1546,7 +2191,8 @@ static int setup_dpio(struct dpaa2_eth_p
- nctx->desired_cpu = i;
-
- /* Register the new context */
-- err = dpaa2_io_service_register(NULL, nctx);
-+ channel->dpio = dpaa2_io_service_select(i);
-+ err = dpaa2_io_service_register(channel->dpio, nctx, dev);
- if (err) {
- dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
- /* If no affine DPIO for this core, there's probably
-@@ -1579,14 +2225,14 @@ static int setup_dpio(struct dpaa2_eth_p
- /* Stop if we already have enough channels to accommodate all
- * RX and TX conf queues
- */
-- if (priv->num_channels == dpaa2_eth_queue_count(priv))
-+ if (priv->num_channels == priv->dpni_attrs.num_queues)
- break;
- }
-
- return 0;
-
- err_set_cdan:
-- dpaa2_io_service_deregister(NULL, nctx);
-+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
- err_service_reg:
- free_channel(priv, channel);
- err_alloc_ch:
-@@ -1603,13 +2249,14 @@ err_alloc_ch:
-
- static void free_dpio(struct dpaa2_eth_priv *priv)
- {
-- int i;
-+ struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_eth_channel *ch;
-+ int i;
-
- /* deregister CDAN notifications and free channels */
- for (i = 0; i < priv->num_channels; i++) {
- ch = priv->channel[i];
-- dpaa2_io_service_deregister(NULL, &ch->nctx);
-+ dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
- free_channel(priv, ch);
- }
- }
-@@ -1636,8 +2283,7 @@ static void set_fq_affinity(struct dpaa2
- {
- struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_eth_fq *fq;
-- int rx_cpu, txc_cpu;
-- int i;
-+ int rx_cpu, txc_cpu, i;
-
- /* For each FQ, pick one channel/CPU to deliver frames to.
- * This may well change at runtime, either through irqbalance or
-@@ -1649,6 +2295,7 @@ static void set_fq_affinity(struct dpaa2
- fq = &priv->fq[i];
- switch (fq->type) {
- case DPAA2_RX_FQ:
-+ case DPAA2_RX_ERR_FQ:
- fq->target_cpu = rx_cpu;
- rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
- if (rx_cpu >= nr_cpu_ids)
-@@ -1665,11 +2312,13 @@ static void set_fq_affinity(struct dpaa2
- }
- fq->channel = get_affine_channel(priv, fq->target_cpu);
- }
-+
-+ dpaa2_eth_update_xps(priv);
- }
-
- static void setup_fqs(struct dpaa2_eth_priv *priv)
- {
-- int i;
-+ int i, j;
-
- /* We have one TxConf FQ per Tx flow.
- * The number of Tx and Rx queues is the same.
-@@ -1681,11 +2330,19 @@ static void setup_fqs(struct dpaa2_eth_p
- priv->fq[priv->num_fqs++].flowid = (u16)i;
- }
-
-- for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
-- priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
-- priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
-- priv->fq[priv->num_fqs++].flowid = (u16)i;
-- }
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
-+ for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
-+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
-+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
-+ priv->fq[priv->num_fqs].tc = (u8)i;
-+ priv->fq[priv->num_fqs++].flowid = (u16)j;
-+ }
-+
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ /* We have exactly one Rx error queue per DPNI */
-+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
-+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
-+#endif
-
- /* For each FQ, decide on which core to process incoming frames */
- set_fq_affinity(priv);
-@@ -1735,6 +2392,9 @@ static int setup_dpbp(struct dpaa2_eth_p
- }
- priv->bpid = dpbp_attrs.bpid;
-
-+ /* By default we start with flow control enabled */
-+ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
-+
- return 0;
-
- err_get_attr:
-@@ -1762,7 +2422,7 @@ static int setup_dpni(struct fsl_mc_devi
- struct device *dev = &ls_dev->dev;
- struct dpaa2_eth_priv *priv;
- struct net_device *net_dev;
-- struct dpni_buffer_layout buf_layout = {0};
-+ struct dpni_link_cfg cfg = {0};
- int err;
-
- net_dev = dev_get_drvdata(dev);
-@@ -1772,7 +2432,22 @@ static int setup_dpni(struct fsl_mc_devi
- err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
- if (err) {
- dev_err(dev, "dpni_open() failed\n");
-- goto err_open;
-+ return err;
-+ }
-+
-+ /* Check if we can work with this DPNI object */
-+ err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
-+ &priv->dpni_ver_minor);
-+ if (err) {
-+ dev_err(dev, "dpni_get_api_version() failed\n");
-+ goto close;
-+ }
-+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
-+ dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
-+ priv->dpni_ver_major, priv->dpni_ver_minor,
-+ DPNI_VER_MAJOR, DPNI_VER_MINOR);
-+ err = -ENOTSUPP;
-+ goto close;
- }
-
- ls_dev->mc_io = priv->mc_io;
-@@ -1781,77 +2456,41 @@ static int setup_dpni(struct fsl_mc_devi
- err = dpni_reset(priv->mc_io, 0, priv->mc_token);
- if (err) {
- dev_err(dev, "dpni_reset() failed\n");
-- goto err_reset;
-+ goto close;
- }
-
- err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
- &priv->dpni_attrs);
- if (err) {
- dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
-- goto err_get_attr;
-+ goto close;
- }
-
-- /* Configure buffer layouts */
-- /* rx buffer */
-- buf_layout.pass_parser_result = true;
-- buf_layout.pass_frame_status = true;
-- buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-- buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
-- buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-- DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-- DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
-- DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
-- err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-- DPNI_QUEUE_RX, &buf_layout);
-- if (err) {
-- dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
-- goto err_buf_layout;
-- }
-+ err = set_buffer_layout(priv);
-+ if (err)
-+ goto close;
-
-- /* tx buffer */
-- buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-- DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-- err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-- DPNI_QUEUE_TX, &buf_layout);
-- if (err) {
-- dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
-- goto err_buf_layout;
-- }
-+ set_enqueue_mode(priv);
-
-- /* tx-confirm buffer */
-- buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
-- err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-- DPNI_QUEUE_TX_CONFIRM, &buf_layout);
-- if (err) {
-- dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
-- goto err_buf_layout;
-- }
-+ priv->cls_rule = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
-+ dpaa2_eth_fs_count(priv), GFP_KERNEL);
-+ if (!priv->cls_rule)
-+ goto close;
-
-- /* Now that we've set our tx buffer layout, retrieve the minimum
-- * required tx data offset.
-- */
-- err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
-- &priv->tx_data_offset);
-+ /* Enable flow control */
-+ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
-+ priv->tx_pause_frames = true;
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
- if (err) {
-- dev_err(dev, "dpni_get_tx_data_offset() failed\n");
-- goto err_data_offset;
-+ dev_err(dev, "dpni_set_link_cfg() failed\n");
-+ goto close;
- }
-
-- if ((priv->tx_data_offset % 64) != 0)
-- dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
-- priv->tx_data_offset);
--
-- /* Accommodate software annotation space (SWA) */
-- priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
--
- return 0;
-
--err_data_offset:
--err_buf_layout:
--err_get_attr:
--err_reset:
-+close:
- dpni_close(priv->mc_io, 0, priv->mc_token);
--err_open:
-+
- return err;
- }
-
-@@ -1865,6 +2504,7 @@ static void free_dpni(struct dpaa2_eth_p
- err);
-
- dpni_close(priv->mc_io, 0, priv->mc_token);
-+
- }
-
- static int setup_rx_flow(struct dpaa2_eth_priv *priv,
-@@ -1873,11 +2513,10 @@ static int setup_rx_flow(struct dpaa2_et
- struct device *dev = priv->net_dev->dev.parent;
- struct dpni_queue queue;
- struct dpni_queue_id qid;
-- struct dpni_taildrop td;
- int err;
-
- err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-- DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
-+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
- if (err) {
- dev_err(dev, "dpni_get_queue(RX) failed\n");
- return err;
-@@ -1889,24 +2528,136 @@ static int setup_rx_flow(struct dpaa2_et
- queue.destination.type = DPNI_DEST_DPCON;
- queue.destination.priority = 1;
- queue.user_context = (u64)(uintptr_t)fq;
-+ queue.flc.stash_control = 1;
-+ queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
-+ /* 01 01 00 - data, annotation, flow context */
-+ queue.flc.value |= 0x14;
-+
- err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-- DPNI_QUEUE_RX, 0, fq->flowid,
-- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
-+ DPNI_QUEUE_RX, fq->tc, fq->flowid,
-+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
-+ DPNI_QUEUE_OPT_FLC,
- &queue);
- if (err) {
- dev_err(dev, "dpni_set_queue(RX) failed\n");
- return err;
- }
-
-- td.enable = 1;
-- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
-- err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
-- DPNI_QUEUE_RX, 0, fq->flowid, &td);
-- if (err) {
-- dev_err(dev, "dpni_set_threshold() failed\n");
-- return err;
-+ return 0;
-+}
-+
-+static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
-+ struct dpni_taildrop *td)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ int i, err;
-+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ if (priv->fq[i].type != DPAA2_RX_FQ)
-+ continue;
-+
-+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
-+ priv->fq[i].tc, priv->fq[i].flowid,
-+ td);
-+ if (err) {
-+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
-+ (td->enable ? "Enabled" : "Disabled"),
-+ priv->fq[i].flowid, priv->fq[i].tc);
-+ }
-+
-+ return 0;
-+}
-+
-+static int set_group_taildrop(struct dpaa2_eth_priv *priv,
-+ struct dpni_taildrop *td)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_taildrop disable_td, *tc_td;
-+ int i, err;
-+
-+ memset(&disable_td, 0, sizeof(struct dpni_taildrop));
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
-+ /* Do not set taildrop thresholds for PFC-enabled
-+ * traffic classes. We will enable congestion
-+ * notifications for them.
-+ */
-+ tc_td = &disable_td;
-+ else
-+ tc_td = td;
-+
-+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
-+ i, 0, tc_td);
-+ if (err) {
-+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
-+ (tc_td->enable ? "Enabled" : "Disabled"),
-+ i);
-+ }
-+
-+ return 0;
-+}
-+
-+/* Enable/disable Rx FQ taildrop
-+ *
-+ * Rx FQ taildrop is mutually exclusive with flow control and it only gets
-+ * disabled when FC is active. Depending on FC status, we need to compute
-+ * the maximum number of buffers in the pool differently, so use the
-+ * opportunity to update max number of buffers as well.
-+ */
-+int set_rx_taildrop(struct dpaa2_eth_priv *priv)
-+{
-+ enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
-+ struct dpni_taildrop td_queue, td_group;
-+ int err = 0;
-+
-+ switch (cfg) {
-+ case DPAA2_ETH_TD_NONE:
-+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
-+ memset(&td_group, 0, sizeof(struct dpni_taildrop));
-+ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
-+ priv->num_channels;
-+ break;
-+ case DPAA2_ETH_TD_QUEUE:
-+ memset(&td_group, 0, sizeof(struct dpni_taildrop));
-+ td_queue.enable = 1;
-+ td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
-+ td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
-+ dpaa2_eth_tc_count(priv);
-+ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
-+ break;
-+ case DPAA2_ETH_TD_GROUP:
-+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
-+ td_group.enable = 1;
-+ td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
-+ td_group.threshold = NAPI_POLL_WEIGHT *
-+ dpaa2_eth_queue_count(priv);
-+ priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
-+ dpaa2_eth_tc_count(priv);
-+ break;
-+ default:
-+ break;
- }
-
-+ err = set_queue_taildrop(priv, &td_queue);
-+ if (err)
-+ return err;
-+
-+ err = set_group_taildrop(priv, &td_group);
-+ if (err)
-+ return err;
-+
-+ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
-+
- return 0;
- }
-
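A minimal standalone sketch (userspace C, not kernel code) of the taildrop policy implemented by set_rx_taildrop() and dpaa2_eth_get_td_type() above: group taildrop when PFC is enabled, no taildrop when pause frames handle congestion, per-queue byte thresholds otherwise. The constants and names below are illustrative stand-ins for the driver macros.

#include <stdio.h>

#define TAILDROP_THRESH		(1024 * 1024)	/* 1MB, as in the patch */
#define NAPI_POLL_WEIGHT	64
#define NUM_BUFS_PER_CH		1024
#define NUM_BUFS_FC		256

enum td_cfg { TD_NONE, TD_QUEUE, TD_GROUP };

static enum td_cfg get_td_type(int pfc_enabled, int tx_pause)
{
	if (pfc_enabled)
		return TD_GROUP;	/* per-TC group taildrop */
	if (tx_pause)
		return TD_NONE;		/* flow control replaces taildrop */
	return TD_QUEUE;		/* per-queue byte threshold */
}

int main(void)
{
	int num_tcs = 8, num_queues = 8, num_channels = 8;

	switch (get_td_type(0, 0)) {
	case TD_QUEUE:
		printf("queue taildrop: %d bytes/queue, %d bufs/channel\n",
		       TAILDROP_THRESH / num_tcs, NUM_BUFS_PER_CH);
		break;
	case TD_GROUP:
		printf("group taildrop: %d frames/tc\n",
		       NAPI_POLL_WEIGHT * num_queues);
		break;
	case TD_NONE:
		printf("no taildrop: %d bufs/channel\n",
		       NUM_BUFS_FC / num_channels);
		break;
	}
	return 0;
}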
-@@ -1926,6 +2677,7 @@ static int setup_tx_flow(struct dpaa2_et
- }
-
- fq->tx_qdbin = qid.qdbin;
-+ fq->tx_fqid = qid.fqid;
-
- err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
-@@ -1953,23 +2705,88 @@ static int setup_tx_flow(struct dpaa2_et
- return 0;
- }
-
--/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
--static const struct dpaa2_eth_hash_fields hash_fields[] = {
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
-+ int err;
-+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ fq->fqid = qid.fqid;
-+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 1;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
-+ if (err) {
-+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+#endif
-+
-+/* Supported header fields for Rx hash distribution key */
-+static const struct dpaa2_eth_dist_fields dist_fields[] = {
- {
-+ /* L2 header */
-+ .rxnfc_field = RXH_L2DA,
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_DA,
-+ .id = DPAA2_ETH_DIST_ETHDST,
-+ .size = 6,
-+ }, {
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_SA,
-+ .id = DPAA2_ETH_DIST_ETHSRC,
-+ .size = 6,
-+ }, {
-+ /* This is the last ethertype field parsed:
-+ * depending on frame format, it can be the MAC ethertype
-+ * or the VLAN etype.
-+ */
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_TYPE,
-+ .id = DPAA2_ETH_DIST_ETHTYPE,
-+ .size = 2,
-+ }, {
-+ /* VLAN header */
-+ .rxnfc_field = RXH_VLAN,
-+ .cls_prot = NET_PROT_VLAN,
-+ .cls_field = NH_FLD_VLAN_TCI,
-+ .id = DPAA2_ETH_DIST_VLAN,
-+ .size = 2,
-+ }, {
- /* IP header */
- .rxnfc_field = RXH_IP_SRC,
- .cls_prot = NET_PROT_IP,
- .cls_field = NH_FLD_IP_SRC,
-+ .id = DPAA2_ETH_DIST_IPSRC,
- .size = 4,
- }, {
- .rxnfc_field = RXH_IP_DST,
- .cls_prot = NET_PROT_IP,
- .cls_field = NH_FLD_IP_DST,
-+ .id = DPAA2_ETH_DIST_IPDST,
- .size = 4,
- }, {
- .rxnfc_field = RXH_L3_PROTO,
- .cls_prot = NET_PROT_IP,
- .cls_field = NH_FLD_IP_PROTO,
-+ .id = DPAA2_ETH_DIST_IPPROTO,
- .size = 1,
- }, {
- /* Using UDP ports, this is functionally equivalent to raw
-@@ -1978,41 +2795,170 @@ static const struct dpaa2_eth_hash_field
- .rxnfc_field = RXH_L4_B_0_1,
- .cls_prot = NET_PROT_UDP,
- .cls_field = NH_FLD_UDP_PORT_SRC,
-+ .id = DPAA2_ETH_DIST_L4SRC,
- .size = 2,
- }, {
- .rxnfc_field = RXH_L4_B_2_3,
- .cls_prot = NET_PROT_UDP,
- .cls_field = NH_FLD_UDP_PORT_DST,
-+ .id = DPAA2_ETH_DIST_L4DST,
- .size = 2,
- },
- };
-
--/* Set RX hash options
-+/* Configure the Rx hash key using the legacy API */
-+static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_rx_tc_dist_cfg dist_cfg;
-+ int i, err = 0;
-+
-+ memset(&dist_cfg, 0, sizeof(dist_cfg));
-+
-+ dist_cfg.key_cfg_iova = key;
-+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
-+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
-+ i, &dist_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
-+ break;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+/* Configure the Rx hash key using the new API */
-+static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_rx_dist_cfg dist_cfg;
-+ int i, err = 0;
-+
-+ memset(&dist_cfg, 0, sizeof(dist_cfg));
-+
-+ dist_cfg.key_cfg_iova = key;
-+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ dist_cfg.enable = 1;
-+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ dist_cfg.tc = i;
-+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
-+ &dist_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
-+ break;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+/* Configure the Rx flow classification key */
-+static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_rx_dist_cfg dist_cfg;
-+ int i, err = 0;
-+
-+ memset(&dist_cfg, 0, sizeof(dist_cfg));
-+
-+ dist_cfg.key_cfg_iova = key;
-+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ dist_cfg.enable = 1;
-+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ dist_cfg.tc = i;
-+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
-+ &dist_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
-+ break;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+/* Size of the Rx flow classification key */
-+int dpaa2_eth_cls_key_size(u64 fields)
-+{
-+ int i, size = 0;
-+
-+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
-+ if (!(fields & dist_fields[i].id))
-+ continue;
-+ size += dist_fields[i].size;
-+ }
-+
-+ return size;
-+}
-+
-+/* Offset of header field in Rx classification key */
-+int dpaa2_eth_cls_fld_off(int prot, int field)
-+{
-+ int i, off = 0;
-+
-+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
-+ if (dist_fields[i].cls_prot == prot &&
-+ dist_fields[i].cls_field == field)
-+ return off;
-+ off += dist_fields[i].size;
-+ }
-+
-+ WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
-+ return 0;
-+}
-+
-+/* Prune unused fields from the classification rule.
-+ * Used when masking is not supported
-+ */
-+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
-+{
-+ int off = 0, new_off = 0;
-+ int i, size;
-+
-+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
-+ size = dist_fields[i].size;
-+ if (dist_fields[i].id & fields) {
-+ memcpy(key_mem + new_off, key_mem + off, size);
-+ new_off += size;
-+ }
-+ off += size;
-+ }
-+}
-+
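A standalone sketch (userspace C, not kernel code) of how the three classification-key helpers above fit together: each selected field contributes a fixed number of bytes, and a field's offset is the running sum of the sizes of the fields preceding it in dist_fields[]. The sizes are taken from the table above; the bitmask layout and names below are illustrative.

#include <stdio.h>

/* Field sizes in dist_fields[] order: ETH_DA, ETH_SA, ETH_TYPE, VLAN_TCI,
 * IP_SRC, IP_DST, IP_PROTO, L4_SRC, L4_DST
 */
static const int field_size[] = { 6, 6, 2, 2, 4, 4, 1, 2, 2 };
#define NUM_FIELDS ((int)(sizeof(field_size) / sizeof(field_size[0])))

/* Key size for a bitmask of selected fields (bit i <=> field i) */
static int cls_key_size(unsigned int fields)
{
	int i, size = 0;

	for (i = 0; i < NUM_FIELDS; i++)
		if (fields & (1u << i))
			size += field_size[i];
	return size;
}

/* Offset of field idx inside the full (all-fields) key */
static int cls_fld_off(int idx)
{
	int i, off = 0;

	for (i = 0; i < idx; i++)
		off += field_size[i];
	return off;
}

int main(void)
{
	/* IP_SRC | IP_DST | IP_PROTO | L4_SRC | L4_DST -> bits 4..8 */
	unsigned int default_hash = 0x1F0;

	printf("default hash key size: %d bytes\n", cls_key_size(default_hash));
	printf("offset of IP_DST in full key: %d bytes\n", cls_fld_off(5));
	return 0;
}

With all nine supported fields selected, the full key adds up to 29 bytes, which is the value the key-size helper returns for the "all fields" mask.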
-+/* Set Rx distribution (hash or flow classification) key
- * flags is a combination of RXH_ bits
- */
--static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
-+static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
-+ enum dpaa2_eth_rx_dist type, u64 flags)
- {
- struct device *dev = net_dev->dev.parent;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpkg_profile_cfg cls_cfg;
-- struct dpni_rx_tc_dist_cfg dist_cfg;
-+ u32 rx_hash_fields = 0;
-+ dma_addr_t key_iova;
- u8 *dma_mem;
- int i;
- int err = 0;
-
-- if (!dpaa2_eth_hash_enabled(priv)) {
-- dev_dbg(dev, "Hashing support is not enabled\n");
-- return 0;
-- }
--
- memset(&cls_cfg, 0, sizeof(cls_cfg));
-
-- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
-+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
- struct dpkg_extract *key =
- &cls_cfg.extracts[cls_cfg.num_extracts];
-
-- if (!(flags & hash_fields[i].rxnfc_field))
-+ /* For both Rx hashing and classification keys
-+ * we set only the selected fields.
-+ */
-+ if (!(flags & dist_fields[i].id))
- continue;
-+ if (type == DPAA2_ETH_RX_DIST_HASH)
-+ rx_hash_fields |= dist_fields[i].rxnfc_field;
-
- if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
- dev_err(dev, "error adding key extraction rule, too many rules?\n");
-@@ -2020,12 +2966,10 @@ static int dpaa2_eth_set_hash(struct net
- }
-
- key->type = DPKG_EXTRACT_FROM_HDR;
-- key->extract.from_hdr.prot = hash_fields[i].cls_prot;
-+ key->extract.from_hdr.prot = dist_fields[i].cls_prot;
- key->extract.from_hdr.type = DPKG_FULL_FIELD;
-- key->extract.from_hdr.field = hash_fields[i].cls_field;
-+ key->extract.from_hdr.field = dist_fields[i].cls_field;
- cls_cfg.num_extracts++;
--
-- priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
- }
-
- dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
-@@ -2035,36 +2979,96 @@ static int dpaa2_eth_set_hash(struct net
- err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
- if (err) {
- dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
-- goto err_prep_key;
-+ goto free_key;
- }
-
-- memset(&dist_cfg, 0, sizeof(dist_cfg));
--
- /* Prepare for setting the rx dist */
-- dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
-- DPAA2_CLASSIFIER_DMA_SIZE,
-- DMA_TO_DEVICE);
-- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
-+ key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, key_iova)) {
- dev_err(dev, "DMA mapping failed\n");
- err = -ENOMEM;
-- goto err_dma_map;
-+ goto free_key;
- }
-
-- dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
-+ if (type == DPAA2_ETH_RX_DIST_HASH) {
-+ if (dpaa2_eth_has_legacy_dist(priv))
-+ err = config_legacy_hash_key(priv, key_iova);
-+ else
-+ err = config_hash_key(priv, key_iova);
-+ } else {
-+ err = config_cls_key(priv, key_iova);
-+ }
-
-- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
-- dma_unmap_single(dev, dist_cfg.key_cfg_iova,
-- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-- if (err)
-- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
-+ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (!err && type == DPAA2_ETH_RX_DIST_HASH)
-+ priv->rx_hash_fields = rx_hash_fields;
-
--err_dma_map:
--err_prep_key:
-+free_key:
- kfree(dma_mem);
- return err;
- }

-
-+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ u64 key = 0;
-+ int i;
-+
-+ if (!dpaa2_eth_hash_enabled(priv))
-+ return -EOPNOTSUPP;
-+
-+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
-+ if (dist_fields[i].rxnfc_field & flags)
-+ key |= dist_fields[i].id;
-+
-+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
-+}
-+
-+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
-+{
-+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
-+}
-+
-+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
-+
-+ /* Check if we actually support Rx flow classification */
-+ if (dpaa2_eth_has_legacy_dist(priv)) {
-+ dev_dbg(dev, "Rx cls not supported by current MC version\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ if (!dpaa2_eth_fs_enabled(priv)) {
-+ dev_dbg(dev, "Rx cls disabled in DPNI options\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ if (!dpaa2_eth_hash_enabled(priv)) {
-+ dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ /* If there is no support for masking in the classification table,
-+ * we don't set a default key, as it will depend on the rules
-+ * added by the user at runtime.
-+ */
-+ if (!dpaa2_eth_fs_mask_enabled(priv))
-+ goto out;
-+
-+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
-+ if (err)
-+ return err;
-+
-+out:
-+ priv->rx_cls_enabled = 1;
-+
-+ return 0;
-+}
-+
- /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
- * frame queues and channels
- */
-@@ -2080,6 +3084,7 @@ static int bind_dpni(struct dpaa2_eth_pr
- pools_params.num_dpbp = 1;
- pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
- pools_params.pools[0].backup_pool = 0;
-+ pools_params.pools[0].priority_mask = 0xff;
- pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
- err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
- if (err) {
-@@ -2087,17 +3092,28 @@ static int bind_dpni(struct dpaa2_eth_pr
- return err;
- }
-
-- /* have the interface implicitly distribute traffic based on supported
-- * header fields
-+ /* have the interface implicitly distribute traffic based on
-+ * the default hash key
- */
-- err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
-- if (err)
-- netdev_err(net_dev, "Failed to configure hashing\n");
-+ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
-+ if (err && err != -EOPNOTSUPP)
-+ dev_err(dev, "Failed to configure hashing\n");
-+
-+ /* Configure the flow classification key; it includes all
-+ * supported header fields and cannot be modified at runtime
-+ */
-+ err = dpaa2_eth_set_default_cls(priv);
-+ if (err && err != -EOPNOTSUPP)
-+ dev_err(dev, "Failed to configure Rx classification key\n");
-
- /* Configure handling of error frames */
- err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
- err_cfg.set_frame_annotation = 1;
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
-+#else
- err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
-+#endif
- err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
- &err_cfg);
- if (err) {
-@@ -2114,6 +3130,11 @@ static int bind_dpni(struct dpaa2_eth_pr
- case DPAA2_TX_CONF_FQ:
- err = setup_tx_flow(priv, &priv->fq[i]);
- break;
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ case DPAA2_RX_ERR_FQ:
-+ err = setup_rx_err_flow(priv, &priv->fq[i]);
-+ break;
-+#endif
- default:
- dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
- return -EINVAL;
-@@ -2237,11 +3258,14 @@ static int netdev_init(struct net_device
- {
- struct device *dev = net_dev->dev.parent;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ u32 options = priv->dpni_attrs.options;
-+ u64 supported = 0, not_supported = 0;
- u8 bcast_addr[ETH_ALEN];
- u8 num_queues;
- int err;
-
- net_dev->netdev_ops = &dpaa2_eth_ops;
-+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
-
- err = set_mac_addr(priv);
- if (err)
-@@ -2255,14 +3279,14 @@ static int netdev_init(struct net_device
- return err;
- }
-
-- /* Reserve enough space to align buffer as per hardware requirement;
-- * NOTE: priv->tx_data_offset MUST be initialized at this point.
-- */
-- net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
--
-- /* Set MTU limits */
-- net_dev->min_mtu = 68;
-+ /* Set MTU upper limit; lower limit is 68B (default value) */
- net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
-+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-+ DPAA2_ETH_MFL);
-+ if (err) {
-+ dev_err(dev, "dpni_set_max_frame_length() failed\n");
-+ return err;
-+ }
-
- /* Set actual number of queues in the net device */
- num_queues = dpaa2_eth_queue_count(priv);
-@@ -2277,12 +3301,23 @@ static int netdev_init(struct net_device
- return err;
- }
-
-- /* Our .ndo_init will be called herein */
-- err = register_netdev(net_dev);
-- if (err < 0) {
-- dev_err(dev, "register_netdev() failed\n");
-- return err;
-- }
-+ /* Capabilities listing */
-+ supported |= IFF_LIVE_ADDR_CHANGE;
-+
-+ if (options & DPNI_OPT_NO_MAC_FILTER)
-+ not_supported |= IFF_UNICAST_FLT;
-+ else
-+ supported |= IFF_UNICAST_FLT;
-+
-+ net_dev->priv_flags |= supported;
-+ net_dev->priv_flags &= ~not_supported;
-+
-+ /* Features */
-+ net_dev->features = NETIF_F_RXCSUM |
-+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-+ NETIF_F_SG | NETIF_F_HIGHDMA |
-+ NETIF_F_LLTX;
-+ net_dev->hw_features = net_dev->features;
-
- return 0;
- }
-@@ -2303,14 +3338,9 @@ static int poll_link_state(void *arg)
- return 0;
- }
-
--static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
--{
-- return IRQ_WAKE_THREAD;
--}
--
- static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
- {
-- u32 status = 0, clear = 0;
-+ u32 status = ~0;
- struct device *dev = (struct device *)arg;
- struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
- struct net_device *net_dev = dev_get_drvdata(dev);
-@@ -2320,18 +3350,12 @@ static irqreturn_t dpni_irq0_handler_thr
- DPNI_IRQ_INDEX, &status);
- if (unlikely(err)) {
- netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
-- clear = 0xffffffff;
-- goto out;
-+ return IRQ_HANDLED;
- }
-
-- if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
-- clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
-+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
- link_state_update(netdev_priv(net_dev));
-- }
-
--out:
-- dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-- DPNI_IRQ_INDEX, clear);
- return IRQ_HANDLED;
- }
-
-@@ -2348,8 +3372,7 @@ static int setup_irqs(struct fsl_mc_devi
-
- irq = ls_dev->irqs[0];
- err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
-- dpni_irq0_handler,
-- dpni_irq0_handler_thread,
-+ NULL, dpni_irq0_handler_thread,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(&ls_dev->dev), &ls_dev->dev);
- if (err < 0) {
-@@ -2405,6 +3428,393 @@ static void del_ch_napi(struct dpaa2_eth
- }
- }
-
-+/* SysFS support */
-+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ /* No MC API for getting the shaping config. We're stateful. */
-+ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
-+
-+ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
-+}
-+
-+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf,
-+ size_t count)
-+{
-+ int err, items;
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
-+
-+ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
-+ if (items != 2) {
-+ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
-+ return -EINVAL;
-+ }
-+ /* Size restriction as per MC API documentation */
-+ if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
-+ pr_err("max_burst_size must be <= %d\n",
-+ DPAA2_ETH_MAX_BURST_SIZE);
-+ return -EINVAL;
-+ }
-+
-+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
-+ &ercfg, 0);
-+ if (err) {
-+ dev_err(dev, "dpni_set_tx_shaping() failed\n");
-+ return -EPERM;
-+ }
-+ /* If successful, save the current configuration for future inquiries */
-+ priv->shaping_cfg = scfg;
-+
-+ return count;
-+}
-+
-+static struct device_attribute dpaa2_eth_attrs[] = {
-+ __ATTR(tx_shaping,
-+ 0600,
-+ dpaa2_eth_show_tx_shaping,
-+ dpaa2_eth_write_tx_shaping),
-+};
-+
-+static void dpaa2_eth_sysfs_init(struct device *dev)
-+{
-+ int i, err;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
-+ err = device_create_file(dev, &dpaa2_eth_attrs[i]);
-+ if (err) {
-+ dev_err(dev, "ERROR creating sysfs file\n");
-+ goto undo;
-+ }
-+ }
-+ return;
-+
-+undo:
-+ while (i > 0)
-+ device_remove_file(dev, &dpaa2_eth_attrs[--i]);
-+}
-+
-+static void dpaa2_eth_sysfs_remove(struct device *dev)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
-+ device_remove_file(dev, &dpaa2_eth_attrs[i]);
-+}
-+
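A standalone sketch (userspace C, not kernel code) of the parsing and validation done by the tx_shaping sysfs handler above; from user space the attribute would be driven with something like echo "1000 60000" > /sys/class/net/<iface>/tx_shaping (rate limit in Mbps, max burst size in bytes). The helper name is a hypothetical stand-in, and the 0xF7FF limit mirrors DPAA2_ETH_MAX_BURST_SIZE from the patch.

#include <stdio.h>

#define MAX_BURST_SIZE	0xF7FF	/* as DPAA2_ETH_MAX_BURST_SIZE above */

/* Illustrative helper, not part of the driver */
static int parse_tx_shaping(const char *buf, unsigned int *rate,
			    unsigned short *burst)
{
	if (sscanf(buf, "%u %hu", rate, burst) != 2)
		return -1;		/* expected exactly two values */
	if (*burst > MAX_BURST_SIZE)
		return -1;		/* burst size out of range */
	return 0;
}

int main(void)
{
	unsigned int rate;
	unsigned short burst;

	if (!parse_tx_shaping("1000 60000", &rate, &burst))
		printf("rate %u Mbps, max burst %hu bytes\n", rate, burst);
	return 0;
}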
-+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
-+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
-+ struct ieee_pfc *pfc)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_congestion_notification_cfg notification_cfg;
-+ struct dpni_link_state state;
-+ int err, i;
-+
-+ priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
-+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ return err;
-+ }
-+
-+ if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
-+ return 0;
-+
-+ priv->pfc.pfc_en = 0;
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ err = dpni_get_congestion_notification(priv->mc_io, 0,
-+ priv->mc_token,
-+ DPNI_QUEUE_RX,
-+ i, &notification_cfg);
-+ if (err) {
-+ netdev_err(net_dev, "Error %d getting congestion notif",
-+ err);
-+ return err;
-+ }
-+
-+ if (notification_cfg.threshold_entry)
-+ priv->pfc.pfc_en |= 1 << i;
-+ }
-+
-+ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
-+
-+ return 0;
-+}
-+
-+/* Configure ingress classification based on VLAN PCP */
-+static int set_vlan_qos(struct dpaa2_eth_priv *priv)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpkg_profile_cfg kg_cfg = {0};
-+ struct dpni_qos_tbl_cfg qos_cfg = {0};
-+ struct dpni_rule_cfg key_params;
-+ u8 *params_iova, *key, *mask = NULL;
-+ /* We only need the trailing 16 bits, without the TPID */
-+ u8 key_size = VLAN_HLEN / 2;
-+ int err = 0, i, j = 0;
-+
-+ if (priv->vlan_clsf_set)
-+ return 0;
-+
-+ params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
-+ if (!params_iova)
-+ return -ENOMEM;
-+
-+ kg_cfg.num_extracts = 1;
-+ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
-+ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
-+ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
-+
-+ err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
-+ if (err) {
-+ dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
-+ goto out_free;
-+ }
-+
-+ /* Set QoS table */
-+ qos_cfg.default_tc = 0;
-+ qos_cfg.discard_on_miss = 0;
-+ qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
-+ DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
-+ dev_err(dev, "%s: DMA mapping failed\n", __func__);
-+ err = -ENOMEM;
-+ goto out_free;
-+ }
-+ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
-+ dma_unmap_single(dev, qos_cfg.key_cfg_iova,
-+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-+
-+ if (err) {
-+ dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
-+ goto out_free;
-+ }
-+
-+ key_params.key_size = key_size;
-+
-+ if (dpaa2_eth_fs_mask_enabled(priv)) {
-+ mask = kzalloc(key_size, GFP_KERNEL);
-+ if (!mask)
-+ goto out_free;
-+
-+ *mask = cpu_to_be16(VLAN_PRIO_MASK);
-+
-+ key_params.mask_iova = dma_map_single(dev, mask, key_size,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, key_params.mask_iova)) {
-+ dev_err(dev, "DMA mapping failed %s\n", __func__);
-+ err = -ENOMEM;
-+ goto out_free_mask;
-+ }
-+ } else {
-+ key_params.mask_iova = 0;
-+ }
-+
-+ key = kzalloc(key_size, GFP_KERNEL);
-+ if (!key)
-+ goto out_cleanup_mask;
-+
-+ key_params.key_iova = dma_map_single(dev, key, key_size,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, key_params.key_iova)) {
-+ dev_err(dev, "%s: DMA mapping failed\n", __func__);
-+ err = -ENOMEM;
-+ goto out_free_key;
-+ }
-+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ *key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
-+
-+ dma_sync_single_for_device(dev, key_params.key_iova,
-+ key_size, DMA_TO_DEVICE);
-+
-+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
-+ &key_params, i, j++);
-+ if (err) {
-+ dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
-+ goto out_remove;
-+ }
-+ }
-+
-+ priv->vlan_clsf_set = true;
-+ dev_dbg(dev, "Vlan PCP QoS classification set\n");
-+ goto out_cleanup;
-+
-+out_remove:
-+ for (j = 0; j < i; j++) {
-+ *key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
-+
-+ dma_sync_single_for_device(dev, key_params.key_iova, key_size,
-+ DMA_TO_DEVICE);
-+
-+ err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
-+ &key_params);
-+ if (err)
-+ dev_err(dev, "dpni_remove_qos_entry failed: %d\n", err);
-+ }
-+
-+out_cleanup:
-+ dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
-+out_free_key:
-+ kfree(key);
-+out_cleanup_mask:
-+ if (key_params.mask_iova)
-+ dma_unmap_single(dev, key_params.mask_iova, key_size,
-+ DMA_TO_DEVICE);
-+out_free_mask:
-+ kfree(mask);
-+out_free:
-+ kfree(params_iova);
-+ return err;
-+}
-+
-+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
-+ struct ieee_pfc *pfc)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_congestion_notification_cfg notification_cfg = {0};
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
-+ struct ieee_pfc old_pfc;
-+ int err = 0, i;
-+
-+ if (dpaa2_eth_tc_count(priv) == 1) {
-+ netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
-+ return 0;
-+ }
-+
-+ /* Zero out pfc_enabled prios greater than tc_count */
-+ pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
-+
-+ if (priv->pfc.pfc_en == pfc->pfc_en)
-+ /* Same enabled mask, nothing to be done */
-+ return 0;
-+
-+ err = set_vlan_qos(priv);
-+ if (err)
-+ return err;
-+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ return err;
-+ }
-+
-+ cfg.rate = state.rate;
-+ cfg.options = state.options;
-+ if (pfc->pfc_en)
-+ cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
-+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d setting link cfg", err);
-+ return err;
-+ }
-+
-+ memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
-+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
-+
-+ err = set_rx_taildrop(priv);
-+ if (err)
-+ goto out_restore_config;
-+
-+ /* configure congestion notifications */
-+ notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
-+ notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
-+ notification_cfg.message_iova = 0ULL;
-+ notification_cfg.message_ctx = 0ULL;
-+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ if (dpaa2_eth_is_pfc_enabled(priv, i)) {
-+ notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
-+ notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
-+ } else {
-+ notification_cfg.threshold_entry = 0;
-+ notification_cfg.threshold_exit = 0;
-+ }
-+
-+ err = dpni_set_congestion_notification(priv->mc_io, 0,
-+ priv->mc_token,
-+ DPNI_QUEUE_RX,
-+ i, &notification_cfg);
-+ if (err) {
-+ netdev_err(net_dev, "Error %d setting congestion notif",
-+ err);
-+ goto out_restore_config;
-+ }
-+
-+ netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
-+ (notification_cfg.threshold_entry ?
-+ "Enabled" : "Disabled"), i);
-+ }
-+
-+ return 0;
-+
-+out_restore_config:
-+ memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
-+ return err;
-+}
-+
-+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+
-+ return priv->dcbx_mode;
-+}
-+
-+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+
-+ priv->dcbx_mode = mode;
-+ return 0;
-+}
-+
-+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+
-+ switch (capid) {
-+ case DCB_CAP_ATTR_PFC:
-+ *cap = true;
-+ break;
-+ case DCB_CAP_ATTR_PFC_TCS:
-+ /* bitmap where each bit represents a number of traffic
-+ * classes the device can be configured to use for Priority
-+ * Flow Control
-+ */
-+ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
-+ break;
-+ case DCB_CAP_ATTR_DCBX:
-+ *cap = priv->dcbx_mode;
-+ break;
-+ default:
-+ *cap = false;
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
-+ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
-+ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
-+ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
-+ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
-+ .getcap = dpaa2_eth_dcbnl_getcap,
-+};
-+#endif
-+
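A standalone illustration (userspace C, not kernel code) of the two small bit manipulations used by the DCB hooks above: trimming the requested PFC enable mask to the number of supported traffic classes, and encoding the DCB_CAP_ATTR_PFC_TCS capability as a bitmap where bit (n-1) means "n traffic classes".

#include <stdio.h>

int main(void)
{
	int tc_count = 8;		/* what dpaa2_eth_tc_count() reports */
	unsigned int pfc_en = 0xFF;	/* user requested PFC on all prios */
	unsigned int pfc_tcs_cap;

	/* Zero out priorities beyond the supported traffic classes */
	pfc_en &= (1 << tc_count) - 1;

	/* Bit (n-1) set means the device supports PFC on n TCs */
	pfc_tcs_cap = 1 << (tc_count - 1);

	printf("pfc_en mask: 0x%02x, PFC_TCS cap: 0x%02x\n",
	       pfc_en, pfc_tcs_cap);
	return 0;
}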
- static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
- {
- struct device *dev;
-@@ -2415,7 +3825,7 @@ static int dpaa2_eth_probe(struct fsl_mc
- dev = &dpni_dev->dev;
-
- /* Net device */
-- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
-+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
- if (!net_dev) {
- dev_err(dev, "alloc_etherdev_mq() failed\n");
- return -ENOMEM;
-@@ -2433,7 +3843,10 @@ static int dpaa2_eth_probe(struct fsl_mc
- err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
- &priv->mc_io);
- if (err) {
-- dev_err(dev, "MC portal allocation failed\n");
-+ if (err == -ENXIO)
-+ err = -EPROBE_DEFER;
-+ else
-+ dev_err(dev, "MC portal allocation failed\n");
- goto err_portal_alloc;
- }
-
-@@ -2456,9 +3869,6 @@ static int dpaa2_eth_probe(struct fsl_mc
- if (err)
- goto err_bind;
-
-- /* Add a NAPI context for each channel */
-- add_ch_napi(priv);
--
- /* Percpu statistics */
- priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
- if (!priv->percpu_stats) {
-@@ -2491,7 +3901,14 @@ static int dpaa2_eth_probe(struct fsl_mc
- if (err)
- goto err_alloc_rings;
-
-- net_dev->ethtool_ops = &dpaa2_ethtool_ops;
-+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
-+ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
-+ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
-+#endif
-+
-+ /* Add a NAPI context for each channel */
-+ add_ch_napi(priv);
-+ enable_ch_napi(priv);
-
- err = setup_irqs(dpni_dev);
- if (err) {
-@@ -2499,25 +3916,41 @@ static int dpaa2_eth_probe(struct fsl_mc
- priv->poll_thread = kthread_run(poll_link_state, priv,
- "%s_poll_link", net_dev->name);
- if (IS_ERR(priv->poll_thread)) {
-- netdev_err(net_dev, "Error starting polling thread\n");
-+ dev_err(dev, "Error starting polling thread\n");
- goto err_poll_thread;
- }
- priv->do_link_poll = true;
- }
-
-+ err = register_netdev(net_dev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev() failed\n");
-+ goto err_netdev_reg;
-+ }
-+
-+ dpaa2_eth_sysfs_init(&net_dev->dev);
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ dpaa2_dbg_add(priv);
-+#endif
-+
- dev_info(dev, "Probed interface %s\n", net_dev->name);
- return 0;
-
-+err_netdev_reg:
-+ if (priv->do_link_poll)
-+ kthread_stop(priv->poll_thread);
-+ else
-+ fsl_mc_free_irqs(dpni_dev);
- err_poll_thread:
- free_rings(priv);
- err_alloc_rings:
- err_csum:
-- unregister_netdev(net_dev);
- err_netdev_init:
- free_percpu(priv->percpu_extras);
- err_alloc_percpu_extras:
- free_percpu(priv->percpu_stats);
- err_alloc_percpu_stats:
-+ disable_ch_napi(priv);
- del_ch_napi(priv);
- err_bind:
- free_dpbp(priv);
-@@ -2544,8 +3977,15 @@ static int dpaa2_eth_remove(struct fsl_m
- net_dev = dev_get_drvdata(dev);
- priv = netdev_priv(net_dev);
-
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ dpaa2_dbg_remove(priv);
-+#endif
-+ dpaa2_eth_sysfs_remove(&net_dev->dev);
-+
- unregister_netdev(net_dev);
-- dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
-+
-+ disable_ch_napi(priv);
-+ del_ch_napi(priv);
-
- if (priv->do_link_poll)
- kthread_stop(priv->poll_thread);
-@@ -2555,17 +3995,16 @@ static int dpaa2_eth_remove(struct fsl_m
- free_rings(priv);
- free_percpu(priv->percpu_stats);
- free_percpu(priv->percpu_extras);
--
-- del_ch_napi(priv);
- free_dpbp(priv);
- free_dpio(priv);
- free_dpni(priv);
-
- fsl_mc_portal_free(priv->mc_io);
-
-- dev_set_drvdata(dev, NULL);
- free_netdev(net_dev);
-
-+ dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
-+
- return 0;
- }
-
-@@ -2588,4 +4027,34 @@ static struct fsl_mc_driver dpaa2_eth_dr
- .match_id_table = dpaa2_eth_match_id_table
- };
-
--module_fsl_mc_driver(dpaa2_eth_driver);
-+static int __init dpaa2_eth_driver_init(void)
-+{
-+ int err;
-+
-+ dpaa2_eth_dbg_init();
-+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
-+ if (err)
-+ goto out_debugfs_err;
-+
-+ err = dpaa2_ceetm_register();
-+ if (err)
-+ goto out_ceetm_err;
-+
-+ return 0;
-+
-+out_ceetm_err:
-+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
-+out_debugfs_err:
-+ dpaa2_eth_dbg_exit();
-+ return err;
-+}
-+
-+static void __exit dpaa2_eth_driver_exit(void)
-+{
-+ dpaa2_ceetm_unregister();
-+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
-+ dpaa2_eth_dbg_exit();
-+}
-+
-+module_init(dpaa2_eth_driver_init);
-+module_exit(dpaa2_eth_driver_exit);
---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-@@ -1,40 +1,15 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
- #ifndef __DPAA2_ETH_H
- #define __DPAA2_ETH_H
-
-+#include <linux/dcbnl.h>
- #include <linux/netdevice.h>
- #include <linux/if_vlan.h>
-+#include <linux/filter.h>
-
- #include "../../fsl-mc/include/dpaa2-io.h"
- #include "../../fsl-mc/include/dpaa2-fd.h"
-@@ -44,6 +19,9 @@
- #include "dpni-cmd.h"
-
- #include "dpaa2-eth-trace.h"
-+#include "dpaa2-eth-debugfs.h"
-+
-+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
-
- #define DPAA2_ETH_STORE_SIZE 16
-
-@@ -60,43 +38,59 @@
- /* Convert L3 MTU to L2 MFL */
- #define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
-
--/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
-- * frames in the Rx queues (length of the current frame is not
-- * taken into account when making the taildrop decision)
-- */
--#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
--
--/* Buffer quota per queue. Must be large enough such that for minimum sized
-- * frames taildrop kicks in before the bpool gets depleted, so we compute
-- * how many 64B frames fit inside the taildrop threshold and add a margin
-- * to accommodate the buffer refill delay.
-- */
--#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
--#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
--#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
-+/* Maximum burst size value for Tx shaping */
-+#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
-
- /* Maximum number of buffers that can be acquired/released through a single
- * QBMan command
- */
- #define DPAA2_ETH_BUFS_PER_CMD 7
-
--/* Hardware requires alignment for ingress/egress buffer addresses
-- * and ingress buffer lengths.
-+/* Set the taildrop threshold to 1MB to allow the enqueue of a sufficiently
-+ * large number of jumbo frames in the Rx queues (length of the current frame
-+ * is not taken into account when making the taildrop decision)
-+ */
-+#define DPAA2_ETH_TAILDROP_THRESH (1024 * 1024)
-+
-+/* Maximum number of Tx confirmation frames to be processed
-+ * in a single NAPI call
-+ */
-+#define DPAA2_ETH_TXCONF_PER_NAPI 256
-+
-+/* Buffer quota per channel.
-+ * We want to keep in check number of ingress frames in flight: for small
-+ * sized frames, buffer pool depletion will kick in first; for large sizes,
-+ * Rx FQ taildrop threshold will ensure only a reasonable number of frames
-+ * will be pending at any given time.
- */
--#define DPAA2_ETH_RX_BUF_SIZE 2048
-+#define DPAA2_ETH_NUM_BUFS_PER_CH 1024
-+#define DPAA2_ETH_REFILL_THRESH(priv) \
-+ ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
-+
-+/* Global buffer quota in case flow control is enabled */
-+#define DPAA2_ETH_NUM_BUFS_FC 256
-+
-+/* Hardware requires alignment for ingress/egress buffer addresses */
- #define DPAA2_ETH_TX_BUF_ALIGN 64
--#define DPAA2_ETH_RX_BUF_ALIGN 256
--#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
-- ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
--
--/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
-- * buffers large enough to allow building an skb around them and also account
-- * for alignment restrictions
-- */
--#define DPAA2_ETH_BUF_RAW_SIZE \
-- (DPAA2_ETH_RX_BUF_SIZE + \
-- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
-- DPAA2_ETH_RX_BUF_ALIGN)
-+
-+#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
-+#define DPAA2_ETH_RX_BUF_TAILROOM \
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-+#define DPAA2_ETH_RX_BUF_SIZE \
-+ (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
-+
-+/* Hardware annotation area in RX/TX buffers */
-+#define DPAA2_ETH_RX_HWA_SIZE 64
-+#define DPAA2_ETH_TX_HWA_SIZE 128
-+
-+/* PTP nominal frequency 1GHz */
-+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
-+
-+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
-+ * to 256B. For newer revisions, the requirement is only for 64B alignment
-+ */
-+#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
-+#define DPAA2_ETH_RX_BUF_ALIGN 64
-
- /* We are accommodating a skb backpointer and some S/G info
- * in the frame's software annotation. The hardware
-@@ -104,12 +98,32 @@
- */
- #define DPAA2_ETH_SWA_SIZE 64
-
-+/* We store different information in the software annotation area of a Tx frame
-+ * based on what type of frame it is
-+ */
-+enum dpaa2_eth_swa_type {
-+ DPAA2_ETH_SWA_SINGLE,
-+ DPAA2_ETH_SWA_SG,
-+ DPAA2_ETH_SWA_XDP,
-+};
-+
- /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
- struct dpaa2_eth_swa {
-- struct sk_buff *skb;
-- struct scatterlist *scl;
-- int num_sg;
-- int num_dma_bufs;
-+ enum dpaa2_eth_swa_type type;
-+ union {
-+ struct {
-+ struct sk_buff *skb;
-+ } single;
-+ struct {
-+ struct sk_buff *skb;
-+ struct scatterlist *scl;
-+ int num_sg;
-+ int sgt_size;
-+ } sg;
-+ struct {
-+ int dma_size;
-+ } xdp;
-+ };
- };
-
- /* Annotation valid bits in FD FRC */
-@@ -121,22 +135,14 @@ struct dpaa2_eth_swa {
- #define DPAA2_FD_FRC_FAICFDV 0x0400
-
- /* Error bits in FD CTRL */
--#define DPAA2_FD_CTRL_UFD 0x00000004
--#define DPAA2_FD_CTRL_SBE 0x00000008
--#define DPAA2_FD_CTRL_FSE 0x00000020
--#define DPAA2_FD_CTRL_FAERR 0x00000040
--
--#define DPAA2_FD_RX_ERR_MASK (DPAA2_FD_CTRL_SBE | \
-- DPAA2_FD_CTRL_FAERR)
--#define DPAA2_FD_TX_ERR_MASK (DPAA2_FD_CTRL_UFD | \
-- DPAA2_FD_CTRL_SBE | \
-- DPAA2_FD_CTRL_FSE | \
-- DPAA2_FD_CTRL_FAERR)
-+#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
-+#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
-+ FD_CTRL_SBE | \
-+ FD_CTRL_FSE | \
-+ FD_CTRL_FAERR)
-
- /* Annotation bits in FD CTRL */
--#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
--#define DPAA2_FD_CTRL_PTA 0x00800000
--#define DPAA2_FD_CTRL_PTV1 0x00400000
-+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */
-
- /* Frame annotation status */
- struct dpaa2_fas {
-@@ -144,7 +150,7 @@ struct dpaa2_fas {
- u8 ppid;
- __le16 ifpid;
- __le32 status;
--} __packed;
-+};
-
- /* Frame annotation status word is located in the first 8 bytes
- * of the buffer's hardware annotation area
-@@ -152,11 +158,45 @@ struct dpaa2_fas {
- #define DPAA2_FAS_OFFSET 0
- #define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
-
-+/* Timestamp is located in the next 8 bytes of the buffer's
-+ * hardware annotation area
-+ */
-+#define DPAA2_TS_OFFSET 0x8
-+
-+/* Frame annotation egress action descriptor */
-+#define DPAA2_FAEAD_OFFSET 0x58
-+
-+struct dpaa2_faead {
-+ __le32 conf_fqid;
-+ __le32 ctrl;
-+};
-+
-+#define DPAA2_FAEAD_A2V 0x20000000
-+#define DPAA2_FAEAD_A4V 0x08000000
-+#define DPAA2_FAEAD_UPDV 0x00001000
-+#define DPAA2_FAEAD_EBDDV 0x00002000
-+#define DPAA2_FAEAD_UPD 0x00000010
-+
- /* Accessors for the hardware annotation fields that we use */
--#define dpaa2_get_hwa(buf_addr) \
-- ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
--#define dpaa2_get_fas(buf_addr) \
-- (struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
-+static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
-+{
-+ return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
-+}
-+
-+static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
-+{
-+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
-+}
-+
-+static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
-+{
-+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
-+}
-+
-+static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
-+{
-+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
-+}
-
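A standalone sketch (userspace C, not kernel code) of the buffer annotation layout implied by the accessors above: an optional 64B software annotation, followed by the hardware annotation area holding the frame annotation status at +0x00, the timestamp at +0x08 and the egress action descriptor at +0x58. Offsets are taken from the macros above; the names below are illustrative.

#include <stdio.h>

#define SWA_SIZE	64	/* DPAA2_ETH_SWA_SIZE */
#define FAS_OFFSET	0x00	/* DPAA2_FAS_OFFSET */
#define TS_OFFSET	0x08	/* DPAA2_TS_OFFSET */
#define FAEAD_OFFSET	0x58	/* DPAA2_FAEAD_OFFSET */

static unsigned int hwa_base(int has_swa)
{
	/* HWA starts right after the SWA when one is present */
	return has_swa ? SWA_SIZE : 0;
}

int main(void)
{
	int has_swa = 1;	/* Tx buffers carry a software annotation */

	printf("FAS at +0x%02x, TS at +0x%02x, FAEAD at +0x%02x\n",
	       hwa_base(has_swa) + FAS_OFFSET,
	       hwa_base(has_swa) + TS_OFFSET,
	       hwa_base(has_swa) + FAEAD_OFFSET);
	return 0;
}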
- /* Error and status bits in the frame annotation status word */
- /* Debug frame, otherwise supposed to be discarded */
-@@ -203,11 +243,6 @@ struct dpaa2_fas {
- DPAA2_FAS_BLE | \
- DPAA2_FAS_L3CE | \
- DPAA2_FAS_L4CE)
--/* Tx errors */
--#define DPAA2_FAS_TX_ERR_MASK (DPAA2_FAS_KSE | \
-- DPAA2_FAS_EOFHE | \
-- DPAA2_FAS_MNLE | \
-- DPAA2_FAS_TIDE)
-
- /* Time in milliseconds between link state updates */
- #define DPAA2_ETH_LINK_STATE_REFRESH 1000
-@@ -226,6 +261,7 @@ struct dpaa2_eth_drv_stats {
- __u64 tx_conf_bytes;
- __u64 tx_sg_frames;
- __u64 tx_sg_bytes;
-+ __u64 tx_reallocs;
- __u64 rx_sg_frames;
- __u64 rx_sg_bytes;
- /* Enqueues retried due to portal busy */
-@@ -250,17 +286,23 @@ struct dpaa2_eth_ch_stats {
- __u64 pull_err;
- };
-
-+#define DPAA2_ETH_MAX_TCS 8
-+
- /* Maximum number of queues associated with a DPNI */
--#define DPAA2_ETH_MAX_RX_QUEUES 16
--#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
-+#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
-+#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
-+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
- #define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
-- DPAA2_ETH_MAX_TX_QUEUES)
-+ DPAA2_ETH_MAX_TX_QUEUES + \
-+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
-+#define DPAA2_ETH_MAX_NETDEV_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
-
--#define DPAA2_ETH_MAX_DPCONS NR_CPUS
-+#define DPAA2_ETH_MAX_DPCONS 16
-
- enum dpaa2_eth_fq_type {
- DPAA2_RX_FQ = 0,
- DPAA2_TX_CONF_FQ,
-+ DPAA2_RX_ERR_FQ
- };
-
- struct dpaa2_eth_priv;
-@@ -268,15 +310,19 @@ struct dpaa2_eth_priv;
- struct dpaa2_eth_fq {
- u32 fqid;
- u32 tx_qdbin;
-+ u32 tx_fqid;
- u16 flowid;
-+ u8 tc;
- int target_cpu;
-+ u32 dq_frames;
-+ u32 dq_bytes;
- struct dpaa2_eth_channel *channel;
- enum dpaa2_eth_fq_type type;
-
-- void (*consume)(struct dpaa2_eth_priv *,
-- struct dpaa2_eth_channel *,
-- const struct dpaa2_fd *,
-- struct napi_struct *);
-+ void (*consume)(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct dpaa2_eth_fq *fq);
- struct dpaa2_eth_fq_stats stats;
- };
-
-@@ -285,19 +331,29 @@ struct dpaa2_eth_channel {
- struct fsl_mc_device *dpcon;
- int dpcon_id;
- int ch_id;
-- int dpio_id;
- struct napi_struct napi;
-+ struct dpaa2_io *dpio;
- struct dpaa2_io_store *store;
- struct dpaa2_eth_priv *priv;
- int buf_count;
- struct dpaa2_eth_ch_stats stats;
-+ struct bpf_prog *xdp_prog;
-+ u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
-+ u8 rel_buf_cnt;
-+ bool flush;
- };
-
--struct dpaa2_eth_hash_fields {
-+struct dpaa2_eth_dist_fields {
- u64 rxnfc_field;
- enum net_prot cls_prot;
- int cls_field;
- int size;
-+ u64 id;
-+};
-+
-+struct dpaa2_eth_cls_rule {
-+ struct ethtool_rx_flow_spec fs;
-+ u8 in_use;
- };
-
- /* Driver private data */
-@@ -306,17 +362,29 @@ struct dpaa2_eth_priv {
-
- u8 num_fqs;
- struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
-+ int (*enqueue)(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq,
-+ struct dpaa2_fd *fd, u8 prio);
-
- u8 num_channels;
- struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
-+ int max_bufs_per_ch;
-+ int refill_thresh;
-+
-+ bool has_xdp_prog;
-
- struct dpni_attr dpni_attrs;
-+ u16 dpni_ver_major;
-+ u16 dpni_ver_minor;
- u16 tx_data_offset;
-
- struct fsl_mc_device *dpbp_dev;
- u16 bpid;
- struct iommu_domain *iommu_domain;
-
-+ bool ts_tx_en; /* Tx timestamping enabled */
-+ bool ts_rx_en; /* Rx timestamping enabled */
-+
- u16 tx_qdid;
- struct fsl_mc_io *mc_io;
- /* Cores which have an affine DPIO/DPCON.
-@@ -337,13 +405,30 @@ struct dpaa2_eth_priv {
-
- /* enabled ethtool hashing bits */
- u64 rx_hash_fields;
-+ u64 rx_cls_fields;
-+ struct dpaa2_eth_cls_rule *cls_rule;
-+ u8 rx_cls_enabled;
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ struct dpaa2_debugfs dbg;
-+#endif
-+ struct dpni_tx_shaping_cfg shaping_cfg;
-+
-+ u8 dcbx_mode;
-+ struct ieee_pfc pfc;
-+ bool vlan_clsf_set;
-+ bool tx_pause_frames;
-+
-+ bool ceetm_en;
- };
-
--/* default Rx hash options, set during probing */
- #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
- | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
- | RXH_L4_B_2_3)
-
-+/* default Rx hash options, set during probing */
-+#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
-+ RXH_L4_B_0_1 | RXH_L4_B_2_3)
-+
- #define dpaa2_eth_hash_enabled(priv) \
- ((priv)->dpni_attrs.num_queues > 1)
-
-@@ -352,10 +437,127 @@ struct dpaa2_eth_priv {
-
- extern const struct ethtool_ops dpaa2_ethtool_ops;
- extern const char dpaa2_eth_drv_version[];
-+extern int dpaa2_phc_index;
-+
-+static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
-+ u16 ver_major, u16 ver_minor)
-+{
-+ if (priv->dpni_ver_major == ver_major)
-+ return priv->dpni_ver_minor - ver_minor;
-+ return priv->dpni_ver_major - ver_major;
-+}
-+
-+/* Minimum firmware version that supports a more flexible API
-+ * for configuring the Rx flow hash key
-+ */
-+#define DPNI_RX_DIST_KEY_VER_MAJOR 7
-+#define DPNI_RX_DIST_KEY_VER_MINOR 5
-+
-+#define dpaa2_eth_has_legacy_dist(priv) \
-+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
-+ DPNI_RX_DIST_KEY_VER_MINOR) < 0)
-+
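A standalone illustration (userspace C, not kernel code) of the firmware version check behind dpaa2_eth_cmp_dpni_ver() and the has_legacy_dist macro above: compare the major version first, then the minor; a DPNI older than 7.5 falls back to the legacy Rx distribution API.

#include <stdio.h>

static int cmp_ver(int maj, int min, int ref_maj, int ref_min)
{
	if (maj == ref_maj)
		return min - ref_min;
	return maj - ref_maj;
}

int main(void)
{
	/* Reference: DPNI_RX_DIST_KEY_VER_{MAJOR,MINOR} = 7.5 */
	printf("7.4 uses legacy dist: %s\n",
	       cmp_ver(7, 4, 7, 5) < 0 ? "yes" : "no");
	printf("7.8 uses legacy dist: %s\n",
	       cmp_ver(7, 8, 7, 5) < 0 ? "yes" : "no");
	return 0;
}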
-+#define dpaa2_eth_fs_enabled(priv) \
-+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
-+
-+#define dpaa2_eth_fs_mask_enabled(priv) \
-+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
-+
-+#define dpaa2_eth_fs_count(priv) \
-+ ((priv)->dpni_attrs.fs_entries)
-+
-+#define dpaa2_eth_queue_count(priv) \
-+ ((priv)->num_channels)
-+
-+#define dpaa2_eth_tc_count(priv) \
-+ ((priv)->dpni_attrs.num_tcs)
-+
-+enum dpaa2_eth_rx_dist {
-+ DPAA2_ETH_RX_DIST_HASH,
-+ DPAA2_ETH_RX_DIST_CLS
-+};
-+
-+/* Unique IDs for the supported Rx classification header fields */
-+#define DPAA2_ETH_DIST_ETHDST BIT(0)
-+#define DPAA2_ETH_DIST_ETHSRC BIT(1)
-+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
-+#define DPAA2_ETH_DIST_VLAN BIT(3)
-+#define DPAA2_ETH_DIST_IPSRC BIT(4)
-+#define DPAA2_ETH_DIST_IPDST BIT(5)
-+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
-+#define DPAA2_ETH_DIST_L4SRC BIT(7)
-+#define DPAA2_ETH_DIST_L4DST BIT(8)
-+#define DPAA2_ETH_DIST_ALL (~0U)
-+
-+static inline
-+unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
-+ struct sk_buff *skb)
-+{
-+ unsigned int headroom = DPAA2_ETH_SWA_SIZE;
-+
-+ /* If we don't have an skb (e.g. XDP buffer), we only need space for
-+ * the software annotation area
-+ */
-+ if (!skb)
-+ return headroom;
-
--static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
-+ /* For non-linear skbs we have no headroom requirement, as we build a
-+ * SG frame with a newly allocated SGT buffer
-+ */
-+ if (skb_is_nonlinear(skb))
-+ return 0;
-+
-+ /* If we have Tx timestamping, need 128B hardware annotation */
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-+ headroom += DPAA2_ETH_TX_HWA_SIZE;
-+
-+ return headroom;
-+}
-+
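A standalone sketch (userspace C, not kernel code) of the Tx headroom rules encoded by dpaa2_eth_needed_headroom() above: non-linear skbs need no headroom (a separate SGT buffer is allocated), linear skbs need the 64B software annotation, plus the 128B hardware annotation when Tx timestamping is requested. Names below are illustrative.

#include <stdio.h>

#define SWA_SIZE	64	/* DPAA2_ETH_SWA_SIZE */
#define TX_HWA_SIZE	128	/* DPAA2_ETH_TX_HWA_SIZE */

static unsigned int needed_headroom(int nonlinear, int ts_requested)
{
	if (nonlinear)
		return 0;	/* SG frame, SGT allocated separately */
	return SWA_SIZE + (ts_requested ? TX_HWA_SIZE : 0);
}

int main(void)
{
	printf("linear, no timestamp: %u\n", needed_headroom(0, 0));
	printf("linear, timestamped : %u\n", needed_headroom(0, 1));
	printf("non-linear          : %u\n", needed_headroom(1, 0));
	return 0;
}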
-+/* Extra headroom space requested to hardware, in order to make sure there's
-+ * no realloc'ing in forwarding scenarios
-+ */
-+static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
-+{
-+ return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
-+}
-+
-+static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
-+ int traffic_class)
-+{
-+ return priv->pfc.pfc_en & (1 << traffic_class);
-+}
-+
-+enum dpaa2_eth_td_cfg {
-+ DPAA2_ETH_TD_NONE,
-+ DPAA2_ETH_TD_QUEUE,
-+ DPAA2_ETH_TD_GROUP
-+};
-+
-+static inline enum dpaa2_eth_td_cfg
-+dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
-+{
-+ bool pfc_enabled = !!(priv->pfc.pfc_en);
-+
-+ if (pfc_enabled)
-+ return DPAA2_ETH_TD_GROUP;
-+ else if (priv->tx_pause_frames)
-+ return DPAA2_ETH_TD_NONE;
-+ else
-+ return DPAA2_ETH_TD_QUEUE;
-+}
-+
-+static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
- {
-- return priv->dpni_attrs.num_queues;
-+ return 1;
- }
-
-+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
-+int dpaa2_eth_cls_key_size(u64 key);
-+int dpaa2_eth_cls_fld_off(int prot, int field);
-+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
-+
-+int set_rx_taildrop(struct dpaa2_eth_priv *priv);
-+
- #endif /* __DPAA2_H */
---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
-@@ -1,35 +1,10 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /* Copyright 2014-2016 Freescale Semiconductor Inc.
-- * Copyright 2016 NXP
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ * Copyright 2016-2017 NXP
- */
-
-+#include <linux/net_tstamp.h>
-+
- #include "dpni.h" /* DPNI_LINK_OPT_* */
- #include "dpaa2-eth.h"
-
-@@ -52,6 +27,10 @@ static char dpaa2_ethtool_stats[][ETH_GS
- "[hw] rx nobuffer discards",
- "[hw] tx discarded frames",
- "[hw] tx confirmed frames",
-+ "[hw] tx dequeued bytes",
-+ "[hw] tx dequeued frames",
-+ "[hw] tx rejected bytes",
-+ "[hw] tx rejected frames",
- };
-
- #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
-@@ -62,6 +41,7 @@ static char dpaa2_ethtool_extras[][ETH_G
- "[drv] tx conf bytes",
- "[drv] tx sg frames",
- "[drv] tx sg bytes",
-+ "[drv] tx realloc frames",
- "[drv] rx sg frames",
- "[drv] rx sg bytes",
- "[drv] enqueue portal busy",
-@@ -69,6 +49,12 @@ static char dpaa2_ethtool_extras[][ETH_G
- "[drv] dequeue portal busy",
- "[drv] channel pull errors",
- "[drv] cdan",
-+ /* FQ stats */
-+ "rx pending frames",
-+ "rx pending bytes",
-+ "tx conf pending frames",
-+ "tx conf pending bytes",
-+ "buffer count"
- };
-
- #define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
-@@ -76,14 +62,55 @@ static char dpaa2_ethtool_extras[][ETH_G
- static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
- struct ethtool_drvinfo *drvinfo)
- {
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-- strlcpy(drvinfo->version, dpaa2_eth_drv_version,
-- sizeof(drvinfo->version));
-- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-+
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
-+
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
- sizeof(drvinfo->bus_info));
- }
-
-+#define DPNI_LINK_AUTONEG_VER_MAJOR 7
-+#define DPNI_LINK_AUTONEG_VER_MINOR 8
-+
-+struct dpaa2_eth_link_mode_map {
-+ u64 dpni_lm;
-+ u64 ethtool_lm;
-+};
-+
-+static const struct dpaa2_eth_link_mode_map dpaa2_eth_lm_map[] = {
-+ {DPNI_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
-+ {DPNI_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
-+ {DPNI_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
-+ {DPNI_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
-+ {DPNI_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
-+ {DPNI_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
-+};
-+
-+static void link_mode_dpni2ethtool(u64 dpni_lm, unsigned long *ethtool_lm)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
-+ if (dpni_lm & dpaa2_eth_lm_map[i].dpni_lm)
-+ __set_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm);
-+ }
-+}
-+
-+static void link_mode_ethtool2dpni(const unsigned long *ethtool_lm,
-+ u64 *dpni_lm)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
-+ if (test_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm))
-+ *dpni_lm |= dpaa2_eth_lm_map[i].dpni_lm;
-+ }
-+}
-+
- static int
- dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
- struct ethtool_link_ksettings *link_settings)
-@@ -92,17 +119,27 @@ dpaa2_eth_get_link_ksettings(struct net_
- int err = 0;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-
-- err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-- if (err) {
-- netdev_err(net_dev, "ERROR %d getting link state\n", err);
-- goto out;
-+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
-+ DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token,
-+ &state);
-+ if (err) {
-+ netdev_err(net_dev, "dpni_get_link_state failed\n");
-+ goto out;
-+ }
-+ } else {
-+ err = dpni_get_link_state_v2(priv->mc_io, 0, priv->mc_token,
-+ &state);
-+ if (err) {
-+ netdev_err(net_dev, "dpni_get_link_state_v2 failed\n");
-+ goto out;
-+ }
-+ link_mode_dpni2ethtool(state.supported,
-+ link_settings->link_modes.supported);
-+ link_mode_dpni2ethtool(state.advertising,
-+ link_settings->link_modes.advertising);
- }
-
-- /* At the moment, we have no way of interrogating the DPMAC
-- * from the DPNI side - and for that matter there may exist
-- * no DPMAC at all. So for now we just don't report anything
-- * beyond the DPNI attributes.
-- */
- if (state.options & DPNI_LINK_OPT_AUTONEG)
- link_settings->base.autoneg = AUTONEG_ENABLE;
- if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
-@@ -113,25 +150,37 @@ out:
- return err;
- }
-
-+#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
-+#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
- static int
- dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
- const struct ethtool_link_ksettings *link_settings)
- {
-- struct dpni_link_cfg cfg = {0};
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
- int err = 0;
-
-- netdev_dbg(net_dev, "Setting link parameters...");
-+	/* On older MC versions the DPNI must be down before link
-+	 * settings can be changed, so let the user know about it.
-+	 */
-+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
-+ DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
-+ if (netif_running(net_dev)) {
-+ netdev_info(net_dev, "Interface must be brought down first.\n");
-+ return -EACCES;
-+ }
-+ }
-
-- /* Due to a temporary MC limitation, the DPNI must be down
-- * in order to be able to change link settings. Taking steps to let
-- * the user know that.
-- */
-- if (netif_running(net_dev)) {
-- netdev_info(net_dev, "Sorry, interface must be brought down first.\n");
-- return -EACCES;
-+ /* Need to interrogate link state to get flow control params */
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_err(net_dev, "Error getting link state\n");
-+ goto out;
- }
-
-+ cfg.options = state.options;
- cfg.rate = link_settings->base.speed;
- if (link_settings->base.autoneg == AUTONEG_ENABLE)
- cfg.options |= DPNI_LINK_OPT_AUTONEG;
-@@ -142,13 +191,92 @@ dpaa2_eth_set_link_ksettings(struct net_
- else
- cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
-
-+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
-+				   DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
-+		err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
-+	} else {
-+		link_mode_ethtool2dpni(link_settings->link_modes.advertising,
-+				       &cfg.advertising);
-+		err = dpni_set_link_cfg_v2(priv->mc_io, 0, priv->mc_token, &cfg);
-+	}
-+ if (err)
-+ netdev_err(net_dev, "dpni_set_link_cfg failed");
-+
-+out:
-+ return err;
-+}
-+
-+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *pause)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
-+ int err;
-+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err)
-+ netdev_dbg(net_dev, "Error getting link state\n");
-+
-+ /* Report general port autonegotiation status */
-+ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
-+ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
-+ pause->tx_pause = pause->rx_pause ^
-+ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
-+}
-+
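dpaa2_eth_get_pauseparam() derives rx_pause straight from the PAUSE link option and tx_pause from PAUSE XOR ASYM_PAUSE. A tiny stand-alone sketch of just that decoding; the helper name and boolean inputs are made up for illustration, only the logic matches the driver.

#include <stdbool.h>
#include <stdio.h>

/* Decode (PAUSE, ASYM_PAUSE) the way dpaa2_eth_get_pauseparam() does. */
static void decode_pause(bool pause, bool asym, bool *rx, bool *tx)
{
	*rx = pause;
	*tx = pause ^ asym;
}

int main(void)
{
	bool rx, tx;

	/* PAUSE=1, ASYM_PAUSE=1: Rx pause honoured, Tx pause generation off. */
	decode_pause(true, true, &rx, &tx);
	printf("rx=%d tx=%d\n", rx, tx);
	return 0;
}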
-+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *pause)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
-+ u32 current_tx_pause;
-+ int err = 0;
-+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_dbg(net_dev, "Error getting link state\n");
-+ goto out;
-+ }
-+
-+ cfg.rate = state.rate;
-+ cfg.options = state.options;
-+ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
-+ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
-+
-+ /* We don't support changing pause frame autonegotiation separately
-+ * from general port autoneg
-+ */
-+ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
-+ netdev_warn(net_dev,
-+ "Cannot change pause frame autoneg separately\n");
-+
-+ if (pause->rx_pause)
-+ cfg.options |= DPNI_LINK_OPT_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
-+
-+ if (pause->rx_pause ^ pause->tx_pause)
-+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
-+
- err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
-+ if (err) {
-+ netdev_dbg(net_dev, "Error setting link\n");
-+ goto out;
-+ }
-+
-+ /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
-+ if (current_tx_pause == pause->tx_pause)
-+ goto out;
-+
-+ priv->tx_pause_frames = pause->tx_pause;
-+ err = set_rx_taildrop(priv);
- if (err)
-- /* ethtool will be loud enough if we return an error; no point
-- * in putting our own error message on the console by default
-- */
-- netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
-+ netdev_dbg(net_dev, "Error configuring taildrop\n");
-
-+out:
- return err;
- }
-
-@@ -192,6 +320,10 @@ static void dpaa2_eth_get_ethtool_stats(
- int j, k, err;
- int num_cnt;
- union dpni_statistics dpni_stats;
-+ u32 fcnt, bcnt;
-+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
-+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
-+ u32 buf_cnt;
- u64 cdan = 0;
- u64 portal_busy = 0, pull_err = 0;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-@@ -202,9 +334,9 @@ static void dpaa2_eth_get_ethtool_stats(
- sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
-
- /* Print standard counters, from DPNI statistics */
-- for (j = 0; j <= 2; j++) {
-+ for (j = 0; j <= 3; j++) {
- err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
-- j, &dpni_stats);
-+ j, 0, &dpni_stats);
- if (err != 0)
- netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
- switch (j) {
-@@ -217,6 +349,9 @@ static void dpaa2_eth_get_ethtool_stats(
- case 2:
- num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
- break;
-+ case 3:
-+ num_cnt = sizeof(dpni_stats.page_3) / sizeof(u64);
-+ break;
- }
- for (k = 0; k < num_cnt; k++)
- *(data + i++) = dpni_stats.raw.counter[k];
-@@ -240,12 +375,410 @@ static void dpaa2_eth_get_ethtool_stats(
- *(data + i++) = portal_busy;
- *(data + i++) = pull_err;
- *(data + i++) = cdan;
-+
-+ for (j = 0; j < priv->num_fqs; j++) {
-+ /* Print FQ instantaneous counts */
-+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
-+ &fcnt, &bcnt);
-+ if (err) {
-+ netdev_warn(net_dev, "FQ query error %d", err);
-+ return;
-+ }
-+
-+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
-+ fcnt_tx_total += fcnt;
-+ bcnt_tx_total += bcnt;
-+ } else {
-+ fcnt_rx_total += fcnt;
-+ bcnt_rx_total += bcnt;
-+ }
-+ }
-+
-+ *(data + i++) = fcnt_rx_total;
-+ *(data + i++) = bcnt_rx_total;
-+ *(data + i++) = fcnt_tx_total;
-+ *(data + i++) = bcnt_tx_total;
-+
-+ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
-+ if (err) {
-+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
-+ return;
-+ }
-+ *(data + i++) = buf_cnt;
-+}
-+
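These totals are what user space sees from ethtool -S <iface>: the per-page DPNI counters are labelled by dpaa2_ethtool_stats[], and the FQ and buffer-pool counts appended at the end line up with the extra string entries added to dpaa2_ethtool_extras[] near the top of this file.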
-+static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
-+ void *key, void *mask, u64 *fields)
-+{
-+ int off;
-+
-+ if (eth_mask->h_proto) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(__be16 *)(key + off) = eth_value->h_proto;
-+ *(__be16 *)(mask + off) = eth_mask->h_proto;
-+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
-+ }
-+
-+ if (!is_zero_ether_addr(eth_mask->h_source)) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
-+ ether_addr_copy(key + off, eth_value->h_source);
-+ ether_addr_copy(mask + off, eth_mask->h_source);
-+ *fields |= DPAA2_ETH_DIST_ETHSRC;
-+ }
-+
-+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + off, eth_value->h_dest);
-+ ether_addr_copy(mask + off, eth_mask->h_dest);
-+ *fields |= DPAA2_ETH_DIST_ETHDST;
-+ }
-+
-+ return 0;
-+}
-+
-+static int prep_user_ip_rule(struct ethtool_usrip4_spec *uip_value,
-+ struct ethtool_usrip4_spec *uip_mask,
-+ void *key, void *mask, u64 *fields)
-+{
-+ int off;
-+ u32 tmp_value, tmp_mask;
-+
-+ if (uip_mask->tos || uip_mask->ip_ver)
-+ return -EOPNOTSUPP;
-+
-+ if (uip_mask->ip4src) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(__be32 *)(key + off) = uip_value->ip4src;
-+ *(__be32 *)(mask + off) = uip_mask->ip4src;
-+ *fields |= DPAA2_ETH_DIST_IPSRC;
-+ }
-+
-+ if (uip_mask->ip4dst) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
-+ *(__be32 *)(key + off) = uip_value->ip4dst;
-+ *(__be32 *)(mask + off) = uip_mask->ip4dst;
-+ *fields |= DPAA2_ETH_DIST_IPDST;
-+ }
-+
-+ if (uip_mask->proto) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u8 *)(key + off) = uip_value->proto;
-+ *(u8 *)(mask + off) = uip_mask->proto;
-+ *fields |= DPAA2_ETH_DIST_IPPROTO;
-+ }
-+
-+ if (uip_mask->l4_4_bytes) {
-+ tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
-+ tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
-+
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(__be16 *)(key + off) = htons(tmp_value >> 16);
-+ *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
-+ *fields |= DPAA2_ETH_DIST_L4SRC;
-+
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
-+ *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
-+ *fields |= DPAA2_ETH_DIST_L4DST;
-+ }
-+
-+ /* Only apply the rule for IPv4 frames */
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(__be16 *)(key + off) = htons(ETH_P_IP);
-+ *(__be16 *)(mask + off) = htons(0xFFFF);
-+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
-+
-+ return 0;
-+}
-+
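prep_user_ip_rule() treats the generic l4_4_bytes field as two 16-bit ports: after byte-swapping, the upper half becomes the L4 source port and the lower half the destination port. A minimal illustration of that split, with user-space byte-order helpers standing in for the kernel ones and the port numbers chosen arbitrarily:

#include <arpa/inet.h>	/* htonl(), ntohl(), htons(), ntohs() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* src port 8080 in the upper 16 bits, dst port 80 in the lower 16. */
	uint32_t l4_4_bytes = htonl((8080u << 16) | 80u);
	uint32_t host = ntohl(l4_4_bytes);
	uint16_t src_be = htons(host >> 16);	/* written at the UDP_PORT_SRC offset */
	uint16_t dst_be = htons(host & 0xFFFF);	/* written at the UDP_PORT_DST offset */

	printf("src=%u dst=%u\n", (unsigned)ntohs(src_be), (unsigned)ntohs(dst_be));
	return 0;
}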
-+static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
-+ struct ethtool_tcpip4_spec *l4_mask,
-+ void *key, void *mask, u8 l4_proto, u64 *fields)
-+{
-+ int off;
-+
-+ if (l4_mask->tos)
-+ return -EOPNOTSUPP;
-+ if (l4_mask->ip4src) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(__be32 *)(key + off) = l4_value->ip4src;
-+ *(__be32 *)(mask + off) = l4_mask->ip4src;
-+ *fields |= DPAA2_ETH_DIST_IPSRC;
-+ }
-+
-+ if (l4_mask->ip4dst) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
-+ *(__be32 *)(key + off) = l4_value->ip4dst;
-+ *(__be32 *)(mask + off) = l4_mask->ip4dst;
-+ *fields |= DPAA2_ETH_DIST_IPDST;
-+ }
-+
-+ if (l4_mask->psrc) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(__be16 *)(key + off) = l4_value->psrc;
-+ *(__be16 *)(mask + off) = l4_mask->psrc;
-+ *fields |= DPAA2_ETH_DIST_L4SRC;
-+ }
-+
-+ if (l4_mask->pdst) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(__be16 *)(key + off) = l4_value->pdst;
-+ *(__be16 *)(mask + off) = l4_mask->pdst;
-+ *fields |= DPAA2_ETH_DIST_L4DST;
-+ }
-+
-+ /* Only apply the rule for the user-specified L4 protocol
-+ * and if ethertype matches IPv4
-+ */
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(__be16 *)(key + off) = htons(ETH_P_IP);
-+ *(__be16 *)(mask + off) = htons(0xFFFF);
-+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
-+
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u8 *)(key + off) = l4_proto;
-+ *(u8 *)(mask + off) = 0xFF;
-+ *fields |= DPAA2_ETH_DIST_IPPROTO;
-+
-+ return 0;
-+}
-+
-+static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
-+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask, u64 *fields)
-+{
-+ int off;
-+
-+ if (ext_mask->vlan_etype)
-+ return -EOPNOTSUPP;
-+
-+ if (ext_mask->vlan_tci) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
-+ *(__be16 *)(key + off) = ext_value->vlan_tci;
-+ *(__be16 *)(mask + off) = ext_mask->vlan_tci;
-+ *fields |= DPAA2_ETH_DIST_VLAN;
-+ }
-+
-+ return 0;
-+}
-+
-+static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
-+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask, u64 *fields)
-+{
-+ int off;
-+
-+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
-+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + off, ext_value->h_dest);
-+ ether_addr_copy(mask + off, ext_mask->h_dest);
-+ *fields |= DPAA2_ETH_DIST_ETHDST;
-+ }
-+
-+ return 0;
-+}
-+
-+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
-+ u64 *fields)
-+{
-+ int err;
-+
-+ switch (fs->flow_type & 0xFF) {
-+ case ETHER_FLOW:
-+ err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
-+ key, mask, fields);
-+ break;
-+ case IP_USER_FLOW:
-+ err = prep_user_ip_rule(&fs->h_u.usr_ip4_spec,
-+ &fs->m_u.usr_ip4_spec, key, mask, fields);
-+ break;
-+ case TCP_V4_FLOW:
-+ err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
-+ key, mask, IPPROTO_TCP, fields);
-+ break;
-+ case UDP_V4_FLOW:
-+ err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
-+ key, mask, IPPROTO_UDP, fields);
-+ break;
-+ case SCTP_V4_FLOW:
-+ err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, &fs->m_u.sctp_ip4_spec,
-+ key, mask, IPPROTO_SCTP, fields);
-+ break;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ if (fs->flow_type & FLOW_EXT) {
-+ err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
-+ if (err)
-+ return err;
-+ }
-+
-+ if (fs->flow_type & FLOW_MAC_EXT) {
-+ err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
-+ fields);
-+ if (err)
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static int do_cls_rule(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs,
-+ bool add)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpni_rule_cfg rule_cfg = { 0 };
-+ struct dpni_fs_action_cfg fs_act = { 0 };
-+ dma_addr_t key_iova;
-+ u64 fields = 0;
-+ void *key_buf;
-+ int i, err = 0;
-+
-+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
-+ fs->ring_cookie >= dpaa2_eth_queue_count(priv))
-+ return -EINVAL;
-+
-+ rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
-+
-+ /* allocate twice the key size, for the actual key and for mask */
-+ key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
-+ if (!key_buf)
-+ return -ENOMEM;
-+
-+ /* Fill the key and mask memory areas */
-+ err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
-+ if (err)
-+ goto free_mem;
-+
-+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
-+ /* Masking allows us to configure a maximal key during init and
-+ * use it for all flow steering rules. Without it, we include
-+ * in the key only the fields actually used, so we need to
-+ * extract the others from the final key buffer.
-+ *
-+ * Program the FS key if needed, or return error if previously
-+ * set key can't be used for the current rule. User needs to
-+ * delete existing rules in this case to allow for the new one.
-+ */
-+ if (!priv->rx_cls_fields) {
-+ err = dpaa2_eth_set_cls(net_dev, fields);
-+ if (err)
-+ goto free_mem;
-+
-+ priv->rx_cls_fields = fields;
-+ } else if (priv->rx_cls_fields != fields) {
-+ netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
-+ err = -EOPNOTSUPP;
-+ goto free_mem;
-+ }
-+
-+ dpaa2_eth_cls_trim_rule(key_buf, fields);
-+ rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
-+ }
-+
-+ key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, key_iova)) {
-+ err = -ENOMEM;
-+ goto free_mem;
-+ }
-+
-+ rule_cfg.key_iova = key_iova;
-+ if (dpaa2_eth_fs_mask_enabled(priv))
-+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
-+
-+ if (add) {
-+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
-+ fs_act.options |= DPNI_FS_OPT_DISCARD;
-+ else
-+ fs_act.flow_id = fs->ring_cookie;
-+ }
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ if (add)
-+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
-+ i, fs->location, &rule_cfg,
-+ &fs_act);
-+ else
-+ err = dpni_remove_fs_entry(priv->mc_io, 0,
-+ priv->mc_token, i,
-+ &rule_cfg);
-+ if (err)
-+ break;
-+ }
-+
-+ dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
-+
-+free_mem:
-+ kfree(key_buf);
-+
-+ return err;
-+}
-+
-+static int num_rules(struct dpaa2_eth_priv *priv)
-+{
-+ int i, rules = 0;
-+
-+ for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
-+ if (priv->cls_rule[i].in_use)
-+ rules++;
-+
-+ return rules;
-+}
-+
-+static int update_cls_rule(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *new_fs,
-+ int location)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpaa2_eth_cls_rule *rule;
-+ int err = -EINVAL;
-+
-+ if (!priv->rx_cls_enabled)
-+ return -EOPNOTSUPP;
-+
-+ if (location >= dpaa2_eth_fs_count(priv))
-+ return -EINVAL;
-+
-+ rule = &priv->cls_rule[location];
-+
-+ /* If a rule is present at the specified location, delete it. */
-+ if (rule->in_use) {
-+ err = do_cls_rule(net_dev, &rule->fs, false);
-+ if (err)
-+ return err;
-+
-+ rule->in_use = 0;
-+
-+ if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
-+ priv->rx_cls_fields = 0;
-+ }
-+
-+ /* If no new entry to add, return here */
-+ if (!new_fs)
-+ return err;
-+
-+ err = do_cls_rule(net_dev, new_fs, true);
-+ if (err)
-+ return err;
-+
-+ rule->in_use = 1;
-+ rule->fs = *new_fs;
-+
-+ return 0;
- }
-
- static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
- {
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int rule_cnt = dpaa2_eth_fs_count(priv);
-+ int i, j = 0;
-
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
-@@ -258,6 +791,29 @@ static int dpaa2_eth_get_rxnfc(struct ne
- case ETHTOOL_GRXRINGS:
- rxnfc->data = dpaa2_eth_queue_count(priv);
- break;
-+ case ETHTOOL_GRXCLSRLCNT:
-+ rxnfc->rule_cnt = 0;
-+ rxnfc->rule_cnt = num_rules(priv);
-+ rxnfc->data = rule_cnt;
-+ break;
-+ case ETHTOOL_GRXCLSRULE:
-+ if (rxnfc->fs.location >= rule_cnt)
-+ return -EINVAL;
-+ if (!priv->cls_rule[rxnfc->fs.location].in_use)
-+ return -EINVAL;
-+ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
-+ break;
-+ case ETHTOOL_GRXCLSRLALL:
-+ for (i = 0; i < rule_cnt; i++) {
-+ if (!priv->cls_rule[i].in_use)
-+ continue;
-+ if (j == rxnfc->rule_cnt)
-+ return -EMSGSIZE;
-+ rule_locs[j++] = i;
-+ }
-+ rxnfc->rule_cnt = j;
-+ rxnfc->data = rule_cnt;
-+ break;
- default:
- return -EOPNOTSUPP;
- }
-@@ -265,13 +821,61 @@ static int dpaa2_eth_get_rxnfc(struct ne
- return 0;
- }
-
-+int dpaa2_phc_index = -1;
-+EXPORT_SYMBOL(dpaa2_phc_index);
-+
-+static int dpaa2_eth_get_ts_info(struct net_device *dev,
-+ struct ethtool_ts_info *info)
-+{
-+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
-+ SOF_TIMESTAMPING_RX_HARDWARE |
-+ SOF_TIMESTAMPING_RAW_HARDWARE;
-+
-+ info->phc_index = dpaa2_phc_index;
-+
-+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
-+ (1 << HWTSTAMP_TX_ON);
-+
-+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-+ (1 << HWTSTAMP_FILTER_ALL);
-+ return 0;
-+}
-+
-+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc)
-+{
-+ int err = 0;
-+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_SRXFH:
-+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
-+ return -EOPNOTSUPP;
-+ err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
-+ break;
-+ case ETHTOOL_SRXCLSRLINS:
-+ err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
-+ break;
-+ case ETHTOOL_SRXCLSRLDEL:
-+ err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
-+ break;
-+ default:
-+ err = -EOPNOTSUPP;
-+ }
-+
-+ return err;
-+}
-+
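From user space these new handlers back the ethtool n-tuple interface: ETHTOOL_SRXCLSRLINS/SRXCLSRLDEL (rule insert/delete) are what ethtool -N issues, ETHTOOL_GRXCLSRULE/GRXCLSRLALL serve ethtool -n rule listing, and ETHTOOL_SRXFH is driven by the rx-flow-hash options; the exact command syntax depends on the installed ethtool version.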
- const struct ethtool_ops dpaa2_ethtool_ops = {
- .get_drvinfo = dpaa2_eth_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_link_ksettings = dpaa2_eth_get_link_ksettings,
- .set_link_ksettings = dpaa2_eth_set_link_ksettings,
-+ .get_pauseparam = dpaa2_eth_get_pauseparam,
-+ .set_pauseparam = dpaa2_eth_set_pauseparam,
- .get_sset_count = dpaa2_eth_get_sset_count,
- .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
- .get_strings = dpaa2_eth_get_strings,
- .get_rxnfc = dpaa2_eth_get_rxnfc,
-+ .set_rxnfc = dpaa2_eth_set_rxnfc,
-+ .get_ts_info = dpaa2_eth_get_ts_info,
- };
---- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
-@@ -1,39 +1,10 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /* Copyright 2013-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPKG_H_
- #define __FSL_DPKG_H_
-
- #include <linux/types.h>
--#include "net.h"
-
- /* Data Path Key Generator API
- * Contains initialization APIs and runtime APIs for the Key Generator
-@@ -86,6 +57,355 @@ struct dpkg_mask {
- u8 offset;
- };
-
-+/* Protocol fields */
-+
-+/* Ethernet fields */
-+#define NH_FLD_ETH_DA BIT(0)
-+#define NH_FLD_ETH_SA BIT(1)
-+#define NH_FLD_ETH_LENGTH BIT(2)
-+#define NH_FLD_ETH_TYPE BIT(3)
-+#define NH_FLD_ETH_FINAL_CKSUM BIT(4)
-+#define NH_FLD_ETH_PADDING BIT(5)
-+#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1)
-+
-+/* VLAN fields */
-+#define NH_FLD_VLAN_VPRI BIT(0)
-+#define NH_FLD_VLAN_CFI BIT(1)
-+#define NH_FLD_VLAN_VID BIT(2)
-+#define NH_FLD_VLAN_LENGTH BIT(3)
-+#define NH_FLD_VLAN_TYPE BIT(4)
-+#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1)
-+
-+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
-+ NH_FLD_VLAN_CFI | \
-+ NH_FLD_VLAN_VID)
-+
-+/* IP (generic) fields */
-+#define NH_FLD_IP_VER BIT(0)
-+#define NH_FLD_IP_DSCP BIT(2)
-+#define NH_FLD_IP_ECN BIT(3)
-+#define NH_FLD_IP_PROTO BIT(4)
-+#define NH_FLD_IP_SRC BIT(5)
-+#define NH_FLD_IP_DST BIT(6)
-+#define NH_FLD_IP_TOS_TC BIT(7)
-+#define NH_FLD_IP_ID BIT(8)
-+#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1)
-+
-+/* IPV4 fields */
-+#define NH_FLD_IPV4_VER BIT(0)
-+#define NH_FLD_IPV4_HDR_LEN BIT(1)
-+#define NH_FLD_IPV4_TOS BIT(2)
-+#define NH_FLD_IPV4_TOTAL_LEN BIT(3)
-+#define NH_FLD_IPV4_ID BIT(4)
-+#define NH_FLD_IPV4_FLAG_D BIT(5)
-+#define NH_FLD_IPV4_FLAG_M BIT(6)
-+#define NH_FLD_IPV4_OFFSET BIT(7)
-+#define NH_FLD_IPV4_TTL BIT(8)
-+#define NH_FLD_IPV4_PROTO BIT(9)
-+#define NH_FLD_IPV4_CKSUM BIT(10)
-+#define NH_FLD_IPV4_SRC_IP BIT(11)
-+#define NH_FLD_IPV4_DST_IP BIT(12)
-+#define NH_FLD_IPV4_OPTS BIT(13)
-+#define NH_FLD_IPV4_OPTS_COUNT BIT(14)
-+#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1)
-+
-+/* IPV6 fields */
-+#define NH_FLD_IPV6_VER BIT(0)
-+#define NH_FLD_IPV6_TC BIT(1)
-+#define NH_FLD_IPV6_SRC_IP BIT(2)
-+#define NH_FLD_IPV6_DST_IP BIT(3)
-+#define NH_FLD_IPV6_NEXT_HDR BIT(4)
-+#define NH_FLD_IPV6_FL BIT(5)
-+#define NH_FLD_IPV6_HOP_LIMIT BIT(6)
-+#define NH_FLD_IPV6_ID BIT(7)
-+#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1)
-+
-+/* ICMP fields */
-+#define NH_FLD_ICMP_TYPE BIT(0)
-+#define NH_FLD_ICMP_CODE BIT(1)
-+#define NH_FLD_ICMP_CKSUM BIT(2)
-+#define NH_FLD_ICMP_ID BIT(3)
-+#define NH_FLD_ICMP_SQ_NUM BIT(4)
-+#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1)
-+
-+/* IGMP fields */
-+#define NH_FLD_IGMP_VERSION BIT(0)
-+#define NH_FLD_IGMP_TYPE BIT(1)
-+#define NH_FLD_IGMP_CKSUM BIT(2)
-+#define NH_FLD_IGMP_DATA BIT(3)
-+#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1)
-+
-+/* TCP fields */
-+#define NH_FLD_TCP_PORT_SRC BIT(0)
-+#define NH_FLD_TCP_PORT_DST BIT(1)
-+#define NH_FLD_TCP_SEQ BIT(2)
-+#define NH_FLD_TCP_ACK BIT(3)
-+#define NH_FLD_TCP_OFFSET BIT(4)
-+#define NH_FLD_TCP_FLAGS BIT(5)
-+#define NH_FLD_TCP_WINDOW BIT(6)
-+#define NH_FLD_TCP_CKSUM BIT(7)
-+#define NH_FLD_TCP_URGPTR BIT(8)
-+#define NH_FLD_TCP_OPTS BIT(9)
-+#define NH_FLD_TCP_OPTS_COUNT BIT(10)
-+#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1)
-+
-+/* UDP fields */
-+#define NH_FLD_UDP_PORT_SRC BIT(0)
-+#define NH_FLD_UDP_PORT_DST BIT(1)
-+#define NH_FLD_UDP_LEN BIT(2)
-+#define NH_FLD_UDP_CKSUM BIT(3)
-+#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1)
-+
-+/* UDP-lite fields */
-+#define NH_FLD_UDP_LITE_PORT_SRC BIT(0)
-+#define NH_FLD_UDP_LITE_PORT_DST BIT(1)
-+#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1)
-+
-+/* UDP-encap-ESP fields */
-+#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0)
-+#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1)
-+#define NH_FLD_UDP_ENC_ESP_LEN BIT(2)
-+#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3)
-+#define NH_FLD_UDP_ENC_ESP_SPI BIT(4)
-+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5)
-+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1)
-+
-+/* SCTP fields */
-+#define NH_FLD_SCTP_PORT_SRC BIT(0)
-+#define NH_FLD_SCTP_PORT_DST BIT(1)
-+#define NH_FLD_SCTP_VER_TAG BIT(2)
-+#define NH_FLD_SCTP_CKSUM BIT(3)
-+#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1)
-+
-+/* DCCP fields */
-+#define NH_FLD_DCCP_PORT_SRC BIT(0)
-+#define NH_FLD_DCCP_PORT_DST BIT(1)
-+#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1)
-+
-+/* IPHC fields */
-+#define NH_FLD_IPHC_CID BIT(0)
-+#define NH_FLD_IPHC_CID_TYPE BIT(1)
-+#define NH_FLD_IPHC_HCINDEX BIT(2)
-+#define NH_FLD_IPHC_GEN BIT(3)
-+#define NH_FLD_IPHC_D_BIT BIT(4)
-+#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1)
-+
-+/* SCTP fields */
-+#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0)
-+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1)
-+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2)
-+#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5)
-+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6)
-+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7)
-+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8)
-+#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9)
-+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1)
-+
-+/* L2TPV2 fields */
-+#define NH_FLD_L2TPV2_TYPE_BIT BIT(0)
-+#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1)
-+#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2)
-+#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3)
-+#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4)
-+#define NH_FLD_L2TPV2_VERSION BIT(5)
-+#define NH_FLD_L2TPV2_LEN BIT(6)
-+#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7)
-+#define NH_FLD_L2TPV2_SESSION_ID BIT(8)
-+#define NH_FLD_L2TPV2_NS BIT(9)
-+#define NH_FLD_L2TPV2_NR BIT(10)
-+#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11)
-+#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12)
-+#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1)
-+
-+/* L2TPV3 fields */
-+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1)
-+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2)
-+#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4)
-+#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5)
-+#define NH_FLD_L2TPV3_CTRL_SENT BIT(6)
-+#define NH_FLD_L2TPV3_CTRL_RECV BIT(7)
-+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8)
-+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1)
-+
-+#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0)
-+#define NH_FLD_L2TPV3_SESS_VERSION BIT(1)
-+#define NH_FLD_L2TPV3_SESS_ID BIT(2)
-+#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3)
-+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1)
-+
-+/* PPP fields */
-+#define NH_FLD_PPP_PID BIT(0)
-+#define NH_FLD_PPP_COMPRESSED BIT(1)
-+#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1)
-+
-+/* PPPoE fields */
-+#define NH_FLD_PPPOE_VER BIT(0)
-+#define NH_FLD_PPPOE_TYPE BIT(1)
-+#define NH_FLD_PPPOE_CODE BIT(2)
-+#define NH_FLD_PPPOE_SID BIT(3)
-+#define NH_FLD_PPPOE_LEN BIT(4)
-+#define NH_FLD_PPPOE_SESSION BIT(5)
-+#define NH_FLD_PPPOE_PID BIT(6)
-+#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1)
-+
-+/* PPP-Mux fields */
-+#define NH_FLD_PPPMUX_PID BIT(0)
-+#define NH_FLD_PPPMUX_CKSUM BIT(1)
-+#define NH_FLD_PPPMUX_COMPRESSED BIT(2)
-+#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1)
-+
-+/* PPP-Mux sub-frame fields */
-+#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0)
-+#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1)
-+#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2)
-+#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3)
-+#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4)
-+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1)
-+
-+/* LLC fields */
-+#define NH_FLD_LLC_DSAP BIT(0)
-+#define NH_FLD_LLC_SSAP BIT(1)
-+#define NH_FLD_LLC_CTRL BIT(2)
-+#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1)
-+
-+/* NLPID fields */
-+#define NH_FLD_NLPID_NLPID BIT(0)
-+#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1)
-+
-+/* SNAP fields */
-+#define NH_FLD_SNAP_OUI BIT(0)
-+#define NH_FLD_SNAP_PID BIT(1)
-+#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1)
-+
-+/* LLC SNAP fields */
-+#define NH_FLD_LLC_SNAP_TYPE BIT(0)
-+#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1)
-+
-+/* ARP fields */
-+#define NH_FLD_ARP_HTYPE BIT(0)
-+#define NH_FLD_ARP_PTYPE BIT(1)
-+#define NH_FLD_ARP_HLEN BIT(2)
-+#define NH_FLD_ARP_PLEN BIT(3)
-+#define NH_FLD_ARP_OPER BIT(4)
-+#define NH_FLD_ARP_SHA BIT(5)
-+#define NH_FLD_ARP_SPA BIT(6)
-+#define NH_FLD_ARP_THA BIT(7)
-+#define NH_FLD_ARP_TPA BIT(8)
-+#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1)
-+
-+/* RFC2684 fields */
-+#define NH_FLD_RFC2684_LLC BIT(0)
-+#define NH_FLD_RFC2684_NLPID BIT(1)
-+#define NH_FLD_RFC2684_OUI BIT(2)
-+#define NH_FLD_RFC2684_PID BIT(3)
-+#define NH_FLD_RFC2684_VPN_OUI BIT(4)
-+#define NH_FLD_RFC2684_VPN_IDX BIT(5)
-+#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1)
-+
-+/* User defined fields */
-+#define NH_FLD_USER_DEFINED_SRCPORT BIT(0)
-+#define NH_FLD_USER_DEFINED_PCDID BIT(1)
-+#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1)
-+
-+/* Payload fields */
-+#define NH_FLD_PAYLOAD_BUFFER BIT(0)
-+#define NH_FLD_PAYLOAD_SIZE BIT(1)
-+#define NH_FLD_MAX_FRM_SIZE BIT(2)
-+#define NH_FLD_MIN_FRM_SIZE BIT(3)
-+#define NH_FLD_PAYLOAD_TYPE BIT(4)
-+#define NH_FLD_FRAME_SIZE BIT(5)
-+#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1)
-+
-+/* GRE fields */
-+#define NH_FLD_GRE_TYPE BIT(0)
-+#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1)
-+
-+/* MINENCAP fields */
-+#define NH_FLD_MINENCAP_SRC_IP BIT(0)
-+#define NH_FLD_MINENCAP_DST_IP BIT(1)
-+#define NH_FLD_MINENCAP_TYPE BIT(2)
-+#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1)
-+
-+/* IPSEC AH fields */
-+#define NH_FLD_IPSEC_AH_SPI BIT(0)
-+#define NH_FLD_IPSEC_AH_NH BIT(1)
-+#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1)
-+
-+/* IPSEC ESP fields */
-+#define NH_FLD_IPSEC_ESP_SPI BIT(0)
-+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1)
-+#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1)
-+
-+/* MPLS fields */
-+#define NH_FLD_MPLS_LABEL_STACK BIT(0)
-+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1)
-+
-+/* MACSEC fields */
-+#define NH_FLD_MACSEC_SECTAG BIT(0)
-+#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1)
-+
-+/* GTP fields */
-+#define NH_FLD_GTP_TEID BIT(0)
-+
-+/* Supported protocols */
-+enum net_prot {
-+ NET_PROT_NONE = 0,
-+ NET_PROT_PAYLOAD,
-+ NET_PROT_ETH,
-+ NET_PROT_VLAN,
-+ NET_PROT_IPV4,
-+ NET_PROT_IPV6,
-+ NET_PROT_IP,
-+ NET_PROT_TCP,
-+ NET_PROT_UDP,
-+ NET_PROT_UDP_LITE,
-+ NET_PROT_IPHC,
-+ NET_PROT_SCTP,
-+ NET_PROT_SCTP_CHUNK_DATA,
-+ NET_PROT_PPPOE,
-+ NET_PROT_PPP,
-+ NET_PROT_PPPMUX,
-+ NET_PROT_PPPMUX_SUBFRM,
-+ NET_PROT_L2TPV2,
-+ NET_PROT_L2TPV3_CTRL,
-+ NET_PROT_L2TPV3_SESS,
-+ NET_PROT_LLC,
-+ NET_PROT_LLC_SNAP,
-+ NET_PROT_NLPID,
-+ NET_PROT_SNAP,
-+ NET_PROT_MPLS,
-+ NET_PROT_IPSEC_AH,
-+ NET_PROT_IPSEC_ESP,
-+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
-+ NET_PROT_MACSEC,
-+ NET_PROT_GRE,
-+ NET_PROT_MINENCAP,
-+ NET_PROT_DCCP,
-+ NET_PROT_ICMP,
-+ NET_PROT_IGMP,
-+ NET_PROT_ARP,
-+ NET_PROT_CAPWAP_DATA,
-+ NET_PROT_CAPWAP_CTRL,
-+ NET_PROT_RFC2684,
-+ NET_PROT_ICMPV6,
-+ NET_PROT_FCOE,
-+ NET_PROT_FIP,
-+ NET_PROT_ISCSI,
-+ NET_PROT_GTP,
-+ NET_PROT_USER_DEFINED_L2,
-+ NET_PROT_USER_DEFINED_L3,
-+ NET_PROT_USER_DEFINED_L4,
-+ NET_PROT_USER_DEFINED_L5,
-+ NET_PROT_USER_DEFINED_SHIM1,
-+ NET_PROT_USER_DEFINED_SHIM2,
-+
-+ NET_PROT_DUMMY_LAST
-+};
-+
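Each protocol's *_ALL_FIELDS mask follows the same convention: with n single-bit field flags defined as BIT(0)..BIT(n-1), BIT(n) - 1 sets exactly those n low bits. For example, NH_FLD_ETH_ALL_FIELDS = BIT(6) - 1 = 0x3F, which is DA | SA | LENGTH | TYPE | FINAL_CKSUM | PADDING.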
- /**
- * struct dpkg_extract - A structure for defining a single extraction
- * @type: Determines how the union below is interpreted:
---- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
-@@ -1,34 +1,6 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef _FSL_DPNI_CMD_H
- #define _FSL_DPNI_CMD_H
-@@ -39,9 +11,11 @@
- #define DPNI_VER_MAJOR 7
- #define DPNI_VER_MINOR 0
- #define DPNI_CMD_BASE_VERSION 1
-+#define DPNI_CMD_2ND_VERSION 2
- #define DPNI_CMD_ID_OFFSET 4
-
- #define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
-+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
-
- #define DPNI_CMDID_OPEN DPNI_CMD(0x801)
- #define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
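Each command ID above packs both the ID and a command-format version into one value: the ID is shifted left by DPNI_CMD_ID_OFFSET (4) and the low nibble carries the version, 1 for the base layout and 2 for the extended DPNI_CMD_V2 layout. Worked out for the link-state command used elsewhere in this patch:

DPNI_CMD(0x215)    = (0x215 << 4) | 1 = 0x2151
DPNI_CMD_V2(0x215) = (0x215 << 4) | 2 = 0x2152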
-@@ -64,16 +38,18 @@
- #define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
- #define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
-
--#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
-+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
- #define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
-
- #define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
- #define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
- #define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
-+#define DPNI_CMDID_GET_LINK_STATE_V2 DPNI_CMD_V2(0x215)
- #define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
- #define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
- #define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
--#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
-+#define DPNI_CMDID_SET_LINK_CFG_V2 DPNI_CMD_V2(0x21A)
-+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
-
- #define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
- #define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
-@@ -87,11 +63,16 @@
-
- #define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
-
-+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
-+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
-+#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
- #define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
- #define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
- #define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
-
--#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
-+#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
-+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
-+#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
- #define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
- #define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
- #define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
-@@ -110,6 +91,9 @@
- #define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
- #define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
-
-+#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
-+#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
-+
- /* Macros for accessing command fields smaller than 1byte */
- #define DPNI_MASK(field) \
- GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
-@@ -126,13 +110,14 @@ struct dpni_cmd_open {
-
- #define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
- struct dpni_cmd_set_pools {
-- /* cmd word 0 */
- u8 num_dpbp;
- u8 backup_pool_mask;
- __le16 pad;
-- /* cmd word 0..4 */
-- __le32 dpbp_id[DPNI_MAX_DPBP];
-- /* cmd word 4..6 */
-+ struct {
-+ __le16 dpbp_id;
-+ u8 priority_mask;
-+ u8 pad;
-+ } pool[DPNI_MAX_DPBP];
- __le16 buffer_size[DPNI_MAX_DPBP];
- };
-
-@@ -303,6 +288,7 @@ struct dpni_rsp_get_tx_data_offset {
-
- struct dpni_cmd_get_statistics {
- u8 page_number;
-+ u8 param;
- };
-
- struct dpni_rsp_get_statistics {
-@@ -319,8 +305,22 @@ struct dpni_cmd_set_link_cfg {
- __le64 options;
- };
-
-+struct dpni_cmd_set_link_cfg_v2 {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad1;
-+ /* cmd word 2 */
-+ __le64 options;
-+ /* cmd word 3 */
-+ __le64 advertising;
-+};
-+
- #define DPNI_LINK_STATE_SHIFT 0
- #define DPNI_LINK_STATE_SIZE 1
-+#define DPNI_STATE_VALID_SHIFT 1
-+#define DPNI_STATE_VALID_SIZE 1
-
- struct dpni_rsp_get_link_state {
- /* response word 0 */
-@@ -335,6 +335,39 @@ struct dpni_rsp_get_link_state {
- __le64 options;
- };
-
-+struct dpni_rsp_get_link_state_v2 {
-+ /* response word 0 */
-+ __le32 pad0;
-+ /* from LSB: up:1, valid:1 */
-+ u8 flags;
-+ u8 pad1[3];
-+ /* response word 1 */
-+ __le32 rate;
-+ __le32 pad2;
-+ /* response word 2 */
-+ __le64 options;
-+ /* cmd word 3 */
-+ __le64 supported;
-+ /* cmd word 4 */
-+ __le64 advertising;
-+};
-+
-+#define DPNI_COUPLED_SHIFT 0
-+#define DPNI_COUPLED_SIZE 1
-+
-+struct dpni_cmd_set_tx_shaping {
-+ /* cmd word 0 */
-+ __le16 tx_cr_max_burst_size;
-+ __le16 tx_er_max_burst_size;
-+ __le32 pad;
-+ /* cmd word 1 */
-+ __le32 tx_cr_rate_limit;
-+ __le32 tx_er_rate_limit;
-+ /* cmd word 2 */
-+ /* from LSB: coupled:1 */
-+ u8 coupled;
-+};
-+
- struct dpni_cmd_set_max_frame_length {
- __le16 max_frame_length;
- };
-@@ -394,6 +427,24 @@ struct dpni_cmd_clear_mac_filters {
- u8 flags;
- };
-
-+#define DPNI_SEPARATE_GRP_SHIFT 0
-+#define DPNI_SEPARATE_GRP_SIZE 1
-+#define DPNI_MODE_1_SHIFT 0
-+#define DPNI_MODE_1_SIZE 4
-+#define DPNI_MODE_2_SHIFT 4
-+#define DPNI_MODE_2_SIZE 4
-+
-+struct dpni_cmd_set_tx_priorities {
-+ __le16 flags;
-+ u8 prio_group_A;
-+ u8 prio_group_B;
-+ __le32 pad0;
-+ u8 modes[4];
-+ __le32 pad1;
-+ __le64 pad2;
-+ __le16 delta_bandwidth[8];
-+};
-+
- #define DPNI_DIST_MODE_SHIFT 0
- #define DPNI_DIST_MODE_SIZE 4
- #define DPNI_MISS_ACTION_SHIFT 4
-@@ -503,6 +554,63 @@ struct dpni_cmd_set_queue {
- __le64 user_context;
- };
-
-+#define DPNI_DISCARD_ON_MISS_SHIFT 0
-+#define DPNI_DISCARD_ON_MISS_SIZE 1
-+
-+struct dpni_cmd_set_qos_table {
-+ __le32 pad;
-+ u8 default_tc;
-+ /* only the LSB */
-+ u8 discard_on_miss;
-+ __le16 pad1[21];
-+ __le64 key_cfg_iova;
-+};
-+
-+struct dpni_cmd_add_qos_entry {
-+ __le16 pad;
-+ u8 tc_id;
-+ u8 key_size;
-+ __le16 index;
-+ __le16 pad2;
-+ __le64 key_iova;
-+ __le64 mask_iova;
-+};
-+
-+struct dpni_cmd_remove_qos_entry {
-+ u8 pad1[3];
-+ u8 key_size;
-+ __le32 pad2;
-+ __le64 key_iova;
-+ __le64 mask_iova;
-+};
-+
-+struct dpni_cmd_add_fs_entry {
-+ /* cmd word 0 */
-+ __le16 options;
-+ u8 tc_id;
-+ u8 key_size;
-+ __le16 index;
-+ __le16 flow_id;
-+ /* cmd word 1 */
-+ __le64 key_iova;
-+ /* cmd word 2 */
-+ __le64 mask_iova;
-+ /* cmd word 3 */
-+ __le64 flc;
-+};
-+
-+struct dpni_cmd_remove_fs_entry {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ u8 tc_id;
-+ u8 key_size;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ __le64 key_iova;
-+ /* cmd word 2 */
-+ __le64 mask_iova;
-+};
-+
- struct dpni_cmd_set_taildrop {
- /* cmd word 0 */
- u8 congestion_point;
-@@ -538,4 +646,79 @@ struct dpni_rsp_get_taildrop {
- __le32 threshold;
- };
-
-+struct dpni_rsp_get_api_version {
-+ u16 major;
-+ u16 minor;
-+};
-+
-+#define DPNI_DEST_TYPE_SHIFT 0
-+#define DPNI_DEST_TYPE_SIZE 4
-+#define DPNI_CONG_UNITS_SHIFT 4
-+#define DPNI_CONG_UNITS_SIZE 2
-+
-+struct dpni_cmd_set_congestion_notification {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 tc;
-+ u8 pad[6];
-+ /* cmd word 1 */
-+ __le32 dest_id;
-+ __le16 notification_mode;
-+ u8 dest_priority;
-+ /* from LSB: dest_type: 4 units:2 */
-+ u8 type_units;
-+ /* cmd word 2 */
-+ __le64 message_iova;
-+ /* cmd word 3 */
-+ __le64 message_ctx;
-+ /* cmd word 4 */
-+ __le32 threshold_entry;
-+ __le32 threshold_exit;
-+};
-+
-+struct dpni_cmd_get_congestion_notification {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 tc;
-+};
-+
-+struct dpni_rsp_get_congestion_notification {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le32 dest_id;
-+ __le16 notification_mode;
-+ u8 dest_priority;
-+ /* from LSB: dest_type: 4 units:2 */
-+ u8 type_units;
-+ /* cmd word 2 */
-+ __le64 message_iova;
-+ /* cmd word 3 */
-+ __le64 message_ctx;
-+ /* cmd word 4 */
-+ __le32 threshold_entry;
-+ __le32 threshold_exit;
-+};
-+
-+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
-+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
-+struct dpni_cmd_set_rx_fs_dist {
-+ __le16 dist_size;
-+ u8 enable;
-+ u8 tc;
-+ __le16 miss_flow_id;
-+ __le16 pad;
-+ __le64 key_cfg_iova;
-+};
-+
-+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
-+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
-+struct dpni_cmd_set_rx_hash_dist {
-+ __le16 dist_size;
-+ u8 enable;
-+ u8 tc;
-+ __le32 pad;
-+ __le64 key_cfg_iova;
-+};
-+
- #endif /* _FSL_DPNI_CMD_H */
---- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-@@ -1,34 +1,6 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #include <linux/kernel.h>
- #include <linux/errno.h>
-@@ -122,7 +94,7 @@ int dpni_open(struct fsl_mc_io *mc_io,
- int dpni_id,
- u16 *token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_open *cmd_params;
-
- int err;
-@@ -160,7 +132,7 @@ int dpni_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
-@@ -188,7 +160,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_
- u16 token,
- const struct dpni_pools_cfg *cfg)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_pools *cmd_params;
- int i;
-
-@@ -199,7 +171,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_
- cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
- cmd_params->num_dpbp = cfg->num_dpbp;
- for (i = 0; i < DPNI_MAX_DPBP; i++) {
-- cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
-+ cmd_params->pool[i].dpbp_id =
-+ cpu_to_le16(cfg->pools[i].dpbp_id);
-+ cmd_params->pool[i].priority_mask =
-+ cfg->pools[i].priority_mask;
- cmd_params->buffer_size[i] =
- cpu_to_le16(cfg->pools[i].buffer_size);
- cmd_params->backup_pool_mask |=
-@@ -222,7 +197,7 @@ int dpni_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
-@@ -245,7 +220,7 @@ int dpni_disable(struct fsl_mc_io *mc_io
- u32 cmd_flags,
- u16 token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
-@@ -270,7 +245,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc
- u16 token,
- int *en)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_is_enabled *rsp_params;
- int err;
-
-@@ -303,7 +278,7 @@ int dpni_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
-@@ -335,7 +310,7 @@ int dpni_set_irq_enable(struct fsl_mc_io
- u8 irq_index,
- u8 en)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_irq_enable *cmd_params;
-
- /* prepare command */
-@@ -366,7 +341,7 @@ int dpni_get_irq_enable(struct fsl_mc_io
- u8 irq_index,
- u8 *en)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_irq_enable *cmd_params;
- struct dpni_rsp_get_irq_enable *rsp_params;
-
-@@ -413,7 +388,7 @@ int dpni_set_irq_mask(struct fsl_mc_io *
- u8 irq_index,
- u32 mask)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_irq_mask *cmd_params;
-
- /* prepare command */
-@@ -447,7 +422,7 @@ int dpni_get_irq_mask(struct fsl_mc_io *
- u8 irq_index,
- u32 *mask)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_irq_mask *cmd_params;
- struct dpni_rsp_get_irq_mask *rsp_params;
- int err;
-@@ -489,7 +464,7 @@ int dpni_get_irq_status(struct fsl_mc_io
- u8 irq_index,
- u32 *status)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_irq_status *cmd_params;
- struct dpni_rsp_get_irq_status *rsp_params;
- int err;
-@@ -532,7 +507,7 @@ int dpni_clear_irq_status(struct fsl_mc_
- u8 irq_index,
- u32 status)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_clear_irq_status *cmd_params;
-
- /* prepare command */
-@@ -561,7 +536,7 @@ int dpni_get_attributes(struct fsl_mc_io
- u16 token,
- struct dpni_attr *attr)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_attr *rsp_params;
-
- int err;
-@@ -609,7 +584,7 @@ int dpni_set_errors_behavior(struct fsl_
- u16 token,
- struct dpni_error_cfg *cfg)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_errors_behavior *cmd_params;
-
- /* prepare command */
-@@ -641,7 +616,7 @@ int dpni_get_buffer_layout(struct fsl_mc
- enum dpni_queue_type qtype,
- struct dpni_buffer_layout *layout)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_buffer_layout *cmd_params;
- struct dpni_rsp_get_buffer_layout *rsp_params;
- int err;
-@@ -689,7 +664,7 @@ int dpni_set_buffer_layout(struct fsl_mc
- enum dpni_queue_type qtype,
- const struct dpni_buffer_layout *layout)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_buffer_layout *cmd_params;
-
- /* prepare command */
-@@ -731,7 +706,7 @@ int dpni_set_offload(struct fsl_mc_io *m
- enum dpni_offload type,
- u32 config)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_offload *cmd_params;
-
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
-@@ -750,7 +725,7 @@ int dpni_get_offload(struct fsl_mc_io *m
- enum dpni_offload type,
- u32 *config)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_offload *cmd_params;
- struct dpni_rsp_get_offload *rsp_params;
- int err;
-@@ -792,7 +767,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i
- enum dpni_queue_type qtype,
- u16 *qdid)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_qdid *cmd_params;
- struct dpni_rsp_get_qdid *rsp_params;
- int err;
-@@ -830,7 +805,7 @@ int dpni_get_tx_data_offset(struct fsl_m
- u16 token,
- u16 *data_offset)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_tx_data_offset *rsp_params;
- int err;
-
-@@ -865,7 +840,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *
- u16 token,
- const struct dpni_link_cfg *cfg)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_link_cfg *cmd_params;
-
- /* prepare command */
-@@ -881,6 +856,36 @@ int dpni_set_link_cfg(struct fsl_mc_io *
- }
-
- /**
-+ * dpni_set_link_cfg_v2() - set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_link_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpni_cmd_set_link_cfg_v2 *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG_V2,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_link_cfg_v2 *)cmd.params;
-+ cmd_params->rate = cpu_to_le32(cfg->rate);
-+ cmd_params->options = cpu_to_le64(cfg->options);
-+ cmd_params->advertising = cpu_to_le64(cfg->advertising);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
- * dpni_get_link_state() - Return the link state (either up or down)
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -894,7 +899,7 @@ int dpni_get_link_state(struct fsl_mc_io
- u16 token,
- struct dpni_link_state *state)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_link_state *rsp_params;
- int err;
-
-@@ -918,6 +923,84 @@ int dpni_get_link_state(struct fsl_mc_io
- }
-
- /**
-+ * dpni_get_link_state_v2() - Return the link state (either up or down)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @state: Returned link state;
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_link_state *state)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpni_rsp_get_link_state_v2 *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE_V2,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_link_state_v2 *)cmd.params;
-+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
-+ state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID);
-+ state->rate = le32_to_cpu(rsp_params->rate);
-+ state->options = le64_to_cpu(rsp_params->options);
-+ state->supported = le64_to_cpu(rsp_params->supported);
-+ state->advertising = le64_to_cpu(rsp_params->advertising);
-+
-+ return 0;
-+}
-+
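dpni_get_link_state_v2() unpacks the response's single flags byte using the SHIFT/SIZE pairs from dpni-cmd.h: bit 0 (LINK_STATE) reports link up and bit 1 (STATE_VALID) reports whether that state can be trusted. A stand-alone sketch of the same extraction; the local GENMASK and GET_FIELD macros merely mirror the kernel's GENMASK/dpni_get_field and are defined here only so the example compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((1U << ((h) - (l) + 1)) - 1) << (l))

#define LINK_STATE_SHIFT	0
#define LINK_STATE_SIZE		1
#define STATE_VALID_SHIFT	1
#define STATE_VALID_SIZE	1

/* Mirrors dpni_get_field(): take SIZE bits starting at SHIFT. */
#define GET_FIELD(val, f) \
	(((val) & GENMASK(f##_SHIFT + f##_SIZE - 1, f##_SHIFT)) >> f##_SHIFT)

int main(void)
{
	uint8_t flags = 0x03;	/* link up, state valid */

	printf("up=%u valid=%u\n",
	       (unsigned)GET_FIELD(flags, LINK_STATE),
	       (unsigned)GET_FIELD(flags, STATE_VALID));
	return 0;
}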
-+/**
-+ * dpni_set_tx_shaping() - Set the transmit shaping
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tx_cr_shaper: TX committed rate shaping configuration
-+ * @tx_er_shaper: TX excess rate shaping configuration
-+ * @coupled: Committed and excess rate shapers are coupled
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
-+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
-+ int coupled)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpni_cmd_set_tx_shaping *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
-+ cmd_params->tx_cr_max_burst_size =
-+ cpu_to_le16(tx_cr_shaper->max_burst_size);
-+ cmd_params->tx_er_max_burst_size =
-+ cpu_to_le16(tx_er_shaper->max_burst_size);
-+ cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
-+ cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
-+ dpni_set_field(cmd_params->coupled, COUPLED, coupled);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
- * dpni_set_max_frame_length() - Set the maximum received frame length.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -933,7 +1016,7 @@ int dpni_set_max_frame_length(struct fsl
- u16 token,
- u16 max_frame_length)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_max_frame_length *cmd_params;
-
- /* prepare command */
-@@ -963,7 +1046,7 @@ int dpni_get_max_frame_length(struct fsl
- u16 token,
- u16 *max_frame_length)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_max_frame_length *rsp_params;
- int err;
-
-@@ -998,7 +1081,7 @@ int dpni_set_multicast_promisc(struct fs
- u16 token,
- int en)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_multicast_promisc *cmd_params;
-
- /* prepare command */
-@@ -1026,7 +1109,7 @@ int dpni_get_multicast_promisc(struct fs
- u16 token,
- int *en)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_multicast_promisc *rsp_params;
- int err;
-
-@@ -1061,7 +1144,7 @@ int dpni_set_unicast_promisc(struct fsl_
- u16 token,
- int en)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_unicast_promisc *cmd_params;
-
- /* prepare command */
-@@ -1089,7 +1172,7 @@ int dpni_get_unicast_promisc(struct fsl_
- u16 token,
- int *en)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_unicast_promisc *rsp_params;
- int err;
-
-@@ -1124,7 +1207,7 @@ int dpni_set_primary_mac_addr(struct fsl
- u16 token,
- const u8 mac_addr[6])
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_primary_mac_addr *cmd_params;
- int i;
-
-@@ -1154,7 +1237,7 @@ int dpni_get_primary_mac_addr(struct fsl
- u16 token,
- u8 mac_addr[6])
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_primary_mac_addr *rsp_params;
- int i, err;
-
-@@ -1193,7 +1276,7 @@ int dpni_get_port_mac_addr(struct fsl_mc
- u16 token,
- u8 mac_addr[6])
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_rsp_get_port_mac_addr *rsp_params;
- int i, err;
-
-@@ -1229,7 +1312,7 @@ int dpni_add_mac_addr(struct fsl_mc_io *
- u16 token,
- const u8 mac_addr[6])
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_add_mac_addr *cmd_params;
- int i;
-
-@@ -1259,7 +1342,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i
- u16 token,
- const u8 mac_addr[6])
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_remove_mac_addr *cmd_params;
- int i;
-
-@@ -1293,7 +1376,7 @@ int dpni_clear_mac_filters(struct fsl_mc
- int unicast,
- int multicast)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_clear_mac_filters *cmd_params;
-
- /* prepare command */
-@@ -1309,6 +1392,55 @@ int dpni_clear_mac_filters(struct fsl_mc
- }
-
- /**
-+ * dpni_set_tx_priorities() - Set transmission TC priority configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Transmission selection configuration
-+ *
-+ * warning: Allowed only when DPNI is disabled
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_priorities_cfg *cfg)
-+{
-+ struct dpni_cmd_set_tx_priorities *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
-+ dpni_set_field(cmd_params->flags,
-+ SEPARATE_GRP,
-+ cfg->separate_groups);
-+ cmd_params->prio_group_A = cfg->prio_group_A;
-+ cmd_params->prio_group_B = cfg->prio_group_B;
-+
-+ for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
-+ dpni_set_field(cmd_params->modes[i / 2],
-+ MODE_1,
-+ cfg->tc_sched[i].mode);
-+ dpni_set_field(cmd_params->modes[i / 2],
-+ MODE_2,
-+ cfg->tc_sched[i + 1].mode);
-+ }
-+
-+ for (i = 0; i < DPNI_MAX_TC; i++) {
-+ cmd_params->delta_bandwidth[i] =
-+ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
-+ }
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
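A hedged sketch of how a caller might fill struct dpni_tx_priorities_cfg (helper name and weights are illustrative; per the warning above, the DPNI must be disabled when this command is issued):

static int example_set_tx_priorities(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpni_tx_priorities_cfg cfg = { 0 };
	int i;

	for (i = 0; i < DPNI_MAX_TC; i++) {
		if (i < 2) {
			/* two strict-priority classes */
			cfg.tc_sched[i].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
		} else {
			/* remaining classes weighted within group A */
			cfg.tc_sched[i].mode = DPNI_TX_SCHED_WEIGHTED_A;
			cfg.tc_sched[i].delta_bandwidth = 1000;	/* weight, 100-10000 */
		}
	}
	cfg.prio_group_A = 1;
	cfg.separate_groups = 0;

	return dpni_set_tx_priorities(mc_io, 0, token, &cfg);
}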
-+/**
- * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -1327,7 +1459,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
- u8 tc_id,
- const struct dpni_rx_tc_dist_cfg *cfg)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_rx_tc_dist *cmd_params;
-
- /* prepare command */
-@@ -1346,6 +1478,215 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
- return mc_send_command(mc_io, &cmd);
- }
-
-+/**
-+ * dpni_set_qos_table() - Set QoS mapping table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS table configuration
-+ *
-+ * This function and all QoS-related functions require that
-+ * 'max_tcs > 1' was set at DPNI creation.
-+ *
-+ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
-+ * prepare the key_cfg_iova parameter
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_qos_tbl_cfg *cfg)
-+{
-+ struct dpni_cmd_set_qos_table *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
-+ cmd_params->default_tc = cfg->default_tc;
-+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
-+ dpni_set_field(cmd_params->discard_on_miss,
-+ ENABLE,
-+ cfg->discard_on_miss);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS rule to add
-+ * @tc_id: Traffic class selection (0-7)
-+ * @index: Location in the QoS table where to insert the entry.
-+ * Only relevant if MASKING is enabled for QoS classification on
-+ * this DPNI, it is ignored for exact match.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rule_cfg *cfg,
-+ u8 tc_id,
-+ u16 index)
-+{
-+ struct dpni_cmd_add_qos_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->index = cpu_to_le16(index);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
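A hypothetical sketch chaining dpni_set_qos_table() and dpni_add_qos_entry(); the iova arguments are assumed to be DMA addresses of buffers the caller prepared beforehand (key_cfg_iova via dpkg_prepare_key_cfg()):

static int example_qos_setup(struct fsl_mc_io *mc_io, u16 token,
			     u64 key_cfg_iova, u64 key_iova,
			     u64 mask_iova, u8 key_size)
{
	struct dpni_qos_tbl_cfg tbl = {
		.key_cfg_iova = key_cfg_iova,
		.discard_on_miss = 0,	/* misses fall back to default_tc */
		.default_tc = 0,
	};
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	int err;

	err = dpni_set_qos_table(mc_io, 0, token, &tbl);
	if (err)
		return err;

	/* map frames matching the rule to traffic class 1, entry index 0 */
	return dpni_add_qos_entry(mc_io, 0, token, &rule, 1, 0);
}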
-+/**
-+ * dpni_remove_qos_entry() - Remove QoS mapping entry
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS rule to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rule_cfg *cfg)
-+{
-+ struct dpni_cmd_remove_qos_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpni_set_congestion_notification() - Set traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_congestion_notification(
-+ struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct dpni_cmd_set_congestion_notification *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc_id;
-+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
-+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
-+ cmd_params->dest_priority = cfg->dest_cfg.priority;
-+ dpni_set_field(cmd_params->type_units, DEST_TYPE,
-+ cfg->dest_cfg.dest_type);
-+ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
-+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
-+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
-+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
-+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpni_get_congestion_notification() - Get traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
-+ * @tc_id: bits 7-4 contain ceetm channel index (valid only for TX);
-+ * bits 3-0 contain traffic class.
-+ * Use macro DPNI_BUILD_CH_TC() to build correct value for
-+ * tc_id parameter.
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_congestion_notification(
-+ struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct dpni_rsp_get_congestion_notification *rsp_params;
-+ struct dpni_cmd_get_congestion_notification *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc_id;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
-+ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
-+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
-+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
-+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
-+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
-+ cfg->dest_cfg.priority = rsp_params->dest_priority;
-+ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
-+ DEST_TYPE);
-+
-+ return 0;
-+}
-+
- /**
- * dpni_set_queue() - Set queue parameters
- * @mc_io: Pointer to MC portal's I/O object
-@@ -1371,7 +1712,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_
- u8 options,
- const struct dpni_queue *queue)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_queue *cmd_params;
-
- /* prepare command */
-@@ -1419,7 +1760,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_
- struct dpni_queue *queue,
- struct dpni_queue_id *qid)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_queue *cmd_params;
- struct dpni_rsp_get_queue *rsp_params;
- int err;
-@@ -1463,6 +1804,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_
- * @token: Token of DPNI object
- * @page: Selects the statistics page to retrieve, see
- * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
-+ * @param: Custom parameter for some pages used to select a certain
-+ * statistic source, for example the TC.
- * @stat: Structure containing the statistics
- *
- * Return: '0' on Success; Error code otherwise.
-@@ -1471,9 +1814,10 @@ int dpni_get_statistics(struct fsl_mc_io
- u32 cmd_flags,
- u16 token,
- u8 page,
-+ u8 param,
- union dpni_statistics *stat)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_statistics *cmd_params;
- struct dpni_rsp_get_statistics *rsp_params;
- int i, err;
-@@ -1484,6 +1828,7 @@ int dpni_get_statistics(struct fsl_mc_io
- token);
- cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
- cmd_params->page_number = page;
-+ cmd_params->param = param;
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
-@@ -1499,6 +1844,29 @@ int dpni_get_statistics(struct fsl_mc_io
- }
-
- /**
-+ * dpni_reset_statistics() - Clears DPNI statistics
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
- * dpni_set_taildrop() - Set taildrop per queue or TC
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -1506,7 +1874,10 @@ int dpni_get_statistics(struct fsl_mc_io
- * @cg_point: Congestion point
- * @q_type: Queue type on which the taildrop is configured.
- * Only Rx queues are supported for now
-- * @tc: Traffic class to apply this taildrop to
-+ * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
-+ * bits 3-0 contain traffic class.
-+ * Use macro DPNI_BUILD_CH_TC() to build correct value for
-+ * tc parameter.
- * @q_index: Index of the queue if the DPNI supports multiple queues for
- * traffic distribution. Ignored if CONGESTION_POINT is not 0.
- * @taildrop: Taildrop structure
-@@ -1522,7 +1893,7 @@ int dpni_set_taildrop(struct fsl_mc_io *
- u8 index,
- struct dpni_taildrop *taildrop)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_taildrop *cmd_params;
-
- /* prepare command */
-@@ -1550,7 +1921,10 @@ int dpni_set_taildrop(struct fsl_mc_io *
- * @cg_point: Congestion point
- * @q_type: Queue type on which the taildrop is configured.
- * Only Rx queues are supported for now
-- * @tc: Traffic class to apply this taildrop to
-+ * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
-+ * bits 3-0 contain traffic class.
-+ * Use macro DPNI_BUILD_CH_TC() to build correct value for
-+ * tc parameter.
- * @q_index: Index of the queue if the DPNI supports multiple queues for
- * traffic distribution. Ignored if CONGESTION_POINT is not 0.
- * @taildrop: Taildrop structure
-@@ -1566,7 +1940,7 @@ int dpni_get_taildrop(struct fsl_mc_io *
- u8 index,
- struct dpni_taildrop *taildrop)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_get_taildrop *cmd_params;
- struct dpni_rsp_get_taildrop *rsp_params;
- int err;
-@@ -1594,3 +1968,187 @@ int dpni_get_taildrop(struct fsl_mc_io *
-
- return 0;
- }
-+
-+/**
-+ * dpni_get_api_version() - Get Data Path Network Interface API version
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of data path network interface API
-+ * @minor_ver: Minor version of data path network interface API
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
-+{
-+ struct dpni_rsp_get_api_version *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
-+ cmd_flags, 0);
-+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
-+ *major_ver = le16_to_cpu(rsp_params->major);
-+ *minor_ver = le16_to_cpu(rsp_params->minor);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Distribution configuration
-+ * If the FS is already enabled with a previous call the classification
-+ * key will be changed but all the table rules are kept. If the
-+ * existing rules do not match the key the results will not be
-+ * predictable. It is the user's responsibility to keep key integrity.
-+ * If cfg.enable is set to 1 the command will create a flow steering table
-+ * and will classify packets according to this table. The packets that
-+ * miss all the table rules will be classified according to settings
-+ * made in dpni_set_rx_hash_dist()
-+ * If cfg.enable is set to 0 the command will clear flow steering table.
-+ * The packets will be classified according to settings made in
-+ * dpni_set_rx_hash_dist()
-+ */
-+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rx_dist_cfg *cfg)
-+{
-+ struct dpni_cmd_set_rx_fs_dist *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
-+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
-+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
-+ cmd_params->tc = cfg->tc;
-+ cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
-+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Distribution configuration
-+ * If cfg.enable is set to 1 the packets will be classified using a hash
-+ * function based on the key received in cfg.key_cfg_iova parameter.
-+ * If cfg.enable is set to 0 the packets will be sent to the queue configured
-+ * in dpni_set_rx_dist_default_queue() call
-+ */
-+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rx_dist_cfg *cfg)
-+{
-+ struct dpni_cmd_set_rx_hash_dist *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
-+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
-+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
-+ cmd_params->tc = cfg->tc;
-+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
-+ * (to select a flow ID)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @index: Location in the FS table where the entry should be inserted.
-+ * Only relevant if MASKING is enabled for flow steering
-+ * classification on this DPNI; it is ignored for exact match.
-+ * @cfg: Flow steering rule to add
-+ * @action: Action to be taken as result of a classification hit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ u16 index,
-+ const struct dpni_rule_cfg *cfg,
-+ const struct dpni_fs_action_cfg *action)
-+{
-+ struct dpni_cmd_add_fs_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->index = cpu_to_le16(index);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+ cmd_params->options = cpu_to_le16(action->options);
-+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
-+ cmd_params->flc = cpu_to_le64(action->flc);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
-+ * traffic class
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Flow steering rule to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rule_cfg *cfg)
-+{
-+ struct dpni_cmd_remove_fs_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
---- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
-@@ -1,34 +1,6 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_DPNI_H
- #define __FSL_DPNI_H
-@@ -52,6 +24,14 @@ struct fsl_mc_io;
- * Maximum number of buffer pools per DPNI
- */
- #define DPNI_MAX_DPBP 8
-+/**
-+ * Maximum number of senders
-+ */
-+#define DPNI_MAX_SENDERS 16
-+/**
-+ * Maximum distribution size
-+ */
-+#define DPNI_MAX_DIST_SIZE 16
-
- /**
- * All traffic classes considered; see dpni_set_queue()
-@@ -123,13 +103,15 @@ struct dpni_pools_cfg {
- /**
- * struct pools - Buffer pools parameters
- * @dpbp_id: DPBP object ID
-+ * @priority_mask: priorities served by DPBP
- * @buffer_size: Buffer size
- * @backup_pool: Backup pool
- */
- struct {
-- int dpbp_id;
-+ u16 dpbp_id;
-+ u8 priority_mask;
- u16 buffer_size;
-- int backup_pool;
-+ u8 backup_pool;
- } pools[DPNI_MAX_DPBP];
- };
-
-@@ -476,6 +458,24 @@ union dpni_statistics {
- u64 egress_confirmed_frames;
- } page_2;
- /**
-+ * struct page_3 - Page_3 statistics structure with values for the
-+ * selected TC
-+ * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
-+ * dequeued
-+ * @ceetm_dequeue_frames: Cumulative count of the number of frames
-+ * dequeued
-+ * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
-+ * frames whose enqueue was rejected
-+ * @ceetm_reject_frames: Cumulative count of all frame enqueues
-+ * rejected
-+ */
-+ struct {
-+ u64 ceetm_dequeue_bytes;
-+ u64 ceetm_dequeue_frames;
-+ u64 ceetm_reject_bytes;
-+ u64 ceetm_reject_frames;
-+ } page_3;
-+ /**
- * struct raw - raw statistics structure
- */
- struct {
-@@ -487,8 +487,13 @@ int dpni_get_statistics(struct fsl_mc_io
- u32 cmd_flags,
- u16 token,
- u8 page,
-+ u8 param,
- union dpni_statistics *stat);
-
-+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
- /**
- * Enable auto-negotiation
- */
-@@ -505,6 +510,23 @@ int dpni_get_statistics(struct fsl_mc_io
- * Enable a-symmetric pause frames
- */
- #define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+/**
-+ * Enable priority flow control pause frames
-+ */
-+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
-+/**
-+ * Advertised link speeds
-+ */
-+#define DPNI_ADVERTISED_10BASET_FULL 0x0000000000000001ULL
-+#define DPNI_ADVERTISED_100BASET_FULL 0x0000000000000002ULL
-+#define DPNI_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL
-+#define DPNI_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL
-+#define DPNI_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL
-+
-+/**
-+ * Advertise auto-negotiation enabled
-+ */
-+#define DPNI_ADVERTISED_AUTONEG 0x0000000000000008ULL
-
- /**
- * struct - Structure representing DPNI link configuration
-@@ -514,6 +536,7 @@ int dpni_get_statistics(struct fsl_mc_io
- struct dpni_link_cfg {
- u32 rate;
- u64 options;
-+ u64 advertising;
- };
-
- int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
-@@ -521,6 +544,11 @@ int dpni_set_link_cfg(struct fsl_mc_io
- u16 token,
- const struct dpni_link_cfg *cfg);
-
-+int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_link_cfg *cfg);
-+
- /**
- * struct dpni_link_state - Structure representing DPNI link state
- * @rate: Rate
-@@ -530,7 +558,10 @@ int dpni_set_link_cfg(struct fsl_mc_io
- struct dpni_link_state {
- u32 rate;
- u64 options;
-+ u64 supported;
-+ u64 advertising;
- int up;
-+ int state_valid;
- };
-
- int dpni_get_link_state(struct fsl_mc_io *mc_io,
-@@ -538,6 +569,28 @@ int dpni_get_link_state(struct fsl_mc_io
- u16 token,
- struct dpni_link_state *state);
-
-+int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_link_state *state);
-+
-+/**
-+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
-+ * @rate_limit: rate in Mbps
-+ * @max_burst_size: burst size in bytes (up to 64KB)
-+ */
-+struct dpni_tx_shaping_cfg {
-+ u32 rate_limit;
-+ u16 max_burst_size;
-+};
-+
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
-+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
-+ int coupled);
-+
- int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
-@@ -639,6 +692,70 @@ int dpni_prepare_key_cfg(const struct dp
- u8 *key_cfg_buf);
-
- /**
-+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
-+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
-+ * key extractions to be used as the QoS criteria by calling
-+ * dpkg_prepare_key_cfg()
-+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
-+ * '0' to use the 'default_tc' in such cases
-+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
-+ */
-+struct dpni_qos_tbl_cfg {
-+ u64 key_cfg_iova;
-+ int discard_on_miss;
-+ u8 default_tc;
-+};
-+
-+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_qos_tbl_cfg *cfg);
-+
-+/**
-+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
-+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
-+ * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
-+ * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
-+ */
-+enum dpni_tx_schedule_mode {
-+ DPNI_TX_SCHED_STRICT_PRIORITY = 0,
-+ DPNI_TX_SCHED_WEIGHTED_A,
-+ DPNI_TX_SCHED_WEIGHTED_B,
-+};
-+
-+/**
-+ * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
-+ * @mode: Scheduling mode
-+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
-+ * not applicable for 'strict-priority' mode;
-+ */
-+struct dpni_tx_schedule_cfg {
-+ enum dpni_tx_schedule_mode mode;
-+ u16 delta_bandwidth;
-+};
-+
-+/**
-+ * struct dpni_tx_priorities_cfg - Structure representing transmission
-+ * priorities for DPNI TCs
-+ * @tc_sched: An array of traffic-classes
-+ * @prio_group_A: Priority of group A
-+ * @prio_group_B: Priority of group B
-+ * @separate_groups: Treat A and B groups as separate
-+ * @ceetm_ch_idx: ceetm channel index to apply the changes
-+ */
-+struct dpni_tx_priorities_cfg {
-+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
-+ u8 prio_group_A;
-+ u8 prio_group_B;
-+ u8 separate_groups;
-+};
-+
-+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_priorities_cfg *cfg);
-+
-+/**
- * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
- * @dist_size: Set the distribution size;
- * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
-@@ -784,6 +901,108 @@ enum dpni_congestion_point {
- };
-
- /**
-+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid
-+ * values are 0-1 or 0-7, depending on the number of priorities
-+ * in that channel; not relevant for 'DPNI_DEST_NONE' option
-+ */
-+struct dpni_dest_cfg {
-+ enum dpni_dest dest_type;
-+ int dest_id;
-+ u8 priority;
-+};
-+
-+/* DPNI congestion options */
-+
-+/**
-+ * CSCN message is written to message_iova once entering a
-+ * congestion state (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
-+/**
-+ * CSCN message is written to message_iova once exiting a
-+ * congestion state (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
-+/**
-+ * CSCN write will attempt to allocate into a cache (coherent write);
-+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
-+ */
-+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once entering a congestion state
-+ * (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once exiting a congestion state
-+ * (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
-+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
-+ */
-+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
-+/**
-+ * This congestion will trigger flow control or priority flow control.
-+ * This will have effect only if flow control is enabled with
-+ * dpni_set_link_cfg().
-+ */
-+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
-+
-+/**
-+ * struct dpni_congestion_notification_cfg - congestion notification
-+ * configuration
-+ * @units: Units type
-+ * @threshold_entry: Above this threshold we enter a congestion state.
-+ * set it to '0' to disable it
-+ * @threshold_exit: Below this threshold we exit the congestion state.
-+ * @message_ctx: The context that will be part of the CSCN message
-+ * @message_iova: I/O virtual address (must be in DMA-able memory),
-+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
-+ * is contained in 'options'
-+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
-+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
-+ */
-+
-+struct dpni_congestion_notification_cfg {
-+ enum dpni_congestion_unit units;
-+ u32 threshold_entry;
-+ u32 threshold_exit;
-+ u64 message_ctx;
-+ u64 message_iova;
-+ struct dpni_dest_cfg dest_cfg;
-+ u16 notification_mode;
-+};
-+
-+/** Compose TC parameter for function dpni_set_congestion_notification()
-+ * and dpni_get_congestion_notification().
-+ */
-+#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
-+ ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
-+
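A worked example of the macro arithmetic only (the values are illustrative):

/* DPNI_BUILD_CH_TC(1, 3) == ((1 & 0x0F) << 4) | (3 & 0x0F) == 0x13 */
u8 tc_param = DPNI_BUILD_CH_TC(1, 3);
/* tc_param is then passed as the tc_id argument of
 * dpni_set_congestion_notification()/dpni_get_congestion_notification()
 */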
-+int dpni_set_congestion_notification(
-+ struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg);
-+
-+int dpni_get_congestion_notification(
-+ struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ struct dpni_congestion_notification_cfg *cfg);
-+
-+/**
- * struct dpni_taildrop - Structure representing the taildrop
- * @enable: Indicates whether the taildrop is active or not.
- * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
-@@ -829,4 +1048,124 @@ struct dpni_rule_cfg {
- u8 key_size;
- };
-
-+int dpni_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
-+
-+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rule_cfg *cfg,
-+ u8 tc_id,
-+ u16 index);
-+
-+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rule_cfg *cfg);
-+
-+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * Discard matching traffic. If set, this takes precedence over any other
-+ * configuration and matching traffic is always discarded.
-+ */
-+#define DPNI_FS_OPT_DISCARD 0x1
-+
-+/**
-+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
-+ * override the FLC value set per queue.
-+ * For more details check the Frame Descriptor section in the hardware
-+ * documentation.
-+ */
-+#define DPNI_FS_OPT_SET_FLC 0x2
-+
-+/**
-+ * Indicates whether the 6 least significant bits of FLC are used for stash
-+ * control. If set, the 6 least significant bits in value are interpreted as
-+ * follows:
-+ * - bits 0-1: indicates the number of 64 byte units of context that are
-+ * stashed. FLC value is interpreted as a memory address in this case,
-+ * excluding the 6 LS bits.
-+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
-+ * to be stashed. Annotation is placed at FD[ADDR].
-+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
-+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
-+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
-+ */
-+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
-+
-+/**
-+ * struct dpni_fs_action_cfg - Action configuration for table look-up
-+ * @flc: FLC value for traffic matching this rule. Please check the
-+ * Frame Descriptor section in the hardware documentation for
-+ * more information.
-+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
-+ * values are in range 0 to num_queue-1.
-+ * @options: Any combination of DPNI_FS_OPT_ values.
-+ */
-+struct dpni_fs_action_cfg {
-+ u64 flc;
-+ u16 flow_id;
-+ u16 options;
-+};
-+
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ u16 index,
-+ const struct dpni_rule_cfg *cfg,
-+ const struct dpni_fs_action_cfg *action);
-+
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rule_cfg *cfg);
-+
-+/**
-+ * When used for queue_idx in function dpni_set_rx_dist_default_queue
-+ * will signal to dpni to drop all unclassified frames
-+ */
-+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
-+
-+/**
-+ * struct dpni_rx_dist_cfg - distribution configuration
-+ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
-+ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
-+ * 512,768,896,1024
-+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
-+ * the extractions to be used for the distribution key by calling
-+ * dpkg_prepare_key_cfg(); relevant only when enable != 0, otherwise
-+ * it can be '0'
-+ * @enable: enable/disable the distribution.
-+ * @tc: TC id for which distribution is set
-+ * @fs_miss_flow_id: when a packet misses all rules in the flow steering table
-+ * and hash distribution is disabled, it is put into this queue id; use
-+ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
-+ * used only when flow steering distribution is enabled and hash
-+ * distribution is disabled
-+ */
-+struct dpni_rx_dist_cfg {
-+ u16 dist_size;
-+ u64 key_cfg_iova;
-+ u8 enable;
-+ u8 tc;
-+ u16 fs_miss_flow_id;
-+};
-+
-+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rx_dist_cfg *cfg);
-+
-+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rx_dist_cfg *cfg);
-+
- #endif /* __FSL_DPNI_H */
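Tying the flow-steering pieces above together, a hypothetical sketch (helper name, sizes and queue ids are illustrative; the iova parameters are assumed to point at DMA-able memory already holding the prepared key configuration and the rule key/mask):

static int example_fs_setup(struct fsl_mc_io *mc_io, u16 token,
			    u64 key_cfg_iova, u64 key_iova,
			    u64 mask_iova, u8 key_size)
{
	struct dpni_rx_dist_cfg dist = {
		.dist_size = 8,
		.key_cfg_iova = key_cfg_iova,
		.enable = 1,
		.tc = 0,
		.fs_miss_flow_id = DPNI_FS_MISS_DROP,	/* drop unmatched frames */
	};
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	struct dpni_fs_action_cfg action = {
		.flow_id = 3,	/* steer matches to Rx queue 3 */
		.options = 0,
	};
	int err;

	err = dpni_set_rx_fs_dist(mc_io, 0, token, &dist);
	if (err)
		return err;

	return dpni_add_fs_entry(mc_io, 0, token, 0 /* tc */, 0 /* index */,
				 &rule, &action);
}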
---- a/drivers/staging/fsl-dpaa2/ethernet/net.h
-+++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
-@@ -1,33 +1,5 @@
-+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
- /* Copyright 2013-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef __FSL_NET_H
- #define __FSL_NET_H
diff --git a/target/linux/layerscape/patches-4.14/703-dpaa2-l2switch-support-layerscape.patch b/target/linux/layerscape/patches-4.14/703-dpaa2-l2switch-support-layerscape.patch
deleted file mode 100644
index c188e6d3dd..0000000000
--- a/target/linux/layerscape/patches-4.14/703-dpaa2-l2switch-support-layerscape.patch
+++ /dev/null
@@ -1,4040 +0,0 @@
-From 1c96e22d28e1b18c41c71e7d0948378561a6526f Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:29 +0800
-Subject: [PATCH] dpaa2-l2switch: support layerscape
-
-This is an integrated patch of dpaa2-l2switch for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
----
- drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
- drivers/staging/fsl-dpaa2/ethsw/README | 106 ++
- drivers/staging/fsl-dpaa2/ethsw/TODO | 14 +
- drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 359 ++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1165 +++++++++++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 592 +++++++
- .../staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 206 +++
- drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1438 +++++++++++++++++
- drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 90 ++
- 9 files changed, 3980 insertions(+)
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/README
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/TODO
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
-
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
-@@ -0,0 +1,10 @@
-+# SPDX-License-Identifier: GPL-2.0
-+#
-+# Makefile for the Freescale DPAA2 Ethernet Switch
-+#
-+# Copyright 2014-2017 Freescale Semiconductor, Inc.
-+# Copyright 2017-2018 NXP
-+
-+obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
-+
-+dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/README
-@@ -0,0 +1,106 @@
-+DPAA2 Ethernet Switch driver
-+============================
-+
-+This file provides documentation for the DPAA2 Ethernet Switch driver
-+
-+
-+Contents
-+========
-+ Supported Platforms
-+ Architecture Overview
-+ Creating an Ethernet Switch
-+ Features
-+
-+
-+ Supported Platforms
-+===================
-+This driver provides networking support for Freescale LS2085A, LS2088A
-+DPAA2 SoCs.
-+
-+
-+Architecture Overview
-+=====================
-+The Ethernet Switch in the DPAA2 architecture consists of several hardware
-+resources that provide the functionality. These are allocated and
-+configured via the Management Complex (MC) portals. MC abstracts most of
-+these resources as DPAA2 objects and exposes ABIs through which they can
-+be configured and controlled.
-+
-+For a more detailed description of the DPAA2 architecture and its object
-+abstractions see:
-+ drivers/staging/fsl-mc/README.txt
-+
-+The Ethernet Switch is built on top of a Datapath Switch (DPSW) object.
-+
-+Configuration interface:
-+
-+ ---------------------
-+ | DPAA2 Switch driver |
-+ ---------------------
-+ .
-+ .
-+ ----------
-+ | DPSW API |
-+ ----------
-+ . software
-+ ================= . ==============
-+ . hardware
-+ ---------------------
-+ | MC hardware portals |
-+ ---------------------
-+ .
-+ .
-+ ------
-+ | DPSW |
-+ ------
-+
-+The driver uses the switch device driver model and exposes each switch port
-+as a network interface, which can be included in a bridge. Traffic switched
-+between ports is offloaded into the hardware. The exposed network interfaces
-+are not used for I/O; they are used only for configuration. This
-+limitation is going to be addressed in the future.
-+
-+The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
-+
-+
-+ [ethA] [ethB] [ethC] [ethD] [ethE] [ethF]
-+ : : : : : :
-+ : : : : : :
-+[eth drv] [eth drv] [ ethsw drv ]
-+ : : : : : : kernel
-+========================================================================
-+ : : : : : : hardware
-+ [DPNI] [DPNI] [============= DPSW =================]
-+ | | | | | |
-+ | ---------- | [DPMAC] [DPMAC]
-+ ------------------------------- | |
-+ | |
-+ [PHY] [PHY]
-+
-+For a more detailed description of the Ethernet switch device driver model
-+see:
-+ Documentation/networking/switchdev.txt
-+
-+Creating an Ethernet Switch
-+===========================
-+A device is created for the switch objects probed on the MC bus. Each DPSW
-+has a number of properties which determine the configuration options and
-+associated hardware resources.
-+
-+A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can
-+be added to a container on the MC bus in one of two ways: statically,
-+through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
-+time; or created dynamically at runtime, via the DPAA2 objects APIs.
-+
-+Features
-+========
-+Driver configures DPSW to perform hardware switching offload of
-+unicast/multicast/broadcast (VLAN tagged or untagged) traffic between its
-+ports.
-+
-+It allows configuration of hardware learning, flooding, multicast groups,
-+port VLAN configuration and STP state.
-+
-+Static entries can be added/removed from the FDB.
-+
-+Hardware statistics for each port are provided through ethtool -S option.
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/TODO
-@@ -0,0 +1,14 @@
-+* Add I/O capabilities on switch port netdevices. This will allow control
-+traffic to reach the CPU.
-+* Add ACL to redirect control traffic to CPU.
-+* Add support for displaying learned FDB entries
-+* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
-+need to be kept in sync with binary interface changes in MC
-+* refine README file
-+* cleanup
-+
-+NOTE: At least first three of the above are required before getting the
-+DPAA2 Ethernet Switch driver out of staging. Another requirement is that
-+the fsl-mc bus driver is moved to drivers/bus and dpio driver is moved to
-+drivers/soc (this is required for I/O).
-+
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
-@@ -0,0 +1,359 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017-2018 NXP
-+ *
-+ */
-+
-+#ifndef __FSL_DPSW_CMD_H
-+#define __FSL_DPSW_CMD_H
-+
-+/* DPSW Version */
-+#define DPSW_VER_MAJOR 8
-+#define DPSW_VER_MINOR 0
-+
-+#define DPSW_CMD_BASE_VERSION 1
-+#define DPSW_CMD_ID_OFFSET 4
-+
-+#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
-+#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
-+
-+#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
-+
-+#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
-+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
-+#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
-+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
-+
-+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
-+
-+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
-+
-+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
-+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
-+
-+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
-+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
-+
-+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
-+
-+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
-+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
-+
-+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
-+
-+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
-+#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
-+#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
-+
-+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
-+
-+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
-+
-+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
-+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
-+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
-+
-+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
-+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
-+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
-+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
-+
-+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
-+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
-+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
-+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
-+#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
-+
-+/* Macros for accessing command fields smaller than 1 byte */
-+#define DPSW_MASK(field) \
-+ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
-+ DPSW_##field##_SHIFT)
-+#define dpsw_set_field(var, field, val) \
-+ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
-+#define dpsw_get_field(var, field) \
-+ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
-+#define dpsw_get_bit(var, bit) \
-+ (((var) >> (bit)) & GENMASK(0, 0))
-+
-+struct dpsw_cmd_open {
-+ __le32 dpsw_id;
-+};
-+
-+#define DPSW_COMPONENT_TYPE_SHIFT 0
-+#define DPSW_COMPONENT_TYPE_SIZE 4
-+
-+struct dpsw_cmd_create {
-+ /* cmd word 0 */
-+ __le16 num_ifs;
-+ u8 max_fdbs;
-+ u8 max_meters_per_if;
-+ /* from LSB: only the first 4 bits */
-+ u8 component_type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ __le16 max_vlans;
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le16 max_fdb_mc_groups;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
-+
-+struct dpsw_cmd_destroy {
-+ __le32 dpsw_id;
-+};
-+
-+#define DPSW_ENABLE_SHIFT 0
-+#define DPSW_ENABLE_SIZE 1
-+
-+struct dpsw_rsp_is_enabled {
-+ /* from LSB: enable:1 */
-+ u8 enabled;
-+};
-+
-+struct dpsw_cmd_set_irq_enable {
-+ u8 enable_state;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
-+
-+struct dpsw_cmd_get_irq_enable {
-+ __le32 pad;
-+ u8 irq_index;
-+};
-+
-+struct dpsw_rsp_get_irq_enable {
-+ u8 enable_state;
-+};
-+
-+struct dpsw_cmd_set_irq_mask {
-+ __le32 mask;
-+ u8 irq_index;
-+};
-+
-+struct dpsw_cmd_get_irq_mask {
-+ __le32 pad;
-+ u8 irq_index;
-+};
-+
-+struct dpsw_rsp_get_irq_mask {
-+ __le32 mask;
-+};
-+
-+struct dpsw_cmd_get_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
-+
-+struct dpsw_rsp_get_irq_status {
-+ __le32 status;
-+};
-+
-+struct dpsw_cmd_clear_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
-+
-+#define DPSW_COMPONENT_TYPE_SHIFT 0
-+#define DPSW_COMPONENT_TYPE_SIZE 4
-+
-+struct dpsw_rsp_get_attr {
-+ /* cmd word 0 */
-+ __le16 num_ifs;
-+ u8 max_fdbs;
-+ u8 num_fdbs;
-+ __le16 max_vlans;
-+ __le16 num_vlans;
-+ /* cmd word 1 */
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le32 dpsw_id;
-+ /* cmd word 2 */
-+ __le16 mem_size;
-+ __le16 max_fdb_mc_groups;
-+ u8 max_meters_per_if;
-+ /* from LSB only the first 4 bits */
-+ u8 component_type;
-+ __le16 pad;
-+ /* cmd word 3 */
-+ __le64 options;
-+};
-+
-+struct dpsw_cmd_if_set_flooding {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
-+};
-+
-+struct dpsw_cmd_if_set_broadcast {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
-+};
-+
-+#define DPSW_VLAN_ID_SHIFT 0
-+#define DPSW_VLAN_ID_SIZE 12
-+#define DPSW_DEI_SHIFT 12
-+#define DPSW_DEI_SIZE 1
-+#define DPSW_PCP_SHIFT 13
-+#define DPSW_PCP_SIZE 3
-+
-+struct dpsw_cmd_if_set_tci {
-+ __le16 if_id;
-+ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
-+ __le16 conf;
-+};
-+
-+struct dpsw_cmd_if_get_tci {
-+ __le16 if_id;
-+};
-+
-+struct dpsw_rsp_if_get_tci {
-+ __le16 pad;
-+ __le16 vlan_id;
-+ u8 dei;
-+ u8 pcp;
-+};
-+
-+#define DPSW_STATE_SHIFT 0
-+#define DPSW_STATE_SIZE 4
-+
-+struct dpsw_cmd_if_set_stp {
-+ __le16 if_id;
-+ __le16 vlan_id;
-+ /* only the first LSB 4 bits */
-+ u8 state;
-+};
-+
-+#define DPSW_COUNTER_TYPE_SHIFT 0
-+#define DPSW_COUNTER_TYPE_SIZE 5
-+
-+struct dpsw_cmd_if_get_counter {
-+ __le16 if_id;
-+ /* from LSB: type:5 */
-+ u8 type;
-+};
-+
-+struct dpsw_rsp_if_get_counter {
-+ __le64 pad;
-+ __le64 counter;
-+};
-+
-+struct dpsw_cmd_if {
-+ __le16 if_id;
-+};
-+
-+struct dpsw_cmd_if_set_max_frame_length {
-+ __le16 if_id;
-+ __le16 frame_length;
-+};
-+
-+struct dpsw_cmd_if_set_link_cfg {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 pad[6];
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad1;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
-+
-+struct dpsw_cmd_if_get_link_state {
-+ __le16 if_id;
-+};
-+
-+#define DPSW_UP_SHIFT 0
-+#define DPSW_UP_SIZE 1
-+
-+struct dpsw_rsp_if_get_link_state {
-+ /* cmd word 0 */
-+ __le32 pad0;
-+ u8 up;
-+ u8 pad1[3];
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad2;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
-+
-+struct dpsw_vlan_add {
-+ __le16 fdb_id;
-+ __le16 vlan_id;
-+};
-+
-+struct dpsw_cmd_vlan_manage_if {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 vlan_id;
-+ __le32 pad1;
-+ /* cmd word 1-4 */
-+ __le64 if_id[4];
-+};
-+
-+struct dpsw_cmd_vlan_remove {
-+ __le16 pad;
-+ __le16 vlan_id;
-+};
-+
-+struct dpsw_cmd_fdb_add {
-+ __le32 pad;
-+ __le16 fdb_aging_time;
-+ __le16 num_fdb_entries;
-+};
-+
-+struct dpsw_rsp_fdb_add {
-+ __le16 fdb_id;
-+};
-+
-+struct dpsw_cmd_fdb_remove {
-+ __le16 fdb_id;
-+};
-+
-+#define DPSW_ENTRY_TYPE_SHIFT 0
-+#define DPSW_ENTRY_TYPE_SIZE 4
-+
-+struct dpsw_cmd_fdb_unicast_op {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+ /* cmd word 1 */
-+ __le16 if_egress;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+};
-+
-+struct dpsw_cmd_fdb_multicast_op {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ u8 mac_addr[6];
-+ __le16 pad2;
-+ /* cmd word 2-5 */
-+ __le64 if_id[4];
-+};
-+
-+#define DPSW_LEARNING_MODE_SHIFT 0
-+#define DPSW_LEARNING_MODE_SIZE 4
-+
-+struct dpsw_cmd_fdb_set_learning_mode {
-+ __le16 fdb_id;
-+ /* only the first 4 bits from LSB */
-+ u8 mode;
-+};
-+
-+struct dpsw_rsp_get_api_version {
-+ __le16 version_major;
-+ __le16 version_minor;
-+};
-+
-+#endif /* __FSL_DPSW_CMD_H */
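A quick worked example of the command-ID encoding above (plain arithmetic from the macros):

/* DPSW_CMDID_OPEN == DPSW_CMD_ID(0x802)
 *                 == (0x802 << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION
 *                 == (0x802 << 4) | 1 == 0x8021
 */
u16 cmdid = DPSW_CMDID_OPEN;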
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
-@@ -0,0 +1,1165 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017-2018 NXP
-+ *
-+ */
-+
-+#include <linux/fsl/mc.h>
-+#include "dpsw.h"
-+#include "dpsw-cmd.h"
-+
-+static void build_if_id_bitmap(__le64 *bmap,
-+ const u16 *id,
-+ const u16 num_ifs)
-+{
-+ int i;
-+
-+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
-+ if (id[i] < DPSW_MAX_IF)
-+ bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
-+ }
-+}
-+
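A worked example of what build_if_id_bitmap() produces, assuming all ids are below DPSW_MAX_IF (the values are illustrative):

__le64 bmap[4] = { 0 };
u16 ifs[] = { 0, 2, 5 };

build_if_id_bitmap(bmap, ifs, 3);
/* bmap[0] == cpu_to_le64(BIT(0) | BIT(2) | BIT(5)) == cpu_to_le64(0x25);
 * bmap[1..3] remain 0
 */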
-+/**
-+ * dpsw_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpsw_id: DPSW unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpsw_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpsw_id,
-+ u16 *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_open *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpsw_cmd_open *)cmd.params;
-+ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
-+}
-+
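A hypothetical sketch of the open/use/close session pattern described above (helper name and error handling are illustrative):

static int example_dpsw_session(struct fsl_mc_io *mc_io, int dpsw_id)
{
	u16 token;
	int err;

	err = dpsw_open(mc_io, 0, dpsw_id, &token);
	if (err)
		return err;

	/* ... issue dpsw_*() commands using 'token' here ... */

	return dpsw_close(mc_io, 0, token);
}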
-+/**
-+ * dpsw_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
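
dpsw_open()/dpsw_close() establish the token-based session model that every other command in this file relies on. A minimal usage sketch, assuming the MC portal and dpsw_id come from the fsl-mc bus probe (the helper name is hypothetical; dpsw_get_api_version() is defined further down in this file):

/* Minimal session sketch (hypothetical helper, assumes mc_io/dpsw_id
 * are provided by the caller).
 */
static int example_dpsw_session(struct fsl_mc_io *mc_io, int dpsw_id)
{
	u16 token, major, minor;
	int err;

	err = dpsw_open(mc_io, 0, dpsw_id, &token);
	if (err)
		return err;

	/* every subsequent command carries the returned token */
	err = dpsw_get_api_version(mc_io, 0, &major, &minor);

	dpsw_close(mc_io, 0, token);
	return err;
}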
-+
-+/**
-+ * dpsw_enable() - Enable DPSW functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_disable() - Disable DPSW functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no cause will raise
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq_enable *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
-+ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq_mask *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_get_irq_status *cmd_params;
-+ struct dpsw_rsp_get_irq_status *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_clear_irq_status *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
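
Taken together, the four IRQ calls above form the usual bring-up and acknowledge sequence: mask the causes of interest, enable the index, then read and write-one-to-clear any pending causes. A minimal sketch, assuming a token from dpsw_open() and using the DPSW_IRQ_* constants declared in dpsw.h below (the helper name is hypothetical):

static int example_dpsw_irq_setup(struct fsl_mc_io *mc_io, u16 token)
{
	u32 status = 0;
	int err;

	err = dpsw_set_irq_mask(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
				DPSW_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	err = dpsw_set_irq_enable(mc_io, 0, token, DPSW_IRQ_INDEX_IF, 1);
	if (err)
		return err;

	/* later, typically from the IRQ handler: read then ack (W1C) */
	err = dpsw_get_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF, &status);
	if (!err && (status & DPSW_IRQ_EVENT_LINK_CHANGED))
		err = dpsw_clear_irq_status(mc_io, 0, token,
					    DPSW_IRQ_INDEX_IF, status);
	return err;
}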
-+
-+/**
-+ * dpsw_get_attributes() - Retrieve DPSW attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @attr: Returned DPSW attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_rsp_get_attr *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ attr->max_fdbs = rsp_params->max_fdbs;
-+ attr->num_fdbs = rsp_params->num_fdbs;
-+ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
-+ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
-+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
-+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
-+ attr->id = le32_to_cpu(rsp_params->dpsw_id);
-+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
-+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
-+ attr->max_meters_per_if = rsp_params->max_meters_per_if;
-+ attr->options = le64_to_cpu(rsp_params->options);
-+ attr->component_type = dpsw_get_field(rsp_params->component_type,
-+ COMPONENT_TYPE);
-+
-+ return 0;
-+}
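
A minimal sketch of consuming the returned attributes, e.g. to size per-interface state right after open (hypothetical helper name; only a few of the fields are printed):

static int example_dpsw_read_attr(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_attr attr = { 0 };
	int err;

	err = dpsw_get_attributes(mc_io, 0, token, &attr);
	if (err)
		return err;

	pr_info("dpsw.%d: %u ifs, %u/%u vlans, %u FDB(s)\n",
		attr.id, attr.num_ifs, attr.num_vlans, attr.max_vlans,
		attr.num_fdbs);
	return 0;
}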
-+
-+/**
-+ * dpsw_if_set_link_cfg() - Set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface id
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->rate = cpu_to_le32(cfg->rate);
-+ cmd_params->options = cpu_to_le64(cfg->options);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_get_link_state() - Return the link state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface id
-+ * @state: Link state 1 - linkup, 0 - link down or disconnected
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_state *state)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_link_state *cmd_params;
-+ struct dpsw_rsp_if_get_link_state *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
-+ state->rate = le32_to_cpu(rsp_params->rate);
-+ state->options = le64_to_cpu(rsp_params->options);
-+ state->up = dpsw_get_field(rsp_params->up, UP);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_flooding *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_broadcast *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tci_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_tci *cmd_params;
-+ u16 tmp_conf = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
-+ dpsw_set_field(tmp_conf, DEI, cfg->dei);
-+ dpsw_set_field(tmp_conf, PCP, cfg->pcp);
-+ cmd_params->conf = cpu_to_le16(tmp_conf);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_tci_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_tci *cmd_params;
-+ struct dpsw_rsp_if_get_tci *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
-+ cfg->pcp = rsp_params->pcp;
-+ cfg->dei = rsp_params->dei;
-+ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_if_set_stp() - Set Spanning Tree Protocol (STP) state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: STP State configuration parameters
-+ *
-+ * The following STP states are supported -
-+ * blocking, listening, learning, forwarding and disabled.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_stp_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_stp *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->state, STATE, cfg->state);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
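
A minimal sketch of driving a port into the forwarding state (hypothetical helper; vlan_id 1 mirrors the driver's default PVID and is an assumption here):

static int example_port_stp_forward(struct fsl_mc_io *mc_io, u16 token,
				    u16 if_id)
{
	struct dpsw_stp_cfg stp_cfg = {
		.vlan_id = 1,			/* assumed default PVID */
		.state = DPSW_STP_STATE_FORWARDING,
	};

	return dpsw_if_set_stp(mc_io, 0, token, if_id, &stp_cfg);
}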
-+
-+/**
-+ * dpsw_if_get_counter() - Get specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: return value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 *counter)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_counter *cmd_params;
-+ struct dpsw_rsp_if_get_counter *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
-+ *counter = le64_to_cpu(rsp_params->counter);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpsw_if_enable() - Enable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_disable() - Disable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Maximum Frame Length
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 frame_length)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->frame_length = cpu_to_le16(frame_length);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
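
A minimal sketch of deriving the frame length from a netdev MTU (the VLAN_ETH_HLEN arithmetic is an assumption about the MTU-to-frame-length mapping, not something stated here; the helper name is hypothetical):

#include <linux/if_vlan.h>	/* VLAN_ETH_HLEN */

static int example_port_set_mtu(struct fsl_mc_io *mc_io, u16 token,
				u16 if_id, int mtu)
{
	/* L2 payload size plus Ethernet and one VLAN header (assumed) */
	return dpsw_if_set_max_frame_length(mc_io, 0, token, if_id,
					    (u16)(mtu + VLAN_ETH_HLEN));
}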
-+
-+/**
-+ * dpsw_vlan_add() - Add a new VLAN to the DPSW.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: VLAN configuration
-+ *
-+ * Only VLAN ID and FDB ID are required parameters here.
-+ * 12 bit VLAN ID is defined in IEEE802.1Q.
-+ * Adding a duplicate VLAN ID is not allowed.
-+ * FDB ID can be shared across multiple VLANs. Shared learning
-+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
-+ * with the same fdb_id.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_vlan_add *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_vlan_add *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
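
A minimal sketch of the shared-learning rule described above: two VLANs pointing at the same fdb_id share one MAC table (hypothetical helper; FDB 0 matches what ethsw.c uses later in this patch):

static int example_shared_learning(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpsw_vlan_cfg vcfg = { .fdb_id = 0 };
	int err;

	err = dpsw_vlan_add(mc_io, 0, token, 10, &vcfg);
	if (err)
		return err;

	/* same fdb_id -> VLAN 10 and VLAN 20 share learned MAC entries */
	return dpsw_vlan_add(mc_io, 0, token, 20, &vcfg);
}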
-+
-+/**
-+ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces to add
-+ *
-+ * Only interfaces that do not yet belong to this VLAN are added;
-+ * otherwise an error is generated and the entire command is
-+ * ignored. This function can be called multiple times, each time
-+ * providing only the delta of interfaces to add.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be
-+ * transmitted as untagged.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be transmitted as untagged
-+ *
-+ * These interfaces should already belong to this VLAN.
-+ * By default all interfaces are transmitted as tagged.
-+ * Providing a non-existent interface, or an interface that is
-+ * already configured as untagged, generates an error and the
-+ * entire command is ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
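
A minimal sketch combining the two calls above: a port first joins the VLAN and may then be marked egress-untagged for it (hypothetical helper, mirroring the per-port VLAN setup in ethsw.c below):

static int example_port_join_vlan(struct fsl_mc_io *mc_io, u16 token,
				  u16 if_id, u16 vid, bool untagged)
{
	struct dpsw_vlan_if_cfg vcfg = {
		.num_ifs = 1,
		.if_id[0] = if_id,
	};
	int err;

	err = dpsw_vlan_add_if(mc_io, 0, token, vid, &vcfg);
	if (err)
		return err;

	if (untagged)
		err = dpsw_vlan_add_if_untagged(mc_io, 0, token, vid, &vcfg);

	return err;
}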
-+
-+/**
-+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
-+ *
-+ * Interfaces must belong to this VLAN, otherwise an error
-+ * is returned and the command is ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
-+ * converted from being transmitted as untagged to being transmitted as tagged.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
-+ *
-+ * Interfaces provided to this API have to belong to this VLAN and
-+ * be configured as untagged; otherwise an error is returned and
-+ * the command is ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_vlan_remove() - Remove an entire VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_remove *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
-+ int i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
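
Note that the loop above stores the MAC address byte-reversed, as the MC expects; callers pass the address in the usual order. A minimal sketch of adding a static unicast entry (hypothetical helper; ether_addr_copy() is from <linux/etherdevice.h>):

static int example_fdb_add_static_uc(struct fsl_mc_io *mc_io, u16 token,
				     u16 fdb_id, const u8 *addr, u16 if_id)
{
	struct dpsw_fdb_unicast_cfg entry = {
		.type = DPSW_FDB_ENTRY_STATIC,
		.if_egress = if_id,
	};

	ether_addr_copy(entry.mac_addr, addr);	/* normal byte order here */
	return dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &entry);
}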
-+
-+/**
-+ * dpsw_fdb_remove_unicast() - Remove a unicast entry from the MAC lookup table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
-+ int i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
-+ *
-+ * If the group doesn't exist, it will be created.
-+ * Only interfaces not yet belonging to this multicast group are
-+ * added; otherwise an error is generated and the command is
-+ * ignored.
-+ * This function may be called multiple times, each time providing
-+ * only the delta of interfaces to add.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
-+ int i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
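
A minimal sketch of the delta semantics described above: the first call creates the group with one member, the second adds a further member to the same group rather than replacing it (hypothetical helper; port numbers are illustrative):

static int example_fdb_mc_two_ports(struct fsl_mc_io *mc_io, u16 token,
				    u16 fdb_id, const u8 *addr)
{
	struct dpsw_fdb_multicast_cfg mc = {
		.type = DPSW_FDB_ENTRY_STATIC,
		.num_ifs = 1,
		.if_id[0] = 0,			/* first member: port 0 */
	};
	int err;

	ether_addr_copy(mc.mac_addr, addr);

	err = dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &mc);
	if (err)
		return err;

	mc.if_id[0] = 1;			/* delta: add port 1 only */
	return dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &mc);
}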
-+
-+/**
-+ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
-+ * group.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
-+ *
-+ * Interfaces provided by this API have to exist in the group,
-+ * otherwise an error will be returned and the entire command
-+ * ignored. If there is no interface left in the group,
-+ * the entire group is deleted.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
-+ int i;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @mode: Learning mode
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ enum dpsw_fdb_learning_mode mode)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
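
A minimal sketch of toggling learning on an FDB, e.g. when ports join or leave a bridge (hypothetical helper; which mode a real driver picks is an assumption):

static int example_fdb_learning(struct fsl_mc_io *mc_io, u16 token,
				u16 fdb_id, bool enable)
{
	enum dpsw_fdb_learning_mode mode = enable ?
		DPSW_FDB_LEARNING_MODE_HW : DPSW_FDB_LEARNING_MODE_DIS;

	return dpsw_fdb_set_learning_mode(mc_io, 0, token, fdb_id, mode);
}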
-+
-+/**
-+ * dpsw_get_api_version() - Get Data Path Switch API version
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of data path switch API
-+ * @minor_ver: Minor version of data path switch API
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpsw_rsp_get_api_version *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
-+ cmd_flags,
-+ 0);
-+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
-+ *major_ver = le16_to_cpu(rsp_params->version_major);
-+ *minor_ver = le16_to_cpu(rsp_params->version_minor);
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
-@@ -0,0 +1,592 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017-2018 NXP
-+ *
-+ */
-+
-+#ifndef __FSL_DPSW_H
-+#define __FSL_DPSW_H
-+
-+/* Data Path L2-Switch API
-+ * Contains API for handling DPSW topology and functionality
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * DPSW general definitions
-+ */
-+
-+/**
-+ * Maximum number of traffic class priorities
-+ */
-+#define DPSW_MAX_PRIORITIES 8
-+/**
-+ * Maximum number of interfaces
-+ */
-+#define DPSW_MAX_IF 64
-+
-+int dpsw_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpsw_id,
-+ u16 *token);
-+
-+int dpsw_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * DPSW options
-+ */
-+
-+/**
-+ * Disable flooding
-+ */
-+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
-+/**
-+ * Disable Multicast
-+ */
-+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
-+/**
-+ * Support control interface
-+ */
-+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
-+/**
-+ * Disable flooding metering
-+ */
-+#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
-+/**
-+ * Enable metering
-+ */
-+#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
-+
-+/**
-+ * enum dpsw_component_type - component type of a bridge
-+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
-+ * enterprise VLAN bridge or of a Provider Bridge used
-+ * to process C-tagged frames
-+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
-+ * Provider Bridge
-+ *
-+ */
-+enum dpsw_component_type {
-+ DPSW_COMPONENT_TYPE_C_VLAN = 0,
-+ DPSW_COMPONENT_TYPE_S_VLAN
-+};
-+
-+/**
-+ * struct dpsw_cfg - DPSW configuration
-+ * @num_ifs: Number of external and internal interfaces
-+ * @adv: Advanced parameters; default is all zeros;
-+ * use this structure to change default settings
-+ */
-+struct dpsw_cfg {
-+ u16 num_ifs;
-+ /**
-+ * struct adv - Advanced parameters
-+ * @options: Enable/Disable DPSW features (bitmap)
-+ * @max_vlans: Maximum Number of VLANs; 0 - indicates default 16
-+ * @max_meters_per_if: Number of meters per interface
-+ * @max_fdbs: Maximum Number of FDBs; 0 - indicates default 16
-+ * @max_fdb_entries: Number of FDB entries for default FDB table;
-+ * 0 - indicates default 1024 entries.
-+ * @fdb_aging_time: Default FDB aging time for default FDB table;
-+ * 0 - indicates default 300 seconds
-+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
-+ * 0 - indicates default 32
-+ * @component_type: Indicates the component type of this bridge
-+ */
-+ struct {
-+ u64 options;
-+ u16 max_vlans;
-+ u8 max_meters_per_if;
-+ u8 max_fdbs;
-+ u16 max_fdb_entries;
-+ u16 fdb_aging_time;
-+ u16 max_fdb_mc_groups;
-+ enum dpsw_component_type component_type;
-+ } adv;
-+};
-+
-+int dpsw_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpsw_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpsw_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * DPSW IRQ Index and Events
-+ */
-+
-+#define DPSW_IRQ_INDEX_IF 0x0000
-+#define DPSW_IRQ_INDEX_L2SW 0x0001
-+
-+/**
-+ * IRQ event - Indicates that the link state changed
-+ */
-+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
-+
-+/**
-+ * struct dpsw_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpsw_irq_cfg {
-+ u64 addr;
-+ u32 val;
-+ int irq_num;
-+};
-+
-+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en);
-+
-+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask);
-+
-+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status);
-+
-+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status);
-+
-+/**
-+ * struct dpsw_attr - Structure representing DPSW attributes
-+ * @id: DPSW object ID
-+ * @options: Enable/Disable DPSW features
-+ * @max_vlans: Maximum Number of VLANs
-+ * @max_meters_per_if: Number of meters per interface
-+ * @max_fdbs: Maximum Number of FDBs
-+ * @max_fdb_entries: Number of FDB entries for default FDB table;
-+ * 0 - indicates default 1024 entries.
-+ * @fdb_aging_time: Default FDB aging time for default FDB table;
-+ * 0 - indicates default 300 seconds
-+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
-+ * 0 - indicates default 32
-+ * @mem_size: DPSW frame storage memory size
-+ * @num_ifs: Number of interfaces
-+ * @num_vlans: Current number of VLANs
-+ * @num_fdbs: Current number of FDBs
-+ * @component_type: Component type of this bridge
-+ */
-+struct dpsw_attr {
-+ int id;
-+ u64 options;
-+ u16 max_vlans;
-+ u8 max_meters_per_if;
-+ u8 max_fdbs;
-+ u16 max_fdb_entries;
-+ u16 fdb_aging_time;
-+ u16 max_fdb_mc_groups;
-+ u16 num_ifs;
-+ u16 mem_size;
-+ u16 num_vlans;
-+ u8 num_fdbs;
-+ enum dpsw_component_type component_type;
-+};
-+
-+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_attr *attr);
-+
-+/**
-+ * enum dpsw_action - Action selection for special/control frames
-+ * @DPSW_ACTION_DROP: Drop frame
-+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
-+ */
-+enum dpsw_action {
-+ DPSW_ACTION_DROP = 0,
-+ DPSW_ACTION_REDIRECT = 1
-+};
-+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable asymmetric pause frames
-+ */
-+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+
-+/**
-+ * struct dpsw_link_cfg - Structure representing DPSW link configuration
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
-+ */
-+struct dpsw_link_cfg {
-+ u32 rate;
-+ u64 options;
-+};
-+
-+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_cfg *cfg);
-+/**
-+ * struct dpsw_link_state - Structure representing DPSW link state
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
-+ * @up: 0 - covers two cases: down and disconnected, 1 - up
-+ */
-+struct dpsw_link_state {
-+ u32 rate;
-+ u64 options;
-+ u8 up;
-+};
-+
-+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_state *state);
-+
-+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 en);
-+
-+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 en);
-+
-+/**
-+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
-+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
-+ * to the IEEE 802.1p priority
-+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
-+ * separately or in conjunction with PCP to indicate frames
-+ * eligible to be dropped in the presence of congestion
-+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
-+ * to which the frame belongs. The hexadecimal values
-+ * of 0x000 and 0xFFF are reserved;
-+ * all other values may be used as VLAN identifiers,
-+ * allowing up to 4,094 VLANs
-+ */
-+struct dpsw_tci_cfg {
-+ u8 pcp;
-+ u8 dei;
-+ u16 vlan_id;
-+};
-+
-+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tci_cfg *cfg);
-+
-+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_tci_cfg *cfg);
-+
-+/**
-+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
-+ * @DPSW_STP_STATE_DISABLED: Disabled state
-+ * @DPSW_STP_STATE_BLOCKING: Blocking state (same value as disabled)
-+ * @DPSW_STP_STATE_LISTENING: Listening state
-+ * @DPSW_STP_STATE_LEARNING: Learning state
-+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
-+ *
-+ */
-+enum dpsw_stp_state {
-+ DPSW_STP_STATE_DISABLED = 0,
-+ DPSW_STP_STATE_LISTENING = 1,
-+ DPSW_STP_STATE_LEARNING = 2,
-+ DPSW_STP_STATE_FORWARDING = 3,
-+ DPSW_STP_STATE_BLOCKING = 0
-+};
-+
-+/**
-+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
-+ * @vlan_id: VLAN ID STP state
-+ * @state: STP state
-+ */
-+struct dpsw_stp_cfg {
-+ u16 vlan_id;
-+ enum dpsw_stp_state state;
-+};
-+
-+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_stp_cfg *cfg);
-+
-+/**
-+ * enum dpsw_accepted_frames - Types of frames to accept
-+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
-+ * priority tagged frames
-+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged and
-+ * priority-tagged frames received on this interface.
-+ *
-+ */
-+enum dpsw_accepted_frames {
-+ DPSW_ADMIT_ALL = 1,
-+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
-+};
-+
-+/**
-+ * enum dpsw_counter - Counters types
-+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
-+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
-+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
-+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
-+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
-+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
-+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
-+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
-+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
-+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
-+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
-+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
-+ */
-+enum dpsw_counter {
-+ DPSW_CNT_ING_FRAME = 0x0,
-+ DPSW_CNT_ING_BYTE = 0x1,
-+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
-+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
-+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
-+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
-+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
-+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
-+ DPSW_CNT_EGR_FRAME = 0x8,
-+ DPSW_CNT_EGR_BYTE = 0x9,
-+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
-+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
-+};
-+
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 *counter);
-+
-+int dpsw_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id);
-+
-+int dpsw_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id);
-+
-+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 frame_length);
-+
-+/**
-+ * struct dpsw_vlan_cfg - VLAN Configuration
-+ * @fdb_id: Forwarding Database (FDB) ID
-+ */
-+struct dpsw_vlan_cfg {
-+ u16 fdb_id;
-+};
-+
-+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_cfg *cfg);
-+
-+/**
-+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
-+ * @num_ifs: The number of interfaces that are assigned to the egress
-+ * list for this VLAN
-+ * @if_id: The set of interfaces that are
-+ * assigned to the egress list for this VLAN
-+ */
-+struct dpsw_vlan_if_cfg {
-+ u16 num_ifs;
-+ u16 if_id[DPSW_MAX_IF];
-+};
-+
-+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id);
-+
-+/**
-+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
-+ * @DPSW_FDB_ENTRY_STATIC: Static entry
-+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
-+ */
-+enum dpsw_fdb_entry_type {
-+ DPSW_FDB_ENTRY_STATIC = 0,
-+ DPSW_FDB_ENTRY_DINAMIC = 1
-+};
-+
-+/**
-+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
-+ * @type: Select static or dynamic entry
-+ * @mac_addr: MAC address
-+ * @if_egress: Egress interface ID
-+ */
-+struct dpsw_fdb_unicast_cfg {
-+ enum dpsw_fdb_entry_type type;
-+ u8 mac_addr[6];
-+ u16 if_egress;
-+};
-+
-+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg);
-+
-+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg);
-+
-+/**
-+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
-+ * @type: Select static or dynamic entry
-+ * @mac_addr: MAC address
-+ * @num_ifs: Number of external and internal interfaces
-+ * @if_id: Egress interface IDs
-+ */
-+struct dpsw_fdb_multicast_cfg {
-+ enum dpsw_fdb_entry_type type;
-+ u8 mac_addr[6];
-+ u16 num_ifs;
-+ u16 if_id[DPSW_MAX_IF];
-+};
-+
-+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg);
-+
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg);
-+
-+/**
-+ * enum dpsw_fdb_learning_mode - Auto-learning modes
-+ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
-+ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
-+ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
-+ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
-+ *
-+ * NON-SECURE LEARNING
-+ * SMAC found DMAC found CTLU Action
-+ * v v Forward frame to
-+ * 1. DMAC destination
-+ * - v Forward frame to
-+ * 1. DMAC destination
-+ * 2. Control interface
-+ * v - Forward frame to
-+ * 1. Flooding list of interfaces
-+ * - - Forward frame to
-+ * 1. Flooding list of interfaces
-+ * 2. Control interface
-+ * SECURE LEARNING
-+ * SMAC found DMAC found CTLU Action
-+ * v v Forward frame to
-+ * 1. DMAC destination
-+ * - v Forward frame to
-+ * 1. Control interface
-+ * v - Forward frame to
-+ * 1. Flooding list of interfaces
-+ * - - Forward frame to
-+ * 1. Control interface
-+ */
-+enum dpsw_fdb_learning_mode {
-+ DPSW_FDB_LEARNING_MODE_DIS = 0,
-+ DPSW_FDB_LEARNING_MODE_HW = 1,
-+ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
-+ DPSW_FDB_LEARNING_MODE_SECURE = 3
-+};
-+
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ enum dpsw_fdb_learning_mode mode);
-+
-+/**
-+ * struct dpsw_fdb_attr - FDB Attributes
-+ * @max_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ * @learning_mode: Learning mode
-+ * @num_fdb_mc_groups: Current number of multicast groups
-+ * @max_fdb_mc_groups: Maximum number of multicast groups
-+ */
-+struct dpsw_fdb_attr {
-+ u16 max_fdb_entries;
-+ u16 fdb_aging_time;
-+ enum dpsw_fdb_learning_mode learning_mode;
-+ u16 num_fdb_mc_groups;
-+ u16 max_fdb_mc_groups;
-+};
-+
-+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
-+
-+#endif /* __FSL_DPSW_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
-@@ -0,0 +1,206 @@
-+/* Copyright 2014-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "ethsw.h"
-+
-+static struct {
-+ enum dpsw_counter id;
-+ char name[ETH_GSTRING_LEN];
-+} ethsw_ethtool_counters[] = {
-+ {DPSW_CNT_ING_FRAME, "rx frames"},
-+ {DPSW_CNT_ING_BYTE, "rx bytes"},
-+ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
-+ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
-+ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
-+ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
-+ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
-+ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
-+ {DPSW_CNT_EGR_FRAME, "tx frames"},
-+ {DPSW_CNT_EGR_BYTE, "tx bytes"},
-+ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
-+
-+};
-+
-+#define ETHSW_NUM_COUNTERS ARRAY_SIZE(ethsw_ethtool_counters)
-+
-+static void ethsw_get_drvinfo(struct net_device *netdev,
-+ struct ethtool_drvinfo *drvinfo)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u16 version_major, version_minor;
-+ int err;
-+
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+
-+ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
-+ &version_major,
-+ &version_minor);
-+ if (err)
-+ strlcpy(drvinfo->fw_version, "N/A",
-+ sizeof(drvinfo->fw_version));
-+ else
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%u.%u", version_major, version_minor);
-+
-+ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
-+}
-+
-+static int
-+ethsw_get_link_ksettings(struct net_device *netdev,
-+ struct ethtool_link_ksettings *link_ksettings)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state = {0};
-+ int err = 0;
-+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
-+ }
-+
-+ /* At the moment, we have no way of interrogating the DPMAC
-+ * from the DPSW side or there may not exist a DPMAC at all.
-+ * Report only autoneg state, duplex and speed.
-+ */
-+ if (state.options & DPSW_LINK_OPT_AUTONEG)
-+ link_ksettings->base.autoneg = AUTONEG_ENABLE;
-+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
-+ link_ksettings->base.duplex = DUPLEX_FULL;
-+ link_ksettings->base.speed = state.rate;
-+
-+out:
-+ return err;
-+}
-+
-+static int
-+ethsw_set_link_ksettings(struct net_device *netdev,
-+ const struct ethtool_link_ksettings *link_ksettings)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_cfg cfg = {0};
-+ int err = 0;
-+
-+ netdev_dbg(netdev, "Setting link parameters...");
-+
-+ /* Due to a temporary MC limitation, the DPSW port must be down
-+ * in order to be able to change link settings. Taking steps to let
-+ * the user know that.
-+ */
-+ if (netif_running(netdev)) {
-+ netdev_info(netdev, "Sorry, interface must be brought down first.\n");
-+ return -EACCES;
-+ }
-+
-+ cfg.rate = link_ksettings->base.speed;
-+ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
-+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
-+ if (link_ksettings->base.duplex == DUPLEX_HALF)
-+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
-+
-+ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ &cfg);
-+ if (err)
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
-+
-+ return err;
-+}
-+
-+static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
-+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ETHSW_NUM_COUNTERS;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static void ethsw_ethtool_get_strings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
-+{
-+ int i;
-+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ETHSW_NUM_COUNTERS; i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
-+ break;
-+ }
-+}
-+
-+static void ethsw_ethtool_get_stats(struct net_device *netdev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int i, err;
-+
-+ memset(data, 0,
-+ sizeof(u64) * ETHSW_NUM_COUNTERS);
-+
-+ for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ ethsw_ethtool_counters[i].id,
-+ &data[i]);
-+ if (err)
-+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
-+ ethsw_ethtool_counters[i].name, err);
-+ }
-+}
-+
-+const struct ethtool_ops ethsw_port_ethtool_ops = {
-+ .get_drvinfo = ethsw_get_drvinfo,
-+ .get_link = ethtool_op_get_link,
-+ .get_link_ksettings = ethsw_get_link_ksettings,
-+ .set_link_ksettings = ethsw_set_link_ksettings,
-+ .get_strings = ethsw_ethtool_get_strings,
-+ .get_ethtool_stats = ethsw_ethtool_get_stats,
-+ .get_sset_count = ethsw_ethtool_get_sset_count,
-+};
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
-@@ -0,0 +1,1438 @@
-+/* Copyright 2014-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/module.h>
-+
-+#include <linux/interrupt.h>
-+#include <linux/msi.h>
-+#include <linux/kthread.h>
-+#include <linux/workqueue.h>
-+
-+#include <linux/fsl/mc.h>
-+
-+#include "ethsw.h"
-+
-+static struct workqueue_struct *ethsw_owq;
-+
-+/* Minimal supported DPSW version */
-+#define DPSW_MIN_VER_MAJOR 8
-+#define DPSW_MIN_VER_MINOR 0
-+
-+#define DEFAULT_VLAN_ID 1
-+
-+static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
-+{
-+ int err;
-+
-+ struct dpsw_vlan_cfg vcfg = {
-+ .fdb_id = 0,
-+ };
-+
-+ if (ethsw->vlans[vid]) {
-+ dev_err(ethsw->dev, "VLAN already configured\n");
-+ return -EEXIST;
-+ }
-+
-+ err = dpsw_vlan_add(ethsw->mc_io, 0,
-+ ethsw->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
-+ return err;
-+ }
-+ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
-+
-+ return 0;
-+}
-+
-+static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
-+{
-+ struct ethsw_core *ethsw = port_priv->ethsw_data;
-+ struct net_device *netdev = port_priv->netdev;
-+ struct dpsw_tci_cfg tci_cfg = { 0 };
-+ bool is_oper;
-+ int err, ret;
-+
-+ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ port_priv->idx, &tci_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
-+ return err;
-+ }
-+
-+ tci_cfg.vlan_id = pvid;
-+
-+ /* Interface needs to be down to change PVID */
-+ is_oper = netif_oper_up(netdev);
-+ if (is_oper) {
-+ err = dpsw_if_disable(ethsw->mc_io, 0,
-+ ethsw->dpsw_handle,
-+ port_priv->idx);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
-+ return err;
-+ }
-+ }
-+
-+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ port_priv->idx, &tci_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
-+ goto set_tci_error;
-+ }
-+
-+ /* Delete previous PVID info and mark the new one */
-+ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
-+ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
-+ port_priv->pvid = pvid;
-+
-+set_tci_error:
-+ if (is_oper) {
-+ ret = dpsw_if_enable(ethsw->mc_io, 0,
-+ ethsw->dpsw_handle,
-+ port_priv->idx);
-+ if (ret) {
-+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
-+ return ret;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
-+ u16 vid, u16 flags)
-+{
-+ struct ethsw_core *ethsw = port_priv->ethsw_data;
-+ struct net_device *netdev = port_priv->netdev;
-+ struct dpsw_vlan_if_cfg vcfg;
-+ int err;
-+
-+ if (port_priv->vlans[vid]) {
-+ netdev_warn(netdev, "VLAN %d already configured\n", vid);
-+ return -EEXIST;
-+ }
-+
-+ vcfg.num_ifs = 1;
-+ vcfg.if_id[0] = port_priv->idx;
-+ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
-+ return err;
-+ }
-+
-+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
-+
-+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
-+ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
-+ ethsw->dpsw_handle,
-+ vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev,
-+ "dpsw_vlan_add_if_untagged err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
-+ }
-+
-+ if (flags & BRIDGE_VLAN_INFO_PVID) {
-+ err = ethsw_port_set_pvid(port_priv, vid);
-+ if (err)
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
-+{
-+ enum dpsw_fdb_learning_mode learn_mode;
-+ int err;
-+
-+ if (flag)
-+ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
-+ else
-+ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
-+
-+ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
-+ learn_mode);
-+ if (err) {
-+ dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
-+ return err;
-+ }
-+ ethsw->learning = !!flag;
-+
-+ return 0;
-+}
-+
-+static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
-+{
-+ int err;
-+
-+ err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx, flag);
-+ if (err) {
-+ netdev_err(port_priv->netdev,
-+ "dpsw_if_set_flooding err %d\n", err);
-+ return err;
-+ }
-+ port_priv->flood = !!flag;
-+
-+ return 0;
-+}
-+
-+static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
-+{
-+ struct dpsw_stp_cfg stp_cfg = {
-+ .vlan_id = DEFAULT_VLAN_ID,
-+ .state = state,
-+ };
-+ int err;
-+
-+ if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
-+ return 0; /* Nothing to do */
-+
-+ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx, &stp_cfg);
-+ if (err) {
-+ netdev_err(port_priv->netdev,
-+ "dpsw_if_set_stp err %d\n", err);
-+ return err;
-+ }
-+
-+ port_priv->stp_state = state;
-+
-+ return 0;
-+}
-+
-+static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
-+{
-+ struct ethsw_port_priv *ppriv_local = NULL;
-+ int i, err;
-+
-+ if (!ethsw->vlans[vid])
-+ return -ENOENT;
-+
-+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
-+ if (err) {
-+ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
-+ return err;
-+ }
-+ ethsw->vlans[vid] = 0;
-+
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
-+ ppriv_local = ethsw->ports[i];
-+ ppriv_local->vlans[vid] = 0;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
-+ const unsigned char *addr)
-+{
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
-+
-+ entry.if_egress = port_priv->idx;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
-+
-+ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "dpsw_fdb_add_unicast err %d\n", err);
-+ return err;
-+}
-+
-+static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
-+ const unsigned char *addr)
-+{
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
-+
-+ entry.if_egress = port_priv->idx;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
-+
-+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ 0, &entry);
-+ /* Silently ignore repeated calls to the del command */
-+ if (err && err != -ENXIO)
-+ netdev_err(port_priv->netdev,
-+ "dpsw_fdb_remove_unicast err %d\n", err);
-+ return err;
-+}
-+
-+static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
-+ const unsigned char *addr)
-+{
-+ struct dpsw_fdb_multicast_cfg entry = {0};
-+ int err;
-+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->idx;
-+
-+ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ 0, &entry);
-+ /* Silently ignore repeated calls to the add command */
-+ if (err && err != -ENXIO)
-+ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
-+ err);
-+ return err;
-+}
-+
-+static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
-+ const unsigned char *addr)
-+{
-+ struct dpsw_fdb_multicast_cfg entry = {0};
-+ int err;
-+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->idx;
-+
-+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ 0, &entry);
-+ /* Silently ignore repeated calls to the del command */
-+ if (err && err != -ENAVAIL)
-+ netdev_err(port_priv->netdev,
-+ "dpsw_fdb_remove_multicast err %d\n", err);
-+ return err;
-+}
-+
-+static void port_get_stats(struct net_device *netdev,
-+ struct rtnl_link_stats64 *stats)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u64 tmp;
-+ int err;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ DPSW_CNT_ING_FRAME, &stats->rx_packets);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ DPSW_CNT_ING_FRAME_DISCARD,
-+ &stats->rx_dropped);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ DPSW_CNT_ING_FLTR_FRAME,
-+ &tmp);
-+ if (err)
-+ goto error;
-+ stats->rx_dropped += tmp;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ DPSW_CNT_EGR_FRAME_DISCARD,
-+ &stats->tx_dropped);
-+ if (err)
-+ goto error;
-+
-+ return;
-+
-+error:
-+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
-+}
-+
-+static bool port_has_offload_stats(const struct net_device *netdev,
-+ int attr_id)
-+{
-+ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
-+}
-+
-+static int port_get_offload_stats(int attr_id,
-+ const struct net_device *netdev,
-+ void *sp)
-+{
-+ switch (attr_id) {
-+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
-+ port_get_stats((struct net_device *)netdev, sp);
-+ return 0;
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+static int port_change_mtu(struct net_device *netdev, int mtu)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
-+
-+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
-+ 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx,
-+ (u16)ETHSW_L2_MAX_FRM(mtu));
-+ if (err) {
-+ netdev_err(netdev,
-+ "dpsw_if_set_max_frame_length() err %d\n", err);
-+ return err;
-+ }
-+
-+ netdev->mtu = mtu;
-+ return 0;
-+}
-+
-+static int port_carrier_state_sync(struct net_device *netdev)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state;
-+ int err;
-+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx, &state);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
-+ return err;
-+ }
-+
-+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
-+
-+ if (state.up != port_priv->link_state) {
-+ if (state.up)
-+ netif_carrier_on(netdev);
-+ else
-+ netif_carrier_off(netdev);
-+ port_priv->link_state = state.up;
-+ }
-+ return 0;
-+}
-+
-+static int port_open(struct net_device *netdev)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
-+
-+ /* No need to allow Tx as control interface is disabled */
-+ netif_tx_stop_all_queues(netdev);
-+
-+ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
-+ return err;
-+ }
-+
-+ /* sync carrier state */
-+ err = port_carrier_state_sync(netdev);
-+ if (err) {
-+ netdev_err(netdev,
-+ "port_carrier_state_sync err %d\n", err);
-+ goto err_carrier_sync;
-+ }
-+
-+ return 0;
-+
-+err_carrier_sync:
-+ dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx);
-+ return err;
-+}
-+
-+static int port_stop(struct net_device *netdev)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
-+
-+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
-+ port_priv->ethsw_data->dpsw_handle,
-+ port_priv->idx);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static netdev_tx_t port_dropframe(struct sk_buff *skb,
-+ struct net_device *netdev)
-+{
-+ /* we don't support I/O for now, drop the frame */
-+ dev_kfree_skb_any(skb);
-+
-+ return NETDEV_TX_OK;
-+}
-+
-+static const struct net_device_ops ethsw_port_ops = {
-+ .ndo_open = port_open,
-+ .ndo_stop = port_stop,
-+
-+ .ndo_set_mac_address = eth_mac_addr,
-+ .ndo_change_mtu = port_change_mtu,
-+ .ndo_has_offload_stats = port_has_offload_stats,
-+ .ndo_get_offload_stats = port_get_offload_stats,
-+
-+ .ndo_start_xmit = port_dropframe,
-+};
-+
-+static void ethsw_links_state_update(struct ethsw_core *ethsw)
-+{
-+ int i;
-+
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
-+ port_carrier_state_sync(ethsw->ports[i]->netdev);
-+}
-+
-+static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
-+{
-+ return IRQ_WAKE_THREAD;
-+}
-+
-+static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
-+{
-+ struct device *dev = (struct device *)arg;
-+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
-+
-+ /* Mask the events and the if_id reserved bits to be cleared on read */
-+ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
-+ int err;
-+
-+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, &status);
-+ if (err) {
-+ dev_err(dev, "Can't get irq status (err %d)", err);
-+
-+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
-+ if (err)
-+ dev_err(dev, "Can't clear irq status (err %d)", err);
-+ goto out;
-+ }
-+
-+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
-+ ethsw_links_state_update(ethsw);
-+
-+out:
-+ return IRQ_HANDLED;
-+}
-+
-+static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
-+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
-+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
-+ struct fsl_mc_device_irq *irq;
-+ int err;
-+
-+ err = fsl_mc_allocate_irqs(sw_dev);
-+ if (err) {
-+ dev_err(dev, "MC irqs allocation failed\n");
-+ return err;
-+ }
-+
-+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
-+ err = -EINVAL;
-+ goto free_irq;
-+ }
-+
-+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, 0);
-+ if (err) {
-+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
-+ goto free_irq;
-+ }
-+
-+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
-+
-+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
-+ ethsw_irq0_handler,
-+ ethsw_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(dev), dev);
-+ if (err) {
-+ dev_err(dev, "devm_request_threaded_irq(): %d", err);
-+ goto free_irq;
-+ }
-+
-+ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, mask);
-+ if (err) {
-+ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
-+ goto free_devm_irq;
-+ }
-+
-+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, 1);
-+ if (err) {
-+ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
-+ goto free_devm_irq;
-+ }
-+
-+ return 0;
-+
-+free_devm_irq:
-+ devm_free_irq(dev, irq->msi_desc->irq, dev);
-+free_irq:
-+ fsl_mc_free_irqs(sw_dev);
-+ return err;
-+}
-+
-+static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
-+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
-+ struct fsl_mc_device_irq *irq;
-+ int err;
-+
-+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
-+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, 0);
-+ if (err)
-+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
-+
-+ fsl_mc_free_irqs(sw_dev);
-+}
-+
-+static int swdev_port_attr_get(struct net_device *netdev,
-+ struct switchdev_attr *attr)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+
-+ switch (attr->id) {
-+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-+ attr->u.ppid.id_len = 1;
-+ attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
-+ break;
-+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
-+ attr->u.brport_flags =
-+ (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
-+ (port_priv->flood ? BR_FLOOD : 0);
-+ break;
-+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
-+ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
-+ break;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+
-+ return 0;
-+}
-+
-+static int port_attr_stp_state_set(struct net_device *netdev,
-+ struct switchdev_trans *trans,
-+ u8 state)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+
-+ if (switchdev_trans_ph_prepare(trans))
-+ return 0;
-+
-+ return ethsw_port_set_stp_state(port_priv, state);
-+}
-+
-+static int port_attr_br_flags_set(struct net_device *netdev,
-+ struct switchdev_trans *trans,
-+ unsigned long flags)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err = 0;
-+
-+ if (switchdev_trans_ph_prepare(trans))
-+ return 0;
-+
-+ /* Learning is enabled per switch */
-+ err = ethsw_set_learning(port_priv->ethsw_data, !!(flags & BR_LEARNING));
-+ if (err)
-+ goto exit;
-+
-+ err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
-+
-+exit:
-+ return err;
-+}
-+
-+static int swdev_port_attr_set(struct net_device *netdev,
-+ const struct switchdev_attr *attr,
-+ struct switchdev_trans *trans)
-+{
-+ int err = 0;
-+
-+ switch (attr->id) {
-+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
-+ err = port_attr_stp_state_set(netdev, trans,
-+ attr->u.stp_state);
-+ break;
-+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
-+ err = port_attr_br_flags_set(netdev, trans,
-+ attr->u.brport_flags);
-+ break;
-+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
-+ /* VLANs are supported by default */
-+ break;
-+ default:
-+ err = -EOPNOTSUPP;
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int port_vlans_add(struct net_device *netdev,
-+ const struct switchdev_obj_port_vlan *vlan,
-+ struct switchdev_trans *trans)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int vid, err = 0;
-+
-+ if (switchdev_trans_ph_prepare(trans))
-+ return 0;
-+
-+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
-+ if (!port_priv->ethsw_data->vlans[vid]) {
-+ /* this is a new VLAN */
-+ err = ethsw_add_vlan(port_priv->ethsw_data, vid);
-+ if (err)
-+ return err;
-+
-+ port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
-+ }
-+ err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
-+ if (err)
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int swdev_port_obj_add(struct net_device *netdev,
-+ const struct switchdev_obj *obj,
-+ struct switchdev_trans *trans)
-+{
-+ int err;
-+
-+ switch (obj->id) {
-+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
-+ err = port_vlans_add(netdev,
-+ SWITCHDEV_OBJ_PORT_VLAN(obj),
-+ trans);
-+ break;
-+ default:
-+ err = -EOPNOTSUPP;
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
-+{
-+ struct ethsw_core *ethsw = port_priv->ethsw_data;
-+ struct net_device *netdev = port_priv->netdev;
-+ struct dpsw_vlan_if_cfg vcfg;
-+ int i, err;
-+
-+ if (!port_priv->vlans[vid])
-+ return -ENOENT;
-+
-+ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
-+ err = ethsw_port_set_pvid(port_priv, 0);
-+ if (err)
-+ return err;
-+ }
-+
-+ vcfg.num_ifs = 1;
-+ vcfg.if_id[0] = port_priv->idx;
-+ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
-+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
-+ ethsw->dpsw_handle,
-+ vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev,
-+ "dpsw_vlan_remove_if_untagged err %d\n",
-+ err);
-+ }
-+ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
-+ }
-+
-+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
-+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev,
-+ "dpsw_vlan_remove_if err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
-+
-+ /* Delete VLAN from switch if it is no longer configured on
-+ * any port
-+ */
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
-+ if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
-+ return 0; /* Found a port member in VID */
-+
-+ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
-+
-+ err = ethsw_dellink_switch(ethsw, vid);
-+ if (err)
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static int port_vlans_del(struct net_device *netdev,
-+ const struct switchdev_obj_port_vlan *vlan)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int vid, err = 0;
-+
-+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
-+ err = ethsw_port_del_vlan(port_priv, vid);
-+ if (err)
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static int swdev_port_obj_del(struct net_device *netdev,
-+ const struct switchdev_obj *obj)
-+{
-+ int err;
-+
-+ switch (obj->id) {
-+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
-+ err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
-+ break;
-+ default:
-+ err = -EOPNOTSUPP;
-+ break;
-+ }
-+ return err;
-+}
-+
-+static const struct switchdev_ops ethsw_port_switchdev_ops = {
-+ .switchdev_port_attr_get = swdev_port_attr_get,
-+ .switchdev_port_attr_set = swdev_port_attr_set,
-+ .switchdev_port_obj_add = swdev_port_obj_add,
-+ .switchdev_port_obj_del = swdev_port_obj_del,
-+};
-+
-+/* For the moment, only flood setting needs to be updated */
-+static int port_bridge_join(struct net_device *netdev)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+
-+ /* Enable flooding */
-+ return ethsw_port_set_flood(port_priv, 1);
-+}
-+
-+static int port_bridge_leave(struct net_device *netdev)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+
-+ /* Disable flooding */
-+ return ethsw_port_set_flood(port_priv, 0);
-+}
-+
-+static int port_netdevice_event(struct notifier_block *unused,
-+ unsigned long event, void *ptr)
-+{
-+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-+ struct netdev_notifier_changeupper_info *info = ptr;
-+ struct net_device *upper_dev;
-+ int err = 0;
-+
-+ if (netdev->netdev_ops != &ethsw_port_ops)
-+ return NOTIFY_DONE;
-+
-+ /* Handle just upper dev link/unlink for the moment */
-+ if (event == NETDEV_CHANGEUPPER) {
-+ upper_dev = info->upper_dev;
-+ if (netif_is_bridge_master(upper_dev)) {
-+ if (info->linking)
-+ err = port_bridge_join(netdev);
-+ else
-+ err = port_bridge_leave(netdev);
-+ }
-+ }
-+
-+ return notifier_from_errno(err);
-+}
-+
-+static struct notifier_block port_nb __read_mostly = {
-+ .notifier_call = port_netdevice_event,
-+};
-+
-+struct ethsw_switchdev_event_work {
-+ struct work_struct work;
-+ struct switchdev_notifier_fdb_info fdb_info;
-+ struct net_device *dev;
-+ unsigned long event;
-+};
-+
-+static void ethsw_switchdev_event_work(struct work_struct *work)
-+{
-+ struct ethsw_switchdev_event_work *switchdev_work =
-+ container_of(work, struct ethsw_switchdev_event_work, work);
-+ struct net_device *dev = switchdev_work->dev;
-+ struct switchdev_notifier_fdb_info *fdb_info;
-+ struct ethsw_port_priv *port_priv;
-+
-+ rtnl_lock();
-+ port_priv = netdev_priv(dev);
-+ fdb_info = &switchdev_work->fdb_info;
-+
-+ switch (switchdev_work->event) {
-+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
-+ if (is_unicast_ether_addr(fdb_info->addr))
-+ ethsw_port_fdb_add_uc(port_priv, fdb_info->addr);
-+ else
-+ ethsw_port_fdb_add_mc(port_priv, fdb_info->addr);
-+ break;
-+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
-+ if (is_unicast_ether_addr(fdb_info->addr))
-+ ethsw_port_fdb_del_uc(port_priv, fdb_info->addr);
-+ else
-+ ethsw_port_fdb_del_mc(port_priv, fdb_info->addr);
-+ break;
-+ }
-+
-+ rtnl_unlock();
-+ kfree(switchdev_work->fdb_info.addr);
-+ kfree(switchdev_work);
-+ dev_put(dev);
-+}
-+
-+/* Called under rcu_read_lock() */
-+static int port_switchdev_event(struct notifier_block *unused,
-+ unsigned long event, void *ptr)
-+{
-+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
-+ struct ethsw_switchdev_event_work *switchdev_work;
-+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
-+
-+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
-+ if (!switchdev_work)
-+ return NOTIFY_BAD;
-+
-+ INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
-+ switchdev_work->dev = dev;
-+ switchdev_work->event = event;
-+
-+ switch (event) {
-+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
-+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
-+ memcpy(&switchdev_work->fdb_info, ptr,
-+ sizeof(switchdev_work->fdb_info));
-+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
-+ if (!switchdev_work->fdb_info.addr)
-+ goto err_addr_alloc;
-+
-+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
-+ fdb_info->addr);
-+
-+ /* Take a reference on the device to avoid it being freed. */
-+ dev_hold(dev);
-+ break;
-+ default:
-+ return NOTIFY_DONE;
-+ }
-+
-+ queue_work(ethsw_owq, &switchdev_work->work);
-+
-+ return NOTIFY_DONE;
-+
-+err_addr_alloc:
-+ kfree(switchdev_work);
-+ return NOTIFY_BAD;
-+}
-+
-+static struct notifier_block port_switchdev_nb = {
-+ .notifier_call = port_switchdev_event,
-+};
-+
-+static int ethsw_register_notifier(struct device *dev)
-+{
-+ int err;
-+
-+ err = register_netdevice_notifier(&port_nb);
-+ if (err) {
-+ dev_err(dev, "Failed to register netdev notifier\n");
-+ return err;
-+ }
-+
-+ err = register_switchdev_notifier(&port_switchdev_nb);
-+ if (err) {
-+ dev_err(dev, "Failed to register switchdev notifier\n");
-+ goto err_switchdev_nb;
-+ }
-+
-+ return 0;
-+
-+err_switchdev_nb:
-+ unregister_netdevice_notifier(&port_nb);
-+ return err;
-+}
-+
-+static int ethsw_open(struct ethsw_core *ethsw)
-+{
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int i, err;
-+
-+ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
-+ if (err) {
-+ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
-+ return err;
-+ }
-+
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
-+ port_priv = ethsw->ports[i];
-+ err = dev_open(port_priv->netdev);
-+ if (err) {
-+ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
-+ return err;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_stop(struct ethsw_core *ethsw)
-+{
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int i, err;
-+
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
-+ port_priv = ethsw->ports[i];
-+ dev_close(port_priv->netdev);
-+ }
-+
-+ err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
-+ if (err) {
-+ dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_init(struct fsl_mc_device *sw_dev)
-+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
-+ u16 version_major, version_minor, i;
-+ struct dpsw_stp_cfg stp_cfg;
-+ int err;
-+
-+ ethsw->dev_id = sw_dev->obj_desc.id;
-+
-+ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
-+ if (err) {
-+ dev_err(dev, "dpsw_open err %d\n", err);
-+ return err;
-+ }
-+
-+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ &ethsw->sw_attr);
-+ if (err) {
-+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
-+ goto err_close;
-+ }
-+
-+ err = dpsw_get_api_version(ethsw->mc_io, 0,
-+ &version_major,
-+ &version_minor);
-+ if (err) {
-+ dev_err(dev, "dpsw_get_api_version err %d\n", err);
-+ goto err_close;
-+ }
-+
-+ /* Minimum supported DPSW version check */
-+ if (version_major < DPSW_MIN_VER_MAJOR ||
-+ (version_major == DPSW_MIN_VER_MAJOR &&
-+ version_minor < DPSW_MIN_VER_MINOR)) {
-+ dev_err(dev, "DPSW version %d.%d not supported. Use %d.%d or greater.\n",
-+ version_major,
-+ version_minor,
-+ DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
-+ err = -ENOTSUPP;
-+ goto err_close;
-+ }
-+
-+ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
-+ if (err) {
-+ dev_err(dev, "dpsw_reset err %d\n", err);
-+ goto err_close;
-+ }
-+
-+ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
-+ DPSW_FDB_LEARNING_MODE_HW);
-+ if (err) {
-+ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
-+ goto err_close;
-+ }
-+
-+ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
-+ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
-+
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
-+ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
-+ &stp_cfg);
-+ if (err) {
-+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
-+ err, i);
-+ goto err_close;
-+ }
-+
-+ err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
-+ ethsw->dpsw_handle, i, 1);
-+ if (err) {
-+ dev_err(dev,
-+ "dpsw_if_set_broadcast err %d for port %d\n",
-+ err, i);
-+ goto err_close;
-+ }
-+ }
-+
-+ ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
-+ "ethsw");
-+ if (!ethsw_owq) {
-+ err = -ENOMEM;
-+ goto err_close;
-+ }
-+
-+ err = ethsw_register_notifier(dev);
-+ if (err)
-+ goto err_destroy_ordered_workqueue;
-+
-+ return 0;
-+
-+err_destroy_ordered_workqueue:
-+ destroy_workqueue(ethsw_owq);
-+
-+err_close:
-+ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
-+ return err;
-+}
-+
-+static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
-+{
-+ const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
-+ struct net_device *netdev = port_priv->netdev;
-+ struct ethsw_core *ethsw = port_priv->ethsw_data;
-+ struct dpsw_vlan_if_cfg vcfg;
-+ int err;
-+
-+ /* Switch starts with all ports configured to VLAN 1. Need to
-+ * remove this setting to allow configuration at bridge join
-+ */
-+ vcfg.num_ifs = 1;
-+ vcfg.if_id[0] = port_priv->idx;
-+
-+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DEFAULT_VLAN_ID, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ err = ethsw_port_set_pvid(port_priv, 0);
-+ if (err)
-+ return err;
-+
-+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
-+ DEFAULT_VLAN_ID, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
-+ return err;
-+ }
-+
-+ err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
-+
-+ return err;
-+}
-+
-+static void ethsw_unregister_notifier(struct device *dev)
-+{
-+ int err;
-+
-+ err = unregister_switchdev_notifier(&port_switchdev_nb);
-+ if (err)
-+ dev_err(dev,
-+ "Failed to unregister switchdev notifier (%d)\n", err);
-+
-+ err = unregister_netdevice_notifier(&port_nb);
-+ if (err)
-+ dev_err(dev,
-+ "Failed to unregister netdev notifier (%d)\n", err);
-+}
-+
-+static void ethsw_takedown(struct fsl_mc_device *sw_dev)
-+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
-+ int err;
-+
-+ ethsw_unregister_notifier(dev);
-+
-+ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
-+ if (err)
-+ dev_warn(dev, "dpsw_close err %d\n", err);
-+}
-+
-+static int ethsw_remove(struct fsl_mc_device *sw_dev)
-+{
-+ struct ethsw_port_priv *port_priv;
-+ struct ethsw_core *ethsw;
-+ struct device *dev;
-+ int i;
-+
-+ dev = &sw_dev->dev;
-+ ethsw = dev_get_drvdata(dev);
-+
-+ ethsw_teardown_irqs(sw_dev);
-+
-+ destroy_workqueue(ethsw_owq);
-+
-+ rtnl_lock();
-+ ethsw_stop(ethsw);
-+ rtnl_unlock();
-+
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
-+ port_priv = ethsw->ports[i];
-+ unregister_netdev(port_priv->netdev);
-+ free_netdev(port_priv->netdev);
-+ }
-+ kfree(ethsw->ports);
-+
-+ ethsw_takedown(sw_dev);
-+ fsl_mc_portal_free(ethsw->mc_io);
-+
-+ kfree(ethsw);
-+
-+ dev_set_drvdata(dev, NULL);
-+
-+ return 0;
-+}
-+
-+static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
-+{
-+ struct ethsw_port_priv *port_priv;
-+ struct device *dev = ethsw->dev;
-+ struct net_device *port_netdev;
-+ int err;
-+
-+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
-+ if (!port_netdev) {
-+ dev_err(dev, "alloc_etherdev error\n");
-+ return -ENOMEM;
-+ }
-+
-+ port_priv = netdev_priv(port_netdev);
-+ port_priv->netdev = port_netdev;
-+ port_priv->ethsw_data = ethsw;
-+
-+ port_priv->idx = port_idx;
-+ port_priv->stp_state = BR_STATE_FORWARDING;
-+
-+ /* Flooding is implicitly enabled */
-+ port_priv->flood = true;
-+
-+ SET_NETDEV_DEV(port_netdev, dev);
-+ port_netdev->netdev_ops = &ethsw_port_ops;
-+ port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
-+ port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
-+
-+ /* Set MTU limits */
-+ port_netdev->min_mtu = ETH_MIN_MTU;
-+ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
-+
-+ err = register_netdev(port_netdev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev error %d\n", err);
-+ free_netdev(port_netdev);
-+ return err;
-+ }
-+
-+ ethsw->ports[port_idx] = port_priv;
-+
-+ return ethsw_port_init(port_priv, port_idx);
-+}
-+
-+static int ethsw_probe(struct fsl_mc_device *sw_dev)
-+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_core *ethsw;
-+ int i, err;
-+
-+ /* Allocate switch core */
-+ ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
-+
-+ if (!ethsw)
-+ return -ENOMEM;
-+
-+ ethsw->dev = dev;
-+ dev_set_drvdata(dev, ethsw);
-+
-+ err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
-+ if (err) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
-+ goto err_free_drvdata;
-+ }
-+
-+ err = ethsw_init(sw_dev);
-+ if (err)
-+ goto err_free_cmdport;
-+
-+ /* DEFAULT_VLAN_ID is implicitly configured on the switch */
-+ ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
-+
-+ /* Learning is implicitly enabled */
-+ ethsw->learning = true;
-+
-+ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
-+ GFP_KERNEL);
-+ if (!(ethsw->ports)) {
-+ err = -ENOMEM;
-+ goto err_takedown;
-+ }
-+
-+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
-+ err = ethsw_probe_port(ethsw, i);
-+ if (err)
-+ goto err_free_ports;
-+ }
-+
-+ /* Switch starts up enabled */
-+ rtnl_lock();
-+ err = ethsw_open(ethsw);
-+ rtnl_unlock();
-+ if (err)
-+ goto err_free_ports;
-+
-+ /* Setup IRQs */
-+ err = ethsw_setup_irqs(sw_dev);
-+ if (err)
-+ goto err_stop;
-+
-+ dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
-+ return 0;
-+
-+err_stop:
-+ rtnl_lock();
-+ ethsw_stop(ethsw);
-+ rtnl_unlock();
-+
-+err_free_ports:
-+ /* Cleanup registered ports only */
-+ for (i--; i >= 0; i--) {
-+ unregister_netdev(ethsw->ports[i]->netdev);
-+ free_netdev(ethsw->ports[i]->netdev);
-+ }
-+ kfree(ethsw->ports);
-+
-+err_takedown:
-+ ethsw_takedown(sw_dev);
-+
-+err_free_cmdport:
-+ fsl_mc_portal_free(ethsw->mc_io);
-+
-+err_free_drvdata:
-+ kfree(ethsw);
-+ dev_set_drvdata(dev, NULL);
-+
-+ return err;
-+}
-+
-+static const struct fsl_mc_device_id ethsw_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpsw",
-+ },
-+ { .vendor = 0x0 }
-+};
-+MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
-+
-+static struct fsl_mc_driver eth_sw_drv = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = ethsw_probe,
-+ .remove = ethsw_remove,
-+ .match_id_table = ethsw_match_id_table
-+};
-+
-+module_fsl_mc_driver(eth_sw_drv);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
-@@ -0,0 +1,90 @@
-+/* Copyright 2014-2017 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __ETHSW_H
-+#define __ETHSW_H
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/if_vlan.h>
-+#include <uapi/linux/if_bridge.h>
-+#include <net/switchdev.h>
-+#include <linux/if_bridge.h>
-+
-+#include "dpsw.h"
-+
-+/* Number of IRQs supported */
-+#define DPSW_IRQ_NUM 2
-+
-+#define ETHSW_VLAN_MEMBER 1
-+#define ETHSW_VLAN_UNTAGGED 2
-+#define ETHSW_VLAN_PVID 4
-+#define ETHSW_VLAN_GLOBAL 8
-+
-+/* Maximum Frame Length supported by HW (currently 10k) */
-+#define DPAA2_MFL (10 * 1024)
-+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
-+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
-+
-+extern const struct ethtool_ops ethsw_port_ethtool_ops;
-+
-+struct ethsw_core;
-+
-+/* Per port private data */
-+struct ethsw_port_priv {
-+ struct net_device *netdev;
-+ u16 idx;
-+ struct ethsw_core *ethsw_data;
-+ u8 link_state;
-+ u8 stp_state;
-+ bool flood;
-+
-+ u8 vlans[VLAN_VID_MASK + 1];
-+ u16 pvid;
-+};
-+
-+/* Switch data */
-+struct ethsw_core {
-+ struct device *dev;
-+ struct fsl_mc_io *mc_io;
-+ u16 dpsw_handle;
-+ struct dpsw_attr sw_attr;
-+ int dev_id;
-+ struct ethsw_port_priv **ports;
-+
-+ u8 vlans[VLAN_VID_MASK + 1];
-+ bool learning;
-+};
-+
-+#endif /* __ETHSW_H */
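The ETHSW_L2_MAX_FRM() and ETHSW_MAX_FRAME_LENGTH macros just above encode the MTU-to-frame-length conversion used by port_change_mtu() and by the MTU limits set in ethsw_probe_port(): the 10 KiB hardware limit minus the VLAN Ethernet header (VLAN_ETH_HLEN, 18 bytes) and the FCS (4 bytes). A small standalone sketch, with the constants written out just to make the arithmetic concrete (values mirrored from the header above):

#include <stdio.h>

/* Values mirrored from ethsw.h above */
#define DPAA2_MFL		(10 * 1024)	/* HW max frame length: 10240 */
#define VLAN_ETH_HLEN		18		/* DA + SA + VLAN tag + EtherType */
#define ETH_FCS_LEN		4
#define ETHSW_MAX_FRAME_LENGTH	(DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define ETHSW_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)

int main(void)
{
	/* Largest MTU a port may advertise: 10240 - 18 - 4 = 10218 */
	printf("max mtu             = %d\n", ETHSW_MAX_FRAME_LENGTH);
	/* Frame length programmed for a standard 1500-byte MTU: 1522 */
	printf("frame len, mtu 1500 = %d\n", ETHSW_L2_MAX_FRM(1500));
	return 0;
}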
diff --git a/target/linux/layerscape/patches-4.14/704-dpaa2-mac-phy-support-layerscape.patch b/target/linux/layerscape/patches-4.14/704-dpaa2-mac-phy-support-layerscape.patch
deleted file mode 100644
index f36da51fff..0000000000
--- a/target/linux/layerscape/patches-4.14/704-dpaa2-mac-phy-support-layerscape.patch
+++ /dev/null
@@ -1,2164 +0,0 @@
-From dd0cc8d0739a72ee5d85039a9ba7812383e8f555 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:30 +0800
-Subject: [PATCH] dpaa2-mac-phy: support layerscape
-
-This is an integrated patch of dpaa2-mac-phy for layerscape
-
-Signed-off-by: Alex Marginean <alexandru.marginean@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc@nxp.com>
-Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
-Signed-off-by: Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
-Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
-Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: Itai Katz <itai.katz@freescale.com>
-Signed-off-by: J. German Rivera <German.Rivera@freescale.com>
-Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
-Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
-Signed-off-by: Stuart Yoder <stuart.yoder@freescale.com>
-Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
----
- drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
- drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
- drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 196 ++++++
- drivers/staging/fsl-dpaa2/mac/dpmac.c | 689 ++++++++++++++++++
- drivers/staging/fsl-dpaa2/mac/dpmac.h | 374 ++++++++++
- drivers/staging/fsl-dpaa2/mac/mac.c | 817 ++++++++++++++++++++++
- 6 files changed, 2109 insertions(+)
- create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
- create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
- create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
- create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
- create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
- create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
-
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
-@@ -0,0 +1,23 @@
-+config FSL_DPAA2_MAC
-+ tristate "DPAA2 MAC / PHY interface"
-+ depends on FSL_MC_BUS && FSL_DPAA2
-+ select MDIO_BUS_MUX_MMIOREG
-+ select FSL_XGMAC_MDIO
-+ select FIXED_PHY
-+ ---help---
-+ Prototype driver for DPAA2 MAC / PHY interface object.
-+ This driver works as a proxy between phylib (including PHY drivers)
-+ and the MC firmware. It forwards link state change updates from
-+ phylib to the MC, and receives an interrupt from the MC whenever a
-+ request is made to change the link state.
-+
-+
-+config FSL_DPAA2_MAC_NETDEVS
-+ bool "Expose net interfaces for PHYs"
-+ default n
-+ depends on FSL_DPAA2_MAC
-+ ---help---
-+ Exposes macX net interfaces which allow direct control over MACs and
-+ PHYs.
-+ .
-+ Leave disabled if unsure.
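The help text above describes the MAC driver as a proxy between phylib and the MC firmware. One direction of that proxying can be sketched as a phylib link-change callback that translates the PHY state into a dpmac_set_link_state() call. The sketch below is not the actual mac.c added by this patch (that file is not shown in this hunk): the dpmac_priv struct name and its mc_io/mc_token fields are hypothetical, while dpmac_set_link_state(), struct dpmac_link_state and the DPMAC_LINK_OPT_* flags come from the dpmac API the patch adds.

/* Hedged sketch of the phylib -> MC direction of the proxy; the real
 * implementation lives in mac.c of this patch.
 */
static void dpaa2_mac_link_changed(struct net_device *netdev)
{
	struct dpmac_priv *priv = netdev_priv(netdev);	/* hypothetical priv */
	struct phy_device *phydev = netdev->phydev;
	struct dpmac_link_state state = { 0 };
	int err;

	state.rate = phydev->speed;
	state.up = !!phydev->link;
	if (phydev->autoneg == AUTONEG_ENABLE)
		state.options |= DPMAC_LINK_OPT_AUTONEG;
	if (phydev->duplex == DUPLEX_HALF)
		state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;

	err = dpmac_set_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (err)
		netdev_err(netdev, "dpmac_set_link_state err %d\n", err);
}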
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/mac/Makefile
-@@ -0,0 +1,10 @@
-+
-+obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
-+
-+dpaa2-mac-objs := mac.o dpmac.o
-+
-+all:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
-+
-+clean:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
-@@ -0,0 +1,196 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPMAC_CMD_H
-+#define _FSL_DPMAC_CMD_H
-+
-+/* DPMAC Version */
-+#define DPMAC_VER_MAJOR 4
-+#define DPMAC_VER_MINOR 2
-+#define DPMAC_CMD_BASE_VERSION 1
-+#define DPMAC_CMD_2ND_VERSION 2
-+#define DPMAC_CMD_ID_OFFSET 4
-+
-+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
-+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
-+
-+/* Command IDs */
-+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
-+#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
-+#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
-+#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
-+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
-+
-+#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
-+#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
-+
-+#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
-+#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
-+#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
-+#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
-+#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
-+#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
-+
-+#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
-+#define DPMAC_CMDID_GET_LINK_CFG_V2 DPMAC_CMD_V2(0x0c2)
-+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
-+#define DPMAC_CMDID_SET_LINK_STATE_V2 DPMAC_CMD_V2(0x0c3)
-+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
-+
-+#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
-+
-+/* Macros for accessing command fields smaller than 1byte */
-+#define DPMAC_MASK(field) \
-+ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
-+ DPMAC_##field##_SHIFT)
-+#define dpmac_set_field(var, field, val) \
-+ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
-+#define dpmac_get_field(var, field) \
-+ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
-+
-+struct dpmac_cmd_open {
-+ u32 dpmac_id;
-+};
-+
-+struct dpmac_cmd_create {
-+ u32 mac_id;
-+};
-+
-+struct dpmac_cmd_destroy {
-+ u32 dpmac_id;
-+};
-+
-+struct dpmac_cmd_set_irq_enable {
-+ u8 enable;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
-+
-+struct dpmac_cmd_get_irq_enable {
-+ u32 pad;
-+ u8 irq_index;
-+};
-+
-+struct dpmac_rsp_get_irq_enable {
-+ u8 enabled;
-+};
-+
-+struct dpmac_cmd_set_irq_mask {
-+ u32 mask;
-+ u8 irq_index;
-+};
-+
-+struct dpmac_cmd_get_irq_mask {
-+ u32 pad;
-+ u8 irq_index;
-+};
-+
-+struct dpmac_rsp_get_irq_mask {
-+ u32 mask;
-+};
-+
-+struct dpmac_cmd_get_irq_status {
-+ u32 status;
-+ u8 irq_index;
-+};
-+
-+struct dpmac_rsp_get_irq_status {
-+ u32 status;
-+};
-+
-+struct dpmac_cmd_clear_irq_status {
-+ u32 status;
-+ u8 irq_index;
-+};
-+
-+struct dpmac_rsp_get_attributes {
-+ u8 eth_if;
-+ u8 link_type;
-+ u16 id;
-+ u32 max_rate;
-+};
-+
-+struct dpmac_rsp_get_link_cfg {
-+ u64 options;
-+ u32 rate;
-+};
-+
-+struct dpmac_rsp_get_link_cfg_v2 {
-+ u64 options;
-+ u32 rate;
-+ u32 pad;
-+ u64 advertising;
-+};
-+
-+#define DPMAC_STATE_SIZE 1
-+#define DPMAC_STATE_SHIFT 0
-+#define DPMAC_STATE_VALID_SIZE 1
-+#define DPMAC_STATE_VALID_SHIFT 1
-+
-+struct dpmac_cmd_set_link_state {
-+ u64 options;
-+ u32 rate;
-+ u32 pad;
-+ /* only least significant bit is valid */
-+ u8 up;
-+};
-+
-+struct dpmac_cmd_set_link_state_v2 {
-+ u64 options;
-+ u32 rate;
-+ u32 pad0;
-+ /* from lsb: up:1, state_valid:1 */
-+ u8 state;
-+ u8 pad1[7];
-+ u64 supported;
-+ u64 advertising;
-+};
-+
-+struct dpmac_cmd_get_counter {
-+ u8 type;
-+};
-+
-+struct dpmac_rsp_get_counter {
-+ u64 pad;
-+ u64 counter;
-+};
-+
-+struct dpmac_rsp_get_api_version {
-+ u16 major;
-+ u16 minor;
-+};
-+
-+struct dpmac_cmd_set_port_mac_addr {
-+ u8 pad[2];
-+ u8 addr[6];
-+};
-+
-+#endif /* _FSL_DPMAC_CMD_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
-@@ -0,0 +1,689 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/fsl/mc.h>
-+#include "dpmac.h"
-+#include "dpmac-cmd.h"
-+
-+/**
-+ * dpmac_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpmac_id: DPMAC unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpmac_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpmac_id,
-+ u16 *token)
-+{
-+ struct dpmac_cmd_open *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpmac_cmd_open *)cmd.params;
-+ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return err;
-+}
-+
-+/**
-+ * dpmac_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
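The dpmac_open() documentation above describes the session model shared by every wrapper in this file: open returns a token bound to one DPMAC object and one MC portal, every later command passes that token, and close releases the session. A minimal caller-side sketch of that lifecycle follows; dpmac_get_attributes() is declared in dpmac.h (added by this patch but not shown in this hunk), and the helper name here is hypothetical.

/* Sketch of the open -> command -> close lifecycle described above. */
static int dpmac_query_attributes(struct fsl_mc_io *mc_io, int dpmac_id)
{
	struct dpmac_attr attr;
	u16 token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;

	/* Any number of commands may use the token while the session is open */
	err = dpmac_get_attributes(mc_io, 0, token, &attr);

	dpmac_close(mc_io, 0, token);
	return err;
}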
-+
-+/**
-+ * dpmac_create() - Create the DPMAC object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @obj_id: Returned object id
-+ *
-+ * Create the DPMAC object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The function accepts an authentication token of a parent
-+ * container that this object should be assigned to. The token
-+ * can be '0' so the object will be assigned to the default container.
-+ * The newly created object can be opened with the returned
-+ * object id and using the container's associated tokens and MC portals.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_create(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ const struct dpmac_cfg *cfg,
-+ u32 *obj_id)
-+{
-+ struct dpmac_cmd_create *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
-+ cmd_flags,
-+ dprc_token);
-+ cmd_params = (struct dpmac_cmd_create *)cmd.params;
-+ cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *obj_id = mc_cmd_read_object_id(&cmd);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @object_id: The object id; it must be a valid id within the container that
-+ * created this object;
-+ *
-+ * The function accepts the authentication token of the parent container that
-+ * created the object (not the one that currently owns the object). The object
-+ * is searched within parent using the provided 'object_id'.
-+ * All tokens to the object must be closed before calling destroy.
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpmac_destroy(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ u32 object_id)
-+{
-+ struct dpmac_cmd_destroy *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
-+ cmd_flags,
-+ dprc_token);
-+ cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
-+ cmd_params->dpmac_id = cpu_to_le32(object_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmac_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable setting
-+ * controls the overall interrupt state. If the interrupt is disabled, no
-+ * cause will trigger an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en)
-+{
-+ struct dpmac_cmd_set_irq_enable *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->enable = en;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmac_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en)
-+{
-+ struct dpmac_cmd_get_irq_enable *cmd_params;
-+ struct dpmac_rsp_get_irq_enable *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
-+ *en = rsp_params->enabled;
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpmac_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
-+{
-+ struct dpmac_cmd_set_irq_mask *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmac_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask)
-+{
-+ struct dpmac_cmd_get_irq_mask *cmd_params;
-+ struct dpmac_rsp_get_irq_mask *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
-+ *mask = le32_to_cpu(rsp_params->mask);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpmac_get_irq_status() - Get the current status of any pending interrupts.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
-+{
-+ struct dpmac_cmd_get_irq_status *cmd_params;
-+ struct dpmac_rsp_get_irq_status *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpmac_clear_irq_status() - Clear a pending interrupt's status
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
-+{
-+ struct dpmac_cmd_clear_irq_status *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_attr *attr)
-+{
-+ struct dpmac_rsp_get_attributes *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
-+ attr->eth_if = rsp_params->eth_if;
-+ attr->link_type = rsp_params->link_type;
-+ attr->id = le16_to_cpu(rsp_params->id);
-+ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpmac_get_link_cfg() - Get Ethernet link configuration
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @cfg: Returned structure with the link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_cfg *cfg)
-+{
-+ struct dpmac_rsp_get_link_cfg *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
-+ cfg->options = le64_to_cpu(rsp_params->options);
-+ cfg->rate = le32_to_cpu(rsp_params->rate);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpmac_get_link_cfg_v2() - Get Ethernet link configuration
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @cfg: Returned structure with the link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_link_cfg_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_cfg *cfg)
-+{
-+ struct dpmac_rsp_get_link_cfg_v2 *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG_V2,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpmac_rsp_get_link_cfg_v2 *)cmd.params;
-+ cfg->options = le64_to_cpu(rsp_params->options);
-+ cfg->rate = le32_to_cpu(rsp_params->rate);
-+ cfg->advertising = le64_to_cpu(rsp_params->advertising);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpmac_set_link_state() - Set the Ethernet link status
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @link_state: Link state configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_state *link_state)
-+{
-+ struct dpmac_cmd_set_link_state *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
-+ cmd_params->options = cpu_to_le64(link_state->options);
-+ cmd_params->rate = cpu_to_le32(link_state->rate);
-+ dpmac_set_field(cmd_params->up, STATE, link_state->up);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmac_set_link_state_v2() - Set the Ethernet link status
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @link_state: Link state configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_link_state_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_state *link_state)
-+{
-+ struct dpmac_cmd_set_link_state_v2 *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE_V2,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpmac_cmd_set_link_state_v2 *)cmd.params;
-+ cmd_params->options = cpu_to_le64(link_state->options);
-+ cmd_params->rate = cpu_to_le32(link_state->rate);
-+ dpmac_set_field(cmd_params->state, STATE, link_state->up);
-+ dpmac_set_field(cmd_params->state, STATE_VALID,
-+ link_state->state_valid);
-+ cmd_params->supported = cpu_to_le64(link_state->supported);
-+ cmd_params->advertising = cpu_to_le64(link_state->advertising);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmac_get_counter() - Read a specific DPMAC counter
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @type: The requested counter
-+ * @counter: Returned counter value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpmac_counter type,
-+ u64 *counter)
-+{
-+ struct dpmac_cmd_get_counter *dpmac_cmd;
-+ struct dpmac_rsp_get_counter *dpmac_rsp;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
-+ dpmac_cmd->type = type;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
-+ *counter = le64_to_cpu(dpmac_rsp->counter);
-+
-+ return 0;
-+}
-+
-+/* untested */
-+int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 addr[6])
-+{
-+ struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+ dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
-+ dpmac_cmd->addr[0] = addr[5];
-+ dpmac_cmd->addr[1] = addr[4];
-+ dpmac_cmd->addr[2] = addr[3];
-+ dpmac_cmd->addr[3] = addr[2];
-+ dpmac_cmd->addr[4] = addr[1];
-+ dpmac_cmd->addr[5] = addr[0];
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmac_get_api_version() - Get Data Path MAC version
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of data path mac API
-+ * @minor_ver: Minor version of data path mac API
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
-+{
-+ struct dpmac_rsp_get_api_version *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
-+ cmd_flags,
-+ 0);
-+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
-+ *major_ver = le16_to_cpu(rsp_params->major);
-+ *minor_ver = le16_to_cpu(rsp_params->minor);
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
-@@ -0,0 +1,374 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPMAC_H
-+#define __FSL_DPMAC_H
-+
-+/* Data Path MAC API
-+ * Contains initialization APIs and runtime control APIs for DPMAC
-+ */
-+
-+struct fsl_mc_io;
-+
-+int dpmac_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpmac_id,
-+ u16 *token);
-+
-+int dpmac_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * enum dpmac_link_type - DPMAC link type
-+ * @DPMAC_LINK_TYPE_NONE: No link
-+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
-+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
-+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
-+ */
-+enum dpmac_link_type {
-+ DPMAC_LINK_TYPE_NONE,
-+ DPMAC_LINK_TYPE_FIXED,
-+ DPMAC_LINK_TYPE_PHY,
-+ DPMAC_LINK_TYPE_BACKPLANE
-+};
-+
-+/**
-+ * enum dpmac_eth_if - DPMAC Ethernet interface
-+ * @DPMAC_ETH_IF_MII: MII interface
-+ * @DPMAC_ETH_IF_RMII: RMII interface
-+ * @DPMAC_ETH_IF_SMII: SMII interface
-+ * @DPMAC_ETH_IF_GMII: GMII interface
-+ * @DPMAC_ETH_IF_RGMII: RGMII interface
-+ * @DPMAC_ETH_IF_SGMII: SGMII interface
-+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
-+ * @DPMAC_ETH_IF_XAUI: XAUI interface
-+ * @DPMAC_ETH_IF_XFI: XFI interface
-+ */
-+enum dpmac_eth_if {
-+ DPMAC_ETH_IF_MII,
-+ DPMAC_ETH_IF_RMII,
-+ DPMAC_ETH_IF_SMII,
-+ DPMAC_ETH_IF_GMII,
-+ DPMAC_ETH_IF_RGMII,
-+ DPMAC_ETH_IF_SGMII,
-+ DPMAC_ETH_IF_QSGMII,
-+ DPMAC_ETH_IF_XAUI,
-+ DPMAC_ETH_IF_XFI
-+};
-+
-+/**
-+ * struct dpmac_cfg - Structure representing DPMAC configuration
-+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP,
-+ * the MAC IDs are continuous.
-+ * For example: 2 WRIOPs, 16 MACs in each:
-+ * MAC IDs for the 1st WRIOP: 1-16,
-+ * MAC IDs for the 2nd WRIOP: 17-32.
-+ */
-+struct dpmac_cfg {
-+ u16 mac_id;
-+};
-+
-+int dpmac_create(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ const struct dpmac_cfg *cfg,
-+ u32 *obj_id);
-+
-+int dpmac_destroy(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ u32 object_id);
-+
-+/**
-+ * DPMAC IRQ Index and Events
-+ */
-+
-+/**
-+ * IRQ index
-+ */
-+#define DPMAC_IRQ_INDEX 0
-+/**
-+ * IRQ event - indicates a link configuration request
-+ */
-+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
-+/**
-+ * IRQ event - Indicates that the link state changed
-+ */
-+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
-+
-+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en);
-+
-+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
-+
-+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask);
-+
-+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
-+
-+int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status);
-+
-+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status);
-+
-+/**
-+ * struct dpmac_attr - Structure representing DPMAC attributes
-+ * @id: DPMAC object ID
-+ * @max_rate: Maximum supported rate - in Mbps
-+ * @eth_if: Ethernet interface
-+ * @link_type: link type
-+ */
-+struct dpmac_attr {
-+ u16 id;
-+ u32 max_rate;
-+ enum dpmac_eth_if eth_if;
-+ enum dpmac_link_type link_type;
-+};
-+
-+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_attr *attr);
-+
-+/**
-+ * DPMAC link configuration/state options
-+ */
-+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable a-symmetric pause frames
-+ */
-+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+
-+/**
-+ * Advertised link speeds
-+ */
-+#define DPMAC_ADVERTISED_10BASET_FULL 0x0000000000000001ULL
-+#define DPMAC_ADVERTISED_100BASET_FULL 0x0000000000000002ULL
-+#define DPMAC_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL
-+#define DPMAC_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL
-+#define DPMAC_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL
-+
-+/**
-+ * Advertise auto-negotiation enable
-+ */
-+#define DPMAC_ADVERTISED_AUTONEG 0x0000000000000008ULL
-+
-+/**
-+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
-+ * @rate: Link's rate - in Mbps
-+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
-+ * @advertising: Speeds that are advertised for autoneg (bitmap)
-+ */
-+struct dpmac_link_cfg {
-+ u32 rate;
-+ u64 options;
-+ u64 advertising;
-+};
-+
-+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_cfg *cfg);
-+
-+int dpmac_get_link_cfg_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_cfg *cfg);
-+
-+/**
-+ * struct dpmac_link_state - DPMAC link configuration request
-+ * @rate: Rate in Mbps
-+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
-+ * @up: Link state
-+ * @state_valid: Ignore/Update the state of the link
-+ * @supported: Speed capabilities of the PHY (bitmap)
-+ * @advertising: Speeds that are advertised for autoneg (bitmap)
-+ */
-+struct dpmac_link_state {
-+ u32 rate;
-+ u64 options;
-+ int up;
-+ int state_valid;
-+ u64 supported;
-+ u64 advertising;
-+};
-+
-+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_state *link_state);
-+
-+int dpmac_set_link_state_v2(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpmac_link_state *link_state);
-+
-+/**
-+ * enum dpmac_counter - DPMAC counter types
-+ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-byte frames and larger
-+ * (up to max frame length specified),
-+ * good or bad.
-+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
-+ * with a wrong CRC
-+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
-+ * specified, with a bad frame check sequence.
-+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
-+ * Occurs when a receive FIFO overflows.
-+ * Also includes frames truncated as a result of
-+ * the receive FIFO overflow.
-+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
-+ * (optionally used for wrong SFD).
-+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64
-+ * bytes long with a good CRC.
-+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
-+ * specified, with a good frame check sequence.
-+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
-+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
-+ * (regular and PFC).
-+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
-+ * frames and valid pause frames.
-+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
-+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
-+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
-+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
-+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
-+ * (except for undersized/fragment frame).
-+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
-+ * frames and valid pause frames transmitted.
-+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
-+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
-+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
-+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
-+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
-+ * pause frames.
-+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
-+ * pause frames.
-+ */
-+enum dpmac_counter {
-+ DPMAC_CNT_ING_FRAME_64,
-+ DPMAC_CNT_ING_FRAME_127,
-+ DPMAC_CNT_ING_FRAME_255,
-+ DPMAC_CNT_ING_FRAME_511,
-+ DPMAC_CNT_ING_FRAME_1023,
-+ DPMAC_CNT_ING_FRAME_1518,
-+ DPMAC_CNT_ING_FRAME_1519_MAX,
-+ DPMAC_CNT_ING_FRAG,
-+ DPMAC_CNT_ING_JABBER,
-+ DPMAC_CNT_ING_FRAME_DISCARD,
-+ DPMAC_CNT_ING_ALIGN_ERR,
-+ DPMAC_CNT_EGR_UNDERSIZED,
-+ DPMAC_CNT_ING_OVERSIZED,
-+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
-+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
-+ DPMAC_CNT_ING_BYTE,
-+ DPMAC_CNT_ING_MCAST_FRAME,
-+ DPMAC_CNT_ING_BCAST_FRAME,
-+ DPMAC_CNT_ING_ALL_FRAME,
-+ DPMAC_CNT_ING_UCAST_FRAME,
-+ DPMAC_CNT_ING_ERR_FRAME,
-+ DPMAC_CNT_EGR_BYTE,
-+ DPMAC_CNT_EGR_MCAST_FRAME,
-+ DPMAC_CNT_EGR_BCAST_FRAME,
-+ DPMAC_CNT_EGR_UCAST_FRAME,
-+ DPMAC_CNT_EGR_ERR_FRAME,
-+ DPMAC_CNT_ING_GOOD_FRAME,
-+ DPMAC_CNT_ENG_GOOD_FRAME
-+};
-+
-+int dpmac_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpmac_counter type,
-+ u64 *counter);
-+
-+/**
-+ * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
-+ * port. This is not used for filtering; the MAC is always in
-+ * promiscuous mode. The address is passed to DPNIs through the
-+ * DPNI API for application use.
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @addr: MAC address to set
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 addr[6]);
-+
-+int dpmac_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
-+
-+#endif /* __FSL_DPMAC_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/mac/mac.c
-@@ -0,0 +1,817 @@
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ * Copyright 2018 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/module.h>
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/msi.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/if_vlan.h>
-+
-+#include <uapi/linux/if_bridge.h>
-+#include <net/netlink.h>
-+
-+#include <linux/of.h>
-+#include <linux/of_mdio.h>
-+#include <linux/of_net.h>
-+#include <linux/phy.h>
-+#include <linux/phy_fixed.h>
-+
-+#include <linux/fsl/mc.h>
-+
-+#include "dpmac.h"
-+#include "dpmac-cmd.h"
-+
-+struct dpaa2_mac_priv {
-+ struct net_device *netdev;
-+ struct fsl_mc_device *mc_dev;
-+ struct dpmac_attr attr;
-+ struct dpmac_link_state old_state;
-+ u16 dpmac_ver_major;
-+ u16 dpmac_ver_minor;
-+};
-+
-+/* TODO: fix the 10G modes, mapping can't be right:
-+ * XGMII is parallel
-+ * XAUI is serial, using 8b/10b encoding
-+ * XFI is also serial but using 64b/66b encoding
-+ * they can't all map to XGMII...
-+ *
-+ * This must be kept in sync with enum dpmac_eth_if.
-+ */
-+static phy_interface_t dpaa2_mac_iface_mode[] = {
-+ PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
-+ PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
-+ PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
-+ PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
-+ PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
-+ PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
-+ PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
-+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
-+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
-+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_CAUI */
-+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_1000BASEX */
-+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_USXGMII */
-+};
-+
-+static int cmp_dpmac_ver(struct dpaa2_mac_priv *priv,
-+ u16 ver_major, u16 ver_minor)
-+{
-+ if (priv->dpmac_ver_major == ver_major)
-+ return priv->dpmac_ver_minor - ver_minor;
-+ return priv->dpmac_ver_major - ver_major;
-+}
-+
-+#define DPMAC_LINK_AUTONEG_VER_MAJOR 4
-+#define DPMAC_LINK_AUTONEG_VER_MINOR 3
-+
-+struct dpaa2_mac_link_mode_map {
-+ u64 dpmac_lm;
-+ u64 ethtool_lm;
-+};
-+
-+static const struct dpaa2_mac_link_mode_map dpaa2_mac_lm_map[] = {
-+ {DPMAC_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
-+ {DPMAC_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
-+ {DPMAC_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
-+ {DPMAC_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
-+ {DPMAC_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
-+ {DPMAC_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
-+};
-+
-+static void link_mode_dpmac2phydev(u64 dpmac_lm, u32 *phydev_lm)
-+{
-+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_lm_map); i++) {
-+ if (dpmac_lm & dpaa2_mac_lm_map[i].dpmac_lm)
-+ __set_bit(dpaa2_mac_lm_map[i].ethtool_lm, mask);
-+ }
-+
-+ ethtool_convert_link_mode_to_legacy_u32(phydev_lm, mask);
-+}
-+
-+static void link_mode_phydev2dpmac(u32 phydev_lm, u64 *dpni_lm)
-+{
-+ unsigned long lm;
-+ int i;
-+
-+ ethtool_convert_legacy_u32_to_link_mode(&lm, phydev_lm);
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_lm_map); i++) {
-+ if (test_bit(dpaa2_mac_lm_map[i].ethtool_lm, &lm))
-+ *dpni_lm |= dpaa2_mac_lm_map[i].dpmac_lm;
-+ }
-+}
-+
-+static void dpaa2_mac_link_changed(struct net_device *netdev)
-+{
-+ struct phy_device *phydev;
-+ struct dpmac_link_state state = { 0 };
-+ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
-+ int err;
-+
-+ /* the PHY just notified us of link state change */
-+ phydev = netdev->phydev;
-+
-+ state.up = !!phydev->link;
-+ if (phydev->link) {
-+ state.rate = phydev->speed;
-+
-+ if (!phydev->duplex)
-+ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
-+ if (phydev->autoneg)
-+ state.options |= DPMAC_LINK_OPT_AUTONEG;
-+
-+ if (phydev->pause && (phydev->advertising & ADVERTISED_Pause))
-+ state.options |= DPMAC_LINK_OPT_PAUSE;
-+ if (phydev->pause &&
-+ (phydev->advertising & ADVERTISED_Asym_Pause))
-+ state.options |= DPMAC_LINK_OPT_ASYM_PAUSE;
-+
-+ netif_carrier_on(netdev);
-+ } else {
-+ netif_carrier_off(netdev);
-+ }
-+
-+ if (priv->old_state.up != state.up ||
-+ priv->old_state.rate != state.rate ||
-+ priv->old_state.options != state.options) {
-+ priv->old_state = state;
-+ phy_print_status(phydev);
-+ }
-+
-+ if (cmp_dpmac_ver(priv, DPMAC_LINK_AUTONEG_VER_MAJOR,
-+ DPMAC_LINK_AUTONEG_VER_MINOR) < 0) {
-+ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
-+ priv->mc_dev->mc_handle, &state);
-+ } else {
-+ link_mode_phydev2dpmac(phydev->supported, &state.supported);
-+ link_mode_phydev2dpmac(phydev->advertising, &state.advertising);
-+ state.state_valid = 1;
-+
-+ err = dpmac_set_link_state_v2(priv->mc_dev->mc_io, 0,
-+ priv->mc_dev->mc_handle, &state);
-+ }
-+ if (unlikely(err))
-+ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
-+}
-+
-+static int dpaa2_mac_open(struct net_device *netdev)
-+{
-+ /* start PHY state machine */
-+ phy_start(netdev->phydev);
-+
-+ return 0;
-+}
-+
-+static int dpaa2_mac_stop(struct net_device *netdev)
-+{
-+ if (!netdev->phydev)
-+ goto done;
-+
-+ /* stop PHY state machine */
-+ phy_stop(netdev->phydev);
-+
-+ /* signal link down to firmware */
-+ netdev->phydev->link = 0;
-+ dpaa2_mac_link_changed(netdev);
-+
-+done:
-+ return 0;
-+}
-+
-+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
-+static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
-+ struct net_device *dev)
-+{
-+ /* we don't support I/O for now, drop the frame */
-+ dev_kfree_skb_any(skb);
-+ return NETDEV_TX_OK;
-+}
-+
-+static void dpaa2_mac_get_drvinfo(struct net_device *net_dev,
-+ struct ethtool_drvinfo *drvinfo)
-+{
-+ struct dpaa2_mac_priv *priv = netdev_priv(net_dev);
-+
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%u.%u", priv->dpmac_ver_major, priv->dpmac_ver_minor);
-+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
-+}
-+
-+static int dpaa2_mac_get_link_ksettings(struct net_device *netdev,
-+ struct ethtool_link_ksettings *ks)
-+{
-+ phy_ethtool_ksettings_get(netdev->phydev, ks);
-+
-+ return 0;
-+}
-+
-+static int dpaa2_mac_set_link_ksettings(struct net_device *netdev,
-+ const struct ethtool_link_ksettings *ks)
-+{
-+ return phy_ethtool_ksettings_set(netdev->phydev, ks);
-+}
-+
-+static void dpaa2_mac_get_stats(struct net_device *netdev,
-+ struct rtnl_link_stats64 *storage)
-+{
-+ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
-+ u64 tmp;
-+ int err;
-+
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_EGR_MCAST_FRAME,
-+ &storage->tx_packets);
-+ if (err)
-+ goto error;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
-+ if (err)
-+ goto error;
-+ storage->tx_packets += tmp;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
-+ if (err)
-+ goto error;
-+ storage->tx_packets += tmp;
-+
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
-+ if (err)
-+ goto error;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
-+ if (err)
-+ goto error;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
-+ if (err)
-+ goto error;
-+
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
-+ if (err)
-+ goto error;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
-+ if (err)
-+ goto error;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_ING_FRAME_DISCARD,
-+ &storage->rx_dropped);
-+ if (err)
-+ goto error;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
-+ if (err)
-+ goto error;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_ING_OVERSIZED, &tmp);
-+ if (err)
-+ goto error;
-+ storage->rx_errors += tmp;
-+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
-+ DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
-+ if (err)
-+ goto error;
-+
-+ return;
-+error:
-+ netdev_err(netdev, "dpmac_get_counter err %d\n", err);
-+}
-+
-+static struct {
-+ enum dpmac_counter id;
-+ char name[ETH_GSTRING_LEN];
-+} dpaa2_mac_counters[] = {
-+ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
-+ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
-+ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
-+ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
-+ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
-+ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
-+ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
-+ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
-+ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
-+ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
-+ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
-+ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
-+ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
-+ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
-+ {DPMAC_CNT_ING_FRAG, "rx frags"},
-+ {DPMAC_CNT_ING_JABBER, "rx jabber"},
-+ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
-+ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
-+ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
-+ {DPMAC_CNT_ING_BYTE, "rx bytes"},
-+ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
-+ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
-+ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
-+ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
-+ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
-+ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
-+ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
-+ {DPMAC_CNT_EGR_BYTE, "tx bytes"},
-+
-+};
-+
-+static void dpaa2_mac_get_strings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
-+{
-+ int i;
-+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ dpaa2_mac_counters[i].name,
-+ ETH_GSTRING_LEN);
-+ break;
-+ }
-+}
-+
-+static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
-+{
-+ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
-+ int i;
-+ int err;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
-+ err = dpmac_get_counter(priv->mc_dev->mc_io,
-+ 0,
-+ priv->mc_dev->mc_handle,
-+ dpaa2_mac_counters[i].id, &data[i]);
-+ if (err)
-+ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
-+ dpaa2_mac_counters[i].name, err);
-+ }
-+}
-+
-+static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
-+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ARRAY_SIZE(dpaa2_mac_counters);
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static const struct net_device_ops dpaa2_mac_ndo_ops = {
-+ .ndo_open = &dpaa2_mac_open,
-+ .ndo_stop = &dpaa2_mac_stop,
-+ .ndo_start_xmit = &dpaa2_mac_drop_frame,
-+ .ndo_get_stats64 = &dpaa2_mac_get_stats,
-+};
-+
-+static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
-+ .get_drvinfo = &dpaa2_mac_get_drvinfo,
-+ .get_link_ksettings = &dpaa2_mac_get_link_ksettings,
-+ .set_link_ksettings = &dpaa2_mac_set_link_ksettings,
-+ .get_strings = &dpaa2_mac_get_strings,
-+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
-+ .get_sset_count = &dpaa2_mac_get_sset_count,
-+};
-+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
-+
-+static void configure_link(struct dpaa2_mac_priv *priv,
-+ struct dpmac_link_cfg *cfg)
-+{
-+ struct phy_device *phydev = priv->netdev->phydev;
-+
-+ if (unlikely(!phydev))
-+ return;
-+
-+ phydev->speed = cfg->rate;
-+ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
-+
-+ if (cfg->advertising != 0) {
-+ phydev->advertising = 0;
-+ link_mode_dpmac2phydev(cfg->advertising, &phydev->advertising);
-+ }
-+
-+ if (phydev->supported & SUPPORTED_Pause) {
-+ if (cfg->options & DPMAC_LINK_OPT_PAUSE)
-+ phydev->advertising |= ADVERTISED_Pause;
-+ else
-+ phydev->advertising &= ~ADVERTISED_Pause;
-+ }
-+
-+ if (phydev->supported & SUPPORTED_Asym_Pause) {
-+ if (cfg->options & DPMAC_LINK_OPT_ASYM_PAUSE)
-+ phydev->advertising |= ADVERTISED_Asym_Pause;
-+ else
-+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
-+ }
-+
-+ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
-+ phydev->autoneg = AUTONEG_ENABLE;
-+ phydev->advertising |= ADVERTISED_Autoneg;
-+ } else {
-+ phydev->autoneg = AUTONEG_DISABLE;
-+ phydev->advertising &= ~ADVERTISED_Autoneg;
-+ }
-+
-+ phy_start_aneg(phydev);
-+}
-+
-+static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
-+{
-+ struct device *dev = (struct device *)arg;
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
-+ struct dpmac_link_cfg link_cfg = { 0 };
-+ u32 status;
-+ int err;
-+
-+ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ DPMAC_IRQ_INDEX, &status);
-+ if (unlikely(err || !status))
-+ return IRQ_NONE;
-+
-+ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
-+ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
-+ if (cmp_dpmac_ver(priv, DPMAC_LINK_AUTONEG_VER_MAJOR,
-+ DPMAC_LINK_AUTONEG_VER_MINOR) < 0)
-+ err = dpmac_get_link_cfg(mc_dev->mc_io, 0,
-+ mc_dev->mc_handle, &link_cfg);
-+ else
-+ err = dpmac_get_link_cfg_v2(mc_dev->mc_io, 0,
-+ mc_dev->mc_handle,
-+ &link_cfg);
-+ if (unlikely(err))
-+ goto out;
-+
-+ configure_link(priv, &link_cfg);
-+ }
-+
-+out:
-+ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ DPMAC_IRQ_INDEX, status);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int setup_irqs(struct fsl_mc_device *mc_dev)
-+{
-+ int err = 0;
-+ struct fsl_mc_device_irq *irq;
-+
-+ err = fsl_mc_allocate_irqs(mc_dev);
-+ if (err) {
-+ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
-+ return err;
-+ }
-+
-+ irq = mc_dev->irqs[0];
-+ err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
-+ NULL, &dpaa2_mac_irq_handler,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(&mc_dev->dev), &mc_dev->dev);
-+ if (err) {
-+ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
-+ err);
-+ goto free_irq;
-+ }
-+
-+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
-+ if (err) {
-+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
-+ goto free_irq;
-+ }
-+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ DPMAC_IRQ_INDEX, 1);
-+ if (err) {
-+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
-+ goto free_irq;
-+ }
-+
-+ return 0;
-+
-+free_irq:
-+ fsl_mc_free_irqs(mc_dev);
-+
-+ return err;
-+}
-+
-+static void teardown_irqs(struct fsl_mc_device *mc_dev)
-+{
-+ int err;
-+
-+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ DPMAC_IRQ_INDEX, 0);
-+ if (err)
-+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
-+
-+ fsl_mc_free_irqs(mc_dev);
-+}
-+
-+static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
-+{
-+ struct device_node *dpmacs, *dpmac = NULL;
-+ struct device_node *mc_node = dev->of_node;
-+ u32 id;
-+ int err;
-+
-+ dpmacs = of_find_node_by_name(mc_node, "dpmacs");
-+ if (!dpmacs) {
-+ dev_err(dev, "No dpmacs subnode in device-tree\n");
-+ return NULL;
-+ }
-+
-+ while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
-+ err = of_property_read_u32(dpmac, "reg", &id);
-+ if (err)
-+ continue;
-+ if (id == dpmac_id)
-+ return dpmac;
-+ }
-+
-+ return NULL;
-+}
-+
-+static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
-+{
-+ struct device *dev;
-+ struct dpaa2_mac_priv *priv = NULL;
-+ struct device_node *phy_node, *dpmac_node;
-+ struct net_device *netdev;
-+ int if_mode;
-+ int err = 0;
-+
-+ dev = &mc_dev->dev;
-+
-+ /* prepare a net_dev structure to make the phy lib API happy */
-+ netdev = alloc_etherdev(sizeof(*priv));
-+ if (!netdev) {
-+ dev_err(dev, "alloc_etherdev error\n");
-+ err = -ENOMEM;
-+ goto err_exit;
-+ }
-+ priv = netdev_priv(netdev);
-+ priv->mc_dev = mc_dev;
-+ priv->netdev = netdev;
-+
-+ SET_NETDEV_DEV(netdev, dev);
-+
-+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
-+ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
-+#endif
-+
-+ dev_set_drvdata(dev, priv);
-+
-+ /* We may need to issue MC commands while in atomic context */
-+ err = fsl_mc_portal_allocate(mc_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
-+ &mc_dev->mc_io);
-+ if (err || !mc_dev->mc_io) {
-+ dev_dbg(dev, "fsl_mc_portal_allocate error: %d\n", err);
-+ err = -EPROBE_DEFER;
-+ goto err_free_netdev;
-+ }
-+
-+ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
-+ &mc_dev->mc_handle);
-+ if (err || !mc_dev->mc_handle) {
-+ dev_err(dev, "dpmac_open error: %d\n", err);
-+ err = -ENODEV;
-+ goto err_free_mcp;
-+ }
-+
-+ err = dpmac_get_api_version(mc_dev->mc_io, 0, &priv->dpmac_ver_major,
-+ &priv->dpmac_ver_minor);
-+ if (err) {
-+ dev_err(dev, "dpmac_get_api_version failed\n");
-+ goto err_version;
-+ }
-+
-+ if (cmp_dpmac_ver(priv, DPMAC_VER_MAJOR, DPMAC_VER_MINOR) < 0) {
-+ dev_err(dev, "DPMAC version %u.%u lower than supported %u.%u\n",
-+ priv->dpmac_ver_major, priv->dpmac_ver_minor,
-+ DPMAC_VER_MAJOR, DPMAC_VER_MINOR);
-+ err = -ENOTSUPP;
-+ goto err_version;
-+ }
-+
-+ err = dpmac_get_attributes(mc_dev->mc_io, 0,
-+ mc_dev->mc_handle, &priv->attr);
-+ if (err) {
-+ dev_err(dev, "dpmac_get_attributes err %d\n", err);
-+ err = -EINVAL;
-+ goto err_close;
-+ }
-+
-+ /* Look up the DPMAC node in the device-tree. */
-+ dpmac_node = find_dpmac_node(dev, priv->attr.id);
-+ if (!dpmac_node) {
-+ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
-+ err = -ENODEV;
-+ goto err_close;
-+ }
-+
-+ err = setup_irqs(mc_dev);
-+ if (err) {
-+ err = -EFAULT;
-+ goto err_close;
-+ }
-+
-+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
-+ /* OPTIONAL, register netdev just to make it visible to the user */
-+ netdev->netdev_ops = &dpaa2_mac_ndo_ops;
-+ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
-+
-+ /* phy starts up enabled so netdev should be up too */
-+ netdev->flags |= IFF_UP;
-+
-+ err = register_netdev(priv->netdev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev error %d\n", err);
-+ err = -ENODEV;
-+ goto err_free_irq;
-+ }
-+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
-+
-+ /* get the interface mode from the dpmac of node or from the MC attributes */
-+ if_mode = of_get_phy_mode(dpmac_node);
-+ if (if_mode >= 0) {
-+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
-+ phy_modes(if_mode), priv->attr.eth_if);
-+ goto link_type;
-+ }
-+
-+ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
-+ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
-+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
-+ phy_modes(if_mode), priv->attr.eth_if);
-+ } else {
-+ dev_err(dev, "Unexpected interface mode %d\n",
-+ priv->attr.eth_if);
-+ err = -EINVAL;
-+ goto err_no_if_mode;
-+ }
-+
-+link_type:
-+ /* probe the PHY as fixed-link if the DPMAC attribute indicates so */
-+ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED)
-+ goto probe_fixed_link;
-+
-+ /* or if there's no phy-handle defined in the device tree */
-+ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
-+ if (!phy_node) {
-+ goto probe_fixed_link;
-+ }
-+
-+ /* try to connect to the PHY */
-+ netdev->phydev = of_phy_connect(netdev, phy_node,
-+ &dpaa2_mac_link_changed, 0, if_mode);
-+ if (!netdev->phydev) {
-+ /* No need for dev_err(); the kernel's loud enough as it is. */
-+ dev_dbg(dev, "Can't of_phy_connect() now.\n");
-+ /* We might be waiting for the MDIO MUX to probe, so defer
-+ * our own probing.
-+ */
-+ err = -EPROBE_DEFER;
-+ goto err_defer;
-+ }
-+ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
-+
-+probe_fixed_link:
-+ if (!netdev->phydev) {
-+ struct fixed_phy_status status = {
-+ .link = 1,
-+ /* fixed-phys don't support 10Gbps speed for now */
-+ .speed = 1000,
-+ .duplex = 1,
-+ };
-+
-+ /* try to register a fixed link phy */
-+ netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1,
-+ NULL);
-+ if (!netdev->phydev || IS_ERR(netdev->phydev)) {
-+ dev_err(dev, "error trying to register fixed PHY\n");
-+ /* So we don't crash unregister_netdev() later on */
-+ netdev->phydev = NULL;
-+ err = -EFAULT;
-+ goto err_no_phy;
-+ }
-+
-+ err = phy_connect_direct(netdev, netdev->phydev,
-+ &dpaa2_mac_link_changed, if_mode);
-+ if (err) {
-+ dev_err(dev, "error trying to connect to PHY\n");
-+ goto err_no_phy;
-+ }
-+
-+ dev_info(dev, "Registered fixed PHY.\n");
-+ }
-+
-+ dpaa2_mac_open(netdev);
-+
-+ return 0;
-+
-+err_no_if_mode:
-+err_defer:
-+err_no_phy:
-+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
-+ unregister_netdev(netdev);
-+err_free_irq:
-+#endif
-+ teardown_irqs(mc_dev);
-+err_version:
-+err_close:
-+ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-+err_free_mcp:
-+ fsl_mc_portal_free(mc_dev->mc_io);
-+err_free_netdev:
-+ free_netdev(netdev);
-+err_exit:
-+ return err;
-+}
-+
-+static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
-+{
-+ struct device *dev = &mc_dev->dev;
-+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
-+ struct net_device *netdev = priv->netdev;
-+
-+ dpaa2_mac_stop(netdev);
-+
-+ if (phy_is_pseudo_fixed_link(netdev->phydev))
-+ fixed_phy_unregister(netdev->phydev);
-+ else
-+ phy_disconnect(netdev->phydev);
-+ netdev->phydev = NULL;
-+
-+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
-+ unregister_netdev(priv->netdev);
-+#endif
-+ teardown_irqs(priv->mc_dev);
-+ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
-+ fsl_mc_portal_free(priv->mc_dev->mc_io);
-+ free_netdev(priv->netdev);
-+
-+ dev_set_drvdata(dev, NULL);
-+
-+ return 0;
-+}
-+
-+static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpmac",
-+ },
-+ { .vendor = 0x0 }
-+};
-+MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
-+
-+static struct fsl_mc_driver dpaa2_mac_drv = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = dpaa2_mac_probe,
-+ .remove = dpaa2_mac_remove,
-+ .match_id_table = dpaa2_mac_match_id_table,
-+};
-+
-+module_fsl_mc_driver(dpaa2_mac_drv);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
diff --git a/target/linux/layerscape/patches-4.14/705-dpaa2-rtc-support-layerscape.patch b/target/linux/layerscape/patches-4.14/705-dpaa2-rtc-support-layerscape.patch
deleted file mode 100644
index 3b3ffe9647..0000000000
--- a/target/linux/layerscape/patches-4.14/705-dpaa2-rtc-support-layerscape.patch
+++ /dev/null
@@ -1,1386 +0,0 @@
-From 802238feea29ddfb765fc0c162e0de34920cd58d Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:31 +0800
-Subject: [PATCH] dpaa2-rtc: support layerscape
-
-This is an integrated patch of dpaa2-rtc for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
- drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +++++
- drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 ++++++++++++++++++++++
- drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +++++
- drivers/staging/fsl-dpaa2/rtc/rtc.c | 240 +++++++
- drivers/staging/fsl-dpaa2/rtc/rtc.h | 14 +
- 6 files changed, 1342 insertions(+)
- create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
- create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
- create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
- create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
- create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
- create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.h
-
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
-@@ -0,0 +1,10 @@
-+
-+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += dpaa2-rtc.o
-+
-+dpaa2-rtc-objs := rtc.o dprtc.o
-+
-+all:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
-+
-+clean:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
-@@ -0,0 +1,160 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPRTC_CMD_H
-+#define _FSL_DPRTC_CMD_H
-+
-+/* DPRTC Version */
-+#define DPRTC_VER_MAJOR 2
-+#define DPRTC_VER_MINOR 0
-+
-+/* Command versioning */
-+#define DPRTC_CMD_BASE_VERSION 1
-+#define DPRTC_CMD_ID_OFFSET 4
-+
-+#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
-+#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
-+#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
-+#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
-+#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
-+
-+#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
-+#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
-+#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
-+#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
-+#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
-+
-+#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
-+#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
-+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
-+#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
-+#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
-+#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
-+
-+#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
-+#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
-+#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
-+#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
-+#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
-+#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
-+#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
-+#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
-+#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
-+#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
-+#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
-+
-+/* Macros for accessing command fields smaller than 1 byte */
-+#define DPRTC_MASK(field) \
-+ GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
-+ DPRTC_##field##_SHIFT)
-+#define dprtc_get_field(var, field) \
-+ (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
-+
-+#pragma pack(push, 1)
-+struct dprtc_cmd_open {
-+ uint32_t dprtc_id;
-+};
-+
-+struct dprtc_cmd_destroy {
-+ uint32_t object_id;
-+};
-+
-+#define DPRTC_ENABLE_SHIFT 0
-+#define DPRTC_ENABLE_SIZE 1
-+
-+struct dprtc_rsp_is_enabled {
-+ uint8_t en;
-+};
-+
-+struct dprtc_cmd_get_irq {
-+ uint32_t pad;
-+ uint8_t irq_index;
-+};
-+
-+struct dprtc_cmd_set_irq_enable {
-+ uint8_t en;
-+ uint8_t pad[3];
-+ uint8_t irq_index;
-+};
-+
-+struct dprtc_rsp_get_irq_enable {
-+ uint8_t en;
-+};
-+
-+struct dprtc_cmd_set_irq_mask {
-+ uint32_t mask;
-+ uint8_t irq_index;
-+};
-+
-+struct dprtc_rsp_get_irq_mask {
-+ uint32_t mask;
-+};
-+
-+struct dprtc_cmd_get_irq_status {
-+ uint32_t status;
-+ uint8_t irq_index;
-+};
-+
-+struct dprtc_rsp_get_irq_status {
-+ uint32_t status;
-+};
-+
-+struct dprtc_cmd_clear_irq_status {
-+ uint32_t status;
-+ uint8_t irq_index;
-+};
-+
-+struct dprtc_rsp_get_attributes {
-+ uint32_t pad;
-+ uint32_t id;
-+};
-+
-+struct dprtc_cmd_set_clock_offset {
-+ uint64_t offset;
-+};
-+
-+struct dprtc_get_freq_compensation {
-+ uint32_t freq_compensation;
-+};
-+
-+struct dprtc_time {
-+ uint64_t time;
-+};
-+
-+struct dprtc_rsp_get_api_version {
-+ uint16_t major;
-+ uint16_t minor;
-+};
-+#pragma pack(pop)
-+#endif /* _FSL_DPRTC_CMD_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
-@@ -0,0 +1,746 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/fsl/mc.h>
-+
-+#include "dprtc.h"
-+#include "dprtc-cmd.h"
-+
-+/**
-+ * dprtc_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dprtc_id: DPRTC unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dprtc_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dprtc_id,
-+ uint16_t *token)
-+{
-+ struct dprtc_cmd_open *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dprtc_cmd_open *)cmd.params;
-+ cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return err;
-+}
-+
-+/**
-+ * dprtc_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_create() - Create the DPRTC object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @obj_id: Returned object id
-+ *
-+ * Create the DPRTC object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The function accepts an authentication token of a parent
-+ * container that this object should be assigned to. The token
-+ * can be '0' so the object will be assigned to the default container.
-+ * The newly created object can be opened with the returned
-+ * object id and using the container's associated tokens and MC portals.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_create(struct fsl_mc_io *mc_io,
-+ uint16_t dprc_token,
-+ uint32_t cmd_flags,
-+ const struct dprtc_cfg *cfg,
-+ uint32_t *obj_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ (void)(cfg); /* unused */
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
-+ cmd_flags,
-+ dprc_token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *obj_id = mc_cmd_read_object_id(&cmd);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @object_id: The object id; it must be a valid id within the container that
-+ * created this object;
-+ *
-+ * The function accepts the authentication token of the parent container that
-+ * created the object (not the one that currently owns the object). The object
-+ * is searched within parent using the provided 'object_id'.
-+ * All tokens to the object must be closed before calling destroy.
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dprtc_destroy(struct fsl_mc_io *mc_io,
-+ uint16_t dprc_token,
-+ uint32_t cmd_flags,
-+ uint32_t object_id)
-+{
-+ struct dprtc_cmd_destroy *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
-+ cmd_flags,
-+ dprc_token);
-+ cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
-+ cmd_params->object_id = cpu_to_le32(object_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct dprtc_rsp_is_enabled *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
-+ *en = dprtc_get_field(rsp_params->en, ENABLE);
-+
-+ return 0;
-+}
-+
-+int dprtc_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable setting
-+ * controls the overall interrupt state: if the interrupt is disabled, no
-+ * cause will raise an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct dprtc_cmd_set_irq_enable *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->en = en;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct dprtc_rsp_get_irq_enable *rsp_params;
-+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
-+ *en = rsp_params->en;
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprtc_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct dprtc_cmd_set_irq_mask *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct dprtc_rsp_get_irq_mask *rsp_params;
-+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
-+ *mask = le32_to_cpu(rsp_params->mask);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct dprtc_cmd_get_irq_status *cmd_params;
-+ struct dprtc_rsp_get_irq_status *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
-+ *status = rsp_params->status;
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct dprtc_cmd_clear_irq_status *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->status = cpu_to_le32(status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_get_attributes - Retrieve DPRTC attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprtc_attr *attr)
-+{
-+ struct dprtc_rsp_get_attributes *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
-+ attr->id = le32_to_cpu(rsp_params->id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprtc_set_clock_offset() - Sets the clock's offset
-+ * (usually relative to another clock).
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @offset: New clock offset (in nanoseconds).
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int64_t offset)
-+{
-+ struct dprtc_cmd_set_clock_offset *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
-+ cmd_params->offset = cpu_to_le64(offset);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @freq_compensation: The new frequency compensation value to set.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t freq_compensation)
-+{
-+ struct dprtc_get_freq_compensation *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
-+ cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @freq_compensation: Frequency compensation value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t *freq_compensation)
-+{
-+ struct dprtc_get_freq_compensation *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
-+ *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprtc_get_time() - Returns the current RTC time.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @time: Current RTC time.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t *time)
-+{
-+ struct dprtc_time *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprtc_time *)cmd.params;
-+ *time = le64_to_cpu(rsp_params->time);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprtc_set_time() - Updates current RTC time.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @time: New RTC time.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time)
-+{
-+ struct dprtc_time *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_time *)cmd.params;
-+ cmd_params->time = cpu_to_le64(time);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_set_alarm() - Defines and sets alarm.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @time: In nanoseconds, the time when the alarm
-+ * should go off - must be a multiple of
-+ * 1 microsecond
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token, uint64_t time)
-+{
-+ struct dprtc_time *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprtc_time *)cmd.params;
-+ cmd_params->time = cpu_to_le64(time);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprtc_get_api_version() - Get Data Path Real Time Counter API version
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of data path real time counter API
-+ * @minor_ver: Minor version of data path real time counter API
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t *major_ver,
-+ uint16_t *minor_ver)
-+{
-+ struct dprtc_rsp_get_api_version *rsp_params;
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
-+ cmd_flags,
-+ 0);
-+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
-+ *major_ver = le16_to_cpu(rsp_params->major);
-+ *minor_ver = le16_to_cpu(rsp_params->minor);
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
-@@ -0,0 +1,172 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPRTC_H
-+#define __FSL_DPRTC_H
-+
-+/* Data Path Real Time Counter API
-+ * Contains initialization APIs and runtime control APIs for RTC
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * Number of IRQs
-+ */
-+#define DPRTC_MAX_IRQ_NUM 1
-+#define DPRTC_IRQ_INDEX 0
-+
-+/**
-+ * Interrupt event masks:
-+ */
-+
-+/**
-+ * Interrupt event mask indicating that an alarm event has occurred
-+ */
-+#define DPRTC_EVENT_ALARM 0x40000000
-+/**
-+ * Interrupt event mask indicating that a periodic pulse event has occurred
-+ */
-+#define DPRTC_EVENT_PPS 0x08000000
-+
-+int dprtc_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dprtc_id,
-+ uint16_t *token);
-+
-+int dprtc_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dprtc_cfg - Structure representing DPRTC configuration
-+ * @options: placeholder
-+ */
-+struct dprtc_cfg {
-+ uint32_t options;
-+};
-+
-+int dprtc_create(struct fsl_mc_io *mc_io,
-+ uint16_t dprc_token,
-+ uint32_t cmd_flags,
-+ const struct dprtc_cfg *cfg,
-+ uint32_t *obj_id);
-+
-+int dprtc_destroy(struct fsl_mc_io *mc_io,
-+ uint16_t dprc_token,
-+ uint32_t cmd_flags,
-+ uint32_t object_id);
-+
-+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int64_t offset);
-+
-+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t freq_compensation);
-+
-+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t *freq_compensation);
-+
-+int dprtc_get_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t *time);
-+
-+int dprtc_set_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time);
-+
-+int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time);
-+
-+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dprtc_attr - Structure representing DPRTC attributes
-+ * @id: DPRTC object ID
-+ */
-+struct dprtc_attr {
-+ int id;
-+};
-+
-+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprtc_attr *attr);
-+
-+int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t *major_ver,
-+ uint16_t *minor_ver);
-+
-+#endif /* __FSL_DPRTC_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
-@@ -0,0 +1,240 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/ptp_clock_kernel.h>
-+
-+#include <linux/fsl/mc.h>
-+
-+#include "rtc.h"
-+
-+#define N_EXT_TS 2
-+
-+struct ptp_clock *clock;
-+struct fsl_mc_device *rtc_mc_dev;
-+u32 freqCompensation;
-+
-+/* PTP clock operations */
-+static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-+{
-+ u64 adj;
-+ u32 diff, tmr_add;
-+ int neg_adj = 0;
-+ int err = 0;
-+ struct fsl_mc_device *mc_dev = rtc_mc_dev;
-+ struct device *dev = &mc_dev->dev;
-+
-+ if (ppb < 0) {
-+ neg_adj = 1;
-+ ppb = -ppb;
-+ }
-+
-+ tmr_add = freqCompensation;
-+ adj = tmr_add;
-+ adj *= ppb;
-+ diff = div_u64(adj, 1000000000ULL);
-+
-+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-+
-+ err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
-+ mc_dev->mc_handle, tmr_add);
-+ if (err)
-+ dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
-+ return 0;
-+}
-+
-+static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
-+{
-+ s64 now;
-+ int err = 0;
-+ struct fsl_mc_device *mc_dev = rtc_mc_dev;
-+ struct device *dev = &mc_dev->dev;
-+
-+ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
-+ if (err) {
-+ dev_err(dev, "dprtc_get_time err %d\n", err);
-+ return 0;
-+ }
-+
-+ now += delta;
-+
-+ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
-+ if (err) {
-+ dev_err(dev, "dprtc_set_time err %d\n", err);
-+ return 0;
-+ }
-+ return 0;
-+}
-+
-+static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
-+{
-+ u64 ns;
-+ u32 remainder;
-+ int err = 0;
-+ struct fsl_mc_device *mc_dev = rtc_mc_dev;
-+ struct device *dev = &mc_dev->dev;
-+
-+ err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
-+ if (err) {
-+ dev_err(dev, "dprtc_get_time err %d\n", err);
-+ return 0;
-+ }
-+
-+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-+ ts->tv_nsec = remainder;
-+ return 0;
-+}
-+
-+static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
-+ const struct timespec *ts)
-+{
-+ u64 ns;
-+ int err = 0;
-+ struct fsl_mc_device *mc_dev = rtc_mc_dev;
-+ struct device *dev = &mc_dev->dev;
-+
-+ ns = ts->tv_sec * 1000000000ULL;
-+ ns += ts->tv_nsec;
-+
-+ err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
-+ if (err)
-+ dev_err(dev, "dprtc_set_time err %d\n", err);
-+ return 0;
-+}
-+
-+static struct ptp_clock_info ptp_dpaa2_caps = {
-+ .owner = THIS_MODULE,
-+ .name = "dpaa2 clock",
-+ .max_adj = 512000,
-+ .n_alarm = 0,
-+ .n_ext_ts = N_EXT_TS,
-+ .n_per_out = 0,
-+ .n_pins = 0,
-+ .pps = 1,
-+ .adjfreq = ptp_dpaa2_adjfreq,
-+ .adjtime = ptp_dpaa2_adjtime,
-+ .gettime64 = ptp_dpaa2_gettime,
-+ .settime64 = ptp_dpaa2_settime,
-+};
-+
-+static int rtc_probe(struct fsl_mc_device *mc_dev)
-+{
-+ struct device *dev;
-+ int err = 0;
-+ u32 tmr_add = 0;
-+
-+ if (!mc_dev)
-+ return -EFAULT;
-+
-+ dev = &mc_dev->dev;
-+
-+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
-+ if (unlikely(err)) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
-+ goto err_exit;
-+ }
-+ if (!mc_dev->mc_io) {
-+ dev_err(dev,
-+ "fsl_mc_portal_allocate returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_exit;
-+ }
-+
-+ err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
-+ &mc_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dprtc_open err %d\n", err);
-+ goto err_free_mcp;
-+ }
-+ if (!mc_dev->mc_handle) {
-+ dev_err(dev, "dprtc_open returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_free_mcp;
-+ }
-+
-+ rtc_mc_dev = mc_dev;
-+
-+ err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
-+ mc_dev->mc_handle, &tmr_add);
-+ if (err) {
-+ dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
-+ goto err_close;
-+ }
-+ freqCompensation = tmr_add;
-+
-+ clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
-+ if (IS_ERR(clock)) {
-+ err = PTR_ERR(clock);
-+ goto err_close;
-+ }
-+ dpaa2_phc_index = ptp_clock_index(clock);
-+
-+ return 0;
-+err_close:
-+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-+err_free_mcp:
-+ fsl_mc_portal_free(mc_dev->mc_io);
-+err_exit:
-+ return err;
-+}
-+
-+static int rtc_remove(struct fsl_mc_device *mc_dev)
-+{
-+ ptp_clock_unregister(clock);
-+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-+ fsl_mc_portal_free(mc_dev->mc_io);
-+
-+ return 0;
-+}
-+
-+static const struct fsl_mc_device_id rtc_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dprtc",
-+ },
-+ {}
-+};
-+
-+static struct fsl_mc_driver rtc_drv = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = rtc_probe,
-+ .remove = rtc_remove,
-+ .match_id_table = rtc_match_id_table,
-+};
-+
-+module_fsl_mc_driver(rtc_drv);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.h
-@@ -0,0 +1,14 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright 2018 NXP
-+ */
-+
-+#ifndef __RTC_H
-+#define __RTC_H
-+
-+#include "dprtc.h"
-+#include "dprtc-cmd.h"
-+
-+extern int dpaa2_phc_index;
-+
-+#endif
diff --git a/target/linux/layerscape/patches-4.14/706-dpaa2-virtualbridge-support-layerscape.patch b/target/linux/layerscape/patches-4.14/706-dpaa2-virtualbridge-support-layerscape.patch
deleted file mode 100644
index 6c89bb1fe8..0000000000
--- a/target/linux/layerscape/patches-4.14/706-dpaa2-virtualbridge-support-layerscape.patch
+++ /dev/null
@@ -1,3256 +0,0 @@
-From 278c2ebd8f04e2b05c87187a3e8b6af552abd57f Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Thu, 13 Dec 2018 13:27:22 +0800
-Subject: [PATCH] dpaa2-virtualbridge: support layerscape
-
-This is an integrated patch of dpaa2-virtualbridge for layerscape
-
-Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
- drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
- drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++++
- drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1111 ++++++++++++++++
- drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++++++
- drivers/staging/fsl-dpaa2/evb/evb.c | 1353 ++++++++++++++++++++
- 6 files changed, 3213 insertions(+)
- create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
- create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
- create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
- create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
- create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
- create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
-
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
-@@ -0,0 +1,7 @@
-+config FSL_DPAA2_EVB
-+ tristate "DPAA2 Edge Virtual Bridge"
-+ depends on FSL_MC_BUS && FSL_DPAA2
-+ select VLAN_8021Q
-+ default y
-+ ---help---
-+ Prototype driver for DPAA2 Edge Virtual Bridge.
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/evb/Makefile
-@@ -0,0 +1,10 @@
-+
-+obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
-+
-+dpaa2-evb-objs := evb.o dpdmux.o
-+
-+all:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
-+
-+clean:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
-@@ -0,0 +1,279 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPDMUX_CMD_H
-+#define _FSL_DPDMUX_CMD_H
-+
-+/* DPDMUX Version */
-+#define DPDMUX_VER_MAJOR 6
-+#define DPDMUX_VER_MINOR 1
-+
-+#define DPDMUX_CMD_BASE_VER 1
-+#define DPDMUX_CMD_ID_OFFSET 4
-+
-+#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
-+
-+/* Command IDs */
-+#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
-+#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
-+#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
-+#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
-+#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
-+
-+#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
-+#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
-+#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
-+#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
-+#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
-+
-+#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
-+#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
-+#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
-+#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
-+#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
-+#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
-+
-+#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
-+
-+#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
-+
-+#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
-+#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
-+#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
-+#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
-+
-+#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
-+#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
-+#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
-+#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
-+#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
-+
-+#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
-+#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
-+#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
-+
-+#define DPDMUX_MASK(field) \
-+ GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
-+ DPDMUX_##field##_SHIFT)
-+#define dpdmux_set_field(var, field, val) \
-+ ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
-+#define dpdmux_get_field(var, field) \
-+ (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
-+
-+struct dpdmux_cmd_open {
-+ u32 dpdmux_id;
-+};
-+
-+struct dpdmux_cmd_create {
-+ u8 method;
-+ u8 manip;
-+ u16 num_ifs;
-+ u32 pad;
-+
-+ u16 adv_max_dmat_entries;
-+ u16 adv_max_mc_groups;
-+ u16 adv_max_vlan_ids;
-+ u16 pad1;
-+
-+ u64 options;
-+};
-+
-+struct dpdmux_cmd_destroy {
-+ u32 dpdmux_id;
-+};
-+
-+#define DPDMUX_ENABLE_SHIFT 0
-+#define DPDMUX_ENABLE_SIZE 1
-+
-+struct dpdmux_rsp_is_enabled {
-+ u8 en;
-+};
-+
-+struct dpdmux_cmd_set_irq_enable {
-+ u8 enable;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
-+
-+struct dpdmux_cmd_get_irq_enable {
-+ u32 pad;
-+ u8 irq_index;
-+};
-+
-+struct dpdmux_rsp_get_irq_enable {
-+ u8 enable;
-+};
-+
-+struct dpdmux_cmd_set_irq_mask {
-+ u32 mask;
-+ u8 irq_index;
-+};
-+
-+struct dpdmux_cmd_get_irq_mask {
-+ u32 pad;
-+ u8 irq_index;
-+};
-+
-+struct dpdmux_rsp_get_irq_mask {
-+ u32 mask;
-+};
-+
-+struct dpdmux_cmd_get_irq_status {
-+ u32 status;
-+ u8 irq_index;
-+};
-+
-+struct dpdmux_rsp_get_irq_status {
-+ u32 status;
-+};
-+
-+struct dpdmux_cmd_clear_irq_status {
-+ u32 status;
-+ u8 irq_index;
-+};
-+
-+struct dpdmux_rsp_get_attr {
-+ u8 method;
-+ u8 manip;
-+ u16 num_ifs;
-+ u16 mem_size;
-+ u16 pad;
-+
-+ u64 pad1;
-+
-+ u32 id;
-+ u32 pad2;
-+
-+ u64 options;
-+};
-+
-+struct dpdmux_cmd_set_max_frame_length {
-+ u16 max_frame_length;
-+};
-+
-+#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
-+#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
-+#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
-+#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
-+
-+struct dpdmux_cmd_if_set_accepted_frames {
-+ u16 if_id;
-+ u8 frames_options;
-+};
-+
-+struct dpdmux_cmd_if {
-+ u16 if_id;
-+};
-+
-+struct dpdmux_rsp_if_get_attr {
-+ u8 pad[3];
-+ u8 enabled;
-+ u8 pad1[3];
-+ u8 accepted_frames_type;
-+ u32 rate;
-+};
-+
-+struct dpdmux_cmd_if_l2_rule {
-+ u16 if_id;
-+ u8 mac_addr5;
-+ u8 mac_addr4;
-+ u8 mac_addr3;
-+ u8 mac_addr2;
-+ u8 mac_addr1;
-+ u8 mac_addr0;
-+
-+ u32 pad;
-+ u16 vlan_id;
-+};
-+
-+struct dpdmux_cmd_if_get_counter {
-+ u16 if_id;
-+ u8 counter_type;
-+};
-+
-+struct dpdmux_rsp_if_get_counter {
-+ u64 pad;
-+ u64 counter;
-+};
-+
-+struct dpdmux_cmd_if_set_link_cfg {
-+ u16 if_id;
-+ u16 pad[3];
-+
-+ u32 rate;
-+ u32 pad1;
-+
-+ u64 options;
-+};
-+
-+struct dpdmux_cmd_if_get_link_state {
-+ u16 if_id;
-+};
-+
-+struct dpdmux_rsp_if_get_link_state {
-+ u32 pad;
-+ u8 up;
-+ u8 pad1[3];
-+
-+ u32 rate;
-+ u32 pad2;
-+
-+ u64 options;
-+};
-+
-+struct dpdmux_rsp_get_api_version {
-+ u16 major;
-+ u16 minor;
-+};
-+
-+struct dpdmux_set_custom_key {
-+ u64 pad[6];
-+ u64 key_cfg_iova;
-+};
-+
-+struct dpdmux_cmd_add_custom_cls_entry {
-+ u8 pad[3];
-+ u8 key_size;
-+ u16 pad1;
-+ u16 dest_if;
-+ u64 key_iova;
-+ u64 mask_iova;
-+};
-+
-+struct dpdmux_cmd_remove_custom_cls_entry {
-+ u8 pad[3];
-+ u8 key_size;
-+ u32 pad1;
-+ u64 key_iova;
-+ u64 mask_iova;
-+};
-+
-+#endif /* _FSL_DPDMUX_CMD_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
-@@ -0,0 +1,1111 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/fsl/mc.h>
-+#include "dpdmux.h"
-+#include "dpdmux-cmd.h"
-+
-+/**
-+ * dpdmux_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpdmux_id: DPDMUX unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpdmux_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpdmux_id,
-+ u16 *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_open *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpdmux_cmd_open *)cmd.params;
-+ cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_create() - Create the DPDMUX object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @obj_id: returned object id
-+ *
-+ * Create the DPDMUX object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * The function accepts an authentication token of a parent
-+ * container that this object should be assigned to. The token
-+ * can be '0' so the object will be assigned to the default container.
-+ * The newly created object can be opened with the returned
-+ * object id and using the container's associated tokens and MC portals.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_create(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ const struct dpdmux_cfg *cfg,
-+ u32 *obj_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_create *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
-+ cmd_flags,
-+ dprc_token);
-+ cmd_params = (struct dpdmux_cmd_create *)cmd.params;
-+ cmd_params->method = cfg->method;
-+ cmd_params->manip = cfg->manip;
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ cmd_params->adv_max_dmat_entries =
-+ cpu_to_le16(cfg->adv.max_dmat_entries);
-+ cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
-+ cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
-+ cmd_params->options = cpu_to_le64(cfg->adv.options);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *obj_id = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @object_id: The object id; it must be a valid id within the container that
-+ * created this object;
-+ *
-+ * The function accepts the authentication token of the parent container that
-+ * created the object (not the one that currently owns the object). The object
-+ * is searched within parent using the provided 'object_id'.
-+ * All tokens to the object must be closed before calling destroy.
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpdmux_destroy(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ u32 object_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_destroy *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
-+ cmd_flags,
-+ dprc_token);
-+ cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
-+ cmd_params->dpdmux_id = cpu_to_le32(object_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_enable() - Enable DPDMUX functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_disable() - Disable DPDMUX functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_rsp_is_enabled *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
-+ *en = dpdmux_get_field(rsp_params->en, ENABLE);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable setting
-+ * controls the overall interrupt state: if the interrupt is disabled, no
-+ * cause will raise an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_set_irq_enable *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
-+ cmd_params->enable = en;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_get_irq_enable() - Get overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_get_irq_enable *cmd_params;
-+ struct dpdmux_rsp_get_irq_enable *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
-+ *en = rsp_params->enable;
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_set_irq_mask *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_get_irq_mask *cmd_params;
-+ struct dpdmux_rsp_get_irq_mask *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
-+ *mask = le32_to_cpu(rsp_params->mask);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_get_irq_status *cmd_params;
-+ struct dpdmux_rsp_get_irq_status *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_clear_irq_status *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_get_attributes() - Retrieve DPDMUX attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpdmux_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_rsp_get_attr *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
-+ attr->id = le32_to_cpu(rsp_params->id);
-+ attr->options = le64_to_cpu(rsp_params->options);
-+ attr->method = rsp_params->method;
-+ attr->manip = rsp_params->manip;
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
-+
-+ return 0;
-+}
-+
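/*
 * A minimal usage sketch, assuming the fsl_mc_io portal and the object id
 * come from the caller's probe path (names here are illustrative): open a
 * DPDMUX object, read its attributes and close it again.
 */
static int dpdmux_attr_example(struct fsl_mc_io *mc_io, int dpdmux_id)
{
	struct dpdmux_attr attr = { 0 };
	u16 token;
	int err;

	err = dpdmux_open(mc_io, 0, dpdmux_id, &token);
	if (err)
		return err;

	err = dpdmux_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		pr_info("dpdmux.%d: %u downlinks, options 0x%llx\n",
			attr.id, attr.num_ifs, attr.options);

	dpdmux_close(mc_io, 0, token);
	return err;
}
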
-+/**
-+ * dpdmux_if_enable() - Enable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct dpdmux_cmd_if *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_if_disable() - Disable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct dpdmux_cmd_if *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @max_frame_length: The required maximum frame length
-+ *
-+ * Update the maximum frame length on all DPDMUX interfaces.
-+ * In VEPA mode, the maximum frame length on all DPDMUX interfaces is
-+ * updated to the minimum of the maximum frame lengths of the connected
-+ * DPNIs and the current DPDMUX maximum frame length.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 max_frame_length)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_set_max_frame_length *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
-+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_ul_reset_counters() - Function resets the uplink counters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_if_set_accepted_frames() - Set the accepted frame types
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
-+ * @cfg: Frame types configuration
-+ *
-+ * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
-+ * priority-tagged frames are discarded.
-+ * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
-+ * priority-tagged frames are accepted.
-+ * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
-+ * untagged and priority-tagged frames are accepted.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpdmux_accepted_frames *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
-+ cfg->type);
-+ dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
-+ cfg->unaccept_act);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
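/*
 * A minimal sketch, assuming 'mc_io' and 'token' refer to an already opened
 * DPDMUX object: admit only VLAN-tagged traffic on downlink interface 1
 * (a hypothetical interface id) and drop everything else.
 */
static int dpdmux_vlan_only_example(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpdmux_accepted_frames cfg = {
		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPDMUX_ACTION_DROP,
	};

	return dpdmux_if_set_accepted_frames(mc_io, 0, token, 1, &cfg);
}
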
-+/**
-+ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
-+ * @attr: Interface attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpdmux_if_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_if *cmd_params;
-+ struct dpdmux_rsp_if_get_attr *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
-+ attr->rate = le32_to_cpu(rsp_params->rate);
-+ attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
-+ attr->accept_frame_type =
-+ dpdmux_get_field(rsp_params->accepted_frames_type,
-+ ACCEPTED_FRAMES_TYPE);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Destination interface ID
-+ * @rule: L2 rule
-+ *
-+ * Function removes an L2 rule from the DPDMUX table
-+ * or removes an interface from an existing multicast address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpdmux_l2_rule *rule)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_if_l2_rule *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
-+ cmd_params->mac_addr5 = rule->mac_addr[5];
-+ cmd_params->mac_addr4 = rule->mac_addr[4];
-+ cmd_params->mac_addr3 = rule->mac_addr[3];
-+ cmd_params->mac_addr2 = rule->mac_addr[2];
-+ cmd_params->mac_addr1 = rule->mac_addr[1];
-+ cmd_params->mac_addr0 = rule->mac_addr[0];
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Destination interface ID
-+ * @rule: L2 rule
-+ *
-+ * Function adds an L2 rule into the DPDMUX table
-+ * or adds an interface to an existing multicast address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpdmux_l2_rule *rule)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_if_l2_rule *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
-+ cmd_params->mac_addr5 = rule->mac_addr[5];
-+ cmd_params->mac_addr4 = rule->mac_addr[4];
-+ cmd_params->mac_addr3 = rule->mac_addr[3];
-+ cmd_params->mac_addr2 = rule->mac_addr[2];
-+ cmd_params->mac_addr1 = rule->mac_addr[1];
-+ cmd_params->mac_addr0 = rule->mac_addr[0];
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_if_get_counter() - Function obtains a specific counter of an interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface Id
-+ * @counter_type: counter type
-+ * @counter: Returned specific counter information
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpdmux_counter_type counter_type,
-+ u64 *counter)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_if_get_counter *cmd_params;
-+ struct dpdmux_rsp_if_get_counter *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->counter_type = counter_type;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
-+ *counter = le64_to_cpu(rsp_params->counter);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_if_set_link_cfg() - set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: interface id
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpdmux_link_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_if_set_link_cfg *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->rate = cpu_to_le32(cfg->rate);
-+ cmd_params->options = cpu_to_le64(cfg->options);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_if_get_link_state - Return the link state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: interface id
-+ * @state: link state
-+ *
-+ * @returns '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpdmux_link_state *state)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_cmd_if_get_link_state *cmd_params;
-+ struct dpdmux_rsp_if_get_link_state *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
-+ state->rate = le32_to_cpu(rsp_params->rate);
-+ state->options = le64_to_cpu(rsp_params->options);
-+ state->up = dpdmux_get_field(rsp_params->up, ENABLE);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpdmux_set_custom_key - Set a custom classification key.
-+ *
-+ * This API is only available for DPDMUX instance created with
-+ * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
-+ * classification table using dpdmux_add_custom_cls_entry.
-+ *
-+ * Calls to dpdmux_set_custom_key remove all existing classification entries
-+ * that may have been added previously using dpdmux_add_custom_cls_entry.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @key_cfg_iova: DMA address of a configuration structure set up using
-+ * dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
-+ *
-+ * @returns '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u64 key_cfg_iova)
-+{
-+ struct dpdmux_set_custom_key *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
-+ cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_add_custom_cls_entry - Adds a custom classification entry.
-+ *
-+ * This API is only available for DPDMUX instances created with
-+ * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
-+ * composition rule must be set up using dpdmux_set_custom_key.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @rule: Classification rule to insert. Rules cannot be duplicated; if a
-+ * matching rule already exists, the action will be replaced.
-+ * @action: Action to perform for matching traffic.
-+ *
-+ * @returns '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpdmux_rule_cfg *rule,
-+ struct dpdmux_cls_action *action)
-+{
-+ struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
-+ cmd_flags,
-+ token);
-+
-+ cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
-+ cmd_params->key_size = rule->key_size;
-+ cmd_params->dest_if = cpu_to_le16(action->dest_if);
-+ cmd_params->key_iova = cpu_to_le64(rule->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
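/*
 * A minimal sketch of the custom classification flow for a DPDMUX created
 * with DPDMUX_METHOD_CUSTOM: set the key composition rule first, then add
 * one entry steering matching frames to a hypothetical downlink 2.  The
 * IOVAs are assumed to point at DMA-mapped buffers prepared by the caller
 * (the key configuration typically built with dpkg_prepare_key_cfg()).
 */
static int dpdmux_custom_cls_example(struct fsl_mc_io *mc_io, u16 token,
				     u64 key_cfg_iova, u64 key_iova,
				     u64 mask_iova, u8 key_size)
{
	struct dpdmux_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	struct dpdmux_cls_action action = { .dest_if = 2 };
	int err;

	err = dpdmux_set_custom_key(mc_io, 0, token, key_cfg_iova);
	if (err)
		return err;

	return dpdmux_add_custom_cls_entry(mc_io, 0, token, &rule, &action);
}
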
-+/**
-+ * dpdmux_remove_custom_cls_entry - Removes a custom classification entry.
-+ *
-+ * This API is only available for DPDMUX instances created with
-+ * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
-+ * entries previously inserted using dpdmux_add_custom_cls_entry.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @rule: Classification rule to remove
-+ *
-+ * @returns '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpdmux_rule_cfg *rule)
-+{
-+ struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
-+ cmd_params->key_size = rule->key_size;
-+ cmd_params->key_iova = cpu_to_le64(rule->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpdmux_get_api_version() - Get Data Path Demux API version
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of data path demux API
-+ * @minor_ver: Minor version of data path demux API
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmux_rsp_get_api_version *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
-+ cmd_flags,
-+ 0);
-+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
-+ *major_ver = le16_to_cpu(rsp_params->major);
-+ *minor_ver = le16_to_cpu(rsp_params->minor);
-+
-+ return 0;
-+}
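
/*
 * A minimal sketch: query the DPDMUX API version and refuse firmware whose
 * major version is below 6, mirroring the DPDMUX_MIN_VER_MAJOR constant
 * defined by the EVB driver below.
 */
static int dpdmux_version_check_example(struct fsl_mc_io *mc_io)
{
	u16 major, minor;
	int err;

	err = dpdmux_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	return major < 6 ? -EOPNOTSUPP : 0;
}
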
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
-@@ -0,0 +1,453 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPDMUX_H
-+#define __FSL_DPDMUX_H
-+
-+struct fsl_mc_io;
-+
-+/* Data Path Demux API
-+ * Contains API for handling DPDMUX topology and functionality
-+ */
-+
-+int dpdmux_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpdmux_id,
-+ u16 *token);
-+
-+int dpdmux_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * DPDMUX general options
-+ */
-+
-+/**
-+ * Enable bridging between internal interfaces
-+ */
-+#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
-+
-+/**
-+ * Mask support for classification
-+ */
-+#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
-+
-+#define DPDMUX_IRQ_INDEX_IF 0x0000
-+#define DPDMUX_IRQ_INDEX 0x0001
-+
-+/**
-+ * IRQ event - Indicates that the link state changed
-+ */
-+#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
-+
-+/**
-+ * enum dpdmux_manip - DPDMUX manipulation operations
-+ * @DPDMUX_MANIP_NONE: No manipulation on frames
-+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
-+ */
-+enum dpdmux_manip {
-+ DPDMUX_MANIP_NONE = 0x0,
-+ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
-+};
-+
-+/**
-+ * enum dpdmux_method - DPDMUX method options
-+ * @DPDMUX_METHOD_NONE: no DPDMUX method
-+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
-+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
-+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
-+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
-+ * @DPDMUX_METHOD_CUSTOM: DPDMUX based on a custom classification key
-+ */
-+enum dpdmux_method {
-+ DPDMUX_METHOD_NONE = 0x0,
-+ DPDMUX_METHOD_C_VLAN_MAC = 0x1,
-+ DPDMUX_METHOD_MAC = 0x2,
-+ DPDMUX_METHOD_C_VLAN = 0x3,
-+ DPDMUX_METHOD_S_VLAN = 0x4,
-+ DPDMUX_METHOD_CUSTOM = 0x5
-+};
-+
-+/**
-+ * struct dpdmux_cfg - DPDMUX configuration parameters
-+ * @method: Defines the operation method for the DPDMUX address table
-+ * @manip: Required manipulation operation
-+ * @num_ifs: Number of interfaces (excluding the uplink interface)
-+ * @adv: Advanced parameters; default is all zeros;
-+ * use this structure to change default settings
-+ */
-+struct dpdmux_cfg {
-+ enum dpdmux_method method;
-+ enum dpdmux_manip manip;
-+ u16 num_ifs;
-+ /**
-+ * struct adv - Advanced parameters
-+ * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
-+ * @max_dmat_entries: Maximum entries in DPDMUX address table
-+ * 0 - indicates default: 64 entries per interface.
-+ * @max_mc_groups: Number of multicast groups in DPDMUX table
-+ * 0 - indicates default: 32 multicast groups
-+ * @max_vlan_ids: Maximum number of VLAN IDs allowed in the system -
-+ * relevant only when working in the MAC+VLAN method.
-+ * 0 - indicates default: 16 VLAN IDs.
-+ */
-+ struct {
-+ u64 options;
-+ u16 max_dmat_entries;
-+ u16 max_mc_groups;
-+ u16 max_vlan_ids;
-+ } adv;
-+};
-+
-+int dpdmux_create(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ const struct dpdmux_cfg *cfg,
-+ u32 *obj_id);
-+
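/*
 * A minimal sketch, using hypothetical values: a dpdmux_cfg describing a
 * C-VLAN demux with four downlink interfaces, leaving the advanced
 * parameters zeroed so the default table sizes apply.  Such a structure
 * would be passed to dpdmux_create().
 */
static const struct dpdmux_cfg dpdmux_cfg_example = {
	.method  = DPDMUX_METHOD_C_VLAN,
	.manip   = DPDMUX_MANIP_NONE,
	.num_ifs = 4,
	/* .adv left as all zeros - default table sizes */
};
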
-+int dpdmux_destroy(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ u32 object_id);
-+
-+int dpdmux_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpdmux_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
-+
-+int dpdmux_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en);
-+
-+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
-+
-+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask);
-+
-+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
-+
-+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status);
-+
-+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status);
-+
-+/**
-+ * struct dpdmux_attr - Structure representing DPDMUX attributes
-+ * @id: DPDMUX object ID
-+ * @options: Configuration options (bitmap)
-+ * @method: DPDMUX address table method
-+ * @manip: DPDMUX manipulation type
-+ * @num_ifs: Number of interfaces (excluding the uplink interface)
-+ * @mem_size: DPDMUX frame storage memory size
-+ */
-+struct dpdmux_attr {
-+ int id;
-+ u64 options;
-+ enum dpdmux_method method;
-+ enum dpdmux_manip manip;
-+ u16 num_ifs;
-+ u16 mem_size;
-+};
-+
-+int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpdmux_attr *attr);
-+
-+int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 max_frame_length);
-+
-+/**
-+ * enum dpdmux_counter_type - Counter types
-+ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
-+ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
-+ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
-+ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
-+ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
-+ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
-+ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
-+ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
-+ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
-+ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
-+ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
-+ */
-+enum dpdmux_counter_type {
-+ DPDMUX_CNT_ING_FRAME = 0x0,
-+ DPDMUX_CNT_ING_BYTE = 0x1,
-+ DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
-+ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
-+ DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
-+ DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
-+ DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
-+ DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
-+ DPDMUX_CNT_EGR_FRAME = 0x8,
-+ DPDMUX_CNT_EGR_BYTE = 0x9,
-+ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
-+};
-+
-+/**
-+ * enum dpdmux_accepted_frames_type - DPDMUX frame types
-+ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
-+ * priority-tagged frames
-+ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
-+ * priority-tagged frames that are received on this
-+ * interface
-+ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
-+ * received on this interface are accepted
-+ */
-+enum dpdmux_accepted_frames_type {
-+ DPDMUX_ADMIT_ALL = 0,
-+ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
-+ DPDMUX_ADMIT_ONLY_UNTAGGED = 2
-+};
-+
-+/**
-+ * enum dpdmux_action - DPDMUX action for un-accepted frames
-+ * @DPDMUX_ACTION_DROP: Drop un-accepted frames
-+ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
-+ * control interface
-+ */
-+enum dpdmux_action {
-+ DPDMUX_ACTION_DROP = 0,
-+ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
-+};
-+
-+/**
-+ * struct dpdmux_accepted_frames - Frame types configuration
-+ * @type: Defines ingress accepted frames
-+ * @unaccept_act: Defines action on frames not accepted
-+ */
-+struct dpdmux_accepted_frames {
-+ enum dpdmux_accepted_frames_type type;
-+ enum dpdmux_action unaccept_act;
-+};
-+
-+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpdmux_accepted_frames *cfg);
-+
-+/**
-+ * struct dpdmux_if_attr - Structure representing DPDMUX interface attributes
-+ * @rate: Configured interface rate (in bits per second)
-+ * @enabled: Indicates if interface is enabled
-+ * @accept_frame_type: Indicates type of accepted frames for the interface
-+ */
-+struct dpdmux_if_attr {
-+ u32 rate;
-+ int enabled;
-+ enum dpdmux_accepted_frames_type accept_frame_type;
-+};
-+
-+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpdmux_if_attr *attr);
-+
-+int dpdmux_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id);
-+
-+int dpdmux_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id);
-+
-+/**
-+ * struct dpdmux_l2_rule - Structure representing L2 rule
-+ * @mac_addr: MAC address
-+ * @vlan_id: VLAN ID
-+ */
-+struct dpdmux_l2_rule {
-+ u8 mac_addr[6];
-+ u16 vlan_id;
-+};
-+
-+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpdmux_l2_rule *rule);
-+
-+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpdmux_l2_rule *rule);
-+
-+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpdmux_counter_type counter_type,
-+ u64 *counter);
-+
-+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable asymmetric pause frames
-+ */
-+#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+
-+/**
-+ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
-+ */
-+struct dpdmux_link_cfg {
-+ u32 rate;
-+ u64 options;
-+};
-+
-+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpdmux_link_cfg *cfg);
-+/**
-+ * struct dpdmux_link_state - Structure representing DPDMUX link state
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
-+ * @up: 0 - down, 1 - up
-+ */
-+struct dpdmux_link_state {
-+ u32 rate;
-+ u64 options;
-+ int up;
-+};
-+
-+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpdmux_link_state *state);
-+
-+int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u64 key_cfg_iova);
-+
-+/**
-+ * struct dpdmux_rule_cfg - Custom classification rule.
-+ *
-+ * @key_iova: DMA address of buffer storing the look-up value
-+ * @mask_iova: DMA address of the mask used for TCAM classification
-+ * @key_size: size, in bytes, of the look-up value. This must match the size
-+ * of the look-up key defined using dpdmux_set_custom_key; otherwise the
-+ * entry will never be hit
-+ */
-+struct dpdmux_rule_cfg {
-+ u64 key_iova;
-+ u64 mask_iova;
-+ u8 key_size;
-+};
-+
-+/**
-+ * struct dpdmux_cls_action - Action to execute for frames matching the
-+ * classification entry
-+ *
-+ * @dest_if: Interface to forward the frames to. Port numbering is similar to
-+ * the one used to connect interfaces:
-+ * - 0 is the uplink port,
-+ * - all others are downlink ports.
-+ */
-+struct dpdmux_cls_action {
-+ u16 dest_if;
-+};
-+
-+int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpdmux_rule_cfg *rule,
-+ struct dpdmux_cls_action *action);
-+
-+int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpdmux_rule_cfg *rule);
-+
-+int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
-+
-+#endif /* __FSL_DPDMUX_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/evb/evb.c
-@@ -0,0 +1,1353 @@
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/module.h>
-+#include <linux/msi.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/if_vlan.h>
-+
-+#include <uapi/linux/if_bridge.h>
-+#include <net/netlink.h>
-+
-+#include <linux/fsl/mc.h>
-+
-+#include "dpdmux.h"
-+#include "dpdmux-cmd.h"
-+
-+static const char evb_drv_version[] = "0.1";
-+
-+/* Minimum supported DPDMUX version */
-+#define DPDMUX_MIN_VER_MAJOR 6
-+#define DPDMUX_MIN_VER_MINOR 0
-+
-+/* IRQ index */
-+#define DPDMUX_MAX_IRQ_NUM 2
-+
-+/* MAX FRAME LENGTH (currently 10k) */
-+#define EVB_MAX_FRAME_LENGTH (10 * 1024)
-+#define EVB_MAX_MTU (EVB_MAX_FRAME_LENGTH - VLAN_ETH_HLEN)
-+#define EVB_MIN_MTU 68
-+
-+struct evb_port_priv {
-+ struct net_device *netdev;
-+ struct list_head list;
-+ u16 port_index;
-+ struct evb_priv *evb_priv;
-+ u8 vlans[VLAN_VID_MASK + 1];
-+};
-+
-+struct evb_priv {
-+ /* keep first */
-+ struct evb_port_priv uplink;
-+
-+ struct fsl_mc_io *mc_io;
-+ struct list_head port_list;
-+ struct dpdmux_attr attr;
-+ u16 mux_handle;
-+ int dev_id;
-+};
-+
-+static int _evb_port_carrier_state_sync(struct net_device *netdev)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpdmux_link_state state;
-+ int err;
-+
-+ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index, &state);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
-+ return err;
-+ }
-+
-+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
-+
-+ if (state.up)
-+ netif_carrier_on(port_priv->netdev);
-+ else
-+ netif_carrier_off(port_priv->netdev);
-+
-+ return 0;
-+}
-+
-+static int evb_port_open(struct net_device *netdev)
-+{
-+ int err;
-+
-+ /* FIXME: enable port when support added */
-+
-+ err = _evb_port_carrier_state_sync(netdev);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_carrier_state_sync err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
-+{
-+ /* we don't support I/O for now, drop the frame */
-+ dev_kfree_skb_any(skb);
-+ return NETDEV_TX_OK;
-+}
-+
-+static int evb_links_state_update(struct evb_priv *priv)
-+{
-+ struct evb_port_priv *port_priv;
-+ struct list_head *pos;
-+ int err;
-+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct evb_port_priv, list);
-+
-+ err = _evb_port_carrier_state_sync(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "_evb_port_carrier_state_sync err %d\n",
-+ err);
-+ }
-+
-+ return 0;
-+}
-+
-+static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
-+{
-+ return IRQ_WAKE_THREAD;
-+}
-+
-+static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
-+{
-+ struct device *dev = (struct device *)arg;
-+ struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct evb_priv *priv = netdev_priv(netdev);
-+ struct fsl_mc_io *io = priv->mc_io;
-+ u16 token = priv->mux_handle;
-+ int irq_index = DPDMUX_IRQ_INDEX_IF;
-+
-+ /* Mask the events and the if_id reserved bits to be cleared on read */
-+ u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
-+ int err;
-+
-+ /* Sanity check */
-+ if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
-+ goto out;
-+ if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
-+ goto out;
-+
-+ err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "Can't get irq status (err %d)", err);
-+ err = dpdmux_clear_irq_status(io, 0, token, irq_index,
-+ 0xFFFFFFFF);
-+ if (unlikely(err))
-+ netdev_err(netdev, "Can't clear irq status (err %d)",
-+ err);
-+ goto out;
-+ }
-+
-+ if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
-+ err = evb_links_state_update(priv);
-+ if (unlikely(err))
-+ goto out;
-+ }
-+
-+out:
-+ return IRQ_HANDLED;
-+}
-+
-+static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
-+{
-+ struct device *dev = &evb_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct evb_priv *priv = netdev_priv(netdev);
-+ int err = 0;
-+ struct fsl_mc_device_irq *irq;
-+ const int irq_index = DPDMUX_IRQ_INDEX_IF;
-+ u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
-+
-+ err = fsl_mc_allocate_irqs(evb_dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "MC irqs allocation failed\n");
-+ return err;
-+ }
-+
-+ if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
-+ err = -EINVAL;
-+ goto free_irq;
-+ }
-+
-+ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
-+ irq_index, 0);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
-+ goto free_irq;
-+ }
-+
-+ irq = evb_dev->irqs[irq_index];
-+
-+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
-+ evb_irq0_handler,
-+ _evb_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(dev), dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "devm_request_threaded_irq(): %d", err);
-+ goto free_irq;
-+ }
-+
-+ err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
-+ irq_index, mask);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
-+ goto free_devm_irq;
-+ }
-+
-+ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
-+ irq_index, 1);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
-+ goto free_devm_irq;
-+ }
-+
-+ return 0;
-+
-+free_devm_irq:
-+ devm_free_irq(dev, irq->msi_desc->irq, dev);
-+free_irq:
-+ fsl_mc_free_irqs(evb_dev);
-+ return err;
-+}
-+
-+static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
-+{
-+ struct device *dev = &evb_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct evb_priv *priv = netdev_priv(netdev);
-+
-+ dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
-+ DPDMUX_IRQ_INDEX_IF, 0);
-+
-+ devm_free_irq(dev,
-+ evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
-+ dev);
-+ fsl_mc_free_irqs(evb_dev);
-+}
-+
-+static int evb_port_add_rule(struct net_device *netdev,
-+ const unsigned char *addr, u16 vid)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpdmux_l2_rule rule = { .vlan_id = vid };
-+ int err;
-+
-+ if (addr)
-+ ether_addr_copy(rule.mac_addr, addr);
-+
-+ err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index, &rule);
-+ if (unlikely(err))
-+ netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
-+ return err;
-+}
-+
-+static int evb_port_del_rule(struct net_device *netdev,
-+ const unsigned char *addr, u16 vid)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpdmux_l2_rule rule = { .vlan_id = vid };
-+ int err;
-+
-+ if (addr)
-+ ether_addr_copy(rule.mac_addr, addr);
-+
-+ err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index, &rule);
-+ if (unlikely(err))
-+ netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
-+ return err;
-+}
-+
-+static bool _lookup_address(struct net_device *netdev,
-+ const unsigned char *addr)
-+{
-+ struct netdev_hw_addr *ha;
-+ struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
-+ &netdev->uc : &netdev->mc;
-+
-+ netif_addr_lock_bh(netdev);
-+ list_for_each_entry(ha, &list->list, list) {
-+ if (ether_addr_equal(ha->addr, addr)) {
-+ netif_addr_unlock_bh(netdev);
-+ return true;
-+ }
-+ }
-+ netif_addr_unlock_bh(netdev);
-+ return false;
-+}
-+
-+static inline int evb_port_fdb_prep(struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 *vid,
-+ bool del)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct evb_priv *evb_priv = port_priv->evb_priv;
-+
-+ *vid = 0;
-+
-+ if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
-+ evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
-+ netdev_err(netdev,
-+ "EVB mode does not support MAC classification\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ /* check if the address is configured on this port */
-+ if (_lookup_address(netdev, addr)) {
-+ if (!del)
-+ return -EEXIST;
-+ } else {
-+ if (del)
-+ return -ENOENT;
-+ }
-+
-+ if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
-+ if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
-+ netdev_err(netdev, "invalid vlan size %d\n",
-+ nla_len(tb[NDA_VLAN]));
-+ return -EINVAL;
-+ }
-+
-+ *vid = nla_get_u16(tb[NDA_VLAN]);
-+
-+ if (!*vid || *vid >= VLAN_VID_MASK) {
-+ netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
-+ return -EINVAL;
-+ }
-+ } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
-+ netdev_err(netdev,
-+ "EVB mode requires explicit VLAN configuration\n");
-+ return -EINVAL;
-+ } else if (tb[NDA_VLAN]) {
-+ netdev_warn(netdev, "VLAN not supported, argument ignored\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid, u16 flags)
-+{
-+ u16 _vid;
-+ int err;
-+
-+ /* TODO: add replace support when added to iproute bridge */
-+ if (!(flags & NLM_F_REQUEST)) {
-+ netdev_err(netdev,
-+ "evb_port_fdb_add unexpected flags value %08x\n",
-+ flags);
-+ return -EINVAL;
-+ }
-+
-+ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
-+ if (unlikely(err))
-+ return err;
-+
-+ err = evb_port_add_rule(netdev, addr, _vid);
-+ if (unlikely(err))
-+ return err;
-+
-+ if (is_unicast_ether_addr(addr)) {
-+ err = dev_uc_add(netdev, addr);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "dev_uc_add err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ err = dev_mc_add(netdev, addr);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "dev_mc_add err %d\n", err);
-+ return err;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid)
-+{
-+ u16 _vid;
-+ int err;
-+
-+ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
-+ if (unlikely(err))
-+ return err;
-+
-+ err = evb_port_del_rule(netdev, addr, _vid);
-+ if (unlikely(err))
-+ return err;
-+
-+ if (is_unicast_ether_addr(addr)) {
-+ err = dev_uc_del(netdev, addr);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "dev_uc_del err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ err = dev_mc_del(netdev, addr);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "dev_mc_del err %d\n", err);
-+ return err;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int evb_change_mtu(struct net_device *netdev,
-+ int mtu)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct evb_priv *evb_priv = port_priv->evb_priv;
-+ struct list_head *pos;
-+ int err = 0;
-+
-+ /* This operation is not permitted on downlinks */
-+ if (port_priv->port_index > 0)
-+ return -EPERM;
-+
-+ err = dpdmux_set_max_frame_length(evb_priv->mc_io,
-+ 0,
-+ evb_priv->mux_handle,
-+ (uint16_t)(mtu + VLAN_ETH_HLEN));
-+
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ /* Update the max frame length for downlinks */
-+ list_for_each(pos, &evb_priv->port_list) {
-+ port_priv = list_entry(pos, struct evb_port_priv, list);
-+ port_priv->netdev->mtu = mtu;
-+ }
-+
-+ netdev->mtu = mtu;
-+ return 0;
-+}
-+
-+static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
-+ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
-+ .len = sizeof(struct bridge_vlan_info), },
-+};
-+
-+static int evb_setlink_af_spec(struct net_device *netdev,
-+ struct nlattr **tb)
-+{
-+ struct bridge_vlan_info *vinfo;
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ int err = 0;
-+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
-+ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
-+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
-+
-+ err = evb_port_add_rule(netdev, NULL, vinfo->vid);
-+ if (unlikely(err))
-+ return err;
-+
-+ port_priv->vlans[vinfo->vid] = 1;
-+
-+ return 0;
-+}
-+
-+static int evb_setlink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct evb_priv *evb_priv = port_priv->evb_priv;
-+ struct nlattr *attr;
-+ struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
-+ IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
-+ int err = 0;
-+
-+ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
-+ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
-+ netdev_err(netdev,
-+ "EVB mode does not support VLAN only classification\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (attr) {
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
-+ ifla_br_policy, NULL);
-+ if (unlikely(err)) {
-+ netdev_err(netdev,
-+ "nla_parse_nested for br_policy err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ err = evb_setlink_af_spec(netdev, tb);
-+ return err;
-+ }
-+
-+ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
-+ return -EOPNOTSUPP;
-+}
-+
-+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct evb_priv *evb_priv = port_priv->evb_priv;
-+ u8 operstate = netif_running(netdev) ?
-+ netdev->operstate : IF_OPER_DOWN;
-+ int iflink;
-+ int err;
-+
-+ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ if (netdev->addr_len) {
-+ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
-+ netdev->dev_addr);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ }
-+
-+ iflink = dev_get_iflink(netdev);
-+ if (netdev->ifindex != iflink) {
-+ err = nla_put_u32(skb, IFLA_LINK, iflink);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ }
-+
-+ return 0;
-+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ return err;
-+}
-+
-+static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
-+{
-+ struct nlattr *nest;
-+ int err;
-+
-+ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ nla_nest_end(skb, nest);
-+
-+ return 0;
-+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
-+ return err;
-+}
-+
-+static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct nlattr *nest;
-+ struct bridge_vlan_info vinfo;
-+ const u8 *vlans = port_priv->vlans;
-+ u16 i;
-+ int err;
-+
-+ nest = nla_nest_start(skb, IFLA_AF_SPEC);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed");
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < VLAN_VID_MASK + 1; i++) {
-+ if (!vlans[i])
-+ continue;
-+
-+ vinfo.flags = 0;
-+ vinfo.vid = i;
-+
-+ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
-+ sizeof(vinfo), &vinfo);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ }
-+
-+ nla_nest_end(skb, nest);
-+
-+ return 0;
-+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
-+ return err;
-+}
-+
-+static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-+ struct net_device *netdev, u32 filter_mask, int nlflags)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct evb_priv *evb_priv = port_priv->evb_priv;
-+ struct ifinfomsg *hdr;
-+ struct nlmsghdr *nlh;
-+ int err;
-+
-+ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
-+ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
-+ return 0;
-+ }
-+
-+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
-+ if (!nlh)
-+ return -EMSGSIZE;
-+
-+ hdr = nlmsg_data(nlh);
-+ memset(hdr, 0, sizeof(*hdr));
-+ hdr->ifi_family = AF_BRIDGE;
-+ hdr->ifi_type = netdev->type;
-+ hdr->ifi_index = netdev->ifindex;
-+ hdr->ifi_flags = dev_get_flags(netdev);
-+
-+ err = __nla_put_netdev(skb, netdev);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+
-+ err = __nla_put_port(skb, netdev);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+
-+ /* Check if the VID information is requested */
-+ if (filter_mask & RTEXT_FILTER_BRVLAN) {
-+ err = __nla_put_vlan(skb, netdev);
-+ if (unlikely(err))
-+ goto nla_put_err;
-+ }
-+
-+ nlmsg_end(skb, nlh);
-+ return skb->len;
-+
-+nla_put_err:
-+ nlmsg_cancel(skb, nlh);
-+ return -EMSGSIZE;
-+}
-+
-+static int evb_dellink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
-+{
-+ struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
-+ struct nlattr *spec;
-+ struct bridge_vlan_info *vinfo;
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ int err = 0;
-+
-+ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (!spec)
-+ return 0;
-+
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy, NULL);
-+ if (unlikely(err))
-+ return err;
-+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO])
-+ return -EOPNOTSUPP;
-+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
-+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
-+
-+ err = evb_port_del_rule(netdev, NULL, vinfo->vid);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "evb_port_del_rule err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vinfo->vid] = 0;
-+
-+ return 0;
-+}
-+
-+void evb_port_get_stats(struct net_device *netdev,
-+ struct rtnl_link_stats64 *storage)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ u64 tmp;
-+ int err;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
-+ if (unlikely(err))
-+ goto error;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
-+ if (unlikely(err))
-+ goto error;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
-+ if (unlikely(err))
-+ goto error;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_ING_FRAME_DISCARD,
-+ &storage->rx_dropped);
-+ if (unlikely(err)) {
-+ storage->rx_dropped = tmp;
-+ goto error;
-+ }
-+ storage->rx_dropped += tmp;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_ING_MCAST_FRAME,
-+ &storage->multicast);
-+ if (unlikely(err))
-+ goto error;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
-+ if (unlikely(err))
-+ goto error;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
-+ if (unlikely(err))
-+ goto error;
-+
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ DPDMUX_CNT_EGR_FRAME_DISCARD,
-+ &storage->tx_dropped);
-+ if (unlikely(err))
-+ goto error;
-+
-+ return;
-+
-+error:
-+ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
-+}
-+
-+static const struct net_device_ops evb_port_ops = {
-+ .ndo_open = &evb_port_open,
-+
-+ .ndo_start_xmit = &evb_dropframe,
-+
-+ .ndo_fdb_add = &evb_port_fdb_add,
-+ .ndo_fdb_del = &evb_port_fdb_del,
-+
-+ .ndo_get_stats64 = &evb_port_get_stats,
-+ .ndo_change_mtu = &evb_change_mtu,
-+};
-+
-+static void evb_get_drvinfo(struct net_device *netdev,
-+ struct ethtool_drvinfo *drvinfo)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ u16 version_major, version_minor;
-+ int err;
-+
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
-+
-+ err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
-+ &version_major,
-+ &version_minor);
-+ if (err)
-+ strlcpy(drvinfo->fw_version, "N/A",
-+ sizeof(drvinfo->fw_version));
-+ else
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%u.%u", version_major, version_minor);
-+
-+ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
-+}
-+
-+static int evb_get_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpdmux_link_state state = {0};
-+ int err = 0;
-+
-+ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
-+ }
-+
-+	/* At the moment, we have no way of interrogating the DPMAC
-+	 * from the DPDMUX side, and a DPMAC may not exist at all.
-+	 * Report only the autoneg state, duplex and speed.
-+	 */
-+ if (state.options & DPDMUX_LINK_OPT_AUTONEG)
-+ cmd->autoneg = AUTONEG_ENABLE;
-+ if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
-+ cmd->duplex = DUPLEX_FULL;
-+ ethtool_cmd_speed_set(cmd, state.rate);
-+
-+out:
-+ return err;
-+}
-+
-+static int evb_set_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpdmux_link_state state = {0};
-+ struct dpdmux_link_cfg cfg = {0};
-+ int err = 0;
-+
-+ netdev_dbg(netdev, "Setting link parameters...");
-+
-+ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
-+ }
-+
-+	/* Due to a temporary MC limitation, the DPDMUX port must be down
-+	 * before its link settings can be changed, so let the user know
-+	 * about this.
-+	 */
-+ if (netif_running(netdev)) {
-+ netdev_info(netdev,
-+ "Sorry, interface must be brought down first.\n");
-+ return -EACCES;
-+ }
-+
-+ cfg.options = state.options;
-+ cfg.rate = ethtool_cmd_speed(cmd);
-+ if (cmd->autoneg == AUTONEG_ENABLE)
-+ cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
-+ else
-+ cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
-+ if (cmd->duplex == DUPLEX_HALF)
-+ cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
-+ else
-+ cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
-+
-+ err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ &cfg);
-+ if (err)
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
-+
-+out:
-+ return err;
-+}
-+
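As the comment in evb_set_settings() above notes, link parameters can only be changed while the DPDMUX port is down (the callback returns -EACCES otherwise). A minimal userspace sketch of that sequence, using the legacy ETHTOOL_GSET/ETHTOOL_SSET ioctls this 4.14-era driver wires up; the port name "evb0p1" is only an assumed example:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int set_evb_port_speed(void)
{
	struct ifreq ifr = { 0 };
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* Hypothetical port name; one of the evb%dp%d interfaces. */
	strncpy(ifr.ifr_name, "evb0p1", IFNAMSIZ - 1);

	/* evb_set_settings() refuses changes while the port is up,
	 * so clear IFF_UP first.
	 */
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
		return -1;
	ifr.ifr_flags &= ~IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
		return -1;

	/* Read the current settings, then request 1G full duplex. */
	ifr.ifr_data = (void *)&ecmd;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;
	ethtool_cmd_speed_set(&ecmd, SPEED_1000);
	ecmd.duplex = DUPLEX_FULL;
	ecmd.autoneg = AUTONEG_DISABLE;
	ecmd.cmd = ETHTOOL_SSET;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

The stock ethtool utility's -s option lands in the same evb_set_settings() callback, so bringing the port down first is required there as well.
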
-+static struct {
-+ enum dpdmux_counter_type id;
-+ char name[ETH_GSTRING_LEN];
-+} evb_ethtool_counters[] = {
-+ {DPDMUX_CNT_ING_FRAME, "rx frames"},
-+ {DPDMUX_CNT_ING_BYTE, "rx bytes"},
-+ {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
-+ {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
-+ {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
-+ {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
-+ {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
-+ {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
-+ {DPDMUX_CNT_EGR_FRAME, "tx frames"},
-+ {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
-+ {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
-+};
-+
-+static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
-+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ARRAY_SIZE(evb_ethtool_counters);
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static void evb_ethtool_get_strings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
-+{
-+ u32 i;
-+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
-+ break;
-+ }
-+}
-+
-+static void evb_ethtool_get_stats(struct net_device *netdev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
-+{
-+ struct evb_port_priv *port_priv = netdev_priv(netdev);
-+ u32 i;
-+ int err;
-+
-+ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
-+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
-+ 0,
-+ port_priv->evb_priv->mux_handle,
-+ port_priv->port_index,
-+ evb_ethtool_counters[i].id,
-+ &data[i]);
-+ if (err)
-+ netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
-+ evb_ethtool_counters[i].name, err);
-+ }
-+}
-+
-+static const struct ethtool_ops evb_port_ethtool_ops = {
-+ .get_drvinfo = &evb_get_drvinfo,
-+ .get_link = &ethtool_op_get_link,
-+ .get_settings = &evb_get_settings,
-+ .set_settings = &evb_set_settings,
-+ .get_strings = &evb_ethtool_get_strings,
-+ .get_ethtool_stats = &evb_ethtool_get_stats,
-+ .get_sset_count = &evb_ethtool_get_sset_count,
-+};
-+
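The counters registered in evb_ethtool_counters[] and served by the get_sset_count/get_strings/get_ethtool_stats callbacks above surface through the standard ethtool stats ioctls. A userspace sketch that dumps them, again assuming a hypothetical port named "evb0p1":

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int evb_ethtool_ioctl(int fd, struct ifreq *ifr, void *cmd)
{
	ifr->ifr_data = cmd;
	return ioctl(fd, SIOCETHTOOL, ifr);
}

int dump_evb_port_stats(void)
{
	struct ifreq ifr = { 0 };
	struct ethtool_sset_info *sset;
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	__u32 i, n;

	if (fd < 0)
		return -1;
	/* Hypothetical port name; one of the evb%dp%d interfaces. */
	strncpy(ifr.ifr_name, "evb0p1", IFNAMSIZ - 1);

	/* How many stats? Answered by evb_ethtool_get_sset_count(). */
	sset = calloc(1, sizeof(*sset) + sizeof(__u32));
	if (!sset)
		return -1;
	sset->cmd = ETHTOOL_GSSET_INFO;
	sset->sset_mask = 1ULL << ETH_SS_STATS;
	if (evb_ethtool_ioctl(fd, &ifr, sset) < 0)
		return -1;
	n = sset->data[0];

	/* Counter names, filled in by evb_ethtool_get_strings(). */
	strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
	/* Counter values, filled in by evb_ethtool_get_stats(). */
	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	if (!strings || !stats)
		return -1;
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	if (evb_ethtool_ioctl(fd, &ifr, strings) < 0 ||
	    evb_ethtool_ioctl(fd, &ifr, stats) < 0)
		return -1;

	for (i = 0; i < n; i++)
		printf("%-32.32s %llu\n",
		       (char *)strings->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);
	return 0;
}

This is essentially what "ethtool -S" prints for the port.
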
-+static int evb_open(struct net_device *netdev)
-+{
-+ struct evb_priv *priv = netdev_priv(netdev);
-+ int err = 0;
-+
-+ err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
-+ if (unlikely(err))
-+ netdev_err(netdev, "dpdmux_enable err %d\n", err);
-+
-+ return err;
-+}
-+
-+static int evb_close(struct net_device *netdev)
-+{
-+ struct evb_priv *priv = netdev_priv(netdev);
-+ int err = 0;
-+
-+ err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
-+ if (unlikely(err))
-+ netdev_err(netdev, "dpdmux_disable err %d\n", err);
-+
-+ return err;
-+}
-+
-+static const struct net_device_ops evb_ops = {
-+ .ndo_start_xmit = &evb_dropframe,
-+ .ndo_open = &evb_open,
-+ .ndo_stop = &evb_close,
-+
-+ .ndo_bridge_setlink = &evb_setlink,
-+ .ndo_bridge_getlink = &evb_getlink,
-+ .ndo_bridge_dellink = &evb_dellink,
-+
-+ .ndo_get_stats64 = &evb_port_get_stats,
-+ .ndo_change_mtu = &evb_change_mtu,
-+};
-+
-+static int evb_takedown(struct fsl_mc_device *evb_dev)
-+{
-+ struct device *dev = &evb_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct evb_priv *priv = netdev_priv(netdev);
-+ int err;
-+
-+ err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
-+ if (unlikely(err))
-+ dev_warn(dev, "dpdmux_close err %d\n", err);
-+
-+ return 0;
-+}
-+
-+static int evb_init(struct fsl_mc_device *evb_dev)
-+{
-+ struct device *dev = &evb_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct evb_priv *priv = netdev_priv(netdev);
-+ u16 version_major;
-+ u16 version_minor;
-+ int err = 0;
-+
-+ priv->dev_id = evb_dev->obj_desc.id;
-+
-+ err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpdmux_open err %d\n", err);
-+ goto err_exit;
-+ }
-+ if (!priv->mux_handle) {
-+ dev_err(dev, "dpdmux_open returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_exit;
-+ }
-+
-+ err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
-+ &priv->attr);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpdmux_get_attributes err %d\n", err);
-+ goto err_close;
-+ }
-+
-+ err = dpdmux_get_api_version(priv->mc_io, 0,
-+ &version_major,
-+ &version_minor);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpdmux_get_api_version err %d\n", err);
-+ goto err_close;
-+ }
-+
-+ /* Minimum supported DPDMUX version check */
-+ if (version_major < DPDMUX_MIN_VER_MAJOR ||
-+ (version_major == DPDMUX_MIN_VER_MAJOR &&
-+ version_minor < DPDMUX_MIN_VER_MINOR)) {
-+ dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
-+ version_major, version_minor,
-+			DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
-+ err = -ENOTSUPP;
-+ goto err_close;
-+ }
-+
-+ err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpdmux_reset err %d\n", err);
-+ goto err_close;
-+ }
-+
-+ return 0;
-+
-+err_close:
-+ dpdmux_close(priv->mc_io, 0, priv->mux_handle);
-+err_exit:
-+ return err;
-+}
-+
-+static int evb_remove(struct fsl_mc_device *evb_dev)
-+{
-+ struct device *dev = &evb_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct evb_priv *priv = netdev_priv(netdev);
-+ struct evb_port_priv *port_priv;
-+ struct list_head *pos;
-+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct evb_port_priv, list);
-+
-+ rtnl_lock();
-+ netdev_upper_dev_unlink(port_priv->netdev, netdev);
-+ rtnl_unlock();
-+
-+ unregister_netdev(port_priv->netdev);
-+ free_netdev(port_priv->netdev);
-+ }
-+
-+ evb_teardown_irqs(evb_dev);
-+
-+ unregister_netdev(netdev);
-+
-+ evb_takedown(evb_dev);
-+ fsl_mc_portal_free(priv->mc_io);
-+
-+ dev_set_drvdata(dev, NULL);
-+ free_netdev(netdev);
-+
-+ return 0;
-+}
-+
-+static int evb_probe(struct fsl_mc_device *evb_dev)
-+{
-+ struct device *dev;
-+ struct evb_priv *priv = NULL;
-+ struct net_device *netdev = NULL;
-+ char port_name[IFNAMSIZ];
-+ int i;
-+ int err = 0;
-+
-+ dev = &evb_dev->dev;
-+
-+ /* register switch device, it's for management only - no I/O */
-+ netdev = alloc_etherdev(sizeof(*priv));
-+ if (!netdev) {
-+ dev_err(dev, "alloc_etherdev error\n");
-+ return -ENOMEM;
-+ }
-+ netdev->netdev_ops = &evb_ops;
-+
-+ dev_set_drvdata(dev, netdev);
-+
-+ priv = netdev_priv(netdev);
-+
-+ err = fsl_mc_portal_allocate(evb_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
-+ &priv->mc_io);
-+ if (err) {
-+ if (err == -ENXIO)
-+ err = -EPROBE_DEFER;
-+ else
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
-+ goto err_free_netdev;
-+ }
-+
-+ if (!priv->mc_io) {
-+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_free_netdev;
-+ }
-+
-+ err = evb_init(evb_dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "evb init err %d\n", err);
-+ goto err_free_cmdport;
-+ }
-+
-+ INIT_LIST_HEAD(&priv->port_list);
-+ netdev->flags |= IFF_PROMISC | IFF_MASTER;
-+
-+ dev_alloc_name(netdev, "evb%d");
-+
-+ /* register switch ports */
-+ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
-+
-+ /* only register downlinks? */
-+ for (i = 0; i < priv->attr.num_ifs + 1; i++) {
-+ struct net_device *port_netdev;
-+ struct evb_port_priv *port_priv;
-+
-+ if (i) {
-+ port_netdev =
-+ alloc_etherdev(sizeof(struct evb_port_priv));
-+ if (!port_netdev) {
-+ dev_err(dev, "alloc_etherdev error\n");
-+ goto err_takedown;
-+ }
-+
-+ port_priv = netdev_priv(port_netdev);
-+
-+ port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
-+
-+ dev_alloc_name(port_netdev, port_name);
-+ } else {
-+ port_netdev = netdev;
-+ port_priv = &priv->uplink;
-+ }
-+
-+ port_priv->netdev = port_netdev;
-+ port_priv->evb_priv = priv;
-+ port_priv->port_index = i;
-+
-+ SET_NETDEV_DEV(port_netdev, dev);
-+
-+ if (i) {
-+ port_netdev->netdev_ops = &evb_port_ops;
-+
-+ err = register_netdev(port_netdev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev err %d\n", err);
-+ free_netdev(port_netdev);
-+ goto err_takedown;
-+ }
-+
-+ rtnl_lock();
-+ err = netdev_master_upper_dev_link(port_netdev, netdev,
-+ NULL, NULL);
-+ if (unlikely(err)) {
-+ dev_err(dev, "netdev_master_upper_dev_link err %d\n",
-+ err);
-+ unregister_netdev(port_netdev);
-+ free_netdev(port_netdev);
-+ rtnl_unlock();
-+ goto err_takedown;
-+ }
-+ rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
-+ IFF_SLAVE, GFP_KERNEL);
-+ rtnl_unlock();
-+
-+ list_add(&port_priv->list, &priv->port_list);
-+ } else {
-+ /* Set MTU limits only on uplink */
-+ port_netdev->min_mtu = EVB_MIN_MTU;
-+ port_netdev->max_mtu = EVB_MAX_MTU;
-+
-+ err = register_netdev(netdev);
-+
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev error %d\n", err);
-+ goto err_takedown;
-+ }
-+ }
-+
-+ port_netdev->ethtool_ops = &evb_port_ethtool_ops;
-+
-+ /* ports are up from init */
-+ rtnl_lock();
-+ err = dev_open(port_netdev);
-+ rtnl_unlock();
-+ if (unlikely(err))
-+ dev_warn(dev, "dev_open err %d\n", err);
-+ }
-+
-+ /* setup irqs */
-+ err = evb_setup_irqs(evb_dev);
-+ if (unlikely(err)) {
-+ dev_warn(dev, "evb_setup_irqs err %d\n", err);
-+ goto err_takedown;
-+ }
-+
-+ dev_info(dev, "probed evb device with %d ports\n",
-+ priv->attr.num_ifs);
-+ return 0;
-+
-+err_takedown:
-+ evb_remove(evb_dev);
-+err_free_cmdport:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_free_netdev:
-+ return err;
-+}
-+
-+static const struct fsl_mc_device_id evb_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpdmux",
-+ },
-+ {}
-+};
-+
-+static struct fsl_mc_driver evb_drv = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = evb_probe,
-+ .remove = evb_remove,
-+ .match_id_table = evb_match_id_table,
-+};
-+
-+module_fsl_mc_driver(evb_drv);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
diff --git a/target/linux/layerscape/patches-4.14/707-dpaa-ethernet-support-layerscape.patch b/target/linux/layerscape/patches-4.14/707-dpaa-ethernet-support-layerscape.patch
deleted file mode 100644
index 3e512cb357..0000000000
--- a/target/linux/layerscape/patches-4.14/707-dpaa-ethernet-support-layerscape.patch
+++ /dev/null
@@ -1,156554 +0,0 @@
-From b443452fe13292b12295757f57e04c04834b3fc0 Mon Sep 17 00:00:00 2001
-From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 22 May 2019 17:49:18 +0800
-Subject: [PATCH] dpaa-ethernet: support layerscape
-
-This is an integrated patch of dpaa-ethernet for layerscape
-
-Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
-Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
-Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
-Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: Iordache Florinel-R70177 <florinel.iordache@nxp.com>
-Signed-off-by: Jake Moroni <mail@jakemoroni.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
-Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
-Signed-off-by: Radu Bulie <radu-andrei.bulie@nxp.com>
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Vakul Garg <vakul.garg@nxp.com>
-Signed-off-by: Vicentiu Galanopulo <vicentiu.galanopulo@nxp.com>
-Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
-Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: yuan linyu <Linyu.Yuan@alcatel-sbell.com.cn>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
----
- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 616 +-
- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3 +
- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 41 +-
- drivers/net/ethernet/freescale/fman/Kconfig | 1 -
- drivers/net/ethernet/freescale/fman/Makefile | 12 +-
- drivers/net/ethernet/freescale/fman/fman.c | 38 +-
- drivers/net/ethernet/freescale/fman/fman.h | 5 +
- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 46 +
- drivers/net/ethernet/freescale/fman/fman_dtsec.h | 2 +
- drivers/net/ethernet/freescale/fman/fman_memac.c | 37 +-
- drivers/net/ethernet/freescale/fman/fman_memac.h | 2 +
- drivers/net/ethernet/freescale/fman/fman_port.c | 28 +
- drivers/net/ethernet/freescale/fman/fman_port.h | 4 +
- drivers/net/ethernet/freescale/fman/fman_tgec.c | 54 +-
- drivers/net/ethernet/freescale/fman/fman_tgec.h | 2 +
- drivers/net/ethernet/freescale/fman/mac.c | 152 +-
- drivers/net/ethernet/freescale/fman/mac.h | 9 +-
- drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 184 +
- drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 45 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1223 ++++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 674 ++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 205 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 49 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 2076 ++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 241 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1745 +++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 226 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1195 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 587 ++
- drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 931 +++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 490 ++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 134 +
- .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
- drivers/net/ethernet/freescale/sdk_fman/Kconfig | 153 +
- drivers/net/ethernet/freescale/sdk_fman/Makefile | 11 +
- .../freescale/sdk_fman/Peripherals/FM/HC/Makefile | 15 +
- .../freescale/sdk_fman/Peripherals/FM/HC/hc.c | 1232 ++++
- .../freescale/sdk_fman/Peripherals/FM/MAC/Makefile | 28 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c | 1504 ++++
- .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h | 228 +
- .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c | 97 +
- .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h | 42 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c | 674 ++
- .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h | 226 +
- .../sdk_fman/Peripherals/FM/MAC/fman_crc32.c | 119 +
- .../sdk_fman/Peripherals/FM/MAC/fman_crc32.h | 43 +
- .../sdk_fman/Peripherals/FM/MAC/fman_dtsec.c | 847 +++
- .../Peripherals/FM/MAC/fman_dtsec_mii_acc.c | 165 +
- .../sdk_fman/Peripherals/FM/MAC/fman_memac.c | 532 ++
- .../Peripherals/FM/MAC/fman_memac_mii_acc.c | 215 +
- .../sdk_fman/Peripherals/FM/MAC/fman_tgec.c | 367 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 1166 +++
- .../freescale/sdk_fman/Peripherals/FM/MAC/memac.h | 110 +
- .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c | 78 +
- .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h | 73 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.c | 1017 +++
- .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.h | 151 +
- .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c | 139 +
- .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h | 80 +
- .../sdk_fman/Peripherals/FM/MACSEC/Makefile | 15 +
- .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c | 237 +
- .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h | 203 +
- .../Peripherals/FM/MACSEC/fm_macsec_guest.c | 59 +
- .../Peripherals/FM/MACSEC/fm_macsec_master.c | 1031 +++
- .../Peripherals/FM/MACSEC/fm_macsec_master.h | 479 ++
- .../Peripherals/FM/MACSEC/fm_macsec_secy.c | 883 +++
- .../Peripherals/FM/MACSEC/fm_macsec_secy.h | 144 +
- .../freescale/sdk_fman/Peripherals/FM/Makefile | 23 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/Makefile | 26 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h | 360 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7582 ++++++++++++++++++++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c | 3242 +++++++++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h | 206 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 ++++++++++++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_manip.h | 555 ++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2095 ++++++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h | 543 ++
- .../sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h | 280 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1847 +++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h | 165 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 423 ++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h | 316 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_replic.c | 984 +++
- .../sdk_fman/Peripherals/FM/Pcd/fm_replic.h | 101 +
- .../sdk_fman/Peripherals/FM/Pcd/fman_kg.c | 890 +++
- .../sdk_fman/Peripherals/FM/Pcd/fman_prs.c | 129 +
- .../sdk_fman/Peripherals/FM/Port/Makefile | 15 +
- .../sdk_fman/Peripherals/FM/Port/fm_port.c | 6437 +++++++++++++++++
- .../sdk_fman/Peripherals/FM/Port/fm_port.h | 999 +++
- .../sdk_fman/Peripherals/FM/Port/fm_port_dsar.h | 494 ++
- .../sdk_fman/Peripherals/FM/Port/fm_port_im.c | 753 ++
- .../sdk_fman/Peripherals/FM/Port/fman_port.c | 1570 ++++
- .../freescale/sdk_fman/Peripherals/FM/Rtc/Makefile | 15 +
- .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c | 692 ++
- .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h | 96 +
- .../sdk_fman/Peripherals/FM/Rtc/fman_rtc.c | 334 +
- .../freescale/sdk_fman/Peripherals/FM/SP/Makefile | 15 +
- .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c | 757 ++
- .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h | 85 +
- .../freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c | 197 +
- .../freescale/sdk_fman/Peripherals/FM/fm.c | 5216 ++++++++++++++
- .../freescale/sdk_fman/Peripherals/FM/fm.h | 648 ++
- .../freescale/sdk_fman/Peripherals/FM/fm_ipc.h | 465 ++
- .../freescale/sdk_fman/Peripherals/FM/fm_muram.c | 174 +
- .../freescale/sdk_fman/Peripherals/FM/fman.c | 1400 ++++
- .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1214 ++++
- .../freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h | 93 +
- .../sdk_fman/Peripherals/FM/inc/fm_sp_common.h | 117 +
- .../net/ethernet/freescale/sdk_fman/etc/Makefile | 12 +
- .../net/ethernet/freescale/sdk_fman/etc/error.c | 95 +
- drivers/net/ethernet/freescale/sdk_fman/etc/list.c | 71 +
- .../net/ethernet/freescale/sdk_fman/etc/memcpy.c | 620 ++
- drivers/net/ethernet/freescale/sdk_fman/etc/mm.c | 1155 +++
- drivers/net/ethernet/freescale/sdk_fman/etc/mm.h | 105 +
- .../net/ethernet/freescale/sdk_fman/etc/sprint.c | 81 +
- .../ethernet/freescale/sdk_fman/fmanv3h_dflags.h | 57 +
- .../ethernet/freescale/sdk_fman/fmanv3l_dflags.h | 56 +
- .../sdk_fman/inc/Peripherals/crc_mac_addr_ext.h | 364 +
- .../freescale/sdk_fman/inc/Peripherals/dpaa_ext.h | 210 +
- .../freescale/sdk_fman/inc/Peripherals/fm_ext.h | 1731 +++++
- .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 887 +++
- .../sdk_fman/inc/Peripherals/fm_macsec_ext.h | 1271 ++++
- .../sdk_fman/inc/Peripherals/fm_muram_ext.h | 170 +
- .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 ++++++++++
- .../sdk_fman/inc/Peripherals/fm_port_ext.h | 2608 +++++++
- .../sdk_fman/inc/Peripherals/fm_rtc_ext.h | 619 ++
- .../sdk_fman/inc/Peripherals/fm_vsp_ext.h | 411 ++
- .../sdk_fman/inc/Peripherals/mii_acc_ext.h | 76 +
- .../net/ethernet/freescale/sdk_fman/inc/core_ext.h | 90 +
- .../freescale/sdk_fman/inc/cores/arm_ext.h | 55 +
- .../freescale/sdk_fman/inc/cores/e500v2_ext.h | 476 ++
- .../freescale/sdk_fman/inc/cores/ppc_ext.h | 141 +
- .../ethernet/freescale/sdk_fman/inc/ddr_std_ext.h | 77 +
- .../ethernet/freescale/sdk_fman/inc/debug_ext.h | 233 +
- .../ethernet/freescale/sdk_fman/inc/endian_ext.h | 447 ++
- .../net/ethernet/freescale/sdk_fman/inc/enet_ext.h | 205 +
- .../ethernet/freescale/sdk_fman/inc/error_ext.h | 529 ++
- .../ethernet/freescale/sdk_fman/inc/etc/list_ext.h | 358 +
- .../ethernet/freescale/sdk_fman/inc/etc/mem_ext.h | 318 +
- .../freescale/sdk_fman/inc/etc/memcpy_ext.h | 208 +
- .../ethernet/freescale/sdk_fman/inc/etc/mm_ext.h | 310 +
- .../freescale/sdk_fman/inc/etc/sprint_ext.h | 118 +
- .../sdk_fman/inc/flib/common/arch/ppc_access.h | 37 +
- .../freescale/sdk_fman/inc/flib/common/general.h | 52 +
- .../freescale/sdk_fman/inc/flib/fman_common.h | 78 +
- .../freescale/sdk_fman/inc/flib/fsl_enet.h | 273 +
- .../freescale/sdk_fman/inc/flib/fsl_fman.h | 825 +++
- .../freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h | 1096 +++
- .../sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h | 107 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_kg.h | 514 ++
- .../freescale/sdk_fman/inc/flib/fsl_fman_memac.h | 434 ++
- .../sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h | 78 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_port.h | 593 ++
- .../freescale/sdk_fman/inc/flib/fsl_fman_prs.h | 102 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_rtc.h | 449 ++
- .../freescale/sdk_fman/inc/flib/fsl_fman_sp.h | 138 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_tgec.h | 479 ++
- .../integrations/FMANV3H/dpaa_integration_ext.h | 291 +
- .../sdk_fman/inc/integrations/FMANV3H/part_ext.h | 71 +
- .../integrations/FMANV3H/part_integration_ext.h | 304 +
- .../integrations/FMANV3L/dpaa_integration_ext.h | 293 +
- .../sdk_fman/inc/integrations/FMANV3L/part_ext.h | 59 +
- .../integrations/FMANV3L/part_integration_ext.h | 304 +
- .../inc/integrations/LS1043/dpaa_integration_ext.h | 291 +
- .../sdk_fman/inc/integrations/LS1043/part_ext.h | 64 +
- .../inc/integrations/LS1043/part_integration_ext.h | 185 +
- .../inc/integrations/P1023/dpaa_integration_ext.h | 213 +
- .../sdk_fman/inc/integrations/P1023/part_ext.h | 82 +
- .../inc/integrations/P1023/part_integration_ext.h | 635 ++
- .../P3040_P4080_P5020/dpaa_integration_ext.h | 276 +
- .../inc/integrations/P3040_P4080_P5020/part_ext.h | 83 +
- .../P3040_P4080_P5020/part_integration_ext.h | 336 +
- .../net/ethernet/freescale/sdk_fman/inc/math_ext.h | 100 +
- .../net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h | 435 ++
- .../net/ethernet/freescale/sdk_fman/inc/net_ext.h | 430 ++
- .../net/ethernet/freescale/sdk_fman/inc/std_ext.h | 48 +
- .../ethernet/freescale/sdk_fman/inc/stdarg_ext.h | 49 +
- .../ethernet/freescale/sdk_fman/inc/stdlib_ext.h | 162 +
- .../ethernet/freescale/sdk_fman/inc/string_ext.h | 56 +
- .../ethernet/freescale/sdk_fman/inc/types_ext.h | 62 +
- .../ethernet/freescale/sdk_fman/inc/xx_common.h | 56 +
- .../net/ethernet/freescale/sdk_fman/inc/xx_ext.h | 791 ++
- .../ethernet/freescale/sdk_fman/ls1043_dflags.h | 56 +
- .../net/ethernet/freescale/sdk_fman/ncsw_config.mk | 53 +
- .../net/ethernet/freescale/sdk_fman/p1023_dflags.h | 65 +
- .../freescale/sdk_fman/p3040_4080_5020_dflags.h | 62 +
- .../net/ethernet/freescale/sdk_fman/src/Makefile | 11 +
- .../freescale/sdk_fman/src/inc/system/sys_ext.h | 118 +
- .../freescale/sdk_fman/src/inc/system/sys_io_ext.h | 46 +
- .../freescale/sdk_fman/src/inc/types_linux.h | 208 +
- .../sdk_fman/src/inc/wrapper/fsl_fman_test.h | 84 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 130 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h | 163 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 921 +++
- .../ethernet/freescale/sdk_fman/src/inc/xx/xx.h | 50 +
- .../freescale/sdk_fman/src/system/Makefile | 10 +
- .../freescale/sdk_fman/src/system/sys_io.c | 171 +
- .../freescale/sdk_fman/src/wrapper/Makefile | 19 +
- .../freescale/sdk_fman/src/wrapper/fman_test.c | 1665 +++++
- .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.c | 2910 ++++++++
- .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.h | 294 +
- .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1512 ++++
- .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c | 4854 +++++++++++++
- .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c | 1297 ++++
- .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h | 755 ++
- .../sdk_fman/src/wrapper/lnxwrp_resources.h | 121 +
- .../sdk_fman/src/wrapper/lnxwrp_resources_ut.c | 191 +
- .../sdk_fman/src/wrapper/lnxwrp_resources_ut.h | 144 +
- .../sdk_fman/src/wrapper/lnxwrp_resources_ut.make | 28 +
- .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c | 60 +
- .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h | 60 +
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c | 1855 +++++
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h | 136 +
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c | 1268 ++++
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h | 56 +
- .../ethernet/freescale/sdk_fman/src/xx/Makefile | 18 +
- .../freescale/sdk_fman/src/xx/module_strings.c | 46 +
- .../freescale/sdk_fman/src/xx/xx_arm_linux.c | 905 +++
- .../ethernet/freescale/sdk_fman/src/xx/xx_linux.c | 918 +++
- drivers/staging/fsl_qbman/Kconfig | 228 +
- drivers/staging/fsl_qbman/Makefile | 28 +
- drivers/staging/fsl_qbman/bman_config.c | 720 ++
- drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
- drivers/staging/fsl_qbman/bman_driver.c | 559 ++
- drivers/staging/fsl_qbman/bman_high.c | 1145 +++
- drivers/staging/fsl_qbman/bman_low.h | 565 ++
- drivers/staging/fsl_qbman/bman_private.h | 166 +
- drivers/staging/fsl_qbman/bman_test.c | 56 +
- drivers/staging/fsl_qbman/bman_test.h | 44 +
- drivers/staging/fsl_qbman/bman_test_high.c | 183 +
- drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
- drivers/staging/fsl_qbman/dpa_alloc.c | 706 ++
- drivers/staging/fsl_qbman/dpa_sys.h | 259 +
- drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
- drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
- drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
- drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
- drivers/staging/fsl_qbman/fsl_usdpaa.c | 2008 ++++++
- drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 +
- drivers/staging/fsl_qbman/qbman_driver.c | 88 +
- drivers/staging/fsl_qbman/qman_config.c | 1224 ++++
- drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++
- drivers/staging/fsl_qbman/qman_driver.c | 961 +++
- drivers/staging/fsl_qbman/qman_high.c | 5655 +++++++++++++++
- drivers/staging/fsl_qbman/qman_low.h | 1445 ++++
- drivers/staging/fsl_qbman/qman_private.h | 398 +
- drivers/staging/fsl_qbman/qman_test.c | 57 +
- drivers/staging/fsl_qbman/qman_test.h | 45 +
- drivers/staging/fsl_qbman/qman_test_high.c | 216 +
- drivers/staging/fsl_qbman/qman_test_hotpotato.c | 502 ++
- drivers/staging/fsl_qbman/qman_utility.c | 129 +
- include/linux/fsl/svr.h | 97 +
- include/linux/fsl_bman.h | 532 ++
- include/linux/fsl_qman.h | 3910 ++++++++++
- include/linux/fsl_usdpaa.h | 372 +
- include/linux/netdev_features.h | 2 +
- include/uapi/linux/fmd/Kbuild | 5 +
- include/uapi/linux/fmd/Peripherals/Kbuild | 4 +
- include/uapi/linux/fmd/Peripherals/fm_ioctls.h | 628 ++
- include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h | 3084 ++++++++
- .../uapi/linux/fmd/Peripherals/fm_port_ioctls.h | 973 +++
- .../uapi/linux/fmd/Peripherals/fm_test_ioctls.h | 208 +
- include/uapi/linux/fmd/integrations/Kbuild | 1 +
- .../linux/fmd/integrations/integration_ioctls.h | 56 +
- include/uapi/linux/fmd/ioctls.h | 96 +
- include/uapi/linux/fmd/net_ioctls.h | 430 ++
- net/sched/sch_generic.c | 7 +
- 276 files changed, 153982 insertions(+), 277 deletions(-)
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Kconfig
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/hc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec_mii_acc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac.c
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac_mii_acc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_tgec.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_guest.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_kg.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_prs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_dsar.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_im.c
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fman_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fman_rtc.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_ipc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_muram.c
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_sp_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/error.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/list.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/memcpy.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/mm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/mm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/etc/sprint.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/fmanv3h_dflags.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/fmanv3l_dflags.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/crc_mac_addr_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/dpaa_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_macsec_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_muram_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_pcd_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_port_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_rtc_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_vsp_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/mii_acc_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/core_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/cores/arm_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/cores/e500v2_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/cores/ppc_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/ddr_std_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/debug_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/endian_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/enet_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/error_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/list_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/mem_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/memcpy_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/mm_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/etc/sprint_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/arch/ppc_access.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/general.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fman_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_enet.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_kg.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_port.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_prs.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_rtc.h
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_sp.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_tgec.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/dpaa_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/dpaa_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/dpaa_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/dpaa_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_integration_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/net_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/std_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/stdarg_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/stdlib_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/string_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/types_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/xx_common.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/inc/xx_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/ls1043_dflags.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/p1023_dflags.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/p3040_4080_5020_dflags.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_io_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/types_linux.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/fsl_fman_test.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/inc/xx/xx.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/system/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/system/sys_io.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/fman_test.c
- create mode 100755 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.make
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/Makefile
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_linux.c
- create mode 100644 drivers/staging/fsl_qbman/Kconfig
- create mode 100644 drivers/staging/fsl_qbman/Makefile
- create mode 100644 drivers/staging/fsl_qbman/bman_config.c
- create mode 100644 drivers/staging/fsl_qbman/bman_debugfs.c
- create mode 100644 drivers/staging/fsl_qbman/bman_driver.c
- create mode 100644 drivers/staging/fsl_qbman/bman_high.c
- create mode 100644 drivers/staging/fsl_qbman/bman_low.h
- create mode 100644 drivers/staging/fsl_qbman/bman_private.h
- create mode 100644 drivers/staging/fsl_qbman/bman_test.c
- create mode 100644 drivers/staging/fsl_qbman/bman_test.h
- create mode 100644 drivers/staging/fsl_qbman/bman_test_high.c
- create mode 100644 drivers/staging/fsl_qbman/bman_test_thresh.c
- create mode 100644 drivers/staging/fsl_qbman/dpa_alloc.c
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_arm64.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc32.h
- create mode 100644 drivers/staging/fsl_qbman/dpa_sys_ppc64.h
- create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa.c
- create mode 100644 drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
- create mode 100644 drivers/staging/fsl_qbman/qbman_driver.c
- create mode 100644 drivers/staging/fsl_qbman/qman_config.c
- create mode 100644 drivers/staging/fsl_qbman/qman_debugfs.c
- create mode 100644 drivers/staging/fsl_qbman/qman_driver.c
- create mode 100644 drivers/staging/fsl_qbman/qman_high.c
- create mode 100644 drivers/staging/fsl_qbman/qman_low.h
- create mode 100644 drivers/staging/fsl_qbman/qman_private.h
- create mode 100644 drivers/staging/fsl_qbman/qman_test.c
- create mode 100644 drivers/staging/fsl_qbman/qman_test.h
- create mode 100644 drivers/staging/fsl_qbman/qman_test_high.c
- create mode 100644 drivers/staging/fsl_qbman/qman_test_hotpotato.c
- create mode 100644 drivers/staging/fsl_qbman/qman_utility.c
- create mode 100644 include/linux/fsl/svr.h
- create mode 100644 include/linux/fsl_bman.h
- create mode 100644 include/linux/fsl_qman.h
- create mode 100644 include/linux/fsl_usdpaa.h
- create mode 100644 include/uapi/linux/fmd/Kbuild
- create mode 100644 include/uapi/linux/fmd/Peripherals/Kbuild
- create mode 100644 include/uapi/linux/fmd/Peripherals/fm_ioctls.h
- create mode 100644 include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
- create mode 100644 include/uapi/linux/fmd/Peripherals/fm_port_ioctls.h
- create mode 100644 include/uapi/linux/fmd/Peripherals/fm_test_ioctls.h
- create mode 100644 include/uapi/linux/fmd/integrations/Kbuild
- create mode 100644 include/uapi/linux/fmd/integrations/integration_ioctls.h
- create mode 100644 include/uapi/linux/fmd/ioctls.h
- create mode 100644 include/uapi/linux/fmd/net_ioctls.h
-
---- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
-+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
-@@ -50,9 +50,13 @@
- #include <linux/highmem.h>
- #include <linux/percpu.h>
- #include <linux/dma-mapping.h>
-+#include <linux/iommu.h>
- #include <linux/sort.h>
- #include <soc/fsl/bman.h>
- #include <soc/fsl/qman.h>
-+#if !defined(CONFIG_PPC) && defined(CONFIG_SOC_BUS)
-+#include <linux/sys_soc.h> /* soc_device_match */
-+#endif
-
- #include "fman.h"
- #include "fman_port.h"
-@@ -73,6 +77,10 @@ static u16 tx_timeout = 1000;
- module_param(tx_timeout, ushort, 0444);
- MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
-
-+#ifndef CONFIG_PPC
-+bool dpaa_errata_a010022;
-+#endif
-+
- #define FM_FD_STAT_RX_ERRORS \
- (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
- FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
-@@ -388,34 +396,19 @@ out:
-
- static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
- {
-- struct platform_device *of_dev;
- struct dpaa_eth_data *eth_data;
-- struct device *dpaa_dev, *dev;
-- struct device_node *mac_node;
-+ struct device *dpaa_dev;
- struct mac_device *mac_dev;
-
- dpaa_dev = &pdev->dev;
- eth_data = dpaa_dev->platform_data;
-- if (!eth_data)
-+ if (!eth_data) {
-+ dev_err(dpaa_dev, "eth_data missing\n");
- return ERR_PTR(-ENODEV);
--
-- mac_node = eth_data->mac_node;
--
-- of_dev = of_find_device_by_node(mac_node);
-- if (!of_dev) {
-- dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n",
-- mac_node);
-- of_node_put(mac_node);
-- return ERR_PTR(-EINVAL);
- }
-- of_node_put(mac_node);
--
-- dev = &of_dev->dev;
--
-- mac_dev = dev_get_drvdata(dev);
-+ mac_dev = eth_data->mac_dev;
- if (!mac_dev) {
-- dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
-- dev_name(dev));
-+ dev_err(dpaa_dev, "mac_dev missing\n");
- return ERR_PTR(-EINVAL);
- }
-
-@@ -472,6 +465,16 @@ static void dpaa_set_rx_mode(struct net_
- err);
- }
-
-+ if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
-+ priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
-+ err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
-+ priv->mac_dev->allmulti);
-+ if (err < 0)
-+ netif_err(priv, drv, net_dev,
-+ "mac_dev->set_allmulti() = %d\n",
-+ err);
-+ }
-+
- err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
- if (err < 0)
- netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
-@@ -1176,7 +1179,7 @@ static int dpaa_eth_init_tx_port(struct
- buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
- buf_prefix_content.pass_prs_result = true;
- buf_prefix_content.pass_hash_result = true;
-- buf_prefix_content.pass_time_stamp = false;
-+ buf_prefix_content.pass_time_stamp = true;
- buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
-
- params.specific_params.non_rx_params.err_fqid = errq->fqid;
-@@ -1218,7 +1221,7 @@ static int dpaa_eth_init_rx_port(struct
- buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
- buf_prefix_content.pass_prs_result = true;
- buf_prefix_content.pass_hash_result = true;
-- buf_prefix_content.pass_time_stamp = false;
-+ buf_prefix_content.pass_time_stamp = true;
- buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
-
- rx_p = &params.specific_params.rx_params;
-@@ -1500,7 +1503,19 @@ static int dpaa_bp_add_8_bufs(const stru
- u8 i;
-
- for (i = 0; i < 8; i++) {
-+#ifndef CONFIG_PPC
-+ if (dpaa_errata_a010022) {
-+ struct page *page = alloc_page(GFP_KERNEL);
-+
-+ if (unlikely(!page))
-+ goto release_previous_buffs;
-+ new_buf = page_address(page);
-+ } else {
-+ new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-+ }
-+#else
- new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-+#endif
- if (unlikely(!new_buf)) {
- dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
- dpaa_bp->raw_size);
-@@ -1600,6 +1615,17 @@ static int dpaa_eth_refill_bpools(struct
- return 0;
- }
-
-+static phys_addr_t dpaa_iova_to_phys(struct device *dev, dma_addr_t addr)
-+{
-+ struct iommu_domain *domain;
-+
-+ domain = iommu_get_domain_for_dev(dev);
-+ if (domain)
-+ return iommu_iova_to_phys(domain, addr);
-+ else
-+ return addr;
-+}
-+
- /* Cleanup function for outgoing frame descriptors that were built on Tx path,
- * either contiguous frames or scatter/gather ones.
- * Skb freeing is not handled here.
-@@ -1615,24 +1641,41 @@ static struct sk_buff *dpaa_cleanup_tx_f
- {
- const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- struct device *dev = priv->net_dev->dev.parent;
-+ struct skb_shared_hwtstamps shhwtstamps;
- dma_addr_t addr = qm_fd_addr(fd);
- const struct qm_sg_entry *sgt;
- struct sk_buff **skbh, *skb;
- int nr_frags, i;
-+ u64 ns;
-
-- skbh = (struct sk_buff **)phys_to_virt(addr);
-+ skbh = (struct sk_buff **)phys_to_virt(dpaa_iova_to_phys(dev, addr));
- skb = *skbh;
-
-+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-+
-+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
-+ &ns)) {
-+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ } else {
-+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
-+ }
-+ }
-+
- if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
-- dma_unmap_single(dev, addr,
-- qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-- dma_dir);
-
- /* The sgt buffer has been allocated with netdev_alloc_frag(),
- * it's from lowmem.
- */
-- sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
-+ sgt = phys_to_virt(dpaa_iova_to_phys(dev,
-+ addr +
-+ qm_fd_get_offset(fd)));
-+
-+ dma_unmap_single(dev, addr,
-+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-+ dma_dir);
-
- /* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
-@@ -1645,9 +1688,13 @@ static struct sk_buff *dpaa_cleanup_tx_f
- dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
- qm_sg_entry_get_len(&sgt[i]), dma_dir);
- }
--
-- /* Free the page frag that we allocated on Tx */
-- skb_free_frag(phys_to_virt(addr));
-+#ifndef CONFIG_PPC
-+ if (dpaa_errata_a010022)
-+ put_page(virt_to_page(sgt));
-+ else
-+#endif
-+ /* Free the page frag that we allocated on Tx */
-+ skb_free_frag(skbh);
- } else {
- dma_unmap_single(dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
-@@ -1678,26 +1725,21 @@ static u8 rx_csum_offload(const struct d
- * accommodate the shared info area of the skb.
- */
- static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
-- const struct qm_fd *fd)
-+ const struct qm_fd *fd,
-+ struct dpaa_bp *dpaa_bp,
-+ void *vaddr)
- {
- ssize_t fd_off = qm_fd_get_offset(fd);
-- dma_addr_t addr = qm_fd_addr(fd);
-- struct dpaa_bp *dpaa_bp;
- struct sk_buff *skb;
-- void *vaddr;
-
-- vaddr = phys_to_virt(addr);
- WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-
-- dpaa_bp = dpaa_bpid2pool(fd->bpid);
-- if (!dpaa_bp)
-- goto free_buffer;
--
- skb = build_skb(vaddr, dpaa_bp->size +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb)) {
- WARN_ONCE(1, "Build skb failure on Rx\n");
-- goto free_buffer;
-+ skb_free_frag(vaddr);
-+ return NULL;
- }
- WARN_ON(fd_off != priv->rx_headroom);
- skb_reserve(skb, fd_off);
-@@ -1706,10 +1748,6 @@ static struct sk_buff *contig_fd_to_skb(
- skb->ip_summed = rx_csum_offload(priv, fd);
-
- return skb;
--
--free_buffer:
-- skb_free_frag(vaddr);
-- return NULL;
- }
-
- /* Build an skb with the data of the first S/G entry in the linear portion and
-@@ -1718,14 +1756,14 @@ free_buffer:
- * The page fragment holding the S/G Table is recycled here.
- */
- static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
-- const struct qm_fd *fd)
-+ const struct qm_fd *fd,
-+ struct dpaa_bp *dpaa_bp,
-+ void *vaddr)
- {
- ssize_t fd_off = qm_fd_get_offset(fd);
-- dma_addr_t addr = qm_fd_addr(fd);
- const struct qm_sg_entry *sgt;
- struct page *page, *head_page;
-- struct dpaa_bp *dpaa_bp;
-- void *vaddr, *sg_vaddr;
-+ void *sg_vaddr;
- int frag_off, frag_len;
- struct sk_buff *skb;
- dma_addr_t sg_addr;
-@@ -1734,29 +1772,33 @@ static struct sk_buff *sg_fd_to_skb(cons
- int *count_ptr;
- int i;
-
-- vaddr = phys_to_virt(addr);
- WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-
- /* Iterate through the SGT entries and add data buffers to the skb */
- sgt = vaddr + fd_off;
-+ skb = NULL;
- for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
- /* Extension bit is not supported */
- WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
-
- sg_addr = qm_sg_addr(&sgt[i]);
-- sg_vaddr = phys_to_virt(sg_addr);
-- WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-- SMP_CACHE_BYTES));
-
- /* We may use multiple Rx pools */
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-- if (!dpaa_bp)
-+ if (!dpaa_bp) {
-+ pr_info("%s: fail to get dpaa_bp for sg bpid %d\n",
-+ __func__, sgt[i].bpid);
- goto free_buffers;
-+ }
-+ sg_vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev,
-+ sg_addr));
-+ WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-+ SMP_CACHE_BYTES));
-
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
- DMA_FROM_DEVICE);
-- if (i == 0) {
-+ if (!skb) {
- sz = dpaa_bp->size +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- skb = build_skb(sg_vaddr, sz);
-@@ -1823,10 +1865,11 @@ free_buffers:
- /* free all the SG entries */
- for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
- sg_addr = qm_sg_addr(&sgt[i]);
-- sg_vaddr = phys_to_virt(sg_addr);
-- skb_free_frag(sg_vaddr);
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
-+ sg_addr = dpaa_iova_to_phys(dpaa_bp->dev, sg_addr);
-+ sg_vaddr = phys_to_virt(sg_addr);
-+ skb_free_frag(sg_vaddr);
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)--;
- }
-@@ -1909,16 +1952,28 @@ static int skb_to_sg_fd(struct dpaa_priv
- size_t frag_len;
- void *sgt_buf;
-
-- /* get a page frag to store the SGTable */
-- sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-- sgt_buf = netdev_alloc_frag(sz);
-- if (unlikely(!sgt_buf)) {
-- netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-- sz);
-- return -ENOMEM;
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022)) {
-+ struct page *page = alloc_page(GFP_ATOMIC);
-+ if (unlikely(!page))
-+ return -ENOMEM;
-+ sgt_buf = page_address(page);
-+ } else {
-+#endif
-+ /* get a page frag to store the SGTable */
-+ sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-+ sgt_buf = netdev_alloc_frag(sz);
-+ if (unlikely(!sgt_buf)) {
-+ netdev_err(net_dev,
-+ "netdev_alloc_frag() failed for size %d\n",
-+ sz);
-+ return -ENOMEM;
-+ }
-+#ifndef CONFIG_PPC
- }
-+#endif
-
-- /* Enable L3/L4 hardware checksum computation.
-+ /* Enable L3/L4 hardware checksum computation.
- *
- * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
- * need to write into the skb.
-@@ -2036,6 +2091,121 @@ static inline int dpaa_xmit(struct dpaa_
- return 0;
- }
-
-+#ifndef CONFIG_PPC
-+/* On LS1043A SoC there is a known erratum ERR010022 that results in split DMA
-+ * transfers in the FMan under certain conditions. This, combined with a fixed
-+ * size FIFO of ongoing DMA transfers that may overflow when a split occurs,
-+ * results in the FMan stalling DMA transfers under high traffic. To avoid the
-+ * problem, one needs to prevent the DMA transfer splits from occurring by
-+ * preparing the buffers accordingly.
-+ */
-+
-+#define DPAA_A010022_HEADROOM 256
-+#define CROSS_4K_BOUND(start, size) \
-+ (((start) + (size)) > (((start) + 0x1000) & ~0xFFF))
-+
-+static bool dpaa_errata_a010022_has_dma_issue(struct sk_buff *skb,
-+ struct dpaa_priv *priv)
-+{
-+ int nr_frags, i = 0;
-+ skb_frag_t *frag;
-+
-+ /* Transfers that do not start at 16B aligned addresses will be split;
-+ * Transfers that cross a 4K page boundary will also be split
-+ */
-+
-+ /* Check if the frame data is aligned to 16 bytes */
-+ if ((uintptr_t)skb->data % DPAA_FD_DATA_ALIGNMENT)
-+ return true;
-+
-+ /* Check if the headroom crosses a boundary */
-+ if (CROSS_4K_BOUND((uintptr_t)skb->head, skb_headroom(skb)))
-+ return true;
-+
-+ /* Check if the non-paged data crosses a boundary */
-+ if (CROSS_4K_BOUND((uintptr_t)skb->data, skb_headlen(skb)))
-+ return true;
-+
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+
-+ while (i < nr_frags) {
-+ frag = &skb_shinfo(skb)->frags[i];
-+
-+ /* Check if a paged fragment crosses a boundary from its
-+ * offset to its end.
-+ */
-+ if (CROSS_4K_BOUND((uintptr_t)frag->page_offset, frag->size))
-+ return true;
-+
-+ i++;
-+ }
-+
-+ return false;
-+}
-+
-+static struct sk_buff *dpaa_errata_a010022_prevent(struct sk_buff *skb,
-+ struct dpaa_priv *priv)
-+{
-+ int trans_offset = skb_transport_offset(skb);
-+ int net_offset = skb_network_offset(skb);
-+ int nsize, npage_order, headroom;
-+ struct sk_buff *nskb = NULL;
-+ struct page *npage;
-+ void *npage_addr;
-+
-+ if (!dpaa_errata_a010022_has_dma_issue(skb, priv))
-+ return skb;
-+
-+ /* For the new skb we only need the old one's data (both non-paged and
-+ * paged). We can skip the old tailroom.
-+ *
-+ * The headroom also needs to fit our private info (64 bytes) but we
-+ * reserve 256 bytes instead in order to guarantee that the data is
-+ * aligned to 256.
-+ */
-+ headroom = DPAA_A010022_HEADROOM;
-+ nsize = headroom + skb->len +
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-+
-+ /* Reserve enough memory to accommodate Jumbo frames */
-+ npage_order = (nsize - 1) / PAGE_SIZE;
-+ npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, npage_order);
-+ if (unlikely(!npage)) {
-+ WARN_ONCE(1, "Memory allocation failure\n");
-+ return NULL;
-+ }
-+ npage_addr = page_address(npage);
-+
-+ nskb = build_skb(npage_addr, nsize);
-+ if (unlikely(!nskb))
-+ goto err;
-+
-+ /* Code borrowed and adapted from skb_copy() */
-+ skb_reserve(nskb, headroom);
-+ skb_put(nskb, skb->len);
-+ if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
-+ WARN_ONCE(1, "skb parsing failure\n");
-+ goto err;
-+ }
-+ copy_skb_header(nskb, skb);
-+ /* We move the headroom when we align it so we have to reset the
-+ * network and transport header offsets relative to the new data
-+ * pointer. The checksum offload relies on these offsets.
-+ */
-+ skb_set_network_header(nskb, net_offset);
-+ skb_set_transport_header(nskb, trans_offset);
-+
-+ dev_kfree_skb(skb);
-+ return nskb;
-+
-+err:
-+ if (nskb)
-+ dev_kfree_skb(nskb);
-+ put_page(npage);
-+ return NULL;
-+}
-+#endif
-+
- static netdev_tx_t
- dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
- {
-@@ -2043,6 +2213,7 @@ dpaa_start_xmit(struct sk_buff *skb, str
- bool nonlinear = skb_is_nonlinear(skb);
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa_percpu_priv *percpu_priv;
-+ struct netdev_queue *txq;
- struct dpaa_priv *priv;
- struct qm_fd fd;
- int offset = 0;
-@@ -2070,24 +2241,47 @@ dpaa_start_xmit(struct sk_buff *skb, str
- /* MAX_SKB_FRAGS is equal or larger than our dpaa_SGT_MAX_ENTRIES;
- * make sure we don't feed FMan with more fragments than it supports.
- */
-- if (nonlinear &&
-- likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
-- /* Just create a S/G fd based on the skb */
-- err = skb_to_sg_fd(priv, skb, &fd);
-- percpu_priv->tx_frag_skbuffs++;
-- } else {
-+ if (unlikely(nonlinear &&
-+ (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
- /* If the egress skb contains more fragments than we support
- * we have no choice but to linearize it ourselves.
- */
-- if (unlikely(nonlinear) && __skb_linearize(skb))
-+ if (__skb_linearize(skb))
- goto enomem;
-
-- /* Finally, create a contig FD from this skb */
-+ nonlinear = skb_is_nonlinear(skb);
-+ }
-+
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022)) {
-+ skb = dpaa_errata_a010022_prevent(skb, priv);
-+ if (!skb)
-+ goto enomem;
-+ nonlinear = skb_is_nonlinear(skb);
-+ }
-+#endif
-+
-+ if (nonlinear) {
-+ /* Just create a S/G fd based on the skb */
-+ err = skb_to_sg_fd(priv, skb, &fd);
-+ percpu_priv->tx_frag_skbuffs++;
-+ } else {
-+ /* Create a contig FD from this skb */
- err = skb_to_contig_fd(priv, skb, &fd, &offset);
- }
- if (unlikely(err < 0))
- goto skb_to_fd_failed;
-
-+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
-+
-+ /* LLTX requires us to do our own update of trans_start */
-+ txq->trans_start = jiffies;
-+
-+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-+ fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
-+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-+ }
-+
- if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
- return NETDEV_TX_OK;
-
-@@ -2219,14 +2413,8 @@ static enum qman_cb_dqrr_result rx_error
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
- return qman_cb_dqrr_stop;
-
-- if (dpaa_eth_refill_bpools(priv))
-- /* Unable to refill the buffer pool due to insufficient
-- * system memory. Just release the frame back into the pool,
-- * otherwise we'll soon end up with an empty buffer pool.
-- */
-- dpaa_fd_release(net_dev, &dq->fd);
-- else
-- dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-+ dpaa_eth_refill_bpools(priv);
-+ dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-
- return qman_cb_dqrr_consume;
- }
-@@ -2235,6 +2423,7 @@ static enum qman_cb_dqrr_result rx_defau
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
- {
-+ struct skb_shared_hwtstamps *shhwtstamps;
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa_percpu_priv *percpu_priv;
- const struct qm_fd *fd = &dq->fd;
-@@ -2248,6 +2437,7 @@ static enum qman_cb_dqrr_result rx_defau
- struct sk_buff *skb;
- int *count_ptr;
- void *vaddr;
-+ u64 ns;
-
- fd_status = be32_to_cpu(fd->status);
- fd_format = qm_fd_get_format(fd);
-@@ -2290,12 +2480,12 @@ static enum qman_cb_dqrr_result rx_defau
- if (!dpaa_bp)
- return qman_cb_dqrr_consume;
-
-- dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
--
- /* prefetch the first 64 bytes of the frame or the SGT start */
-- vaddr = phys_to_virt(addr);
-+ vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev, addr));
- prefetch(vaddr + qm_fd_get_offset(fd));
-
-+ dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
-+
- /* The only FD types that we may receive are contig and S/G */
- WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
-
-@@ -2306,12 +2496,22 @@ static enum qman_cb_dqrr_result rx_defau
- (*count_ptr)--;
-
- if (likely(fd_format == qm_fd_contig))
-- skb = contig_fd_to_skb(priv, fd);
-+ skb = contig_fd_to_skb(priv, fd, dpaa_bp, vaddr);
- else
-- skb = sg_fd_to_skb(priv, fd);
-+ skb = sg_fd_to_skb(priv, fd, dpaa_bp, vaddr);
- if (!skb)
- return qman_cb_dqrr_consume;
-
-+ if (priv->rx_tstamp) {
-+ shhwtstamps = skb_hwtstamps(skb);
-+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-+
-+ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
-+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
-+ else
-+ dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
-+ }
-+
- skb->protocol = eth_type_trans(skb, net_dev);
-
- if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
-@@ -2440,6 +2640,44 @@ static void dpaa_eth_napi_disable(struct
- }
- }
-
-+static void dpaa_adjust_link(struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev;
-+ struct dpaa_priv *priv;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+ mac_dev->adjust_link(mac_dev);
-+}
-+
-+static int dpaa_phy_init(struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev;
-+ struct phy_device *phy_dev;
-+ struct dpaa_priv *priv;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &dpaa_adjust_link, 0,
-+ mac_dev->phy_if);
-+ if (!phy_dev) {
-+ netif_err(priv, ifup, net_dev, "init_phy() failed\n");
-+ return -ENODEV;
-+ }
-+
-+ /* Remove any features not supported by the controller */
-+ phy_dev->supported &= mac_dev->if_support;
-+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ phy_dev->advertising = phy_dev->supported;
-+
-+ mac_dev->phy_dev = phy_dev;
-+ net_dev->phydev = phy_dev;
-+
-+ return 0;
-+}
-+
- static int dpaa_open(struct net_device *net_dev)
- {
- struct mac_device *mac_dev;
-@@ -2450,12 +2688,9 @@ static int dpaa_open(struct net_device *
- mac_dev = priv->mac_dev;
- dpaa_eth_napi_enable(priv);
-
-- net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
-- if (!net_dev->phydev) {
-- netif_err(priv, ifup, net_dev, "init_phy() failed\n");
-- err = -ENODEV;
-+ err = dpaa_phy_init(net_dev);
-+ if (err)
- goto phy_init_failed;
-- }
-
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
- err = fman_port_enable(mac_dev->port[i]);
-@@ -2496,11 +2731,58 @@ static int dpaa_eth_stop(struct net_devi
- return err;
- }
-
-+static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct dpaa_priv *priv = netdev_priv(dev);
-+ struct hwtstamp_config config;
-+
-+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
-+ return -EFAULT;
-+
-+ switch (config.tx_type) {
-+ case HWTSTAMP_TX_OFF:
-+ /* Couldn't disable rx/tx timestamping separately.
-+ * Do nothing here.
-+ */
-+ priv->tx_tstamp = false;
-+ break;
-+ case HWTSTAMP_TX_ON:
-+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
-+ priv->tx_tstamp = true;
-+ break;
-+ default:
-+ return -ERANGE;
-+ }
-+
-+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
-+ /* Couldn't disable rx/tx timestamping separately.
-+ * Do nothing here.
-+ */
-+ priv->rx_tstamp = false;
-+ } else {
-+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
-+ priv->rx_tstamp = true;
-+ /* TS is set for all frame types, not only those requested */
-+ config.rx_filter = HWTSTAMP_FILTER_ALL;
-+ }
-+
-+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-+ -EFAULT : 0;
-+}
-+
- static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
- {
-- if (!net_dev->phydev)
-- return -EINVAL;
-- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
-+ int ret = -EINVAL;
-+
-+ if (cmd == SIOCGMIIREG) {
-+ if (net_dev->phydev)
-+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
-+ }
-+
-+ if (cmd == SIOCSHWTSTAMP)
-+ return dpaa_ts_ioctl(net_dev, rq, cmd);
-+
-+ return ret;
- }
-
- static const struct net_device_ops dpaa_ops = {
-@@ -2652,7 +2934,6 @@ static inline u16 dpaa_get_headroom(stru
- static int dpaa_eth_probe(struct platform_device *pdev)
- {
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
-- struct dpaa_percpu_priv *percpu_priv;
- struct net_device *net_dev = NULL;
- struct dpaa_fq *dpaa_fq, *tmp;
- struct dpaa_priv *priv = NULL;
-@@ -2661,7 +2942,51 @@ static int dpaa_eth_probe(struct platfor
- int err = 0, i, channel;
- struct device *dev;
-
-- dev = &pdev->dev;
-+ err = bman_is_probed();
-+ if (!err)
-+ return -EPROBE_DEFER;
-+ if (err < 0) {
-+ dev_err(&pdev->dev, "failing probe due to bman probe error\n");
-+ return -ENODEV;
-+ }
-+ err = qman_is_probed();
-+ if (!err)
-+ return -EPROBE_DEFER;
-+ if (err < 0) {
-+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
-+ return -ENODEV;
-+ }
-+ err = bman_portals_probed();
-+ if (!err)
-+ return -EPROBE_DEFER;
-+ if (err < 0) {
-+ dev_err(&pdev->dev,
-+ "failing probe due to bman portals probe error\n");
-+ return -ENODEV;
-+ }
-+ err = qman_portals_probed();
-+ if (!err)
-+ return -EPROBE_DEFER;
-+ if (err < 0) {
-+ dev_err(&pdev->dev,
-+ "failing probe due to qman portals probe error\n");
-+ return -ENODEV;
-+ }
-+
-+ mac_dev = dpaa_mac_dev_get(pdev);
-+ if (IS_ERR(mac_dev)) {
-+ dev_err(&pdev->dev, "dpaa_mac_dev_get() failed\n");
-+ err = PTR_ERR(mac_dev);
-+ goto probe_err;
-+ }
-+
-+ /* device used for DMA mapping */
-+ dev = fman_port_get_device(mac_dev->port[RX]);
-+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
-+ if (err) {
-+ dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
-+ return err;
-+ }
-
- /* Allocate this early, so we can store relevant information in
- * the private area
-@@ -2669,7 +2994,7 @@ static int dpaa_eth_probe(struct platfor
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
- if (!net_dev) {
- dev_err(dev, "alloc_etherdev_mq() failed\n");
-- goto alloc_etherdev_mq_failed;
-+ return -ENOMEM;
- }
-
- /* Do this here, so we can be verbose early */
-@@ -2681,13 +3006,6 @@ static int dpaa_eth_probe(struct platfor
-
- priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
-
-- mac_dev = dpaa_mac_dev_get(pdev);
-- if (IS_ERR(mac_dev)) {
-- dev_err(dev, "dpaa_mac_dev_get() failed\n");
-- err = PTR_ERR(mac_dev);
-- goto mac_probe_failed;
-- }
--
- /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
- * we choose conservatively and let the user explicitly set a higher
- * MTU via ifconfig. Otherwise, the user may end up with different MTUs
-@@ -2703,21 +3021,13 @@ static int dpaa_eth_probe(struct platfor
- priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
- priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
-
-- /* device used for DMA mapping */
-- set_dma_ops(dev, get_dma_ops(&pdev->dev));
-- err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
-- if (err) {
-- dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
-- goto dev_mask_failed;
-- }
--
- /* bp init */
- for (i = 0; i < DPAA_BPS_NUM; i++) {
-- int err;
--
- dpaa_bps[i] = dpaa_bp_alloc(dev);
-- if (IS_ERR(dpaa_bps[i]))
-- return PTR_ERR(dpaa_bps[i]);
-+ if (IS_ERR(dpaa_bps[i])) {
-+ err = PTR_ERR(dpaa_bps[i]);
-+ goto free_dpaa_bps;
-+ }
- /* the raw size of the buffers used for reception */
- dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
- /* avoid runtime computations by keeping the usable size here */
-@@ -2725,11 +3035,8 @@ static int dpaa_eth_probe(struct platfor
- dpaa_bps[i]->dev = dev;
-
- err = dpaa_bp_alloc_pool(dpaa_bps[i]);
-- if (err < 0) {
-- dpaa_bps_free(priv);
-- priv->dpaa_bps[i] = NULL;
-- goto bp_create_failed;
-- }
-+ if (err < 0)
-+ goto free_dpaa_bps;
- priv->dpaa_bps[i] = dpaa_bps[i];
- }
-
-@@ -2740,7 +3047,7 @@ static int dpaa_eth_probe(struct platfor
- err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
- if (err < 0) {
- dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
-- goto fq_probe_failed;
-+ goto free_dpaa_bps;
- }
-
- priv->mac_dev = mac_dev;
-@@ -2749,12 +3056,12 @@ static int dpaa_eth_probe(struct platfor
- if (channel < 0) {
- dev_err(dev, "dpaa_get_channel() failed\n");
- err = channel;
-- goto get_channel_failed;
-+ goto free_dpaa_bps;
- }
-
- priv->channel = (u16)channel;
-
-- /* Start a thread that will walk the CPUs with affine portals
-+ /* Walk the CPUs with affine portals
- * and add this pool channel to each's dequeue mask.
- */
- dpaa_eth_add_channel(priv->channel);
-@@ -2769,20 +3076,20 @@ static int dpaa_eth_probe(struct platfor
- err = dpaa_eth_cgr_init(priv);
- if (err < 0) {
- dev_err(dev, "Error initializing CGR\n");
-- goto tx_cgr_init_failed;
-+ goto free_dpaa_bps;
- }
-
- err = dpaa_ingress_cgr_init(priv);
- if (err < 0) {
- dev_err(dev, "Error initializing ingress CGR\n");
-- goto rx_cgr_init_failed;
-+ goto delete_egress_cgr;
- }
-
- /* Add the FQs to the interface, and make them active */
- list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
- err = dpaa_fq_init(dpaa_fq, false);
- if (err < 0)
-- goto fq_alloc_failed;
-+ goto free_dpaa_fqs;
- }
-
- priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
-@@ -2792,7 +3099,7 @@ static int dpaa_eth_probe(struct platfor
- err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
- &priv->buf_layout[0], dev);
- if (err)
-- goto init_ports_failed;
-+ goto free_dpaa_fqs;
-
- /* Rx traffic distribution based on keygen hashing defaults to on */
- priv->keygen_in_use = true;
-@@ -2801,11 +3108,7 @@ static int dpaa_eth_probe(struct platfor
- if (!priv->percpu_priv) {
- dev_err(dev, "devm_alloc_percpu() failed\n");
- err = -ENOMEM;
-- goto alloc_percpu_failed;
-- }
-- for_each_possible_cpu(i) {
-- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-- memset(percpu_priv, 0, sizeof(*percpu_priv));
-+ goto free_dpaa_fqs;
- }
-
- priv->num_tc = 1;
-@@ -2814,11 +3117,11 @@ static int dpaa_eth_probe(struct platfor
- /* Initialize NAPI */
- err = dpaa_napi_add(net_dev);
- if (err < 0)
-- goto napi_add_failed;
-+ goto delete_dpaa_napi;
-
- err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
- if (err < 0)
-- goto netdev_init_failed;
-+ goto delete_dpaa_napi;
-
- dpaa_eth_sysfs_init(&net_dev->dev);
-
-@@ -2827,32 +3130,21 @@ static int dpaa_eth_probe(struct platfor
-
- return 0;
-
--netdev_init_failed:
--napi_add_failed:
-+delete_dpaa_napi:
- dpaa_napi_del(net_dev);
--alloc_percpu_failed:
--init_ports_failed:
-+free_dpaa_fqs:
- dpaa_fq_free(dev, &priv->dpaa_fq_list);
--fq_alloc_failed:
- qman_delete_cgr_safe(&priv->ingress_cgr);
- qman_release_cgrid(priv->ingress_cgr.cgrid);
--rx_cgr_init_failed:
-+delete_egress_cgr:
- qman_delete_cgr_safe(&priv->cgr_data.cgr);
- qman_release_cgrid(priv->cgr_data.cgr.cgrid);
--tx_cgr_init_failed:
--get_channel_failed:
-+free_dpaa_bps:
- dpaa_bps_free(priv);
--bp_create_failed:
--fq_probe_failed:
--dev_mask_failed:
--mac_probe_failed:
- dev_set_drvdata(dev, NULL);
- free_netdev(net_dev);
--alloc_etherdev_mq_failed:
-- for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
-- if (atomic_read(&dpaa_bps[i]->refs) == 0)
-- devm_kfree(dev, dpaa_bps[i]);
-- }
-+probe_err:
-+
- return err;
- }
-
-@@ -2889,6 +3181,23 @@ static int dpaa_remove(struct platform_d
- return err;
- }
-
-+#ifndef CONFIG_PPC
-+static bool __init soc_has_errata_a010022(void)
-+{
-+#ifdef CONFIG_SOC_BUS
-+ const struct soc_device_attribute soc_msi_matches[] = {
-+ { .family = "QorIQ LS1043A",
-+ .data = NULL },
-+ { },
-+ };
-+
-+ if (!soc_device_match(soc_msi_matches))
-+ return false;
-+#endif
-+ return true; /* cannot identify SoC or errata applies */
-+}
-+#endif
-+
- static const struct platform_device_id dpaa_devtype[] = {
- {
- .name = "dpaa-ethernet",
-@@ -2913,6 +3222,10 @@ static int __init dpaa_load(void)
-
- pr_debug("FSL DPAA Ethernet driver\n");
-
-+#ifndef CONFIG_PPC
-+ /* Detect if the current SoC requires the DMA transfer alignment workaround */
-+ dpaa_errata_a010022 = soc_has_errata_a010022();
-+#endif
- /* initialize dpaa_eth mirror values */
- dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
- dpaa_max_frm = fman_get_max_frm();
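
The ERR010022 handling above works in two stages: dpaa_load() uses soc_device_match() to flag LS1043A parts, and the transmit path then copies any skb whose frame data is not 16-byte aligned, or whose headroom, linear part or page fragments cross a 4 KiB page boundary, into a fresh 256-byte-aligned buffer before it reaches the FMan. Below is a minimal standalone sketch of that per-buffer check, assuming the 16-byte alignment and 4 KiB page size stated in the comments; the names FD_DATA_ALIGNMENT, PAGE_SZ, crosses_4k_boundary and needs_realignment are illustrative and not part of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants taken from the comments in the patch: frame data
 * must start 16-byte aligned and a transfer must not cross a 4 KiB page. */
#define FD_DATA_ALIGNMENT 16
#define PAGE_SZ           0x1000UL

/* True if [start, start + size) spills past the end of the 4 KiB page that
 * 'start' lives in -- the condition the FMan splits a DMA transfer on. */
static bool crosses_4k_boundary(uintptr_t start, size_t size)
{
	return (start + size) > ((start + PAGE_SZ) & ~(PAGE_SZ - 1));
}

/* A buffer is left alone only if it starts 16-byte aligned and fits within
 * one page; otherwise the driver copies the frame into a fresh buffer. */
static bool needs_realignment(uintptr_t data, size_t len)
{
	return (data % FD_DATA_ALIGNMENT) || crosses_4k_boundary(data, len);
}

int main(void)
{
	/* 0x10fe0 + 0x40 ends at 0x11020, past the next page start 0x11000 */
	printf("%d\n", needs_realignment(0x10fe0, 0x40)); /* 1: crosses a page   */
	printf("%d\n", needs_realignment(0x11000, 0x40)); /* 0: aligned and fits */
	printf("%d\n", needs_realignment(0x11008, 0x10)); /* 1: not 16B aligned  */
	return 0;
}

The 256-byte copy headroom chosen by dpaa_errata_a010022_prevent() covers the driver's 64-byte private area while guaranteeing that the copied data is aligned to 256 bytes, which in turn satisfies the 16-byte alignment requirement checked above.
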
---- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
-+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
-@@ -182,6 +182,9 @@ struct dpaa_priv {
-
- struct dpaa_buffer_layout buf_layout[2];
- u16 rx_headroom;
-+
-+ bool tx_tstamp; /* Tx timestamping enabled */
-+ bool rx_tstamp; /* Rx timestamping enabled */
- };
-
- /* from dpaa_ethtool.c */
---- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
-+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
-@@ -32,6 +32,9 @@
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
- #include <linux/string.h>
-+#include <linux/of_platform.h>
-+#include <linux/net_tstamp.h>
-+#include <linux/fsl/ptp_qoriq.h>
-
- #include "dpaa_eth.h"
- #include "mac.h"
-@@ -344,7 +347,7 @@ static void dpaa_get_ethtool_stats(struc
-
- /* gather congestion related counters */
- cg_num = 0;
-- cg_status = 0;
-+ cg_status = false;
- cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
- if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
- cg_num = priv->cgr_data.cgr_congested_count;
-@@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_dev
- return ret;
- }
-
-+static int dpaa_get_ts_info(struct net_device *net_dev,
-+ struct ethtool_ts_info *info)
-+{
-+ struct device *dev = net_dev->dev.parent;
-+ struct device_node *mac_node = dev->of_node;
-+ struct device_node *fman_node = NULL, *ptp_node = NULL;
-+ struct platform_device *ptp_dev = NULL;
-+ struct qoriq_ptp *ptp = NULL;
-+
-+ info->phc_index = -1;
-+
-+ fman_node = of_get_parent(mac_node);
-+ if (fman_node)
-+ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
-+
-+ if (ptp_node)
-+ ptp_dev = of_find_device_by_node(ptp_node);
-+
-+ if (ptp_dev)
-+ ptp = platform_get_drvdata(ptp_dev);
-+
-+ if (ptp)
-+ info->phc_index = ptp->phc_index;
-+
-+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
-+ SOF_TIMESTAMPING_RX_HARDWARE |
-+ SOF_TIMESTAMPING_RAW_HARDWARE;
-+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
-+ (1 << HWTSTAMP_TX_ON);
-+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-+ (1 << HWTSTAMP_FILTER_ALL);
-+
-+ return 0;
-+}
-+
- const struct ethtool_ops dpaa_ethtool_ops = {
- .get_drvinfo = dpaa_get_drvinfo,
- .get_msglevel = dpaa_get_msglevel,
-@@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_op
- .set_link_ksettings = dpaa_set_link_ksettings,
- .get_rxnfc = dpaa_get_rxnfc,
- .set_rxnfc = dpaa_set_rxnfc,
-+ .get_ts_info = dpaa_get_ts_info,
- };
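
dpaa_ts_ioctl() and dpaa_get_ts_info() above expose the FMan timestamping through the standard SIOCSHWTSTAMP ioctl and the ethtool timestamping query. The following is a minimal user-space sketch of how such support is typically enabled, using only the standard Linux UAPI; the interface name eth0 is a placeholder and error handling is kept to the essentials.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	/* Ask for HW Tx timestamps and HW timestamps on all Rx frames --
	 * the request dpaa_ts_ioctl() services on the kernel side. */
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* placeholder name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* cfg now reflects what the driver actually programmed */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}

Timestamps are then delivered through the usual SO_TIMESTAMPING machinery; the driver reports HWTSTAMP_FILTER_ALL back to user space because, as the comment above notes, timestamps are applied to all frame types rather than only to the requested ones.
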
---- a/drivers/net/ethernet/freescale/fman/Kconfig
-+++ b/drivers/net/ethernet/freescale/fman/Kconfig
-@@ -2,7 +2,6 @@ config FSL_FMAN
- tristate "FMan support"
- depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
- select GENERIC_ALLOCATOR
-- depends on HAS_DMA
- select PHYLIB
- default n
- help
---- a/drivers/net/ethernet/freescale/fman/Makefile
-+++ b/drivers/net/ethernet/freescale/fman/Makefile
-@@ -1,10 +1,10 @@
- # SPDX-License-Identifier: GPL-2.0
- subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
-
--obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
--obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
--obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
-+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman.o
-+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman_port.o
-+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_mac.o
-
--fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o
--fsl_fman_port-objs := fman_port.o
--fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
-+fsl_dpaa_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o
-+fsl_dpaa_fman_port-objs := fman_port.o
-+fsl_dpaa_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
---- a/drivers/net/ethernet/freescale/fman/fman.c
-+++ b/drivers/net/ethernet/freescale/fman/fman.c
-@@ -634,6 +634,7 @@ static void set_port_order_restoration(s
- iowrite32be(tmp, &fpm_rg->fmfp_prc);
- }
-
-+#ifdef CONFIG_PPC
- static void set_port_liodn(struct fman *fman, u8 port_id,
- u32 liodn_base, u32 liodn_ofst)
- {
-@@ -651,6 +652,27 @@ static void set_port_liodn(struct fman *
- iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
- iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
- }
-+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+static void save_restore_port_icids(struct fman *fman, bool save)
-+{
-+ int port_idxes[] = {
-+ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc,
-+ 0xd, 0xe, 0xf, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
-+ 0x10, 0x11, 0x30, 0x31
-+ };
-+ int idx, i;
-+
-+ for (i = 0; i < ARRAY_SIZE(port_idxes); i++) {
-+ idx = port_idxes[i];
-+ if (save)
-+ fman->sp_icids[idx] =
-+ ioread32be(&fman->bmi_regs->fmbm_spliodn[idx]);
-+ else
-+ iowrite32be(fman->sp_icids[idx],
-+ &fman->bmi_regs->fmbm_spliodn[idx]);
-+ }
-+}
-+#endif
-
- static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
- {
-@@ -1919,7 +1941,10 @@ _return:
- static int fman_init(struct fman *fman)
- {
- struct fman_cfg *cfg = NULL;
-- int err = 0, i, count;
-+ int err = 0, count;
-+#ifdef CONFIG_PPC
-+ int i;
-+#endif
-
- if (is_init_done(fman->cfg))
- return -EINVAL;
-@@ -1939,6 +1964,7 @@ static int fman_init(struct fman *fman)
- memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
- fman->state->fm_port_num_of_cg);
-
-+#ifdef CONFIG_PPC
- /* Save LIODN info before FMan reset
- * Skipping non-existent port 0 (i = 1)
- */
-@@ -1958,6 +1984,9 @@ static int fman_init(struct fman *fman)
- }
- fman->liodn_base[i] = liodn_base;
- }
-+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ save_restore_port_icids(fman, true);
-+#endif
-
- err = fman_reset(fman);
- if (err)
-@@ -2186,8 +2215,12 @@ int fman_set_port_params(struct fman *fm
- if (err)
- goto return_err;
-
-+#ifdef CONFIG_PPC
- set_port_liodn(fman, port_id, fman->liodn_base[port_id],
- fman->liodn_offset[port_id]);
-+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ save_restore_port_icids(fman, false);
-+#endif
-
- if (fman->state->rev_info.major < 6)
- set_port_order_restoration(fman->fpm_regs, port_id);
-@@ -2813,7 +2846,8 @@ static struct fman *read_dts_node(struct
-
- of_node_put(muram_node);
-
-- err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
-+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
-+ "fman", fman);
- if (err < 0) {
- dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
- __func__, irq, err);
---- a/drivers/net/ethernet/freescale/fman/fman.h
-+++ b/drivers/net/ethernet/freescale/fman/fman.h
-@@ -42,6 +42,7 @@
- /* Frame queue Context Override */
- #define FM_FD_CMD_FCO 0x80000000
- #define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
-+#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */
- #define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
-
- /* TX-Port: Unsupported Format */
-@@ -346,8 +347,12 @@ struct fman {
- unsigned long fifo_offset;
- size_t fifo_size;
-
-+#ifdef CONFIG_PPC
- u32 liodn_base[64];
- u32 liodn_offset[64];
-+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ u32 sp_icids[64];
-+#endif
-
- struct fman_dts_params dts_params;
- };
---- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
-+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
-@@ -123,11 +123,13 @@
- #define DTSEC_ECNTRL_R100M 0x00000008
- #define DTSEC_ECNTRL_QSGMIIM 0x00000001
-
-+#define TCTRL_TTSE 0x00000040
- #define TCTRL_GTS 0x00000020
-
- #define RCTRL_PAL_MASK 0x001f0000
- #define RCTRL_PAL_SHIFT 16
- #define RCTRL_GHTX 0x00000400
-+#define RCTRL_RTSE 0x00000040
- #define RCTRL_GRS 0x00000020
- #define RCTRL_MPROM 0x00000008
- #define RCTRL_RSF 0x00000004
-@@ -1116,6 +1118,50 @@ int dtsec_add_hash_mac_address(struct fm
-
- return 0;
- }
-+
-+int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
-+{
-+ u32 tmp;
-+ struct dtsec_regs __iomem *regs = dtsec->regs;
-+
-+ if (!is_init_done(dtsec->dtsec_drv_param))
-+ return -EINVAL;
-+
-+ tmp = ioread32be(&regs->rctrl);
-+ if (enable)
-+ tmp |= RCTRL_MPROM;
-+ else
-+ tmp &= ~RCTRL_MPROM;
-+
-+ iowrite32be(tmp, &regs->rctrl);
-+
-+ return 0;
-+}
-+
-+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
-+{
-+ struct dtsec_regs __iomem *regs = dtsec->regs;
-+ u32 rctrl, tctrl;
-+
-+ if (!is_init_done(dtsec->dtsec_drv_param))
-+ return -EINVAL;
-+
-+ rctrl = ioread32be(&regs->rctrl);
-+ tctrl = ioread32be(&regs->tctrl);
-+
-+ if (enable) {
-+ rctrl |= RCTRL_RTSE;
-+ tctrl |= TCTRL_TTSE;
-+ } else {
-+ rctrl &= ~RCTRL_RTSE;
-+ tctrl &= ~TCTRL_TTSE;
-+ }
-+
-+ iowrite32be(rctrl, &regs->rctrl);
-+ iowrite32be(tctrl, &regs->tctrl);
-+
-+ return 0;
-+}
-
- int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
- {
---- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
-+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
-@@ -55,5 +55,7 @@ int dtsec_set_exception(struct fman_mac
- int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
- int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
- int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
-+int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
-+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
-
- #endif /* __DTSEC_H */
---- a/drivers/net/ethernet/freescale/fman/fman_memac.c
-+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
-@@ -350,6 +350,7 @@ struct fman_mac {
- struct fman_rev_info fm_rev_info;
- bool basex_if;
- struct phy_device *pcsphy;
-+ bool allmulti_enabled;
- };
-
- static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
-@@ -940,6 +941,34 @@ int memac_add_hash_mac_address(struct fm
- return 0;
- }
-
-+int memac_set_allmulti(struct fman_mac *memac, bool enable)
-+{
-+ u32 entry;
-+ struct memac_regs __iomem *regs = memac->regs;
-+
-+ if (!is_init_done(memac->memac_drv_param))
-+ return -EINVAL;
-+
-+ if (enable) {
-+ for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
-+ iowrite32be(entry | HASH_CTRL_MCAST_EN,
-+ &regs->hashtable_ctrl);
-+ } else {
-+ for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
-+ iowrite32be(entry & ~HASH_CTRL_MCAST_EN,
-+ &regs->hashtable_ctrl);
-+ }
-+
-+ memac->allmulti_enabled = enable;
-+
-+ return 0;
-+}
-+
-+int memac_set_tstamp(struct fman_mac *memac, bool enable)
-+{
-+ return 0; /* Always enabled. */
-+}
-+
- int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
- {
- struct memac_regs __iomem *regs = memac->regs;
-@@ -963,8 +992,12 @@ int memac_del_hash_mac_address(struct fm
- break;
- }
- }
-- if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
-- iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
-+
-+ if (!memac->allmulti_enabled) {
-+ if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
-+ iowrite32be(hash & ~HASH_CTRL_MCAST_EN,
-+ &regs->hashtable_ctrl);
-+ }
-
- return 0;
- }
---- a/drivers/net/ethernet/freescale/fman/fman_memac.h
-+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
-@@ -57,5 +57,7 @@ int memac_set_exception(struct fman_mac
- enum fman_mac_exceptions exception, bool enable);
- int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
- int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-+int memac_set_allmulti(struct fman_mac *memac, bool enable);
-+int memac_set_tstamp(struct fman_mac *memac, bool enable);
-
- #endif /* __MEMAC_H */
---- a/drivers/net/ethernet/freescale/fman/fman_port.c
-+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
-@@ -1347,8 +1347,10 @@ int fman_port_config(struct fman_port *p
- switch (port->port_type) {
- case FMAN_PORT_TYPE_RX:
- set_rx_dflt_cfg(port, params);
-+ /* fall through */
- case FMAN_PORT_TYPE_TX:
- set_tx_dflt_cfg(port, params, &port->dts_params);
-+ /* fall through */
- default:
- set_dflt_cfg(port, params);
- }
-@@ -1728,6 +1730,20 @@ u32 fman_port_get_qman_channel_id(struct
- }
- EXPORT_SYMBOL(fman_port_get_qman_channel_id);
-
-+/**
-+ * fman_port_get_device
-+ * port: Pointer to the FMan port device
-+ *
-+ * Get the 'struct device' associated to the specified FMan port device
-+ *
-+ * Return: pointer to associated 'struct device'
-+ */
-+struct device *fman_port_get_device(struct fman_port *port)
-+{
-+ return port->dev;
-+}
-+EXPORT_SYMBOL(fman_port_get_device);
-+
- int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
- {
- if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
-@@ -1739,6 +1755,18 @@ int fman_port_get_hash_result_offset(str
- }
- EXPORT_SYMBOL(fman_port_get_hash_result_offset);
-
-+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
-+{
-+ if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
-+ return -EINVAL;
-+
-+ *tstamp = be64_to_cpu(*(__be64 *)(data +
-+ port->buffer_offsets.time_stamp_offset));
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(fman_port_get_tstamp);
-+
- static int fman_port_probe(struct platform_device *of_dev)
- {
- struct fman_port *port;
---- a/drivers/net/ethernet/freescale/fman/fman_port.h
-+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
-@@ -153,6 +153,10 @@ u32 fman_port_get_qman_channel_id(struct
-
- int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
-
-+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
-+
- struct fman_port *fman_port_bind(struct device *dev);
-
-+struct device *fman_port_get_device(struct fman_port *port);
-+
- #endif /* __FMAN_PORT_H */
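
fman_port_get_tstamp() above captures the contract between the FMan port layer and the Ethernet driver: the hardware writes a raw big-endian 64-bit timestamp into the buffer prefix at time_stamp_offset, and the driver byte-swaps it and treats the result as nanoseconds (ns_to_ktime() in the Tx confirmation and Rx paths earlier in this patch). Here is a standalone sketch of that extraction; the offset and the sample value are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* The FMan stores the timestamp big-endian; fold the eight prefix bytes
 * back into a host-order 64-bit value, as be64_to_cpu() does in the driver. */
static uint64_t be64_to_host(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t prefix[64] = { 0 };
	size_t time_stamp_offset = 8;            /* invented for the example */
	uint64_t ns = 1590000000123456789ULL;    /* sample timestamp, in ns  */

	/* Pretend the hardware wrote it big-endian into the buffer prefix */
	for (int i = 0; i < 8; i++)
		prefix[time_stamp_offset + i] = (uint8_t)(ns >> (8 * (7 - i)));

	uint64_t got = be64_to_host(prefix + time_stamp_offset);

	printf("%llu ns = %llu.%09llu s\n",
	       (unsigned long long)got,
	       (unsigned long long)(got / 1000000000ULL),
	       (unsigned long long)(got % 1000000000ULL));
	return 0;
}

Note that the Rx/Tx port buffer layouts earlier in this patch flip pass_time_stamp from false to true precisely so that this prefix field gets populated.
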
---- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
-+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
-@@ -44,6 +44,7 @@
- #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
-
- /* Command and Configuration Register (COMMAND_CONFIG) */
-+#define CMD_CFG_EN_TIMESTAMP 0x00100000
- #define CMD_CFG_NO_LEN_CHK 0x00020000
- #define CMD_CFG_PAUSE_IGNORE 0x00000100
- #define CMF_CFG_CRC_FWD 0x00000040
-@@ -217,6 +218,7 @@ struct fman_mac {
- struct tgec_cfg *cfg;
- void *fm;
- struct fman_rev_info fm_rev_info;
-+ bool allmulti_enabled;
- };
-
- static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
-@@ -564,6 +566,49 @@ int tgec_add_hash_mac_address(struct fma
- return 0;
- }
-
-+int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
-+{
-+ u32 entry;
-+ struct tgec_regs __iomem *regs = tgec->regs;
-+
-+ if (!is_init_done(tgec->cfg))
-+ return -EINVAL;
-+
-+ if (enable) {
-+ for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
-+ iowrite32be(entry | TGEC_HASH_MCAST_EN,
-+ &regs->hashtable_ctrl);
-+ } else {
-+ for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
-+ iowrite32be(entry & ~TGEC_HASH_MCAST_EN,
-+ &regs->hashtable_ctrl);
-+ }
-+
-+ tgec->allmulti_enabled = enable;
-+
-+ return 0;
-+}
-+
-+int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
-+{
-+ struct tgec_regs __iomem *regs = tgec->regs;
-+ u32 tmp;
-+
-+ if (!is_init_done(tgec->cfg))
-+ return -EINVAL;
-+
-+ tmp = ioread32be(&regs->command_config);
-+
-+ if (enable)
-+ tmp |= CMD_CFG_EN_TIMESTAMP;
-+ else
-+ tmp &= ~CMD_CFG_EN_TIMESTAMP;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+
-+ return 0;
-+}
-+
- int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
- {
- struct tgec_regs __iomem *regs = tgec->regs;
-@@ -591,9 +636,12 @@ int tgec_del_hash_mac_address(struct fma
- break;
- }
- }
-- if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
-- iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
-- &regs->hashtable_ctrl);
-+
-+ if (!tgec->allmulti_enabled) {
-+ if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
-+ iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
-+ &regs->hashtable_ctrl);
-+ }
-
- return 0;
- }
---- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
-+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
-@@ -51,5 +51,7 @@ int tgec_set_exception(struct fman_mac *
- int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
- int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
- int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
-+int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
-+int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
-
- #endif /* __TGEC_H */
---- a/drivers/net/ethernet/freescale/fman/mac.c
-+++ b/drivers/net/ethernet/freescale/fman/mac.c
-@@ -57,9 +57,7 @@ struct mac_priv_s {
- struct device *dev;
- void __iomem *vaddr;
- u8 cell_index;
-- phy_interface_t phy_if;
- struct fman *fman;
-- struct device_node *phy_node;
- struct device_node *internal_phy_node;
- /* List of multicast addresses */
- struct list_head mc_addr_list;
-@@ -106,7 +104,7 @@ static void set_fman_mac_params(struct m
- resource_size(mac_dev->res));
- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
- params->max_speed = priv->max_speed;
-- params->phy_if = priv->phy_if;
-+ params->phy_if = mac_dev->phy_if;
- params->basex_if = false;
- params->mac_id = priv->cell_index;
- params->fm = (void *)priv->fman;
-@@ -419,15 +417,12 @@ void fman_get_pause_cfg(struct mac_devic
- }
- EXPORT_SYMBOL(fman_get_pause_cfg);
-
--static void adjust_link_void(struct net_device *net_dev)
-+static void adjust_link_void(struct mac_device *mac_dev)
- {
- }
-
--static void adjust_link_dtsec(struct net_device *net_dev)
-+static void adjust_link_dtsec(struct mac_device *mac_dev)
- {
-- struct device *dev = net_dev->dev.parent;
-- struct dpaa_eth_data *eth_data = dev->platform_data;
-- struct mac_device *mac_dev = eth_data->mac_dev;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
-@@ -444,14 +439,12 @@ static void adjust_link_dtsec(struct net
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
-- netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
-+ dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
-+ err);
- }
-
--static void adjust_link_memac(struct net_device *net_dev)
-+static void adjust_link_memac(struct mac_device *mac_dev)
- {
-- struct device *dev = net_dev->dev.parent;
-- struct dpaa_eth_data *eth_data = dev->platform_data;
-- struct mac_device *mac_dev = eth_data->mac_dev;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
-@@ -463,60 +456,12 @@ static void adjust_link_memac(struct net
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
-- netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
--}
--
--/* Initializes driver's PHY state, and attaches to the PHY.
-- * Returns 0 on success.
-- */
--static struct phy_device *init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev,
-- void (*adj_lnk)(struct net_device *))
--{
-- struct phy_device *phy_dev;
-- struct mac_priv_s *priv = mac_dev->priv;
--
-- phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
-- priv->phy_if);
-- if (!phy_dev) {
-- netdev_err(net_dev, "Could not connect to PHY\n");
-- return NULL;
-- }
--
-- /* Remove any features not supported by the controller */
-- phy_dev->supported &= mac_dev->if_support;
-- /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-- * as most of the PHY drivers do not enable them by default.
-- */
-- phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-- phy_dev->advertising = phy_dev->supported;
--
-- mac_dev->phy_dev = phy_dev;
--
-- return phy_dev;
--}
--
--static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev)
--{
-- return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
--}
--
--static struct phy_device *tgec_init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev)
--{
-- return init_phy(net_dev, mac_dev, adjust_link_void);
--}
--
--static struct phy_device *memac_init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev)
--{
-- return init_phy(net_dev, mac_dev, &adjust_link_memac);
-+ dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
-+ err);
- }
-
- static void setup_dtsec(struct mac_device *mac_dev)
- {
-- mac_dev->init_phy = dtsec_init_phy;
- mac_dev->init = dtsec_initialization;
- mac_dev->set_promisc = dtsec_set_promiscuous;
- mac_dev->change_addr = dtsec_modify_mac_address;
-@@ -525,17 +470,18 @@ static void setup_dtsec(struct mac_devic
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
- mac_dev->set_exception = dtsec_set_exception;
-+ mac_dev->set_allmulti = dtsec_set_allmulti;
-+ mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
--
-+ mac_dev->adjust_link = adjust_link_dtsec;
- mac_dev->priv->enable = dtsec_enable;
- mac_dev->priv->disable = dtsec_disable;
- }
-
- static void setup_tgec(struct mac_device *mac_dev)
- {
-- mac_dev->init_phy = tgec_init_phy;
- mac_dev->init = tgec_initialization;
- mac_dev->set_promisc = tgec_set_promiscuous;
- mac_dev->change_addr = tgec_modify_mac_address;
-@@ -544,17 +490,18 @@ static void setup_tgec(struct mac_device
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
- mac_dev->set_exception = tgec_set_exception;
-+ mac_dev->set_allmulti = tgec_set_allmulti;
-+ mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
--
-+ mac_dev->adjust_link = adjust_link_void;
- mac_dev->priv->enable = tgec_enable;
- mac_dev->priv->disable = tgec_disable;
- }
-
- static void setup_memac(struct mac_device *mac_dev)
- {
-- mac_dev->init_phy = memac_init_phy;
- mac_dev->init = memac_initialization;
- mac_dev->set_promisc = memac_set_promiscuous;
- mac_dev->change_addr = memac_modify_mac_address;
-@@ -563,10 +510,12 @@ static void setup_memac(struct mac_devic
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
- mac_dev->set_exception = memac_set_exception;
-+ mac_dev->set_allmulti = memac_set_allmulti;
-+ mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
--
-+ mac_dev->adjust_link = adjust_link_memac;
- mac_dev->priv->enable = memac_enable;
- mac_dev->priv->disable = memac_disable;
- }
-@@ -599,8 +548,7 @@ static const u16 phy2speed[] = {
- };
-
- static struct platform_device *dpaa_eth_add_device(int fman_id,
-- struct mac_device *mac_dev,
-- struct device_node *node)
-+ struct mac_device *mac_dev)
- {
- struct platform_device *pdev;
- struct dpaa_eth_data data;
-@@ -613,19 +561,15 @@ static struct platform_device *dpaa_eth_
- data.mac_dev = mac_dev;
- data.mac_hw_id = priv->cell_index;
- data.fman_hw_id = fman_id;
-- data.mac_node = node;
-
- mutex_lock(&eth_lock);
--
- pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
- if (!pdev) {
- ret = -ENOMEM;
- goto no_mem;
- }
-
-- pdev->dev.of_node = node;
- pdev->dev.parent = priv->dev;
-- set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
-
- ret = platform_device_add_data(pdev, &data, sizeof(data));
- if (ret)
-@@ -676,7 +620,6 @@ static int mac_probe(struct platform_dev
- mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
- if (!mac_dev) {
- err = -ENOMEM;
-- dev_err(dev, "devm_kzalloc() = %d\n", err);
- goto _return;
- }
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-@@ -706,9 +649,6 @@ static int mac_probe(struct platform_dev
- goto _return;
- }
-
-- /* Register mac_dev */
-- dev_set_drvdata(dev, mac_dev);
--
- INIT_LIST_HEAD(&priv->mc_addr_list);
-
- /* Get the FM node */
-@@ -717,7 +657,7 @@ static int mac_probe(struct platform_dev
- dev_err(dev, "of_get_parent(%pOF) failed\n",
- mac_node);
- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
-
- of_dev = of_find_device_by_node(dev_node);
-@@ -751,7 +691,7 @@ static int mac_probe(struct platform_dev
- if (err < 0) {
- dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
- mac_node, err);
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
-
- mac_dev->res = __devm_request_region(dev,
-@@ -761,7 +701,7 @@ static int mac_probe(struct platform_dev
- if (!mac_dev->res) {
- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- err = -EBUSY;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
-
- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
-@@ -769,16 +709,12 @@ static int mac_probe(struct platform_dev
- if (!priv->vaddr) {
- dev_err(dev, "devm_ioremap() failed\n");
- err = -EIO;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
-
- if (!of_device_is_available(mac_node)) {
-- devm_iounmap(dev, priv->vaddr);
-- __devm_release_region(dev, fman_get_mem_region(priv->fman),
-- res.start, res.end + 1 - res.start);
-- devm_kfree(dev, mac_dev);
-- dev_set_drvdata(dev, NULL);
-- return -ENODEV;
-+ err = -ENODEV;
-+ goto _return_of_get_parent;
- }
-
- /* Get the cell-index */
-@@ -786,7 +722,7 @@ static int mac_probe(struct platform_dev
- if (err) {
- dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
- priv->cell_index = (u8)val;
-
-@@ -795,7 +731,7 @@ static int mac_probe(struct platform_dev
- if (!mac_addr) {
- dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
- memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
-
-@@ -805,14 +741,14 @@ static int mac_probe(struct platform_dev
- dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
- mac_node);
- err = nph;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
-
- if (nph != ARRAY_SIZE(mac_dev->port)) {
- dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
- mac_node);
- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
-
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
-@@ -851,13 +787,13 @@ static int mac_probe(struct platform_dev
- mac_node);
- phy_if = PHY_INTERFACE_MODE_SGMII;
- }
-- priv->phy_if = phy_if;
-+ mac_dev->phy_if = phy_if;
-
-- priv->speed = phy2speed[priv->phy_if];
-+ priv->speed = phy2speed[mac_dev->phy_if];
- priv->max_speed = priv->speed;
- mac_dev->if_support = DTSEC_SUPPORTED;
- /* We don't support half-duplex in SGMII mode */
-- if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
-+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
- mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
- SUPPORTED_100baseT_Half);
-
-@@ -866,30 +802,31 @@ static int mac_probe(struct platform_dev
- mac_dev->if_support |= SUPPORTED_1000baseT_Full;
-
- /* The 10G interface only supports one mode */
-- if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
-+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->if_support = SUPPORTED_10000baseT_Full;
-
- /* Get the rest of the PHY information */
-- priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
-- if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
-+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
-+ if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
- struct phy_device *phy;
-
- err = of_phy_register_fixed_link(mac_node);
- if (err)
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
-
- priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
- GFP_KERNEL);
- if (!priv->fixed_link) {
- err = -ENOMEM;
-- goto _return_dev_set_drvdata;
-+ goto _return_of_get_parent;
- }
-
-- priv->phy_node = of_node_get(mac_node);
-- phy = of_phy_find_device(priv->phy_node);
-+ mac_dev->phy_node = of_node_get(mac_node);
-+ phy = of_phy_find_device(mac_dev->phy_node);
- if (!phy) {
- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-+ of_node_put(mac_dev->phy_node);
-+ goto _return_of_get_parent;
- }
-
- priv->fixed_link->link = phy->link;
-@@ -904,8 +841,8 @@ static int mac_probe(struct platform_dev
- err = mac_dev->init(mac_dev);
- if (err < 0) {
- dev_err(dev, "mac_dev->init() = %d\n", err);
-- of_node_put(priv->phy_node);
-- goto _return_dev_set_drvdata;
-+ of_node_put(mac_dev->phy_node);
-+ goto _return_of_get_parent;
- }
-
- /* pause frame autonegotiation enabled */
-@@ -926,7 +863,7 @@ static int mac_probe(struct platform_dev
- mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
- mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
-
-- priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
-+ priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
- if (IS_ERR(priv->eth_dev)) {
- dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
- priv->cell_index);
-@@ -937,9 +874,8 @@ static int mac_probe(struct platform_dev
-
- _return_of_node_put:
- of_node_put(dev_node);
--_return_dev_set_drvdata:
-+_return_of_get_parent:
- kfree(priv->fixed_link);
-- dev_set_drvdata(dev, NULL);
- _return:
- return err;
- }
---- a/drivers/net/ethernet/freescale/fman/mac.h
-+++ b/drivers/net/ethernet/freescale/fman/mac.h
-@@ -50,6 +50,8 @@ struct mac_device {
- struct fman_port *port[2];
- u32 if_support;
- struct phy_device *phy_dev;
-+ phy_interface_t phy_if;
-+ struct device_node *phy_node;
-
- bool autoneg_pause;
- bool rx_pause_req;
-@@ -57,14 +59,16 @@ struct mac_device {
- bool rx_pause_active;
- bool tx_pause_active;
- bool promisc;
-+ bool allmulti;
-
-- struct phy_device *(*init_phy)(struct net_device *net_dev,
-- struct mac_device *mac_dev);
- int (*init)(struct mac_device *mac_dev);
- int (*start)(struct mac_device *mac_dev);
- int (*stop)(struct mac_device *mac_dev);
-+ void (*adjust_link)(struct mac_device *mac_dev);
- int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
- int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
-+ int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
-+ int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
- int (*set_multi)(struct net_device *net_dev,
- struct mac_device *mac_dev);
- int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
-@@ -82,7 +86,6 @@ struct mac_device {
- };
-
- struct dpaa_eth_data {
-- struct device_node *mac_node;
- struct mac_device *mac_dev;
- int mac_hw_id;
- int fman_hw_id;
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
-@@ -0,0 +1,184 @@
-+menuconfig FSL_SDK_DPAA_ETH
-+ tristate "DPAA Ethernet"
-+ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH
-+ select PHYLIB
-+ help
-+ Data Path Acceleration Architecture Ethernet driver,
-+ supporting the Freescale QorIQ chips.
-+ Depends on the Freescale Buffer Manager and Queue Manager
-+ drivers and the Frame Manager driver.
-+
-+if FSL_SDK_DPAA_ETH
-+
-+config FSL_DPAA_HOOKS
-+ bool "DPAA Ethernet driver hooks"
-+
-+config FSL_DPAA_CEETM
-+ bool "DPAA CEETM QoS"
-+ depends on NET_SCHED
-+ default n
-+ help
-+ Enable QoS offloading support through the CEETM hardware block.
-+
-+config FSL_DPAA_CEETM_CCS_THRESHOLD_1G
-+ hex "CEETM egress congestion threshold on 1G ports"
-+ depends on FSL_DPAA_CEETM
-+ range 0x1000 0x10000000
-+ default "0x00005000"
-+ help
-+ The size in bytes of the CEETM egress Class Congestion State threshold on 1G ports.
-+ The threshold needs to be configured keeping in mind the following factors:
-+ - A threshold too large will buffer frames for a long time in the TX queues,
-+ when a small shaping rate is configured. This will cause buffer pool depletion
-+ or out of memory errors. This in turn will cause frame loss on RX;
-+ - A threshold too small will cause unnecessary frame loss by entering
-+ congestion too often.
-+
-+config FSL_DPAA_CEETM_CCS_THRESHOLD_10G
-+ hex "CEETM egress congestion threshold on 10G ports"
-+ depends on FSL_DPAA_CEETM
-+ range 0x1000 0x20000000
-+ default "0x00032000"
-+ help
-+ The size in bytes of the CEETM egress Class Congestion State threshold on 10G ports.
-+ See FSL_DPAA_CEETM_CCS_THRESHOLD_1G for details.
-+
-+config FSL_DPAA_OFFLINE_PORTS
-+ bool "Offline Ports support"
-+ depends on FSL_SDK_DPAA_ETH
-+ default y
-+ help
-+ The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
-+ most of the functionality of the regular, online ports, except they receive their
-+ frames from a core or an accelerator on the SoC, via QMan frame queues,
-+ rather than directly from the network.
-+ Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
-+ any online FMan port. They deliver the processed frames to frame queues, according
-+ to the applied PCD configurations.
-+
-+	  Choosing this feature will not impact the functionality or performance of the system,
-+	  so it is safe to enable it.
-+
-+config FSL_DPAA_ADVANCED_DRIVERS
-+ bool "Advanced DPAA Ethernet drivers"
-+ depends on FSL_SDK_DPAA_ETH
-+ default y
-+ help
-+ Besides the standard DPAA Ethernet driver the DPAA Proxy initialization driver
-+ is needed to support advanced scenarios. Select this to also build the advanced
-+ drivers.
-+
-+config FSL_DPAA_ETH_JUMBO_FRAME
-+ bool "Optimize for jumbo frames"
-+ default n
-+ help
-+	  Optimize the DPAA Ethernet driver throughput for large-frame
-+	  termination traffic (e.g. 4K and above).
-+ NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
-+ is set to 9600 bytes.
-+	  Using this option in combination with small frames significantly
-+	  increases the driver's memory footprint and may even deplete
-+	  the system memory. Also, the skb truesize is altered and messages
-+ from the stack that warn against this are bypassed.
-+
-+config FSL_DPAA_TS
-+ bool "Linux compliant timestamping"
-+ depends on FSL_SDK_DPAA_ETH
-+ default n
-+ help
-+ Enable Linux API compliant timestamping support.
-+
-+config FSL_DPAA_1588
-+ bool "IEEE 1588-compliant timestamping"
-+ depends on FSL_SDK_DPAA_ETH
-+ select FSL_DPAA_TS
-+ default n
-+ help
-+ Enable IEEE1588 support code.
-+
-+config FSL_DPAA_ETH_MAX_BUF_COUNT
-+	int "Maximum number of buffers in private bpool"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 64 2048
-+ default "128"
-+ help
-+	  The maximum number of buffers allocated by default in the DPAA-Ethernet private port's
-+ buffer pool. One needn't normally modify this, as it has probably been tuned for performance
-+ already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
-+
-+config FSL_DPAA_ETH_REFILL_THRESHOLD
-+ int "Private bpool refill threshold"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
-+ default "80"
-+ help
-+ The DPAA-Ethernet driver will start replenishing buffer pools whose count
-+ falls below this threshold. This must be related to DPAA_ETH_MAX_BUF_COUNT. One needn't normally
-+ modify this value unless one has very specific performance reasons.
-+
-+config FSL_DPAA_CS_THRESHOLD_1G
-+ hex "Egress congestion threshold on 1G ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 0x1000 0x10000000
-+ default "0x06000000"
-+ help
-+ The size in bytes of the egress Congestion State notification threshold on 1G ports.
-+ The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
-+ (e.g. by sending UDP datagrams at "while(1) speed"),
-+ and the larger the frame size, the more acute the problem.
-+ So we have to find a balance between these factors:
-+ - avoiding the device staying congested for a prolonged time (risking
-+	    that the netdev watchdog fires - see also the tx_timeout module param);
-+ - affecting performance of protocols such as TCP, which otherwise
-+ behave well under the congestion notification mechanism;
-+ - preventing the Tx cores from tightly-looping (as if the congestion
-+ threshold was too low to be effective);
-+ - running out of memory if the CS threshold is set too high.
-+
-+config FSL_DPAA_CS_THRESHOLD_10G
-+ hex "Egress congestion threshold on 10G ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ range 0x1000 0x20000000
-+ default "0x10000000"
-+ help
-+ The size in bytes of the egress Congestion State notification threshold on 10G ports.
-+
-+config FSL_DPAA_INGRESS_CS_THRESHOLD
-+ hex "Ingress congestion threshold on FMan ports"
-+ depends on FSL_SDK_DPAA_ETH
-+ default "0x10000000"
-+ help
-+ The size in bytes of the ingress tail-drop threshold on FMan ports.
-+ Traffic piling up above this value will be rejected by QMan and discarded by FMan.
-+
-+config FSL_DPAA_ETH_DEBUGFS
-+ bool "DPAA Ethernet debugfs interface"
-+ depends on DEBUG_FS && FSL_SDK_DPAA_ETH
-+ default y
-+ help
-+ This option compiles debugfs code for the DPAA Ethernet driver.
-+
-+config FSL_DPAA_ETH_DEBUG
-+ bool "DPAA Ethernet Debug Support"
-+ depends on FSL_SDK_DPAA_ETH
-+ default n
-+ help
-+ This option compiles debug code for the DPAA Ethernet driver.
-+
-+config FSL_DPAA_DBG_LOOP
-+ bool "DPAA Ethernet Debug loopback"
-+ depends on FSL_DPAA_ETH_DEBUGFS
-+ default n
-+ help
-+	  This option allows diverting all received traffic on a certain interface A towards a
-+ selected interface B. This option is used to benchmark the HW + Ethernet driver in
-+ isolation from the Linux networking stack. The loops are controlled by debugfs entries,
-+	  one for each interface. By default all loops are disabled (target value is -1). For example, to
-+	  change the loop setting for interface 4 and divert all received traffic to interface 5,
-+	  write the Tx interface number to the receive interface's debugfs file:
-+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
-+ 4->-1
-+ # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
-+ # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
-+ 4->5
-+endif # FSL_SDK_DPAA_ETH
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
-@@ -0,0 +1,45 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+# Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+ccflags-y += -I$(NET_DPA)
-+
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
-+
-+fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
-+ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
-+fsl_dpa-objs += dpaa_debugfs.o
-+endif
-+ifeq ($(CONFIG_FSL_DPAA_1588),y)
-+fsl_dpa-objs += dpaa_1588.o
-+endif
-+ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
-+ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
-+fsl_dpa-objs += dpaa_eth_ceetm.o
-+endif
-+
-+fsl_mac-objs += mac.o mac-api.o
-+
-+# Advanced drivers
-+ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
-+
-+fsl_advanced-objs += dpaa_eth_base.o
-+# support for multiple drivers per kernel module comes in kernel 3.14
-+# so we are forced to generate several modules for the advanced drivers
-+fsl_proxy-objs += dpaa_eth_proxy.o
-+
-+ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
-+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
-+
-+fsl_oh-objs += offline_port.o
-+endif
-+endif
-+
-+# Needed by the tracing framework
-+CFLAGS_dpaa_eth.o := -I$(src)
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
-@@ -0,0 +1,580 @@
-+/* Copyright (C) 2011 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2009 IXXAT Automation, GmbH
-+ *
-+ * DPAA Ethernet Driver -- IEEE 1588 interface functionality
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along
-+ * with this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+#include <linux/io.h>
-+#include <linux/device.h>
-+#include <linux/fs.h>
-+#include <linux/vmalloc.h>
-+#include <linux/spinlock.h>
-+#include <linux/ip.h>
-+#include <linux/ipv6.h>
-+#include <linux/udp.h>
-+#include <asm/div64.h>
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#include "dpaa_1588.h"
-+#include "mac.h"
-+
-+static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+
-+ circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
-+ if (!circ_buf->buf)
-+ return 1;
-+
-+ circ_buf->head = 0;
-+ circ_buf->tail = 0;
-+ ptp_buf->size = size;
-+ spin_lock_init(&ptp_buf->ptp_lock);
-+
-+ return 0;
-+}
-+
-+static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+
-+ circ_buf->head = 0;
-+ circ_buf->tail = 0;
-+ ptp_buf->size = size;
-+}
-+
-+static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
-+ struct dpa_ptp_data *data)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+ int size = ptp_buf->size;
-+ struct dpa_ptp_data *tmp;
-+ unsigned long flags;
-+ int head, tail;
-+
-+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
-+
-+ head = circ_buf->head;
-+ tail = circ_buf->tail;
-+
-+ if (CIRC_SPACE(head, tail, size) <= 0)
-+ circ_buf->tail = (tail + 1) & (size - 1);
-+
-+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
-+ memcpy(tmp, data, sizeof(struct dpa_ptp_data));
-+
-+ circ_buf->head = (head + 1) & (size - 1);
-+
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+
-+ return 0;
-+}
-+
-+static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
-+ struct dpa_ptp_ident *src)
-+{
-+ int ret;
-+
-+ if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
-+ return 0;
-+
-+ if ((dst->netw_prot == src->netw_prot)
-+ || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
-+ if (dst->seq_id != src->seq_id)
-+ return 0;
-+
-+ ret = memcmp(dst->snd_port_id, src->snd_port_id,
-+ DPA_PTP_SOURCE_PORT_LENGTH);
-+ if (ret)
-+ return 0;
-+ else
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
-+ struct dpa_ptp_ident *ident,
-+ struct dpa_ptp_time *ts)
-+{
-+ struct circ_buf *circ_buf = &ptp_buf->circ_buf;
-+ int size = ptp_buf->size;
-+ int head, tail, idx;
-+ unsigned long flags;
-+ struct dpa_ptp_data *tmp, *tmp2;
-+ struct dpa_ptp_ident *tmp_ident;
-+
-+ spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
-+
-+ head = circ_buf->head;
-+ tail = idx = circ_buf->tail;
-+
-+ if (CIRC_CNT(head, tail, size) == 0) {
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+ return 1;
-+ }
-+
-+ while (idx != head) {
-+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
-+ tmp_ident = &tmp->ident;
-+ if (dpa_ptp_is_ident_match(tmp_ident, ident))
-+ break;
-+ idx = (idx + 1) & (size - 1);
-+ }
-+
-+ if (idx == head) {
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+ return 1;
-+ }
-+
-+ ts->sec = tmp->ts.sec;
-+ ts->nsec = tmp->ts.nsec;
-+
-+ if (idx != tail) {
-+ if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
-+ tail = circ_buf->tail =
-+ (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
-+ }
-+
-+ while (CIRC_CNT(idx, tail, size) > 0) {
-+ tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
-+ idx = (idx - 1) & (size - 1);
-+ tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
-+ *tmp = *tmp2;
-+ }
-+ }
-+ circ_buf->tail = (tail + 1) & (size - 1);
-+
-+ spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
-+
-+ return 0;
-+}
-+
-+/* Parse the PTP packets
-+ *
-+ * The PTP header can be found in an IPv4 packet, an IPv6 packet or in
-+ * an IEEE 802.3 Ethernet frame. This function returns the position of
-+ * the PTP header or NULL if none is found
-+ */
-+static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
-+{
-+ u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
-+ u8 *ptp_loc = NULL;
-+ u8 msg_type;
-+ u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
-+ struct iphdr *iph;
-+ struct udphdr *udph;
-+ struct ipv6hdr *ipv6h;
-+
-+	/* when we can receive S/G frames we need to check that the data we
-+	 * want to access is in the linear skb buffer
-+ */
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ *eth_type = *((u16 *)pos);
-+
-+ /* Check if inner tag is here */
-+ if (*eth_type == ETH_P_8021Q) {
-+ access_len += DPA_VLAN_TAG_LEN;
-+
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ pos += DPA_VLAN_TAG_LEN;
-+ *eth_type = *((u16 *)pos);
-+ }
-+
-+ pos += DPA_ETYPE_LEN;
-+
-+ switch (*eth_type) {
-+ /* Transport of PTP over Ethernet */
-+ case ETH_P_1588:
-+ ptp_loc = pos;
-+
-+ if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
-+ return NULL;
-+
-+ msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
-+ if ((msg_type == PTP_MSGTYPE_SYNC)
-+ || (msg_type == PTP_MSGTYPE_DELREQ)
-+ || (msg_type == PTP_MSGTYPE_PDELREQ)
-+ || (msg_type == PTP_MSGTYPE_PDELRESP))
-+ return ptp_loc;
-+ break;
-+ /* Transport of PTP over IPv4 */
-+ case ETH_P_IP:
-+ iph = (struct iphdr *)pos;
-+ access_len += sizeof(struct iphdr);
-+
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+		if (iph->protocol != IPPROTO_UDP)
-+ return NULL;
-+
-+ access_len += iph->ihl * 4 - sizeof(struct iphdr) +
-+ sizeof(struct udphdr);
-+
-+ if (!pskb_may_pull(skb, access_len))
-+ return NULL;
-+
-+ pos += iph->ihl * 4;
-+ udph = (struct udphdr *)pos;
-+ if (ntohs(udph->dest) != 319)
-+ return NULL;
-+ ptp_loc = pos + sizeof(struct udphdr);
-+ break;
-+ /* Transport of PTP over IPv6 */
-+ case ETH_P_IPV6:
-+ ipv6h = (struct ipv6hdr *)pos;
-+
-+ access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
-+
-+	if (ipv6h->nexthdr != IPPROTO_UDP)
-+ return NULL;
-+
-+ pos += sizeof(struct ipv6hdr);
-+ udph = (struct udphdr *)pos;
-+ if (ntohs(udph->dest) != 319)
-+ return NULL;
-+ ptp_loc = pos + sizeof(struct udphdr);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return ptp_loc;
-+}
-+
-+static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data, enum port_type rx_tx,
-+ struct dpa_ptp_data *ptp_data)
-+{
-+ u64 nsec;
-+ u32 mod;
-+ u8 *ptp_loc;
-+ u16 eth_type;
-+
-+ ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
-+ if (!ptp_loc)
-+ return -EINVAL;
-+
-+ switch (eth_type) {
-+ case ETH_P_IP:
-+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
-+ break;
-+ case ETH_P_IPV6:
-+ ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
-+ break;
-+ case ETH_P_1588:
-+ ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
-+ return -EINVAL;
-+
-+ ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
-+ ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
-+ ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
-+ memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
-+ DPA_PTP_SOURCE_PORT_LENGTH);
-+
-+ nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
-+ mod = do_div(nsec, NANOSEC_PER_SECOND);
-+ ptp_data->ts.sec = nsec;
-+ ptp_data->ts.nsec = mod;
-+
-+ return 0;
-+}
-+
-+void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data)
-+{
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+ struct dpa_ptp_data ptp_tx_data;
-+
-+ if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
-+ return;
-+
-+ dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
-+}
-+
-+void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data)
-+{
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+ struct dpa_ptp_data ptp_rx_data;
-+
-+ if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
-+ return;
-+
-+ dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
-+}
-+
-+static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
-+ struct dpa_ptp_ident *ident,
-+ struct dpa_ptp_time *ts)
-+{
-+ struct dpa_ptp_tsu *tsu = ptp_tsu;
-+ struct dpa_ptp_time tmp;
-+ int flag;
-+
-+ flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
-+ if (!flag) {
-+ ts->sec = tmp.sec;
-+ ts->nsec = tmp.nsec;
-+ return 0;
-+ }
-+
-+ return -1;
-+}
-+
-+static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
-+ struct dpa_ptp_ident *ident,
-+ struct dpa_ptp_time *ts)
-+{
-+ struct dpa_ptp_tsu *tsu = ptp_tsu;
-+ struct dpa_ptp_time tmp;
-+ int flag;
-+
-+ flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
-+ if (!flag) {
-+ ts->sec = tmp.sec;
-+ ts->nsec = tmp.nsec;
-+ return 0;
-+ }
-+
-+ return -1;
-+}
-+
-+static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
-+ struct dpa_ptp_time *cnt_time)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u64 tmp, fiper;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
-+
-+ /* TMR_FIPER1 will pulse every second after ALARM1 expired */
-+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
-+ fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
-+ if (mac_dev->fm_rtc_set_alarm)
-+ mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
-+ 0, tmp);
-+ if (mac_dev->fm_rtc_set_fiper)
-+ mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
-+ 0, fiper);
-+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
-+}
-+
-+static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
-+ struct dpa_ptp_time *curr_time)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u64 tmp;
-+ u32 mod;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
-+ &tmp);
-+
-+ mod = do_div(tmp, NANOSEC_PER_SECOND);
-+ curr_time->sec = (u32)tmp;
-+ curr_time->nsec = mod;
-+}
-+
-+static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
-+ struct dpa_ptp_time *cnt_time)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u64 tmp;
-+
-+ tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
-+
-+ if (mac_dev->fm_rtc_set_cnt)
-+ mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
-+ tmp);
-+
-+ /* Restart fiper two seconds later */
-+ cnt_time->sec += 2;
-+ cnt_time->nsec = 0;
-+ dpa_set_fiper_alarm(tsu, cnt_time);
-+}
-+
-+static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+ u32 drift;
-+
-+ if (mac_dev->fm_rtc_get_drift)
-+ mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
-+ &drift);
-+
-+ *addend = drift;
-+}
-+
-+static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
-+{
-+ struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_set_drift)
-+ mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
-+ addend);
-+}
-+
-+static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
-+{
-+ dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
-+ dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
-+}
-+
-+int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ struct dpa_ptp_data ptp_data;
-+ struct dpa_ptp_data *ptp_data_user;
-+ struct dpa_ptp_time act_time;
-+ u32 addend;
-+ int retval = 0;
-+
-+ if (!tsu || !tsu->valid)
-+ return -ENODEV;
-+
-+ switch (cmd) {
-+ case PTP_ENBL_TXTS_IOCTL:
-+ tsu->hwts_tx_en_ioctl = 1;
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
-+ if (mac_dev->ptp_enable)
-+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
-+ break;
-+ case PTP_DSBL_TXTS_IOCTL:
-+ tsu->hwts_tx_en_ioctl = 0;
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
-+ if (mac_dev->ptp_disable)
-+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
-+ break;
-+ case PTP_ENBL_RXTS_IOCTL:
-+ tsu->hwts_rx_en_ioctl = 1;
-+ break;
-+ case PTP_DSBL_RXTS_IOCTL:
-+ tsu->hwts_rx_en_ioctl = 0;
-+ break;
-+ case PTP_GET_RX_TIMESTAMP:
-+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
-+ if (copy_from_user(&ptp_data.ident,
-+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
-+ return -EINVAL;
-+
-+ if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
-+ return -EAGAIN;
-+
-+ if (copy_to_user((void __user *)&ptp_data_user->ts,
-+ &ptp_data.ts, sizeof(ptp_data.ts)))
-+ return -EFAULT;
-+ break;
-+ case PTP_GET_TX_TIMESTAMP:
-+ ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
-+ if (copy_from_user(&ptp_data.ident,
-+ &ptp_data_user->ident, sizeof(ptp_data.ident)))
-+ return -EINVAL;
-+
-+ if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
-+ return -EAGAIN;
-+
-+ if (copy_to_user((void __user *)&ptp_data_user->ts,
-+ &ptp_data.ts, sizeof(ptp_data.ts)))
-+ return -EFAULT;
-+ break;
-+ case PTP_GET_TIME:
-+ dpa_get_curr_cnt(tsu, &act_time);
-+ if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
-+ return -EFAULT;
-+ break;
-+ case PTP_SET_TIME:
-+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
-+ return -EINVAL;
-+ dpa_set_1588cnt(tsu, &act_time);
-+ break;
-+ case PTP_GET_ADJ:
-+ dpa_get_drift(tsu, &addend);
-+ if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
-+ return -EFAULT;
-+ break;
-+ case PTP_SET_ADJ:
-+ if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
-+ return -EINVAL;
-+ dpa_set_drift(tsu, addend);
-+ break;
-+ case PTP_SET_FIPER_ALARM:
-+ if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
-+ return -EINVAL;
-+ dpa_set_fiper_alarm(tsu, &act_time);
-+ break;
-+ case PTP_CLEANUP_TS:
-+ dpa_flush_timestamp(tsu);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return retval;
-+}
-+
-+int dpa_ptp_init(struct dpa_priv_s *priv)
-+{
-+ struct dpa_ptp_tsu *tsu;
-+
-+ /* Allocate memory for PTP structure */
-+ tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
-+ if (!tsu)
-+ return -ENOMEM;
-+
-+ tsu->valid = TRUE;
-+ tsu->dpa_priv = priv;
-+
-+ dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
-+ dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
-+
-+ priv->tsu = tsu;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_ptp_init);
-+
-+void dpa_ptp_cleanup(struct dpa_priv_s *priv)
-+{
-+ struct dpa_ptp_tsu *tsu = priv->tsu;
-+
-+ tsu->valid = FALSE;
-+ vfree(tsu->rx_timestamps.circ_buf.buf);
-+ vfree(tsu->tx_timestamps.circ_buf.buf);
-+
-+ kfree(tsu);
-+}
-+EXPORT_SYMBOL(dpa_ptp_cleanup);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
-@@ -0,0 +1,138 @@
-+/* Copyright (C) 2011 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along
-+ * with this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+#ifndef __DPAA_1588_H__
-+#define __DPAA_1588_H__
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/circ_buf.h>
-+#include <linux/fsl_qman.h>
-+
-+#define DEFAULT_PTP_RX_BUF_SZ 256
-+#define DEFAULT_PTP_TX_BUF_SZ 256
-+
-+/* 1588 private ioctl calls */
-+#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
-+#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
-+#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
-+#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
-+#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
-+#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
-+#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
-+#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
-+#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
-+#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
-+#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
-+#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
-+
-+/* PTP V2 message type */
-+enum {
-+ PTP_MSGTYPE_SYNC = 0x0,
-+ PTP_MSGTYPE_DELREQ = 0x1,
-+ PTP_MSGTYPE_PDELREQ = 0x2,
-+ PTP_MSGTYPE_PDELRESP = 0x3,
-+ PTP_MSGTYPE_FLWUP = 0x8,
-+ PTP_MSGTYPE_DELRESP = 0x9,
-+ PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
-+ PTP_MSGTYPE_ANNOUNCE = 0xB,
-+ PTP_MSGTYPE_SGNLNG = 0xC,
-+ PTP_MSGTYPE_MNGMNT = 0xD,
-+};
-+
-+/* Byte offset of data in the PTP V2 headers */
-+#define PTP_OFFS_MSG_TYPE 0
-+#define PTP_OFFS_VER_PTP 1
-+#define PTP_OFFS_MSG_LEN 2
-+#define PTP_OFFS_DOM_NMB 4
-+#define PTP_OFFS_FLAGS 6
-+#define PTP_OFFS_CORFIELD 8
-+#define PTP_OFFS_SRCPRTID 20
-+#define PTP_OFFS_SEQ_ID 30
-+#define PTP_OFFS_CTRL 32
-+#define PTP_OFFS_LOGMEAN 33
-+
-+#define PTP_IP_OFFS 14
-+#define PTP_UDP_OFFS 34
-+#define PTP_HEADER_OFFS 42
-+#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
-+#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
-+#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
-+#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
-+
-+/* 1588-2008 network protocol enumeration values */
-+#define DPA_PTP_PROT_IPV4 1
-+#define DPA_PTP_PROT_IPV6 2
-+#define DPA_PTP_PROT_802_3 3
-+#define DPA_PTP_PROT_DONTCARE 0xFFFF
-+
-+#define DPA_PTP_SOURCE_PORT_LENGTH 10
-+#define DPA_PTP_HEADER_SZE 34
-+#define DPA_ETYPE_LEN 2
-+#define DPA_VLAN_TAG_LEN 4
-+#define NANOSEC_PER_SECOND 1000000000
-+
-+/* The threshold between the currently found timestamp and the oldest one */
-+#define TS_ACCUMULATION_THRESHOLD 50
-+
-+/* Struct needed to identify a timestamp */
-+struct dpa_ptp_ident {
-+ u8 version;
-+ u8 msg_type;
-+ u16 netw_prot;
-+ u16 seq_id;
-+ u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
-+};
-+
-+/* Timestamp format in 1588-2008 */
-+struct dpa_ptp_time {
-+	u64 sec; /* only 48 bits used */
-+ u32 nsec;
-+};
-+
-+/* needed for timestamp data over ioctl */
-+struct dpa_ptp_data {
-+ struct dpa_ptp_ident ident;
-+ struct dpa_ptp_time ts;
-+};
-+
-+struct dpa_ptp_circ_buf {
-+ struct circ_buf circ_buf;
-+ u32 size;
-+ spinlock_t ptp_lock;
-+};
-+
-+/* PTP TSU control structure */
-+struct dpa_ptp_tsu {
-+ struct dpa_priv_s *dpa_priv;
-+ bool valid;
-+ struct dpa_ptp_circ_buf rx_timestamps;
-+ struct dpa_ptp_circ_buf tx_timestamps;
-+
-+ /* HW timestamping over ioctl enabled flag */
-+ int hwts_tx_en_ioctl;
-+ int hwts_rx_en_ioctl;
-+};
-+
-+extern int dpa_ptp_init(struct dpa_priv_s *priv);
-+extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
-+extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data);
-+extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb, void *data);
-+extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
-@@ -0,0 +1,180 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
-+#include <linux/debugfs.h>
-+#include "dpaa_debugfs.h"
-+#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
-+
-+#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
-+#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
-+
-+static struct dentry *dpa_debugfs_root;
-+
-+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
-+static ssize_t dpa_loop_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off);
-+
-+static const struct file_operations dpa_debugfs_lp_fops = {
-+ .open = dpa_debugfs_loop_open,
-+ .write = dpa_loop_write,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
-+{
-+ struct dpa_priv_s *priv;
-+
-+ BUG_ON(offset == NULL);
-+
-+ priv = netdev_priv((struct net_device *)file->private);
-+ seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
-+
-+ return 0;
-+}
-+
-+static int user_input_convert(const char __user *user_buf, size_t count,
-+ long *val)
-+{
-+ char buf[12];
-+
-+ if (count > sizeof(buf) - 1)
-+ return -EINVAL;
-+ if (copy_from_user(buf, user_buf, count))
-+ return -EFAULT;
-+ buf[count] = '\0';
-+ if (kstrtol(buf, 0, val))
-+ return -EINVAL;
-+ return 0;
-+}
-+
-+static ssize_t dpa_loop_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off)
-+{
-+ struct dpa_priv_s *priv;
-+ struct net_device *netdev;
-+ struct seq_file *sf;
-+ int ret;
-+ long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+
-+ sf = (struct seq_file *)f->private_data;
-+ netdev = (struct net_device *)sf->private;
-+ priv = netdev_priv(netdev);
-+
-+ priv->loop_to = ((val < 0) || (val > 20)) ? -1 : val;
-+
-+ return count;
-+}
-+
-+static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
-+{
-+ int _errno;
-+ const struct net_device *net_dev;
-+
-+ _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
-+ if (unlikely(_errno < 0)) {
-+ net_dev = (struct net_device *)inode->i_private;
-+
-+ if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
-+ netdev_err(net_dev, "single_open() = %d\n",
-+ _errno);
-+ }
-+
-+ return _errno;
-+}
-+
-+
-+int dpa_netdev_debugfs_create(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ static int cnt;
-+ char loop_file_name[100];
-+
-+ if (unlikely(dpa_debugfs_root == NULL)) {
-+ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__,
-+ "root debugfs missing, possible module ordering issue");
-+ return -ENOMEM;
-+ }
-+
-+ sprintf(loop_file_name, "eth%d_loop", ++cnt);
-+ priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
-+ S_IRUGO,
-+ dpa_debugfs_root,
-+ net_dev,
-+ &dpa_debugfs_lp_fops);
-+ if (unlikely(priv->debugfs_loop_file == NULL)) {
-+ netdev_err(net_dev, "debugfs_create_file(%s/%s)",
-+ dpa_debugfs_root->d_iname,
-+ loop_file_name);
-+
-+ return -ENOMEM;
-+ }
-+ return 0;
-+}
-+
-+void dpa_netdev_debugfs_remove(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+
-+ debugfs_remove(priv->debugfs_loop_file);
-+}
-+
-+int __init dpa_debugfs_module_init(void)
-+{
-+ int _errno = 0;
-+
-+ pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
-+
-+ dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
-+
-+ if (unlikely(dpa_debugfs_root == NULL)) {
-+ _errno = -ENOMEM;
-+ pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__);
-+ pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
-+ DPA_ETH_DEBUGFS_ROOT, _errno);
-+ }
-+
-+ return _errno;
-+}
-+
-+void __exit dpa_debugfs_module_exit(void)
-+{
-+ debugfs_remove(dpa_debugfs_root);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
-@@ -0,0 +1,43 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPAA_DEBUGFS_H_
-+#define DPAA_DEBUGFS_H_
-+
-+#include <linux/netdevice.h>
-+#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
-+
-+int dpa_netdev_debugfs_create(struct net_device *net_dev);
-+void dpa_netdev_debugfs_remove(struct net_device *net_dev);
-+int __init dpa_debugfs_module_init(void);
-+void __exit dpa_debugfs_module_exit(void);
-+
-+#endif /* DPAA_DEBUGFS_H_ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
-@@ -0,0 +1,1223 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_mdio.h>
-+#include <linux/of_net.h>
-+#include <linux/kthread.h>
-+#include <linux/io.h>
-+#include <linux/if_arp.h> /* arp_hdr_len() */
-+#include <linux/if_vlan.h> /* VLAN_HLEN */
-+#include <linux/icmp.h> /* struct icmphdr */
-+#include <linux/ip.h> /* struct iphdr */
-+#include <linux/ipv6.h> /* struct ipv6hdr */
-+#include <linux/udp.h> /* struct udphdr */
-+#include <linux/tcp.h> /* struct tcphdr */
-+#include <linux/net.h> /* net_ratelimit() */
-+#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
-+#include <linux/highmem.h>
-+#include <linux/percpu.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/fsl_bman.h>
-+#ifdef CONFIG_SOC_BUS
-+#include <linux/sys_soc.h> /* soc_device_match */
-+#endif
-+
-+#include "fsl_fman.h"
-+#include "fm_ext.h"
-+#include "fm_port_ext.h"
-+
-+#include "mac.h"
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+#include "dpaa_debugfs.h"
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
-+ * using trace events only need to #include <trace/events/sched.h>
-+ */
-+#define CREATE_TRACE_POINTS
-+#include "dpaa_eth_trace.h"
-+
-+#define DPA_NAPI_WEIGHT 64
-+
-+/* Valid checksum indication */
-+#define DPA_CSUM_VALID 0xFFFF
-+
-+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
-+
-+MODULE_DESCRIPTION(DPA_DESCRIPTION);
-+
-+static uint8_t debug = -1;
-+module_param(debug, byte, S_IRUGO);
-+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
-+
-+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
-+static uint16_t tx_timeout = 1000;
-+module_param(tx_timeout, ushort, S_IRUGO);
-+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
-+
-+static const char rtx[][3] = {
-+ [RX] = "RX",
-+ [TX] = "TX"
-+};
-+
-+#ifndef CONFIG_PPC
-+bool dpaa_errata_a010022;
-+EXPORT_SYMBOL(dpaa_errata_a010022);
-+#endif
-+
-+/* BM */
-+
-+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
-+
-+static uint8_t dpa_priv_common_bpid;
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+struct net_device *dpa_loop_netdevs[20];
-+#endif
-+
-+#ifdef CONFIG_PM
-+
-+static int dpaa_suspend(struct device *dev)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ int err = 0;
-+
-+ net_dev = dev_get_drvdata(dev);
-+
-+ if (net_dev->flags & IFF_UP) {
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+ priv->mac_dev->get_mac_handle(mac_dev), true);
-+ if (err) {
-+ netdev_err(net_dev, "set_wol() = %d\n", err);
-+ goto set_wol_failed;
-+ }
-+ }
-+
-+ err = fm_port_suspend(mac_dev->port_dev[RX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
-+ goto rx_port_suspend_failed;
-+ }
-+
-+ err = fm_port_suspend(mac_dev->port_dev[TX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
-+ goto tx_port_suspend_failed;
-+ }
-+ }
-+
-+ return 0;
-+
-+tx_port_suspend_failed:
-+ fm_port_resume(mac_dev->port_dev[RX]);
-+rx_port_suspend_failed:
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+ priv->mac_dev->get_mac_handle(mac_dev), false);
-+ }
-+set_wol_failed:
-+ return err;
-+}
-+
-+static int dpaa_resume(struct device *dev)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ int err = 0;
-+
-+ net_dev = dev_get_drvdata(dev);
-+
-+ if (net_dev->flags & IFF_UP) {
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ err = fm_mac_resume(mac_dev->get_mac_handle(mac_dev));
-+ if (err) {
-+ netdev_err(net_dev, "fm_mac_resume = %d\n", err);
-+ goto resume_failed;
-+ }
-+
-+ err = fm_port_resume(mac_dev->port_dev[TX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
-+ goto resume_failed;
-+ }
-+
-+ err = fm_port_resume(mac_dev->port_dev[RX]);
-+ if (err) {
-+ netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
-+ goto resume_failed;
-+ }
-+
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
-+ priv->mac_dev->get_mac_handle(mac_dev), false);
-+ if (err) {
-+ netdev_err(net_dev, "set_wol() = %d\n", err);
-+ goto resume_failed;
-+ }
-+ }
-+ }
-+
-+ return 0;
-+
-+resume_failed:
-+ return err;
-+}
-+
-+static const struct dev_pm_ops dpaa_pm_ops = {
-+ .suspend = dpaa_suspend,
-+ .resume = dpaa_resume,
-+};
-+
-+#define DPAA_PM_OPS (&dpaa_pm_ops)
-+
-+#else /* CONFIG_PM */
-+
-+#define DPAA_PM_OPS NULL
-+
-+#endif /* CONFIG_PM */
-+
-+/* Checks whether the checksum field in Parse Results array is valid
-+ * (equals 0xFFFF) and increments the .cse counter otherwise
-+ */
-+static inline void
-+dpa_csum_validation(const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd)
-+{
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ void *frm = phys_to_virt(addr);
-+ fm_prs_result_t *parse_result;
-+
-+ if (unlikely(!frm))
-+ return;
-+
-+ dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
-+ DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
-+
-+ parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
-+
-+ if (parse_result->cksum != DPA_CSUM_VALID)
-+ percpu_priv->rx_errors.cse++;
-+}
-+
-+static void _dpa_rx_error(struct net_device *net_dev,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ /* limit common, possibly innocuous Rx FIFO Overflow errors'
-+ * interference with zero-loss convergence benchmark results.
-+ */
-+ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
-+ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
-+ else
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
-+ fd->status & FM_FD_STAT_RX_ERRORS);
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ if (dpaa_eth_hooks.rx_error &&
-+ dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
-+ /* it's up to the hook to perform resource cleanup */
-+ return;
-+#endif
-+ percpu_priv->stats.rx_errors++;
-+
-+ if (fd->status & FM_PORT_FRM_ERR_DMA)
-+ percpu_priv->rx_errors.dme++;
-+ if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
-+ percpu_priv->rx_errors.fpe++;
-+ if (fd->status & FM_PORT_FRM_ERR_SIZE)
-+ percpu_priv->rx_errors.fse++;
-+ if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
-+ percpu_priv->rx_errors.phe++;
-+ if (fd->status & FM_FD_STAT_L4CV)
-+ dpa_csum_validation(priv, percpu_priv, fd);
-+
-+ dpa_fd_release(net_dev, fd);
-+}
-+
-+static void _dpa_tx_error(struct net_device *net_dev,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ struct sk_buff *skb;
-+
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_warn(net_dev, "FD status = 0x%08x\n",
-+ fd->status & FM_FD_STAT_TX_ERRORS);
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ if (dpaa_eth_hooks.tx_error &&
-+ dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
-+ /* now the hook must ensure proper cleanup */
-+ return;
-+#endif
-+ percpu_priv->stats.tx_errors++;
-+
-+ /* If we intended the buffers from this frame to go into the bpools
-+ * when the FMan transmit was done, we need to put it in manually.
-+ */
-+ if (fd->bpid != 0xff) {
-+ dpa_fd_release(net_dev, fd);
-+ return;
-+ }
-+
-+ skb = _dpa_cleanup_tx_fd(priv, fd);
-+ dev_kfree_skb(skb);
-+}
-+
-+/* Helper function to factor out frame validation logic on all Rx paths. Its
-+ * purpose is to extract from the Parse Results structure information about
-+ * the integrity of the frame, its checksum, the length of the parsed headers
-+ * and whether the frame is suitable for GRO.
-+ *
-+ * Assumes no parser errors, since any error frame is dropped before this
-+ * function is called.
-+ *
-+ * @skb will have its ip_summed field overwritten;
-+ * @use_gro will only be written with 0, if the frame is definitely not
-+ * GRO-able; otherwise, it will be left unchanged;
-+ * @hdr_size will be written with a safe value, at least the size of the
-+ * headers' length.
-+ */
-+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
-+ const struct qm_fd *fd,
-+ struct sk_buff *skb, int *use_gro)
-+{
-+ if (fd->status & FM_FD_STAT_L4CV) {
-+ /* The parser has run and performed L4 checksum validation.
-+ * We know there were no parser errors (and implicitly no
-+ * L4 csum error), otherwise we wouldn't be here.
-+ */
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+ /* Don't go through GRO for certain types of traffic that
-+ * we know are not GRO-able, such as dgram-based protocols.
-+ * In the worst-case scenarios, such as small-pkt terminating
-+ * UDP, the extra GRO processing would be overkill.
-+ *
-+ * The only protocol the Parser supports that is also GRO-able
-+ * is currently TCP.
-+ */
-+ if (!fm_l4_frame_is_tcp(parse_results))
-+ *use_gro = 0;
-+
-+ return;
-+ }
-+
-+ /* We're here because either the parser didn't run or the L4 checksum
-+ * was not verified. This may include the case of a UDP frame with
-+ * checksum zero or an L4 proto other than TCP/UDP
-+ */
-+ skb->ip_summed = CHECKSUM_NONE;
-+
-+ /* Bypass GRO for unknown traffic or if no PCDs are applied */
-+ *use_gro = 0;
-+}
-+
-+int dpaa_eth_poll(struct napi_struct *napi, int budget)
-+{
-+ struct dpa_napi_portal *np =
-+ container_of(napi, struct dpa_napi_portal, napi);
-+
-+ int cleaned = qman_p_poll_dqrr(np->p, budget);
-+
-+ if (cleaned < budget) {
-+ int tmp;
-+ napi_complete(napi);
-+ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
-+ DPA_BUG_ON(tmp);
-+ }
-+
-+ return cleaned;
-+}
-+EXPORT_SYMBOL(dpaa_eth_poll);
-+
-+static void __hot _dpa_tx_conf(struct net_device *net_dev,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ struct sk_buff *skb;
-+
-+ /* do we need the timestamp for the error frames? */
-+
-+ if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_warn(net_dev, "FD status = 0x%08x\n",
-+ fd->status & FM_FD_STAT_TX_ERRORS);
-+
-+ percpu_priv->stats.tx_errors++;
-+ }
-+
-+ /* hopefully we need not get the timestamp before the hook */
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
-+ fd, fqid) == DPAA_ETH_STOLEN)
-+ /* it's the hook that must now perform cleanup */
-+ return;
-+#endif
-+ /* This might not perfectly reflect the reality, if the core dequeuing
-+ * the Tx confirmation is different from the one that did the enqueue,
-+ * but at least it'll show up in the total count.
-+ */
-+ percpu_priv->tx_confirm++;
-+
-+ skb = _dpa_cleanup_tx_fd(priv, fd);
-+
-+ dev_kfree_skb(skb);
-+}
-+
-+enum qman_cb_dqrr_result
-+priv_rx_error_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int *count_ptr;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
-+
-+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
-+ return qman_cb_dqrr_stop;
-+
-+ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
-+ /* Unable to refill the buffer pool due to insufficient
-+ * system memory. Just release the frame back into the pool,
-+ * otherwise we'll soon end up with an empty buffer pool.
-+ */
-+ dpa_fd_release(net_dev, &dq->fd);
-+ else
-+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+
-+enum qman_cb_dqrr_result __hot
-+priv_rx_default_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int *count_ptr;
-+ struct dpa_bp *dpa_bp;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+ dpa_bp = priv->dpa_bp;
-+
-+ /* Trace the Rx fd */
-+ trace_dpa_rx_fd(net_dev, fq, &dq->fd);
-+
-+ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
-+
-+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
-+ return qman_cb_dqrr_stop;
-+
-+ /* Vale of plenty: make sure we didn't run out of buffers */
-+
-+ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
-+ /* Unable to refill the buffer pool due to insufficient
-+ * system memory. Just release the frame back into the pool,
-+ * otherwise we'll soon end up with an empty buffer pool.
-+ */
-+ dpa_fd_release(net_dev, &dq->fd);
-+ else
-+ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
-+ count_ptr);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+enum qman_cb_dqrr_result
-+priv_tx_conf_error_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
-+ return qman_cb_dqrr_stop;
-+
-+ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+enum qman_cb_dqrr_result __hot
-+priv_tx_conf_default_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+
-+ /* Trace the fd */
-+ trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
-+
-+ /* Non-migratable context, safe to use raw_cpu_ptr */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
-+ return qman_cb_dqrr_stop;
-+
-+ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+void priv_ern(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ struct net_device *net_dev;
-+ const struct dpa_priv_s *priv;
-+ struct sk_buff *skb;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct qm_fd fd = msg->ern.fd;
-+
-+ net_dev = ((struct dpa_fq *)fq)->net_dev;
-+ priv = netdev_priv(net_dev);
-+ /* Non-migratable context, safe to use raw_cpu_ptr */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ percpu_priv->stats.tx_dropped++;
-+ percpu_priv->stats.tx_fifo_errors++;
-+ count_ern(percpu_priv, msg);
-+
-+ /* If we intended this buffer to go into the pool
-+ * when the FM was done, we need to put it in
-+ * manually.
-+ */
-+ if (msg->ern.fd.bpid != 0xff) {
-+ dpa_fd_release(net_dev, &fd);
-+ return;
-+ }
-+
-+ skb = _dpa_cleanup_tx_fd(priv, &fd);
-+ dev_kfree_skb_any(skb);
-+}
-+
-+const struct dpa_fq_cbs_t private_fq_cbs = {
-+ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
-+ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
-+ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
-+ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
-+ .egress_ern = { .cb = { .ern = priv_ern } }
-+};
-+EXPORT_SYMBOL(private_fq_cbs);
-+
-+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
-+{
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, j;
-+
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ for (j = 0; j < qman_portal_max; j++)
-+ napi_enable(&percpu_priv->np[j].napi);
-+ }
-+}
-+
-+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
-+{
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, j;
-+
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ for (j = 0; j < qman_portal_max; j++)
-+ napi_disable(&percpu_priv->np[j].napi);
-+ }
-+}
-+
-+static int __cold dpa_eth_priv_start(struct net_device *net_dev)
-+{
-+ int err;
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ dpaa_eth_napi_enable(priv);
-+
-+ err = dpa_start(net_dev);
-+ if (err < 0)
-+ dpaa_eth_napi_disable(priv);
-+
-+ return err;
-+}
-+
-+
-+
-+static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
-+{
-+ int _errno;
-+ struct dpa_priv_s *priv;
-+
-+ _errno = dpa_stop(net_dev);
-+ /* Allow NAPI to consume any frame still in the Rx/TxConfirm
-+ * ingress queues. This is to avoid a race between the current
-+ * context and ksoftirqd which could leave NAPI disabled while
-+ * in fact there's still Rx traffic to be processed.
-+ */
-+ usleep_range(5000, 10000);
-+
-+ priv = netdev_priv(net_dev);
-+ dpaa_eth_napi_disable(priv);
-+
-+ return _errno;
-+}
-+
-+#ifdef CONFIG_NET_POLL_CONTROLLER
-+static void dpaa_eth_poll_controller(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv =
-+ raw_cpu_ptr(priv->percpu_priv);
-+ struct qman_portal *p;
-+ const struct qman_portal_config *pc;
-+ struct dpa_napi_portal *np;
-+
-+ p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
-+ pc = qman_p_get_portal_config(p);
-+ np = &percpu_priv->np[pc->index];
-+
-+ qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
-+ qman_p_poll_dqrr(np->p, np->napi.weight);
-+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
-+}
-+#endif
-+
-+static const struct net_device_ops dpa_private_ops = {
-+ .ndo_open = dpa_eth_priv_start,
-+ .ndo_start_xmit = dpa_tx,
-+ .ndo_stop = dpa_eth_priv_stop,
-+ .ndo_tx_timeout = dpa_timeout,
-+ .ndo_get_stats64 = dpa_get_stats64,
-+ .ndo_set_mac_address = dpa_set_mac_address,
-+ .ndo_validate_addr = eth_validate_addr,
-+#ifdef CONFIG_FMAN_PFC
-+ .ndo_select_queue = dpa_select_queue,
-+#endif
-+ .ndo_set_rx_mode = dpa_set_rx_mode,
-+ .ndo_init = dpa_ndo_init,
-+ .ndo_set_features = dpa_set_features,
-+ .ndo_fix_features = dpa_fix_features,
-+ .ndo_do_ioctl = dpa_ioctl,
-+#ifdef CONFIG_NET_POLL_CONTROLLER
-+ .ndo_poll_controller = dpaa_eth_poll_controller,
-+#endif
-+};
-+
-+static int dpa_private_napi_add(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
-+
-+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
-+ qman_portal_max * sizeof(struct dpa_napi_portal),
-+ GFP_KERNEL);
-+
-+ if (unlikely(percpu_priv->np == NULL)) {
-+ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < qman_portal_max; i++)
-+ netif_napi_add(net_dev, &percpu_priv->np[i].napi,
-+ dpaa_eth_poll, DPA_NAPI_WEIGHT);
-+ }
-+
-+ return 0;
-+}
-+
-+void dpa_private_napi_del(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int i, cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
-+
-+ if (percpu_priv->np) {
-+ for (i = 0; i < qman_portal_max; i++)
-+ netif_napi_del(&percpu_priv->np[i].napi);
-+
-+ devm_kfree(net_dev->dev.parent, percpu_priv->np);
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(dpa_private_napi_del);
-+
-+static int dpa_private_netdev_init(struct net_device *net_dev)
-+{
-+ int i;
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ const uint8_t *mac_addr;
-+
-+ /* Although we access another CPU's private data here
-+ * we do it at initialization so it is safe
-+ */
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+ percpu_priv->net_dev = net_dev;
-+ }
-+
-+ net_dev->netdev_ops = &dpa_private_ops;
-+ mac_addr = priv->mac_dev->addr;
-+
-+ net_dev->mem_start = priv->mac_dev->res->start;
-+ net_dev->mem_end = priv->mac_dev->res->end;
-+
-+ /* Configure the maximum MTU according to the FMan's MAXFRM */
-+ net_dev->min_mtu = ETH_MIN_MTU;
-+ net_dev->max_mtu = dpa_get_max_mtu();
-+
-+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-+ NETIF_F_LLTX);
-+
-+ /* Advertise S/G and HIGHDMA support for private interfaces */
-+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
-+ /* Recent kernels enable GSO automatically if
-+ * we declare NETIF_F_SG. For conformity, we'll
-+ * still declare GSO explicitly.
-+ */
-+ net_dev->features |= NETIF_F_GSO;
-+
-+ /* Advertise GRO support */
-+ net_dev->features |= NETIF_F_GRO;
-+
-+ /* Advertise NETIF_F_HW_ACCEL_MQ to avoid Tx timeout warnings */
-+ net_dev->features |= NETIF_F_HW_ACCEL_MQ;
-+
-+#ifndef CONFIG_PPC
-+ /* Due to the A010022 FMan errata, we cannot use S/G frames. We need
-+ * to stop advertising S/G and GSO support.
-+ */
-+ if (unlikely(dpaa_errata_a010022)) {
-+ net_dev->hw_features &= ~NETIF_F_SG;
-+ net_dev->features &= ~NETIF_F_GSO;
-+ }
-+#endif
-+
-+ return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
-+}
-+
-+static struct dpa_bp * __cold
-+dpa_priv_bp_probe(struct device *dev)
-+{
-+ struct dpa_bp *dpa_bp;
-+
-+ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
-+ if (unlikely(dpa_bp == NULL)) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
-+ dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
-+
-+ dpa_bp->seed_cb = dpa_bp_priv_seed;
-+ dpa_bp->free_buf_cb = _dpa_bp_free_pf;
-+
-+ return dpa_bp;
-+}
-+
-+/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
-+ * We won't be sending congestion notifications to FMan; for now, we just use
-+ * this CGR to generate enqueue rejections to FMan in order to drop the frames
-+ * before they reach our ingress queues and eat up memory.
-+ */
-+static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
-+{
-+ struct qm_mcc_initcgr initcgr;
-+ u32 cs_th;
-+ int err;
-+
-+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
-+ if (err < 0) {
-+ pr_err("Error %d allocating CGR ID\n", err);
-+ goto out_error;
-+ }
-+
-+ /* Enable CS TD, but disable Congestion State Change Notifications. */
-+ initcgr.we_mask = QM_CGR_WE_CS_THRES;
-+ initcgr.cgr.cscn_en = QM_CGR_EN;
-+ cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
-+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
-+
-+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
-+ initcgr.cgr.cstd_en = QM_CGR_EN;
-+
-+ /* This is actually a hack, because this CGR will be associated with
-+ * our affine SWP. However, we'll place our ingress FQs in it.
-+ */
-+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
-+ &initcgr);
-+ if (err < 0) {
-+ pr_err("Error %d creating ingress CGR with ID %d\n", err,
-+ priv->ingress_cgr.cgrid);
-+ qman_release_cgrid(priv->ingress_cgr.cgrid);
-+ goto out_error;
-+ }
-+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
-+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
-+
-+ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
-+ * range), but we have no common initialization path between the
-+ * different variants of the DPAA Eth driver, so we do it here rather
-+ * than modifying every variant other than "private Eth".
-+ */
-+ priv->use_ingress_cgr = true;
-+
-+out_error:
-+ return err;
-+}
-+
-+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
-+ size_t count)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ int i;
-+
-+ if (netif_msg_probe(priv))
-+ dev_dbg(net_dev->dev.parent,
-+ "Using private BM buffer pools\n");
-+
-+ priv->bp_count = count;
-+
-+ for (i = 0; i < count; i++) {
-+ int err;
-+ err = dpa_bp_alloc(&dpa_bp[i], net_dev->dev.parent);
-+ if (err < 0) {
-+ dpa_bp_free(priv);
-+ priv->dpa_bp = NULL;
-+ return err;
-+ }
-+
-+ priv->dpa_bp = &dpa_bp[i];
-+ }
-+
-+ dpa_priv_common_bpid = priv->dpa_bp->bpid;
-+ return 0;
-+}
-+
-+static const struct of_device_id dpa_match[];
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+static int dpa_new_loop_id(void)
-+{
-+ static int if_id;
-+
-+ return if_id++;
-+}
-+#endif
-+
-+static int
-+dpaa_eth_priv_probe(struct platform_device *_of_dev)
-+{
-+ int err = 0, i, channel;
-+ struct device *dev;
-+ struct device_node *dpa_node;
-+ struct dpa_bp *dpa_bp;
-+ size_t count = 1;
-+ struct net_device *net_dev = NULL;
-+ struct dpa_priv_s *priv = NULL;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct fm_port_fqs port_fqs;
-+ struct dpa_buffer_layout_s *buf_layout = NULL;
-+ struct mac_device *mac_dev;
-+
-+ dev = &_of_dev->dev;
-+
-+ dpa_node = dev->of_node;
-+
-+ if (!of_device_is_available(dpa_node))
-+ return -ENODEV;
-+
-+ /* Get the buffer pools assigned to this interface;
-+ * the default pool probing code is run only once
-+ */
-+ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
-+ dpa_priv_bp_probe(dev);
-+ if (IS_ERR(dpa_bp))
-+ return PTR_ERR(dpa_bp);
-+
-+ /* Allocate this early, so we can store relevant information in
-+ * the private area (needed by 1588 code in dpa_mac_probe)
-+ */
-+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
-+ if (!net_dev) {
-+ dev_err(dev, "alloc_etherdev_mq() failed\n");
-+ goto alloc_etherdev_mq_failed;
-+ }
-+
-+ /* Do this here, so we can be verbose early */
-+ SET_NETDEV_DEV(net_dev, dev);
-+ dev_set_drvdata(dev, net_dev);
-+
-+ priv = netdev_priv(net_dev);
-+ priv->net_dev = net_dev;
-+ strcpy(priv->if_type, "private");
-+
-+ priv->msg_enable = netif_msg_init(debug, -1);
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ priv->loop_id = dpa_new_loop_id();
-+ priv->loop_to = -1; /* disabled by default */
-+ dpa_loop_netdevs[priv->loop_id] = net_dev;
-+#endif
-+
-+ mac_dev = dpa_mac_probe(_of_dev);
-+ if (IS_ERR(mac_dev) || !mac_dev) {
-+ err = PTR_ERR(mac_dev);
-+ goto mac_probe_failed;
-+ }
-+
-+ /* We have physical ports, so we need to establish
-+ * the buffer layout.
-+ */
-+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
-+ GFP_KERNEL);
-+ if (!buf_layout) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ goto alloc_failed;
-+ }
-+ dpa_set_buffers_layout(mac_dev, buf_layout);
-+
-+ /* For private ports, we need to compute the size of the default
-+ * buffer pool based on the FMan port buffer layout; also update
-+ * the maximum buffer size for private ports if necessary.
-+ */
-+ dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
-+ /* We only want to use jumbo frame optimization if we actually have
-+ * L2 MAX FRM set for jumbo frames as well.
-+ */
-+ if (fm_get_max_frm() < 9600)
-+ dev_warn(dev,
-+ "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
-+#endif
-+
-+ INIT_LIST_HEAD(&priv->dpa_fq_list);
-+
-+ memset(&port_fqs, 0, sizeof(port_fqs));
-+
-+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
-+ if (!err)
-+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
-+ &port_fqs, true, TX);
-+
-+ if (err < 0)
-+ goto fq_probe_failed;
-+
-+ /* bp init */
-+
-+ err = dpa_priv_bp_create(net_dev, dpa_bp, count);
-+
-+ if (err < 0)
-+ goto bp_create_failed;
-+
-+ priv->mac_dev = mac_dev;
-+
-+ channel = dpa_get_channel();
-+
-+ if (channel < 0) {
-+ err = channel;
-+ goto get_channel_failed;
-+ }
-+
-+ priv->channel = (uint16_t)channel;
-+ dpaa_eth_add_channel(priv->channel);
-+
-+ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
-+
-+ /* Create a congestion group for this netdev, with
-+ * dynamically-allocated CGR ID.
-+ * Must be executed after probing the MAC, but before
-+ * assigning the egress FQs to the CGRs.
-+ */
-+ err = dpaa_eth_cgr_init(priv);
-+ if (err < 0) {
-+ dev_err(dev, "Error initializing CGR\n");
-+ goto tx_cgr_init_failed;
-+ }
-+ err = dpaa_eth_priv_ingress_cgr_init(priv);
-+ if (err < 0) {
-+ dev_err(dev, "Error initializing ingress CGR\n");
-+ goto rx_cgr_init_failed;
-+ }
-+
-+ /* Add the FQs to the interface, and make them active */
-+ err = dpa_fqs_init(dev, &priv->dpa_fq_list, false);
-+ if (err < 0)
-+ goto fq_alloc_failed;
-+
-+ priv->buf_layout = buf_layout;
-+ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
-+ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
-+
-+ /* All real interfaces need their ports initialized */
-+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
-+ buf_layout, dev);
-+
-+#ifdef CONFIG_FMAN_PFC
-+ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
-+ err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
-+ mac_dev->port_dev[TX], i, i);
-+ if (unlikely(err != 0)) {
-+ dev_err(dev, "Error maping PFC %u to WQ %u\n", i, i);
-+ goto pfc_mapping_failed;
-+ }
-+ }
-+#endif
-+
-+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
-+
-+ if (priv->percpu_priv == NULL) {
-+ dev_err(dev, "devm_alloc_percpu() failed\n");
-+ err = -ENOMEM;
-+ goto alloc_percpu_failed;
-+ }
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+ memset(percpu_priv, 0, sizeof(*percpu_priv));
-+ }
-+
-+ /* Initialize NAPI */
-+ err = dpa_private_napi_add(net_dev);
-+
-+ if (err < 0)
-+ goto napi_add_failed;
-+
-+ err = dpa_private_netdev_init(net_dev);
-+
-+ if (err < 0)
-+ goto netdev_init_failed;
-+
-+ dpaa_eth_sysfs_init(&net_dev->dev);
-+
-+#ifdef CONFIG_PM
-+ device_set_wakeup_capable(dev, true);
-+#endif
-+
-+ pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
-+
-+ return 0;
-+
-+netdev_init_failed:
-+napi_add_failed:
-+ dpa_private_napi_del(net_dev);
-+alloc_percpu_failed:
-+#ifdef CONFIG_FMAN_PFC
-+pfc_mapping_failed:
-+#endif
-+ dpa_fq_free(dev, &priv->dpa_fq_list);
-+fq_alloc_failed:
-+ qman_delete_cgr_safe(&priv->ingress_cgr);
-+ qman_release_cgrid(priv->ingress_cgr.cgrid);
-+rx_cgr_init_failed:
-+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
-+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-+tx_cgr_init_failed:
-+get_channel_failed:
-+ dpa_bp_free(priv);
-+bp_create_failed:
-+fq_probe_failed:
-+alloc_failed:
-+mac_probe_failed:
-+ dev_set_drvdata(dev, NULL);
-+ free_netdev(net_dev);
-+alloc_etherdev_mq_failed:
-+ if (atomic_read(&dpa_bp->refs) == 0)
-+ devm_kfree(dev, dpa_bp);
-+
-+ return err;
-+}
-+
-+static const struct of_device_id dpa_match[] = {
-+ {
-+ .compatible = "fsl,dpa-ethernet"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, dpa_match);
-+
-+static struct platform_driver dpa_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .of_match_table = dpa_match,
-+ .owner = THIS_MODULE,
-+ .pm = DPAA_PM_OPS,
-+ },
-+ .probe = dpaa_eth_priv_probe,
-+ .remove = dpa_remove
-+};
-+
-+#ifndef CONFIG_PPC
-+static bool __init __cold soc_has_errata_a010022(void)
-+{
-+#ifdef CONFIG_SOC_BUS
-+ const struct soc_device_attribute soc_msi_matches[] = {
-+ { .family = "QorIQ LS1043A",
-+ .data = NULL },
-+ { },
-+ };
-+
-+ if (soc_device_match(soc_msi_matches))
-+ return true;
-+
-+ return false;
-+#else
-+ return true; /* cannot identify SoC */
-+#endif
-+}
-+#endif
-+
-+static int __init __cold dpa_load(void)
-+{
-+ int _errno;
-+
-+ pr_info(DPA_DESCRIPTION "\n");
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ dpa_debugfs_module_init();
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+ /* initialise dpaa_eth mirror values */
-+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
-+ dpa_max_frm = fm_get_max_frm();
-+ dpa_num_cpus = num_possible_cpus();
-+
-+#ifndef CONFIG_PPC
-+ /* Detect if the current SoC requires the 4K alignment workaround */
-+ dpaa_errata_a010022 = soc_has_errata_a010022();
-+#endif
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
-+#endif
-+
-+ _errno = platform_driver_register(&dpa_driver);
-+ if (unlikely(_errno < 0)) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+ }
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ return _errno;
-+}
-+module_init(dpa_load);
-+
-+static void __exit __cold dpa_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ platform_driver_unregister(&dpa_driver);
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ dpa_debugfs_module_exit();
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+ /* Only one channel is used and it needs to be released after all
-+ * interfaces are removed.
-+ */
-+ dpa_release_channel();
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(dpa_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
-@@ -0,0 +1,674 @@
-+/* Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPA_H
-+#define __DPA_H
-+
-+#include <linux/netdevice.h>
-+#include <linux/fsl_qman.h> /* struct qman_fq */
-+
-+#include "fm_ext.h"
-+#include "dpaa_eth_trace.h"
-+
-+extern int dpa_rx_extra_headroom;
-+extern int dpa_max_frm;
-+extern int dpa_num_cpus;
-+
-+#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
-+#define dpa_get_max_frm() dpa_max_frm
-+
-+#define dpa_get_max_mtu() \
-+ (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
-+
-+#define __hot
-+
-+/* Simple enum of FQ types - used for array indexing */
-+enum port_type {RX, TX};
-+
-+/* TODO: This structure should be renamed & moved to the FMD wrapper */
-+struct dpa_buffer_layout_s {
-+ uint16_t priv_data_size;
-+ bool parse_results;
-+ bool time_stamp;
-+ bool hash_results;
-+ uint8_t manip_extra_space;
-+ uint16_t data_align;
-+};
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define DPA_BUG_ON(cond) BUG_ON(cond)
-+#else
-+#define DPA_BUG_ON(cond)
-+#endif
-+
-+#define DPA_TX_PRIV_DATA_SIZE 16
-+#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
-+#define DPA_TIME_STAMP_SIZE 8
-+#define DPA_HASH_RESULTS_SIZE 8
-+#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
-+ dpa_get_rx_extra_headroom())
-+
-+#define FM_FD_STAT_RX_ERRORS \
-+ (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
-+ FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
-+ FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
-+ FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
-+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
-+
-+#define FM_FD_STAT_TX_ERRORS \
-+ (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
-+ FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
-+
-+#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
-+/* The raw buffer size must be cacheline aligned.
-+ * Normally we use 2K buffers.
-+ */
-+#define DPA_BP_RAW_SIZE 2048
-+#else
-+/* For jumbo frame optimizations, use buffers large enough to accommodate
-+ * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
-+ * space to account for further alignments.
-+ */
-+#define DPA_MAX_FRM_SIZE 9600
-+#ifdef CONFIG_PPC
-+#define DPA_BP_RAW_SIZE \
-+ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
-+ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
-+#else /* CONFIG_PPC */
-+#define DPA_BP_RAW_SIZE ((unlikely(dpaa_errata_a010022)) ? 2048 : \
-+ ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
-+ sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1)))
-+#endif /* CONFIG_PPC */
-+#endif /* CONFIG_FSL_DPAA_ETH_JUMBO_FRAME */
-+
-+/* This is what FMan is ever allowed to use.
-+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
-+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
-+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
-+ * half-page-aligned buffers (can we?), so we reserve some more space
-+ * for start-of-buffer alignment.
-+ */
-+#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
-+ SMP_CACHE_BYTES)
-+/* We must ensure that skb_shinfo is always cacheline-aligned. */
-+#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
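-+
-+/* Illustrative arithmetic (not part of the original patch), assuming the
-+ * non-jumbo DPA_BP_RAW_SIZE of 2048 and 64-byte cachelines:
-+ * SKB_WITH_OVERHEAD(2048) = 2048 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
-+ * and dpa_bp_size() subtracts another SMP_CACHE_BYTES from that to leave
-+ * room for start-of-buffer alignment.
-+ */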
-+
-+/* Maximum size of a buffer for which recycling is allowed.
-+ * We need an upper limit such that forwarded skbs that get reallocated on Tx
-+ * aren't allowed to grow unboundedly. On the other hand, we need to make sure
-+ * that skbs allocated by us will not fail to be recycled due to their size.
-+ *
-+ * For a requested size, the kernel allocator provides the next power of two
-+ * sized block, which the stack will use as is, regardless of the actual size
-+ * it required; since we must accommodate at most 9.6K buffers (L2 maximum
-+ * supported frame size), set the recycling upper limit to 16K.
-+ */
-+#define DPA_RECYCLE_MAX_SIZE 16384
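-+
-+/* Concrete instance of the above (illustrative): a jumbo raw buffer request
-+ * of roughly 9.6K plus overhead is served by the kernel allocator from the
-+ * next power-of-two block, i.e. 16K, which is why the recycling cap is set
-+ * to exactly 16384 bytes.
-+ */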
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+/*TODO: temporary for fman pcd testing */
-+#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
-+#endif
-+
-+#define DPAA_ETH_FQ_DELTA 0x10000
-+
-+#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
-+ (((device_addr) & 0x1fffff) >> 6)
-+
-+#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
-+ (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
-+
-+/* Largest value that the FQD's OAL field can hold.
-+ * This is DPAA-1.x specific.
-+ * TODO: This rather belongs in fsl_qman.h
-+ */
-+#define FSL_QMAN_MAX_OAL 127
-+
-+/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
-+#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
-+
-+/* Default alignment for start of data in an Rx FD */
-+#define DPA_FD_DATA_ALIGNMENT 16
-+
-+/* Values for the L3R field of the FM Parse Results
-+ */
-+/* L3 Type field: First IP Present IPv4 */
-+#define FM_L3_PARSE_RESULT_IPV4 0x8000
-+/* L3 Type field: First IP Present IPv6 */
-+#define FM_L3_PARSE_RESULT_IPV6 0x4000
-+
-+/* Values for the L4R field of the FM Parse Results
-+ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
-+ */
-+/* L4 Type field: UDP */
-+#define FM_L4_PARSE_RESULT_UDP 0x40
-+/* L4 Type field: TCP */
-+#define FM_L4_PARSE_RESULT_TCP 0x20
-+/* FD status field indicating whether the FM Parser has attempted to validate
-+ * the L4 csum of the frame.
-+ * Note that having this bit set doesn't necessarily imply that the checksum
-+ * is valid. One would have to check the parse results to find that out.
-+ */
-+#define FM_FD_STAT_L4CV 0x00000004
-+
-+
-+#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
-+
-+/* Check if the parsed frame was found to be a TCP segment.
-+ *
-+ * @parse_result_ptr must be of type (fm_prs_result_t *).
-+ */
-+#define fm_l4_frame_is_tcp(parse_result_ptr) \
-+ ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
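-+
-+/* Illustrative Rx checksum sketch (not part of the original patch): a
-+ * receive path would typically combine the FD status bit above with the
-+ * parse results before trusting the hardware checksum, e.g.:
-+ *
-+ *     if ((fd->status & FM_FD_STAT_L4CV) &&
-+ *         ((parse_results->l4r & FM_L4_PARSE_RESULT_TCP) ||
-+ *          (parse_results->l4r & FM_L4_PARSE_RESULT_UDP)))
-+ *             skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ *     else
-+ *             skb->ip_summed = CHECKSUM_NONE;
-+ */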
-+
-+/* number of Tx queues to FMan */
-+#ifdef CONFIG_FMAN_PFC
-+#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
-+#else
-+#define DPAA_ETH_TX_QUEUES NR_CPUS
-+#endif
-+
-+#define DPAA_ETH_RX_QUEUES 128
-+
-+/* Convenience macros for storing/retrieving the skb back-pointers. They must
-+ * accommodate both the recycling and the confirmation paths - i.e. cases where
-+ * the buffer was allocated by ourselves and cases where it came from the
-+ * stack. In the former case we could store the skb at a negative offset; in
-+ * the latter case we can't, so we use 0 as the offset.
-+ *
-+ * NB: @off is an offset from a (struct sk_buff **) pointer!
-+ */
-+#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
-+{ \
-+ skbh = (struct sk_buff **)addr; \
-+ *(skbh + (off)) = skb; \
-+}
-+#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
-+{ \
-+ skbh = (struct sk_buff **)addr; \
-+ skb = *(skbh + (off)); \
-+}
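-+
-+/* Illustrative usage (not part of the original patch): for a frame built
-+ * from a stack-allocated skb, the back-pointer is written at offset 0 from
-+ * the start of the buffer before DMA mapping and read back on Tx
-+ * confirmation, e.g.:
-+ *
-+ *     struct sk_buff **skbh;
-+ *     void *buf_start = skb->data - priv->tx_headroom;
-+ *
-+ *     DPA_WRITE_SKB_PTR(skb, skbh, buf_start, 0);
-+ *     ...
-+ *     DPA_READ_SKB_PTR(skb, skbh, buf_start, 0);
-+ */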
-+
-+#ifdef CONFIG_PM
-+/* Magic Packet wakeup */
-+#define DPAA_WOL_MAGIC 0x00000001
-+#endif
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+struct pcd_range {
-+ uint32_t base;
-+ uint32_t count;
-+};
-+#endif
-+
-+/* More detailed FQ types - used for fine-grained WQ assignments */
-+enum dpa_fq_type {
-+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
-+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
-+ FQ_TYPE_RX_PCD, /* User-defined PCDs */
-+ FQ_TYPE_TX, /* "Real" Tx FQs */
-+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
-+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
-+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
-+ FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
-+};
-+
-+struct dpa_fq {
-+ struct qman_fq fq_base;
-+ struct list_head list;
-+ struct net_device *net_dev;
-+ bool init;
-+ uint32_t fqid;
-+ uint32_t flags;
-+ uint16_t channel;
-+ uint8_t wq;
-+ enum dpa_fq_type fq_type;
-+};
-+
-+struct dpa_fq_cbs_t {
-+ struct qman_fq rx_defq;
-+ struct qman_fq tx_defq;
-+ struct qman_fq rx_errq;
-+ struct qman_fq tx_errq;
-+ struct qman_fq egress_ern;
-+};
-+
-+struct fqid_cell {
-+ uint32_t start;
-+ uint32_t count;
-+};
-+
-+struct dpa_bp {
-+ struct bman_pool *pool;
-+ uint8_t bpid;
-+ struct device *dev;
-+ union {
-+ /* The buffer pools used for the private ports are initialized
-+ * with target_count buffers for each CPU; at runtime the
-+ * number of buffers per CPU is constantly brought back to this
-+ * level
-+ */
-+ int target_count;
-+ /* The configured value for the number of buffers in the pool,
-+ * used for shared port buffer pools
-+ */
-+ int config_count;
-+ };
-+ size_t size;
-+ bool seed_pool;
-+ /* physical address of the contiguous memory used by the pool to store
-+ * the buffers
-+ */
-+ dma_addr_t paddr;
-+ /* virtual address of the contiguous memory used by the pool to store
-+ * the buffers
-+ */
-+ void __iomem *vaddr;
-+ /* current number of buffers in the bpool allotted to this CPU */
-+ int __percpu *percpu_count;
-+ atomic_t refs;
-+ /* some bpools need to be seeded before use by this cb */
-+ int (*seed_cb)(struct dpa_bp *);
-+ /* some bpools need to be emptied before freeing; this cb is used
-+ * for freeing of individual buffers taken from the pool
-+ */
-+ void (*free_buf_cb)(void *addr);
-+};
-+
-+struct dpa_rx_errors {
-+ u64 dme; /* DMA Error */
-+ u64 fpe; /* Frame Physical Error */
-+ u64 fse; /* Frame Size Error */
-+ u64 phe; /* Header Error */
-+ u64 cse; /* Checksum Validation Error */
-+};
-+
-+/* Counters for QMan ERN frames - one counter per rejection code */
-+struct dpa_ern_cnt {
-+ u64 cg_tdrop; /* Congestion group taildrop */
-+ u64 wred; /* WRED congestion */
-+ u64 err_cond; /* Error condition */
-+ u64 early_window; /* Order restoration, frame too early */
-+ u64 late_window; /* Order restoration, frame too late */
-+ u64 fq_tdrop; /* FQ taildrop */
-+ u64 fq_retired; /* FQ is retired */
-+ u64 orp_zero; /* ORP disabled */
-+};
-+
-+struct dpa_napi_portal {
-+ struct napi_struct napi;
-+ struct qman_portal *p;
-+};
-+
-+struct dpa_percpu_priv_s {
-+ struct net_device *net_dev;
-+ struct dpa_napi_portal *np;
-+ u64 in_interrupt;
-+ u64 tx_returned;
-+ u64 tx_confirm;
-+ /* fragmented (non-linear) skbuffs received from the stack */
-+ u64 tx_frag_skbuffs;
-+ /* number of S/G frames received */
-+ u64 rx_sg;
-+
-+ struct rtnl_link_stats64 stats;
-+ struct dpa_rx_errors rx_errors;
-+ struct dpa_ern_cnt ern_cnt;
-+};
-+
-+struct dpa_priv_s {
-+ struct dpa_percpu_priv_s __percpu *percpu_priv;
-+ struct dpa_bp *dpa_bp;
-+ /* Store here the needed Tx headroom for convenience and speed
-+ * (even though it can be computed based on the fields of buf_layout)
-+ */
-+ uint16_t tx_headroom;
-+ struct net_device *net_dev;
-+ struct mac_device *mac_dev;
-+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
-+ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
-+
-+ size_t bp_count;
-+
-+ uint16_t channel; /* "fsl,qman-channel-id" */
-+ struct list_head dpa_fq_list;
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ struct dentry *debugfs_loop_file;
-+#endif
-+
-+ uint32_t msg_enable; /* net_device message level */
-+#ifdef CONFIG_FSL_DPAA_1588
-+ struct dpa_ptp_tsu *tsu;
-+#endif
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+/* TODO: this is temporary until pcd support is implemented in dpaa */
-+ int priv_pcd_num_ranges;
-+ struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
-+#endif
-+
-+ struct {
-+ /**
-+ * All egress queues to a given net device belong to one
-+ * (and the same) congestion group.
-+ */
-+ struct qman_cgr cgr;
-+ /* If congested, when it began. Used for performance stats. */
-+ u32 congestion_start_jiffies;
-+ /* Number of jiffies the Tx port was congested. */
-+ u32 congested_jiffies;
-+ /**
-+ * Counter for the number of times the CGR
-+ * entered congestion state
-+ */
-+ u32 cgr_congested_count;
-+ } cgr_data;
-+ /* Use a per-port CGR for ingress traffic. */
-+ bool use_ingress_cgr;
-+ struct qman_cgr ingress_cgr;
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+ bool ts_tx_en; /* Tx timestamping enabled */
-+ bool ts_rx_en; /* Rx timestamping enabled */
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ struct dpa_buffer_layout_s *buf_layout;
-+ uint16_t rx_headroom;
-+ char if_type[30];
-+
-+ void *peer;
-+#ifdef CONFIG_PM
-+ u32 wol;
-+#endif
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ int loop_id;
-+ int loop_to;
-+#endif
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+ bool ceetm_en; /* CEETM QoS enabled */
-+#endif
-+};
-+
-+struct fm_port_fqs {
-+ struct dpa_fq *tx_defq;
-+ struct dpa_fq *tx_errq;
-+ struct dpa_fq *rx_defq;
-+ struct dpa_fq *rx_errq;
-+};
-+
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+extern struct net_device *dpa_loop_netdevs[20];
-+#endif
-+
-+/* functions with different implementation for SG and non-SG: */
-+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
-+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
-+void __hot _dpa_rx(struct net_device *net_dev,
-+ struct qman_portal *portal,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid,
-+ int *count_ptr);
-+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
-+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
-+ struct qman_fq *egress_fq, struct qman_fq *conf_fq);
-+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd);
-+void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
-+ const struct qm_fd *fd,
-+ struct sk_buff *skb,
-+ int *use_gro);
-+#ifndef CONFIG_FSL_DPAA_TS
-+bool dpa_skb_is_recyclable(struct sk_buff *skb);
-+bool dpa_buf_is_recyclable(struct sk_buff *skb,
-+ uint32_t min_size,
-+ uint16_t min_offset,
-+ unsigned char **new_buf_start);
-+#endif
-+int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd,
-+ int *count_ptr, int *offset);
-+int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd);
-+int __cold __attribute__((nonnull))
-+ _dpa_fq_free(struct device *dev, struct qman_fq *fq);
-+
-+/* Turn on HW checksum computation for this outgoing frame.
-+ * If the current protocol is not something we support in this regard
-+ * (or if the stack has already computed the SW checksum), we do nothing.
-+ *
-+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
-+ * otherwise.
-+ *
-+ * Note that this function may modify the fd->cmd field and the skb data buffer
-+ * (the Parse Results area).
-+ */
-+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
-+
-+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
-+ struct qman_portal *portal)
-+{
-+ /* On RT-enabled kernels with threaded ISRs, in_irq() does not
-+ * return an appropriate value, so use in_serving_softirq() to
-+ * distinguish between softirq and irq context.
-+ */
-+ if (unlikely(in_irq() || !in_serving_softirq())) {
-+ /* Disable QMan IRQ and invoke NAPI */
-+ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
-+ if (likely(!ret)) {
-+ const struct qman_portal_config *pc =
-+ qman_p_get_portal_config(portal);
-+ struct dpa_napi_portal *np =
-+ &percpu_priv->np[pc->index];
-+
-+ np->p = portal;
-+ napi_schedule(&np->napi);
-+ percpu_priv->in_interrupt++;
-+ return 1;
-+ }
-+ }
-+ return 0;
-+}
-+
-+static inline ssize_t __const __must_check __attribute__((nonnull))
-+dpa_fd_length(const struct qm_fd *fd)
-+{
-+ return fd->length20;
-+}
-+
-+static inline ssize_t __const __must_check __attribute__((nonnull))
-+dpa_fd_offset(const struct qm_fd *fd)
-+{
-+ return fd->offset;
-+}
-+
-+static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
-+{
-+ uint16_t headroom;
-+ /* The frame headroom must accommodate:
-+ * - the driver private data area
-+ * - parse results, hash results, timestamp if selected
-+ * - manip extra space
-+ * If either the hash results or the time stamp is selected, both will
-+ * be copied to/from the frame headroom, as TS is located between PR and
-+ * HR in the IC and the IC copy size has a granularity of 16 bytes
-+ * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
-+ *
-+ * Also make sure the headroom is a multiple of data_align bytes
-+ */
-+ headroom = (uint16_t)(bl->priv_data_size +
-+ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
-+ (bl->hash_results || bl->time_stamp ?
-+ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
-+ bl->manip_extra_space);
-+
-+ return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
-+}
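-+
-+/* Worked example (illustrative, values assumed): with priv_data_size = 16,
-+ * parse results and hash results enabled, no manip space and data_align = 16,
-+ * the headroom is ALIGN(16 + DPA_PARSE_RESULTS_SIZE + 8 + 8, 16). Note that
-+ * selecting either the hash results or the time stamp reserves room for
-+ * both, since they share the same 16-byte IC copy slot.
-+ */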
-+
-+int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
-+int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
-+int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
-+
-+void dpaa_eth_sysfs_remove(struct device *dev);
-+void dpaa_eth_sysfs_init(struct device *dev);
-+int dpaa_eth_poll(struct napi_struct *napi, int budget);
-+
-+void dpa_private_napi_del(struct net_device *net_dev);
-+
-+/* Equivalent to a memset(0), but works faster */
-+static inline void clear_fd(struct qm_fd *fd)
-+{
-+ fd->opaque_addr = 0;
-+ fd->opaque = 0;
-+ fd->cmd = 0;
-+}
-+
-+static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
-+ struct qman_fq *tx_fq)
-+{
-+ int i;
-+
-+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
-+ if (priv->egress_fqs[i] == tx_fq)
-+ return i;
-+
-+ return -EINVAL;
-+}
-+
-+static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
-+ struct rtnl_link_stats64 *percpu_stats,
-+ struct qm_fd *fd, struct qman_fq *egress_fq,
-+ struct qman_fq *conf_fq)
-+{
-+ int err, i;
-+
-+ if (fd->bpid == 0xff)
-+ fd->cmd |= qman_fq_fqid(conf_fq);
-+
-+ /* Trace this Tx fd */
-+ trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
-+
-+ for (i = 0; i < 100000; i++) {
-+ err = qman_enqueue(egress_fq, fd, 0);
-+ if (err != -EBUSY)
-+ break;
-+ }
-+
-+ if (unlikely(err < 0)) {
-+ /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
-+ percpu_stats->tx_errors++;
-+ percpu_stats->tx_fifo_errors++;
-+ return err;
-+ }
-+
-+ percpu_stats->tx_packets++;
-+ percpu_stats->tx_bytes += dpa_fd_length(fd);
-+
-+ return 0;
-+}
-+
-+/* Use multiple WQs for FQ assignment:
-+ * - Tx Confirmation queues go to WQ1.
-+ * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
-+ * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
-+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
-+ * to be scheduled, in case there are many more FQs in WQ3).
-+ * This ensures that Tx-confirmed buffers are timely released. In particular,
-+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
-+ * are greatly outnumbered by other FQs in the system (usually PCDs), while
-+ * dequeue scheduling is round-robin.
-+ */
-+static inline void _dpa_assign_wq(struct dpa_fq *fq)
-+{
-+ switch (fq->fq_type) {
-+ case FQ_TYPE_TX_CONFIRM:
-+ case FQ_TYPE_TX_CONF_MQ:
-+ fq->wq = 1;
-+ break;
-+ case FQ_TYPE_RX_DEFAULT:
-+ case FQ_TYPE_TX:
-+ fq->wq = 3;
-+ break;
-+ case FQ_TYPE_RX_ERROR:
-+ case FQ_TYPE_TX_ERROR:
-+ case FQ_TYPE_RX_PCD_HI_PRIO:
-+ fq->wq = 2;
-+ break;
-+ case FQ_TYPE_RX_PCD:
-+ fq->wq = 5;
-+ break;
-+ default:
-+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
-+ fq->fq_type, fq->fqid);
-+ }
-+}
-+
-+#ifdef CONFIG_FMAN_PFC
-+/* Use in lieu of skb_get_queue_mapping() */
-+#define dpa_get_queue_mapping(skb) \
-+ (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
-+ ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
-+ ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
-+ dpa_num_cpus + smp_processor_id()));
-+#else
-+#define dpa_get_queue_mapping(skb) skb_get_queue_mapping(skb)
-+#endif
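-+
-+/* Worked example (illustrative, configuration values assumed): with
-+ * dpa_num_cpus = 4 and CONFIG_FMAN_PFC_COS_COUNT = 4, an skb of priority 2
-+ * sent from CPU 1 maps to Tx queue 2 * 4 + 1 = 9, while any priority at or
-+ * above the CoS count collapses into the last class-of-service group.
-+ */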
-+
-+static inline void _dpa_bp_free_pf(void *addr)
-+{
-+ put_page(virt_to_head_page(addr));
-+}
-+
-+/* The LS1043A SoC has a HW issue regarding FMan DMA transactions; the issue
-+ * manifests itself at high traffic rates when frames cross 4K memory
-+ * boundaries, when they are not aligned to 16 bytes, or when they have
-+ * Scatter/Gather fragments. For the moment, we use a SW workaround that
-+ * realigns frames to 256 bytes; Scatter/Gather frames aren't supported
-+ * on egress.
-+ */
-+
-+#ifndef CONFIG_PPC
-+extern bool dpaa_errata_a010022; /* SoC affected by A010022 errata */
-+#define NONREC_MARK 0x01
-+#define HAS_DMA_ISSUE(start, size) \
-+ (((uintptr_t)(start) + (size)) > \
-+ (((uintptr_t)(start) + 0x1000) & ~0xFFF))
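-+
-+/* Illustrative check (addresses assumed): a frame starting at 0x1fe0 with a
-+ * 0x40-byte length ends at 0x2020 and crosses the 4K boundary at 0x2000, so
-+ * HAS_DMA_ISSUE(0x1fe0, 0x40) is true and the frame must be realigned;
-+ * HAS_DMA_ISSUE(0x2000, 0xfff) stays within a single 4K page and is false.
-+ */
-+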
-+/* The headroom needs to accommodate our private data (64 bytes) but
-+ * we reserve 256 bytes instead to guarantee 256-byte data alignment.
-+ */
-+#define DPAA_A010022_HEADROOM 256
-+#endif /* !CONFIG_PPC */
-+
-+#endif /* __DPA_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
-@@ -0,0 +1,205 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/io.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_net.h>
-+#include <linux/etherdevice.h>
-+#include <linux/kthread.h>
-+#include <linux/percpu.h>
-+#include <linux/highmem.h>
-+#include <linux/sort.h>
-+#include <linux/fsl_qman.h>
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#include "dpaa_eth_base.h"
-+
-+#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+uint8_t advanced_debug = -1;
-+module_param(advanced_debug, byte, S_IRUGO);
-+MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
-+EXPORT_SYMBOL(advanced_debug);
-+
-+static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
-+{
-+ return ((struct dpa_bp *)dpa_bp0)->size -
-+ ((struct dpa_bp *)dpa_bp1)->size;
-+}
-+
-+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
-+dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
-+{
-+ int i, lenp, na, ns, err;
-+ struct device *dev;
-+ struct device_node *dev_node;
-+ const __be32 *bpool_cfg;
-+ struct dpa_bp *dpa_bp;
-+ u32 bpid;
-+
-+ dev = &_of_dev->dev;
-+
-+ *count = of_count_phandle_with_args(dev->of_node,
-+ "fsl,bman-buffer-pools", NULL);
-+ if (*count < 1) {
-+ dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
-+ if (dpa_bp == NULL) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ dev_node = of_find_node_by_path("/");
-+ if (unlikely(dev_node == NULL)) {
-+ dev_err(dev, "of_find_node_by_path(/) failed\n");
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+ na = of_n_addr_cells(dev_node);
-+ ns = of_n_size_cells(dev_node);
-+
-+ for (i = 0; i < *count; i++) {
-+ of_node_put(dev_node);
-+
-+ dev_node = of_parse_phandle(dev->of_node,
-+ "fsl,bman-buffer-pools", i);
-+ if (dev_node == NULL) {
-+ dev_err(dev, "of_find_node_by_phandle() failed\n");
-+ return ERR_PTR(-EFAULT);
-+ }
-+
-+ if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
-+ dev_err(dev,
-+ "!of_device_is_compatible(%s, fsl,bpool)\n",
-+ dev_node->full_name);
-+ dpa_bp = ERR_PTR(-EINVAL);
-+ goto _return_of_node_put;
-+ }
-+
-+ err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
-+ if (err) {
-+ dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
-+ dpa_bp = ERR_PTR(-EINVAL);
-+ goto _return_of_node_put;
-+ }
-+ dpa_bp[i].bpid = (uint8_t)bpid;
-+
-+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
-+ &lenp);
-+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
-+ const uint32_t *seed_pool;
-+
-+ dpa_bp[i].config_count =
-+ (int)of_read_number(bpool_cfg, ns);
-+ dpa_bp[i].size =
-+ (size_t)of_read_number(bpool_cfg + ns, ns);
-+ dpa_bp[i].paddr =
-+ of_read_number(bpool_cfg + 2 * ns, na);
-+
-+ seed_pool = of_get_property(dev_node,
-+ "fsl,bpool-ethernet-seeds", &lenp);
-+ dpa_bp[i].seed_pool = !!seed_pool;
-+
-+ } else {
-+ dev_err(dev,
-+ "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
-+ dev_node->full_name);
-+ dpa_bp = ERR_PTR(-EINVAL);
-+ goto _return_of_node_put;
-+ }
-+ }
-+
-+ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
-+
-+ return dpa_bp;
-+
-+_return_of_node_put:
-+ if (dev_node)
-+ of_node_put(dev_node);
-+
-+ return dpa_bp;
-+}
-+EXPORT_SYMBOL(dpa_bp_probe);
-+
-+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
-+ size_t count)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ int i;
-+
-+ priv->dpa_bp = dpa_bp;
-+ priv->bp_count = count;
-+
-+ for (i = 0; i < count; i++) {
-+ int err;
-+ err = dpa_bp_alloc(&dpa_bp[i], net_dev->dev.parent);
-+ if (err < 0) {
-+ dpa_bp_free(priv);
-+ priv->dpa_bp = NULL;
-+ return err;
-+ }
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_bp_create);
-+
-+static int __init __cold dpa_advanced_load(void)
-+{
-+ pr_info(DPA_DESCRIPTION "\n");
-+
-+ return 0;
-+}
-+module_init(dpa_advanced_load);
-+
-+static void __exit __cold dpa_advanced_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+}
-+module_exit(dpa_advanced_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
-@@ -0,0 +1,49 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA_ETH_BASE_H
-+#define __DPAA_ETH_BASE_H
-+
-+#include <linux/etherdevice.h> /* struct net_device */
-+#include <linux/fsl_bman.h> /* struct bm_buffer */
-+#include <linux/of_platform.h> /* struct platform_device */
-+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
-+
-+extern uint8_t advanced_debug;
-+extern const struct dpa_fq_cbs_t shared_fq_cbs;
-+extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
-+
-+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
-+dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
-+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
-+ size_t count);
-+
-+#endif /* __DPAA_ETH_BASE_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
-@@ -0,0 +1,2076 @@
-+/* Copyright 2008-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/init.h>
-+#include "dpaa_eth_ceetm.h"
-+
-+#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
-+
-+const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
-+ [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
-+ [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
-+};
-+
-+struct Qdisc_ops ceetm_qdisc_ops;
-+
-+/* Obtain the DCP and the SP ids from the FMan port */
-+static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
-+ unsigned int *sp_id)
-+{
-+ uint32_t channel;
-+ t_LnxWrpFmPortDev *port_dev;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = dpa_priv->mac_dev;
-+
-+ port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
-+ channel = port_dev->txCh;
-+
-+ *sp_id = channel & CHANNEL_SP_MASK;
-+ pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
-+
-+ if (channel < DCP0_MAX_CHANNEL) {
-+ *dcp_id = qm_dc_portal_fman0;
-+ pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
-+ } else {
-+ *dcp_id = qm_dc_portal_fman1;
-+ pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
-+ }
-+}
-+
-+/* Wait for the DPAA Eth driver WQ TX FQs to empty */
-+static void dpaa_drain_fqs(struct net_device *dev)
-+{
-+ const struct dpa_priv_s *priv = netdev_priv(dev);
-+ struct qm_mcr_queryfq_np np;
-+ struct qman_fq *fq;
-+ int ret, i;
-+
-+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
-+ fq = priv->egress_fqs[i];
-+ while (true) {
-+ ret = qman_query_fq_np(fq, &np);
-+ if (unlikely(ret)) {
-+ pr_err(KBUILD_BASENAME
-+ " : %s : unable to query FQ %x: %d\n",
-+ __func__, fq->fqid, ret);
-+ break;
-+ }
-+
-+ if (np.frm_cnt == 0)
-+ break;
-+ }
-+ }
-+}
-+
-+/* Wait for the DPAA CEETM TX CQs to empty */
-+static void ceetm_drain_class(struct ceetm_class *cl)
-+{
-+ struct qm_mcr_ceetm_cq_query cq_query;
-+ struct qm_ceetm_cq *cq;
-+ unsigned int idx;
-+ int ret;
-+
-+ if (!cl)
-+ return;
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ /* The ROOT classes aren't directly linked to CEETM CQs */
-+ return;
-+ case CEETM_PRIO:
-+ cq = (struct qm_ceetm_cq *)cl->prio.cq;
-+ break;
-+ case CEETM_WBFS:
-+ cq = (struct qm_ceetm_cq *)cl->wbfs.cq;
-+ break;
-+ }
-+
-+ if (!cq || !cl->ch)
-+ return;
-+
-+ /* Build the query CQID by merging the channel and the CQ IDs */
-+ idx = (cq->parent->idx << 4) | cq->idx;
-+
-+ while (true) {
-+ ret = qman_ceetm_query_cq(idx,
-+ cl->ch->dcp_idx,
-+ &cq_query);
-+ if (unlikely(ret)) {
-+ pr_err(KBUILD_BASENAME
-+ " : %s : unable to query CQ %x: %d\n",
-+ __func__, idx, ret);
-+ break;
-+ }
-+
-+ if (cq_query.frm_cnt == 0)
-+ break;
-+ }
-+}
-+
-+/* Enqueue Rejection Notification callback */
-+static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ struct dpa_percpu_priv_s *dpa_percpu_priv;
-+ struct ceetm_class_stats *cstats = NULL;
-+ const struct dpa_priv_s *dpa_priv;
-+ struct qm_fd fd = msg->ern.fd;
-+ struct net_device *net_dev;
-+ struct ceetm_fq *ceetm_fq;
-+ struct ceetm_class *cls;
-+ struct sk_buff *skb;
-+
-+ ceetm_fq = container_of(fq, struct ceetm_fq, fq);
-+ net_dev = ceetm_fq->net_dev;
-+ dpa_priv = netdev_priv(net_dev);
-+ dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
-+
-+ /* Increment DPA counters */
-+ dpa_percpu_priv->stats.tx_dropped++;
-+ dpa_percpu_priv->stats.tx_fifo_errors++;
-+ count_ern(dpa_percpu_priv, msg);
-+
-+ /* Increment CEETM counters */
-+ cls = ceetm_fq->ceetm_cls;
-+ switch (cls->type) {
-+ case CEETM_PRIO:
-+ cstats = this_cpu_ptr(cls->prio.cstats);
-+ break;
-+ case CEETM_WBFS:
-+ cstats = this_cpu_ptr(cls->wbfs.cstats);
-+ break;
-+ }
-+
-+ if (cstats)
-+ cstats->ern_drop_count++;
-+
-+ /* Release the buffers that were supposed to be recycled. */
-+ if (fd.bpid != 0xff) {
-+ dpa_fd_release(net_dev, &fd);
-+ return;
-+ }
-+
-+ /* Release the frames that were supposed to return on the
-+ * confirmation path.
-+ */
-+ skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
-+ dev_kfree_skb_any(skb);
-+}
-+
-+/* Congestion State Change Notification callback */
-+static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
-+{
-+ struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
-+ struct ceetm_class *cls = ceetm_fq->ceetm_cls;
-+ struct ceetm_class_stats *cstats = NULL;
-+
-+ switch (cls->type) {
-+ case CEETM_PRIO:
-+ cstats = this_cpu_ptr(cls->prio.cstats);
-+ break;
-+ case CEETM_WBFS:
-+ cstats = this_cpu_ptr(cls->wbfs.cstats);
-+ break;
-+ }
-+
-+ ceetm_fq->congested = congested;
-+
-+ if (congested) {
-+ dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
-+ dpa_priv->cgr_data.cgr_congested_count++;
-+ if (cstats)
-+ cstats->congested_count++;
-+ } else {
-+ dpa_priv->cgr_data.congested_jiffies +=
-+ (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
-+ }
-+}
-+
-+/* Allocate a ceetm fq */
-+static int ceetm_alloc_fq(struct ceetm_fq **fq, struct net_device *dev,
-+ struct ceetm_class *cls)
-+{
-+ *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
-+ if (!*fq)
-+ return -ENOMEM;
-+
-+ (*fq)->net_dev = dev;
-+ (*fq)->ceetm_cls = cls;
-+ (*fq)->congested = 0;
-+ return 0;
-+}
-+
-+/* Configure a ceetm Class Congestion Group */
-+static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
-+ struct qm_ceetm_channel *channel, unsigned int id,
-+ struct ceetm_fq *fq, struct dpa_priv_s *dpa_priv)
-+{
-+ int err;
-+ u32 cs_th;
-+ u16 ccg_mask;
-+ struct qm_ceetm_ccg_params ccg_params;
-+
-+ err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
-+ if (err)
-+ return err;
-+
-+ /* Configure the count mode (frames/bytes), enable congestion state
-+ * notifications, configure the congestion entry and exit thresholds,
-+ * enable tail-drop, configure the tail-drop mode, and set the
-+ * overhead accounting limit
-+ */
-+ ccg_mask = QM_CCGR_WE_MODE |
-+ QM_CCGR_WE_CSCN_EN |
-+ QM_CCGR_WE_CS_THRES_IN | QM_CCGR_WE_CS_THRES_OUT |
-+ QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
-+ QM_CCGR_WE_OAL;
-+
-+ ccg_params.mode = 0; /* count bytes */
-+ ccg_params.cscn_en = 1; /* generate notifications */
-+ ccg_params.td_en = 1; /* enable tail-drop */
-+ ccg_params.td_mode = 0; /* tail-drop on congestion state */
-+ ccg_params.oal = (signed char)(min(sizeof(struct sk_buff) +
-+ dpa_priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-+
-+ /* Set the congestion state thresholds according to the link speed */
-+ if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
-+ cs_th = CONFIG_FSL_DPAA_CEETM_CCS_THRESHOLD_10G;
-+ else
-+ cs_th = CONFIG_FSL_DPAA_CEETM_CCS_THRESHOLD_1G;
-+
-+ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1);
-+ qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out,
-+ cs_th * CEETM_CCGR_RATIO, 1);
-+
-+ err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/* Configure a ceetm Logical Frame Queue */
-+static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
-+ struct qm_ceetm_lfq **lfq)
-+{
-+ int err;
-+ u64 context_a;
-+ u32 context_b;
-+
-+ err = qman_ceetm_lfq_claim(lfq, cq);
-+ if (err)
-+ return err;
-+
-+ /* Get the former contexts in order to preserve context B */
-+ err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
-+ if (err)
-+ return err;
-+
-+ context_a = CEETM_CONTEXT_A;
-+ err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
-+ if (err)
-+ return err;
-+
-+ (*lfq)->ern = ceetm_ern;
-+
-+ err = qman_ceetm_create_fq(*lfq, &fq->fq);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/* Configure a prio ceetm class */
-+static int ceetm_config_prio_cls(struct ceetm_class *cls,
-+ struct net_device *dev,
-+ unsigned int id)
-+{
-+ int err;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+
-+ err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CCG */
-+ err = ceetm_config_ccg(&cls->prio.ccg, cls->ch, id, cls->prio.fq,
-+ dpa_priv);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CQ */
-+ err = qman_ceetm_cq_claim(&cls->prio.cq, cls->ch, id, cls->prio.ccg);
-+ if (err)
-+ return err;
-+
-+ if (cls->shaped) {
-+ err = qman_ceetm_channel_set_cq_cr_eligibility(cls->ch, id, 1);
-+ if (err)
-+ return err;
-+
-+ err = qman_ceetm_channel_set_cq_er_eligibility(cls->ch, id, 1);
-+ if (err)
-+ return err;
-+ }
-+
-+ /* Claim and configure a LFQ */
-+ err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/* Configure a wbfs ceetm class */
-+static int ceetm_config_wbfs_cls(struct ceetm_class *cls,
-+ struct net_device *dev,
-+ unsigned int id, int type)
-+{
-+ int err;
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+
-+ err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CCG */
-+ err = ceetm_config_ccg(&cls->wbfs.ccg, cls->ch, id, cls->wbfs.fq,
-+ dpa_priv);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure the CQ */
-+ if (type == WBFS_GRP_B)
-+ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, cls->ch, id,
-+ cls->wbfs.ccg);
-+ else
-+ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, cls->ch, id,
-+ cls->wbfs.ccg);
-+ if (err)
-+ return err;
-+
-+ /* Configure the CQ weight: real number multiplied by 100 to get rid
-+ * of the fraction
-+ */
-+ err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
-+ cls->wbfs.weight * 100);
-+ if (err)
-+ return err;
-+
-+ /* Claim and configure a LFQ */
-+ err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
-+ if (err)
-+ return err;
-+
-+ return 0;
-+}
-+
-+/* Find class in qdisc hash table using given handle */
-+static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct Qdisc_class_common *clc;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
-+ __func__, handle, sch->handle);
-+
-+ clc = qdisc_class_find(&priv->clhash, handle);
-+ return clc ? container_of(clc, struct ceetm_class, common) : NULL;
-+}
-+
-+/* Insert a class in the qdisc's class hash */
-+static void ceetm_link_class(struct Qdisc *sch,
-+ struct Qdisc_class_hash *clhash,
-+ struct Qdisc_class_common *common)
-+{
-+ sch_tree_lock(sch);
-+ qdisc_class_hash_insert(clhash, common);
-+ sch_tree_unlock(sch);
-+ qdisc_class_hash_grow(sch, clhash);
-+}
-+
-+/* Destroy a ceetm class */
-+static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
-+{
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ if (!cl)
-+ return;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (cl->root.child) {
-+ qdisc_destroy(cl->root.child);
-+ cl->root.child = NULL;
-+ }
-+
-+ if (cl->ch && qman_ceetm_channel_release(cl->ch))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the channel %d\n",
-+ __func__, cl->ch->idx);
-+
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (cl->prio.child) {
-+ qdisc_destroy(cl->prio.child);
-+ cl->prio.child = NULL;
-+ }
-+
-+ /* We must make sure the CQ is empty before releasing it.
-+ * Pause all transmissions while we wait for it to drain.
-+ */
-+ netif_tx_stop_all_queues(dev);
-+ ceetm_drain_class(cl);
-+
-+ if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the LFQ %d\n",
-+ __func__, cl->prio.lfq->idx);
-+
-+ if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CQ %d\n",
-+ __func__, cl->prio.cq->idx);
-+
-+ if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CCG %d\n",
-+ __func__, cl->prio.ccg->idx);
-+
-+ kfree(cl->prio.fq);
-+
-+ if (cl->prio.cstats)
-+ free_percpu(cl->prio.cstats);
-+
-+ netif_tx_wake_all_queues(dev);
-+ break;
-+
-+ case CEETM_WBFS:
-+ /* We must make sure the CQ is empty before releasing it.
-+ * Pause all transmissions while we wait for it to drain.
-+ */
-+ netif_tx_stop_all_queues(dev);
-+ ceetm_drain_class(cl);
-+
-+ if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the LFQ %d\n",
-+ __func__, cl->wbfs.lfq->idx);
-+
-+ if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CQ %d\n",
-+ __func__, cl->wbfs.cq->idx);
-+
-+ if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the CCG %d\n",
-+ __func__, cl->wbfs.ccg->idx);
-+
-+ kfree(cl->wbfs.fq);
-+
-+ if (cl->wbfs.cstats)
-+ free_percpu(cl->wbfs.cstats);
-+
-+ netif_tx_wake_all_queues(dev);
-+ }
-+
-+ tcf_block_put(cl->block);
-+ kfree(cl);
-+}
-+
-+/* Destroy a ceetm qdisc */
-+static void ceetm_destroy(struct Qdisc *sch)
-+{
-+ unsigned int ntx, i;
-+ struct hlist_node *next;
-+ struct ceetm_class *cl;
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
-+ __func__, sch->handle);
-+
-+ /* All filters need to be removed before destroying the classes */
-+ tcf_block_put(priv->block);
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
-+ tcf_block_put(cl->block);
-+ cl->block = NULL;
-+ }
-+ }
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
-+ common.hnode)
-+ ceetm_cls_destroy(sch, cl);
-+ }
-+
-+ qdisc_class_hash_destroy(&priv->clhash);
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ dpa_disable_ceetm(dev);
-+
-+ if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the LNI %d\n",
-+ __func__, priv->root.lni->idx);
-+
-+ if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
-+ pr_err(KBUILD_BASENAME
-+ " : %s : error releasing the SP %d\n",
-+ __func__, priv->root.sp->idx);
-+
-+ if (priv->root.qstats)
-+ free_percpu(priv->root.qstats);
-+
-+ if (!priv->root.qdiscs)
-+ break;
-+
-+ /* Destroy the pfifo qdiscs in case they haven't been attached
-+ * to the netdev queues yet.
-+ */
-+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
-+ if (priv->root.qdiscs[ntx])
-+ qdisc_destroy(priv->root.qdiscs[ntx]);
-+
-+ kfree(priv->root.qdiscs);
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (priv->prio.parent)
-+ priv->prio.parent->root.child = NULL;
-+ break;
-+
-+ case CEETM_WBFS:
-+ /* Reset the WBFS groups and priorities */
-+ if (priv->wbfs.ch)
-+ qman_ceetm_channel_set_group(priv->wbfs.ch, 1, 0, 0);
-+
-+ if (priv->wbfs.parent)
-+ priv->wbfs.parent->prio.child = NULL;
-+ break;
-+ }
-+}
-+
-+static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
-+{
-+ struct Qdisc *qdisc;
-+ unsigned int ntx, i;
-+ struct nlattr *nest;
-+ struct tc_ceetm_qopt qopt;
-+ struct ceetm_qdisc_stats *qstats;
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ sch_tree_lock(sch);
-+ memset(&qopt, 0, sizeof(qopt));
-+ qopt.type = priv->type;
-+ qopt.shaped = priv->shaped;
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ /* Gather statistics from the underlying pfifo qdiscs */
-+ sch->q.qlen = 0;
-+ memset(&sch->bstats, 0, sizeof(sch->bstats));
-+ memset(&sch->qstats, 0, sizeof(sch->qstats));
-+
-+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-+ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
-+ sch->q.qlen += qdisc->q.qlen;
-+ sch->bstats.bytes += qdisc->bstats.bytes;
-+ sch->bstats.packets += qdisc->bstats.packets;
-+ sch->qstats.qlen += qdisc->qstats.qlen;
-+ sch->qstats.backlog += qdisc->qstats.backlog;
-+ sch->qstats.drops += qdisc->qstats.drops;
-+ sch->qstats.requeues += qdisc->qstats.requeues;
-+ sch->qstats.overlimits += qdisc->qstats.overlimits;
-+ }
-+
-+ for_each_online_cpu(i) {
-+ qstats = per_cpu_ptr(priv->root.qstats, i);
-+ sch->qstats.drops += qstats->drops;
-+ }
-+
-+ qopt.rate = priv->root.rate;
-+ qopt.ceil = priv->root.ceil;
-+ qopt.overhead = priv->root.overhead;
-+ break;
-+
-+ case CEETM_PRIO:
-+ qopt.qcount = priv->prio.qcount;
-+ break;
-+
-+ case CEETM_WBFS:
-+ qopt.qcount = priv->wbfs.qcount;
-+ qopt.cr = priv->wbfs.cr;
-+ qopt.er = priv->wbfs.er;
-+ break;
-+
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ sch_tree_unlock(sch);
-+ return -EINVAL;
-+ }
-+
-+ nest = nla_nest_start(skb, TCA_OPTIONS);
-+ if (!nest)
-+ goto nla_put_failure;
-+ if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
-+ goto nla_put_failure;
-+ nla_nest_end(skb, nest);
-+
-+ sch_tree_unlock(sch);
-+ return skb->len;
-+
-+nla_put_failure:
-+ sch_tree_unlock(sch);
-+ nla_nest_cancel(skb, nest);
-+ return -EMSGSIZE;
-+}
-+
-+/* Configure a root ceetm qdisc */
-+static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ struct netdev_queue *dev_queue;
-+ struct Qdisc *qdisc;
-+ enum qm_dc_portal dcp_id;
-+ unsigned int i, sp_id, parent_id;
-+ int err;
-+ u64 bps;
-+ struct qm_ceetm_sp *sp;
-+ struct qm_ceetm_lni *lni;
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct dpa_priv_s *dpa_priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = dpa_priv->mac_dev;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ /* Validate inputs */
-+ if (sch->parent != TC_H_ROOT) {
-+ pr_err("CEETM: a root ceetm qdisc must be root\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!mac_dev) {
-+ pr_err("CEETM: the interface is lacking a mac\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Pre-allocate underlying pfifo qdiscs.
-+ *
-+ * We want to offload shaping and scheduling decisions to the hardware.
-+ * The pfifo qdiscs will be attached to the netdev queues and will
-+ * guide the traffic from the IP stack down to the driver with minimum
-+ * interference.
-+ *
-+ * The CEETM qdiscs and classes will be crossed when the traffic
-+ * reaches the driver.
-+ */
-+ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
-+ sizeof(priv->root.qdiscs[0]),
-+ GFP_KERNEL);
-+ if (!priv->root.qdiscs) {
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < dev->num_tx_queues; i++) {
-+ dev_queue = netdev_get_tx_queue(dev, i);
-+ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
-+ TC_H_MIN(i + PFIFO_MIN_OFFSET));
-+
-+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
-+ parent_id);
-+ if (!qdisc)
-+ return -ENOMEM;
-+
-+ priv->root.qdiscs[i] = qdisc;
-+ qdisc->flags |= TCQ_F_ONETXQUEUE;
-+ }
-+
-+ sch->flags |= TCQ_F_MQROOT;
-+
-+ priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
-+ if (!priv->root.qstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ return -ENOMEM;
-+ }
-+
-+ priv->shaped = qopt->shaped;
-+ priv->root.rate = qopt->rate;
-+ priv->root.ceil = qopt->ceil;
-+ priv->root.overhead = qopt->overhead;
-+
-+ /* Claim the SP */
-+ get_dcp_and_sp(dev, &dcp_id, &sp_id);
-+ err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ priv->root.sp = sp;
-+
-+ /* Claim the LNI - will use the same id as the SP id since SPs 0-7
-+ * are connected to the TX FMan ports
-+ */
-+ err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ priv->root.lni = lni;
-+
-+ err = qman_ceetm_sp_set_lni(sp, lni);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ lni->sp = sp;
-+
-+ /* Configure the LNI shaper */
-+ if (priv->shaped) {
-+ err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ bps = priv->root.rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ bps = priv->root.ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
-+ __func__);
-+ return err;
-+ }
-+ }
-+
-+ /* TODO default configuration */
-+
-+ dpa_enable_ceetm(dev);
-+ return 0;
-+}
-+
-+/* Configure a prio ceetm qdisc */
-+static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err;
-+ unsigned int i;
-+ struct ceetm_class *parent_cl, *child_cl;
-+ struct Qdisc *parent_qdisc;
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (sch->parent == TC_H_ROOT) {
-+ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
-+ return -EINVAL;
-+ }
-+
-+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
-+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Obtain the parent root ceetm_class */
-+ parent_cl = ceetm_find(sch->parent, parent_qdisc);
-+
-+ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
-+ pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
-+ return -EINVAL;
-+ }
-+
-+ priv->prio.parent = parent_cl;
-+ parent_cl->root.child = sch;
-+
-+ priv->shaped = parent_cl->shaped;
-+ priv->prio.qcount = qopt->qcount;
-+ priv->prio.ch = parent_cl->ch;
-+
-+ /* Create and configure qcount child classes */
-+ for (i = 0; i < priv->prio.qcount; i++) {
-+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
-+ if (!child_cl) {
-+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
-+ __func__);
-+ return -ENOMEM;
-+ }
-+
-+ child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
-+ if (!child_cl->prio.cstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_prio_cls;
-+ }
-+
-+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
-+ child_cl->parent = sch;
-+ child_cl->type = CEETM_PRIO;
-+ child_cl->shaped = priv->shaped;
-+ child_cl->prio.child = NULL;
-+ child_cl->ch = priv->prio.ch;
-+
-+ /* All shaped CQs have CR and ER enabled by default */
-+ child_cl->prio.cr = child_cl->shaped;
-+ child_cl->prio.er = child_cl->shaped;
-+ child_cl->prio.fq = NULL;
-+ child_cl->prio.cq = NULL;
-+
-+ /* Configure the corresponding hardware CQ */
-+ err = ceetm_config_prio_cls(child_cl, dev, i);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
-+ __func__, child_cl->common.classid);
-+ goto err_init_prio_cls;
-+ }
-+
-+ /* Add class handle in Qdisc */
-+ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
-+ pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X associated with CQ %d and CCG %d\n",
-+ __func__, child_cl->common.classid,
-+ child_cl->prio.cq->idx, child_cl->prio.ccg->idx);
-+ }
-+
-+ return 0;
-+
-+err_init_prio_cls:
-+ ceetm_cls_destroy(sch, child_cl);
-+ /* Note: ceetm_destroy() will be called by our caller */
-+ return err;
-+}
-+
-+/* Configure a wbfs ceetm qdisc */
-+static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err, group_b, small_group;
-+ unsigned int i, id, prio_a, prio_b;
-+ struct ceetm_class *parent_cl, *child_cl, *root_cl;
-+ struct Qdisc *parent_qdisc;
-+ struct ceetm_qdisc *parent_priv;
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ /* Validate inputs */
-+ if (sch->parent == TC_H_ROOT) {
-+ pr_err("CEETM: a wbfs ceetm qdiscs can not be root\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Obtain the parent prio ceetm qdisc */
-+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
-+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Obtain the parent prio ceetm class */
-+ parent_cl = ceetm_find(sch->parent, parent_qdisc);
-+ parent_priv = qdisc_priv(parent_qdisc);
-+
-+ if (!parent_cl || parent_cl->type != CEETM_PRIO) {
-+ pr_err("CEETM: a wbfs ceetm qdiscs can be added only under a prio ceetm class\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!qopt->qcount || !qopt->qweight[0]) {
-+ pr_err("CEETM: qcount and qweight are mandatory for a wbfs ceetm qdisc\n");
-+ return -EINVAL;
-+ }
-+
-+ priv->shaped = parent_cl->shaped;
-+
-+ if (!priv->shaped && (qopt->cr || qopt->er)) {
-+ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
-+ return -EINVAL;
-+ }
-+
-+ if (priv->shaped && !(qopt->cr || qopt->er)) {
-+ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Obtain the parent root ceetm class */
-+ root_cl = parent_priv->prio.parent;
-+ if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
-+ root_cl->root.wbfs_grp_large) {
-+ pr_err("CEETM: no more wbfs classes are available\n");
-+ return -EINVAL;
-+ }
-+
-+ if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
-+ qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
-+ pr_err("CEETM: only %d wbfs classes are available\n",
-+ CEETM_MIN_WBFS_QCOUNT);
-+ return -EINVAL;
-+ }
-+
-+ priv->wbfs.parent = parent_cl;
-+ parent_cl->prio.child = sch;
-+
-+ priv->wbfs.qcount = qopt->qcount;
-+ priv->wbfs.cr = qopt->cr;
-+ priv->wbfs.er = qopt->er;
-+ priv->wbfs.ch = parent_cl->ch;
-+
-+ /* Configure the hardware wbfs channel groups */
-+ if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
-+ /* Configure the large group A */
-+ priv->wbfs.group_type = WBFS_GRP_LARGE;
-+ small_group = false;
-+ group_b = false;
-+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
-+ prio_b = prio_a;
-+
-+ } else if (root_cl->root.wbfs_grp_a) {
-+ /* Configure the group B */
-+ priv->wbfs.group_type = WBFS_GRP_B;
-+
-+ err = qman_ceetm_channel_get_group(priv->wbfs.ch, &small_group,
-+ &prio_a, &prio_b);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ small_group = true;
-+ group_b = true;
-+ prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
-+ /* If group A isn't configured, configure it as group B */
-+ prio_a = prio_a ? : prio_b;
-+
-+ } else {
-+ /* Configure the small group A */
-+ priv->wbfs.group_type = WBFS_GRP_A;
-+
-+ err = qman_ceetm_channel_get_group(priv->wbfs.ch, &small_group,
-+ &prio_a, &prio_b);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ small_group = true;
-+ group_b = false;
-+ prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
-+ /* If group B isn't configured, configure it as group A */
-+ prio_b = prio_b ? : prio_a;
-+ }
-+
-+ err = qman_ceetm_channel_set_group(priv->wbfs.ch, small_group, prio_a,
-+ prio_b);
-+ if (err)
-+ return err;
-+
-+ if (priv->shaped) {
-+ err = qman_ceetm_channel_set_group_cr_eligibility(priv->wbfs.ch,
-+ group_b,
-+ priv->wbfs.cr);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to set group CR eligibility\n",
-+ __func__);
-+ return err;
-+ }
-+
-+ err = qman_ceetm_channel_set_group_er_eligibility(priv->wbfs.ch,
-+ group_b,
-+ priv->wbfs.er);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to set group ER eligibility\n",
-+ __func__);
-+ return err;
-+ }
-+ }
-+
-+ /* Create qcount child classes */
-+ for (i = 0; i < priv->wbfs.qcount; i++) {
-+ child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
-+ if (!child_cl) {
-+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
-+ __func__);
-+ return -ENOMEM;
-+ }
-+
-+ child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
-+ if (!child_cl->wbfs.cstats) {
-+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
-+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_wbfs_cls;
-+ }
-+
-+ child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
-+ child_cl->parent = sch;
-+ child_cl->type = CEETM_WBFS;
-+ child_cl->shaped = priv->shaped;
-+ child_cl->wbfs.fq = NULL;
-+ child_cl->wbfs.cq = NULL;
-+ child_cl->wbfs.weight = qopt->qweight[i];
-+ child_cl->ch = priv->wbfs.ch;
-+
-+ if (priv->wbfs.group_type == WBFS_GRP_B)
-+ id = WBFS_GRP_B_OFFSET + i;
-+ else
-+ id = WBFS_GRP_A_OFFSET + i;
-+
-+ err = ceetm_config_wbfs_cls(child_cl, dev, id,
-+ priv->wbfs.group_type);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
-+ __func__, child_cl->common.classid);
-+ goto err_init_wbfs_cls;
-+ }
-+
-+ /* Add class handle in Qdisc */
-+ ceetm_link_class(sch, &priv->clhash, &child_cl->common);
-+ pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X associated with CQ %d and CCG %d\n",
-+ __func__, child_cl->common.classid,
-+ child_cl->wbfs.cq->idx, child_cl->wbfs.ccg->idx);
-+ }
-+
-+ /* Signal the root class that a group has been configured */
-+ switch (priv->wbfs.group_type) {
-+ case WBFS_GRP_LARGE:
-+ root_cl->root.wbfs_grp_large = true;
-+ break;
-+ case WBFS_GRP_A:
-+ root_cl->root.wbfs_grp_a = true;
-+ break;
-+ case WBFS_GRP_B:
-+ root_cl->root.wbfs_grp_b = true;
-+ break;
-+ }
-+
-+ return 0;
-+
-+err_init_wbfs_cls:
-+ ceetm_cls_destroy(sch, child_cl);
-+ /* Note: ceetm_destroy() will be called by our caller */
-+ return err;
-+}
-+
-+/* Configure a generic ceetm qdisc */
-+static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
-+{
-+ struct tc_ceetm_qopt *qopt;
-+ struct nlattr *tb[TCA_CEETM_QOPS + 1];
-+ int ret;
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (!netif_is_multiqueue(dev))
-+ return -EOPNOTSUPP;
-+
-+ if (!opt) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ ret = tcf_block_get(&priv->block, &priv->filter_list);
-+ if (ret)
-+ return ret;
-+
-+ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy, NULL);
-+ if (ret < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return ret;
-+ }
-+
-+ if (!tb[TCA_CEETM_QOPS]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(sch->handle)) {
-+ pr_err("CEETM: a qdisc should not have a minor\n");
-+ return -EINVAL;
-+ }
-+
-+ qopt = nla_data(tb[TCA_CEETM_QOPS]);
-+
-+ /* Initialize the class hash list. Each qdisc has its own class hash */
-+ ret = qdisc_class_hash_init(&priv->clhash);
-+ if (ret < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ priv->type = qopt->type;
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ netif_tx_stop_all_queues(dev);
-+ dpaa_drain_fqs(dev);
-+ ret = ceetm_init_root(sch, priv, qopt);
-+ netif_tx_wake_all_queues(dev);
-+ break;
-+ case CEETM_PRIO:
-+ ret = ceetm_init_prio(sch, priv, qopt);
-+ break;
-+ case CEETM_WBFS:
-+ ret = ceetm_init_wbfs(sch, priv, qopt);
-+ break;
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ /* Note: ceetm_destroy() will be called by our caller */
-+ ret = -EINVAL;
-+ }
-+
-+ return ret;
-+}
-+
-+/* Edit a root ceetm qdisc */
-+static int ceetm_change_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct net_device *dev,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err = 0;
-+ u64 bps;
-+
-+ if (priv->shaped != (bool)qopt->shaped) {
-+ pr_err("CEETM: qdisc %X is %s\n", sch->handle,
-+ priv->shaped ? "shaped" : "unshaped");
-+ return -EINVAL;
-+ }
-+
-+ /* Nothing to modify for unshaped qdiscs */
-+ if (!priv->shaped)
-+ return 0;
-+
-+ /* Configure the LNI shaper */
-+ if (priv->root.overhead != qopt->overhead) {
-+ err = qman_ceetm_lni_enable_shaper(priv->root.lni, 1,
-+ qopt->overhead);
-+ if (err)
-+ goto change_err;
-+ priv->root.overhead = qopt->overhead;
-+ }
-+
-+ if (priv->root.rate != qopt->rate) {
-+ bps = qopt->rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_commit_rate_bps(priv->root.lni, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_err;
-+ priv->root.rate = qopt->rate;
-+ }
-+
-+ if (priv->root.ceil != qopt->ceil) {
-+ bps = qopt->ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_lni_set_excess_rate_bps(priv->root.lni, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_err;
-+ priv->root.ceil = qopt->ceil;
-+ }
-+
-+ return 0;
-+
-+change_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the root ceetm qdisc %X\n",
-+ __func__, sch->handle);
-+ return err;
-+}
-+
-+/* Edit a wbfs ceetm qdisc */
-+static int ceetm_change_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
-+ struct tc_ceetm_qopt *qopt)
-+{
-+ int err;
-+ bool group_b;
-+
-+ if (qopt->qcount) {
-+ pr_err("CEETM: the qcount can not be modified\n");
-+ return -EINVAL;
-+ }
-+
-+ if (qopt->qweight[0]) {
-+ pr_err("CEETM: the qweight can be modified through the wbfs classes\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!priv->shaped && (qopt->cr || qopt->er)) {
-+ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
-+ return -EINVAL;
-+ }
-+
-+ if (priv->shaped && !(qopt->cr || qopt->er)) {
-+ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Nothing to modify for unshaped qdiscs */
-+ if (!priv->shaped)
-+ return 0;
-+
-+ group_b = priv->wbfs.group_type == WBFS_GRP_B;
-+
-+ if (qopt->cr != priv->wbfs.cr) {
-+ err = qman_ceetm_channel_set_group_cr_eligibility(priv->wbfs.ch,
-+ group_b,
-+ qopt->cr);
-+ if (err)
-+ goto change_err;
-+ priv->wbfs.cr = qopt->cr;
-+ }
-+
-+ if (qopt->er != priv->wbfs.er) {
-+ err = qman_ceetm_channel_set_group_er_eligibility(priv->wbfs.ch,
-+ group_b,
-+ qopt->er);
-+ if (err)
-+ goto change_err;
-+ priv->wbfs.er = qopt->er;
-+ }
-+
-+ return 0;
-+
-+change_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the wbfs ceetm qdisc %X\n",
-+ __func__, sch->handle);
-+ return err;
-+}
-+
-+/* Edit a ceetm qdisc */
-+static int ceetm_change(struct Qdisc *sch, struct nlattr *opt)
-+{
-+ struct tc_ceetm_qopt *qopt;
-+ struct nlattr *tb[TCA_CEETM_QOPS + 1];
-+ int ret;
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy, NULL);
-+ if (ret < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return ret;
-+ }
-+
-+ if (!tb[TCA_CEETM_QOPS]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(sch->handle)) {
-+ pr_err("CEETM: a qdisc should not have a minor\n");
-+ return -EINVAL;
-+ }
-+
-+ qopt = nla_data(tb[TCA_CEETM_QOPS]);
-+
-+ if (priv->type != qopt->type) {
-+ pr_err("CEETM: qdisc %X is not of the provided type\n",
-+ sch->handle);
-+ return -EINVAL;
-+ }
-+
-+ switch (priv->type) {
-+ case CEETM_ROOT:
-+ ret = ceetm_change_root(sch, priv, dev, qopt);
-+ break;
-+ case CEETM_PRIO:
-+ pr_err("CEETM: prio qdiscs can not be modified\n");
-+ ret = -EINVAL;
-+ break;
-+ case CEETM_WBFS:
-+ ret = ceetm_change_wbfs(sch, priv, qopt);
-+ break;
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ ret = -EINVAL;
-+ }
-+
-+ return ret;
-+}
-+
-+/* Graft the underlying pfifo qdiscs to the netdev queues.
-+ * It's safe to remove our references at this point, since the kernel will
-+ * destroy the qdiscs on its own and no cleanup from our part is required.
-+ */
-+static void ceetm_attach(struct Qdisc *sch)
-+{
-+ struct net_device *dev = qdisc_dev(sch);
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct Qdisc *qdisc, *old_qdisc;
-+ unsigned int i;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ for (i = 0; i < dev->num_tx_queues; i++) {
-+ qdisc = priv->root.qdiscs[i];
-+ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
-+ if (old_qdisc)
-+ qdisc_destroy(old_qdisc);
-+ }
-+
-+ kfree(priv->root.qdiscs);
-+ priv->root.qdiscs = NULL;
-+}
-+
-+static unsigned long ceetm_cls_search(struct Qdisc *sch, u32 handle)
-+{
-+ return (unsigned long)ceetm_find(handle, sch);
-+}
-+
-+static int ceetm_cls_change_root(struct ceetm_class *cl,
-+ struct tc_ceetm_copt *copt,
-+ struct net_device *dev)
-+{
-+ int err;
-+ u64 bps;
-+
-+ if ((bool)copt->shaped != cl->shaped) {
-+ pr_err("CEETM: class %X is %s\n", cl->common.classid,
-+ cl->shaped ? "shaped" : "unshaped");
-+ return -EINVAL;
-+ }
-+
-+ if (cl->shaped && cl->root.rate != copt->rate) {
-+ bps = copt->rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_commit_rate_bps(cl->ch, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_cls_err;
-+ cl->root.rate = copt->rate;
-+ }
-+
-+ if (cl->shaped && cl->root.ceil != copt->ceil) {
-+ bps = copt->ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_excess_rate_bps(cl->ch, bps,
-+ dev->mtu);
-+ if (err)
-+ goto change_cls_err;
-+ cl->root.ceil = copt->ceil;
-+ }
-+
-+ if (!cl->shaped && cl->root.tbl != copt->tbl) {
-+ err = qman_ceetm_channel_set_weight(cl->ch, copt->tbl);
-+ if (err)
-+ goto change_cls_err;
-+ cl->root.tbl = copt->tbl;
-+ }
-+
-+ return 0;
-+
-+change_cls_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm root class %X\n",
-+ __func__, cl->common.classid);
-+ return err;
-+}
-+
-+static int ceetm_cls_change_prio(struct ceetm_class *cl,
-+ struct tc_ceetm_copt *copt)
-+{
-+ int err;
-+
-+ if (!cl->shaped && (copt->cr || copt->er)) {
-+ pr_err("CEETM: only shaped classes can have CR and ER enabled\n");
-+ return -EINVAL;
-+ }
-+
-+ if (cl->prio.cr != (bool)copt->cr) {
-+ err = qman_ceetm_channel_set_cq_cr_eligibility(
-+ cl->prio.cq->parent,
-+ cl->prio.cq->idx,
-+ copt->cr);
-+ if (err)
-+ goto change_cls_err;
-+ cl->prio.cr = copt->cr;
-+ }
-+
-+ if (cl->prio.er != (bool)copt->er) {
-+ err = qman_ceetm_channel_set_cq_er_eligibility(
-+ cl->prio.cq->parent,
-+ cl->prio.cq->idx,
-+ copt->er);
-+ if (err)
-+ goto change_cls_err;
-+ cl->prio.er = copt->er;
-+ }
-+
-+ return 0;
-+
-+change_cls_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
-+ __func__, cl->common.classid);
-+ return err;
-+}
-+
-+static int ceetm_cls_change_wbfs(struct ceetm_class *cl,
-+ struct tc_ceetm_copt *copt)
-+{
-+ int err;
-+
-+ if (copt->weight != cl->wbfs.weight) {
-+ /* Configure the CQ weight: real number multiplied by 100 to
-+ * get rid of the fraction
-+ */
-+ err = qman_ceetm_set_queue_weight_in_ratio(cl->wbfs.cq,
-+ copt->weight * 100);
-+
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
-+ __func__, cl->common.classid);
-+ return err;
-+ }
-+
-+ cl->wbfs.weight = copt->weight;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Add a ceetm root class or configure a ceetm root/prio/wbfs class */
-+static int ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
-+ struct nlattr **tca, unsigned long *arg)
-+{
-+ int err;
-+ u64 bps;
-+ struct ceetm_qdisc *priv;
-+ struct ceetm_class *cl = (struct ceetm_class *)*arg;
-+ struct nlattr *opt = tca[TCA_OPTIONS];
-+ struct nlattr *tb[__TCA_CEETM_MAX];
-+ struct tc_ceetm_copt *copt;
-+ struct qm_ceetm_channel *channel;
-+ struct net_device *dev = qdisc_dev(sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
-+ __func__, classid, sch->handle);
-+
-+ if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
-+ return -EINVAL;
-+ }
-+
-+ priv = qdisc_priv(sch);
-+
-+ if (!opt) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (!cl && sch->handle != parentid) {
-+ pr_err("CEETM: classes can be attached to the root ceetm qdisc only\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!cl && priv->type != CEETM_ROOT) {
-+ pr_err("CEETM: root ceetm classes can be attached to the root ceetm qdisc only\n");
-+ return -EINVAL;
-+ }
-+
-+ err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy, NULL);
-+ if (err < 0) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (!tb[TCA_CEETM_COPT]) {
-+ pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
-+ pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm root classes\n");
-+ return -EINVAL;
-+ }
-+
-+ copt = nla_data(tb[TCA_CEETM_COPT]);
-+
-+ /* Configure an existing ceetm class */
-+ if (cl) {
-+ if (copt->type != cl->type) {
-+ pr_err("CEETM: class %X is not of the provided type\n",
-+ cl->common.classid);
-+ return -EINVAL;
-+ }
-+
-+ switch (copt->type) {
-+ case CEETM_ROOT:
-+ return ceetm_cls_change_root(cl, copt, dev);
-+
-+ case CEETM_PRIO:
-+ return ceetm_cls_change_prio(cl, copt);
-+
-+ case CEETM_WBFS:
-+ return ceetm_cls_change_wbfs(cl, copt);
-+
-+ default:
-+ pr_err(KBUILD_BASENAME " : %s : invalid class\n",
-+ __func__);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ /* Add a new root ceetm class */
-+ if (copt->type != CEETM_ROOT) {
-+ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
-+ return -EINVAL;
-+ }
-+
-+ if (copt->shaped && !priv->shaped) {
-+ pr_err("CEETM: can not add a shaped ceetm root class under an unshaped ceetm root qdisc\n");
-+ return -EINVAL;
-+ }
-+
-+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
-+ if (!cl)
-+ return -ENOMEM;
-+
-+ err = tcf_block_get(&cl->block, &cl->filter_list);
-+ if (err) {
-+ kfree(cl);
-+ return err;
-+ }
-+
-+ cl->type = copt->type;
-+ cl->shaped = copt->shaped;
-+ cl->root.rate = copt->rate;
-+ cl->root.ceil = copt->ceil;
-+ cl->root.tbl = copt->tbl;
-+
-+ cl->common.classid = classid;
-+ cl->parent = sch;
-+ cl->root.child = NULL;
-+ cl->root.wbfs_grp_a = false;
-+ cl->root.wbfs_grp_b = false;
-+ cl->root.wbfs_grp_large = false;
-+
-+ /* Claim a CEETM channel */
-+ err = qman_ceetm_channel_claim(&channel, priv->root.lni);
-+ if (err) {
-+ pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
-+ __func__);
-+ goto claim_err;
-+ }
-+
-+ cl->ch = channel;
-+
-+ if (cl->shaped) {
-+ /* Configure the channel shaper */
-+ err = qman_ceetm_channel_enable_shaper(channel, 1);
-+ if (err)
-+ goto channel_err;
-+
-+ bps = cl->root.rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
-+ dev->mtu);
-+ if (err)
-+ goto channel_err;
-+
-+ bps = cl->root.ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
-+ dev->mtu);
-+ if (err)
-+ goto channel_err;
-+
-+ } else {
-+ /* Configure the uFQ algorithm */
-+ err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
-+ if (err)
-+ goto channel_err;
-+ }
-+
-+ /* Add class handle in Qdisc */
-+ ceetm_link_class(sch, &priv->clhash, &cl->common);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with channel %d\n",
-+ __func__, classid, channel->idx);
-+ *arg = (unsigned long)cl;
-+ return 0;
-+
-+channel_err:
-+ pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
-+ __func__, channel->idx);
-+ if (qman_ceetm_channel_release(channel))
-+ pr_err(KBUILD_BASENAME " : %s : failed to release the channel %d\n",
-+ __func__, channel->idx);
-+claim_err:
-+ tcf_block_put(cl->block);
-+ kfree(cl);
-+ return err;
-+}
-+
-+static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl;
-+ unsigned int i;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
-+
-+ if (arg->stop)
-+ return;
-+
-+ for (i = 0; i < priv->clhash.hashsize; i++) {
-+ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
-+ if (arg->count < arg->skip) {
-+ arg->count++;
-+ continue;
-+ }
-+ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
-+ arg->stop = 1;
-+ return;
-+ }
-+ arg->count++;
-+ }
-+ }
-+}
-+
-+static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
-+ struct sk_buff *skb, struct tcmsg *tcm)
-+{
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+ struct nlattr *nest;
-+ struct tc_ceetm_copt copt;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ sch_tree_lock(sch);
-+
-+ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
-+ tcm->tcm_handle = cl->common.classid;
-+
-+ memset(&copt, 0, sizeof(copt));
-+
-+ copt.shaped = cl->shaped;
-+ copt.type = cl->type;
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (cl->root.child)
-+ tcm->tcm_info = cl->root.child->handle;
-+
-+ copt.rate = cl->root.rate;
-+ copt.ceil = cl->root.ceil;
-+ copt.tbl = cl->root.tbl;
-+ break;
-+
-+ case CEETM_PRIO:
-+ if (cl->prio.child)
-+ tcm->tcm_info = cl->prio.child->handle;
-+
-+ copt.cr = cl->prio.cr;
-+ copt.er = cl->prio.er;
-+ break;
-+
-+ case CEETM_WBFS:
-+ copt.weight = cl->wbfs.weight;
-+ break;
-+ }
-+
-+ nest = nla_nest_start(skb, TCA_OPTIONS);
-+ if (!nest)
-+ goto nla_put_failure;
-+ if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
-+ goto nla_put_failure;
-+ nla_nest_end(skb, nest);
-+ sch_tree_unlock(sch);
-+ return skb->len;
-+
-+nla_put_failure:
-+ sch_tree_unlock(sch);
-+ nla_nest_cancel(skb, nest);
-+ return -EMSGSIZE;
-+}
-+
-+static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ sch_tree_lock(sch);
-+ qdisc_class_hash_remove(&priv->clhash, &cl->common);
-+
-+ sch_tree_unlock(sch);
-+ ceetm_cls_destroy(sch, cl);
-+ return 0;
-+}
-+
-+/* Get the class' child qdisc, if any */
-+static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
-+ __func__, cl->common.classid, sch->handle);
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ return cl->root.child;
-+
-+ case CEETM_PRIO:
-+ return cl->prio.child;
-+ }
-+
-+ return NULL;
-+}
-+
-+static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
-+ struct Qdisc *new, struct Qdisc **old)
-+{
-+ if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
-+ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
-+ struct gnet_dump *d)
-+{
-+ unsigned int i;
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+ struct gnet_stats_basic_packed tmp_bstats;
-+ struct ceetm_class_stats *cstats = NULL;
-+ struct qm_ceetm_cq *cq = NULL;
-+ struct tc_ceetm_xstats xstats;
-+
-+ memset(&xstats, 0, sizeof(xstats));
-+ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ return 0;
-+ case CEETM_PRIO:
-+ cq = cl->prio.cq;
-+ break;
-+ case CEETM_WBFS:
-+ cq = cl->wbfs.cq;
-+ break;
-+ }
-+
-+ for_each_online_cpu(i) {
-+ switch (cl->type) {
-+ case CEETM_PRIO:
-+ cstats = per_cpu_ptr(cl->prio.cstats, i);
-+ break;
-+ case CEETM_WBFS:
-+ cstats = per_cpu_ptr(cl->wbfs.cstats, i);
-+ break;
-+ }
-+
-+ if (cstats) {
-+ xstats.ern_drop_count += cstats->ern_drop_count;
-+ xstats.congested_count += cstats->congested_count;
-+ tmp_bstats.bytes += cstats->bstats.bytes;
-+ tmp_bstats.packets += cstats->bstats.packets;
-+ }
-+ }
-+
-+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
-+ d, NULL, &tmp_bstats) < 0)
-+ return -1;
-+
-+ if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
-+ &xstats.frame_count,
-+ &xstats.byte_count))
-+ return -1;
-+
-+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
-+}
-+
-+static struct tcf_block *ceetm_tcf_block(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+ struct tcf_block *block = cl ? cl->block : priv->block;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+ return block;
-+}
-+
-+static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
-+ u32 classid)
-+{
-+ struct ceetm_class *cl = ceetm_find(classid, sch);
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+ return (unsigned long)cl;
-+}
-+
-+static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
-+{
-+ struct ceetm_class *cl = (struct ceetm_class *)arg;
-+
-+ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
-+ cl ? cl->common.classid : 0, sch->handle);
-+}
-+
-+const struct Qdisc_class_ops ceetm_cls_ops = {
-+ .graft = ceetm_cls_graft,
-+ .leaf = ceetm_cls_leaf,
-+ .find = ceetm_cls_search,
-+ .change = ceetm_cls_change,
-+ .delete = ceetm_cls_delete,
-+ .walk = ceetm_cls_walk,
-+ .tcf_block = ceetm_tcf_block,
-+ .bind_tcf = ceetm_tcf_bind,
-+ .unbind_tcf = ceetm_tcf_unbind,
-+ .dump = ceetm_cls_dump,
-+ .dump_stats = ceetm_cls_dump_stats,
-+};
-+
-+struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
-+ .id = "ceetm",
-+ .priv_size = sizeof(struct ceetm_qdisc),
-+ .cl_ops = &ceetm_cls_ops,
-+ .init = ceetm_init,
-+ .destroy = ceetm_destroy,
-+ .change = ceetm_change,
-+ .dump = ceetm_dump,
-+ .attach = ceetm_attach,
-+ .owner = THIS_MODULE,
-+};
-+
-+/* Run the filters and classifiers attached to the qdisc on the provided skb */
-+static struct ceetm_class *ceetm_classify(struct sk_buff *skb,
-+ struct Qdisc *sch, int *qerr,
-+ bool *act_drop)
-+{
-+ struct ceetm_qdisc *priv = qdisc_priv(sch);
-+ struct ceetm_class *cl = NULL, *wbfs_cl;
-+ struct tcf_result res;
-+ struct tcf_proto *tcf;
-+ int result;
-+
-+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-+ tcf = priv->filter_list;
-+ while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
-+#ifdef CONFIG_NET_CLS_ACT
-+ switch (result) {
-+ case TC_ACT_QUEUED:
-+ case TC_ACT_STOLEN:
-+ case TC_ACT_TRAP:
-+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-+ case TC_ACT_SHOT:
-+ /* No valid class found due to action */
-+ *act_drop = true;
-+ return NULL;
-+ }
-+#endif
-+ cl = (void *)res.class;
-+ if (!cl) {
-+ if (res.classid == sch->handle) {
-+ /* The filter leads to the qdisc */
-+ /* TODO default qdisc */
-+ return NULL;
-+ }
-+
-+ cl = ceetm_find(res.classid, sch);
-+ if (!cl)
-+ /* The filter leads to an invalid class */
-+ break;
-+ }
-+
-+ /* The class might have its own filters attached */
-+ tcf = cl->filter_list;
-+ }
-+
-+ if (!cl) {
-+ /* No valid class found */
-+ /* TODO default qdisc */
-+ return NULL;
-+ }
-+
-+ switch (cl->type) {
-+ case CEETM_ROOT:
-+ if (cl->root.child) {
-+ /* Run the prio qdisc classifiers */
-+ return ceetm_classify(skb, cl->root.child, qerr,
-+ act_drop);
-+ } else {
-+ /* The root class does not have a child prio qdisc */
-+ /* TODO default qdisc */
-+ return NULL;
-+ }
-+ case CEETM_PRIO:
-+ if (cl->prio.child) {
-+ /* If filters lead to a wbfs class, return it.
-+ * Otherwise, return the prio class
-+ */
-+ wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
-+ act_drop);
-+ /* A NULL result might indicate either an erroneous
-+ * filter, or no filters at all. We will assume the
-+ * latter
-+ */
-+ return wbfs_cl ? : cl;
-+ }
-+ }
-+
-+ /* For wbfs and childless prio classes, return the class directly */
-+ return cl;
-+}
-+
-+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
-+{
-+ int queue_mapping = dpa_get_queue_mapping(skb);
-+ struct Qdisc *sch = net_dev->qdisc;
-+ struct ceetm_class_stats *cstats;
-+ struct ceetm_qdisc_stats *qstats;
-+ struct dpa_priv_s *priv_dpa;
-+ struct ceetm_fq *ceetm_fq;
-+ struct ceetm_qdisc *priv;
-+ struct qman_fq *conf_fq;
-+ struct ceetm_class *cl;
-+ spinlock_t *root_lock;
-+ bool act_drop = false;
-+ int ret;
-+
-+ root_lock = qdisc_lock(sch);
-+ priv = qdisc_priv(sch);
-+ qstats = this_cpu_ptr(priv->root.qstats);
-+
-+ spin_lock(root_lock);
-+ cl = ceetm_classify(skb, sch, &ret, &act_drop);
-+ spin_unlock(root_lock);
-+
-+#ifdef CONFIG_NET_CLS_ACT
-+ if (act_drop) {
-+ if (ret & __NET_XMIT_BYPASS)
-+ qstats->drops++;
-+ goto drop;
-+ }
-+#endif
-+ /* TODO default class */
-+ if (unlikely(!cl)) {
-+ qstats->drops++;
-+ goto drop;
-+ }
-+
-+ if (unlikely(queue_mapping >= DPAA_ETH_TX_QUEUES))
-+ queue_mapping = queue_mapping % DPAA_ETH_TX_QUEUES;
-+
-+ priv_dpa = netdev_priv(net_dev);
-+ conf_fq = priv_dpa->conf_fqs[queue_mapping];
-+
-+ /* Choose the proper tx fq and update the basic stats (bytes and
-+ * packets sent by the class)
-+ */
-+ switch (cl->type) {
-+ case CEETM_PRIO:
-+ ceetm_fq = cl->prio.fq;
-+ cstats = this_cpu_ptr(cl->prio.cstats);
-+ break;
-+ case CEETM_WBFS:
-+ ceetm_fq = cl->wbfs.fq;
-+ cstats = this_cpu_ptr(cl->wbfs.cstats);
-+ break;
-+ default:
-+ qstats->drops++;
-+ goto drop;
-+ }
-+
-+ /* If the FQ is congested, avoid enqueuing the frame and dropping it
-+ * when it returns on the ERN path. Drop it here directly instead.
-+ */
-+ if (unlikely(ceetm_fq->congested)) {
-+ qstats->drops++;
-+ goto drop;
-+ }
-+
-+ bstats_update(&cstats->bstats, skb);
-+ return dpa_tx_extended(skb, net_dev, &ceetm_fq->fq, conf_fq);
-+
-+drop:
-+ dev_kfree_skb_any(skb);
-+ return NET_XMIT_SUCCESS;
-+}
-+
-+static int __init ceetm_register(void)
-+{
-+ int _errno = 0;
-+
-+ pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
-+
-+ _errno = register_qdisc(&ceetm_qdisc_ops);
-+ if (unlikely(_errno))
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): register_qdisc() = %d\n",
-+ KBUILD_BASENAME ".c", __LINE__, __func__, _errno);
-+
-+ return _errno;
-+}
-+
-+static void __exit ceetm_unregister(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME ".c", __func__);
-+
-+ unregister_qdisc(&ceetm_qdisc_ops);
-+}
-+
-+module_init(ceetm_register);
-+module_exit(ceetm_unregister);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
-@@ -0,0 +1,241 @@
-+/* Copyright 2008-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA_ETH_CEETM_H
-+#define __DPAA_ETH_CEETM_H
-+
-+#include <net/pkt_sched.h>
-+#include <net/pkt_cls.h>
-+#include <net/netlink.h>
-+#include <lnxwrp_fm.h>
-+
-+#include "mac.h"
-+#include "dpaa_eth_common.h"
-+
-+/* Mask to determine the sub-portal id from a channel number */
-+#define CHANNEL_SP_MASK 0x1f
-+/* The number of the last channel that services DCP0, connected to FMan 0.
-+ * Value validated for B4 and T series platforms.
-+ */
-+#define DCP0_MAX_CHANNEL 0x80f
-+/* A2V=1 - field A2 is valid
-+ * A0V=1 - field A0 is valid - enables frame confirmation
-+ * OVOM=1 - override operation mode bits with values from A2
-+ * EBD=1 - external buffers are deallocated at the end of the FMan flow
-+ * NL=0 - the BMI releases all the internal buffers
-+ */
-+#define CEETM_CONTEXT_A 0x1a00000080000000
-+/* The ratio between the superior and inferior congestion state thresholds. The
-+ * lower threshold is set to 7/8 of the superior one (as the default for WQ
-+ * scheduling).
-+ */
-+#define CEETM_CCGR_RATIO 0.875
-+/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
-+ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
-+ * are reserved for the maximum 32 CEETM channels (majors and minors are in
-+ * hex).
-+ */
-+#define PFIFO_MIN_OFFSET 0x21
-+
-+/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
-+#define CEETM_MAX_PRIO_QCOUNT 8
-+#define CEETM_MAX_WBFS_QCOUNT 8
-+#define CEETM_MIN_WBFS_QCOUNT 4
-+
-+/* The id offsets of the CQs belonging to WBFS groups (ids 8-11/15 for group A
-+ * and/or 12-15 for group B).
-+ */
-+#define WBFS_GRP_A_OFFSET 8
-+#define WBFS_GRP_B_OFFSET 12
-+
-+#define WBFS_GRP_A 1
-+#define WBFS_GRP_B 2
-+#define WBFS_GRP_LARGE 3
-+
-+enum {
-+ TCA_CEETM_UNSPEC,
-+ TCA_CEETM_COPT,
-+ TCA_CEETM_QOPS,
-+ __TCA_CEETM_MAX,
-+};
-+
-+/* CEETM configuration types */
-+enum {
-+ CEETM_ROOT = 1,
-+ CEETM_PRIO,
-+ CEETM_WBFS
-+};
-+
-+#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
-+extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
-+
-+struct ceetm_class;
-+struct ceetm_qdisc_stats;
-+struct ceetm_class_stats;
-+
-+struct ceetm_fq {
-+ struct qman_fq fq;
-+ struct net_device *net_dev;
-+ struct ceetm_class *ceetm_cls;
-+ int congested; /* Congestion status */
-+};
-+
-+struct root_q {
-+ struct Qdisc **qdiscs;
-+ __u16 overhead;
-+ __u32 rate;
-+ __u32 ceil;
-+ struct qm_ceetm_sp *sp;
-+ struct qm_ceetm_lni *lni;
-+ struct ceetm_qdisc_stats __percpu *qstats;
-+};
-+
-+struct prio_q {
-+ __u16 qcount;
-+ struct ceetm_class *parent;
-+ struct qm_ceetm_channel *ch;
-+};
-+
-+struct wbfs_q {
-+ __u16 qcount;
-+ int group_type;
-+ struct ceetm_class *parent;
-+ struct qm_ceetm_channel *ch;
-+ __u16 cr;
-+ __u16 er;
-+};
-+
-+struct ceetm_qdisc {
-+ int type; /* LNI/CHNL/WBFS */
-+ bool shaped;
-+ union {
-+ struct root_q root;
-+ struct prio_q prio;
-+ struct wbfs_q wbfs;
-+ };
-+ struct Qdisc_class_hash clhash;
-+ struct tcf_proto *filter_list; /* qdisc attached filters */
-+ struct tcf_block *block;
-+};
-+
-+/* CEETM Qdisc configuration parameters */
-+struct tc_ceetm_qopt {
-+ __u32 type;
-+ __u16 shaped;
-+ __u16 qcount;
-+ __u16 overhead;
-+ __u32 rate;
-+ __u32 ceil;
-+ __u16 cr;
-+ __u16 er;
-+ __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
-+};
-+
-+struct root_c {
-+ unsigned int rate;
-+ unsigned int ceil;
-+ unsigned int tbl;
-+ bool wbfs_grp_a;
-+ bool wbfs_grp_b;
-+ bool wbfs_grp_large;
-+ struct Qdisc *child;
-+};
-+
-+struct prio_c {
-+ bool cr;
-+ bool er;
-+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
-+ struct qm_ceetm_lfq *lfq;
-+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
-+ struct qm_ceetm_ccg *ccg;
-+ /* only one wbfs can be linked to one priority CQ */
-+ struct Qdisc *child;
-+ struct ceetm_class_stats __percpu *cstats;
-+};
-+
-+struct wbfs_c {
-+ __u8 weight; /* The weight of the class between 1 and 248 */
-+ struct ceetm_fq *fq; /* Hardware FQ instance Handle */
-+ struct qm_ceetm_lfq *lfq;
-+ struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
-+ struct qm_ceetm_ccg *ccg;
-+ struct ceetm_class_stats __percpu *cstats;
-+};
-+
-+struct ceetm_class {
-+ struct Qdisc_class_common common;
-+ struct tcf_proto *filter_list; /* class attached filters */
-+ struct tcf_block *block;
-+ struct Qdisc *parent;
-+ struct qm_ceetm_channel *ch;
-+ bool shaped;
-+ int type; /* ROOT/PRIO/WBFS */
-+ union {
-+ struct root_c root;
-+ struct prio_c prio;
-+ struct wbfs_c wbfs;
-+ };
-+};
-+
-+/* CEETM Class configuration parameters */
-+struct tc_ceetm_copt {
-+ __u32 type;
-+ __u16 shaped;
-+ __u32 rate;
-+ __u32 ceil;
-+ __u16 tbl;
-+ __u16 cr;
-+ __u16 er;
-+ __u8 weight;
-+};
-+
-+/* CEETM stats */
-+struct ceetm_qdisc_stats {
-+ __u32 drops;
-+};
-+
-+struct ceetm_class_stats {
-+ /* Software counters */
-+ struct gnet_stats_basic_packed bstats;
-+ __u32 ern_drop_count;
-+ __u32 congested_count;
-+};
-+
-+struct tc_ceetm_xstats {
-+ __u32 ern_drop_count;
-+ __u32 congested_count;
-+ /* Hardware counters */
-+ __u64 frame_count;
-+ __u64 byte_count;
-+};
-+
-+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
-@@ -0,0 +1,1745 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_net.h>
-+#include <linux/etherdevice.h>
-+#include <linux/kthread.h>
-+#include <linux/percpu.h>
-+#include <linux/highmem.h>
-+#include <linux/sort.h>
-+#include <linux/fsl_qman.h>
-+#include <linux/ip.h>
-+#include <linux/ipv6.h>
-+#include <linux/if_vlan.h> /* vlan_eth_hdr */
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#ifdef CONFIG_FSL_DPAA_1588
-+#include "dpaa_1588.h"
-+#endif
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+#include "dpaa_debugfs.h"
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+#include "mac.h"
-+
-+/* Size in bytes of the FQ taildrop threshold */
-+#define DPA_FQ_TD 0x200000
-+
-+static struct dpa_bp *dpa_bp_array[64];
-+
-+int dpa_max_frm;
-+EXPORT_SYMBOL(dpa_max_frm);
-+
-+int dpa_rx_extra_headroom;
-+EXPORT_SYMBOL(dpa_rx_extra_headroom);
-+
-+int dpa_num_cpus = NR_CPUS;
-+
-+static const struct fqid_cell tx_confirm_fqids[] = {
-+ {0, DPAA_ETH_TX_QUEUES}
-+};
-+
-+static struct fqid_cell default_fqids[][3] = {
-+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
-+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
-+};
-+
-+static const char fsl_qman_frame_queues[][25] = {
-+ [RX] = "fsl,qman-frame-queues-rx",
-+ [TX] = "fsl,qman-frame-queues-tx"
-+};
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+/* A set of callbacks for hooking into the fastpath at different points. */
-+struct dpaa_eth_hooks_s dpaa_eth_hooks;
-+EXPORT_SYMBOL(dpaa_eth_hooks);
-+/* This function should only be called on the probe paths, since it makes no
-+ * effort to guarantee consistency of the destination hooks structure.
-+ */
-+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
-+{
-+ if (hooks)
-+ dpaa_eth_hooks = *hooks;
-+ else
-+ pr_err("NULL pointer to hooks!\n");
-+}
-+EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
-+#endif
-+
-+int dpa_netdev_init(struct net_device *net_dev,
-+ const uint8_t *mac_addr,
-+ uint16_t tx_timeout)
-+{
-+ int err;
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+
-+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-+
-+ net_dev->features |= net_dev->hw_features;
-+ net_dev->vlan_features = net_dev->features;
-+
-+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
-+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
-+
-+ net_dev->ethtool_ops = &dpa_ethtool_ops;
-+
-+ net_dev->needed_headroom = priv->tx_headroom;
-+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
-+
-+ err = register_netdev(net_dev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev() = %d\n", err);
-+ return err;
-+ }
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ /* create debugfs entry for this net_device */
-+ err = dpa_netdev_debugfs_create(net_dev);
-+ if (err) {
-+ unregister_netdev(net_dev);
-+ return err;
-+ }
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_netdev_init);
-+
-+int __cold dpa_start(struct net_device *net_dev)
-+{
-+ int err, i;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ err = mac_dev->init_phy(net_dev, priv->mac_dev);
-+ if (err < 0) {
-+ if (netif_msg_ifup(priv))
-+ netdev_err(net_dev, "init_phy() = %d\n", err);
-+ return err;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_enable(mac_dev->port_dev[i]);
-+ if (err)
-+ goto mac_start_failed;
-+ }
-+
-+ err = priv->mac_dev->start(mac_dev);
-+ if (err < 0) {
-+ if (netif_msg_ifup(priv))
-+ netdev_err(net_dev, "mac_dev->start() = %d\n", err);
-+ goto mac_start_failed;
-+ }
-+
-+ netif_tx_start_all_queues(net_dev);
-+
-+ return 0;
-+
-+mac_start_failed:
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_disable(mac_dev->port_dev[i]);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(dpa_start);
-+
-+int __cold dpa_stop(struct net_device *net_dev)
-+{
-+ int _errno, i, err;
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ netif_tx_stop_all_queues(net_dev);
-+ /* Allow the Fman (Tx) port to process in-flight frames before we
-+ * try switching it off.
-+ */
-+ usleep_range(5000, 10000);
-+
-+ _errno = mac_dev->stop(mac_dev);
-+ if (unlikely(_errno < 0))
-+ if (netif_msg_ifdown(priv))
-+ netdev_err(net_dev, "mac_dev->stop() = %d\n",
-+ _errno);
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_disable(mac_dev->port_dev[i]);
-+ _errno = err ? err : _errno;
-+ }
-+
-+ if (mac_dev->phy_dev)
-+ phy_disconnect(mac_dev->phy_dev);
-+ mac_dev->phy_dev = NULL;
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_stop);
-+
-+void __cold dpa_timeout(struct net_device *net_dev)
-+{
-+ const struct dpa_priv_s *priv;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+
-+ priv = netdev_priv(net_dev);
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+
-+ if (netif_msg_timer(priv))
-+ netdev_crit(net_dev, "Transmit timeout!\n");
-+
-+ percpu_priv->stats.tx_errors++;
-+}
-+EXPORT_SYMBOL(dpa_timeout);
-+
-+/* net_device */
-+
-+/**
-+ * @param net_dev the device for which statistics are calculated
-+ * @param stats the function fills this structure with the device's statistics
-+ * @return the address of the structure containing the statistics
-+ *
-+ * Calculates the statistics for the given device by adding the statistics
-+ * collected by each CPU.
-+ */
-+void __cold
-+dpa_get_stats64(struct net_device *net_dev,
-+ struct rtnl_link_stats64 *stats)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ u64 *cpustats;
-+ u64 *netstats = (u64 *)stats;
-+ int i, j;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
-+
-+ for_each_possible_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ cpustats = (u64 *)&percpu_priv->stats;
-+
-+ for (j = 0; j < numstats; j++)
-+ netstats[j] += cpustats[j];
-+ }
-+}
-+EXPORT_SYMBOL(dpa_get_stats64);
-+
-+/* .ndo_init callback */
-+int dpa_ndo_init(struct net_device *net_dev)
-+{
-+ /* If fsl_fm_max_frm is set to a higher value than the standard 1500,
-+ * we choose conservatively and let the user explicitly set a higher
-+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
-+ * in the same LAN.
-+ * If on the other hand fsl_fm_max_frm has been chosen below 1500,
-+ * start with the maximum allowed.
-+ */
-+ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
-+
-+ pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
-+ net_dev->mtu = init_mtu;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_ndo_init);
-+
-+int dpa_set_features(struct net_device *dev, netdev_features_t features)
-+{
-+ /* Not much to do here for now */
-+ dev->features = features;
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_set_features);
-+
-+netdev_features_t dpa_fix_features(struct net_device *dev,
-+ netdev_features_t features)
-+{
-+ netdev_features_t unsupported_features = 0;
-+
-+ /* In theory we should never be requested to enable features that
-+ * we didn't set in netdev->features and netdev->hw_features at probe
-+ * time, but double check just to be on the safe side.
-+ * We don't support enabling Rx csum through ethtool yet
-+ */
-+ unsupported_features |= NETIF_F_RXCSUM;
-+
-+ features &= ~unsupported_features;
-+
-+ return features;
-+}
-+EXPORT_SYMBOL(dpa_fix_features);
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
-+ const void *data)
-+{
-+ u64 *ts;
-+
-+ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
-+ data);
-+
-+ if (!ts || *ts == 0)
-+ return 0;
-+
-+ be64_to_cpus(ts);
-+
-+ return *ts;
-+}
-+
-+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
-+ struct skb_shared_hwtstamps *shhwtstamps, const void *data)
-+{
-+ u64 ns;
-+
-+ ns = dpa_get_timestamp_ns(priv, rx_tx, data);
-+
-+ if (ns == 0)
-+ return -EINVAL;
-+
-+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
-+
-+ return 0;
-+}
-+
-+static void dpa_ts_tx_enable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->ptp_enable)
-+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
-+
-+ priv->ts_tx_en = true;
-+}
-+
-+static void dpa_ts_tx_disable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+
-+#if 0
-+/* the RTC might be needed by the Rx Ts, cannot disable here
-+ * no separate ptp_disable API for Rx/Tx, cannot disable here
-+ */
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
-+
-+ if (mac_dev->ptp_disable)
-+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
-+#endif
-+
-+ priv->ts_tx_en = false;
-+}
-+
-+static void dpa_ts_rx_enable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->ptp_enable)
-+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
-+
-+ priv->ts_rx_en = true;
-+}
-+
-+static void dpa_ts_rx_disable(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+
-+#if 0
-+/* the RTC might be needed by the Tx Ts, cannot disable here
-+ * no separate ptp_disable API for Rx/Tx, cannot disable here
-+ */
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
-+
-+ if (mac_dev->ptp_disable)
-+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
-+#endif
-+
-+ priv->ts_rx_en = false;
-+}
-+
-+static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct hwtstamp_config config;
-+
-+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
-+ return -EFAULT;
-+
-+ switch (config.tx_type) {
-+ case HWTSTAMP_TX_OFF:
-+ dpa_ts_tx_disable(dev);
-+ break;
-+ case HWTSTAMP_TX_ON:
-+ dpa_ts_tx_enable(dev);
-+ break;
-+ default:
-+ return -ERANGE;
-+ }
-+
-+ if (config.rx_filter == HWTSTAMP_FILTER_NONE)
-+ dpa_ts_rx_disable(dev);
-+ else {
-+ dpa_ts_rx_enable(dev);
-+ /* TS is set for all frame types, not only those requested */
-+ config.rx_filter = HWTSTAMP_FILTER_ALL;
-+ }
-+
-+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-+ -EFAULT : 0;
-+}
-+#endif /* CONFIG_FSL_DPAA_TS */
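The dpa_ts_ioctl() handler above services the standard Linux SIOCSHWTSTAMP request, so nothing DPAA-specific is needed in userspace. A minimal sketch, assuming an interface name passed in by the caller and only the standard uapi headers (not part of the removed patch):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Ask the driver to timestamp all Tx and Rx frames; dpa_ts_ioctl() coerces
 * any requested rx_filter to HWTSTAMP_FILTER_ALL on return.
 */
static int enable_hw_tstamp(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* lands in dpa_ioctl() below */
	close(fd);
	return ret;
}
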
-+
-+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+#ifdef CONFIG_FSL_DPAA_1588
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+#endif
-+ int ret = -EINVAL;
-+
-+ if (!netif_running(dev))
-+ return -EINVAL;
-+
-+ if (cmd == SIOCGMIIREG) {
-+ if (!dev->phydev)
-+ ret = -EINVAL;
-+ else
-+ ret = phy_mii_ioctl(dev->phydev, rq, cmd);
-+ }
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (cmd == SIOCSHWTSTAMP)
-+ return dpa_ts_ioctl(dev, rq, cmd);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
-+ if (priv->tsu && priv->tsu->valid)
-+ ret = dpa_ioctl_1588(dev, rq, cmd);
-+ else
-+ ret = -ENODEV;
-+ }
-+#endif
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(dpa_ioctl);
-+
-+int __cold dpa_remove(struct platform_device *of_dev)
-+{
-+ int err;
-+ struct device *dev;
-+ struct net_device *net_dev;
-+ struct dpa_priv_s *priv;
-+
-+ dev = &of_dev->dev;
-+ net_dev = dev_get_drvdata(dev);
-+
-+ priv = netdev_priv(net_dev);
-+
-+ dpaa_eth_sysfs_remove(dev);
-+
-+ dev_set_drvdata(dev, NULL);
-+ unregister_netdev(net_dev);
-+
-+ err = dpa_fq_free(dev, &priv->dpa_fq_list);
-+
-+ qman_delete_cgr_safe(&priv->ingress_cgr);
-+ qman_release_cgrid(priv->ingress_cgr.cgrid);
-+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
-+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-+
-+ dpa_private_napi_del(net_dev);
-+
-+ dpa_bp_free(priv);
-+
-+ if (priv->buf_layout)
-+ devm_kfree(dev, priv->buf_layout);
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ /* remove debugfs entry for this net_device */
-+ dpa_netdev_debugfs_remove(net_dev);
-+#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid)
-+ dpa_ptp_cleanup(priv);
-+#endif
-+
-+ free_netdev(net_dev);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(dpa_remove);
-+
-+struct mac_device * __cold __must_check
-+__attribute__((nonnull))
-+dpa_mac_probe(struct platform_device *_of_dev)
-+{
-+ struct device *dpa_dev, *dev;
-+ struct device_node *mac_node;
-+ struct platform_device *of_dev;
-+ struct mac_device *mac_dev;
-+#ifdef CONFIG_FSL_DPAA_1588
-+ int lenp;
-+ const phandle *phandle_prop;
-+ struct net_device *net_dev = NULL;
-+ struct dpa_priv_s *priv = NULL;
-+ struct device_node *timer_node;
-+#endif
-+ dpa_dev = &_of_dev->dev;
-+
-+ mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
-+ if (unlikely(mac_node == NULL)) {
-+ dev_err(dpa_dev, "Cannot find MAC device device tree node\n");
-+ return ERR_PTR(-EFAULT);
-+ }
-+
-+ of_dev = of_find_device_by_node(mac_node);
-+ if (unlikely(of_dev == NULL)) {
-+ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
-+ mac_node->full_name);
-+ of_node_put(mac_node);
-+ return ERR_PTR(-EINVAL);
-+ }
-+ of_node_put(mac_node);
-+
-+ dev = &of_dev->dev;
-+
-+ mac_dev = dev_get_drvdata(dev);
-+ if (unlikely(mac_dev == NULL)) {
-+ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
-+ dev_name(dev));
-+ return ERR_PTR(-EINVAL);
-+ }
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ phandle_prop = of_get_property(mac_node, "ptp-timer", &lenp);
-+ if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
-+ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
-+ (mac_dev->speed == SPEED_1000)))) {
-+ timer_node = of_find_node_by_phandle(*phandle_prop);
-+ if (timer_node)
-+ net_dev = dev_get_drvdata(dpa_dev);
-+ if (timer_node && net_dev) {
-+ priv = netdev_priv(net_dev);
-+ if (!dpa_ptp_init(priv))
-+ dev_info(dev, "%s: ptp 1588 is initialized.\n",
-+ mac_node->full_name);
-+ }
-+ }
-+#endif
-+
-+ return mac_dev;
-+}
-+EXPORT_SYMBOL(dpa_mac_probe);
-+
-+int dpa_set_mac_address(struct net_device *net_dev, void *addr)
-+{
-+ const struct dpa_priv_s *priv;
-+ int _errno;
-+ struct mac_device *mac_dev;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ _errno = eth_mac_addr(net_dev, addr);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev,
-+ "eth_mac_addr() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ mac_dev = priv->mac_dev;
-+
-+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
-+ net_dev->dev_addr);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev,
-+ "mac_dev->change_addr() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_set_mac_address);
-+
-+void dpa_set_rx_mode(struct net_device *net_dev)
-+{
-+ int _errno;
-+ const struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
-+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
-+ _errno = priv->mac_dev->set_promisc(
-+ priv->mac_dev->get_mac_handle(priv->mac_dev),
-+ priv->mac_dev->promisc);
-+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
-+ netdev_err(net_dev,
-+ "mac_dev->set_promisc() = %d\n",
-+ _errno);
-+ }
-+
-+ _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
-+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
-+ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
-+}
-+EXPORT_SYMBOL(dpa_set_rx_mode);
-+
-+void dpa_set_buffers_layout(struct mac_device *mac_dev,
-+ struct dpa_buffer_layout_s *layout)
-+{
-+ struct fm_port_params params;
-+
-+ /* Rx */
-+ layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
-+ layout[RX].parse_results = true;
-+ layout[RX].hash_results = true;
-+#ifdef CONFIG_FSL_DPAA_TS
-+ layout[RX].time_stamp = true;
-+#endif
-+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
-+ layout[RX].manip_extra_space = params.manip_extra_space;
-+ /* a value of zero for data alignment means "don't care", so align to
-+ * a non-zero value to prevent FMD from using its own default
-+ */
-+ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
-+
-+ /* Tx */
-+ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
-+ layout[TX].parse_results = true;
-+ layout[TX].hash_results = true;
-+#ifdef CONFIG_FSL_DPAA_TS
-+ layout[TX].time_stamp = true;
-+#endif
-+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
-+ layout[TX].manip_extra_space = params.manip_extra_space;
-+ layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
-+}
-+EXPORT_SYMBOL(dpa_set_buffers_layout);
-+
-+int __attribute__((nonnull))
-+dpa_bp_alloc(struct dpa_bp *dpa_bp, struct device *dev)
-+{
-+ int err;
-+ struct bman_pool_params bp_params;
-+
-+ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
-+ pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers");
-+ return -EINVAL;
-+ }
-+
-+ memset(&bp_params, 0, sizeof(struct bman_pool_params));
-+#ifdef CONFIG_FMAN_PFC
-+ bp_params.flags = BMAN_POOL_FLAG_THRESH;
-+ bp_params.thresholds[0] = bp_params.thresholds[2] =
-+ CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
-+ bp_params.thresholds[1] = bp_params.thresholds[3] =
-+ CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
-+#endif
-+
-+ /* If the pool is already specified, we only create one per bpid */
-+ if (dpa_bpid2pool_use(dpa_bp->bpid))
-+ return 0;
-+
-+ if (dpa_bp->bpid == 0)
-+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
-+ else
-+ bp_params.bpid = dpa_bp->bpid;
-+
-+ dpa_bp->pool = bman_new_pool(&bp_params);
-+ if (unlikely(dpa_bp->pool == NULL)) {
-+ pr_err("bman_new_pool() failed\n");
-+ return -ENODEV;
-+ }
-+
-+ dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
-+
-+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
-+ if (err) {
-+ pr_err("dma_coerce_mask_and_coherent() failed\n");
-+ goto bman_free_pool;
-+ }
-+
-+ dpa_bp->dev = dev;
-+
-+ if (dpa_bp->seed_cb) {
-+ err = dpa_bp->seed_cb(dpa_bp);
-+ if (err)
-+ goto bman_free_pool;
-+ }
-+
-+ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
-+
-+ return 0;
-+
-+bman_free_pool:
-+ bman_free_pool(dpa_bp->pool);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(dpa_bp_alloc);
-+
-+void dpa_bp_drain(struct dpa_bp *bp)
-+{
-+ int ret, num = 8;
-+
-+ do {
-+ struct bm_buffer bmb[8];
-+ int i;
-+
-+ ret = bman_acquire(bp->pool, bmb, num, 0);
-+ if (ret < 0) {
-+ if (num == 8) {
-+ /* we have less than 8 buffers left;
-+ * drain them one by one
-+ */
-+ num = 1;
-+ ret = 1;
-+ continue;
-+ } else {
-+ /* Pool is fully drained */
-+ break;
-+ }
-+ }
-+
-+ for (i = 0; i < num; i++) {
-+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
-+
-+ dma_unmap_single(bp->dev, addr, bp->size,
-+ DMA_BIDIRECTIONAL);
-+
-+ bp->free_buf_cb(phys_to_virt(addr));
-+ }
-+ } while (ret > 0);
-+}
-+EXPORT_SYMBOL(dpa_bp_drain);
-+
-+static void __cold __attribute__((nonnull))
-+_dpa_bp_free(struct dpa_bp *dpa_bp)
-+{
-+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
-+
-+ /* the mapping between bpid and dpa_bp is done very late in the
-+ * allocation procedure; if something failed before the mapping, the bp
-+ * was not configured, therefore we don't need the below instructions
-+ */
-+ if (!bp)
-+ return;
-+
-+ if (!atomic_dec_and_test(&bp->refs))
-+ return;
-+
-+ if (bp->free_buf_cb)
-+ dpa_bp_drain(bp);
-+
-+ dpa_bp_array[bp->bpid] = NULL;
-+ bman_free_pool(bp->pool);
-+}
-+
-+void __cold __attribute__((nonnull))
-+dpa_bp_free(struct dpa_priv_s *priv)
-+{
-+ int i;
-+
-+ if (priv->dpa_bp)
-+ for (i = 0; i < priv->bp_count; i++)
-+ _dpa_bp_free(&priv->dpa_bp[i]);
-+}
-+EXPORT_SYMBOL(dpa_bp_free);
-+
-+struct dpa_bp *dpa_bpid2pool(int bpid)
-+{
-+ return dpa_bp_array[bpid];
-+}
-+EXPORT_SYMBOL(dpa_bpid2pool);
-+
-+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
-+{
-+ dpa_bp_array[bpid] = dpa_bp;
-+ atomic_set(&dpa_bp->refs, 1);
-+}
-+
-+bool dpa_bpid2pool_use(int bpid)
-+{
-+ if (dpa_bpid2pool(bpid)) {
-+ atomic_inc(&dpa_bp_array[bpid]->refs);
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
-+#ifdef CONFIG_FMAN_PFC
-+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
-+ void *accel_priv, select_queue_fallback_t fallback)
-+{
-+ return dpa_get_queue_mapping(skb);
-+}
-+#endif
-+
-+struct dpa_fq *dpa_fq_alloc(struct device *dev,
-+ u32 fq_start,
-+ u32 fq_count,
-+ struct list_head *list,
-+ enum dpa_fq_type fq_type)
-+{
-+ int i;
-+ struct dpa_fq *dpa_fq;
-+
-+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
-+ if (dpa_fq == NULL)
-+ return NULL;
-+
-+ for (i = 0; i < fq_count; i++) {
-+ dpa_fq[i].fq_type = fq_type;
-+ if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
-+ dpa_fq[i].fqid = fq_start ?
-+ DPAA_ETH_FQ_DELTA + fq_start + i : 0;
-+ else
-+ dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
-+
-+ list_add_tail(&dpa_fq[i].list, list);
-+ }
-+
-+#ifdef CONFIG_FMAN_PFC
-+ if (fq_type == FQ_TYPE_TX)
-+ for (i = 0; i < fq_count; i++)
-+ dpa_fq[i].wq = i / dpa_num_cpus;
-+ else
-+#endif
-+ for (i = 0; i < fq_count; i++)
-+ _dpa_assign_wq(dpa_fq + i);
-+
-+ return dpa_fq;
-+}
-+EXPORT_SYMBOL(dpa_fq_alloc);
-+
-+/* Probing of FQs for MACful ports */
-+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
-+ struct fm_port_fqs *port_fqs,
-+ bool alloc_tx_conf_fqs,
-+ enum port_type ptype)
-+{
-+ struct fqid_cell *fqids = NULL;
-+ const void *fqids_off = NULL;
-+ struct dpa_fq *dpa_fq = NULL;
-+ struct device_node *np = dev->of_node;
-+ int num_ranges;
-+ int i, lenp;
-+
-+ if (ptype == TX && alloc_tx_conf_fqs) {
-+ if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
-+ tx_confirm_fqids->count, list,
-+ FQ_TYPE_TX_CONF_MQ))
-+ goto fq_alloc_failed;
-+ }
-+
-+ fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
-+ if (fqids_off == NULL) {
-+ /* No dts definition, so use the defaults. */
-+ fqids = default_fqids[ptype];
-+ num_ranges = 3;
-+ } else {
-+ num_ranges = lenp / sizeof(*fqids);
-+
-+ fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
-+ GFP_KERNEL);
-+ if (fqids == NULL)
-+ goto fqids_alloc_failed;
-+
-+ /* convert to CPU endianness */
-+ for (i = 0; i < num_ranges; i++) {
-+ fqids[i].start = be32_to_cpup(fqids_off +
-+ i * sizeof(*fqids));
-+ fqids[i].count = be32_to_cpup(fqids_off +
-+ i * sizeof(*fqids) + sizeof(__be32));
-+ }
-+ }
-+
-+ for (i = 0; i < num_ranges; i++) {
-+ switch (i) {
-+ case 0:
-+ /* The first queue is the error queue */
-+ if (fqids[i].count != 1)
-+ goto invalid_error_queue;
-+
-+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ ptype == RX ?
-+ FQ_TYPE_RX_ERROR :
-+ FQ_TYPE_TX_ERROR);
-+ if (dpa_fq == NULL)
-+ goto fq_alloc_failed;
-+
-+ if (ptype == RX)
-+ port_fqs->rx_errq = &dpa_fq[0];
-+ else
-+ port_fqs->tx_errq = &dpa_fq[0];
-+ break;
-+ case 1:
-+ /* the second queue is the default queue */
-+ if (fqids[i].count != 1)
-+ goto invalid_default_queue;
-+
-+ dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ ptype == RX ?
-+ FQ_TYPE_RX_DEFAULT :
-+ FQ_TYPE_TX_CONFIRM);
-+ if (dpa_fq == NULL)
-+ goto fq_alloc_failed;
-+
-+ if (ptype == RX)
-+ port_fqs->rx_defq = &dpa_fq[0];
-+ else
-+ port_fqs->tx_defq = &dpa_fq[0];
-+ break;
-+ default:
-+ /* all subsequent queues are either RX* PCD or Tx */
-+ if (ptype == RX) {
-+ if (!dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ FQ_TYPE_RX_PCD) ||
-+ !dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ FQ_TYPE_RX_PCD_HI_PRIO))
-+ goto fq_alloc_failed;
-+ } else {
-+ if (!dpa_fq_alloc(dev, fqids[i].start,
-+ fqids[i].count, list,
-+ FQ_TYPE_TX))
-+ goto fq_alloc_failed;
-+ }
-+ break;
-+ }
-+ }
-+
-+ return 0;
-+
-+fq_alloc_failed:
-+fqids_alloc_failed:
-+ dev_err(dev, "Cannot allocate memory for frame queues\n");
-+ return -ENOMEM;
-+
-+invalid_default_queue:
-+invalid_error_queue:
-+ dev_err(dev, "Too many default or error queues\n");
-+ return -EINVAL;
-+}
-+EXPORT_SYMBOL(dpa_fq_probe_mac);
-+
-+static u32 rx_pool_channel;
-+static DEFINE_SPINLOCK(rx_pool_channel_init);
-+
-+int dpa_get_channel(void)
-+{
-+ spin_lock(&rx_pool_channel_init);
-+ if (!rx_pool_channel) {
-+ u32 pool;
-+ int ret = qman_alloc_pool(&pool);
-+ if (!ret)
-+ rx_pool_channel = pool;
-+ }
-+ spin_unlock(&rx_pool_channel_init);
-+ if (!rx_pool_channel)
-+ return -ENOMEM;
-+ return rx_pool_channel;
-+}
-+EXPORT_SYMBOL(dpa_get_channel);
-+
-+void dpa_release_channel(void)
-+{
-+ qman_release_pool(rx_pool_channel);
-+}
-+EXPORT_SYMBOL(dpa_release_channel);
-+
-+void dpaa_eth_add_channel(u16 channel)
-+{
-+ const cpumask_t *cpus = qman_affine_cpus();
-+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
-+ int cpu;
-+ struct qman_portal *portal;
-+
-+ for_each_cpu(cpu, cpus) {
-+ portal = (struct qman_portal *)qman_get_affine_portal(cpu);
-+ qman_p_static_dequeue_add(portal, pool);
-+ }
-+}
-+EXPORT_SYMBOL(dpaa_eth_add_channel);
-+
-+/**
-+ * Congestion group state change notification callback.
-+ * Stops the device's egress queues while they are congested and
-+ * wakes them upon exiting congested state.
-+ * Also updates some CGR-related stats.
-+ */
-+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
-+ int congested)
-+{
-+ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
-+ struct dpa_priv_s, cgr_data.cgr);
-+
-+ if (congested) {
-+ priv->cgr_data.congestion_start_jiffies = jiffies;
-+ netif_tx_stop_all_queues(priv->net_dev);
-+ priv->cgr_data.cgr_congested_count++;
-+ } else {
-+ priv->cgr_data.congested_jiffies +=
-+ (jiffies - priv->cgr_data.congestion_start_jiffies);
-+ netif_tx_wake_all_queues(priv->net_dev);
-+ }
-+}
-+
-+int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
-+{
-+ struct qm_mcc_initcgr initcgr;
-+ u32 cs_th;
-+ int err;
-+
-+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
-+ if (err < 0) {
-+ pr_err("Error %d allocating CGR ID\n", err);
-+ goto out_error;
-+ }
-+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
-+
-+ /* Enable Congestion State Change Notifications and CS taildrop */
-+ initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
-+ initcgr.cgr.cscn_en = QM_CGR_EN;
-+
-+ /* Set different thresholds based on the MAC speed.
-+ * TODO: this may become suboptimal if the MAC is reconfigured at a speed
-+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
-+ * In such cases, we ought to reconfigure the threshold, too.
-+ */
-+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
-+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
-+ else
-+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
-+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
-+
-+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
-+ initcgr.cgr.cstd_en = QM_CGR_EN;
-+
-+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
-+ &initcgr);
-+ if (err < 0) {
-+ pr_err("Error %d creating CGR with ID %d\n", err,
-+ priv->cgr_data.cgr.cgrid);
-+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-+ goto out_error;
-+ }
-+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
-+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
-+ priv->cgr_data.cgr.chan);
-+
-+out_error:
-+ return err;
-+}
-+EXPORT_SYMBOL(dpaa_eth_cgr_init);
-+
-+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
-+ struct dpa_fq *fq,
-+ const struct qman_fq *template)
-+{
-+ fq->fq_base = *template;
-+ fq->net_dev = priv->net_dev;
-+
-+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
-+ fq->channel = priv->channel;
-+}
-+
-+static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
-+ struct dpa_fq *fq,
-+ struct fm_port *port,
-+ const struct qman_fq *template)
-+{
-+ fq->fq_base = *template;
-+ fq->net_dev = priv->net_dev;
-+
-+ if (port) {
-+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
-+ fq->channel = (uint16_t)fm_get_tx_port_channel(port);
-+ } else {
-+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
-+ }
-+}
-+
-+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
-+ struct fm_port *tx_port)
-+{
-+ struct dpa_fq *fq;
-+ uint16_t portals[NR_CPUS];
-+ int cpu, portal_cnt = 0, num_portals = 0;
-+ uint32_t pcd_fqid, pcd_fqid_hi_prio;
-+ const cpumask_t *affine_cpus = qman_affine_cpus();
-+ int egress_cnt = 0, conf_cnt = 0;
-+
-+ /* Prepare for PCD FQs init */
-+ for_each_cpu(cpu, affine_cpus)
-+ portals[num_portals++] = qman_affine_channel(cpu);
-+ if (num_portals == 0)
-+ dev_err(priv->net_dev->dev.parent,
-+ "No Qman software (affine) channels found");
-+
-+ pcd_fqid = (priv->mac_dev) ?
-+ DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
-+ pcd_fqid_hi_prio = (priv->mac_dev) ?
-+ DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
-+
-+ /* Initialize each FQ in the list */
-+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
-+ switch (fq->fq_type) {
-+ case FQ_TYPE_RX_DEFAULT:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
-+ break;
-+ case FQ_TYPE_RX_ERROR:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
-+ break;
-+ case FQ_TYPE_RX_PCD:
-+ /* For MACless we can't have dynamic Rx queues */
-+ BUG_ON(!priv->mac_dev && !fq->fqid);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
-+ if (!fq->fqid)
-+ fq->fqid = pcd_fqid++;
-+ fq->channel = portals[portal_cnt];
-+ portal_cnt = (portal_cnt + 1) % num_portals;
-+ break;
-+ case FQ_TYPE_RX_PCD_HI_PRIO:
-+ /* For MACless we can't have dynamic Hi Pri Rx queues */
-+ BUG_ON(!priv->mac_dev && !fq->fqid);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
-+ if (!fq->fqid)
-+ fq->fqid = pcd_fqid_hi_prio++;
-+ fq->channel = portals[portal_cnt];
-+ portal_cnt = (portal_cnt + 1) % num_portals;
-+ break;
-+ case FQ_TYPE_TX:
-+ dpa_setup_egress(priv, fq, tx_port,
-+ &fq_cbs->egress_ern);
-+ /* If we have more Tx queues than the number of cores,
-+ * just ignore the extra ones.
-+ */
-+ if (egress_cnt < DPAA_ETH_TX_QUEUES)
-+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
-+ break;
-+ case FQ_TYPE_TX_CONFIRM:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
-+ break;
-+ case FQ_TYPE_TX_CONF_MQ:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
-+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
-+ break;
-+ case FQ_TYPE_TX_ERROR:
-+ BUG_ON(!priv->mac_dev);
-+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
-+ break;
-+ default:
-+ dev_warn(priv->net_dev->dev.parent,
-+ "Unknown FQ type detected!\n");
-+ break;
-+ }
-+ }
-+
-+ /* The number of Tx queues may be smaller than the number of cores, if
-+ * the Tx queue range is specified in the device tree instead of being
-+ * dynamically allocated.
-+ * Make sure all CPUs receive a corresponding Tx queue.
-+ */
-+ while (egress_cnt < DPAA_ETH_TX_QUEUES) {
-+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
-+ if (fq->fq_type != FQ_TYPE_TX)
-+ continue;
-+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
-+ if (egress_cnt == DPAA_ETH_TX_QUEUES)
-+ break;
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(dpa_fq_setup);
-+
-+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
-+{
-+ int _errno;
-+ const struct dpa_priv_s *priv;
-+ struct device *dev;
-+ struct qman_fq *fq;
-+ struct qm_mcc_initfq initfq;
-+ struct qman_fq *confq;
-+ int queue_id;
-+
-+ priv = netdev_priv(dpa_fq->net_dev);
-+ dev = dpa_fq->net_dev->dev.parent;
-+
-+ if (dpa_fq->fqid == 0)
-+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
-+
-+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
-+
-+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
-+ if (_errno) {
-+ dev_err(dev, "qman_create_fq() failed\n");
-+ return _errno;
-+ }
-+ fq = &dpa_fq->fq_base;
-+
-+ if (dpa_fq->init) {
-+ memset(&initfq, 0, sizeof(initfq));
-+
-+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
-+
-+ /* Try to reduce the number of portal interrupts for
-+ * Tx Confirmation FQs.
-+ */
-+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
-+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
-+
-+ /* FQ placement */
-+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
-+
-+ initfq.fqd.dest.channel = dpa_fq->channel;
-+ initfq.fqd.dest.wq = dpa_fq->wq;
-+
-+ /* Put all egress queues in a congestion group of their own.
-+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
-+ * rather than Tx - but they nonetheless account for the
-+ * memory footprint on behalf of egress traffic. We therefore
-+ * place them in the netdev's CGR, along with the Tx FQs.
-+ */
-+ if (dpa_fq->fq_type == FQ_TYPE_TX ||
-+ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
-+ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
-+ initfq.we_mask |= QM_INITFQ_WE_CGID;
-+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
-+ initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
-+ /* Set a fixed overhead accounting, in an attempt to
-+ * reduce the impact of fixed-size skb shells and the
-+ * driver's needed headroom on system memory. This is
-+ * especially the case when the egress traffic is
-+ * composed of small datagrams.
-+ * Unfortunately, QMan's OAL value is capped to an
-+ * insufficient value, but even that is better than
-+ * no overhead accounting at all.
-+ */
-+ initfq.we_mask |= QM_INITFQ_WE_OAC;
-+ initfq.fqd.oac_init.oac = QM_OAC_CG;
-+ initfq.fqd.oac_init.oal =
-+ (signed char)(min(sizeof(struct sk_buff) +
-+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-+ }
-+
-+ if (td_enable) {
-+ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
-+ qm_fqd_taildrop_set(&initfq.fqd.td,
-+ DPA_FQ_TD, 1);
-+ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
-+ }
-+
-+ /* Configure the Tx confirmation queue, now that we know
-+ * which Tx queue it pairs with.
-+ */
-+ if (dpa_fq->fq_type == FQ_TYPE_TX) {
-+ queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
-+ if (queue_id >= 0) {
-+ confq = priv->conf_fqs[queue_id];
-+ if (confq) {
-+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
-+ /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
-+ * A2V=1 (contextA A2 field is valid)
-+ * A0V=1 (contextA A0 field is valid)
-+ * B0V=1 (contextB field is valid)
-+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
-+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
-+ */
-+ initfq.fqd.context_a.hi = 0x1e000000;
-+ initfq.fqd.context_a.lo = 0x80000000;
-+ }
-+ }
-+ }
-+
-+ /* Put all *private* ingress queues in our "ingress CGR". */
-+ if (priv->use_ingress_cgr &&
-+ (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
-+ dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
-+ dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
-+ dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
-+ initfq.we_mask |= QM_INITFQ_WE_CGID;
-+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
-+ initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
-+ /* Set a fixed overhead accounting, just like for the
-+ * egress CGR.
-+ */
-+ initfq.we_mask |= QM_INITFQ_WE_OAC;
-+ initfq.fqd.oac_init.oac = QM_OAC_CG;
-+ initfq.fqd.oac_init.oal =
-+ (signed char)(min(sizeof(struct sk_buff) +
-+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
-+ }
-+
-+ /* Initialization common to all ingress queues */
-+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
-+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
-+ initfq.fqd.fq_ctrl |=
-+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
-+ initfq.fqd.context_a.stashing.exclusive =
-+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
-+ QM_STASHING_EXCL_ANNOTATION;
-+ initfq.fqd.context_a.stashing.data_cl = 2;
-+ initfq.fqd.context_a.stashing.annotation_cl = 1;
-+ initfq.fqd.context_a.stashing.context_cl =
-+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
-+ }
-+
-+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
-+ if (_errno < 0) {
-+ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno)) {
-+ dpa_fq->init = 0;
-+ } else {
-+ dev_err(dev, "qman_init_fq(%u) = %d\n",
-+ qman_fq_fqid(fq), _errno);
-+ qman_destroy_fq(fq, 0);
-+ }
-+ return _errno;
-+ }
-+ }
-+
-+ dpa_fq->fqid = qman_fq_fqid(fq);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_fq_init);
-+
-+int __cold __attribute__((nonnull))
-+_dpa_fq_free(struct device *dev, struct qman_fq *fq)
-+{
-+ int _errno, __errno;
-+ struct dpa_fq *dpa_fq;
-+ const struct dpa_priv_s *priv;
-+
-+ _errno = 0;
-+
-+ dpa_fq = container_of(fq, struct dpa_fq, fq_base);
-+ priv = netdev_priv(dpa_fq->net_dev);
-+
-+ if (dpa_fq->init) {
-+ _errno = qman_retire_fq(fq, NULL);
-+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
-+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
-+ qman_fq_fqid(fq), _errno);
-+
-+ __errno = qman_oos_fq(fq);
-+ if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
-+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
-+ qman_fq_fqid(fq), __errno);
-+ if (_errno >= 0)
-+ _errno = __errno;
-+ }
-+ }
-+
-+ qman_destroy_fq(fq, 0);
-+ list_del(&dpa_fq->list);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(_dpa_fq_free);
-+
-+int __cold __attribute__((nonnull))
-+dpa_fq_free(struct device *dev, struct list_head *list)
-+{
-+ int _errno, __errno;
-+ struct dpa_fq *dpa_fq, *tmp;
-+
-+ _errno = 0;
-+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
-+ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
-+ if (unlikely(__errno < 0) && _errno >= 0)
-+ _errno = __errno;
-+ }
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_fq_free);
-+
-+int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable)
-+{
-+ int _errno, __errno;
-+ struct dpa_fq *dpa_fq, *tmp;
-+ static bool print_msg __read_mostly;
-+
-+ _errno = 0;
-+ print_msg = true;
-+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
-+ __errno = dpa_fq_init(dpa_fq, td_enable);
-+ if (unlikely(__errno < 0) && _errno >= 0) {
-+ if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, __errno)) {
-+ if (print_msg) {
-+ dev_warn(dev,
-+ "Skip RX PCD High Priority FQs initialization\n");
-+ print_msg = false;
-+ }
-+ if (_dpa_fq_free(dev, (struct qman_fq *)dpa_fq))
-+ dev_warn(dev,
-+ "Error freeing frame queues\n");
-+ } else {
-+ _errno = __errno;
-+ break;
-+ }
-+ }
-+ }
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_fqs_init);
-+
-+static void
-+dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
-+ struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
-+{
-+ struct fm_port_params tx_port_param;
-+ bool frag_enabled = false;
-+
-+ memset(&tx_port_param, 0, sizeof(tx_port_param));
-+ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
-+ buf_layout, frag_enabled);
-+}
-+
-+static void
-+dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
-+ struct dpa_fq *errq, struct dpa_fq *defq,
-+ struct dpa_buffer_layout_s *buf_layout)
-+{
-+ struct fm_port_params rx_port_param;
-+ int i;
-+ bool frag_enabled = false;
-+
-+ memset(&rx_port_param, 0, sizeof(rx_port_param));
-+ count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
-+ rx_port_param.num_pools = (uint8_t)count;
-+ for (i = 0; i < count; i++) {
-+ if (i >= rx_port_param.num_pools)
-+ break;
-+ rx_port_param.pool_param[i].id = bp[i].bpid;
-+ rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
-+ }
-+
-+ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
-+ buf_layout, frag_enabled);
-+}
-+
-+#if defined(CONFIG_FSL_SDK_FMAN_TEST)
-+/* Defined as weak, to be implemented by fman pcd tester. */
-+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
-+__attribute__((weak));
-+
-+int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
-+#else
-+int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
-+
-+int dpa_free_pcd_fqids(struct device *, uint32_t);
-+
-+#endif /* CONFIG_FSL_SDK_FMAN_TEST */
-+
-+int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
-+ uint8_t alignment, uint32_t *base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+
-+ return 0;
-+}
-+
-+int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+
-+ return 0;
-+}
-+
-+void dpaa_eth_init_ports(struct mac_device *mac_dev,
-+ struct dpa_bp *bp, size_t count,
-+ struct fm_port_fqs *port_fqs,
-+ struct dpa_buffer_layout_s *buf_layout,
-+ struct device *dev)
-+{
-+ struct fm_port_pcd_param rx_port_pcd_param;
-+ struct fm_port *rxport = mac_dev->port_dev[RX];
-+ struct fm_port *txport = mac_dev->port_dev[TX];
-+
-+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
-+ port_fqs->tx_defq, &buf_layout[TX]);
-+ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
-+ port_fqs->rx_defq, &buf_layout[RX]);
-+
-+ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
-+ rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
-+ rx_port_pcd_param.dev = dev;
-+ fm_port_pcd_bind(rxport, &rx_port_pcd_param);
-+}
-+EXPORT_SYMBOL(dpaa_eth_init_ports);
-+
-+void dpa_release_sgt(struct qm_sg_entry *sgt)
-+{
-+ struct dpa_bp *dpa_bp;
-+ struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
-+ uint8_t i = 0, j;
-+
-+ memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
-+
-+ do {
-+ dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
-+ DPA_BUG_ON(!dpa_bp);
-+
-+ j = 0;
-+ do {
-+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+ bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
-+
-+ j++; i++;
-+ } while (j < ARRAY_SIZE(bmb) &&
-+ !qm_sg_entry_get_final(&sgt[i-1]) &&
-+ qm_sg_entry_get_bpid(&sgt[i-1]) ==
-+ qm_sg_entry_get_bpid(&sgt[i]));
-+
-+ while (bman_release(dpa_bp->pool, bmb, j, 0))
-+ cpu_relax();
-+ } while (!qm_sg_entry_get_final(&sgt[i-1]));
-+}
-+EXPORT_SYMBOL(dpa_release_sgt);
-+
-+void __attribute__((nonnull))
-+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
-+{
-+ struct qm_sg_entry *sgt;
-+ struct dpa_bp *dpa_bp;
-+ struct bm_buffer bmb;
-+ dma_addr_t addr;
-+ void *vaddr;
-+
-+ bmb.opaque = 0;
-+ bm_buffer_set64(&bmb, qm_fd_addr(fd));
-+
-+ dpa_bp = dpa_bpid2pool(fd->bpid);
-+ DPA_BUG_ON(!dpa_bp);
-+
-+ if (fd->format == qm_fd_sg) {
-+ vaddr = phys_to_virt(qm_fd_addr(fd));
-+ sgt = vaddr + dpa_fd_offset(fd);
-+
-+ dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+
-+ dpa_release_sgt(sgt);
-+ addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ return;
-+ }
-+ bm_buffer_set64(&bmb, addr);
-+ }
-+
-+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
-+ cpu_relax();
-+}
-+EXPORT_SYMBOL(dpa_fd_release);
-+
-+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_mr_entry *msg)
-+{
-+ switch (msg->ern.rc & QM_MR_RC_MASK) {
-+ case QM_MR_RC_CGR_TAILDROP:
-+ percpu_priv->ern_cnt.cg_tdrop++;
-+ break;
-+ case QM_MR_RC_WRED:
-+ percpu_priv->ern_cnt.wred++;
-+ break;
-+ case QM_MR_RC_ERROR:
-+ percpu_priv->ern_cnt.err_cond++;
-+ break;
-+ case QM_MR_RC_ORPWINDOW_EARLY:
-+ percpu_priv->ern_cnt.early_window++;
-+ break;
-+ case QM_MR_RC_ORPWINDOW_LATE:
-+ percpu_priv->ern_cnt.late_window++;
-+ break;
-+ case QM_MR_RC_FQ_TAILDROP:
-+ percpu_priv->ern_cnt.fq_tdrop++;
-+ break;
-+ case QM_MR_RC_ORPWINDOW_RETIRED:
-+ percpu_priv->ern_cnt.fq_retired++;
-+ break;
-+ case QM_MR_RC_ORP_ZERO:
-+ percpu_priv->ern_cnt.orp_zero++;
-+ break;
-+ }
-+}
-+EXPORT_SYMBOL(count_ern);
-+
-+/**
-+ * Turn on HW checksum computation for this outgoing frame.
-+ * If the current protocol is not something we support in this regard
-+ * (or if the stack has already computed the SW checksum), we do nothing.
-+ *
-+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
-+ * otherwise.
-+ *
-+ * Note that this function may modify the fd->cmd field and the skb data buffer
-+ * (the Parse Results area).
-+ */
-+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
-+{
-+ fm_prs_result_t *parse_result;
-+ struct iphdr *iph;
-+ struct ipv6hdr *ipv6h = NULL;
-+ u8 l4_proto;
-+ u16 ethertype = ntohs(skb->protocol);
-+ int retval = 0;
-+
-+ if (skb->ip_summed != CHECKSUM_PARTIAL)
-+ return 0;
-+
-+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
-+ * L4 alone from the FM configuration anyway.
-+ */
-+
-+ /* Fill in some fields of the Parse Results array, so the FMan
-+ * can find them as if they came from the FMan Parser.
-+ */
-+ parse_result = (fm_prs_result_t *)parse_results;
-+
-+ /* If we're dealing with VLAN, get the real Ethernet type */
-+ if (ethertype == ETH_P_8021Q) {
-+ /* We can't always assume the MAC header is set correctly
-+ * by the stack, so reset to beginning of skb->data
-+ */
-+ skb_reset_mac_header(skb);
-+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
-+ }
-+
-+ /* Fill in the relevant L3 parse result fields
-+ * and read the L4 protocol type
-+ */
-+ switch (ethertype) {
-+ case ETH_P_IP:
-+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
-+ iph = ip_hdr(skb);
-+ DPA_BUG_ON(iph == NULL);
-+ l4_proto = iph->protocol;
-+ break;
-+ case ETH_P_IPV6:
-+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
-+ ipv6h = ipv6_hdr(skb);
-+ DPA_BUG_ON(ipv6h == NULL);
-+ l4_proto = ipv6h->nexthdr;
-+ break;
-+ default:
-+ /* We shouldn't even be here */
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_alert(priv->net_dev,
-+ "Can't compute HW csum for L3 proto 0x%x\n",
-+ ntohs(skb->protocol));
-+ retval = -EIO;
-+ goto return_error;
-+ }
-+
-+ /* Fill in the relevant L4 parse result fields */
-+ switch (l4_proto) {
-+ case IPPROTO_UDP:
-+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
-+ break;
-+ case IPPROTO_TCP:
-+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
-+ break;
-+ default:
-+ /* This can as well be a BUG() */
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_alert(priv->net_dev,
-+ "Can't compute HW csum for L4 proto 0x%x\n",
-+ l4_proto);
-+ retval = -EIO;
-+ goto return_error;
-+ }
-+
-+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
-+ parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
-+ parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
-+
-+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
-+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
-+
-+ /* On P1023 and similar platforms fd->cmd interpretation could
-+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
-+ * is not set so we do not need to check; in the future, if/when
-+ * using context_a we need to check this bit
-+ */
-+
-+return_error:
-+ return retval;
-+}
-+EXPORT_SYMBOL(dpa_enable_tx_csum);
-+
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+void dpa_enable_ceetm(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ priv->ceetm_en = true;
-+}
-+EXPORT_SYMBOL(dpa_enable_ceetm);
-+
-+void dpa_disable_ceetm(struct net_device *dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(dev);
-+ priv->ceetm_en = false;
-+}
-+EXPORT_SYMBOL(dpa_disable_ceetm);
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
-@@ -0,0 +1,226 @@
-+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA_ETH_COMMON_H
-+#define __DPAA_ETH_COMMON_H
-+
-+#include <linux/etherdevice.h> /* struct net_device */
-+#include <linux/fsl_bman.h> /* struct bm_buffer */
-+#include <linux/of_platform.h> /* struct platform_device */
-+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
-+
-+#include "dpaa_eth.h"
-+#include "lnxwrp_fsl_fman.h"
-+
-+#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
-+ frag_enabled) \
-+{ \
-+ param.errq = errq_id; \
-+ param.defq = defq_id; \
-+ param.priv_data_size = buf_layout->priv_data_size; \
-+ param.parse_results = buf_layout->parse_results; \
-+ param.hash_results = buf_layout->hash_results; \
-+ param.frag_enable = frag_enabled; \
-+ param.time_stamp = buf_layout->time_stamp; \
-+ param.manip_extra_space = buf_layout->manip_extra_space; \
-+ param.data_align = buf_layout->data_align; \
-+ fm_set_##type##_port_params(port, &param); \
-+}
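For reference only: dpaa_eth_init_port() is a function-like macro whose ##type## paste selects either fm_set_rx_port_params() or fm_set_tx_port_params(). The dpaa_eth_init_port(tx, ...) call made by dpaa_eth_init_tx_port() earlier in this patch expands roughly to the compound statement below (no new behaviour is implied, just the macro written out):

{
	tx_port_param.errq = errq->fqid;
	tx_port_param.defq = defq->fqid;
	tx_port_param.priv_data_size = buf_layout->priv_data_size;
	tx_port_param.parse_results = buf_layout->parse_results;
	tx_port_param.hash_results = buf_layout->hash_results;
	tx_port_param.frag_enable = frag_enabled;
	tx_port_param.time_stamp = buf_layout->time_stamp;
	tx_port_param.manip_extra_space = buf_layout->manip_extra_space;
	tx_port_param.data_align = buf_layout->data_align;
	fm_set_tx_port_params(port, &tx_port_param);	/* Tx flavour selected by ##type## */
}
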
-+
-+/* The SGT needs to be 256 bytes long. Even if the table has only one entry,
-+ * the FMan will read 256 bytes from its start.
-+ */
-+#define DPA_SGT_SIZE 256
-+#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-+
-+#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
-+
-+#define DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno) \
-+ (((dpa_fq)->fq_type == FQ_TYPE_RX_PCD_HI_PRIO) && \
-+ (_errno == -EIO))
-+/* return codes for the dpaa-eth hooks */
-+enum dpaa_eth_hook_result {
-+ /* fd/skb was retained by the hook.
-+ *
-+ * On the Rx path, this means the Ethernet driver will _not_
-+ * deliver the skb to the stack. Instead, the hook implementation
-+ * is expected to properly dispose of the skb.
-+ *
-+ * On the Tx path, the Ethernet driver's dpa_tx() function will
-+ * immediately return NETDEV_TX_OK. The hook implementation is expected
-+ * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
-+ * unless you know exactly what you're doing!
-+ *
-+ * On the confirmation/error paths, the Ethernet driver will _not_
-+ * perform any fd cleanup, nor update the interface statistics.
-+ */
-+ DPAA_ETH_STOLEN,
-+ /* fd/skb was returned to the Ethernet driver for regular processing.
-+ * The hook is not allowed to, for instance, reallocate the skb (as if
-+ * by linearizing, copying, cloning or reallocating the headroom).
-+ */
-+ DPAA_ETH_CONTINUE
-+};
-+
-+typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
-+ struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
-+typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
-+ struct sk_buff *skb, struct net_device *net_dev);
-+typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
-+ struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
-+
-+/* used in napi related functions */
-+extern u16 qman_portal_max;
-+
-+/* from dpa_ethtool.c */
-+extern const struct ethtool_ops dpa_ethtool_ops;
-+
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+/* Various hooks used for unit-testing and/or fastpath optimizations.
-+ * Currently only one set of such hooks is supported.
-+ */
-+struct dpaa_eth_hooks_s {
-+ /* Invoked on the Tx private path, immediately after receiving the skb
-+ * from the stack.
-+ */
-+ dpaa_eth_egress_hook_t tx;
-+
-+ /* Invoked on the Rx private path, right before passing the skb
-+ * up the stack. At that point, the packet's protocol id has already
-+ * been set. The skb's data pointer is now at the L3 header, and
-+ * skb->mac_header points to the L2 header. skb->len has been adjusted
-+ * to be the length of L3+payload (i.e., the length of the
-+ * original frame minus the L2 header len).
-+ * For more details on what the skb looks like, see eth_type_trans().
-+ */
-+ dpaa_eth_ingress_hook_t rx_default;
-+
-+ /* Driver hook for the Rx error private path. */
-+ dpaa_eth_confirm_hook_t rx_error;
-+ /* Driver hook for the Tx confirmation private path. */
-+ dpaa_eth_confirm_hook_t tx_confirm;
-+ /* Driver hook for the Tx error private path. */
-+ dpaa_eth_confirm_hook_t tx_error;
-+};
-+
-+void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
-+
-+extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
-+#endif
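
The hook result codes and struct dpaa_eth_hooks_s above form the whole contract for fastpath or test consumers. A hedged sketch of a hypothetical out-of-tree module registering an Rx hook, assuming CONFIG_FSL_DPAA_HOOKS=y; the FQ ID used in the filter is purely illustrative:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "dpaa_eth_common.h"

/* Drop everything arriving on one (hypothetical) PCD frame queue and hand
 * all other traffic back to the driver for normal stack delivery.
 */
static enum dpaa_eth_hook_result example_rx_hook(struct sk_buff *skb,
						 struct net_device *net_dev,
						 u32 fqid)
{
	if (fqid == 0x400) {		/* illustrative FQ ID only */
		dev_kfree_skb(skb);	/* STOLEN means we now own and free the skb */
		return DPAA_ETH_STOLEN;
	}
	return DPAA_ETH_CONTINUE;	/* let the driver pass it up the stack */
}

static struct dpaa_eth_hooks_s example_hooks = {
	.rx_default = example_rx_hook,
};

static int __init example_hooks_init(void)
{
	/* the driver copies the structure, so a static instance is enough */
	fsl_dpaa_eth_set_hooks(&example_hooks);
	return 0;
}
module_init(example_hooks_init);
MODULE_LICENSE("GPL");
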
-+
-+int dpa_netdev_init(struct net_device *net_dev,
-+ const uint8_t *mac_addr,
-+ uint16_t tx_timeout);
-+int __cold dpa_start(struct net_device *net_dev);
-+int __cold dpa_stop(struct net_device *net_dev);
-+void __cold dpa_timeout(struct net_device *net_dev);
-+void __cold
-+dpa_get_stats64(struct net_device *net_dev,
-+ struct rtnl_link_stats64 *stats);
-+int dpa_ndo_init(struct net_device *net_dev);
-+int dpa_set_features(struct net_device *dev, netdev_features_t features);
-+netdev_features_t dpa_fix_features(struct net_device *dev,
-+ netdev_features_t features);
-+#ifdef CONFIG_FSL_DPAA_TS
-+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
-+ enum port_type rx_tx, const void *data);
-+/* Updates the skb shared hw timestamp from the hardware timestamp */
-+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
-+ struct skb_shared_hwtstamps *shhwtstamps, const void *data);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-+int __cold dpa_remove(struct platform_device *of_dev);
-+struct mac_device * __cold __must_check
-+__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
-+int dpa_set_mac_address(struct net_device *net_dev, void *addr);
-+void dpa_set_rx_mode(struct net_device *net_dev);
-+void dpa_set_buffers_layout(struct mac_device *mac_dev,
-+ struct dpa_buffer_layout_s *layout);
-+int __attribute__((nonnull))
-+dpa_bp_alloc(struct dpa_bp *dpa_bp, struct device *dev);
-+void __cold __attribute__((nonnull))
-+dpa_bp_free(struct dpa_priv_s *priv);
-+struct dpa_bp *dpa_bpid2pool(int bpid);
-+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
-+bool dpa_bpid2pool_use(int bpid);
-+void dpa_bp_drain(struct dpa_bp *bp);
-+#ifdef CONFIG_FMAN_PFC
-+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
-+ void *accel_priv, select_queue_fallback_t fallback);
-+#endif
-+struct dpa_fq *dpa_fq_alloc(struct device *dev,
-+ u32 fq_start,
-+ u32 fq_count,
-+ struct list_head *list,
-+ enum dpa_fq_type fq_type);
-+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
-+ struct fm_port_fqs *port_fqs,
-+ bool tx_conf_fqs_per_core,
-+ enum port_type ptype);
-+int dpa_get_channel(void);
-+void dpa_release_channel(void);
-+void dpaa_eth_add_channel(u16 channel);
-+int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
-+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
-+ struct fm_port *tx_port);
-+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
-+int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable);
-+int __cold __attribute__((nonnull))
-+dpa_fq_free(struct device *dev, struct list_head *list);
-+void dpaa_eth_init_ports(struct mac_device *mac_dev,
-+ struct dpa_bp *bp, size_t count,
-+ struct fm_port_fqs *port_fqs,
-+ struct dpa_buffer_layout_s *buf_layout,
-+ struct device *dev);
-+void dpa_release_sgt(struct qm_sg_entry *sgt);
-+void __attribute__((nonnull))
-+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
-+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_mr_entry *msg);
-+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+void dpa_enable_ceetm(struct net_device *dev);
-+void dpa_disable_ceetm(struct net_device *dev);
-+#endif
-+struct proxy_device {
-+ struct mac_device *mac_dev;
-+};
-+
-+/* mac device control functions exposed by proxy interface */
-+int dpa_proxy_start(struct net_device *net_dev);
-+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
-+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev);
-+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev);
-+
-+#endif /* __DPAA_ETH_COMMON_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
-@@ -0,0 +1,381 @@
-+/* Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_platform.h>
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#include "dpaa_eth_base.h"
-+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
-+#include "mac.h"
-+
-+#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+MODULE_DESCRIPTION(DPA_DESCRIPTION);
-+
-+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
-+#ifdef CONFIG_PM
-+
-+static int proxy_suspend(struct device *dev)
-+{
-+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ int err = 0;
-+
-+ err = fm_port_suspend(mac_dev->port_dev[RX]);
-+ if (err)
-+ goto port_suspend_failed;
-+
-+ err = fm_port_suspend(mac_dev->port_dev[TX]);
-+ if (err)
-+ err = fm_port_resume(mac_dev->port_dev[RX]);
-+
-+port_suspend_failed:
-+ return err;
-+}
-+
-+static int proxy_resume(struct device *dev)
-+{
-+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ int err = 0;
-+
-+ err = fm_port_resume(mac_dev->port_dev[TX]);
-+ if (err)
-+ goto port_resume_failed;
-+
-+ err = fm_port_resume(mac_dev->port_dev[RX]);
-+ if (err)
-+ err = fm_port_suspend(mac_dev->port_dev[TX]);
-+
-+port_resume_failed:
-+ return err;
-+}
-+
-+static const struct dev_pm_ops proxy_pm_ops = {
-+ .suspend = proxy_suspend,
-+ .resume = proxy_resume,
-+};
-+
-+#define PROXY_PM_OPS (&proxy_pm_ops)
-+
-+#else /* CONFIG_PM */
-+
-+#define PROXY_PM_OPS NULL
-+
-+#endif /* CONFIG_PM */
-+
-+static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
-+{
-+ int err = 0, i;
-+ struct device *dev;
-+ struct device_node *dpa_node;
-+ struct dpa_bp *dpa_bp;
-+ struct list_head proxy_fq_list;
-+ size_t count;
-+ struct fm_port_fqs port_fqs;
-+ struct dpa_buffer_layout_s *buf_layout = NULL;
-+ struct mac_device *mac_dev;
-+ struct proxy_device *proxy_dev;
-+
-+ dev = &_of_dev->dev;
-+
-+ dpa_node = dev->of_node;
-+
-+ if (!of_device_is_available(dpa_node))
-+ return -ENODEV;
-+
-+ /* Get the buffer pools assigned to this interface */
-+ dpa_bp = dpa_bp_probe(_of_dev, &count);
-+ if (IS_ERR(dpa_bp))
-+ return PTR_ERR(dpa_bp);
-+
-+ mac_dev = dpa_mac_probe(_of_dev);
-+ if (IS_ERR(mac_dev))
-+ return PTR_ERR(mac_dev);
-+
-+ proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
-+ if (!proxy_dev) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ proxy_dev->mac_dev = mac_dev;
-+ dev_set_drvdata(dev, proxy_dev);
-+
-+ /* We have physical ports, so we need to establish
-+ * the buffer layout.
-+ */
-+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
-+ GFP_KERNEL);
-+ if (!buf_layout) {
-+ dev_err(dev, "devm_kzalloc() failed\n");
-+ return -ENOMEM;
-+ }
-+ dpa_set_buffers_layout(mac_dev, buf_layout);
-+
-+ INIT_LIST_HEAD(&proxy_fq_list);
-+
-+ memset(&port_fqs, 0, sizeof(port_fqs));
-+
-+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
-+ if (!err)
-+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
-+ TX);
-+ if (err < 0) {
-+ devm_kfree(dev, buf_layout);
-+ return err;
-+ }
-+
-+ /* Proxy initializer - Just configures the MAC on behalf of
-+ * another partition.
-+ */
-+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
-+ buf_layout, dev);
-+
-+ /* Proxy interfaces need to be started, and the allocated
-+ * memory freed
-+ */
-+ devm_kfree(dev, buf_layout);
-+ devm_kfree(dev, dpa_bp);
-+
-+ /* Free FQ structures */
-+ devm_kfree(dev, port_fqs.rx_defq);
-+ devm_kfree(dev, port_fqs.rx_errq);
-+ devm_kfree(dev, port_fqs.tx_defq);
-+ devm_kfree(dev, port_fqs.tx_errq);
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_enable(mac_dev->port_dev[i]);
-+ if (err)
-+ goto port_enable_fail;
-+ }
-+
-+ dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
-+ mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
-+ mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
-+
-+ return 0; /* Proxy interface initialization ended */
-+
-+port_enable_fail:
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_disable(mac_dev->port_dev[i]);
-+ dpa_eth_proxy_remove(_of_dev);
-+
-+ return err;
-+}
-+
-+int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev;
-+ int _errno;
-+
-+ mac_dev = proxy_dev->mac_dev;
-+
-+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
-+ net_dev->dev_addr);
-+ if (_errno < 0)
-+ return _errno;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_proxy_set_mac_address);
-+
-+int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
-+ struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ int _errno;
-+
-+ if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
-+ mac_dev->promisc = !mac_dev->promisc;
-+ _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
-+ mac_dev->promisc);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
-+ _errno);
-+ }
-+
-+ _errno = mac_dev->set_multi(net_dev, mac_dev);
-+ if (unlikely(_errno < 0))
-+ return _errno;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
-+
-+int dpa_proxy_start(struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev;
-+ const struct dpa_priv_s *priv;
-+ struct proxy_device *proxy_dev;
-+ int _errno;
-+ int i;
-+
-+ priv = netdev_priv(net_dev);
-+ proxy_dev = (struct proxy_device *)priv->peer;
-+ mac_dev = proxy_dev->mac_dev;
-+
-+ _errno = mac_dev->init_phy(net_dev, mac_dev);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev, "init_phy() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ _errno = fm_port_enable(mac_dev->port_dev[i]);
-+ if (_errno)
-+ goto port_enable_fail;
-+ }
-+
-+ _errno = mac_dev->start(mac_dev);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev, "mac_dev->start() = %d\n",
-+ _errno);
-+ goto port_enable_fail;
-+ }
-+
-+ return _errno;
-+
-+port_enable_fail:
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_disable(mac_dev->port_dev[i]);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_proxy_start);
-+
-+int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
-+{
-+ struct mac_device *mac_dev = proxy_dev->mac_dev;
-+ const struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ int _errno, i, err;
-+
-+ _errno = mac_dev->stop(mac_dev);
-+ if (_errno < 0) {
-+ if (netif_msg_drv(priv))
-+ netdev_err(net_dev, "mac_dev->stop() = %d\n",
-+ _errno);
-+ return _errno;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ err = fm_port_disable(mac_dev->port_dev[i]);
-+ _errno = err ? err : _errno;
-+ }
-+
-+ if (mac_dev->phy_dev)
-+ phy_disconnect(mac_dev->phy_dev);
-+ mac_dev->phy_dev = NULL;
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(dpa_proxy_stop);
-+
-+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
-+{
-+ struct device *dev = &of_dev->dev;
-+ struct proxy_device *proxy_dev = dev_get_drvdata(dev);
-+
-+ kfree(proxy_dev);
-+
-+ dev_set_drvdata(dev, NULL);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id dpa_proxy_match[] = {
-+ {
-+ .compatible = "fsl,dpa-ethernet-init"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, dpa_proxy_match);
-+
-+static struct platform_driver dpa_proxy_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME "-proxy",
-+ .of_match_table = dpa_proxy_match,
-+ .owner = THIS_MODULE,
-+ .pm = PROXY_PM_OPS,
-+ },
-+ .probe = dpaa_eth_proxy_probe,
-+ .remove = dpa_eth_proxy_remove
-+};
-+
-+static int __init __cold dpa_proxy_load(void)
-+{
-+ int _errno;
-+
-+ pr_info(DPA_DESCRIPTION "\n");
-+
-+ /* Initialize dpaa_eth mirror values */
-+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
-+ dpa_max_frm = fm_get_max_frm();
-+
-+ _errno = platform_driver_register(&dpa_proxy_driver);
-+ if (unlikely(_errno < 0)) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+ }
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ return _errno;
-+}
-+module_init(dpa_proxy_load);
-+
-+static void __exit __cold dpa_proxy_unload(void)
-+{
-+ platform_driver_unregister(&dpa_proxy_driver);
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(dpa_proxy_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
-@@ -0,0 +1,1195 @@
-+/* Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/skbuff.h>
-+#include <linux/highmem.h>
-+#include <linux/fsl_bman.h>
-+#include <net/sock.h>
-+
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+#ifdef CONFIG_FSL_DPAA_1588
-+#include "dpaa_1588.h"
-+#endif
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+#include "dpaa_eth_ceetm.h"
-+#endif
-+
-+/* DMA map and add a page frag back into the bpool.
-+ * @vaddr fragment must have been allocated with netdev_alloc_frag(),
-+ * specifically for fitting into @dpa_bp.
-+ */
-+static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
-+ int *count_ptr)
-+{
-+ struct bm_buffer bmb;
-+ dma_addr_t addr;
-+
-+ bmb.opaque = 0;
-+
-+ addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ return;
-+ }
-+
-+ bm_buffer_set64(&bmb, addr);
-+
-+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
-+ cpu_relax();
-+
-+ (*count_ptr)++;
-+}
-+
-+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
-+{
-+ void *new_buf, *fman_buf;
-+ struct bm_buffer bmb[8];
-+ dma_addr_t addr;
-+ uint8_t i;
-+ struct device *dev = dpa_bp->dev;
-+ struct sk_buff *skb, **skbh;
-+
-+ memset(bmb, 0, sizeof(struct bm_buffer) * 8);
-+
-+ for (i = 0; i < 8; i++) {
-+ /* We'll prepend the skb back-pointer; can't use the DPA
-+ * priv space, because FMan will overwrite it (from offset 0)
-+ * if it ends up being the second, third, etc. fragment
-+ * in a S/G frame.
-+ *
-+ * We only need enough space to store a pointer, but allocate
-+ * an entire cacheline for performance reasons.
-+ */
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022)) {
-+ struct page *new_page = alloc_page(GFP_ATOMIC);
-+ if (unlikely(!new_page))
-+ goto netdev_alloc_failed;
-+ new_buf = page_address(new_page);
-+ }
-+ else
-+#endif
-+ new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
-+
-+ if (unlikely(!new_buf))
-+ goto netdev_alloc_failed;
-+ new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
-+
-+ /* Apart from the buffer that will be used by the FMan, the
-+ * skb also guarantees enough space to hold the backpointer
-+ * in the headroom and the shared info at the end.
-+ */
-+ skb = build_skb(new_buf,
-+ SMP_CACHE_BYTES + DPA_SKB_SIZE(dpa_bp->size) +
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-+ if (unlikely(!skb)) {
-+ put_page(virt_to_head_page(new_buf));
-+ goto build_skb_failed;
-+ }
-+
-+ /* Reserve SMP_CACHE_BYTES in the skb's headroom to store the
-+ * backpointer. This area will not be synced to, or
-+ * overwritten by, the FMan.
-+ */
-+ skb_reserve(skb, SMP_CACHE_BYTES);
-+
-+ /* We don't sync the first SMP_CACHE_BYTES of the buffer to
-+ * the FMan. The skb backpointer is stored at the end of the
-+ * reserved headroom. Otherwise it will be overwritten by the
-+ * FMan.
-+ * The buffer synced with the FMan starts right after the
-+ * reserved headroom.
-+ */
-+ fman_buf = new_buf + SMP_CACHE_BYTES;
-+ DPA_WRITE_SKB_PTR(skb, skbh, fman_buf, -1);
-+
-+ addr = dma_map_single(dev, fman_buf,
-+ dpa_bp->size, DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dev, addr)))
-+ goto dma_map_failed;
-+
-+ bm_buffer_set64(&bmb[i], addr);
-+ }
-+
-+release_bufs:
-+ /* Release the buffers. In case bman is busy, keep trying
-+ * until successful. bman_release() is guaranteed to succeed
-+ * in a reasonable amount of time
-+ */
-+ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
-+ cpu_relax();
-+ return i;
-+
-+dma_map_failed:
-+ kfree_skb(skb);
-+
-+build_skb_failed:
-+netdev_alloc_failed:
-+ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
-+ WARN_ONCE(1, "Memory allocation failure on Rx\n");
-+
-+ bm_buffer_set64(&bmb[i], 0);
-+ /* Avoid releasing a completely null buffer; bman_release() requires
-+ * at least one buffer.
-+ */
-+ if (likely(i))
-+ goto release_bufs;
-+
-+ return 0;
-+}
-+
-+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
-+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
-+{
-+ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
-+ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
-+}
-+
-+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
-+{
-+ int i;
-+
-+ /* Give each CPU an allotment of "config_count" buffers */
-+ for_each_possible_cpu(i) {
-+ int j;
-+
-+ /* Although we access another CPU's counters here,
-+ * we do it at boot time, so it is safe.
-+ */
-+ for (j = 0; j < dpa_bp->config_count; j += 8)
-+ dpa_bp_add_8_bufs(dpa_bp, i);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpa_bp_priv_seed);
-+
-+/* Add buffers (pages) for Rx processing whenever bpool count falls below
-+ * REFILL_THRESHOLD.
-+ */
-+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
-+{
-+ int count = *countptr;
-+ int new_bufs;
-+
-+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
-+ do {
-+ new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
-+ if (unlikely(!new_bufs)) {
-+ /* Avoid looping forever if we've temporarily
-+ * run out of memory. We'll try again at the
-+ * next NAPI cycle.
-+ */
-+ break;
-+ }
-+ count += new_bufs;
-+ } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
-+
-+ *countptr = count;
-+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpaa_eth_refill_bpools);
-+
-+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
-+ * either contiguous frames or scatter/gather ones.
-+ * Skb freeing is not handled here.
-+ *
-+ * This function may be called on error paths in the Tx function, so guard
-+ * against cases when not all relevant fd fields were filled in.
-+ *
-+ * Return the skb backpointer, since for S/G frames the buffer containing it
-+ * gets freed here.
-+ */
-+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd)
-+{
-+ const struct qm_sg_entry *sgt;
-+ int i;
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ dma_addr_t sg_addr;
-+ struct sk_buff **skbh;
-+ struct sk_buff *skb = NULL;
-+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
-+ int nr_frags;
-+ int sg_len;
-+
-+ /* retrieve skb back pointer */
-+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
-+
-+ if (unlikely(fd->format == qm_fd_sg)) {
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ dma_unmap_single(dpa_bp->dev, addr,
-+ dpa_fd_offset(fd) + DPA_SGT_SIZE,
-+ dma_dir);
-+
-+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
-+ * it's from lowmem.
-+ */
-+ sgt = phys_to_virt(addr + dpa_fd_offset(fd));
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid &&
-+ priv->tsu->hwts_tx_en_ioctl)
-+ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (unlikely(priv->ts_tx_en &&
-+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-+ struct skb_shared_hwtstamps shhwtstamps;
-+
-+ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ }
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
-+ sg_addr = qm_sg_addr(&sgt[0]);
-+ sg_len = qm_sg_entry_get_len(&sgt[0]);
-+ dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
-+
-+ /* remaining pages were mapped with dma_map_page() */
-+ for (i = 1; i <= nr_frags; i++) {
-+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+ sg_addr = qm_sg_addr(&sgt[i]);
-+ sg_len = qm_sg_entry_get_len(&sgt[i]);
-+ dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
-+ }
-+
-+ /* Free the page frag that we allocated on Tx */
-+ put_page(virt_to_head_page(sgt));
-+ } else {
-+ dma_unmap_single(dpa_bp->dev, addr,
-+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
-+#ifdef CONFIG_FSL_DPAA_TS
-+ /* get the timestamp for non-SG frames */
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid &&
-+ priv->tsu->hwts_tx_en_ioctl)
-+ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
-+#endif
-+ if (unlikely(priv->ts_tx_en &&
-+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-+ struct skb_shared_hwtstamps shhwtstamps;
-+
-+ dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ }
-+#endif
-+ }
-+
-+ return skb;
-+}
-+EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
-+
-+#ifndef CONFIG_FSL_DPAA_TS
-+bool dpa_skb_is_recyclable(struct sk_buff *skb)
-+{
-+#ifndef CONFIG_PPC
-+ /* Do not recycle skbs realigned by the errata workaround */
-+ if (unlikely(dpaa_errata_a010022) && skb->mark == NONREC_MARK)
-+ return false;
-+#endif
-+
-+ /* No recycling possible if skb buffer is kmalloc'ed */
-+ if (skb->head_frag == 0)
-+ return false;
-+
-+ /* or if it's a userspace buffer */
-+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-+ return false;
-+
-+ /* or if it's cloned or shared */
-+ if (skb_shared(skb) || skb_cloned(skb) ||
-+ skb->fclone != SKB_FCLONE_UNAVAILABLE)
-+ return false;
-+
-+ return true;
-+}
-+EXPORT_SYMBOL(dpa_skb_is_recyclable);
-+
-+bool dpa_buf_is_recyclable(struct sk_buff *skb,
-+ uint32_t min_size,
-+ uint16_t min_offset,
-+ unsigned char **new_buf_start)
-+{
-+ unsigned char *new;
-+
-+ /* In order to recycle a buffer, the following conditions must be met:
-+ * - buffer size no less than the buffer pool size
-+ * - buffer size no higher than an upper limit (to avoid moving too much
-+ * system memory to the buffer pools)
-+ * - buffer address aligned to cacheline bytes
-+ * - offset of data from start of buffer no lower than a minimum value
-+ * - offset of data from start of buffer no higher than a maximum value
-+ * - the skb back-pointer is stored safely
-+ */
-+
-+ /* guarantee both the minimum size and the minimum data offset */
-+ new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
-+
-+ /* left align to the nearest cacheline */
-+ new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
-+
-+ /* Make sure there is enough space to store the skb back-pointer in
-+ * the headroom, right before the start of the buffer.
-+ *
-+ * Guarantee that both maximum size and maximum data offsets aren't
-+ * crossed.
-+ */
-+ if (likely(new >= (skb->head + sizeof(void *)) &&
-+ new >= (skb->data - DPA_MAX_FD_OFFSET) &&
-+ skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
-+ *new_buf_start = new;
-+ return true;
-+ }
-+
-+ return false;
-+}
-+EXPORT_SYMBOL(dpa_buf_is_recyclable);
-+#endif
-+
-+/* Build a linear skb around the received buffer.
-+ * We are guaranteed there is enough room at the end of the data buffer to
-+ * accommodate the shared info area of the skb.
-+ */
-+static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd, int *use_gro)
-+{
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ ssize_t fd_off = dpa_fd_offset(fd);
-+ void *vaddr;
-+ const fm_prs_result_t *parse_results;
-+ struct sk_buff *skb = NULL, **skbh;
-+
-+ vaddr = phys_to_virt(addr);
-+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-+
-+ /* Retrieve the skb and adjust data and tail pointers, to make sure
-+ * forwarded skbs will have enough space on Tx if extra headers
-+ * are added.
-+ */
-+ DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
-+ /* When using jumbo Rx buffers, we risk having frames dropped due to
-+ * the socket backlog reaching its maximum allowed size.
-+ * Use the frame length for the skb truesize instead of the buffer
-+ * size, as this is the size of the data that actually gets copied to
-+ * userspace.
-+ * The stack may increase the payload. In this case, it will want to
-+ * warn us that the frame length is larger than the truesize. We
-+ * bypass the warning.
-+ */
-+ skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
-+#endif
-+
-+ DPA_BUG_ON(fd_off != priv->rx_headroom);
-+ skb_reserve(skb, fd_off);
-+ skb_put(skb, dpa_fd_length(fd));
-+
-+ /* Peek at the parse results for csum validation */
-+ parse_results = (const fm_prs_result_t *)(vaddr +
-+ DPA_RX_PRIV_DATA_SIZE);
-+ _dpa_process_parse_results(parse_results, fd, skb, use_gro);
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
-+ dpa_ptp_store_rxstamp(priv, skb, vaddr);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (priv->ts_rx_en)
-+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ return skb;
-+}
-+
-+
-+/* Build an skb with the data of the first S/G entry in the linear portion and
-+ * the rest of the frame as skb fragments.
-+ *
-+ * The page fragment holding the S/G Table is recycled here.
-+ */
-+static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
-+ const struct qm_fd *fd, int *use_gro,
-+ int *count_ptr)
-+{
-+ const struct qm_sg_entry *sgt;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ ssize_t fd_off = dpa_fd_offset(fd);
-+ dma_addr_t sg_addr;
-+ void *vaddr, *sg_vaddr;
-+ struct dpa_bp *dpa_bp;
-+ struct page *page, *head_page;
-+ int frag_offset, frag_len;
-+ int page_offset;
-+ int i;
-+ const fm_prs_result_t *parse_results;
-+ struct sk_buff *skb = NULL, *skb_tmp, **skbh;
-+
-+ vaddr = phys_to_virt(addr);
-+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-+
-+ dpa_bp = priv->dpa_bp;
-+ /* Iterate through the SGT entries and add data buffers to the skb */
-+ sgt = vaddr + fd_off;
-+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
-+ /* Extension bit is not supported */
-+ DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
-+
-+ /* We use a single global Rx pool */
-+ DPA_BUG_ON(dpa_bp !=
-+ dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
-+
-+ sg_addr = qm_sg_addr(&sgt[i]);
-+ sg_vaddr = phys_to_virt(sg_addr);
-+ DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-+ SMP_CACHE_BYTES));
-+
-+ dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
-+ DMA_BIDIRECTIONAL);
-+ if (i == 0) {
-+ DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid &&
-+ priv->tsu->hwts_rx_en_ioctl)
-+ dpa_ptp_store_rxstamp(priv, skb, vaddr);
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (priv->ts_rx_en)
-+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ /* In the case of a SG frame, FMan stores the Internal
-+ * Context in the buffer containing the sgt.
-+ * Inspect the parse results before anything else.
-+ */
-+ parse_results = (const fm_prs_result_t *)(vaddr +
-+ DPA_RX_PRIV_DATA_SIZE);
-+ _dpa_process_parse_results(parse_results, fd, skb,
-+ use_gro);
-+
-+ /* Make sure forwarded skbs will have enough space
-+ * on Tx, if extra headers are added.
-+ */
-+ DPA_BUG_ON(fd_off != priv->rx_headroom);
-+ skb_reserve(skb, fd_off);
-+ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
-+ } else {
-+ /* Not the first S/G entry; all data from buffer will
-+ * be added in an skb fragment; fragment index is offset
-+ * by one since first S/G entry was incorporated in the
-+ * linear part of the skb.
-+ *
-+ * Caution: 'page' may be a tail page.
-+ */
-+ DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
-+ page = virt_to_page(sg_vaddr);
-+ head_page = virt_to_head_page(sg_vaddr);
-+
-+ /* Free (only) the skbuff shell because its data buffer
-+ * is already a frag in the main skb.
-+ */
-+ get_page(head_page);
-+ dev_kfree_skb(skb_tmp);
-+
-+ /* Compute offset in (possibly tail) page */
-+ page_offset = ((unsigned long)sg_vaddr &
-+ (PAGE_SIZE - 1)) +
-+ (page_address(page) - page_address(head_page));
-+ /* page_offset only refers to the beginning of sgt[i];
-+ * but the buffer itself may have an internal offset.
-+ */
-+ frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
-+ page_offset;
-+ frag_len = qm_sg_entry_get_len(&sgt[i]);
-+ /* skb_add_rx_frag() does no checking on the page; if
-+ * we pass it a tail page, we'll end up with
-+ * bad page accounting and eventually with segfaults.
-+ */
-+ skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
-+ frag_len, dpa_bp->size);
-+ }
-+ /* Update the pool count for the current {cpu x bpool} */
-+ (*count_ptr)--;
-+
-+ if (qm_sg_entry_get_final(&sgt[i]))
-+ break;
-+ }
-+ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
-+
-+ /* recycle the SGT fragment */
-+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
-+ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
-+ return skb;
-+}
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
-+ struct sk_buff *skb)
-+{
-+ if (unlikely(priv->loop_to < 0))
-+ return 0; /* loop disabled by default */
-+
-+ skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
-+ /* Save the current CPU ID in order to maintain core affinity */
-+ skb_set_queue_mapping(skb, raw_smp_processor_id());
-+ dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
-+
-+ return 1; /* Frame Tx on the selected interface */
-+}
-+#endif
-+
-+void __hot _dpa_rx(struct net_device *net_dev,
-+ struct qman_portal *portal,
-+ const struct dpa_priv_s *priv,
-+ struct dpa_percpu_priv_s *percpu_priv,
-+ const struct qm_fd *fd,
-+ u32 fqid,
-+ int *count_ptr)
-+{
-+ struct dpa_bp *dpa_bp;
-+ struct sk_buff *skb;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ u32 fd_status = fd->status;
-+ unsigned int skb_len;
-+ struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
-+ int use_gro = net_dev->features & NETIF_F_GRO;
-+
-+ if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
-+ if (netif_msg_hw(priv) && net_ratelimit())
-+ netdev_warn(net_dev, "FD status = 0x%08x\n",
-+ fd_status & FM_FD_STAT_RX_ERRORS);
-+
-+ percpu_stats->rx_errors++;
-+ goto _release_frame;
-+ }
-+
-+ dpa_bp = priv->dpa_bp;
-+ DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
-+
-+ /* prefetch the first 64 bytes of the frame or the SGT start */
-+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
-+ prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
-+
-+ /* The only FD types that we may receive are contig and S/G */
-+ DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
-+
-+ if (likely(fd->format == qm_fd_contig)) {
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ /* Execute the Rx processing hook, if it exists. */
-+ if (dpaa_eth_hooks.rx_default &&
-+ dpaa_eth_hooks.rx_default((void *)fd, net_dev,
-+ fqid) == DPAA_ETH_STOLEN) {
-+ /* won't count the rx bytes in */
-+ return;
-+ }
-+#endif
-+ skb = contig_fd_to_skb(priv, fd, &use_gro);
-+ } else {
-+ skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
-+ percpu_priv->rx_sg++;
-+ }
-+
-+ /* Account for either the contig buffer or the SGT buffer (depending on
-+ * which case we were in) having been removed from the pool.
-+ */
-+ (*count_ptr)--;
-+ skb->protocol = eth_type_trans(skb, net_dev);
-+
-+ skb_len = skb->len;
-+
-+#ifdef CONFIG_FSL_DPAA_DBG_LOOP
-+ if (dpa_skb_loop(priv, skb)) {
-+ percpu_stats->rx_packets++;
-+ percpu_stats->rx_bytes += skb_len;
-+ return;
-+ }
-+#endif
-+
-+ skb_record_rx_queue(skb, raw_smp_processor_id());
-+
-+ if (use_gro) {
-+ gro_result_t gro_result;
-+ const struct qman_portal_config *pc =
-+ qman_p_get_portal_config(portal);
-+ struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
-+
-+ np->p = portal;
-+ gro_result = napi_gro_receive(&np->napi, skb);
-+ /* If frame is dropped by the stack, rx_dropped counter is
-+ * incremented automatically, so no need for us to update it
-+ */
-+ if (unlikely(gro_result == GRO_DROP))
-+ goto packet_dropped;
-+ } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
-+ goto packet_dropped;
-+
-+ percpu_stats->rx_packets++;
-+ percpu_stats->rx_bytes += skb_len;
-+
-+packet_dropped:
-+ return;
-+
-+_release_frame:
-+ dpa_fd_release(net_dev, fd);
-+}
-+
-+int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd,
-+ int *count_ptr, int *offset)
-+{
-+ struct sk_buff **skbh;
-+ dma_addr_t addr;
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ struct net_device *net_dev = priv->net_dev;
-+ int err;
-+ enum dma_data_direction dma_dir;
-+ unsigned char *buffer_start;
-+ int dma_map_size;
-+
-+#ifndef CONFIG_FSL_DPAA_TS
-+ /* Check the recycling conditions only if timestamp support is not
-+ * enabled; otherwise we need the fd back on Tx confirmation.
-+ */
-+
-+ /* We can recycle the buffer if:
-+ * - the pool is not full
-+ * - the buffer meets the skb recycling conditions
-+ * - the buffer meets our own (size, offset, align) conditions
-+ */
-+ if (likely((*count_ptr < dpa_bp->target_count) &&
-+ dpa_skb_is_recyclable(skb) &&
-+ dpa_buf_is_recyclable(skb, dpa_bp->size,
-+ priv->tx_headroom, &buffer_start))) {
-+ /* Buffer is recyclable; use the new start address
-+ * and set fd parameters and DMA mapping direction
-+ */
-+ fd->bpid = dpa_bp->bpid;
-+ DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
-+ fd->offset = (uint16_t)(skb->data - buffer_start);
-+ dma_dir = DMA_BIDIRECTIONAL;
-+ dma_map_size = dpa_bp->size;
-+
-+ /* Store the skb back-pointer before the start of the buffer.
-+ * Otherwise it will be overwritten by the FMan.
-+ */
-+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
-+ *offset = skb_headroom(skb) - fd->offset;
-+ } else
-+#endif
-+ {
-+ /* Not recyclable.
-+ * We are guaranteed to have at least tx_headroom bytes
-+ * available, so just use that for offset.
-+ */
-+ fd->bpid = 0xff;
-+ buffer_start = skb->data - priv->tx_headroom;
-+ fd->offset = priv->tx_headroom;
-+ dma_dir = DMA_TO_DEVICE;
-+ dma_map_size = skb_tail_pointer(skb) - buffer_start;
-+
-+ /* The buffer will be Tx-confirmed, but the TxConf cb must
-+ * necessarily look at our Tx private data to retrieve the
-+ * skbuff. Store the back-pointer inside the buffer.
-+ */
-+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
-+ }
-+
-+ /* Enable L3/L4 hardware checksum computation.
-+ *
-+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
-+ * need to write into the skb.
-+ */
-+ err = dpa_enable_tx_csum(priv, skb, fd,
-+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
-+ if (unlikely(err < 0)) {
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_err(net_dev, "HW csum error: %d\n", err);
-+ return err;
-+ }
-+
-+ /* Fill in the rest of the FD fields */
-+ fd->format = qm_fd_contig;
-+ fd->length20 = skb->len;
-+ fd->cmd |= FM_FD_CMD_FCO;
-+
-+ /* Map the entire buffer size that may be seen by FMan, but no more */
-+ addr = dma_map_single(dpa_bp->dev, skbh, dma_map_size, dma_dir);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_err(net_dev, "dma_map_single() failed\n");
-+ return -EINVAL;
-+ }
-+ qm_fd_addr_set64(fd, addr);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(skb_to_contig_fd);
-+
-+#ifndef CONFIG_PPC
-+/* Verify the conditions that trigger the A010022 errata: data unaligned to
-+ * 16 bytes, 4K memory address crossings and S/G fragments.
-+ */
-+static bool a010022_check_skb(struct sk_buff *skb, struct dpa_priv_s *priv)
-+{
-+ /* Check if the headroom is aligned */
-+ if (((uintptr_t)skb->data - priv->tx_headroom) %
-+ priv->buf_layout[TX].data_align != 0)
-+ return true;
-+
-+ /* Check for paged data in the skb. We do not support S/G fragments */
-+ if (skb_is_nonlinear(skb))
-+ return true;
-+
-+ /* Check if the headroom crosses a boundary */
-+ if (HAS_DMA_ISSUE(skb->head, skb_headroom(skb)))
-+ return true;
-+
-+ /* Check if the non-paged data crosses a boundary */
-+ if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb)))
-+ return true;
-+
-+ /* Check if the entire linear skb crosses a boundary */
-+ if (HAS_DMA_ISSUE(skb->head, skb_end_offset(skb)))
-+ return true;
-+
-+ return false;
-+}
-+
-+/* Realign the skb by copying its contents at the start of a newly allocated
-+ * page. Build a new skb around the new buffer and release the old one.
-+ * A performance drop should be expected.
-+ */
-+static struct sk_buff *a010022_realign_skb(struct sk_buff *skb,
-+ struct dpa_priv_s *priv)
-+{
-+ int trans_offset = skb_transport_offset(skb);
-+ int net_offset = skb_network_offset(skb);
-+ int nsize, headroom, npage_order;
-+ struct sk_buff *nskb = NULL;
-+ struct page *npage;
-+ void *npage_addr;
-+
-+ headroom = DPAA_A010022_HEADROOM;
-+
-+ /* For the new skb we only need the old one's data (both non-paged and
-+ * paged). We can skip the old tailroom.
-+ *
-+ * Make sure the skb_shinfo is cache-line aligned.
-+ */
-+ nsize = SMP_CACHE_BYTES + DPA_SKB_SIZE(headroom + skb->len) +
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-+
-+ /* Reserve enough memory to accommodate Jumbo frames */
-+ npage_order = (nsize - 1) / PAGE_SIZE;
-+ npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, npage_order);
-+ if (unlikely(!npage)) {
-+ WARN_ONCE(1, "Memory allocation failure\n");
-+ return NULL;
-+ }
-+ npage_addr = page_address(npage);
-+
-+ nskb = build_skb(npage_addr, nsize);
-+ if (unlikely(!nskb))
-+ goto err;
-+
-+ /* Reserve only the needed headroom in order to guarantee the data's
-+ * alignment.
-+ * Code borrowed and adapted from skb_copy().
-+ */
-+ skb_reserve(nskb, headroom);
-+ skb_put(nskb, skb->len);
-+ if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
-+ WARN_ONCE(1, "skb parsing failure\n");
-+ goto err;
-+ }
-+ copy_skb_header(nskb, skb);
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+ /* Copy relevant timestamp info from the old skb to the new */
-+ if (priv->ts_tx_en) {
-+ skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags;
-+ skb_shinfo(nskb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
-+ skb_shinfo(nskb)->tskey = skb_shinfo(skb)->tskey;
-+ if (skb->sk)
-+ skb_set_owner_w(nskb, skb->sk);
-+ }
-+#endif
-+ /* We move the headroom when we align it so we have to reset the
-+ * network and transport header offsets relative to the new data
-+ * pointer. The checksum offload relies on these offsets.
-+ */
-+ skb_set_network_header(nskb, net_offset);
-+ skb_set_transport_header(nskb, trans_offset);
-+
-+ /* We don't want the buffer to be recycled so we mark it accordingly */
-+ nskb->mark = NONREC_MARK;
-+
-+ dev_kfree_skb(skb);
-+ return nskb;
-+
-+err:
-+ if (nskb)
-+ dev_kfree_skb(nskb);
-+ put_page(npage);
-+ return NULL;
-+}
-+#endif
-+
-+int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
-+ struct sk_buff *skb, struct qm_fd *fd)
-+{
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ dma_addr_t addr;
-+ dma_addr_t sg_addr;
-+ struct sk_buff **skbh;
-+ struct net_device *net_dev = priv->net_dev;
-+ int sg_len, sgt_size;
-+ int err;
-+
-+ struct qm_sg_entry *sgt;
-+ void *sgt_buf;
-+ skb_frag_t *frag;
-+ int i = 0, j = 0;
-+ int nr_frags;
-+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
-+
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ fd->format = qm_fd_sg;
-+
-+ /* The FMan reads 256 bytes from the start of the SGT regardless of
-+ * its size. Accordingly, we reserve the same amount of memory.
-+ */
-+ sgt_size = DPA_SGT_SIZE;
-+
-+ /* Get a page frag to store the SGTable, or a full page if the errata
-+ * is in place and we need to avoid crossing a 4k boundary.
-+ */
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022))
-+ sgt_buf = page_address(alloc_page(GFP_ATOMIC));
-+ else
-+#endif
-+ sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
-+ if (unlikely(!sgt_buf)) {
-+ dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ /* the memory allocator does not zero the allocated memory */
-+ memset(sgt_buf, 0, priv->tx_headroom + sgt_size);
-+
-+ /* Enable L3/L4 hardware checksum computation.
-+ *
-+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
-+ * need to write into the skb.
-+ */
-+ err = dpa_enable_tx_csum(priv, skb, fd,
-+ sgt_buf + DPA_TX_PRIV_DATA_SIZE);
-+ if (unlikely(err < 0)) {
-+ if (netif_msg_tx_err(priv) && net_ratelimit())
-+ netdev_err(net_dev, "HW csum error: %d\n", err);
-+ goto csum_failed;
-+ }
-+
-+ /* Assign the data from skb->data to the first SG list entry */
-+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
-+ sg_len = skb_headlen(skb);
-+ qm_sg_entry_set_bpid(&sgt[0], 0xff);
-+ qm_sg_entry_set_offset(&sgt[0], 0);
-+ qm_sg_entry_set_len(&sgt[0], sg_len);
-+ qm_sg_entry_set_ext(&sgt[0], 0);
-+ qm_sg_entry_set_final(&sgt[0], 0);
-+
-+ addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ err = -EINVAL;
-+ goto sg0_map_failed;
-+ }
-+
-+ qm_sg_entry_set64(&sgt[0], addr);
-+
-+ /* populate the rest of SGT entries */
-+ for (i = 1; i <= nr_frags; i++) {
-+ frag = &skb_shinfo(skb)->frags[i - 1];
-+ qm_sg_entry_set_bpid(&sgt[i], 0xff);
-+ qm_sg_entry_set_offset(&sgt[i], 0);
-+ qm_sg_entry_set_len(&sgt[i], frag->size);
-+ qm_sg_entry_set_ext(&sgt[i], 0);
-+
-+ if (i == nr_frags)
-+ qm_sg_entry_set_final(&sgt[i], 1);
-+ else
-+ qm_sg_entry_set_final(&sgt[i], 0);
-+
-+ DPA_BUG_ON(!skb_frag_page(frag));
-+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
-+ dma_dir);
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ err = -EINVAL;
-+ goto sg_map_failed;
-+ }
-+
-+ /* keep the offset in the address */
-+ qm_sg_entry_set64(&sgt[i], addr);
-+ }
-+
-+ fd->length20 = skb->len;
-+ fd->offset = priv->tx_headroom;
-+
-+ /* DMA map the SGT page
-+ *
-+ * It's safe to store the skb back-pointer inside the buffer since
-+ * S/G frames are non-recyclable.
-+ */
-+ DPA_WRITE_SKB_PTR(skb, skbh, sgt_buf, 0);
-+ addr = dma_map_single(dpa_bp->dev, sgt_buf,
-+ priv->tx_headroom + sgt_size,
-+ dma_dir);
-+
-+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
-+ dev_err(dpa_bp->dev, "DMA mapping failed");
-+ err = -EINVAL;
-+ goto sgt_map_failed;
-+ }
-+
-+ qm_fd_addr_set64(fd, addr);
-+ fd->bpid = 0xff;
-+ fd->cmd |= FM_FD_CMD_FCO;
-+
-+ return 0;
-+
-+sgt_map_failed:
-+sg_map_failed:
-+ for (j = 0; j < i; j++) {
-+ sg_addr = qm_sg_addr(&sgt[j]);
-+ dma_unmap_page(dpa_bp->dev, sg_addr,
-+ qm_sg_entry_get_len(&sgt[j]), dma_dir);
-+ }
-+sg0_map_failed:
-+csum_failed:
-+ put_page(virt_to_head_page(sgt_buf));
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(skb_to_sg_fd);
-+
-+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv;
-+ int queue_mapping = dpa_get_queue_mapping(skb);
-+ struct qman_fq *egress_fq, *conf_fq;
-+
-+#ifdef CONFIG_FSL_DPAA_HOOKS
-+ /* If there is a Tx hook, run it. */
-+ if (dpaa_eth_hooks.tx &&
-+ dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
-+ /* won't update any Tx stats */
-+ return NETDEV_TX_OK;
-+#endif
-+
-+ priv = netdev_priv(net_dev);
-+
-+#ifdef CONFIG_FSL_DPAA_CEETM
-+ if (priv->ceetm_en)
-+ return ceetm_tx(skb, net_dev);
-+#endif
-+
-+ if (unlikely(queue_mapping >= DPAA_ETH_TX_QUEUES))
-+ queue_mapping = queue_mapping % DPAA_ETH_TX_QUEUES;
-+
-+ egress_fq = priv->egress_fqs[queue_mapping];
-+ conf_fq = priv->conf_fqs[queue_mapping];
-+
-+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
-+}
-+
-+int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
-+ struct qman_fq *egress_fq, struct qman_fq *conf_fq)
-+{
-+ struct dpa_priv_s *priv;
-+ struct qm_fd fd;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ int err = 0;
-+ bool nonlinear;
-+ int *countptr, offset = 0;
-+
-+ priv = netdev_priv(net_dev);
-+ /* Non-migratable context, safe to use raw_cpu_ptr */
-+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);
-+ percpu_stats = &percpu_priv->stats;
-+ countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
-+
-+ clear_fd(&fd);
-+
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) {
-+ skb = a010022_realign_skb(skb, priv);
-+ if (!skb)
-+ goto skb_to_fd_failed;
-+ }
-+#endif
-+
-+ nonlinear = skb_is_nonlinear(skb);
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
-+ fd.cmd |= FM_FD_CMD_UPD;
-+#endif
-+#ifdef CONFIG_FSL_DPAA_TS
-+ if (unlikely(priv->ts_tx_en &&
-+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
-+ fd.cmd |= FM_FD_CMD_UPD;
-+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-+#endif /* CONFIG_FSL_DPAA_TS */
-+
-+ /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
-+ * we don't feed FMan with more fragments than it supports.
-+ * Note that we use the first sgt entry to store the linear part of
-+ * the skb, so we're one extra frag short.
-+ */
-+ if (nonlinear &&
-+ likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
-+ /* Just create a S/G fd based on the skb */
-+ err = skb_to_sg_fd(priv, skb, &fd);
-+ percpu_priv->tx_frag_skbuffs++;
-+ } else {
-+ /* Make sure we have enough headroom to accommodate private
-+ * data, parse results, etc. Normally this shouldn't happen if
-+ * we're here via the standard kernel stack.
-+ */
-+ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
-+ struct sk_buff *skb_new;
-+
-+ skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
-+ if (unlikely(!skb_new)) {
-+ dev_kfree_skb(skb);
-+ percpu_stats->tx_errors++;
-+ return NETDEV_TX_OK;
-+ }
-+
-+ /* propagate the skb ownership information */
-+ if (skb->sk)
-+ skb_set_owner_w(skb_new, skb->sk);
-+
-+ dev_kfree_skb(skb);
-+ skb = skb_new;
-+ }
-+
-+ /* We're going to store the skb backpointer at the beginning
-+ * of the data buffer, so we need a privately owned skb
-+ */
-+
-+ /* Code borrowed from skb_unshare(). */
-+ if (skb_cloned(skb)) {
-+ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
-+ kfree_skb(skb);
-+ skb = nskb;
-+#ifndef CONFIG_PPC
-+ if (unlikely(dpaa_errata_a010022) &&
-+ a010022_check_skb(skb, priv)) {
-+ skb = a010022_realign_skb(skb, priv);
-+ if (!skb)
-+ goto skb_to_fd_failed;
-+ }
-+#endif
-+ /* skb_copy() has now linearized the skbuff. */
-+ } else if (unlikely(nonlinear)) {
-+ /* We are here because the egress skb contains
-+ * more fragments than we support. In this case,
-+ * we have no choice but to linearize it ourselves.
-+ */
-+ err = __skb_linearize(skb);
-+ }
-+ if (unlikely(!skb || err < 0))
-+ /* Common out-of-memory error path */
-+ goto enomem;
-+
-+ err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
-+ }
-+ if (unlikely(err < 0))
-+ goto skb_to_fd_failed;
-+
-+ if (fd.bpid != 0xff) {
-+ skb_recycle(skb);
-+ /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
-+ * but we need the skb to look as if returned by build_skb().
-+ * We need to manually adjust the tailptr as well.
-+ */
-+ skb->data = skb->head + offset;
-+ skb_reset_tail_pointer(skb);
-+
-+ (*countptr)++;
-+ percpu_priv->tx_returned++;
-+ }
-+
-+ if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
-+ goto xmit_failed;
-+
-+ netif_trans_update(net_dev);
-+ return NETDEV_TX_OK;
-+
-+xmit_failed:
-+ if (fd.bpid != 0xff) {
-+ (*countptr)--;
-+ percpu_priv->tx_returned--;
-+ dpa_fd_release(net_dev, &fd);
-+ percpu_stats->tx_errors++;
-+ return NETDEV_TX_OK;
-+ }
-+ _dpa_cleanup_tx_fd(priv, &fd);
-+skb_to_fd_failed:
-+enomem:
-+ percpu_stats->tx_errors++;
-+ dev_kfree_skb(skb);
-+ return NETDEV_TX_OK;
-+}
-+EXPORT_SYMBOL(dpa_tx_extended);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
-@@ -0,0 +1,278 @@
-+/* Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include <linux/io.h>
-+#include <linux/of_net.h>
-+#include "dpaa_eth.h"
-+#include "mac.h" /* struct mac_device */
-+#ifdef CONFIG_FSL_DPAA_1588
-+#include "dpaa_1588.h"
-+#endif
-+
-+static ssize_t dpaa_eth_show_addr(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+
-+ if (mac_dev)
-+ return sprintf(buf, "%llx",
-+ (unsigned long long)mac_dev->res->start);
-+ else
-+ return sprintf(buf, "none");
-+}
-+
-+static ssize_t dpaa_eth_show_type(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t res = 0;
-+
-+ if (priv)
-+ res = sprintf(buf, "%s", priv->if_type);
-+
-+ return res;
-+}
-+
-+static ssize_t dpaa_eth_show_fqids(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t bytes = 0;
-+ int i = 0;
-+ char *str;
-+ struct dpa_fq *fq;
-+ struct dpa_fq *tmp;
-+ struct dpa_fq *prev = NULL;
-+ u32 first_fqid = 0;
-+ u32 last_fqid = 0;
-+ char *prevstr = NULL;
-+
-+ list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
-+ switch (fq->fq_type) {
-+ case FQ_TYPE_RX_DEFAULT:
-+ str = "Rx default";
-+ break;
-+ case FQ_TYPE_RX_ERROR:
-+ str = "Rx error";
-+ break;
-+ case FQ_TYPE_RX_PCD:
-+ str = "Rx PCD";
-+ break;
-+ case FQ_TYPE_TX_CONFIRM:
-+ str = "Tx default confirmation";
-+ break;
-+ case FQ_TYPE_TX_CONF_MQ:
-+ str = "Tx confirmation (mq)";
-+ break;
-+ case FQ_TYPE_TX_ERROR:
-+ str = "Tx error";
-+ break;
-+ case FQ_TYPE_TX:
-+ str = "Tx";
-+ break;
-+ case FQ_TYPE_RX_PCD_HI_PRIO:
-+ str ="Rx PCD High Priority";
-+ break;
-+ default:
-+ str = "Unknown";
-+ }
-+
-+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
-+ str != prevstr)) {
-+ if (last_fqid == first_fqid)
-+ bytes += sprintf(buf + bytes,
-+ "%s: %d\n", prevstr, prev->fqid);
-+ else
-+ bytes += sprintf(buf + bytes,
-+ "%s: %d - %d\n", prevstr,
-+ first_fqid, last_fqid);
-+ }
-+
-+ if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
-+ last_fqid = fq->fqid;
-+ else
-+ first_fqid = last_fqid = fq->fqid;
-+
-+ prev = fq;
-+ prevstr = str;
-+ i++;
-+ }
-+
-+ if (prev) {
-+ if (last_fqid == first_fqid)
-+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
-+ prev->fqid);
-+ else
-+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
-+ first_fqid, last_fqid);
-+ }
-+
-+ return bytes;
-+}
-+
-+static ssize_t dpaa_eth_show_bpids(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ ssize_t bytes = 0;
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct dpa_bp *dpa_bp = priv->dpa_bp;
-+ int i = 0;
-+
-+ for (i = 0; i < priv->bp_count; i++)
-+ bytes += snprintf(buf + bytes, PAGE_SIZE, "%u\n",
-+ dpa_bp[i].bpid);
-+
-+ return bytes;
-+}
-+
-+static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ int n = 0;
-+
-+ if (mac_dev)
-+ n = fm_mac_dump_regs(mac_dev, buf, n);
-+ else
-+ return sprintf(buf, "no mac registers\n");
-+
-+ return n;
-+}
-+
-+static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ int n = 0;
-+
-+ if (mac_dev)
-+ n = fm_mac_dump_rx_stats(mac_dev, buf, n);
-+ else
-+ return sprintf(buf, "no mac rx stats\n");
-+
-+ return n;
-+}
-+
-+static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ int n = 0;
-+
-+ if (mac_dev)
-+ n = fm_mac_dump_tx_stats(mac_dev, buf, n);
-+ else
-+ return sprintf(buf, "no mac tx stats\n");
-+
-+ return n;
-+}
-+
-+#ifdef CONFIG_FSL_DPAA_1588
-+static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+
-+ if (priv->tsu && priv->tsu->valid)
-+ return sprintf(buf, "1\n");
-+ else
-+ return sprintf(buf, "0\n");
-+}
-+
-+static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ unsigned int num;
-+ unsigned long flags;
-+
-+ if (kstrtouint(buf, 0, &num) < 0)
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ if (num) {
-+ if (priv->tsu)
-+ priv->tsu->valid = TRUE;
-+ } else {
-+ if (priv->tsu)
-+ priv->tsu->valid = FALSE;
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return count;
-+}
-+#endif
-+
-+static struct device_attribute dpaa_eth_attrs[] = {
-+ __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
-+ __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
-+ __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
-+ __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
-+ __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
-+ __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
-+ __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
-+#ifdef CONFIG_FSL_DPAA_1588
-+ __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
-+ dpaa_eth_set_ptp_1588),
-+#endif
-+};
-+
-+void dpaa_eth_sysfs_init(struct device *dev)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
-+ if (device_create_file(dev, &dpaa_eth_attrs[i])) {
-+ dev_err(dev, "Error creating sysfs file\n");
-+ while (i > 0)
-+ device_remove_file(dev, &dpaa_eth_attrs[--i]);
-+ return;
-+ }
-+}
-+EXPORT_SYMBOL(dpaa_eth_sysfs_init);
-+
-+void dpaa_eth_sysfs_remove(struct device *dev)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
-+ device_remove_file(dev, &dpaa_eth_attrs[i]);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
-@@ -0,0 +1,144 @@
-+/* Copyright 2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM dpaa_eth
-+
-+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _DPAA_ETH_TRACE_H
-+
-+#include <linux/skbuff.h>
-+#include <linux/netdevice.h>
-+#include "dpaa_eth.h"
-+#include <linux/tracepoint.h>
-+
-+#define fd_format_name(format) { qm_fd_##format, #format }
-+#define fd_format_list \
-+ fd_format_name(contig), \
-+ fd_format_name(sg)
-+#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
-+ " status=0x%08x"
-+
-+/* This is used to declare a class of events.
-+ * individual events of this type will be defined below.
-+ */
-+
-+/* Store details about a frame descriptor and the FQ on which it was
-+ * transmitted/received.
-+ */
-+DECLARE_EVENT_CLASS(dpaa_eth_fd,
-+ /* Trace function prototype */
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ /* Repeat argument list here */
-+ TP_ARGS(netdev, fq, fd),
-+
-+ /* A structure containing the relevant information we want to record.
-+ * Declare name and type for each normal element, name, type and size
-+ * for arrays. Use __string for variable length strings.
-+ */
-+ TP_STRUCT__entry(
-+ __field(u32, fqid)
-+ __field(u64, fd_addr)
-+ __field(u8, fd_format)
-+ __field(u16, fd_offset)
-+ __field(u32, fd_length)
-+ __field(u32, fd_status)
-+ __string(name, netdev->name)
-+ ),
-+
-+ /* The function that assigns values to the above declared fields */
-+ TP_fast_assign(
-+ __entry->fqid = fq->fqid;
-+ __entry->fd_addr = qm_fd_addr_get64(fd);
-+ __entry->fd_format = fd->format;
-+ __entry->fd_offset = dpa_fd_offset(fd);
-+ __entry->fd_length = dpa_fd_length(fd);
-+ __entry->fd_status = fd->status;
-+ __assign_str(name, netdev->name);
-+ ),
-+
-+ /* This is what gets printed when the trace event is triggered */
-+ /* TODO: print the status using __print_flags() */
-+ TP_printk(TR_FMT,
-+ __get_str(name), __entry->fqid, __entry->fd_addr,
-+ __print_symbolic(__entry->fd_format, fd_format_list),
-+ __entry->fd_offset, __entry->fd_length, __entry->fd_status)
-+);
-+
-+/* Now declare events of the above type. Format is:
-+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
-+ */
-+
-+/* Tx (egress) fd */
-+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
-+
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ TP_ARGS(netdev, fq, fd)
-+);
-+
-+/* Rx fd */
-+DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
-+
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ TP_ARGS(netdev, fq, fd)
-+);
-+
-+/* Tx confirmation fd */
-+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
-+
-+ TP_PROTO(struct net_device *netdev,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd),
-+
-+ TP_ARGS(netdev, fq, fd)
-+);
-+
-+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
-+ * The syntax is the same as for DECLARE_EVENT_CLASS().
-+ */
-+
-+#endif /* _DPAA_ETH_TRACE_H */
-+
-+/* This must be outside ifdef _DPAA_ETH_TRACE_H */
-+#undef TRACE_INCLUDE_PATH
-+#define TRACE_INCLUDE_PATH .
-+#undef TRACE_INCLUDE_FILE
-+#define TRACE_INCLUDE_FILE dpaa_eth_trace
-+#include <trace/define_trace.h>
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
-@@ -0,0 +1,587 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/string.h>
-+#include <linux/of_platform.h>
-+#include <linux/net_tstamp.h>
-+#include <linux/fsl/ptp_qoriq.h>
-+
-+#include "dpaa_eth.h"
-+#include "mac.h" /* struct mac_device */
-+#include "dpaa_eth_common.h"
-+
-+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
-+ "interrupts",
-+ "rx packets",
-+ "tx packets",
-+ "tx recycled",
-+ "tx confirm",
-+ "tx S/G",
-+ "rx S/G",
-+ "tx error",
-+ "rx error",
-+ "bp count"
-+};
-+
-+static char dpa_stats_global[][ETH_GSTRING_LEN] = {
-+ /* dpa rx errors */
-+ "rx dma error",
-+ "rx frame physical error",
-+ "rx frame size error",
-+ "rx header error",
-+ "rx csum error",
-+
-+ /* demultiplexing errors */
-+ "qman cg_tdrop",
-+ "qman wred",
-+ "qman error cond",
-+ "qman early window",
-+ "qman late window",
-+ "qman fq tdrop",
-+ "qman fq retired",
-+ "qman orp disabled",
-+
-+ /* congestion related stats */
-+ "congestion time (ms)",
-+ "entered congestion",
-+ "congested (0/1)"
-+};
-+
-+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
-+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
-+
-+static int __cold dpa_get_ksettings(struct net_device *net_dev,
-+ struct ethtool_link_ksettings *cmd)
-+{
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_dbg(net_dev, "phy device not initialized\n");
-+ return 0;
-+ }
-+
-+ phy_ethtool_ksettings_get(priv->mac_dev->phy_dev, cmd);
-+
-+ return 0;
-+}
-+
-+static int __cold dpa_set_ksettings(struct net_device *net_dev,
-+ const struct ethtool_link_ksettings *cmd)
-+{
-+ int _errno;
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ _errno = phy_ethtool_ksettings_set(priv->mac_dev->phy_dev, cmd);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", _errno);
-+
-+ return _errno;
-+}
-+
-+static void __cold dpa_get_drvinfo(struct net_device *net_dev,
-+ struct ethtool_drvinfo *drvinfo)
-+{
-+ int _errno;
-+
-+ strncpy(drvinfo->driver, KBUILD_MODNAME,
-+ sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
-+ _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%X", 0);
-+
-+ if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
-+ /* Truncated output */
-+ netdev_notice(net_dev, "snprintf() = %d\n", _errno);
-+ } else if (unlikely(_errno < 0)) {
-+ netdev_warn(net_dev, "snprintf() = %d\n", _errno);
-+ memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
-+ }
-+ strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
-+}
-+
-+static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
-+{
-+ return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
-+}
-+
-+static void __cold dpa_set_msglevel(struct net_device *net_dev,
-+ uint32_t msg_enable)
-+{
-+ ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
-+}
-+
-+static int __cold dpa_nway_reset(struct net_device *net_dev)
-+{
-+ int _errno;
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ _errno = 0;
-+ if (priv->mac_dev->phy_dev->autoneg) {
-+ _errno = phy_start_aneg(priv->mac_dev->phy_dev);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
-+ _errno);
-+ }
-+
-+ return _errno;
-+}
-+
-+static void __cold dpa_get_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ struct phy_device *phy_dev;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ if (mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return;
-+ }
-+
-+ phy_dev = mac_dev->phy_dev;
-+ if (unlikely(phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return;
-+ }
-+
-+ epause->autoneg = mac_dev->autoneg_pause;
-+ epause->rx_pause = mac_dev->rx_pause_active;
-+ epause->tx_pause = mac_dev->tx_pause_active;
-+}
-+
-+static int __cold dpa_set_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct dpa_priv_s *priv;
-+ struct mac_device *mac_dev;
-+ struct phy_device *phy_dev;
-+ int _errno;
-+ u32 newadv, oldadv;
-+ bool rx_pause, tx_pause;
-+
-+ priv = netdev_priv(net_dev);
-+ mac_dev = priv->mac_dev;
-+
-+ if (mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ phy_dev = mac_dev->phy_dev;
-+ if (unlikely(phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ if (!(phy_dev->supported & SUPPORTED_Pause) ||
-+ (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
-+ (epause->rx_pause != epause->tx_pause)))
-+ return -EINVAL;
-+
-+ /* The MAC should know how to handle PAUSE frame autonegotiation before
-+ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
-+ * settings.
-+ */
-+ mac_dev->autoneg_pause = !!epause->autoneg;
-+ mac_dev->rx_pause_req = !!epause->rx_pause;
-+ mac_dev->tx_pause_req = !!epause->tx_pause;
-+
-+ /* Determine the sym/asym advertised PAUSE capabilities from the desired
-+ * rx/tx pause settings.
-+ */
-+ newadv = 0;
-+ if (epause->rx_pause)
-+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-+ if (epause->tx_pause)
-+ newadv |= ADVERTISED_Asym_Pause;
-+
-+ oldadv = phy_dev->advertising &
-+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-+
-+ /* If there are differences between the old and the new advertised
-+ * values, restart PHY autonegotiation and advertise the new values.
-+ */
-+ if (oldadv != newadv) {
-+ phy_dev->advertising &= ~(ADVERTISED_Pause
-+ | ADVERTISED_Asym_Pause);
-+ phy_dev->advertising |= newadv;
-+ if (phy_dev->autoneg) {
-+ _errno = phy_start_aneg(phy_dev);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
-+ _errno);
-+ }
-+ }
-+
-+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
-+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
-+
-+ return _errno;
-+}
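The newadv computation above folds the requested rx/tx pause settings into the PAUSE/Asym_Pause advertisement bits; note that requesting rx pause alone yields the same advertisement as requesting both directions. A standalone sketch of that mapping, using stand-in bit values (illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define ADV_PAUSE 0x1	/* stand-in for ADVERTISED_Pause */
#define ADV_ASYM  0x2	/* stand-in for ADVERTISED_Asym_Pause */

/* Mirrors the newadv computation in dpa_set_pauseparam() */
static unsigned int pause_adv(bool rx_pause, bool tx_pause)
{
	unsigned int adv = 0;

	if (rx_pause)
		adv = ADV_PAUSE | ADV_ASYM;
	if (tx_pause)
		adv |= ADV_ASYM;
	return adv;
}

int main(void)
{
	printf("rx only : 0x%x\n", pause_adv(true, false));	/* Pause|Asym */
	printf("tx only : 0x%x\n", pause_adv(false, true));	/* Asym */
	printf("rx + tx : 0x%x\n", pause_adv(true, true));	/* Pause|Asym */
	printf("disabled: 0x%x\n", pause_adv(false, false));	/* none */
	return 0;
}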
-+
-+#ifdef CONFIG_PM
-+static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+
-+ wol->supported = 0;
-+ wol->wolopts = 0;
-+
-+ if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
-+ return;
-+
-+ if (priv->wol & DPAA_WOL_MAGIC) {
-+ wol->supported = WAKE_MAGIC;
-+ wol->wolopts = WAKE_MAGIC;
-+ }
-+}
-+
-+static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_dbg(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ if (!device_can_wakeup(net_dev->dev.parent) ||
-+ (wol->wolopts & ~WAKE_MAGIC))
-+ return -EOPNOTSUPP;
-+
-+ priv->wol = 0;
-+
-+ if (wol->wolopts & WAKE_MAGIC) {
-+ priv->wol = DPAA_WOL_MAGIC;
-+ device_set_wakeup_enable(net_dev->dev.parent, 1);
-+ } else {
-+ device_set_wakeup_enable(net_dev->dev.parent, 0);
-+ }
-+
-+ return 0;
-+}
-+#endif
-+
-+static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
-+{
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
-+}
-+
-+static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
-+{
-+ struct dpa_priv_s *priv;
-+
-+ priv = netdev_priv(net_dev);
-+ if (priv->mac_dev == NULL) {
-+ netdev_info(net_dev, "This is a MAC-less interface\n");
-+ return -ENODEV;
-+ }
-+
-+ if (unlikely(priv->mac_dev->phy_dev == NULL)) {
-+ netdev_err(net_dev, "phy device not initialized\n");
-+ return -ENODEV;
-+ }
-+
-+ return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
-+}
-+
-+static int dpa_get_sset_count(struct net_device *net_dev, int type)
-+{
-+ unsigned int total_stats, num_stats;
-+
-+ num_stats = num_online_cpus() + 1;
-+ total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
-+
-+ switch (type) {
-+ case ETH_SS_STATS:
-+ return total_stats;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
-+ int crr_cpu, u64 bp_count, u64 *data)
-+{
-+ int num_stat_values = num_cpus + 1;
-+ int crr_stat = 0;
-+
-+ /* update current CPU's stats and also add them to the total values */
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
-+ data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
-+
-+ data[crr_stat * num_stat_values + crr_cpu] = bp_count;
-+ data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
-+}
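copy_stats() lays the per-CPU counters out stat-major: each counter occupies num_cpus + 1 consecutive u64 slots, one per online CPU followed by a running total, which is also the order in which dpa_get_strings() emits the "[CPU n]" and "[TOTAL]" labels. A standalone sketch of the index arithmetic, assuming a hypothetical two-CPU system:

#include <stdio.h>

/* Mirrors the indexing in copy_stats(): slot = stat * (num_cpus + 1) + column,
 * where column < num_cpus selects a CPU and column == num_cpus is the total.
 */
static unsigned int stat_slot(unsigned int stat, unsigned int column,
			      unsigned int num_cpus)
{
	return stat * (num_cpus + 1) + column;
}

int main(void)
{
	unsigned int num_cpus = 2;	/* hypothetical CPU count */

	/* "rx packets" is the second per-CPU counter, i.e. stat index 1 */
	printf("rx packets [CPU 0] -> slot %u\n", stat_slot(1, 0, num_cpus));
	printf("rx packets [CPU 1] -> slot %u\n", stat_slot(1, 1, num_cpus));
	printf("rx packets [TOTAL] -> slot %u\n", stat_slot(1, 2, num_cpus));
	return 0;
}

The global counters then start at offset (num_cpus + 1) * DPA_STATS_PERCPU_LEN, which is where dpa_get_ethtool_stats() copies the rx_errors and ern_cnt structures.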
-+
-+static void dpa_get_ethtool_stats(struct net_device *net_dev,
-+ struct ethtool_stats *stats, u64 *data)
-+{
-+ u64 bp_count, cg_time, cg_num, cg_status;
-+ struct dpa_percpu_priv_s *percpu_priv;
-+ struct qm_mcr_querycgr query_cgr;
-+ struct dpa_rx_errors rx_errors;
-+ struct dpa_ern_cnt ern_cnt;
-+ struct dpa_priv_s *priv;
-+ unsigned int num_cpus, offset;
-+ struct dpa_bp *dpa_bp;
-+ int total_stats, i;
-+
-+ total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
-+ priv = netdev_priv(net_dev);
-+ dpa_bp = priv->dpa_bp;
-+ num_cpus = num_online_cpus();
-+ bp_count = 0;
-+
-+ memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
-+ memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
-+ memset(data, 0, total_stats * sizeof(u64));
-+
-+ for_each_online_cpu(i) {
-+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-+
-+ if (dpa_bp->percpu_count)
-+ bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
-+
-+ rx_errors.dme += percpu_priv->rx_errors.dme;
-+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
-+ rx_errors.fse += percpu_priv->rx_errors.fse;
-+ rx_errors.phe += percpu_priv->rx_errors.phe;
-+ rx_errors.cse += percpu_priv->rx_errors.cse;
-+
-+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
-+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
-+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
-+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
-+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
-+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
-+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
-+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
-+
-+ copy_stats(percpu_priv, num_cpus, i, bp_count, data);
-+ }
-+
-+ offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
-+ memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
-+
-+ offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
-+ memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
-+
-+ /* gather congestion related counters */
-+ cg_num = 0;
-+ cg_status = 0;
-+ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
-+ if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
-+ cg_num = priv->cgr_data.cgr_congested_count;
-+ cg_status = query_cgr.cgr.cs;
-+
-+ /* reset congestion stats (like the QMan API does) */
-+ priv->cgr_data.congested_jiffies = 0;
-+ priv->cgr_data.cgr_congested_count = 0;
-+ }
-+
-+ offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
-+ data[offset++] = cg_time;
-+ data[offset++] = cg_num;
-+ data[offset++] = cg_status;
-+}
-+
-+static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
-+{
-+ unsigned int i, j, num_cpus, size;
-+ char stat_string_cpu[ETH_GSTRING_LEN];
-+ u8 *strings;
-+
-+ strings = data;
-+ num_cpus = num_online_cpus();
-+ size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
-+
-+ for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
-+ for (j = 0; j < num_cpus; j++) {
-+ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j);
-+ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
-+ strings += ETH_GSTRING_LEN;
-+ }
-+ snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]);
-+ memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
-+ strings += ETH_GSTRING_LEN;
-+ }
-+ memcpy(strings, dpa_stats_global, size);
-+}
-+
-+static int dpaa_get_ts_info(struct net_device *net_dev,
-+ struct ethtool_ts_info *info)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct device *dev = priv->mac_dev->dev;
-+ struct device_node *mac_node = dev->of_node;
-+ struct device_node *fman_node = NULL, *ptp_node = NULL;
-+ struct platform_device *ptp_dev = NULL;
-+ struct qoriq_ptp *ptp = NULL;
-+
-+ info->phc_index = -1;
-+
-+ fman_node = of_get_parent(mac_node);
-+ if (fman_node)
-+ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
-+
-+ if (ptp_node)
-+ ptp_dev = of_find_device_by_node(ptp_node);
-+
-+ if (ptp_dev)
-+ ptp = platform_get_drvdata(ptp_dev);
-+
-+ if (ptp)
-+ info->phc_index = ptp->phc_index;
-+
-+#ifdef CONFIG_FSL_DPAA_TS
-+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
-+ SOF_TIMESTAMPING_RX_HARDWARE |
-+ SOF_TIMESTAMPING_RAW_HARDWARE;
-+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
-+ (1 << HWTSTAMP_TX_ON);
-+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-+ (1 << HWTSTAMP_FILTER_ALL);
-+#else
-+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
-+ SOF_TIMESTAMPING_SOFTWARE;
-+#endif
-+
-+ return 0;
-+}
-+
-+const struct ethtool_ops dpa_ethtool_ops = {
-+ .get_link_ksettings = dpa_get_ksettings,
-+ .set_link_ksettings = dpa_set_ksettings,
-+ .get_drvinfo = dpa_get_drvinfo,
-+ .get_msglevel = dpa_get_msglevel,
-+ .set_msglevel = dpa_set_msglevel,
-+ .nway_reset = dpa_nway_reset,
-+ .get_pauseparam = dpa_get_pauseparam,
-+ .set_pauseparam = dpa_set_pauseparam,
-+ .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
-+ .get_link = ethtool_op_get_link,
-+ .get_eee = dpa_get_eee,
-+ .set_eee = dpa_set_eee,
-+ .get_sset_count = dpa_get_sset_count,
-+ .get_ethtool_stats = dpa_get_ethtool_stats,
-+ .get_strings = dpa_get_strings,
-+#ifdef CONFIG_PM
-+ .get_wol = dpa_get_wol,
-+ .set_wol = dpa_set_wol,
-+#endif
-+ .get_ts_info = dpaa_get_ts_info,
-+};
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
-@@ -0,0 +1,931 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/io.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_mdio.h>
-+#include <linux/phy.h>
-+#include <linux/netdevice.h>
-+
-+#include "dpaa_eth.h"
-+#include "mac.h"
-+#include "lnxwrp_fsl_fman.h"
-+
-+#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
-+
-+#include "fsl_fman_dtsec.h"
-+#include "fsl_fman_tgec.h"
-+#include "fsl_fman_memac.h"
-+#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
-+
-+#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
-+
-+MODULE_DESCRIPTION(MAC_DESCRIPTION);
-+
-+struct mac_priv_s {
-+ struct fm_mac_dev *fm_mac;
-+};
-+
-+const char *mac_driver_description __initconst = MAC_DESCRIPTION;
-+const size_t mac_sizeof_priv[] = {
-+ [DTSEC] = sizeof(struct mac_priv_s),
-+ [XGMAC] = sizeof(struct mac_priv_s),
-+ [MEMAC] = sizeof(struct mac_priv_s)
-+};
-+
-+static const enet_mode_t _100[] = {
-+ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
-+ [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
-+};
-+
-+static const enet_mode_t _1000[] = {
-+ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
-+ [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
-+ [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
-+ [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
-+ [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
-+ [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
-+};
-+
-+static enet_mode_t __cold __attribute__((nonnull))
-+macdev2enetinterface(const struct mac_device *mac_dev)
-+{
-+ switch (mac_dev->max_speed) {
-+ case SPEED_100:
-+ return _100[mac_dev->phy_if];
-+ case SPEED_1000:
-+ return _1000[mac_dev->phy_if];
-+ case SPEED_2500:
-+ return e_ENET_MODE_SGMII_2500;
-+ case SPEED_10000:
-+ return e_ENET_MODE_XGMII_10000;
-+ default:
-+ return e_ENET_MODE_MII_100;
-+ }
-+}
-+
-+static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
-+{
-+ struct mac_device *mac_dev;
-+
-+ mac_dev = (struct mac_device *)_mac_dev;
-+
-+ if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
-+ /* don't flag further RX FIFO overflow errors after the first one */
-+ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
-+ e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
-+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
-+ exception);
-+ }
-+
-+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
-+ exception);
-+}
-+
-+static int __cold init(struct mac_device *mac_dev)
-+{
-+ int _errno;
-+ struct mac_priv_s *priv;
-+ t_FmMacParams param;
-+ uint32_t version;
-+
-+ priv = macdev_priv(mac_dev);
-+
-+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
-+ mac_dev->dev, mac_dev->res->start, 0x2000);
-+ param.enetMode = macdev2enetinterface(mac_dev);
-+ memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
-+ sizeof(mac_dev->addr)));
-+ param.macId = mac_dev->cell_index;
-+ param.h_Fm = (handle_t)mac_dev->fm;
-+ param.mdioIrq = NO_IRQ;
-+ param.f_Exception = mac_exception;
-+ param.f_Event = mac_exception;
-+ param.h_App = mac_dev;
-+
-+ priv->fm_mac = fm_mac_config(&param);
-+ if (unlikely(priv->fm_mac == NULL)) {
-+ _errno = -EINVAL;
-+ goto _return;
-+ }
-+
-+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
-+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
-+ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
-+
-+ _errno = fm_mac_config_max_frame_length(priv->fm_mac,
-+ fm_get_max_frm());
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
-+ /* 10G always works with pad and CRC, so only configure them for 1G */
-+ _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ _errno = fm_mac_config_half_duplex(priv->fm_mac,
-+ mac_dev->half_duplex);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ } else {
-+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ }
-+
-+ _errno = fm_mac_init(priv->fm_mac);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
-+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
-+ if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
-+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
-+ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ }
-+#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
-+
-+ /* For 10G MAC, disable Tx ECC exception */
-+ if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
-+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
-+ e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+ }
-+
-+ _errno = fm_mac_get_version(priv->fm_mac, &version);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
-+ ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
-+ "dTSEC" : "XGEC"), version);
-+
-+ goto _return;
-+
-+
-+_return_fm_mac_free:
-+ fm_mac_free(mac_dev->get_mac_handle(mac_dev));
-+
-+_return:
-+ return _errno;
-+}
-+
-+static int __cold memac_init(struct mac_device *mac_dev)
-+{
-+ int _errno;
-+ struct mac_priv_s *priv;
-+ t_FmMacParams param;
-+
-+ priv = macdev_priv(mac_dev);
-+
-+ param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
-+ mac_dev->dev, mac_dev->res->start, 0x2000);
-+ param.enetMode = macdev2enetinterface(mac_dev);
-+ memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
-+ param.macId = mac_dev->cell_index;
-+ param.h_Fm = (handle_t)mac_dev->fm;
-+ param.mdioIrq = NO_IRQ;
-+ param.f_Exception = mac_exception;
-+ param.f_Event = mac_exception;
-+ param.h_App = mac_dev;
-+
-+ priv->fm_mac = fm_mac_config(&param);
-+ if (unlikely(priv->fm_mac == NULL)) {
-+ _errno = -EINVAL;
-+ goto _return;
-+ }
-+
-+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
-+ (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
-+ param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
-+
-+ _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ _errno = fm_mac_init(priv->fm_mac);
-+ if (unlikely(_errno < 0))
-+ goto _return_fm_mac_free;
-+
-+ dev_info(mac_dev->dev, "FMan MEMAC\n");
-+
-+ goto _return;
-+
-+_return_fm_mac_free:
-+ fm_mac_free(priv->fm_mac);
-+
-+_return:
-+ return _errno;
-+}
-+
-+static int __cold start(struct mac_device *mac_dev)
-+{
-+ int _errno;
-+ struct phy_device *phy_dev = mac_dev->phy_dev;
-+
-+ _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
-+
-+ if (!_errno && phy_dev)
-+ phy_start(phy_dev);
-+
-+ return _errno;
-+}
-+
-+static int __cold stop(struct mac_device *mac_dev)
-+{
-+ if (mac_dev->phy_dev)
-+ phy_stop(mac_dev->phy_dev);
-+
-+ return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
-+}
-+
-+static int __cold set_multi(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct mac_priv_s *mac_priv;
-+ struct mac_address *old_addr, *tmp;
-+ struct netdev_hw_addr *ha;
-+ int _errno;
-+
-+ mac_priv = macdev_priv(mac_dev);
-+
-+ /* Clear previous address list */
-+ list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
-+ _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
-+ (t_EnetAddr *)old_addr->addr);
-+ if (_errno < 0)
-+ return _errno;
-+
-+ list_del(&old_addr->list);
-+ kfree(old_addr);
-+ }
-+
-+ /* Add all the addresses from the new list */
-+ netdev_for_each_mc_addr(ha, net_dev) {
-+ _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
-+ (t_EnetAddr *)ha->addr);
-+ if (_errno < 0)
-+ return _errno;
-+
-+ tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
-+ if (!tmp) {
-+ dev_err(mac_dev->dev, "Out of memory\n");
-+ return -ENOMEM;
-+ }
-+ memcpy(tmp->addr, ha->addr, ETH_ALEN);
-+ list_add(&tmp->list, &mac_dev->mc_addr_list);
-+ }
-+ return 0;
-+}
-+
-+/* Avoid redundant calls to FMD if the MAC driver already contains the desired
-+ * active PAUSE settings. Otherwise, the new active settings should be reflected
-+ * in FMan.
-+ */
-+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
-+{
-+ struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
-+ int _errno = 0;
-+
-+ if (unlikely(rx != mac_dev->rx_pause_active)) {
-+ _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
-+ if (likely(_errno == 0))
-+ mac_dev->rx_pause_active = rx;
-+ }
-+
-+ if (unlikely(tx != mac_dev->tx_pause_active)) {
-+ _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
-+ if (likely(_errno == 0))
-+ mac_dev->tx_pause_active = tx;
-+ }
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(set_mac_active_pause);
-+
-+/* Determine the MAC RX/TX PAUSE frame settings based on PHY
-+ * autonegotiation or the values set via ethtool.
-+ */
-+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
-+{
-+ struct phy_device *phy_dev = mac_dev->phy_dev;
-+ u16 lcl_adv, rmt_adv;
-+ u8 flowctrl;
-+
-+ *rx_pause = *tx_pause = false;
-+
-+ if (!phy_dev->duplex)
-+ return;
-+
-+ /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
-+ * are those set by ethtool.
-+ */
-+ if (!mac_dev->autoneg_pause) {
-+ *rx_pause = mac_dev->rx_pause_req;
-+ *tx_pause = mac_dev->tx_pause_req;
-+ return;
-+ }
-+
-+ /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
-+ * settings depend on the result of the link negotiation.
-+ */
-+
-+ /* get local capabilities */
-+ lcl_adv = 0;
-+ if (phy_dev->advertising & ADVERTISED_Pause)
-+ lcl_adv |= ADVERTISE_PAUSE_CAP;
-+ if (phy_dev->advertising & ADVERTISED_Asym_Pause)
-+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
-+
-+ /* get link partner capabilities */
-+ rmt_adv = 0;
-+ if (phy_dev->pause)
-+ rmt_adv |= LPA_PAUSE_CAP;
-+ if (phy_dev->asym_pause)
-+ rmt_adv |= LPA_PAUSE_ASYM;
-+
-+ /* Calculate TX/RX settings based on local and peer advertised
-+ * symmetric/asymmetric PAUSE capabilities.
-+ */
-+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-+ if (flowctrl & FLOW_CTRL_RX)
-+ *rx_pause = true;
-+ if (flowctrl & FLOW_CTRL_TX)
-+ *tx_pause = true;
-+}
-+EXPORT_SYMBOL(get_pause_cfg);
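get_pause_cfg() delegates the final decision to mii_resolve_flowctrl_fdx() from the kernel's mii helpers. A standalone sketch of that resolution rule, re-implemented here purely for illustration (the in-kernel helper remains the authoritative version):

#include <stdio.h>

#define ADV_PAUSE_CAP  0x0400	/* symmetric PAUSE, as ADVERTISE_PAUSE_CAP */
#define ADV_PAUSE_ASYM 0x0800	/* asymmetric PAUSE, as ADVERTISE_PAUSE_ASYM */
#define FC_TX 0x01		/* as FLOW_CTRL_TX */
#define FC_RX 0x02		/* as FLOW_CTRL_RX */

/* Symmetric pause wins when both sides advertise it; otherwise asymmetric
 * pause is granted only in the direction each side asked for.
 */
static unsigned char resolve_flowctrl_fdx(unsigned short lcl, unsigned short rmt)
{
	unsigned char cap = 0;

	if (lcl & rmt & ADV_PAUSE_CAP) {
		cap = FC_TX | FC_RX;
	} else if (lcl & rmt & ADV_PAUSE_ASYM) {
		if (lcl & ADV_PAUSE_CAP)
			cap = FC_RX;
		else if (rmt & ADV_PAUSE_CAP)
			cap = FC_TX;
	}
	return cap;
}

int main(void)
{
	/* Local side wants rx pause only; link partner advertises tx pause only. */
	unsigned short lcl = ADV_PAUSE_CAP | ADV_PAUSE_ASYM;
	unsigned short rmt = ADV_PAUSE_ASYM;

	printf("resolved flow control: 0x%x\n", resolve_flowctrl_fdx(lcl, rmt)); /* FC_RX */
	return 0;
}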
-+
-+static void adjust_link_void(struct net_device *net_dev)
-+{
-+}
-+
-+static void adjust_link(struct net_device *net_dev)
-+{
-+ struct dpa_priv_s *priv = netdev_priv(net_dev);
-+ struct mac_device *mac_dev = priv->mac_dev;
-+ struct phy_device *phy_dev = mac_dev->phy_dev;
-+ struct fm_mac_dev *fm_mac_dev;
-+ bool rx_pause, tx_pause;
-+ int _errno;
-+
-+ fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
-+ fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
-+ phy_dev->duplex);
-+
-+ get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
-+ _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
-+ if (unlikely(_errno < 0))
-+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
-+}
-+
-+/* Initializes driver's PHY state, and attaches to the PHY.
-+ * Returns 0 on success.
-+ */
-+static int dtsec_init_phy(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct phy_device *phy_dev;
-+
-+ if (of_phy_is_fixed_link(mac_dev->phy_node))
-+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
-+ 0, mac_dev->phy_if);
-+ else
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link, 0, mac_dev->phy_if);
-+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
-+ netdev_err(net_dev, "Could not connect to PHY %s\n",
-+ mac_dev->phy_node ?
-+ mac_dev->phy_node->full_name :
-+ mac_dev->fixed_bus_id);
-+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
-+ }
-+
-+ /* Remove any features not supported by the controller */
-+ phy_dev->supported &= mac_dev->if_support;
-+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-+ * as most of the PHY drivers do not enable them by default.
-+ */
-+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ phy_dev->advertising = phy_dev->supported;
-+
-+ mac_dev->phy_dev = phy_dev;
-+
-+ return 0;
-+}
-+
-+static int xgmac_init_phy(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct phy_device *phy_dev;
-+
-+ if (of_phy_is_fixed_link(mac_dev->phy_node))
-+ phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
-+ 0, mac_dev->phy_if);
-+ else
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link_void, 0, mac_dev->phy_if);
-+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
-+ netdev_err(net_dev, "Could not attach to PHY %s\n",
-+ mac_dev->phy_node ?
-+ mac_dev->phy_node->full_name :
-+ mac_dev->fixed_bus_id);
-+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
-+ }
-+
-+ phy_dev->supported &= mac_dev->if_support;
-+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-+ * as most of the PHY drivers do not enable them by default.
-+ */
-+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ phy_dev->advertising = phy_dev->supported;
-+
-+ mac_dev->phy_dev = phy_dev;
-+
-+ return 0;
-+}
-+
-+static int memac_init_phy(struct net_device *net_dev,
-+ struct mac_device *mac_dev)
-+{
-+ struct phy_device *phy_dev;
-+ void (*adjust_link_handler)(struct net_device *);
-+
-+ if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
-+ (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
-+ /* Pass a void link state handler to the PHY state machine
-+ * for XGMII (10G) and SGMII 2.5G, as the hardware does not
-+ * permit dynamic link speed adjustments. */
-+ adjust_link_handler = adjust_link_void;
-+ } else if (macdev2enetinterface(mac_dev) & e_ENET_IF_RGMII) {
-+ /* Regular RGMII ports connected to a PHY, as well as
-+ * ports that are marked as "fixed-link" in the DTS,
-+ * will have the adjust_link callback. This calls
-+ * fman_memac_adjust_link in order to configure the
-+ * IF_MODE register, which is needed in both cases.
-+ */
-+ adjust_link_handler = adjust_link;
-+ } else if (of_phy_is_fixed_link(mac_dev->phy_node)) {
-+ /* Pass a void link state handler for fixed-link
-+ * interfaces that are not RGMII. Only RGMII has been
-+ * tested and confirmed to work with fixed-link. Other
-+ * MII interfaces may need further work.
-+ * TODO: Change this as needed.
-+ */
-+ adjust_link_handler = adjust_link_void;
-+ } else {
-+ /* MII, RMII, SMII, GMII, SGMII, BASEX ports,
-+ * that are NOT fixed-link.
-+ * TODO: May not be needed for interfaces that
-+ * pass through the SerDes block (*SGMII, XFI).
-+ */
-+ adjust_link_handler = adjust_link;
-+ }
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ adjust_link_handler, 0,
-+ mac_dev->phy_if);
-+
-+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
-+ netdev_err(net_dev, "Could not connect to PHY %s\n",
-+ mac_dev->phy_node ?
-+ mac_dev->phy_node->full_name :
-+ mac_dev->fixed_bus_id);
-+ return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
-+ }
-+
-+ /* Remove any features not supported by the controller */
-+ phy_dev->supported &= mac_dev->if_support;
-+ /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-+ * as most of the PHY drivers do not enable them by default.
-+ */
-+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ phy_dev->advertising = phy_dev->supported;
-+
-+ mac_dev->phy_dev = phy_dev;
-+
-+ return 0;
-+}
-+
-+static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int _errno, __errno;
-+
-+ _errno = fm_mac_disable(fm_mac_dev);
-+ __errno = fm_mac_free(fm_mac_dev);
-+
-+ if (unlikely(__errno < 0))
-+ _errno = __errno;
-+
-+ return _errno;
-+}
-+
-+static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
-+{
-+ const struct mac_priv_s *priv;
-+ priv = macdev_priv(mac_dev);
-+ return priv->fm_mac;
-+}
-+
-+static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
-+ int i = 0, n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
-+
-+ FM_DMP_V32(buf, n, p_mm, tsec_id);
-+ FM_DMP_V32(buf, n, p_mm, tsec_id2);
-+ FM_DMP_V32(buf, n, p_mm, ievent);
-+ FM_DMP_V32(buf, n, p_mm, imask);
-+ FM_DMP_V32(buf, n, p_mm, ecntrl);
-+ FM_DMP_V32(buf, n, p_mm, ptv);
-+ FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
-+ FM_DMP_V32(buf, n, p_mm, tmr_pevent);
-+ FM_DMP_V32(buf, n, p_mm, tmr_pemask);
-+ FM_DMP_V32(buf, n, p_mm, tctrl);
-+ FM_DMP_V32(buf, n, p_mm, rctrl);
-+ FM_DMP_V32(buf, n, p_mm, maccfg1);
-+ FM_DMP_V32(buf, n, p_mm, maccfg2);
-+ FM_DMP_V32(buf, n, p_mm, ipgifg);
-+ FM_DMP_V32(buf, n, p_mm, hafdup);
-+ FM_DMP_V32(buf, n, p_mm, maxfrm);
-+
-+ FM_DMP_V32(buf, n, p_mm, macstnaddr1);
-+ FM_DMP_V32(buf, n, p_mm, macstnaddr2);
-+
-+ for (i = 0; i < 7; ++i) {
-+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
-+ FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
-+ }
-+
-+ FM_DMP_V32(buf, n, p_mm, car1);
-+ FM_DMP_V32(buf, n, p_mm, car2);
-+
-+ return n;
-+}
-+
-+static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index);
-+
-+ FM_DMP_V32(buf, n, p_mm, tgec_id);
-+ FM_DMP_V32(buf, n, p_mm, command_config);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_0);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_1);
-+ FM_DMP_V32(buf, n, p_mm, maxfrm);
-+ FM_DMP_V32(buf, n, p_mm, pause_quant);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
-+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
-+ FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
-+ FM_DMP_V32(buf, n, p_mm, mdio_command);
-+ FM_DMP_V32(buf, n, p_mm, mdio_data);
-+ FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
-+ FM_DMP_V32(buf, n, p_mm, status);
-+ FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_2);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr_3);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
-+ FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
-+ FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
-+ FM_DMP_V32(buf, n, p_mm, imask);
-+ FM_DMP_V32(buf, n, p_mm, ievent);
-+
-+ return n;
-+}
-+
-+static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
-+ int i = 0, n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index);
-+
-+ FM_DMP_V32(buf, n, p_mm, command_config);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
-+ FM_DMP_V32(buf, n, p_mm, maxfrm);
-+ FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
-+ FM_DMP_V32(buf, n, p_mm, ievent);
-+ FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
-+ FM_DMP_V32(buf, n, p_mm, imask);
-+
-+ for (i = 0; i < 4; ++i)
-+ FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
-+
-+ for (i = 0; i < 4; ++i)
-+ FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
-+
-+ FM_DMP_V32(buf, n, p_mm, rx_pause_status);
-+
-+ for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
-+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
-+ FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
-+ }
-+
-+ FM_DMP_V32(buf, n, p_mm, lpwake_timer);
-+ FM_DMP_V32(buf, n, p_mm, sleep_timer);
-+ FM_DMP_V32(buf, n, p_mm, statn_config);
-+ FM_DMP_V32(buf, n, p_mm, if_mode);
-+ FM_DMP_V32(buf, n, p_mm, if_status);
-+ FM_DMP_V32(buf, n, p_mm, hg_config);
-+ FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
-+ FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
-+ FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
-+ FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
-+ FM_DMP_V32(buf, n, p_mm, rhm);
-+ FM_DMP_V32(buf, n, p_mm, thm);
-+
-+ return n;
-+}
-+
-+static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index);
-+
-+ /* Rx Statistics Counter */
-+ FM_DMP_V32(buf, n, p_mm, reoct_l);
-+ FM_DMP_V32(buf, n, p_mm, reoct_u);
-+ FM_DMP_V32(buf, n, p_mm, roct_l);
-+ FM_DMP_V32(buf, n, p_mm, roct_u);
-+ FM_DMP_V32(buf, n, p_mm, raln_l);
-+ FM_DMP_V32(buf, n, p_mm, raln_u);
-+ FM_DMP_V32(buf, n, p_mm, rxpf_l);
-+ FM_DMP_V32(buf, n, p_mm, rxpf_u);
-+ FM_DMP_V32(buf, n, p_mm, rfrm_l);
-+ FM_DMP_V32(buf, n, p_mm, rfrm_u);
-+ FM_DMP_V32(buf, n, p_mm, rfcs_l);
-+ FM_DMP_V32(buf, n, p_mm, rfcs_u);
-+ FM_DMP_V32(buf, n, p_mm, rvlan_l);
-+ FM_DMP_V32(buf, n, p_mm, rvlan_u);
-+ FM_DMP_V32(buf, n, p_mm, rerr_l);
-+ FM_DMP_V32(buf, n, p_mm, rerr_u);
-+ FM_DMP_V32(buf, n, p_mm, ruca_l);
-+ FM_DMP_V32(buf, n, p_mm, ruca_u);
-+ FM_DMP_V32(buf, n, p_mm, rmca_l);
-+ FM_DMP_V32(buf, n, p_mm, rmca_u);
-+ FM_DMP_V32(buf, n, p_mm, rbca_l);
-+ FM_DMP_V32(buf, n, p_mm, rbca_u);
-+ FM_DMP_V32(buf, n, p_mm, rdrp_l);
-+ FM_DMP_V32(buf, n, p_mm, rdrp_u);
-+ FM_DMP_V32(buf, n, p_mm, rpkt_l);
-+ FM_DMP_V32(buf, n, p_mm, rpkt_u);
-+ FM_DMP_V32(buf, n, p_mm, rund_l);
-+ FM_DMP_V32(buf, n, p_mm, rund_u);
-+ FM_DMP_V32(buf, n, p_mm, r64_l);
-+ FM_DMP_V32(buf, n, p_mm, r64_u);
-+ FM_DMP_V32(buf, n, p_mm, r127_l);
-+ FM_DMP_V32(buf, n, p_mm, r127_u);
-+ FM_DMP_V32(buf, n, p_mm, r255_l);
-+ FM_DMP_V32(buf, n, p_mm, r255_u);
-+ FM_DMP_V32(buf, n, p_mm, r511_l);
-+ FM_DMP_V32(buf, n, p_mm, r511_u);
-+ FM_DMP_V32(buf, n, p_mm, r1023_l);
-+ FM_DMP_V32(buf, n, p_mm, r1023_u);
-+ FM_DMP_V32(buf, n, p_mm, r1518_l);
-+ FM_DMP_V32(buf, n, p_mm, r1518_u);
-+ FM_DMP_V32(buf, n, p_mm, r1519x_l);
-+ FM_DMP_V32(buf, n, p_mm, r1519x_u);
-+ FM_DMP_V32(buf, n, p_mm, rovr_l);
-+ FM_DMP_V32(buf, n, p_mm, rovr_u);
-+ FM_DMP_V32(buf, n, p_mm, rjbr_l);
-+ FM_DMP_V32(buf, n, p_mm, rjbr_u);
-+ FM_DMP_V32(buf, n, p_mm, rfrg_l);
-+ FM_DMP_V32(buf, n, p_mm, rfrg_u);
-+ FM_DMP_V32(buf, n, p_mm, rcnp_l);
-+ FM_DMP_V32(buf, n, p_mm, rcnp_u);
-+ FM_DMP_V32(buf, n, p_mm, rdrntp_l);
-+ FM_DMP_V32(buf, n, p_mm, rdrntp_u);
-+
-+ return n;
-+}
-+
-+static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index);
-+
-+
-+ /* Tx Statistics Counter */
-+ FM_DMP_V32(buf, n, p_mm, teoct_l);
-+ FM_DMP_V32(buf, n, p_mm, teoct_u);
-+ FM_DMP_V32(buf, n, p_mm, toct_l);
-+ FM_DMP_V32(buf, n, p_mm, toct_u);
-+ FM_DMP_V32(buf, n, p_mm, txpf_l);
-+ FM_DMP_V32(buf, n, p_mm, txpf_u);
-+ FM_DMP_V32(buf, n, p_mm, tfrm_l);
-+ FM_DMP_V32(buf, n, p_mm, tfrm_u);
-+ FM_DMP_V32(buf, n, p_mm, tfcs_l);
-+ FM_DMP_V32(buf, n, p_mm, tfcs_u);
-+ FM_DMP_V32(buf, n, p_mm, tvlan_l);
-+ FM_DMP_V32(buf, n, p_mm, tvlan_u);
-+ FM_DMP_V32(buf, n, p_mm, terr_l);
-+ FM_DMP_V32(buf, n, p_mm, terr_u);
-+ FM_DMP_V32(buf, n, p_mm, tuca_l);
-+ FM_DMP_V32(buf, n, p_mm, tuca_u);
-+ FM_DMP_V32(buf, n, p_mm, tmca_l);
-+ FM_DMP_V32(buf, n, p_mm, tmca_u);
-+ FM_DMP_V32(buf, n, p_mm, tbca_l);
-+ FM_DMP_V32(buf, n, p_mm, tbca_u);
-+ FM_DMP_V32(buf, n, p_mm, tpkt_l);
-+ FM_DMP_V32(buf, n, p_mm, tpkt_u);
-+ FM_DMP_V32(buf, n, p_mm, tund_l);
-+ FM_DMP_V32(buf, n, p_mm, tund_u);
-+ FM_DMP_V32(buf, n, p_mm, t64_l);
-+ FM_DMP_V32(buf, n, p_mm, t64_u);
-+ FM_DMP_V32(buf, n, p_mm, t127_l);
-+ FM_DMP_V32(buf, n, p_mm, t127_u);
-+ FM_DMP_V32(buf, n, p_mm, t255_l);
-+ FM_DMP_V32(buf, n, p_mm, t255_u);
-+ FM_DMP_V32(buf, n, p_mm, t511_l);
-+ FM_DMP_V32(buf, n, p_mm, t511_u);
-+ FM_DMP_V32(buf, n, p_mm, t1023_l);
-+ FM_DMP_V32(buf, n, p_mm, t1023_u);
-+ FM_DMP_V32(buf, n, p_mm, t1518_l);
-+ FM_DMP_V32(buf, n, p_mm, t1518_u);
-+ FM_DMP_V32(buf, n, p_mm, t1519x_l);
-+ FM_DMP_V32(buf, n, p_mm, t1519x_u);
-+ FM_DMP_V32(buf, n, p_mm, tcnp_l);
-+ FM_DMP_V32(buf, n, p_mm, tcnp_u);
-+
-+ return n;
-+}
-+
-+int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ int n = nn;
-+
-+ n = h_mac->dump_mac_regs(h_mac, buf, n);
-+
-+ return n;
-+}
-+EXPORT_SYMBOL(fm_mac_dump_regs);
-+
-+int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ int n = nn;
-+
-+ if (h_mac->dump_mac_rx_stats)
-+ n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
-+
-+ return n;
-+}
-+EXPORT_SYMBOL(fm_mac_dump_rx_stats);
-+
-+int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
-+{
-+ int n = nn;
-+
-+ if (h_mac->dump_mac_tx_stats)
-+ n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
-+
-+ return n;
-+}
-+EXPORT_SYMBOL(fm_mac_dump_tx_stats);
-+
-+static void __cold setup_dtsec(struct mac_device *mac_dev)
-+{
-+ mac_dev->init_phy = dtsec_init_phy;
-+ mac_dev->init = init;
-+ mac_dev->start = start;
-+ mac_dev->stop = stop;
-+ mac_dev->set_promisc = fm_mac_set_promiscuous;
-+ mac_dev->change_addr = fm_mac_modify_mac_addr;
-+ mac_dev->set_multi = set_multi;
-+ mac_dev->uninit = uninit;
-+ mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
-+ mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
-+ mac_dev->get_mac_handle = get_mac_handle;
-+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
-+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
-+ mac_dev->fm_rtc_enable = fm_rtc_enable;
-+ mac_dev->fm_rtc_disable = fm_rtc_disable;
-+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
-+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
-+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
-+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
-+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
-+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
-+ mac_dev->set_wol = fm_mac_set_wol;
-+ mac_dev->dump_mac_regs = dtsec_dump_regs;
-+}
-+
-+static void __cold setup_xgmac(struct mac_device *mac_dev)
-+{
-+ mac_dev->init_phy = xgmac_init_phy;
-+ mac_dev->init = init;
-+ mac_dev->start = start;
-+ mac_dev->stop = stop;
-+ mac_dev->set_promisc = fm_mac_set_promiscuous;
-+ mac_dev->change_addr = fm_mac_modify_mac_addr;
-+ mac_dev->set_multi = set_multi;
-+ mac_dev->uninit = uninit;
-+ mac_dev->get_mac_handle = get_mac_handle;
-+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
-+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
-+ mac_dev->set_wol = fm_mac_set_wol;
-+ mac_dev->dump_mac_regs = xgmac_dump_regs;
-+}
-+
-+static void __cold setup_memac(struct mac_device *mac_dev)
-+{
-+ mac_dev->init_phy = memac_init_phy;
-+ mac_dev->init = memac_init;
-+ mac_dev->start = start;
-+ mac_dev->stop = stop;
-+ mac_dev->set_promisc = fm_mac_set_promiscuous;
-+ mac_dev->change_addr = fm_mac_modify_mac_addr;
-+ mac_dev->set_multi = set_multi;
-+ mac_dev->uninit = uninit;
-+ mac_dev->get_mac_handle = get_mac_handle;
-+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
-+ mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
-+ mac_dev->fm_rtc_enable = fm_rtc_enable;
-+ mac_dev->fm_rtc_disable = fm_rtc_disable;
-+ mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
-+ mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
-+ mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
-+ mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
-+ mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
-+ mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
-+ mac_dev->set_wol = fm_mac_set_wol;
-+ mac_dev->dump_mac_regs = memac_dump_regs;
-+ mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
-+ mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
-+}
-+
-+void (*const mac_setup[])(struct mac_device *mac_dev) = {
-+ [DTSEC] = setup_dtsec,
-+ [XGMAC] = setup_xgmac,
-+ [MEMAC] = setup_memac
-+};
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
-@@ -0,0 +1,490 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_address.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_net.h>
-+#include <linux/of_mdio.h>
-+#include <linux/phy_fixed.h>
-+#include <linux/device.h>
-+#include <linux/phy.h>
-+#include <linux/io.h>
-+
-+#include "lnxwrp_fm_ext.h"
-+
-+#include "mac.h"
-+
-+#define DTSEC_SUPPORTED \
-+ (SUPPORTED_10baseT_Half \
-+ | SUPPORTED_10baseT_Full \
-+ | SUPPORTED_100baseT_Half \
-+ | SUPPORTED_100baseT_Full \
-+ | SUPPORTED_Autoneg \
-+ | SUPPORTED_Pause \
-+ | SUPPORTED_Asym_Pause \
-+ | SUPPORTED_MII)
-+
-+static const char phy_str[][11] = {
-+ [PHY_INTERFACE_MODE_MII] = "mii",
-+ [PHY_INTERFACE_MODE_GMII] = "gmii",
-+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
-+ [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
-+ [PHY_INTERFACE_MODE_TBI] = "tbi",
-+ [PHY_INTERFACE_MODE_RMII] = "rmii",
-+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
-+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
-+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
-+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
-+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
-+ [PHY_INTERFACE_MODE_XGMII] = "xgmii",
-+ [PHY_INTERFACE_MODE_2500SGMII] = "sgmii-2500",
-+};
-+
-+static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(phy_str); i++)
-+ if (strcmp(str, phy_str[i]) == 0)
-+ return (phy_interface_t)i;
-+
-+ return PHY_INTERFACE_MODE_MII;
-+}
-+
-+static const uint16_t phy2speed[] = {
-+ [PHY_INTERFACE_MODE_MII] = SPEED_100,
-+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
-+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
-+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
-+ [PHY_INTERFACE_MODE_2500SGMII] = SPEED_2500,
-+};
-+
-+static struct mac_device * __cold
-+alloc_macdev(struct device *dev, size_t sizeof_priv,
-+ void (*setup)(struct mac_device *mac_dev))
-+{
-+ struct mac_device *mac_dev;
-+
-+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
-+ if (unlikely(mac_dev == NULL))
-+ mac_dev = ERR_PTR(-ENOMEM);
-+ else {
-+ mac_dev->dev = dev;
-+ dev_set_drvdata(dev, mac_dev);
-+ setup(mac_dev);
-+ }
-+
-+ return mac_dev;
-+}
-+
-+static int __cold free_macdev(struct mac_device *mac_dev)
-+{
-+ dev_set_drvdata(mac_dev->dev, NULL);
-+
-+ return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
-+}
-+
-+static const struct of_device_id mac_match[] = {
-+ [DTSEC] = {
-+ .compatible = "fsl,fman-dtsec"
-+ },
-+ [XGMAC] = {
-+ .compatible = "fsl,fman-xgec"
-+ },
-+ [MEMAC] = {
-+ .compatible = "fsl,fman-memac"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, mac_match);
-+
-+static int __cold mac_probe(struct platform_device *_of_dev)
-+{
-+ int _errno, i;
-+ struct device *dev;
-+ struct device_node *mac_node, *dev_node;
-+ struct mac_device *mac_dev;
-+ struct platform_device *of_dev;
-+ struct resource res;
-+ const uint8_t *mac_addr;
-+ const char *char_prop;
-+ int nph;
-+ u32 cell_index;
-+ const struct of_device_id *match;
-+
-+ dev = &_of_dev->dev;
-+ mac_node = dev->of_node;
-+
-+ match = of_match_device(mac_match, dev);
-+ if (!match)
-+ return -EINVAL;
-+
-+ for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
-+ i++)
-+ ;
-+ BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
-+
-+ mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
-+ if (IS_ERR(mac_dev)) {
-+ _errno = PTR_ERR(mac_dev);
-+ dev_err(dev, "alloc_macdev() = %d\n", _errno);
-+ goto _return;
-+ }
-+
-+ INIT_LIST_HEAD(&mac_dev->mc_addr_list);
-+
-+ /* Get the FM node */
-+ dev_node = of_get_parent(mac_node);
-+ if (unlikely(dev_node == NULL)) {
-+ dev_err(dev, "of_get_parent(%s) failed\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ of_dev = of_find_device_by_node(dev_node);
-+ if (unlikely(of_dev == NULL)) {
-+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
-+ dev_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+
-+ mac_dev->fm_dev = fm_bind(&of_dev->dev);
-+ if (unlikely(mac_dev->fm_dev == NULL)) {
-+ dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
-+ _errno = -ENODEV;
-+ goto _return_of_node_put;
-+ }
-+
-+ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
-+ of_node_put(dev_node);
-+
-+ /* Get the address of the memory mapped registers */
-+ _errno = of_address_to_resource(mac_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ dev_err(dev, "of_address_to_resource(%s) = %d\n",
-+ mac_node->full_name, _errno);
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->res = __devm_request_region(
-+ dev,
-+ fm_get_mem_region(mac_dev->fm_dev),
-+ res.start, res.end + 1 - res.start, "mac");
-+ if (unlikely(mac_dev->res == NULL)) {
-+ dev_err(dev, "__devm_request_region(mac) failed\n");
-+ _errno = -EBUSY;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
-+ mac_dev->res->end + 1
-+ - mac_dev->res->start);
-+ if (unlikely(mac_dev->vaddr == NULL)) {
-+ dev_err(dev, "devm_ioremap() failed\n");
-+ _errno = -EIO;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+#define TBIPA_OFFSET 0x1c
-+#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
-+ mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
-+ if (mac_dev->tbi_node) {
-+ u32 tbiaddr = TBIPA_DEFAULT_ADDR;
-+ const __be32 *tbi_reg;
-+ void __iomem *addr;
-+
-+ tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
-+ if (tbi_reg)
-+ tbiaddr = be32_to_cpup(tbi_reg);
-+ addr = mac_dev->vaddr + TBIPA_OFFSET;
-+ /* TODO: out_be32 does not exist on ARM */
-+ out_be32(addr, tbiaddr);
-+ }
-+
-+ if (!of_device_is_available(mac_node)) {
-+ devm_iounmap(dev, mac_dev->vaddr);
-+ __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
-+ res.start, res.end + 1 - res.start);
-+ fm_unbind(mac_dev->fm_dev);
-+ devm_kfree(dev, mac_dev);
-+ dev_set_drvdata(dev, NULL);
-+ return -ENODEV;
-+ }
-+
-+ /* Get the cell-index */
-+ _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
-+ if (unlikely(_errno)) {
-+ dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
-+ mac_node->full_name);
-+ goto _return_dev_set_drvdata;
-+ }
-+ mac_dev->cell_index = (uint8_t)cell_index;
-+ if (mac_dev->cell_index >= 8)
-+ mac_dev->cell_index -= 8;
-+
-+ /* Get the MAC address */
-+ mac_addr = of_get_mac_address(mac_node);
-+ if (unlikely(mac_addr == NULL)) {
-+ dev_err(dev, "of_get_mac_address(%s) failed\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+ memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
-+
-+ /* Verify the number of port handles */
-+ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
-+ if (unlikely(nph < 0)) {
-+ dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
-+ mac_node->full_name);
-+ _errno = nph;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
-+ dev_err(dev, "Unsupported number of port handles of mac node %s from device tree\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ for_each_port_device(i, mac_dev->port_dev) {
-+ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
-+ if (unlikely(dev_node == NULL)) {
-+ dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+
-+ of_dev = of_find_device_by_node(dev_node);
-+ if (unlikely(of_dev == NULL)) {
-+ dev_err(dev, "of_find_device_by_node(%s) failed\n",
-+ dev_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+
-+ mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
-+ if (unlikely(mac_dev->port_dev[i] == NULL)) {
-+ dev_err(dev, "dev_get_drvdata(%s) failed\n",
-+ dev_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_of_node_put;
-+ }
-+ of_node_put(dev_node);
-+ }
-+
-+ /* Get the PHY connection type */
-+ _errno = of_property_read_string(mac_node, "phy-connection-type",
-+ &char_prop);
-+ if (unlikely(_errno)) {
-+ dev_warn(dev,
-+ "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
-+ mac_node->full_name);
-+ mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
-+ } else
-+ mac_dev->phy_if = str2phy(char_prop);
-+
-+ mac_dev->link = false;
-+ mac_dev->half_duplex = false;
-+ mac_dev->speed = phy2speed[mac_dev->phy_if];
-+ mac_dev->max_speed = mac_dev->speed;
-+ mac_dev->if_support = DTSEC_SUPPORTED;
-+ /* We don't support half-duplex in SGMII mode */
-+ if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii") ||
-+ strstr(char_prop, "sgmii-2500"))
-+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
-+ SUPPORTED_100baseT_Half);
-+
-+ /* Gigabit support (no half-duplex) */
-+ if (mac_dev->max_speed == SPEED_1000 ||
-+ mac_dev->max_speed == SPEED_2500)
-+ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
-+
-+ /* The 10G interface only supports one mode */
-+ if (strstr(char_prop, "xgmii"))
-+ mac_dev->if_support = SUPPORTED_10000baseT_Full;
-+
-+ /* Get the rest of the PHY information */
-+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
-+ if (!mac_dev->phy_node) {
-+ struct phy_device *phy;
-+
-+ if (!of_phy_is_fixed_link(mac_node)) {
-+ dev_err(dev, "Wrong PHY information of mac node %s\n",
-+ mac_node->full_name);
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ _errno = of_phy_register_fixed_link(mac_node);
-+ if (_errno)
-+ goto _return_dev_set_drvdata;
-+
-+ mac_dev->fixed_link = devm_kzalloc(mac_dev->dev,
-+ sizeof(*mac_dev->fixed_link),
-+ GFP_KERNEL);
-+ if (!mac_dev->fixed_link) {
-+ _errno = -ENOMEM;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->phy_node = of_node_get(mac_node);
-+ phy = of_phy_find_device(mac_dev->phy_node);
-+ if (!phy) {
-+ _errno = -EINVAL;
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ mac_dev->fixed_link->link = phy->link;
-+ mac_dev->fixed_link->speed = phy->speed;
-+ mac_dev->fixed_link->duplex = phy->duplex;
-+ mac_dev->fixed_link->pause = phy->pause;
-+ mac_dev->fixed_link->asym_pause = phy->asym_pause;
-+ printk(KERN_INFO "Setting up fixed link, speed %d duplex %d\n", mac_dev->fixed_link->speed, mac_dev->fixed_link->duplex);
-+ }
-+
-+ _errno = mac_dev->init(mac_dev);
-+ if (unlikely(_errno < 0)) {
-+ dev_err(dev, "mac_dev->init() = %d\n", _errno);
-+ goto _return_dev_set_drvdata;
-+ }
-+
-+ /* pause frame autonegotiation enabled */
-+ mac_dev->autoneg_pause = true;
-+
-+ /* By initializing these values to false, we force FMD to enable
-+ * PAUSE frames on both RX and TX.
-+ */
-+ mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
-+ mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
-+ _errno = set_mac_active_pause(mac_dev, true, true);
-+ if (unlikely(_errno < 0))
-+ dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
-+
-+ dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
-+
-+ goto _return;
-+
-+_return_of_node_put:
-+ of_node_put(dev_node);
-+_return_dev_set_drvdata:
-+ dev_set_drvdata(dev, NULL);
-+_return:
-+ return _errno;
-+}
-+
-+static int __cold mac_remove(struct platform_device *of_dev)
-+{
-+ int i, _errno;
-+ struct device *dev;
-+ struct mac_device *mac_dev;
-+
-+ dev = &of_dev->dev;
-+ mac_dev = (struct mac_device *)dev_get_drvdata(dev);
-+
-+ for_each_port_device(i, mac_dev->port_dev)
-+ fm_port_unbind(mac_dev->port_dev[i]);
-+
-+ fm_unbind(mac_dev->fm_dev);
-+
-+ _errno = free_macdev(mac_dev);
-+
-+ return _errno;
-+}
-+
-+static struct platform_driver mac_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .of_match_table = mac_match,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = mac_probe,
-+ .remove = mac_remove
-+};
-+
-+static int __init __cold mac_load(void)
-+{
-+ int _errno;
-+
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
-+
-+ _errno = platform_driver_register(&mac_driver);
-+ if (unlikely(_errno < 0))
-+ pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ return _errno;
-+}
-+module_init(mac_load);
-+
-+static void __exit __cold mac_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ platform_driver_unregister(&mac_driver);
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(mac_unload);
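The TBIPA write in mac_probe() above goes through out_be32(), which the in-code TODO notes does not exist on ARM builds. A minimal sketch of an endian-safe alternative, assuming the generic iowrite32be() accessor from <linux/io.h> and the TBIPA_OFFSET constant defined above (the helper name is illustrative, not part of this patch):

    #include <linux/io.h>

    /* Sketch: big-endian 32-bit MMIO write that builds on PowerPC, ARM and
     * ARM64 alike; iowrite32be() is the portable counterpart of out_be32().
     */
    static void tbipa_write(void __iomem *mac_vaddr, u32 tbiaddr)
    {
            iowrite32be(tbiaddr, mac_vaddr + TBIPA_OFFSET);
    }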
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
-@@ -0,0 +1,134 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __MAC_H
-+#define __MAC_H
-+
-+#include <linux/device.h> /* struct device, BUS_ID_SIZE */
-+#include <linux/if_ether.h> /* ETH_ALEN */
-+#include <linux/phy.h> /* phy_interface_t, struct phy_device */
-+#include <linux/list.h>
-+
-+#include "lnxwrp_fsl_fman.h" /* struct port_device */
-+
-+enum {DTSEC, XGMAC, MEMAC};
-+
-+struct mac_device {
-+ struct device *dev;
-+ void *priv;
-+ uint8_t cell_index;
-+ struct resource *res;
-+ void __iomem *vaddr;
-+ uint8_t addr[ETH_ALEN];
-+ bool promisc;
-+
-+ struct fm *fm_dev;
-+ struct fm_port *port_dev[2];
-+
-+ phy_interface_t phy_if;
-+ u32 if_support;
-+ bool link;
-+ bool half_duplex;
-+ uint16_t speed;
-+ uint16_t max_speed;
-+ struct device_node *phy_node;
-+ char fixed_bus_id[MII_BUS_ID_SIZE + 3];
-+ struct device_node *tbi_node;
-+ struct phy_device *phy_dev;
-+ void *fm;
-+ /* List of multicast addresses */
-+ struct list_head mc_addr_list;
-+ struct fixed_phy_status *fixed_link;
-+
-+ bool autoneg_pause;
-+ bool rx_pause_req;
-+ bool tx_pause_req;
-+ bool rx_pause_active;
-+ bool tx_pause_active;
-+
-+ struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
-+ int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
-+ int (*init)(struct mac_device *mac_dev);
-+ int (*start)(struct mac_device *mac_dev);
-+ int (*stop)(struct mac_device *mac_dev);
-+ int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
-+ int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
-+ int (*set_multi)(struct net_device *net_dev,
-+ struct mac_device *mac_dev);
-+ int (*uninit)(struct fm_mac_dev *fm_mac_dev);
-+ int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
-+ int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
-+ int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
-+ int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
-+ int (*fm_rtc_enable)(struct fm *fm_dev);
-+ int (*fm_rtc_disable)(struct fm *fm_dev);
-+ int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
-+ int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
-+ int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
-+ int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
-+ int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
-+ int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
-+ uint64_t fiper);
-+ int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
-+ int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
-+
-+ int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
-+ bool en);
-+ int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
-+ int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
-+ int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
-+};
-+
-+struct mac_address {
-+ uint8_t addr[ETH_ALEN];
-+ struct list_head list;
-+};
-+
-+#define get_fm_handle(net_dev) \
-+ (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
-+
-+#define for_each_port_device(i, port_dev) \
-+ for (i = 0; i < ARRAY_SIZE(port_dev); i++)
-+
-+static inline __attribute((nonnull)) void *macdev_priv(
-+ const struct mac_device *mac_dev)
-+{
-+ return (void *)mac_dev + sizeof(*mac_dev);
-+}
-+
-+extern const char *mac_driver_description;
-+extern const size_t mac_sizeof_priv[];
-+extern void (*const mac_setup[])(struct mac_device *mac_dev);
-+
-+int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
-+void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
-+
-+#endif /* __MAC_H */
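set_mac_active_pause() and get_pause_cfg() are only declared in this header; their definitions live elsewhere in the patch and are not shown in this hunk. A hedged sketch of how such a helper could drive the set_rx_pause/set_tx_pause ops and mirror the result in the *_pause_active flags (an illustration of the structure above, not the patch's actual implementation):

    /* Sketch only: request RX/TX pause on the MAC and record what is active. */
    static int set_mac_active_pause_sketch(struct mac_device *mac_dev,
                                           bool rx, bool tx)
    {
            struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
            int err;

            err = mac_dev->set_rx_pause(fm_mac_dev, rx);
            if (err)
                    return err;
            mac_dev->rx_pause_active = rx;

            err = mac_dev->set_tx_pause(fm_mac_dev, tx);
            if (err)
                    return err;
            mac_dev->tx_pause_active = tx;

            return 0;
    }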
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
-@@ -0,0 +1,848 @@
-+/* Copyright 2011-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
-+ * Validates device-tree configuration and sets up the offline ports.
-+ */
-+
-+#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
-+ KBUILD_BASENAME".c", __LINE__, __func__
-+#else
-+#define pr_fmt(fmt) \
-+ KBUILD_MODNAME ": " fmt
-+#endif
-+
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_platform.h>
-+#include <linux/fsl_qman.h>
-+
-+#include "offline_port.h"
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+
-+#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
-+/* Manip extra space and data alignment for fragmentation */
-+#define FRAG_MANIP_SPACE 128
-+#define FRAG_DATA_ALIGN 64
-+
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
-+MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
-+
-+
-+static const struct of_device_id oh_port_match_table[] = {
-+ {
-+ .compatible = "fsl,dpa-oh"
-+ },
-+ {
-+ .compatible = "fsl,dpa-oh-shared"
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, oh_port_match_table);
-+
-+#ifdef CONFIG_PM
-+
-+static int oh_suspend(struct device *dev)
-+{
-+ struct dpa_oh_config_s *oh_config;
-+
-+ oh_config = dev_get_drvdata(dev);
-+ return fm_port_suspend(oh_config->oh_port);
-+}
-+
-+static int oh_resume(struct device *dev)
-+{
-+ struct dpa_oh_config_s *oh_config;
-+
-+ oh_config = dev_get_drvdata(dev);
-+ return fm_port_resume(oh_config->oh_port);
-+}
-+
-+static const struct dev_pm_ops oh_pm_ops = {
-+ .suspend = oh_suspend,
-+ .resume = oh_resume,
-+};
-+
-+#define OH_PM_OPS (&oh_pm_ops)
-+
-+#else /* CONFIG_PM */
-+
-+#define OH_PM_OPS NULL
-+
-+#endif /* CONFIG_PM */
-+
-+/* Creates Frame Queues */
-+static uint32_t oh_fq_create(struct qman_fq *fq,
-+ uint32_t fq_id, uint16_t channel,
-+ uint16_t wq_id)
-+{
-+ struct qm_mcc_initfq fq_opts;
-+ uint32_t create_flags, init_flags;
-+ uint32_t ret = 0;
-+
-+ if (fq == NULL)
-+ return 1;
-+
-+ /* Set flags for FQ create */
-+ create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
-+
-+ /* Create frame queue */
-+ ret = qman_create_fq(fq_id, create_flags, fq);
-+ if (ret != 0)
-+ return 1;
-+
-+ /* Set flags for FQ init */
-+ init_flags = QMAN_INITFQ_FLAG_SCHED;
-+
-+ /* Set FQ init options. Specify destination WQ ID and channel */
-+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
-+ fq_opts.fqd.dest.wq = wq_id;
-+ fq_opts.fqd.dest.channel = channel;
-+
-+ /* Initialize frame queue */
-+ ret = qman_init_fq(fq, init_flags, &fq_opts);
-+ if (ret != 0) {
-+ qman_destroy_fq(fq, 0);
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+static void dump_fq(struct device *dev, int fqid, uint16_t channel)
-+{
-+ if (channel) {
-+ /* display fqs with a valid (!= 0) destination channel */
-+ dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
-+ }
-+}
-+
-+static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
-+ int fqs_count, uint16_t channel_id)
-+{
-+ int i;
-+ for (i = 0; i < fqs_count; i++)
-+ dump_fq(dev, (fqs + i)->fqid, channel_id);
-+}
-+
-+static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
-+{
-+ struct list_head *fq_list;
-+ struct fq_duple *fqd;
-+ int i;
-+
-+ dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
-+ dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
-+
-+ /* TX queues (old initialization) */
-+ dev_info(dev, "Initialized queues:");
-+ for (i = 0; i < conf->egress_cnt; i++)
-+ dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
-+ conf->channel);
-+
-+ /* initialized ingress queues */
-+ list_for_each(fq_list, &conf->fqs_ingress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
-+ }
-+
-+ /* initialized egress queues */
-+ list_for_each(fq_list, &conf->fqs_egress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
-+ }
-+}
-+
-+/* Destroys Frame Queues */
-+static void oh_fq_destroy(struct qman_fq *fq)
-+{
-+ int _errno = 0;
-+
-+ _errno = qman_retire_fq(fq, NULL);
-+ if (unlikely(_errno < 0))
-+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__,
-+ qman_fq_fqid(fq), _errno);
-+
-+ _errno = qman_oos_fq(fq);
-+ if (unlikely(_errno < 0)) {
-+ pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__,
-+ qman_fq_fqid(fq), _errno);
-+ }
-+
-+ qman_destroy_fq(fq, 0);
-+}
-+
-+/* Allocation code for the OH port's PCD frame queues */
-+static int __cold oh_alloc_pcd_fqids(struct device *dev,
-+ uint32_t num,
-+ uint8_t alignment,
-+ uint32_t *base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+ BUG();
-+
-+ return 0;
-+}
-+
-+static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
-+{
-+ dev_crit(dev, "callback not implemented!\n");
-+ BUG();
-+
-+ return 0;
-+}
-+
-+static void oh_set_buffer_layout(struct fm_port *port,
-+ struct dpa_buffer_layout_s *layout)
-+{
-+ struct fm_port_params params;
-+
-+ layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
-+ layout->parse_results = true;
-+ layout->hash_results = true;
-+ layout->time_stamp = false;
-+
-+ fm_port_get_buff_layout_ext_params(port, &params);
-+ layout->manip_extra_space = params.manip_extra_space;
-+ layout->data_align = params.data_align;
-+}
-+
-+static int
-+oh_port_probe(struct platform_device *_of_dev)
-+{
-+ struct device *dpa_oh_dev;
-+ struct device_node *dpa_oh_node;
-+ int lenp, _errno = 0, fq_idx, duple_idx;
-+ int n_size, i, j, ret, duples_count;
-+ struct platform_device *oh_of_dev;
-+ struct device_node *oh_node, *bpool_node = NULL, *root_node;
-+ struct device *oh_dev;
-+ struct dpa_oh_config_s *oh_config = NULL;
-+ const __be32 *oh_all_queues;
-+ const __be32 *channel_ids;
-+ const __be32 *oh_tx_queues;
-+ uint32_t queues_count;
-+ uint32_t crt_fqid_base;
-+ uint32_t crt_fq_count;
-+ bool frag_enabled = false;
-+ struct fm_port_params oh_port_tx_params;
-+ struct fm_port_pcd_param oh_port_pcd_params;
-+ struct dpa_buffer_layout_s buf_layout;
-+
-+ /* True if the current partition owns the OH port. */
-+ bool init_oh_port;
-+
-+ const struct of_device_id *match;
-+ int crt_ext_pools_count;
-+ u32 ext_pool_size;
-+ u32 port_id;
-+ u32 channel_id;
-+
-+ int channel_ids_count;
-+ int channel_idx;
-+ struct fq_duple *fqd;
-+ struct list_head *fq_list, *fq_list_tmp;
-+
-+ const __be32 *bpool_cfg;
-+ uint32_t bpid;
-+
-+ memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
-+ dpa_oh_dev = &_of_dev->dev;
-+ dpa_oh_node = dpa_oh_dev->of_node;
-+ BUG_ON(dpa_oh_node == NULL);
-+
-+ match = of_match_device(oh_port_match_table, dpa_oh_dev);
-+ if (!match)
-+ return -EINVAL;
-+
-+ dev_dbg(dpa_oh_dev, "Probing OH port...\n");
-+
-+ /* Find the referenced OH node */
-+ oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
-+ if (oh_node == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "Can't find OH node referenced from node %s\n",
-+ dpa_oh_node->full_name);
-+ return -EINVAL;
-+ }
-+ dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
-+ match->compatible);
-+
-+ _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
-+ if (_errno) {
-+ dev_err(dpa_oh_dev, "No port id found in node %s\n",
-+ dpa_oh_node->full_name);
-+ goto return_kfree;
-+ }
-+
-+ _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
-+ &channel_id);
-+ if (_errno) {
-+ dev_err(dpa_oh_dev, "No channel id found in node %s\n",
-+ dpa_oh_node->full_name);
-+ goto return_kfree;
-+ }
-+
-+ oh_of_dev = of_find_device_by_node(oh_node);
-+ BUG_ON(oh_of_dev == NULL);
-+ oh_dev = &oh_of_dev->dev;
-+
-+ /* The OH port must be initialized exactly once.
-+ * The following scenarios are of interest:
-+ * - the node is Linux-private (will always initialize it);
-+ * - the node is shared between two Linux partitions
-+ * (only one of them will initialize it);
-+ * - the node is shared between a Linux and a LWE partition
-+ * (Linux will initialize it) - "fsl,dpa-oh-shared"
-+ */
-+
-+ /* Check if the current partition owns the OH port
-+ * and ought to initialize it. It may be the case that we leave this
-+ * to another (also Linux) partition.
-+ */
-+ init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
-+
-+ /* If we aren't the "owner" of the OH node, we're done here. */
-+ if (!init_oh_port) {
-+ dev_dbg(dpa_oh_dev,
-+ "Not owning the shared OH port %s, will not initialize it.\n",
-+ oh_node->full_name);
-+ of_node_put(oh_node);
-+ return 0;
-+ }
-+
-+ /* Allocate OH dev private data */
-+ oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
-+ if (oh_config == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "Can't allocate private data for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
-+ INIT_LIST_HEAD(&oh_config->fqs_egress_list);
-+
-+ /* FQs that enter OH port */
-+ lenp = 0;
-+ oh_all_queues = of_get_property(dpa_oh_node,
-+ "fsl,qman-frame-queues-ingress", &lenp);
-+ if (lenp % (2 * sizeof(*oh_all_queues))) {
-+ dev_warn(dpa_oh_dev,
-+ "Wrong ingress queues format for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ /* just ignore the last unpaired value */
-+ }
-+
-+ duples_count = lenp / (2 * sizeof(*oh_all_queues));
-+ dev_err(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
-+ duples_count);
-+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
-+
-+ fqd = devm_kzalloc(dpa_oh_dev,
-+ sizeof(struct fq_duple), GFP_KERNEL);
-+ if (!fqd) {
-+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ fqd->fqs = devm_kzalloc(dpa_oh_dev,
-+ crt_fq_count * sizeof(struct qman_fq),
-+ GFP_KERNEL);
-+ if (!fqd->fqs) {
-+ dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ for (j = 0; j < crt_fq_count; j++)
-+ (fqd->fqs + j)->fqid = crt_fqid_base + j;
-+ fqd->fqs_count = crt_fq_count;
-+ fqd->channel_id = (uint16_t)channel_id;
-+ list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
-+ }
-+
-+ /* create the ingress queues */
-+ list_for_each(fq_list, &oh_config->fqs_ingress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+
-+ for (j = 0; j < fqd->fqs_count; j++) {
-+ ret = oh_fq_create(fqd->fqs + j,
-+ (fqd->fqs + j)->fqid,
-+ fqd->channel_id, 3);
-+ if (ret != 0) {
-+ dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
-+ (fqd->fqs + j)->fqid,
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ }
-+ }
-+
-+ /* FQs that exit OH port */
-+ lenp = 0;
-+ oh_all_queues = of_get_property(dpa_oh_node,
-+ "fsl,qman-frame-queues-egress", &lenp);
-+ if (lenp % (2 * sizeof(*oh_all_queues))) {
-+ dev_warn(dpa_oh_dev,
-+ "Wrong egress queues format for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ /* just ignore the last unpaired value */
-+ }
-+
-+ duples_count = lenp / (2 * sizeof(*oh_all_queues));
-+ dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
-+ duples_count);
-+ for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
-+
-+ fqd = devm_kzalloc(dpa_oh_dev,
-+ sizeof(struct fq_duple), GFP_KERNEL);
-+ if (!fqd) {
-+ dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ fqd->fqs = devm_kzalloc(dpa_oh_dev,
-+ crt_fq_count * sizeof(struct qman_fq),
-+ GFP_KERNEL);
-+ if (!fqd->fqs) {
-+ dev_err(dpa_oh_dev,
-+ "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ for (j = 0; j < crt_fq_count; j++)
-+ (fqd->fqs + j)->fqid = crt_fqid_base + j;
-+ fqd->fqs_count = crt_fq_count;
-+ /* channel ID is specified in another attribute */
-+ fqd->channel_id = 0;
-+ list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
-+
-+ /* the queues themselves are created below, once the
-+ * egress channel ids have been read
-+ */
-+ }
-+
-+ /* channel_ids for FQs that exit OH port */
-+ lenp = 0;
-+ channel_ids = of_get_property(dpa_oh_node,
-+ "fsl,qman-channel-ids-egress", &lenp);
-+
-+ channel_ids_count = lenp / (sizeof(*channel_ids));
-+ if (channel_ids_count != duples_count) {
-+ dev_warn(dpa_oh_dev,
-+ "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ /* just ignore the queues that do not have a Channel ID */
-+ }
-+
-+ channel_idx = 0;
-+ list_for_each(fq_list, &oh_config->fqs_egress_list) {
-+ if (channel_idx + 1 > channel_ids_count)
-+ break;
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ fqd->channel_id =
-+ (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
-+ }
-+
-+ /* create egress queues */
-+ list_for_each(fq_list, &oh_config->fqs_egress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+
-+ if (fqd->channel_id == 0) {
-+ /* missing channel id in dts */
-+ continue;
-+ }
-+
-+ for (j = 0; j < fqd->fqs_count; j++) {
-+ ret = oh_fq_create(fqd->fqs + j,
-+ (fqd->fqs + j)->fqid,
-+ fqd->channel_id, 3);
-+ if (ret != 0) {
-+ dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
-+ (fqd->fqs + j)->fqid,
-+ oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ }
-+ }
-+
-+ /* Read FQ ids/nums for the DPA OH node */
-+ oh_all_queues = of_get_property(dpa_oh_node,
-+ "fsl,qman-frame-queues-oh", &lenp);
-+ if (oh_all_queues == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "No frame queues have been defined for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ /* Check that the OH error and default FQs are there */
-+ BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
-+ queues_count = lenp / (2 * sizeof(*oh_all_queues));
-+ if (queues_count != 2) {
-+ dev_err(dpa_oh_dev,
-+ "Error and Default queues must be defined for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ /* Read the FQIDs defined for this OH port */
-+ dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
-+ fq_idx = 0;
-+
-+ /* Error FQID - must be present */
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ if (crt_fq_count != 1) {
-+ dev_err(dpa_oh_dev,
-+ "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
-+ oh_node->full_name, dpa_oh_node->full_name,
-+ crt_fq_count);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ oh_config->error_fqid = crt_fqid_base;
-+ dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
-+ oh_config->error_fqid, oh_node->full_name);
-+
-+ /* Default FQID - must be present */
-+ crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
-+ if (crt_fq_count != 1) {
-+ dev_err(dpa_oh_dev,
-+ "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
-+ oh_node->full_name, dpa_oh_node->full_name,
-+ crt_fq_count);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ oh_config->default_fqid = crt_fqid_base;
-+ dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
-+ oh_config->default_fqid, oh_node->full_name);
-+
-+ /* TX FQID - presence is optional */
-+ oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
-+ &lenp);
-+ if (oh_tx_queues == NULL) {
-+ dev_dbg(dpa_oh_dev,
-+ "No tx queues have been defined for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ goto config_port;
-+ }
-+
-+ /* Check that queues-tx has only a base and a count defined */
-+ BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
-+ queues_count = lenp / (2 * sizeof(*oh_tx_queues));
-+ if (queues_count != 1) {
-+ dev_err(dpa_oh_dev,
-+ "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ fq_idx = 0;
-+ crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
-+ crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
-+ oh_config->egress_cnt = crt_fq_count;
-+
-+ /* Allocate TX queues */
-+ dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
-+ oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
-+ crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
-+ if (oh_config->egress_fqs == NULL) {
-+ dev_err(dpa_oh_dev,
-+ "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
-+ oh_node->full_name, dpa_oh_node->full_name);
-+ _errno = -ENOMEM;
-+ goto return_kfree;
-+ }
-+
-+ /* Create TX queues */
-+ for (i = 0; i < crt_fq_count; i++) {
-+ ret = oh_fq_create(oh_config->egress_fqs + i,
-+ crt_fqid_base + i, (uint16_t)channel_id, 3);
-+ if (ret != 0) {
-+ dev_err(dpa_oh_dev,
-+ "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
-+ crt_fqid_base + i, oh_node->full_name,
-+ dpa_oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+ }
-+
-+config_port:
-+ /* Get a handle to the fm_port so we can set
-+ * its configuration params
-+ */
-+ oh_config->oh_port = fm_port_bind(oh_dev);
-+ if (oh_config->oh_port == NULL) {
-+ dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
-+ oh_node->full_name);
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
-+
-+ /* read the pool handlers */
-+ crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
-+ "fsl,bman-buffer-pools", NULL);
-+ if (crt_ext_pools_count <= 0) {
-+ dev_info(dpa_oh_dev,
-+ "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
-+ oh_node->full_name);
-+ goto init_port;
-+ }
-+
-+ /* used for reading ext_pool_size */
-+ root_node = of_find_node_by_path("/");
-+ if (root_node == NULL) {
-+ dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ n_size = of_n_size_cells(root_node);
-+ of_node_put(root_node);
-+
-+ dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
-+ crt_ext_pools_count);
-+
-+ oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
-+
-+ for (i = 0; i < crt_ext_pools_count; i++) {
-+ bpool_node = of_parse_phandle(dpa_oh_node,
-+ "fsl,bman-buffer-pools", i);
-+ if (bpool_node == NULL) {
-+ dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
-+ if (_errno) {
-+ dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
-+ dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
-+
-+ bpool_cfg = of_get_property(bpool_node,
-+ "fsl,bpool-ethernet-cfg", &lenp);
-+ if (bpool_cfg == NULL) {
-+ dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
-+ _errno = -EINVAL;
-+ goto return_kfree;
-+ }
-+
-+ ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
-+ oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
-+ dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
-+ ext_pool_size);
-+ of_node_put(bpool_node);
-+
-+ }
-+
-+ if (buf_layout.data_align != FRAG_DATA_ALIGN ||
-+ buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
-+ goto init_port;
-+
-+ frag_enabled = true;
-+ dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d",
-+ port_id);
-+
-+init_port:
-+ of_node_put(oh_node);
-+ /* Set Tx params */
-+ dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
-+ oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
-+ frag_enabled);
-+ /* Set PCD params */
-+ oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
-+ oh_port_pcd_params.cbf = oh_free_pcd_fqids;
-+ oh_port_pcd_params.dev = dpa_oh_dev;
-+ fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
-+
-+ dev_set_drvdata(dpa_oh_dev, oh_config);
-+
-+ /* Enable the OH port */
-+ _errno = fm_port_enable(oh_config->oh_port);
-+ if (_errno)
-+ goto return_kfree;
-+
-+ dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
-+
-+ /* print of all referenced & created queues */
-+ dump_oh_config(dpa_oh_dev, oh_config);
-+
-+ return 0;
-+
-+return_kfree:
-+ if (bpool_node)
-+ of_node_put(bpool_node);
-+ if (oh_node)
-+ of_node_put(oh_node);
-+ /* error paths taken before oh_config is allocated must not touch it */
-+ if (oh_config == NULL)
-+ return _errno;
-+
-+ if (oh_config->egress_fqs)
-+ devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
-+
-+ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ list_del(fq_list);
-+ devm_kfree(dpa_oh_dev, fqd->fqs);
-+ devm_kfree(dpa_oh_dev, fqd);
-+ }
-+
-+ list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
-+ fqd = list_entry(fq_list, struct fq_duple, fq_list);
-+ list_del(fq_list);
-+ devm_kfree(dpa_oh_dev, fqd->fqs);
-+ devm_kfree(dpa_oh_dev, fqd);
-+ }
-+
-+ devm_kfree(dpa_oh_dev, oh_config);
-+ return _errno;
-+}
-+
-+static int __cold oh_port_remove(struct platform_device *_of_dev)
-+{
-+ int _errno = 0, i;
-+ struct dpa_oh_config_s *oh_config;
-+
-+ pr_info("Removing OH port...\n");
-+
-+ oh_config = dev_get_drvdata(&_of_dev->dev);
-+ if (oh_config == NULL) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): No OH config in device private data!\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__);
-+ _errno = -ENODEV;
-+ goto return_error;
-+ }
-+
-+ if (oh_config->egress_fqs)
-+ for (i = 0; i < oh_config->egress_cnt; i++)
-+ oh_fq_destroy(oh_config->egress_fqs + i);
-+
-+ if (oh_config->oh_port == NULL) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): No fm port in device private data!\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__);
-+ _errno = -EINVAL;
-+ goto free_egress_fqs;
-+ }
-+
-+ _errno = fm_port_disable(oh_config->oh_port);
-+
-+free_egress_fqs:
-+ if (oh_config->egress_fqs)
-+ devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
-+ devm_kfree(&_of_dev->dev, oh_config);
-+ dev_set_drvdata(&_of_dev->dev, NULL);
-+
-+return_error:
-+ return _errno;
-+}
-+
-+static struct platform_driver oh_port_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .of_match_table = oh_port_match_table,
-+ .owner = THIS_MODULE,
-+ .pm = OH_PM_OPS,
-+ },
-+ .probe = oh_port_probe,
-+ .remove = oh_port_remove
-+};
-+
-+static int __init __cold oh_port_load(void)
-+{
-+ int _errno;
-+
-+ pr_info(OH_MOD_DESCRIPTION "\n");
-+
-+ _errno = platform_driver_register(&oh_port_driver);
-+ if (_errno < 0) {
-+ pr_err(KBUILD_MODNAME
-+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
-+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
-+ }
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+ return _errno;
-+}
-+module_init(oh_port_load);
-+
-+static void __exit __cold oh_port_unload(void)
-+{
-+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
-+ KBUILD_BASENAME".c", __func__);
-+
-+ platform_driver_unregister(&oh_port_driver);
-+
-+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
-+ KBUILD_BASENAME".c", __func__);
-+}
-+module_exit(oh_port_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
-@@ -0,0 +1,59 @@
-+/* Copyright 2011 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __OFFLINE_PORT_H
-+#define __OFFLINE_PORT_H
-+
-+struct fm_port;
-+struct qman_fq;
-+
-+/* fqs are defined in duples (base_fq, fq_count) */
-+struct fq_duple {
-+ struct qman_fq *fqs;
-+ int fqs_count;
-+ uint16_t channel_id;
-+ struct list_head fq_list;
-+};
-+
-+/* OH port configuration */
-+struct dpa_oh_config_s {
-+ uint32_t error_fqid;
-+ uint32_t default_fqid;
-+ struct fm_port *oh_port;
-+ uint32_t egress_cnt;
-+ struct qman_fq *egress_fqs;
-+ uint16_t channel;
-+
-+ struct list_head fqs_ingress_list;
-+ struct list_head fqs_egress_list;
-+};
-+
-+#endif /* __OFFLINE_PORT_H */
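The fq_duple lists declared above are walked in offline_port.c with list_for_each() plus list_entry(); list_for_each_entry() from <linux/list.h> expresses the same traversal more compactly. A sketch (the function name is illustrative, not part of the patch):

    #include <linux/list.h>

    /* Sketch: print every frame queue held in a list of fq_duple entries. */
    static void dump_duple_list_sketch(struct device *dev,
                                       struct list_head *duple_list)
    {
            struct fq_duple *fqd;
            int i;

            list_for_each_entry(fqd, duple_list, fq_list)
                    for (i = 0; i < fqd->fqs_count; i++)
                            dev_info(dev, "FQ ID:%d Channel ID:%d\n",
                                     fqd->fqs[i].fqid, fqd->channel_id);
    }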
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Kconfig
-@@ -0,0 +1,153 @@
-+menu "Frame Manager support"
-+
-+menuconfig FSL_SDK_FMAN
-+ bool "Freescale Frame Manager (datapath) support - SDK driver"
-+ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && !FSL_FMAN
-+ default y
-+ ---help---
-+ Support for the QorIQ DPAA Frame Manager (FMan) hardware block,
-+ using the NXP/Freescale SDK driver. If unsure, say Y.
-+
-+if FSL_SDK_FMAN
-+
-+config FSL_SDK_FMAN_TEST
-+ bool "FMan test module"
-+ default n
-+ select FSL_DPAA_HOOKS
-+ ---help---
-+ This option compiles test code for FMan.
-+
-+menu "FMAN Processor support"
-+choice
-+ depends on FSL_SDK_FMAN
-+ prompt "Processor Type"
-+
-+config FMAN_ARM
-+ bool "LS1043"
-+ depends on ARM64 || ARM
-+ ---help---
-+ Choose "LS1043" for the ARM platforms:
-+ LS1043
-+
-+config FMAN_P3040_P4080_P5020
-+ bool "P3040 P4080 5020"
-+
-+config FMAN_P1023
-+ bool "P1023"
-+
-+config FMAN_V3H
-+ bool "FmanV3H"
-+ ---help---
-+ Choose "FmanV3H" for Fman rev3H:
-+ B4860, T4240, T4160, etc
-+
-+config FMAN_V3L
-+ bool "FmanV3L"
-+ ---help---
-+ Choose "FmanV3L" for Fman rev3L:
-+ T1040, T1042, T1020, T1022, T1023, T1024, etc
-+
-+endchoice
-+endmenu
-+
-+config FMAN_MIB_CNT_OVF_IRQ_EN
-+ bool "Enable the dTSEC MIB counters overflow interrupt"
-+ default n
-+ ---help---
-+ Enable the dTSEC MIB counters overflow interrupt to get
-+ accurate MIB counter values. When enabled, it compensates
-+ for counter overflow, but reduces performance and
-+ triggers error messages in HV setups.
-+
-+config FSL_FM_MAX_FRAME_SIZE
-+ int "Maximum L2 frame size"
-+ depends on FSL_SDK_FMAN
-+ range 64 9600
-+ default "1522"
-+ help
-+ Configure this in relation to the maximum possible MTU of your
-+ network configuration. In particular, one would need to
-+ increase this value in order to use jumbo frames.
-+ FSL_FM_MAX_FRAME_SIZE must accommodate the Ethernet FCS (4 bytes)
-+ and one ETH+VLAN header (18 bytes), to a total of 22 bytes in
-+ excess of the desired L3 MTU.
-+
-+ Note that having too large a FSL_FM_MAX_FRAME_SIZE (much larger
-+ than the actual MTU) may lead to buffer exhaustion, especially
-+ in the case of badly fragmented datagrams on the Rx path.
-+ Conversely, having a FSL_FM_MAX_FRAME_SIZE smaller than the actual
-+ MTU will lead to frames being dropped.
-+
-+ This can be overridden by specifying "fsl_fm_max_frm" in
-+ the kernel bootargs:
-+ * in Hypervisor-based scenarios, by adding a "chosen" node
-+ with the "bootargs" property specifying
-+ "fsl_fm_max_frm=<YourValue>";
-+ * in non-Hypervisor-based scenarios, via u-boot's env, by
-+ modifying the "bootargs" env variable.
-+
-+config FSL_FM_RX_EXTRA_HEADROOM
-+ int "Add extra headroom at beginning of data buffers"
-+ depends on FSL_SDK_FMAN
-+ range 16 384
-+ default "64"
-+ help
-+ Configure this to tell the Frame Manager to reserve some extra
-+ space at the beginning of a data buffer on the receive path,
-+ before Internal Context fields are copied. This is in addition
-+ to the private data area already reserved for driver internal
-+ use. The provided value must be a multiple of 16.
-+
-+ This setting can be overridden by specifying
-+ "fsl_fm_rx_extra_headroom" in the kernel bootargs:
-+ * in Hypervisor-based scenarios, by adding a "chosen" node
-+ with the "bootargs" property specifying
-+ "fsl_fm_rx_extra_headroom=<YourValue>";
-+ * in non-Hypervisor-based scenarios, via u-boot's env, by
-+ modifying the "bootargs" env variable.
-+
-+config FMAN_PFC
-+ bool "FMan PFC support (EXPERIMENTAL)"
-+ depends on ( FMAN_V3H || FMAN_V3L || FMAN_ARM) && FSL_SDK_FMAN
-+ default n
-+ help
-+ This option enables PFC support on FMan v3 ports.
-+ Data Center Bridging defines Classes of Service that are
-+ flow-controlled using PFC pause frames.
-+
-+if FMAN_PFC
-+config FMAN_PFC_COS_COUNT
-+ int "Number of PFC Classes of Service"
-+ depends on FMAN_PFC && FSL_SDK_FMAN
-+ range 1 4
-+ default "3"
-+ help
-+ The number of Classes of Service controlled by PFC.
-+
-+config FMAN_PFC_QUANTA_0
-+ int "The pause quanta for PFC CoS 0"
-+ depends on FMAN_PFC && FSL_SDK_FMAN
-+ range 0 65535
-+ default "65535"
-+
-+config FMAN_PFC_QUANTA_1
-+ int "The pause quanta for PFC CoS 1"
-+ depends on FMAN_PFC && FSL_SDK_FMAN
-+ range 0 65535
-+ default "65535"
-+
-+config FMAN_PFC_QUANTA_2
-+ int "The pause quanta for PFC CoS 2"
-+ depends on FMAN_PFC && FSL_SDK_FMAN
-+ range 0 65535
-+ default "65535"
-+
-+config FMAN_PFC_QUANTA_3
-+ int "The pause quanta for PFC CoS 3"
-+ depends on FMAN_PFC && FSL_SDK_FMAN
-+ range 0 65535
-+ default "65535"
-+endif
-+
-+endif # FSL_SDK_FMAN
-+
-+endmenu
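As the help texts above describe, FSL_FM_MAX_FRAME_SIZE and FSL_FM_RX_EXTRA_HEADROOM can be overridden at boot time through the "fsl_fm_max_frm" and "fsl_fm_rx_extra_headroom" kernel parameters. A hedged example of a u-boot bootargs line (console/root settings and the numeric values are placeholders, chosen within the Kconfig ranges above):

    setenv bootargs "console=ttyS0,115200 root=/dev/ram0 fsl_fm_max_frm=9600 fsl_fm_rx_extra_headroom=128"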
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Makefile
-@@ -0,0 +1,11 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+#
-+obj-y += etc/
-+obj-y += Peripherals/FM/
-+obj-y += src/
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/Makefile
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+obj-y += fsl-ncsw-Hc.o
-+
-+fsl-ncsw-Hc-objs := hc.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/HC/hc.c
-@@ -0,0 +1,1232 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "sprint_ext.h"
-+#include "string_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_hc.h"
-+
-+
-+/**************************************************************************//**
-+ @Description defaults
-+*//***************************************************************************/
-+#define DEFAULT_dataMemId 0
-+
-+#define HC_HCOR_OPCODE_PLCR_PRFL 0x0
-+#define HC_HCOR_OPCODE_KG_SCM 0x1
-+#define HC_HCOR_OPCODE_SYNC 0x2
-+#define HC_HCOR_OPCODE_CC 0x3
-+#define HC_HCOR_OPCODE_CC_AGE_MASK 0x4
-+#define HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT 0x5
-+#define HC_HCOR_OPCODE_CC_REASSM_TIMEOUT 0x10
-+#define HC_HCOR_OPCODE_CC_IP_FRAG_INITIALIZATION 0x11
-+#define HC_HCOR_OPCODE_CC_UPDATE_WITH_AGING 0x13
-+#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_ACTIVE_SHIFT 24
-+#define HC_HCOR_EXTRA_REG_REASSM_TIMEOUT_TSBS_SHIFT 24
-+#define HC_HCOR_EXTRA_REG_CC_AGING_ADD 0x80000000
-+#define HC_HCOR_EXTRA_REG_CC_AGING_REMOVE 0x40000000
-+#define HC_HCOR_EXTRA_REG_CC_AGING_CHANGE_MASK 0xC0000000
-+#define HC_HCOR_EXTRA_REG_CC_REMOVE_INDX_SHIFT 24
-+#define HC_HCOR_EXTRA_REG_CC_REMOVE_INDX_MASK 0x1F000000
-+#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_SHIFT 16
-+#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_MASK 0xF
-+#define HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_CMD_SHIFT 24
-+#define HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_BPID 16
-+
-+#define HC_HCOR_GBL 0x20000000
-+
-+#define HC_HCOR_KG_SCHEME_COUNTER 0x00000400
-+
-+#if (DPAA_VERSION == 10)
-+#define HC_HCOR_KG_SCHEME_REGS_MASK 0xFFFFF800
-+#else
-+#define HC_HCOR_KG_SCHEME_REGS_MASK 0xFFFFFE00
-+#endif /* (DPAA_VERSION == 10) */
-+
-+#define SIZE_OF_HC_FRAME_PORT_REGS (sizeof(t_HcFrame)-sizeof(struct fman_kg_scheme_regs)+sizeof(t_FmPcdKgPortRegs))
-+#define SIZE_OF_HC_FRAME_SCHEME_REGS sizeof(t_HcFrame)
-+#define SIZE_OF_HC_FRAME_PROFILES_REGS (sizeof(t_HcFrame)-sizeof(struct fman_kg_scheme_regs)+sizeof(t_FmPcdPlcrProfileRegs))
-+#define SIZE_OF_HC_FRAME_PROFILE_CNT (sizeof(t_HcFrame)-sizeof(t_FmPcdPlcrProfileRegs)+sizeof(uint32_t))
-+#define SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC 16
-+
-+#define HC_CMD_POOL_SIZE (INTG_MAX_NUM_OF_CORES)
-+
-+#define BUILD_FD(len) \
-+do { \
-+ memset(&fmFd, 0, sizeof(t_DpaaFD)); \
-+ DPAA_FD_SET_ADDR(&fmFd, p_HcFrame); \
-+ DPAA_FD_SET_OFFSET(&fmFd, 0); \
-+ DPAA_FD_SET_LENGTH(&fmFd, len); \
-+} while (0)
-+
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+typedef struct t_FmPcdKgPortRegs {
-+ volatile uint32_t spReg;
-+ volatile uint32_t cppReg;
-+} t_FmPcdKgPortRegs;
-+
-+typedef struct t_HcFrame {
-+ volatile uint32_t opcode;
-+ volatile uint32_t actionReg;
-+ volatile uint32_t extraReg;
-+ volatile uint32_t commandSequence;
-+ union {
-+ struct fman_kg_scheme_regs schemeRegs;
-+ struct fman_kg_scheme_regs schemeRegsWithoutCounter;
-+ t_FmPcdPlcrProfileRegs profileRegs;
-+ volatile uint32_t singleRegForWrite; /* for writing SP, CPP, profile counter */
-+ t_FmPcdKgPortRegs portRegsForRead;
-+ volatile uint32_t clsPlanEntries[CLS_PLAN_NUM_PER_GRP];
-+ t_FmPcdCcCapwapReassmTimeoutParams ccCapwapReassmTimeout;
-+ t_FmPcdCcReassmTimeoutParams ccReassmTimeout;
-+ } hcSpecificData;
-+} t_HcFrame;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+typedef struct t_FmHc {
-+ t_Handle h_FmPcd;
-+ t_Handle h_HcPortDev;
-+ t_FmPcdQmEnqueueCallback *f_QmEnqueue; /**< A callback for enqueuing frames to the QM */
-+ t_Handle h_QmArg; /**< A handle to the QM module */
-+ uint8_t dataMemId; /**< Memory partition ID for data buffers */
-+
-+ uint32_t seqNum[HC_CMD_POOL_SIZE]; /* FIFO of seqNum to use when
-+ taking buffer */
-+ uint32_t nextSeqNumLocation; /* seqNum location in seqNum[] for next buffer */
-+ volatile bool enqueued[HC_CMD_POOL_SIZE]; /* HC is active - frame is enqueued
-+ and not confirmed yet */
-+ t_HcFrame *p_Frm[HC_CMD_POOL_SIZE];
-+} t_FmHc;
-+
-+
-+static t_Error FillBufPool(t_FmHc *p_FmHc)
-+{
-+ uint32_t i;
-+
-+ ASSERT_COND(p_FmHc);
-+
-+ for (i = 0; i < HC_CMD_POOL_SIZE; i++)
-+ {
-+#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
-+ p_FmHc->p_Frm[i] = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + (16 - (sizeof(t_FmHc) % 16))),
-+ p_FmHc->dataMemId,
-+ 16);
-+#else
-+ p_FmHc->p_Frm[i] = (t_HcFrame *)XX_MallocSmart(sizeof(t_HcFrame),
-+ p_FmHc->dataMemId,
-+ 16);
-+#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */
-+ if (!p_FmHc->p_Frm[i])
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM HC frames!"));
-+ }
-+
-+ /* Initialize FIFO of seqNum to use during GetBuf */
-+ for (i = 0; i < HC_CMD_POOL_SIZE; i++)
-+ {
-+ p_FmHc->seqNum[i] = i;
-+ }
-+ p_FmHc->nextSeqNumLocation = 0;
-+
-+ return E_OK;
-+}
-+
-+static __inline__ t_HcFrame * GetBuf(t_FmHc *p_FmHc, uint32_t *p_SeqNum)
-+{
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_FmHc);
-+
-+ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
-+
-+ if (p_FmHc->nextSeqNumLocation == HC_CMD_POOL_SIZE)
-+ {
-+ /* No more buffers */
-+ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
-+ return NULL;
-+ }
-+
-+ *p_SeqNum = p_FmHc->seqNum[p_FmHc->nextSeqNumLocation];
-+ p_FmHc->nextSeqNumLocation++;
-+
-+ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
-+ return p_FmHc->p_Frm[*p_SeqNum];
-+}
-+
-+static __inline__ void PutBuf(t_FmHc *p_FmHc, t_HcFrame *p_Buf, uint32_t seqNum)
-+{
-+ uint32_t intFlags;
-+
-+ UNUSED(p_Buf);
-+
-+ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
-+ ASSERT_COND(p_FmHc->nextSeqNumLocation);
-+ p_FmHc->nextSeqNumLocation--;
-+ p_FmHc->seqNum[p_FmHc->nextSeqNumLocation] = seqNum;
-+ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
-+}
-+
-+static __inline__ t_Error EnQFrm(t_FmHc *p_FmHc, t_DpaaFD *p_FmFd, uint32_t seqNum)
-+{
-+ t_Error err = E_OK;
-+ uint32_t intFlags;
-+ uint32_t timeout=100;
-+
-+ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
-+ ASSERT_COND(!p_FmHc->enqueued[seqNum]);
-+ p_FmHc->enqueued[seqNum] = TRUE;
-+ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
-+ DBG(TRACE, ("Send Hc, SeqNum %d, buff@0x%x, fd offset 0x%x",
-+ seqNum,
-+ DPAA_FD_GET_ADDR(p_FmFd),
-+ DPAA_FD_GET_OFFSET(p_FmFd)));
-+ err = p_FmHc->f_QmEnqueue(p_FmHc->h_QmArg, (void *)p_FmFd);
-+ if (err)
-+ RETURN_ERROR(MINOR, err, ("HC enqueue failed"));
-+
-+ while (p_FmHc->enqueued[seqNum] && --timeout)
-+ XX_UDelay(100);
-+
-+ if (!timeout)
-+ RETURN_ERROR(MINOR, E_TIMEOUT, ("HC Callback, timeout exceeded"));
-+
-+ return err;
-+}
-+
-+
-+t_Handle FmHcConfigAndInit(t_FmHcParams *p_FmHcParams)
-+{
-+ t_FmHc *p_FmHc;
-+ t_FmPortParams fmPortParam;
-+ t_Error err;
-+
-+ p_FmHc = (t_FmHc *)XX_Malloc(sizeof(t_FmHc));
-+ if (!p_FmHc)
-+ {
-+ REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC obj"));
-+ return NULL;
-+ }
-+ memset(p_FmHc,0,sizeof(t_FmHc));
-+
-+ p_FmHc->h_FmPcd = p_FmHcParams->h_FmPcd;
-+ p_FmHc->f_QmEnqueue = p_FmHcParams->params.f_QmEnqueue;
-+ p_FmHc->h_QmArg = p_FmHcParams->params.h_QmArg;
-+ p_FmHc->dataMemId = DEFAULT_dataMemId;
-+
-+ err = FillBufPool(p_FmHc);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ FmHcFree(p_FmHc);
-+ return NULL;
-+ }
-+
-+ if (!FmIsMaster(p_FmHcParams->h_Fm))
-+ return (t_Handle)p_FmHc;
-+
-+ memset(&fmPortParam, 0, sizeof(fmPortParam));
-+ fmPortParam.baseAddr = p_FmHcParams->params.portBaseAddr;
-+ fmPortParam.portType = e_FM_PORT_TYPE_OH_HOST_COMMAND;
-+ fmPortParam.portId = p_FmHcParams->params.portId;
-+ fmPortParam.liodnBase = p_FmHcParams->params.liodnBase;
-+ fmPortParam.h_Fm = p_FmHcParams->h_Fm;
-+
-+ fmPortParam.specificParams.nonRxParams.errFqid = p_FmHcParams->params.errFqid;
-+ fmPortParam.specificParams.nonRxParams.dfltFqid = p_FmHcParams->params.confFqid;
-+ fmPortParam.specificParams.nonRxParams.qmChannel = p_FmHcParams->params.qmChannel;
-+
-+ p_FmHc->h_HcPortDev = FM_PORT_Config(&fmPortParam);
-+ if (!p_FmHc->h_HcPortDev)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM HC port!"));
-+ FmHcFree(p_FmHc); /* also frees the buffer pool allocated above */
-+ return NULL;
-+ }
-+
-+ err = FM_PORT_ConfigMaxFrameLength(p_FmHc->h_HcPortDev,
-+ (uint16_t)sizeof(t_HcFrame));
-+
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, err, ("FM HC port init!"));
-+ FmHcFree(p_FmHc);
-+ return NULL;
-+ }
-+
-+ /* final init */
-+ err = FM_PORT_Init(p_FmHc->h_HcPortDev);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, err, ("FM HC port init!"));
-+ FmHcFree(p_FmHc);
-+ return NULL;
-+ }
-+
-+ err = FM_PORT_Enable(p_FmHc->h_HcPortDev);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, err, ("FM HC port enable!"));
-+ FmHcFree(p_FmHc);
-+ return NULL;
-+ }
-+
-+ return (t_Handle)p_FmHc;
-+}
-+
-+void FmHcFree(t_Handle h_FmHc)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ int i;
-+
-+ if (!p_FmHc)
-+ return;
-+
-+ for (i=0; i<HC_CMD_POOL_SIZE; i++)
-+ if (p_FmHc->p_Frm[i])
-+ XX_FreeSmart(p_FmHc->p_Frm[i]);
-+ else
-+ break;
-+
-+ if (p_FmHc->h_HcPortDev)
-+ FM_PORT_Free(p_FmHc->h_HcPortDev);
-+
-+ XX_Free(p_FmHc);
-+}
-+
-+/*****************************************************************************/
-+t_Error FmHcSetFramesDataMemory(t_Handle h_FmHc,
-+ uint8_t memId)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ int i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmHc, E_INVALID_HANDLE);
-+
-+ p_FmHc->dataMemId = memId;
-+
-+ for (i=0; i<HC_CMD_POOL_SIZE; i++)
-+ if (p_FmHc->p_Frm[i])
-+ XX_FreeSmart(p_FmHc->p_Frm[i]);
-+
-+ return FillBufPool(p_FmHc);
-+}
-+
-+void FmHcTxConf(t_Handle h_FmHc, t_DpaaFD *p_Fd)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_FmHc);
-+
-+ intFlags = FmPcdLock(p_FmHc->h_FmPcd);
-+ p_HcFrame = (t_HcFrame *)PTR_MOVE(DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd));
-+
-+ DBG(TRACE, ("Hc Conf, SeqNum %d, FD@0x%x, fd offset 0x%x",
-+ p_HcFrame->commandSequence, DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd)));
-+
-+ if (!(p_FmHc->enqueued[p_HcFrame->commandSequence]))
-+ REPORT_ERROR(MINOR, E_INVALID_FRAME, ("Not an Host-Command frame received!"));
-+ else
-+ p_FmHc->enqueued[p_HcFrame->commandSequence] = FALSE;
-+ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags);
-+}
-+
-+t_Error FmHcPcdKgSetScheme(t_Handle h_FmHc,
-+ t_Handle h_Scheme,
-+ struct fman_kg_scheme_regs *p_SchemeRegs,
-+ bool updateCounter)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_Error err = E_OK;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint8_t physicalSchemeId;
-+ uint32_t seqNum;
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+
-+ physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
-+
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, updateCounter);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+ memcpy(&p_HcFrame->hcSpecificData.schemeRegs, p_SchemeRegs, sizeof(struct fman_kg_scheme_regs));
-+ if (!updateCounter)
-+ {
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_dv0 = p_SchemeRegs->kgse_dv0;
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_dv1 = p_SchemeRegs->kgse_dv1;
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_ccbs = p_SchemeRegs->kgse_ccbs;
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_mv = p_SchemeRegs->kgse_mv;
-+ }
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdKgDeleteScheme(t_Handle h_FmHc, t_Handle h_Scheme)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_Error err = E_OK;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint8_t physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
-+ uint32_t seqNum;
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+ memset(&p_HcFrame->hcSpecificData.schemeRegs, 0, sizeof(struct fman_kg_scheme_regs));
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdKgCcGetSetParams(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_Error err = E_OK;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint8_t relativeSchemeId;
-+ uint8_t physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
-+ uint32_t tmpReg32 = 0;
-+ uint32_t seqNum;
-+
-+ /* Scheme is locked by calling routine */
-+    /* WARNING - this lock will not be effective if another HC routine attempts to change
-+     * "kgse_mode" or "kgse_om" without locking the scheme!
-+     */
-+
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
-+ if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+
-+ if (!FmPcdKgGetRequiredActionFlag(p_FmHc->h_FmPcd, relativeSchemeId) ||
-+ !(FmPcdKgGetRequiredAction(p_FmHc->h_FmPcd, relativeSchemeId) & requiredAction))
-+ {
-+ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) &&
-+ (FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_PLCR))
-+ {
-+ if ((FmPcdKgIsDirectPlcr(p_FmHc->h_FmPcd, relativeSchemeId) == FALSE) ||
-+ (FmPcdKgIsDistrOnPlcrProfile(p_FmHc->h_FmPcd, relativeSchemeId) == TRUE))
-+                RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this situation the policer profile cannot be used with distribution and has to be shared"));
-+ err = FmPcdPlcrCcGetSetParams(p_FmHc->h_FmPcd, FmPcdKgGetRelativeProfileId(p_FmHc->h_FmPcd, relativeSchemeId), requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ else /* From here we deal with KG-Schemes only */
-+ {
-+ /* Pre change general code */
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+ p_HcFrame->commandSequence = seqNum;
-+ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ /* specific change */
-+ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) &&
-+ ((FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_DONE) &&
-+ (FmPcdKgGetDoneAction(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_ENQ_FRAME)))
-+ {
-+ tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
-+ ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME));
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+ }
-+
-+ if ((requiredAction & UPDATE_KG_NIA_CC_WA) &&
-+ (FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_CC))
-+ {
-+ tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
-+ ASSERT_COND(tmpReg32 & (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
-+ tmpReg32 &= ~NIA_FM_CTL_AC_CC;
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32 | NIA_FM_CTL_AC_PRE_CC;
-+ }
-+
-+ if (requiredAction & UPDATE_KG_OPT_MODE)
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_om = value;
-+
-+ if (requiredAction & UPDATE_KG_NIA)
-+ {
-+ tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
-+ tmpReg32 &= ~(NIA_ENG_MASK | NIA_AC_MASK);
-+ tmpReg32 |= value;
-+ p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32;
-+ }
-+
-+ /* Post change general code */
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+uint32_t FmHcPcdKgGetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_Error err;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint32_t retVal;
-+ uint8_t relativeSchemeId;
-+ uint8_t physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
-+ uint32_t seqNum;
-+
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
-+ if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+ return 0;
-+ }
-+
-+ /* first read scheme and check that it is valid */
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ {
-+ REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ return 0;
-+ }
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+ if (err != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return 0;
-+ }
-+
-+ if (!FmPcdKgHwSchemeIsValid(p_HcFrame->hcSpecificData.schemeRegs.kgse_mode))
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is invalid"));
-+ return 0;
-+ }
-+
-+ retVal = p_HcFrame->hcSpecificData.schemeRegs.kgse_spc;
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ return retVal;
-+}
-+
-+t_Error FmHcPcdKgSetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t value)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_Error err = E_OK;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint8_t relativeSchemeId, physicalSchemeId;
-+ uint32_t seqNum;
-+
-+ physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
-+ if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+
-+    /* build a write command for the scheme counter */
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_COUNTER;
-+ /* write counter */
-+ p_HcFrame->hcSpecificData.singleRegForWrite = value;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ return err;
-+}
-+
-+t_Error FmHcPcdKgSetClsPlan(t_Handle h_FmHc, t_FmPcdKgInterModuleClsPlanSet *p_Set)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint8_t i, idx;
-+ uint32_t seqNum;
-+ t_Error err = E_OK;
-+
-+ ASSERT_COND(p_FmHc);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+
-+ for (i = p_Set->baseEntry; i < (p_Set->baseEntry+p_Set->numOfClsPlanEntries); i+=8)
-+ {
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP));
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+
-+ idx = (uint8_t)(i - p_Set->baseEntry);
-+ ASSERT_COND(idx < FM_PCD_MAX_NUM_OF_CLS_PLANS);
-+ memcpy(&p_HcFrame->hcSpecificData.clsPlanEntries, &p_Set->vectors[idx], CLS_PLAN_NUM_PER_GRP*sizeof(uint32_t));
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+ }
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ return err;
-+}
-+
-+t_Error FmHcPcdKgDeleteClsPlan(t_Handle h_FmHc, uint8_t grpId)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet;
-+
-+ p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
-+ if (!p_ClsPlanSet)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
-+
-+ memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));
-+
-+ p_ClsPlanSet->baseEntry = FmPcdKgGetClsPlanGrpBase(p_FmHc->h_FmPcd, grpId);
-+ p_ClsPlanSet->numOfClsPlanEntries = FmPcdKgGetClsPlanGrpSize(p_FmHc->h_FmPcd, grpId);
-+ ASSERT_COND(p_ClsPlanSet->numOfClsPlanEntries <= FM_PCD_MAX_NUM_OF_CLS_PLANS);
-+
-+ if (FmHcPcdKgSetClsPlan(p_FmHc, p_ClsPlanSet) != E_OK)
-+ {
-+ XX_Free(p_ClsPlanSet);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+ }
-+
-+ XX_Free(p_ClsPlanSet);
-+ FmPcdKgDestroyClsPlanGrp(p_FmHc->h_FmPcd, grpId);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdCcCapwapTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcCapwapReassmTimeoutParams *p_CcCapwapReassmTimeoutParams )
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err;
-+ uint32_t seqNum;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT);
-+ memcpy(&p_HcFrame->hcSpecificData.ccCapwapReassmTimeout, p_CcCapwapReassmTimeoutParams, sizeof(t_FmPcdCcCapwapReassmTimeoutParams));
-+ p_HcFrame->commandSequence = seqNum;
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ return err;
-+}
-+
-+t_Error FmHcPcdCcIpFragScratchPollCmd(t_Handle h_FmHc, bool fill, t_FmPcdCcFragScratchPoolCmdParams *p_FmPcdCcFragScratchPoolCmdParams)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err;
-+ uint32_t seqNum;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_IP_FRAG_INITIALIZATION);
-+ p_HcFrame->actionReg = (uint32_t)(((fill == TRUE) ? 0 : 1) << HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_CMD_SHIFT);
-+ p_HcFrame->actionReg |= p_FmPcdCcFragScratchPoolCmdParams->bufferPoolId << HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_BPID;
-+ if (fill == TRUE)
-+ {
-+ p_HcFrame->extraReg = p_FmPcdCcFragScratchPoolCmdParams->numOfBuffers;
-+ }
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ p_FmPcdCcFragScratchPoolCmdParams->numOfBuffers = p_HcFrame->extraReg;
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdCcTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcReassmTimeoutParams *p_CcReassmTimeoutParams, uint8_t *p_Result)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err;
-+ uint32_t seqNum;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_REASSM_TIMEOUT);
-+ p_HcFrame->actionReg = (uint32_t)((p_CcReassmTimeoutParams->activate ? 0 : 1) << HC_HCOR_ACTION_REG_REASSM_TIMEOUT_ACTIVE_SHIFT);
-+ p_HcFrame->extraReg = (p_CcReassmTimeoutParams->tsbs << HC_HCOR_EXTRA_REG_REASSM_TIMEOUT_TSBS_SHIFT) | p_CcReassmTimeoutParams->iprcpt;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ *p_Result = (uint8_t)
-+ ((p_HcFrame->actionReg >> HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_SHIFT) & HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_MASK);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdPlcrCcGetSetParams(t_Handle h_FmHc,uint16_t absoluteProfileId, uint32_t requiredAction)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err;
-+ uint32_t tmpReg32 = 0;
-+ uint32_t requiredActionTmp, requiredActionFlag;
-+ uint32_t seqNum;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);
-+
-+ /* Profile is locked by calling routine */
-+    /* WARNING - this lock will not be effective if another HC routine attempts to change
-+     * "fmpl_pegnia", "fmpl_peynia" or "fmpl_pernia" without locking the profile!
-+     */
-+
-+ requiredActionTmp = FmPcdPlcrGetRequiredAction(p_FmHc->h_FmPcd, absoluteProfileId);
-+ requiredActionFlag = FmPcdPlcrGetRequiredActionFlag(p_FmHc->h_FmPcd, absoluteProfileId);
-+
-+ if (!requiredActionFlag || !(requiredActionTmp & requiredAction))
-+ {
-+ if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
-+ {
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+            /* first read the profile and check that it is valid */
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId);
-+ p_HcFrame->extraReg = 0x00008000;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
-+
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegnia;
-+ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
-+ }
-+
-+ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
-+ p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(TRUE, FALSE, FALSE);
-+ p_HcFrame->extraReg = 0x00008000;
-+ p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
-+
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_peynia;
-+ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
-+ }
-+
-+ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
-+ p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, TRUE, FALSE);
-+ p_HcFrame->extraReg = 0x00008000;
-+ p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
-+
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pernia;
-+ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
-+ }
-+
-+ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
-+ p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, FALSE, TRUE);
-+ p_HcFrame->extraReg = 0x00008000;
-+ p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
-+
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdPlcrSetProfile(t_Handle h_FmHc, t_Handle h_Profile, t_FmPcdPlcrProfileRegs *p_PlcrRegs)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_Error err = E_OK;
-+ uint16_t profileIndx;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint32_t seqNum;
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+
-+ profileIndx = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
-+
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx);
-+ p_HcFrame->extraReg = 0x00008000;
-+ memcpy(&p_HcFrame->hcSpecificData.profileRegs, p_PlcrRegs, sizeof(t_FmPcdPlcrProfileRegs));
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdPlcrDeleteProfile(t_Handle h_FmHc, t_Handle h_Profile)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
-+ t_Error err = E_OK;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint32_t seqNum;
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
-+ p_HcFrame->actionReg |= 0x00008000;
-+ p_HcFrame->extraReg = 0x00008000;
-+ memset(&p_HcFrame->hcSpecificData.profileRegs, 0, sizeof(t_FmPcdPlcrProfileRegs));
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdPlcrSetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value)
-+{
-+
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
-+ t_Error err = E_OK;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint32_t seqNum;
-+
-+    /* build a write command for the profile counter */
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
-+ p_HcFrame->actionReg |= FmPcdPlcrBuildCounterProfileReg(counter);
-+ p_HcFrame->extraReg = 0x00008000;
-+ p_HcFrame->hcSpecificData.singleRegForWrite = value;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+uint32_t FmHcPcdPlcrGetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
-+ t_Error err;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ uint32_t retVal = 0;
-+ uint32_t seqNum;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);
-+
-+    /* first read the profile registers */
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ {
-+ REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ return 0;
-+ }
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
-+ p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId);
-+ p_HcFrame->extraReg = 0x00008000;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+ if (err != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return 0;
-+ }
-+
-+ switch (counter)
-+ {
-+ case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER:
-+ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegpc;
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER:
-+ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_peypc;
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER:
-+ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perpc;
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER:
-+ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perypc;
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER:
-+ retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perrpc;
-+ break;
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ return retVal;
-+}
-+
-+t_Error FmHcKgWriteSp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t spReg, bool add)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err = E_OK;
-+ uint32_t seqNum;
-+
-+ ASSERT_COND(p_FmHc);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+ /* first read SP register */
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_PORT_REGS);
-+
-+ if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
-+ {
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ /* spReg is the first reg, so we can use it both for read and for write */
-+ if (add)
-+ p_HcFrame->hcSpecificData.portRegsForRead.spReg |= spReg;
-+ else
-+ p_HcFrame->hcSpecificData.portRegsForRead.spReg &= ~spReg;
-+
-+ p_HcFrame->actionReg = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId);
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcKgWriteCpp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t cppReg)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err = E_OK;
-+ uint32_t seqNum;
-+
-+ ASSERT_COND(p_FmHc);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+    /* write the port's classification plan bind (CPP) register */
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
-+ p_HcFrame->actionReg = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId);
-+ p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
-+ p_HcFrame->hcSpecificData.singleRegForWrite = cppReg;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdCcDoDynamicChange(t_Handle h_FmHc, uint32_t oldAdAddrOffset, uint32_t newAdAddrOffset)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err = E_OK;
-+ uint32_t seqNum;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmHc, E_INVALID_HANDLE);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC);
-+ p_HcFrame->actionReg = newAdAddrOffset;
-+ p_HcFrame->actionReg |= 0xc0000000;
-+ p_HcFrame->extraReg = oldAdAddrOffset;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmHcPcdSync(t_Handle h_FmHc)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ t_HcFrame *p_HcFrame;
-+ t_DpaaFD fmFd;
-+ t_Error err = E_OK;
-+ uint32_t seqNum;
-+
-+ ASSERT_COND(p_FmHc);
-+
-+ p_HcFrame = GetBuf(p_FmHc, &seqNum);
-+ if (!p_HcFrame)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
-+ memset(p_HcFrame, 0, sizeof(t_HcFrame));
-+    /* issue a sync command */
-+ p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_SYNC);
-+ p_HcFrame->actionReg = 0;
-+ p_HcFrame->extraReg = 0;
-+ p_HcFrame->commandSequence = seqNum;
-+
-+ BUILD_FD(sizeof(t_HcFrame));
-+
-+ err = EnQFrm(p_FmHc, &fmFd, seqNum);
-+
-+ PutBuf(p_FmHc, p_HcFrame, seqNum);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Handle FmHcGetPort(t_Handle h_FmHc)
-+{
-+ t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
-+ return p_FmHc->h_HcPortDev;
-+}
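
Every FmHc* routine in the hunk above follows the same host-command round trip: take a frame from the preallocated pool (GetBuf), fill in the opcode, action and extra registers, wrap the frame in a frame descriptor (BUILD_FD), enqueue it and wait for the confirmation (EnQFrm), then return the buffer (PutBuf) regardless of the outcome. The self-contained C sketch below mirrors only that sequencing; hc_cmd, get_buf and enqueue_and_wait are simplified stand-ins, not the SDK API.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define POOL_SIZE 8

    struct hc_cmd {                        /* simplified stand-in for t_HcFrame */
        uint32_t opcode;
        uint32_t action;
        uint32_t seq;
    };

    static struct hc_cmd pool[POOL_SIZE];
    static int in_use[POOL_SIZE];

    static struct hc_cmd *get_buf(uint32_t *seq)
    {
        for (int i = 0; i < POOL_SIZE; i++)
            if (!in_use[i]) {
                in_use[i] = 1;
                *seq = (uint32_t)i;
                return &pool[i];
            }
        return NULL;                       /* pool exhausted: caller reports E_NO_MEMORY */
    }

    static void put_buf(uint32_t seq)
    {
        in_use[seq] = 0;
    }

    static int enqueue_and_wait(const struct hc_cmd *cmd)
    {
        /* the real EnQFrm() enqueues a frame descriptor to the HC port and
         * polls the confirmation queue until the matching sequence number
         * comes back; here we just pretend the command succeeded */
        printf("cmd 0x%08x seq %u confirmed\n",
               (unsigned)cmd->opcode, (unsigned)cmd->seq);
        return 0;
    }

    int issue_hc_command(uint32_t opcode, uint32_t action)
    {
        uint32_t seq;
        struct hc_cmd *cmd = get_buf(&seq);
        int err;

        if (!cmd)
            return -1;

        memset(cmd, 0, sizeof(*cmd));
        cmd->opcode = opcode;
        cmd->action = action;
        cmd->seq    = seq;

        err = enqueue_and_wait(cmd);       /* EnQFrm() equivalent */
        put_buf(seq);                      /* the buffer is always returned */
        return err;
    }

    int main(void)
    {
        return issue_hc_command(0x12340000u, 0x1u);
    }
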
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/Makefile
-@@ -0,0 +1,28 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+obj-y += fsl-ncsw-MAC.o
-+
-+fsl-ncsw-MAC-objs := dtsec.o dtsec_mii_acc.o fm_mac.o tgec.o tgec_mii_acc.o \
-+ fman_dtsec.o fman_dtsec_mii_acc.o fman_memac.o \
-+ fman_tgec.o fman_crc32.o
-+
-+ifeq ($(CONFIG_FMAN_V3H),y)
-+fsl-ncsw-MAC-objs += memac.o memac_mii_acc.o fman_memac_mii_acc.o
-+endif
-+ifeq ($(CONFIG_FMAN_V3L),y)
-+fsl-ncsw-MAC-objs += memac.o memac_mii_acc.o fman_memac_mii_acc.o
-+endif
-+ifeq ($(CONFIG_FMAN_ARM),y)
-+fsl-ncsw-MAC-objs += memac.o memac_mii_acc.o fman_memac_mii_acc.o
-+endif
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c
-@@ -0,0 +1,1504 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File dtsec.c
-+
-+ @Description FMan dTSEC driver
-+*//***************************************************************************/
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "xx_ext.h"
-+#include "endian_ext.h"
-+#include "debug_ext.h"
-+#include "crc_mac_addr_ext.h"
-+
-+#include "fm_common.h"
-+#include "dtsec.h"
-+#include "fsl_fman_dtsec.h"
-+#include "fsl_fman_dtsec_mii_acc.h"
-+
-+/*****************************************************************************/
-+/* Internal routines */
-+/*****************************************************************************/
-+
-+static t_Error CheckInitParameters(t_Dtsec *p_Dtsec)
-+{
-+ if (ENET_SPEED_FROM_MODE(p_Dtsec->enetMode) >= e_ENET_SPEED_10000)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 1G MAC driver only supports 1G or lower speeds"));
-+ if (p_Dtsec->macId >= FM_MAX_NUM_OF_1G_MACS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("macId can not be greater than the number of 1G MACs"));
-+ if (p_Dtsec->addr == 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC Must have a valid MAC Address"));
-+ if ((ENET_SPEED_FROM_MODE(p_Dtsec->enetMode) >= e_ENET_SPEED_1000) &&
-+ p_Dtsec->p_DtsecDriverParam->halfdup_on)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC 1G can't work in half duplex"));
-+ if (p_Dtsec->p_DtsecDriverParam->halfdup_on && (p_Dtsec->p_DtsecDriverParam)->loopback)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("LoopBack is not supported in halfDuplex mode"));
-+#ifdef FM_RX_PREAM_4_ERRATA_DTSEC_A001
-+ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev <= 6) /* fixed for rev3 */
-+ if (p_Dtsec->p_DtsecDriverParam->rx_preamble)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("preambleRxEn"));
-+#endif /* FM_RX_PREAM_4_ERRATA_DTSEC_A001 */
-+ if (((p_Dtsec->p_DtsecDriverParam)->tx_preamble || (p_Dtsec->p_DtsecDriverParam)->rx_preamble) &&( (p_Dtsec->p_DtsecDriverParam)->preamble_len != 0x7))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Preamble length should be 0x7 bytes"));
-+ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_on &&
-+ (p_Dtsec->p_DtsecDriverParam->tx_time_stamp_en || p_Dtsec->p_DtsecDriverParam->rx_time_stamp_en))
-+        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dTSEC in half duplex mode requires 1588 timestamping to be disabled"));
-+ if ((p_Dtsec->p_DtsecDriverParam)->rx_flow && (p_Dtsec->p_DtsecDriverParam)->rx_ctrl_acc )
-+        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Receive control frames are not passed to system memory, so they cannot be accepted"));
-+ if ((p_Dtsec->p_DtsecDriverParam)->rx_prepend > MAX_PACKET_ALIGNMENT)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("packetAlignmentPadding can't be greater than %d ",MAX_PACKET_ALIGNMENT ));
-+ if (((p_Dtsec->p_DtsecDriverParam)->non_back_to_back_ipg1 > MAX_INTER_PACKET_GAP) ||
-+ ((p_Dtsec->p_DtsecDriverParam)->non_back_to_back_ipg2 > MAX_INTER_PACKET_GAP) ||
-+ ((p_Dtsec->p_DtsecDriverParam)->back_to_back_ipg > MAX_INTER_PACKET_GAP))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inter packet gap can't be greater than %d ",MAX_INTER_PACKET_GAP ));
-+ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_alt_backoff_val > MAX_INTER_PALTERNATE_BEB)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("alternateBackoffVal can't be greater than %d ",MAX_INTER_PALTERNATE_BEB ));
-+ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_retransmit > MAX_RETRANSMISSION)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("maxRetransmission can't be greater than %d ",MAX_RETRANSMISSION ));
-+ if ((p_Dtsec->p_DtsecDriverParam)->halfdup_coll_window > MAX_COLLISION_WINDOW)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("collisionWindow can't be greater than %d ",MAX_COLLISION_WINDOW ));
-+
-+ /* If Auto negotiation process is disabled, need to */
-+ /* Set up the PHY using the MII Management Interface */
-+ if (p_Dtsec->p_DtsecDriverParam->tbipa > MAX_PHYS)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("PHY address (should be 0-%d)", MAX_PHYS));
-+ if (!p_Dtsec->f_Exception)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("uninitialized f_Exception"));
-+ if (!p_Dtsec->f_Event)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("uninitialized f_Event"));
-+
-+#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002
-+ if (p_Dtsec->p_DtsecDriverParam->rx_len_check)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!"));
-+#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static uint32_t GetMacAddrHashCode(uint64_t ethAddr)
-+{
-+ uint32_t crc;
-+
-+ /* CRC calculation */
-+ GET_MAC_ADDR_CRC(ethAddr, crc);
-+
-+ crc = GetMirror32(crc);
-+
-+ return crc;
-+}
-+
-+/* ......................................................................... */
-+
-+static void UpdateStatistics(t_Dtsec *p_Dtsec)
-+{
-+ uint32_t car1, car2;
-+
-+ fman_dtsec_get_clear_carry_regs(p_Dtsec->p_MemMap, &car1, &car2);
-+
-+ if (car1)
-+ {
-+ if (car1 & CAR1_TR64)
-+ p_Dtsec->internalStatistics.tr64 += VAL22BIT;
-+ if (car1 & CAR1_TR127)
-+ p_Dtsec->internalStatistics.tr127 += VAL22BIT;
-+ if (car1 & CAR1_TR255)
-+ p_Dtsec->internalStatistics.tr255 += VAL22BIT;
-+ if (car1 & CAR1_TR511)
-+ p_Dtsec->internalStatistics.tr511 += VAL22BIT;
-+ if (car1 & CAR1_TRK1)
-+ p_Dtsec->internalStatistics.tr1k += VAL22BIT;
-+ if (car1 & CAR1_TRMAX)
-+ p_Dtsec->internalStatistics.trmax += VAL22BIT;
-+ if (car1 & CAR1_TRMGV)
-+ p_Dtsec->internalStatistics.trmgv += VAL22BIT;
-+ if (car1 & CAR1_RBYT)
-+ p_Dtsec->internalStatistics.rbyt += (uint64_t)VAL32BIT;
-+ if (car1 & CAR1_RPKT)
-+ p_Dtsec->internalStatistics.rpkt += VAL22BIT;
-+ if (car1 & CAR1_RMCA)
-+ p_Dtsec->internalStatistics.rmca += VAL22BIT;
-+ if (car1 & CAR1_RBCA)
-+ p_Dtsec->internalStatistics.rbca += VAL22BIT;
-+ if (car1 & CAR1_RXPF)
-+ p_Dtsec->internalStatistics.rxpf += VAL16BIT;
-+ if (car1 & CAR1_RALN)
-+ p_Dtsec->internalStatistics.raln += VAL16BIT;
-+ if (car1 & CAR1_RFLR)
-+ p_Dtsec->internalStatistics.rflr += VAL16BIT;
-+ if (car1 & CAR1_RCDE)
-+ p_Dtsec->internalStatistics.rcde += VAL16BIT;
-+ if (car1 & CAR1_RCSE)
-+ p_Dtsec->internalStatistics.rcse += VAL16BIT;
-+ if (car1 & CAR1_RUND)
-+ p_Dtsec->internalStatistics.rund += VAL16BIT;
-+ if (car1 & CAR1_ROVR)
-+ p_Dtsec->internalStatistics.rovr += VAL16BIT;
-+ if (car1 & CAR1_RFRG)
-+ p_Dtsec->internalStatistics.rfrg += VAL16BIT;
-+ if (car1 & CAR1_RJBR)
-+ p_Dtsec->internalStatistics.rjbr += VAL16BIT;
-+ if (car1 & CAR1_RDRP)
-+ p_Dtsec->internalStatistics.rdrp += VAL16BIT;
-+ }
-+ if (car2)
-+ {
-+ if (car2 & CAR2_TFCS)
-+ p_Dtsec->internalStatistics.tfcs += VAL12BIT;
-+ if (car2 & CAR2_TBYT)
-+ p_Dtsec->internalStatistics.tbyt += (uint64_t)VAL32BIT;
-+ if (car2 & CAR2_TPKT)
-+ p_Dtsec->internalStatistics.tpkt += VAL22BIT;
-+ if (car2 & CAR2_TMCA)
-+ p_Dtsec->internalStatistics.tmca += VAL22BIT;
-+ if (car2 & CAR2_TBCA)
-+ p_Dtsec->internalStatistics.tbca += VAL22BIT;
-+ if (car2 & CAR2_TXPF)
-+ p_Dtsec->internalStatistics.txpf += VAL16BIT;
-+ if (car2 & CAR2_TDRP)
-+ p_Dtsec->internalStatistics.tdrp += VAL16BIT;
-+ }
-+}
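
UpdateStatistics above implements the usual carry-register pattern: the MAC's MIB counters are only 12/16/22/32 bits wide, so when one wraps the hardware sets a carry bit and the driver folds that counter's full range into a 64-bit software accumulator; a later statistics read then adds the live hardware value to the accumulator. Below is a minimal, self-contained sketch of the same idea, using made-up carry-bit names rather than the dTSEC register layout.

    #include <stdint.h>
    #include <stdio.h>

    #define CAR_RBYT (1u << 0)             /* illustrative carry bits, not the dTSEC layout */
    #define CAR_RPKT (1u << 1)

    #define VAL32BIT 0x100000000ULL        /* range of a 32-bit hardware counter */
    #define VAL22BIT 0x400000ULL           /* range of a 22-bit hardware counter */

    struct sw_stats {
        uint64_t rbyt;                     /* software carry accumulators */
        uint64_t rpkt;
    };

    /* Called from the carry interrupt: fold each reported wrap-around into a
     * 64-bit accumulator so that (accumulator + current hardware counter)
     * never loses a wrap, even though the hardware counters are narrow. */
    static void fold_carries(struct sw_stats *s, uint32_t car)
    {
        if (car & CAR_RBYT)
            s->rbyt += VAL32BIT;
        if (car & CAR_RPKT)
            s->rpkt += VAL22BIT;
    }

    static uint64_t total_rx_packets(const struct sw_stats *s, uint32_t hw_rpkt)
    {
        return s->rpkt + hw_rpkt;          /* what a statistics read reports */
    }

    int main(void)
    {
        struct sw_stats s = { 0, 0 };

        fold_carries(&s, CAR_RPKT);        /* pretend the 22-bit RPKT counter wrapped once */
        printf("rpkt total = %llu\n",
               (unsigned long long)total_rx_packets(&s, 100));
        return 0;
    }
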
-+
-+/* .............................................................................. */
-+
-+static uint16_t DtsecGetMaxFrameLength(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Dtsec, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE, 0);
-+
-+ return fman_dtsec_get_max_frame_len(p_Dtsec->p_MemMap);
-+}
-+
-+/* .............................................................................. */
-+
-+static void DtsecIsr(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ uint32_t event;
-+ struct dtsec_regs *p_DtsecMemMap = p_Dtsec->p_MemMap;
-+
-+ /* do not handle MDIO events */
-+ event = fman_dtsec_get_event(p_DtsecMemMap, (uint32_t)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN)));
-+
-+ event &= fman_dtsec_get_interrupt_mask(p_DtsecMemMap);
-+
-+ fman_dtsec_ack_event(p_DtsecMemMap, event);
-+
-+ if (event & DTSEC_IMASK_BREN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_BAB_RX);
-+ if (event & DTSEC_IMASK_RXCEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_RX_CTL);
-+ if (event & DTSEC_IMASK_MSROEN)
-+ UpdateStatistics(p_Dtsec);
-+ if (event & DTSEC_IMASK_GTSCEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
-+ if (event & DTSEC_IMASK_BTEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_BAB_TX);
-+ if (event & DTSEC_IMASK_TXCEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_CTL);
-+ if (event & DTSEC_IMASK_TXEEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_ERR);
-+ if (event & DTSEC_IMASK_LCEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_LATE_COL);
-+ if (event & DTSEC_IMASK_CRLEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_COL_RET_LMT);
-+ if (event & DTSEC_IMASK_XFUNEN)
-+ {
-+#ifdef FM_TX_LOCKUP_ERRATA_DTSEC6
-+ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
-+ {
-+ uint32_t tpkt1, tmpReg1, tpkt2, tmpReg2, i;
-+ /* a. Write 0x00E0_0C00 to DTSEC_ID */
-+            /* This is a read-only register */
-+
-+ /* b. Read and save the value of TPKT */
-+ tpkt1 = GET_UINT32(p_DtsecMemMap->tpkt);
-+
-+ /* c. Read the register at dTSEC address offset 0x32C */
-+ tmpReg1 = GET_UINT32(*(uint32_t*)((uint8_t*)p_DtsecMemMap + 0x32c));
-+
-+ /* d. Compare bits [9:15] to bits [25:31] of the register at address offset 0x32C. */
-+ if ((tmpReg1 & 0x007F0000) != (tmpReg1 & 0x0000007F))
-+ {
-+ /* If they are not equal, save the value of this register and wait for at least
-+ * MAXFRM*16 ns */
-+ XX_UDelay((uint32_t)(MIN(DtsecGetMaxFrameLength(p_Dtsec)*16/1000, 1)));
-+ }
-+
-+ /* e. Read and save TPKT again and read the register at dTSEC address offset
-+ 0x32C again*/
-+ tpkt2 = GET_UINT32(p_DtsecMemMap->tpkt);
-+ tmpReg2 = GET_UINT32(*(uint32_t*)((uint8_t*)p_DtsecMemMap + 0x32c));
-+
-+ /* f. Compare the value of TPKT saved in step b to value read in step e. Also
-+ compare bits [9:15] of the register at offset 0x32C saved in step d to the value
-+ of bits [9:15] saved in step e. If the two registers values are unchanged, then
-+ the transmit portion of the dTSEC controller is locked up and the user should
-+ proceed to the recover sequence. */
-+ if ((tpkt1 == tpkt2) && ((tmpReg1 & 0x007F0000) == (tmpReg2 & 0x007F0000)))
-+ {
-+ /* recover sequence */
-+
-+ /* a.Write a 1 to RCTRL[GRS]*/
-+
-+ WRITE_UINT32(p_DtsecMemMap->rctrl, GET_UINT32(p_DtsecMemMap->rctrl) | RCTRL_GRS);
-+
-+ /* b.Wait until IEVENT[GRSC]=1, or at least 100 us has elapsed. */
-+ for (i = 0 ; i < 100 ; i++ )
-+ {
-+ if (GET_UINT32(p_DtsecMemMap->ievent) & DTSEC_IMASK_GRSCEN)
-+ break;
-+ XX_UDelay(1);
-+ }
-+ if (GET_UINT32(p_DtsecMemMap->ievent) & DTSEC_IMASK_GRSCEN)
-+ WRITE_UINT32(p_DtsecMemMap->ievent, DTSEC_IMASK_GRSCEN);
-+ else
-+ DBG(INFO,("Rx lockup due to dTSEC Tx lockup"));
-+
-+ /* c.Write a 1 to bit n of FM_RSTC (offset 0x0CC of FPM)*/
-+ FmResetMac(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MAC_1G, p_Dtsec->fmMacControllerDriver.macId);
-+
-+ /* d.Wait 4 Tx clocks (32 ns) */
-+ XX_UDelay(1);
-+
-+ /* e.Write a 0 to bit n of FM_RSTC. */
-+ /* cleared by FMAN */
-+ }
-+ }
-+#endif /* FM_TX_LOCKUP_ERRATA_DTSEC6 */
-+
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_FIFO_UNDRN);
-+ }
-+ if (event & DTSEC_IMASK_MAGEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_MAG_PCKT);
-+ if (event & DTSEC_IMASK_GRSCEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
-+ if (event & DTSEC_IMASK_TDPEEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_DATA_ERR);
-+ if (event & DTSEC_IMASK_RDPEEN)
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_RX_DATA_ERR);
-+
-+ /* - masked interrupts */
-+ ASSERT_COND(!(event & DTSEC_IMASK_ABRTEN));
-+ ASSERT_COND(!(event & DTSEC_IMASK_IFERREN));
-+}
-+
-+static void DtsecMdioIsr(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ uint32_t event;
-+ struct dtsec_regs *p_DtsecMemMap = p_Dtsec->p_MemMap;
-+
-+ event = GET_UINT32(p_DtsecMemMap->ievent);
-+ /* handle only MDIO events */
-+ event &= (DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN);
-+ if (event)
-+ {
-+ event &= GET_UINT32(p_DtsecMemMap->imask);
-+
-+ WRITE_UINT32(p_DtsecMemMap->ievent, event);
-+
-+ if (event & DTSEC_IMASK_MMRDEN)
-+ p_Dtsec->f_Event(p_Dtsec->h_App, e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET);
-+ if (event & DTSEC_IMASK_MMWREN)
-+ p_Dtsec->f_Event(p_Dtsec->h_App, e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET);
-+ }
-+}
-+
-+static void Dtsec1588Isr(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ uint32_t event;
-+ struct dtsec_regs *p_DtsecMemMap = p_Dtsec->p_MemMap;
-+
-+ if (p_Dtsec->ptpTsuEnabled)
-+ {
-+ event = fman_dtsec_check_and_clear_tmr_event(p_DtsecMemMap);
-+
-+ if (event)
-+ {
-+ ASSERT_COND(event & TMR_PEVENT_TSRE);
-+ p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_1588_TS_RX_ERR);
-+ }
-+ }
-+}
-+
-+/* ........................................................................... */
-+
-+static void FreeInitResources(t_Dtsec *p_Dtsec)
-+{
-+ if (p_Dtsec->mdioIrq != NO_IRQ)
-+ {
-+ XX_DisableIntr(p_Dtsec->mdioIrq);
-+ XX_FreeIntr(p_Dtsec->mdioIrq);
-+ }
-+ FmUnregisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Dtsec->macId, e_FM_INTR_TYPE_ERR);
-+ FmUnregisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Dtsec->macId, e_FM_INTR_TYPE_NORMAL);
-+
-+ /* release the driver's group hash table */
-+ FreeHashTable(p_Dtsec->p_MulticastAddrHash);
-+ p_Dtsec->p_MulticastAddrHash = NULL;
-+
-+ /* release the driver's individual hash table */
-+ FreeHashTable(p_Dtsec->p_UnicastAddrHash);
-+ p_Dtsec->p_UnicastAddrHash = NULL;
-+}
-+
-+/* ........................................................................... */
-+
-+static t_Error GracefulStop(t_Dtsec *p_Dtsec, e_CommMode mode)
-+{
-+ struct dtsec_regs *p_MemMap;
-+ int pollTimeout = 0;
-+
-+ ASSERT_COND(p_Dtsec);
-+
-+ p_MemMap = p_Dtsec->p_MemMap;
-+ ASSERT_COND(p_MemMap);
-+
-+    /* Assert the graceful receive / transmit stop bits */
-+ if (mode & e_COMM_MODE_RX)
-+ {
-+ fman_dtsec_stop_rx(p_MemMap);
-+
-+#ifdef FM_GRS_ERRATA_DTSEC_A002
-+ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
-+ XX_UDelay(100);
-+#else /* FM_GRS_ERRATA_DTSEC_A002 */
-+#ifdef FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839
-+ XX_UDelay(10);
-+#endif /* FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839 */
-+#endif /* FM_GRS_ERRATA_DTSEC_A002 */
-+ }
-+
-+ if (mode & e_COMM_MODE_TX)
-+ {
-+#if defined(FM_GTS_ERRATA_DTSEC_A004)
-+ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
-+ DBG(INFO, ("GTS not supported due to DTSEC_A004 errata."));
-+#else /* not defined(FM_GTS_ERRATA_DTSEC_A004) */
-+
-+ fman_dtsec_stop_tx(p_MemMap);
-+
-+#if defined(FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014) || defined(FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012)
-+ XX_UDelay(10);
-+#endif /* FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014 || FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012 */
-+#endif /* defined(FM_GTS_ERRATA_DTSEC_A004) */
-+ }
-+
-+ /* Poll GRSC/GTSC bits in IEVENT register until both are set */
-+#if defined(FM_GRS_ERRATA_DTSEC_A002) || defined(FM_GTS_ERRATA_DTSEC_A004) || defined(FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012) || defined(FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014) || defined(FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839)
-+ XX_UDelay(10);
-+#else
-+ while (fman_dtsec_get_event(p_MemMap, DTSEC_IMASK_GRSCEN | DTSEC_IMASK_GTSCEN) != (DTSEC_IMASK_GRSCEN | DTSEC_IMASK_GTSCEN))
-+ {
-+ if (pollTimeout == 100)
-+ break;
-+ XX_UDelay(1);
-+ pollTimeout++;
-+ }
-+#endif
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error GracefulRestart(t_Dtsec *p_Dtsec, e_CommMode mode)
-+{
-+ struct dtsec_regs *p_MemMap;
-+
-+ ASSERT_COND(p_Dtsec);
-+ p_MemMap = p_Dtsec->p_MemMap;
-+ ASSERT_COND(p_MemMap);
-+
-+    /* clear the graceful transmit / receive stop bits */
-+ if (mode & e_COMM_MODE_TX)
-+ fman_dtsec_start_tx(p_MemMap);
-+
-+ if (mode & e_COMM_MODE_RX)
-+ fman_dtsec_start_rx(p_MemMap);
-+
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* dTSEC Configs modification functions */
-+/*****************************************************************************/
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecConfigLoopback(t_Handle h_Dtsec, bool newVal)
-+{
-+
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->p_DtsecDriverParam->loopback = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecConfigMaxFrameLength(t_Handle h_Dtsec, uint16_t newVal)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->p_DtsecDriverParam->maximum_frame = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecConfigPadAndCrc(t_Handle h_Dtsec, bool newVal)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->p_DtsecDriverParam->tx_pad_crc = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecConfigHalfDuplex(t_Handle h_Dtsec, bool newVal)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->p_DtsecDriverParam->halfdup_on = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecConfigTbiPhyAddr(t_Handle h_Dtsec, uint8_t newVal)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->p_DtsecDriverParam->tbi_phy_addr = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecConfigLengthCheck(t_Handle h_Dtsec, bool newVal)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->p_DtsecDriverParam->rx_len_check = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecConfigException(t_Handle h_Dtsec, e_FmMacExceptions exception, bool enable)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ if (exception != e_FM_MAC_EX_1G_1588_TS_RX_ERR)
-+ {
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Dtsec->exceptions |= bitMask;
-+ else
-+ p_Dtsec->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+ }
-+ else
-+ {
-+ if (!p_Dtsec->ptpTsuEnabled)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exception valid for 1588 only"));
-+
-+ if (enable)
-+ p_Dtsec->enTsuErrExeption = TRUE;
-+ else
-+ p_Dtsec->enTsuErrExeption = FALSE;
-+ }
-+
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* dTSEC Run Time API functions */
-+/*****************************************************************************/
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecEnable(t_Handle h_Dtsec, e_CommMode mode)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ fman_dtsec_enable(p_Dtsec->p_MemMap,
-+ (bool)!!(mode & e_COMM_MODE_RX),
-+ (bool)!!(mode & e_COMM_MODE_TX));
-+
-+ GracefulRestart(p_Dtsec, mode);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecDisable (t_Handle h_Dtsec, e_CommMode mode)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ GracefulStop(p_Dtsec, mode);
-+
-+ fman_dtsec_disable(p_Dtsec->p_MemMap,
-+ (bool)!!(mode & e_COMM_MODE_RX),
-+ (bool)!!(mode & e_COMM_MODE_TX));
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecSetTxPauseFrames(t_Handle h_Dtsec,
-+ uint8_t priority,
-+ uint16_t pauseTime,
-+ uint16_t threshTime)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ UNUSED(priority);UNUSED(threshTime);
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+#ifdef FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003
-+ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
-+ if (0 < pauseTime && pauseTime <= 320)
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE,
-+ ("This pause-time value of %d is illegal due to errata dTSEC-A003!"
-+                         ("This pause-time value of %d is illegal due to errata dTSEC-A003!"
-+                          " The value should be greater than 320.", pauseTime));
-+
-+ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ fman_dtsec_set_tx_pause_frames(p_Dtsec->p_MemMap, pauseTime);
-+
-+ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+/* backward compatibility. will be removed in the future. */
-+static t_Error DtsecTxMacPause(t_Handle h_Dtsec, uint16_t pauseTime)
-+{
-+ return DtsecSetTxPauseFrames(h_Dtsec, 0, pauseTime, 0);
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecRxIgnoreMacPause(t_Handle h_Dtsec, bool en)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ bool accept_pause = !en;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ fman_dtsec_handle_rx_pause(p_Dtsec->p_MemMap, accept_pause);
-+
-+ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecEnable1588TimeStamp(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->ptpTsuEnabled = TRUE;
-+ fman_dtsec_set_ts(p_Dtsec->p_MemMap, TRUE);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecDisable1588TimeStamp(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->ptpTsuEnabled = FALSE;
-+ fman_dtsec_set_ts(p_Dtsec->p_MemMap, FALSE);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecGetStatistics(t_Handle h_Dtsec, t_FmMacStatistics *p_Statistics)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ struct dtsec_regs *p_DtsecMemMap;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER);
-+
-+ p_DtsecMemMap = p_Dtsec->p_MemMap;
-+
-+ if (p_Dtsec->statisticsLevel == e_FM_MAC_NONE_STATISTICS)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Statistics disabled"));
-+
-+ memset(p_Statistics, 0xff, sizeof(t_FmMacStatistics));
-+
-+ if (p_Dtsec->statisticsLevel == e_FM_MAC_FULL_STATISTICS)
-+ {
-+ p_Statistics->eStatPkts64 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR64)
-+ + p_Dtsec->internalStatistics.tr64;
-+ p_Statistics->eStatPkts65to127 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR127)
-+ + p_Dtsec->internalStatistics.tr127;
-+ p_Statistics->eStatPkts128to255 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR255)
-+ + p_Dtsec->internalStatistics.tr255;
-+ p_Statistics->eStatPkts256to511 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR511)
-+ + p_Dtsec->internalStatistics.tr511;
-+ p_Statistics->eStatPkts512to1023 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TR1K)
-+ + p_Dtsec->internalStatistics.tr1k;
-+ p_Statistics->eStatPkts1024to1518 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TRMAX)
-+ + p_Dtsec->internalStatistics.trmax;
-+ p_Statistics->eStatPkts1519to1522 = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TRMGV)
-+ + p_Dtsec->internalStatistics.trmgv;
-+
-+ /* MIB II */
-+ p_Statistics->ifInOctets = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RBYT)
-+ + p_Dtsec->internalStatistics.rbyt;
-+ p_Statistics->ifInPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RPKT)
-+ + p_Dtsec->internalStatistics.rpkt;
-+ p_Statistics->ifInUcastPkts = 0;
-+ p_Statistics->ifInMcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RMCA)
-+ + p_Dtsec->internalStatistics.rmca;
-+ p_Statistics->ifInBcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RBCA)
-+ + p_Dtsec->internalStatistics.rbca;
-+ p_Statistics->ifOutOctets = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TBYT)
-+ + p_Dtsec->internalStatistics.tbyt;
-+ p_Statistics->ifOutPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TPKT)
-+ + p_Dtsec->internalStatistics.tpkt;
-+ p_Statistics->ifOutUcastPkts = 0;
-+ p_Statistics->ifOutMcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TMCA)
-+ + p_Dtsec->internalStatistics.tmca;
-+ p_Statistics->ifOutBcastPkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TBCA)
-+ + p_Dtsec->internalStatistics.tbca;
-+ }
-+
-+ p_Statistics->eStatFragments = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RFRG)
-+ + p_Dtsec->internalStatistics.rfrg;
-+ p_Statistics->eStatJabbers = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RJBR)
-+ + p_Dtsec->internalStatistics.rjbr;
-+ p_Statistics->eStatsDropEvents = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RDRP)
-+ + p_Dtsec->internalStatistics.rdrp;
-+ p_Statistics->eStatCRCAlignErrors = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RALN)
-+ + p_Dtsec->internalStatistics.raln;
-+ p_Statistics->eStatUndersizePkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RUND)
-+ + p_Dtsec->internalStatistics.rund;
-+ p_Statistics->eStatOversizePkts = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_ROVR)
-+ + p_Dtsec->internalStatistics.rovr;
-+ p_Statistics->reStatPause = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_RXPF)
-+ + p_Dtsec->internalStatistics.rxpf;
-+ p_Statistics->teStatPause = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TXPF)
-+ + p_Dtsec->internalStatistics.txpf;
-+ p_Statistics->ifInDiscards = p_Statistics->eStatsDropEvents;
-+ p_Statistics->ifInErrors = p_Statistics->eStatsDropEvents + p_Statistics->eStatCRCAlignErrors
-+ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_RFLR) + p_Dtsec->internalStatistics.rflr
-+ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_RCDE) + p_Dtsec->internalStatistics.rcde
-+ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_RCSE) + p_Dtsec->internalStatistics.rcse;
-+
-+ p_Statistics->ifOutDiscards = fman_dtsec_get_stat_counter(p_DtsecMemMap, E_DTSEC_STAT_TDRP)
-+ + p_Dtsec->internalStatistics.tdrp;
-+    p_Statistics->ifOutErrors       = p_Statistics->ifOutDiscards  /* number of frames transmitted with error */
-+ + fman_dtsec_get_stat_counter(p_DtsecMemMap,E_DTSEC_STAT_TFCS)
-+ + p_Dtsec->internalStatistics.tfcs;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecModifyMacAddress (t_Handle h_Dtsec, t_EnetAddr *p_EnetAddr)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ /* Initialize MAC Station Address registers (1 & 2) */
-+    /* Station address has to be swapped (big endian to little endian) */
-+ p_Dtsec->addr = ENET_ADDR_TO_UINT64(*p_EnetAddr);
-+
-+ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ fman_dtsec_set_mac_address(p_Dtsec->p_MemMap, (uint8_t *)(*p_EnetAddr));
-+
-+ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecResetCounters (t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ /* clear HW counters */
-+ fman_dtsec_reset_stat(p_Dtsec->p_MemMap);
-+
-+ /* clear SW counters holding carries */
-+ memset(&p_Dtsec->internalStatistics, 0, sizeof(t_InternalStatistics));
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
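-+/* Exact-match filtering uses the individual address (PADDR) registers: a
-+ * multicast address is rejected, an address already present returns
-+ * E_ALREADY_EXISTS, and E_FULL is returned once all DTSEC_NUM_OF_PADDRS
-+ * entries are in use. */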
-+static t_Error DtsecAddExactMatchMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *) h_Dtsec;
-+ uint64_t ethAddr;
-+ uint8_t paddrNum;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ if (ethAddr & GROUP_ADDRESS)
-+ /* Multicast address has no effect in PADDR */
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address"));
-+
-+ /* Make sure no PADDR contains this address */
-+ for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++)
-+ if (p_Dtsec->indAddrRegUsed[paddrNum])
-+ if (p_Dtsec->paddr[paddrNum] == ethAddr)
-+ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
-+
-+ /* Find first unused PADDR */
-+ for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++)
-+ if (!(p_Dtsec->indAddrRegUsed[paddrNum]))
-+ {
-+ /* mark this PADDR as used */
-+ p_Dtsec->indAddrRegUsed[paddrNum] = TRUE;
-+ /* store address */
-+ p_Dtsec->paddr[paddrNum] = ethAddr;
-+
-+ /* put in hardware */
-+ fman_dtsec_add_addr_in_paddr(p_Dtsec->p_MemMap, (uint64_t)PTR_TO_UINT(&ethAddr), paddrNum);
-+ p_Dtsec->numOfIndAddrInRegs++;
-+
-+ return E_OK;
-+ }
-+
-+ /* No free PADDR */
-+ RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecDelExactMatchMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *) h_Dtsec;
-+ uint64_t ethAddr;
-+ uint8_t paddrNum;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ /* Find used PADDR containing this address */
-+ for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++)
-+ {
-+ if ((p_Dtsec->indAddrRegUsed[paddrNum]) &&
-+ (p_Dtsec->paddr[paddrNum] == ethAddr))
-+ {
-+ /* mark this PADDR as not used */
-+ p_Dtsec->indAddrRegUsed[paddrNum] = FALSE;
-+ /* clear in hardware */
-+ fman_dtsec_clear_addr_in_paddr(p_Dtsec->p_MemMap, paddrNum);
-+ p_Dtsec->numOfIndAddrInRegs--;
-+
-+ return E_OK;
-+ }
-+ }
-+
-+ RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG);
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecAddHashMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ t_EthHashEntry *p_HashEntry;
-+ uint64_t ethAddr;
-+ int32_t bucket;
-+ uint32_t crc;
-+ bool mcast, ghtx;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ ghtx = (bool)((fman_dtsec_get_rctrl(p_Dtsec->p_MemMap) & RCTRL_GHTX) ? TRUE : FALSE);
-+ mcast = (bool)((ethAddr & MAC_GROUP_ADDRESS) ? TRUE : FALSE);
-+
-+ if (ghtx && !mcast) /* Cannot handle unicast mac addr when GHTX is on */
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Could not compute hash bucket"));
-+
-+ crc = GetMacAddrHashCode(ethAddr);
-+
-+ /* considering the 9 highest order bits in crc H[8:0]:
-+ * if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
-+ * and H[5:1] (next 5 bits) identify the hash bit
-+     * if ghtx = 1 H[8:5] (highest order 4 bits) identify the hash register
-+ * and H[4:0] (next 5 bits) identify the hash bit.
-+ *
-+ * In bucket index output the low 5 bits identify the hash register bit,
-+ * while the higher 4 bits identify the hash register
-+ */
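-+    /* Example: with ghtx = 0 and crc = 0x5A000000, bucket = 0x5A; for a
-+     * multicast address the bit is set in gaddr instead, giving 0x15A. */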
-+
-+ if (ghtx)
-+ bucket = (int32_t)((crc >> 23) & 0x1ff);
-+ else {
-+ bucket = (int32_t)((crc >> 24) & 0xff);
-+ /* if !ghtx and mcast the bit must be set in gaddr instead of igaddr. */
-+ if (mcast)
-+ bucket += 0x100;
-+ }
-+
-+ fman_dtsec_set_bucket(p_Dtsec->p_MemMap, bucket, TRUE);
-+
-+ /* Create element to be added to the driver hash table */
-+ p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry));
-+ p_HashEntry->addr = ethAddr;
-+ INIT_LIST(&p_HashEntry->node);
-+
-+ if (ethAddr & MAC_GROUP_ADDRESS)
-+ /* Group Address */
-+ LIST_AddToTail(&(p_HashEntry->node), &(p_Dtsec->p_MulticastAddrHash->p_Lsts[bucket]));
-+ else
-+ LIST_AddToTail(&(p_HashEntry->node), &(p_Dtsec->p_UnicastAddrHash->p_Lsts[bucket]));
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecDelHashMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ t_List *p_Pos;
-+ t_EthHashEntry *p_HashEntry = NULL;
-+ uint64_t ethAddr;
-+ int32_t bucket;
-+ uint32_t crc;
-+ bool mcast, ghtx;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ ghtx = (bool)((fman_dtsec_get_rctrl(p_Dtsec->p_MemMap) & RCTRL_GHTX) ? TRUE : FALSE);
-+ mcast = (bool)((ethAddr & MAC_GROUP_ADDRESS) ? TRUE : FALSE);
-+
-+ if (ghtx && !mcast) /* Cannot handle unicast mac addr when GHTX is on */
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Could not compute hash bucket"));
-+
-+ crc = GetMacAddrHashCode(ethAddr);
-+
-+ if (ghtx)
-+ bucket = (int32_t)((crc >> 23) & 0x1ff);
-+ else {
-+ bucket = (int32_t)((crc >> 24) & 0xff);
-+ /* if !ghtx and mcast the bit must be set in gaddr instead of igaddr. */
-+ if (mcast)
-+ bucket += 0x100;
-+ }
-+
-+ if (ethAddr & MAC_GROUP_ADDRESS)
-+ {
-+ /* Group Address */
-+ LIST_FOR_EACH(p_Pos, &(p_Dtsec->p_MulticastAddrHash->p_Lsts[bucket]))
-+ {
-+ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
-+ if (p_HashEntry->addr == ethAddr)
-+ {
-+ LIST_DelAndInit(&p_HashEntry->node);
-+ XX_Free(p_HashEntry);
-+ break;
-+ }
-+ }
-+ if (LIST_IsEmpty(&p_Dtsec->p_MulticastAddrHash->p_Lsts[bucket]))
-+ fman_dtsec_set_bucket(p_Dtsec->p_MemMap, bucket, FALSE);
-+ }
-+ else
-+ {
-+ /* Individual Address */
-+ LIST_FOR_EACH(p_Pos, &(p_Dtsec->p_UnicastAddrHash->p_Lsts[bucket]))
-+ {
-+ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
-+ if (p_HashEntry->addr == ethAddr)
-+ {
-+ LIST_DelAndInit(&p_HashEntry->node);
-+ XX_Free(p_HashEntry);
-+ break;
-+ }
-+ }
-+ if (LIST_IsEmpty(&p_Dtsec->p_UnicastAddrHash->p_Lsts[bucket]))
-+ fman_dtsec_set_bucket(p_Dtsec->p_MemMap, bucket, FALSE);
-+ }
-+
-+    /* the address should have been found in one of the lists above */
-+ ASSERT_COND(p_HashEntry != NULL);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecSetPromiscuous(t_Handle h_Dtsec, bool newVal)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ fman_dtsec_set_uc_promisc(p_Dtsec->p_MemMap, newVal);
-+ fman_dtsec_set_mc_promisc(p_Dtsec->p_MemMap, newVal);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
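-+/* Selects the MIB statistics level in hardware and updates the driver's
-+ * exception mask so the MIB counter overflow interrupt (MSROEN) is enabled
-+ * whenever partial or full statistics are collected. */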
-+static t_Error DtsecSetStatistics(t_Handle h_Dtsec, e_FmMacStatisticsLevel statisticsLevel)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->statisticsLevel = statisticsLevel;
-+
-+ err = (t_Error)fman_dtsec_set_stat_level(p_Dtsec->p_MemMap,
-+ (enum dtsec_stat_level)statisticsLevel);
-+ if (err != E_OK)
-+ return err;
-+
-+ switch (statisticsLevel)
-+ {
-+ case (e_FM_MAC_NONE_STATISTICS):
-+ p_Dtsec->exceptions &= ~DTSEC_IMASK_MSROEN;
-+ break;
-+ case (e_FM_MAC_PARTIAL_STATISTICS):
-+ p_Dtsec->exceptions |= DTSEC_IMASK_MSROEN;
-+ break;
-+ case (e_FM_MAC_FULL_STATISTICS):
-+ p_Dtsec->exceptions |= DTSEC_IMASK_MSROEN;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecSetWakeOnLan(t_Handle h_Dtsec, bool en)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ fman_dtsec_set_wol(p_Dtsec->p_MemMap, en);
-+
-+ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecAdjustLink(t_Handle h_Dtsec, e_EnetSpeed speed, bool fullDuplex)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ int err;
-+ enum enet_interface enet_interface;
-+ enum enet_speed enet_speed;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ p_Dtsec->enetMode = MAKE_ENET_MODE(ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode), speed);
-+ enet_interface = (enum enet_interface) ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode);
-+ enet_speed = (enum enet_speed) ENET_SPEED_FROM_MODE(p_Dtsec->enetMode);
-+ p_Dtsec->halfDuplex = !fullDuplex;
-+
-+ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ err = fman_dtsec_adjust_link(p_Dtsec->p_MemMap, enet_interface, enet_speed, fullDuplex);
-+
-+ if (err == -EINVAL)
-+ RETURN_ERROR(MAJOR, E_CONFLICT, ("Ethernet interface does not support Half Duplex mode"));
-+
-+ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
-+
-+ return (t_Error)err;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecRestartAutoneg(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ uint16_t tmpReg16;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ DTSEC_MII_ReadPhyReg(p_Dtsec, p_Dtsec->tbi_phy_addr, 0, &tmpReg16);
-+
-+ tmpReg16 &= ~( PHY_CR_SPEED0 | PHY_CR_SPEED1 );
-+ tmpReg16 |= (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
-+
-+ DTSEC_MII_WritePhyReg(p_Dtsec, p_Dtsec->tbi_phy_addr, 0, tmpReg16);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecGetId(t_Handle h_Dtsec, uint32_t *macId)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ *macId = p_Dtsec->macId;
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecGetVersion(t_Handle h_Dtsec, uint32_t *macVersion)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ *macVersion = fman_dtsec_get_revision(p_Dtsec->p_MemMap);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error DtsecSetException(t_Handle h_Dtsec, e_FmMacExceptions exception, bool enable)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+
-+ if (exception != e_FM_MAC_EX_1G_1588_TS_RX_ERR)
-+ {
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Dtsec->exceptions |= bitMask;
-+ else
-+ p_Dtsec->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ if (enable)
-+ fman_dtsec_enable_interrupt(p_Dtsec->p_MemMap, bitMask);
-+ else
-+ fman_dtsec_disable_interrupt(p_Dtsec->p_MemMap, bitMask);
-+ }
-+ else
-+ {
-+ if (!p_Dtsec->ptpTsuEnabled)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exception valid for 1588 only"));
-+
-+ if (enable)
-+ {
-+ p_Dtsec->enTsuErrExeption = TRUE;
-+ fman_dtsec_enable_tmr_interrupt(p_Dtsec->p_MemMap);
-+ }
-+ else
-+ {
-+ p_Dtsec->enTsuErrExeption = FALSE;
-+ fman_dtsec_disable_tmr_interrupt(p_Dtsec->p_MemMap);
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* dTSEC Init & Free API */
-+/*****************************************************************************/
-+
-+/* .............................................................................. */
-+
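-+/* Completes initialization of a configured dTSEC: programs the MAC through
-+ * fman_dtsec_init(), brings up the TBI PHY over MII for SGMII modes, reports
-+ * the maximum frame length to the FM, allocates the unicast and multicast
-+ * hash tables, registers the error/1588/MDIO interrupt handlers and enables
-+ * full statistics. The driver parameter structure is freed once the hardware
-+ * has been programmed. */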
-+static t_Error DtsecInit(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ struct dtsec_cfg *p_DtsecDriverParam;
-+ t_Error err;
-+ uint16_t maxFrmLn;
-+ enum enet_interface enet_interface;
-+ enum enet_speed enet_speed;
-+ t_EnetAddr ethAddr;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->fmMacControllerDriver.h_Fm, E_INVALID_HANDLE);
-+
-+ FM_GetRevision(p_Dtsec->fmMacControllerDriver.h_Fm, &p_Dtsec->fmMacControllerDriver.fmRevInfo);
-+ CHECK_INIT_PARAMETERS(p_Dtsec, CheckInitParameters);
-+
-+ p_DtsecDriverParam = p_Dtsec->p_DtsecDriverParam;
-+ p_Dtsec->halfDuplex = p_DtsecDriverParam->halfdup_on;
-+
-+ enet_interface = (enum enet_interface)ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode);
-+ enet_speed = (enum enet_speed)ENET_SPEED_FROM_MODE(p_Dtsec->enetMode);
-+ MAKE_ENET_ADDR_FROM_UINT64(p_Dtsec->addr, ethAddr);
-+
-+ err = (t_Error)fman_dtsec_init(p_Dtsec->p_MemMap,
-+ p_DtsecDriverParam,
-+ enet_interface,
-+ enet_speed,
-+ (uint8_t*)ethAddr,
-+ p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev,
-+ p_Dtsec->fmMacControllerDriver.fmRevInfo.minorRev,
-+ p_Dtsec->exceptions);
-+ if (err)
-+ {
-+ FreeInitResources(p_Dtsec);
-+ RETURN_ERROR(MAJOR, err, ("This DTSEC version does not support the required i/f mode"));
-+ }
-+
-+ if (ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode) == e_ENET_IF_SGMII)
-+ {
-+ uint16_t tmpReg16;
-+
-+ /* Configure the TBI PHY Control Register */
-+ tmpReg16 = PHY_TBICON_CLK_SEL | PHY_TBICON_SRESET;
-+ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 17, tmpReg16);
-+
-+ tmpReg16 = PHY_TBICON_CLK_SEL;
-+ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 17, tmpReg16);
-+
-+ tmpReg16 = (PHY_CR_PHY_RESET | PHY_CR_ANE | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
-+ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 0, tmpReg16);
-+
-+ if (p_Dtsec->enetMode & ENET_IF_SGMII_BASEX)
-+ tmpReg16 = PHY_TBIANA_1000X;
-+ else
-+ tmpReg16 = PHY_TBIANA_SGMII;
-+ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 4, tmpReg16);
-+
-+ tmpReg16 = (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
-+
-+ DTSEC_MII_WritePhyReg(p_Dtsec, (uint8_t)p_DtsecDriverParam->tbipa, 0, tmpReg16);
-+ }
-+
-+ /* Max Frame Length */
-+ maxFrmLn = fman_dtsec_get_max_frame_len(p_Dtsec->p_MemMap);
-+ err = FmSetMacMaxFrame(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MAC_1G,
-+ p_Dtsec->fmMacControllerDriver.macId, maxFrmLn);
-+ if (err)
-+ RETURN_ERROR(MINOR,err, NO_MSG);
-+
-+ p_Dtsec->p_MulticastAddrHash = AllocHashTable(EXTENDED_HASH_TABLE_SIZE);
-+ if (!p_Dtsec->p_MulticastAddrHash) {
-+ FreeInitResources(p_Dtsec);
-+        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MC hash table allocation FAILED"));
-+ }
-+
-+ p_Dtsec->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
-+ if (!p_Dtsec->p_UnicastAddrHash)
-+ {
-+ FreeInitResources(p_Dtsec);
-+        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("UC hash table allocation FAILED"));
-+ }
-+
-+ /* register err intr handler for dtsec to FPM (err)*/
-+ FmRegisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm,
-+ e_FM_MOD_1G_MAC,
-+ p_Dtsec->macId,
-+ e_FM_INTR_TYPE_ERR,
-+ DtsecIsr,
-+ p_Dtsec);
-+ /* register 1588 intr handler for TMR to FPM (normal)*/
-+ FmRegisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm,
-+ e_FM_MOD_1G_MAC,
-+ p_Dtsec->macId,
-+ e_FM_INTR_TYPE_NORMAL,
-+ Dtsec1588Isr,
-+ p_Dtsec);
-+ /* register normal intr handler for dtsec to main interrupt controller. */
-+ if (p_Dtsec->mdioIrq != NO_IRQ)
-+ {
-+ XX_SetIntr(p_Dtsec->mdioIrq, DtsecMdioIsr, p_Dtsec);
-+ XX_EnableIntr(p_Dtsec->mdioIrq);
-+ }
-+
-+ XX_Free(p_DtsecDriverParam);
-+ p_Dtsec->p_DtsecDriverParam = NULL;
-+
-+ err = DtsecSetStatistics(h_Dtsec, e_FM_MAC_FULL_STATISTICS);
-+ if (err)
-+ {
-+ FreeInitResources(p_Dtsec);
-+ RETURN_ERROR(MAJOR, err, ("Undefined statistics level"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+/* ........................................................................... */
-+
-+static t_Error DtsecFree(t_Handle h_Dtsec)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+
-+ if (p_Dtsec->p_DtsecDriverParam)
-+ {
-+ /* Called after config */
-+ XX_Free(p_Dtsec->p_DtsecDriverParam);
-+ p_Dtsec->p_DtsecDriverParam = NULL;
-+ }
-+ else
-+ /* Called after init */
-+ FreeInitResources(p_Dtsec);
-+
-+ XX_Free(p_Dtsec);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
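-+/* Fills the generic FM MAC operations structure with the dTSEC-specific
-+ * handlers; operations that dTSEC does not implement (e.g. ConfigWan) are
-+ * left NULL and handled accordingly by the FM_MAC wrapper layer. */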
-+static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver)
-+{
-+ p_FmMacControllerDriver->f_FM_MAC_Init = DtsecInit;
-+ p_FmMacControllerDriver->f_FM_MAC_Free = DtsecFree;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetStatistics = DtsecSetStatistics;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = DtsecConfigLoopback;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = DtsecConfigMaxFrameLength;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigWan = NULL; /* Not supported on dTSEC */
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = DtsecConfigPadAndCrc;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = DtsecConfigHalfDuplex;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = DtsecConfigLengthCheck;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigTbiPhyAddr = DtsecConfigTbiPhyAddr;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigException = DtsecConfigException;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit = NULL;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_Enable = DtsecEnable;
-+ p_FmMacControllerDriver->f_FM_MAC_Disable = DtsecDisable;
-+ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetException = DtsecSetException;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = DtsecSetPromiscuous;
-+ p_FmMacControllerDriver->f_FM_MAC_AdjustLink = DtsecAdjustLink;
-+ p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan = DtsecSetWakeOnLan;
-+ p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg = DtsecRestartAutoneg;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = DtsecEnable1588TimeStamp;
-+ p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = DtsecDisable1588TimeStamp;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = DtsecTxMacPause;
-+ p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = DtsecSetTxPauseFrames;
-+ p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = DtsecRxIgnoreMacPause;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ResetCounters = DtsecResetCounters;
-+ p_FmMacControllerDriver->f_FM_MAC_GetStatistics = DtsecGetStatistics;
-+ p_FmMacControllerDriver->f_FM_MAC_GetFrameSizeCounters = NULL;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = DtsecModifyMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = DtsecAddHashMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = DtsecDelHashMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = DtsecAddExactMatchMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = DtsecDelExactMatchMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_GetId = DtsecGetId;
-+ p_FmMacControllerDriver->f_FM_MAC_GetVersion = DtsecGetVersion;
-+ p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = DtsecGetMaxFrameLength;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = DTSEC_MII_WritePhyReg;
-+ p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = DTSEC_MII_ReadPhyReg;
-+
-+}
-+
-+
-+/*****************************************************************************/
-+/* dTSEC Config Main Entry */
-+/*****************************************************************************/
-+
-+/* .............................................................................. */
-+
-+t_Handle DTSEC_Config(t_FmMacParams *p_FmMacParam)
-+{
-+ t_Dtsec *p_Dtsec;
-+ struct dtsec_cfg *p_DtsecDriverParam;
-+ uintptr_t baseAddr;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL);
-+
-+ baseAddr = p_FmMacParam->baseAddr;
-+
-+    /* allocate memory for the dTSEC driver data structure. */
-+ p_Dtsec = (t_Dtsec *)XX_Malloc(sizeof(t_Dtsec));
-+ if (!p_Dtsec)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("dTSEC driver structure"));
-+ return NULL;
-+ }
-+ memset(p_Dtsec, 0, sizeof(t_Dtsec));
-+ InitFmMacControllerDriver(&p_Dtsec->fmMacControllerDriver);
-+
-+ /* allocate memory for the dTSEC driver parameters data structure. */
-+ p_DtsecDriverParam = (struct dtsec_cfg *) XX_Malloc(sizeof(struct dtsec_cfg));
-+ if (!p_DtsecDriverParam)
-+ {
-+ XX_Free(p_Dtsec);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("dTSEC driver parameters"));
-+ return NULL;
-+ }
-+ memset(p_DtsecDriverParam, 0, sizeof(struct dtsec_cfg));
-+
-+ /* Plant parameter structure pointer */
-+ p_Dtsec->p_DtsecDriverParam = p_DtsecDriverParam;
-+
-+ fman_dtsec_defconfig(p_DtsecDriverParam);
-+
-+ p_Dtsec->p_MemMap = (struct dtsec_regs *)UINT_TO_PTR(baseAddr);
-+ p_Dtsec->p_MiiMemMap = (struct dtsec_mii_reg *)UINT_TO_PTR(baseAddr + DTSEC_TO_MII_OFFSET);
-+ p_Dtsec->addr = ENET_ADDR_TO_UINT64(p_FmMacParam->addr);
-+ p_Dtsec->enetMode = p_FmMacParam->enetMode;
-+ p_Dtsec->macId = p_FmMacParam->macId;
-+ p_Dtsec->exceptions = DEFAULT_exceptions;
-+ p_Dtsec->mdioIrq = p_FmMacParam->mdioIrq;
-+ p_Dtsec->f_Exception = p_FmMacParam->f_Exception;
-+ p_Dtsec->f_Event = p_FmMacParam->f_Event;
-+ p_Dtsec->h_App = p_FmMacParam->h_App;
-+ p_Dtsec->ptpTsuEnabled = p_Dtsec->p_DtsecDriverParam->ptp_tsu_en;
-+ p_Dtsec->enTsuErrExeption = p_Dtsec->p_DtsecDriverParam->ptp_exception_en;
-+ p_Dtsec->tbi_phy_addr = p_Dtsec->p_DtsecDriverParam->tbi_phy_addr;
-+
-+ return p_Dtsec;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h
-@@ -0,0 +1,228 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File dtsec.h
-+
-+ @Description FM dTSEC ...
-+*//***************************************************************************/
-+#ifndef __DTSEC_H
-+#define __DTSEC_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+#include "enet_ext.h"
-+
-+#include "dtsec_mii_acc.h"
-+#include "fm_mac.h"
-+
-+
-+#define DEFAULT_exceptions \
-+ ((uint32_t)(DTSEC_IMASK_BREN | \
-+ DTSEC_IMASK_RXCEN | \
-+ DTSEC_IMASK_BTEN | \
-+ DTSEC_IMASK_TXCEN | \
-+ DTSEC_IMASK_TXEEN | \
-+ DTSEC_IMASK_ABRTEN | \
-+ DTSEC_IMASK_LCEN | \
-+ DTSEC_IMASK_CRLEN | \
-+ DTSEC_IMASK_XFUNEN | \
-+ DTSEC_IMASK_IFERREN | \
-+ DTSEC_IMASK_MAGEN | \
-+ DTSEC_IMASK_TDPEEN | \
-+ DTSEC_IMASK_RDPEEN))
-+
-+#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
-+ case e_FM_MAC_EX_1G_BAB_RX: \
-+ bitMask = DTSEC_IMASK_BREN; break; \
-+ case e_FM_MAC_EX_1G_RX_CTL: \
-+ bitMask = DTSEC_IMASK_RXCEN; break; \
-+ case e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET: \
-+ bitMask = DTSEC_IMASK_GTSCEN ; break; \
-+ case e_FM_MAC_EX_1G_BAB_TX: \
-+ bitMask = DTSEC_IMASK_BTEN ; break; \
-+ case e_FM_MAC_EX_1G_TX_CTL: \
-+ bitMask = DTSEC_IMASK_TXCEN ; break; \
-+ case e_FM_MAC_EX_1G_TX_ERR: \
-+ bitMask = DTSEC_IMASK_TXEEN ; break; \
-+ case e_FM_MAC_EX_1G_LATE_COL: \
-+ bitMask = DTSEC_IMASK_LCEN ; break; \
-+ case e_FM_MAC_EX_1G_COL_RET_LMT: \
-+ bitMask = DTSEC_IMASK_CRLEN ; break; \
-+ case e_FM_MAC_EX_1G_TX_FIFO_UNDRN: \
-+ bitMask = DTSEC_IMASK_XFUNEN ; break; \
-+ case e_FM_MAC_EX_1G_MAG_PCKT: \
-+ bitMask = DTSEC_IMASK_MAGEN ; break; \
-+ case e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET: \
-+ bitMask = DTSEC_IMASK_MMRDEN; break; \
-+ case e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET: \
-+ bitMask = DTSEC_IMASK_MMWREN ; break; \
-+ case e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET: \
-+ bitMask = DTSEC_IMASK_GRSCEN; break; \
-+ case e_FM_MAC_EX_1G_TX_DATA_ERR: \
-+ bitMask = DTSEC_IMASK_TDPEEN; break; \
-+ case e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL: \
-+ bitMask = DTSEC_IMASK_MSROEN ; break; \
-+ default: bitMask = 0;break;}
-+
-+
-+#define MAX_PACKET_ALIGNMENT 31
-+#define MAX_INTER_PACKET_GAP 0x7f
-+#define MAX_INTER_PALTERNATE_BEB 0x0f
-+#define MAX_RETRANSMISSION 0x0f
-+#define MAX_COLLISION_WINDOW 0x03ff
-+
-+
-+/********************* From mac ext ******************************************/
-+typedef uint32_t t_ErrorDisable;
-+
-+#define ERROR_DISABLE_TRANSMIT 0x00400000
-+#define ERROR_DISABLE_LATE_COLLISION 0x00040000
-+#define ERROR_DISABLE_COLLISION_RETRY_LIMIT 0x00020000
-+#define ERROR_DISABLE_TxFIFO_UNDERRUN 0x00010000
-+#define ERROR_DISABLE_TxABORT 0x00008000
-+#define ERROR_DISABLE_INTERFACE 0x00004000
-+#define ERROR_DISABLE_TxDATA_PARITY 0x00000002
-+#define ERROR_DISABLE_RxDATA_PARITY 0x00000001
-+
-+/*****************************************************************************/
-+#define DTSEC_NUM_OF_PADDRS 15 /* number of pattern match registers (entries) */
-+
-+#define GROUP_ADDRESS 0x0000010000000000LL /* Group address bit indication */
-+
-+#define HASH_TABLE_SIZE 256 /* Hash table size (32 bits * 8 regs) */
-+#define EXTENDED_HASH_TABLE_SIZE 512 /* Extended Hash table size (32 bits * 16 regs) */
-+
-+#define DTSEC_TO_MII_OFFSET         0x1000  /* offset of the MII management registers from the dTSEC base address */
-+
-+#define MAX_PHYS 32 /* maximum number of phys */
-+
-+#define VAL32BIT 0x100000000LL
-+#define VAL22BIT 0x00400000
-+#define VAL16BIT 0x00010000
-+#define VAL12BIT 0x00001000
-+
-+/* CAR1/2 bits */
-+#define CAR1_TR64 0x80000000
-+#define CAR1_TR127 0x40000000
-+#define CAR1_TR255 0x20000000
-+#define CAR1_TR511 0x10000000
-+#define CAR1_TRK1 0x08000000
-+#define CAR1_TRMAX 0x04000000
-+#define CAR1_TRMGV 0x02000000
-+
-+#define CAR1_RBYT 0x00010000
-+#define CAR1_RPKT 0x00008000
-+#define CAR1_RMCA 0x00002000
-+#define CAR1_RBCA 0x00001000
-+#define CAR1_RXPF 0x00000400
-+#define CAR1_RALN 0x00000100
-+#define CAR1_RFLR 0x00000080
-+#define CAR1_RCDE 0x00000040
-+#define CAR1_RCSE 0x00000020
-+#define CAR1_RUND 0x00000010
-+#define CAR1_ROVR 0x00000008
-+#define CAR1_RFRG 0x00000004
-+#define CAR1_RJBR 0x00000002
-+#define CAR1_RDRP 0x00000001
-+
-+#define CAR2_TFCS 0x00040000
-+#define CAR2_TBYT 0x00002000
-+#define CAR2_TPKT 0x00001000
-+#define CAR2_TMCA 0x00000800
-+#define CAR2_TBCA 0x00000400
-+#define CAR2_TXPF 0x00000200
-+#define CAR2_TDRP 0x00000001
-+
-+typedef struct t_InternalStatistics
-+{
-+ uint64_t tr64;
-+ uint64_t tr127;
-+ uint64_t tr255;
-+ uint64_t tr511;
-+ uint64_t tr1k;
-+ uint64_t trmax;
-+ uint64_t trmgv;
-+ uint64_t rfrg;
-+ uint64_t rjbr;
-+ uint64_t rdrp;
-+ uint64_t raln;
-+ uint64_t rund;
-+ uint64_t rovr;
-+ uint64_t rxpf;
-+ uint64_t txpf;
-+ uint64_t rbyt;
-+ uint64_t rpkt;
-+ uint64_t rmca;
-+ uint64_t rbca;
-+ uint64_t rflr;
-+ uint64_t rcde;
-+ uint64_t rcse;
-+ uint64_t tbyt;
-+ uint64_t tpkt;
-+ uint64_t tmca;
-+ uint64_t tbca;
-+ uint64_t tdrp;
-+ uint64_t tfcs;
-+} t_InternalStatistics;
-+
-+typedef struct {
-+ t_FmMacControllerDriver fmMacControllerDriver;
-+ t_Handle h_App; /**< Handle to the upper layer application */
-+ struct dtsec_regs *p_MemMap; /**< pointer to dTSEC memory mapped registers. */
-+ struct dtsec_mii_reg *p_MiiMemMap; /**< pointer to dTSEC MII memory mapped registers. */
-+ uint64_t addr; /**< MAC address of device; */
-+ e_EnetMode enetMode; /**< Ethernet physical interface */
-+ t_FmMacExceptionCallback *f_Exception;
-+ int mdioIrq;
-+ t_FmMacExceptionCallback *f_Event;
-+ bool indAddrRegUsed[DTSEC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */
-+ uint64_t paddr[DTSEC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */
-+ uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. */
-+ bool halfDuplex;
-+ t_InternalStatistics internalStatistics;
-+ t_EthHash *p_MulticastAddrHash; /* pointer to driver's global address hash table */
-+ t_EthHash *p_UnicastAddrHash; /* pointer to driver's individual address hash table */
-+ uint8_t macId;
-+ uint8_t tbi_phy_addr;
-+ uint32_t exceptions;
-+ bool ptpTsuEnabled;
-+ bool enTsuErrExeption;
-+ e_FmMacStatisticsLevel statisticsLevel;
-+ struct dtsec_cfg *p_DtsecDriverParam;
-+} t_Dtsec;
-+
-+
-+#endif /* __DTSEC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c
-@@ -0,0 +1,97 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File dtsec_mii_acc.c
-+
-+ @Description   FM dTSEC MII register access ...
-+*//***************************************************************************/
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_mac.h"
-+#include "dtsec.h"
-+#include "fsl_fman_dtsec_mii_acc.h"
-+
-+
-+/*****************************************************************************/
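-+/* Both MII accessors pass half of the FM clock frequency (clkFreq >> 1) to
-+ * the low-level fman_dtsec_mii_* helpers; a read that returns 0xffff is
-+ * treated as "no device present". */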
-+t_Error DTSEC_MII_WritePhyReg(t_Handle h_Dtsec,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t data)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ struct dtsec_mii_reg *miiregs;
-+ uint16_t dtsec_freq;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MiiMemMap, E_INVALID_HANDLE);
-+
-+ dtsec_freq = (uint16_t)(p_Dtsec->fmMacControllerDriver.clkFreq >> 1);
-+ miiregs = p_Dtsec->p_MiiMemMap;
-+
-+ err = (t_Error)fman_dtsec_mii_write_reg(miiregs, phyAddr, reg, data, dtsec_freq);
-+
-+ return err;
-+}
-+
-+/*****************************************************************************/
-+t_Error DTSEC_MII_ReadPhyReg(t_Handle h_Dtsec,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t *p_Data)
-+{
-+ t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec;
-+ struct dtsec_mii_reg *miiregs;
-+ uint16_t dtsec_freq;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MiiMemMap, E_INVALID_HANDLE);
-+
-+ dtsec_freq = (uint16_t)(p_Dtsec->fmMacControllerDriver.clkFreq >> 1);
-+ miiregs = p_Dtsec->p_MiiMemMap;
-+
-+ err = fman_dtsec_mii_read_reg(miiregs, phyAddr, reg, p_Data, dtsec_freq);
-+
-+ if (*p_Data == 0xffff)
-+ RETURN_ERROR(MINOR, E_NO_DEVICE,
-+ ("Read wrong data (0xffff): phyAddr 0x%x, reg 0x%x",
-+ phyAddr, reg));
-+ if (err)
-+ RETURN_ERROR(MINOR, (t_Error)err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h
-@@ -0,0 +1,42 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DTSEC_MII_ACC_H
-+#define __DTSEC_MII_ACC_H
-+
-+#include "std_ext.h"
-+
-+
-+t_Error DTSEC_MII_WritePhyReg(t_Handle h_Dtsec, uint8_t phyAddr, uint8_t reg, uint16_t data);
-+t_Error DTSEC_MII_ReadPhyReg(t_Handle h_Dtsec, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
-+
-+#endif /* __DTSEC_MII_ACC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c
-@@ -0,0 +1,674 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_mac.c
-+
-+ @Description FM MAC ...
-+*//***************************************************************************/
-+#include "std_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+#include "error_ext.h"
-+#include "fm_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_mac.h"
-+
-+
-+/* ......................................................................... */
-+
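-+/* Generic FM MAC layer: FM_MAC_Config() instantiates the MAC-specific driver
-+ * (dTSEC or TGEC on DPAA 1.0, mEMAC otherwise), and the FM_MAC_* wrappers
-+ * below forward each call through the function pointers it populated,
-+ * generally returning E_NOT_SUPPORTED where no handler is registered. */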
-+t_Handle FM_MAC_Config (t_FmMacParams *p_FmMacParam)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver;
-+ uint16_t fmClkFreq;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_INVALID_HANDLE, NULL);
-+
-+ fmClkFreq = FmGetClockFreq(p_FmMacParam->h_Fm);
-+ if (fmClkFreq == 0)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Can't get clock for MAC!"));
-+ return NULL;
-+ }
-+
-+#if (DPAA_VERSION == 10)
-+ if (ENET_SPEED_FROM_MODE(p_FmMacParam->enetMode) < e_ENET_SPEED_10000)
-+ p_FmMacControllerDriver = (t_FmMacControllerDriver *)DTSEC_Config(p_FmMacParam);
-+ else
-+#if FM_MAX_NUM_OF_10G_MACS > 0
-+ p_FmMacControllerDriver = (t_FmMacControllerDriver *)TGEC_Config(p_FmMacParam);
-+#else
-+ p_FmMacControllerDriver = NULL;
-+#endif /* FM_MAX_NUM_OF_10G_MACS > 0 */
-+#else
-+ p_FmMacControllerDriver = (t_FmMacControllerDriver *)MEMAC_Config(p_FmMacParam);
-+#endif /* (DPAA_VERSION == 10) */
-+
-+ if (!p_FmMacControllerDriver)
-+ return NULL;
-+
-+ p_FmMacControllerDriver->h_Fm = p_FmMacParam->h_Fm;
-+ p_FmMacControllerDriver->enetMode = p_FmMacParam->enetMode;
-+ p_FmMacControllerDriver->macId = p_FmMacParam->macId;
-+ p_FmMacControllerDriver->resetOnInit = DEFAULT_resetOnInit;
-+
-+ p_FmMacControllerDriver->clkFreq = fmClkFreq;
-+
-+ return (t_Handle)p_FmMacControllerDriver;
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_Init (t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->resetOnInit &&
-+ !p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit &&
-+ (FmResetMac(p_FmMacControllerDriver->h_Fm,
-+ ((ENET_INTERFACE_FROM_MODE(p_FmMacControllerDriver->enetMode) == e_ENET_IF_XGMII) ?
-+ e_FM_MAC_10G : e_FM_MAC_1G),
-+ p_FmMacControllerDriver->macId) != E_OK))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't reset MAC!"));
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_Init)
-+ return p_FmMacControllerDriver->f_FM_MAC_Init(h_FmMac);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_Free (t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_Free)
-+ return p_FmMacControllerDriver->f_FM_MAC_Free(h_FmMac);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigResetOnInit (t_Handle h_FmMac, bool enable)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit(h_FmMac, enable);
-+
-+ p_FmMacControllerDriver->resetOnInit = enable;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigLoopback (t_Handle h_FmMac, bool newVal)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback(h_FmMac, newVal);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigMaxFrameLength (t_Handle h_FmMac, uint16_t newVal)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength(h_FmMac, newVal);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigWan (t_Handle h_FmMac, bool flag)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigWan)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigWan(h_FmMac, flag);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigPadAndCrc (t_Handle h_FmMac, bool newVal)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc(h_FmMac, newVal);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigHalfDuplex (t_Handle h_FmMac, bool newVal)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex(h_FmMac,newVal);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigTbiPhyAddr (t_Handle h_FmMac, uint8_t newVal)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigTbiPhyAddr)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigTbiPhyAddr(h_FmMac,newVal);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigLengthCheck (t_Handle h_FmMac, bool newVal)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck(h_FmMac,newVal);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigException (t_Handle h_FmMac, e_FmMacExceptions ex, bool enable)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigException)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigException(h_FmMac, ex, enable);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ConfigSkipFman11Workaround (t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround)
-+ return p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround(h_FmMac);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+
-+/*****************************************************************************/
-+/* Run Time Control */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_Enable (t_Handle h_FmMac, e_CommMode mode)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_Enable)
-+ return p_FmMacControllerDriver->f_FM_MAC_Enable(h_FmMac, mode);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_Disable (t_Handle h_FmMac, e_CommMode mode)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_Disable)
-+ return p_FmMacControllerDriver->f_FM_MAC_Disable(h_FmMac, mode);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MAC_Resume (t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_Resume)
-+ return p_FmMacControllerDriver->f_FM_MAC_Resume(h_FmMac);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_Enable1588TimeStamp (t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp)
-+ return p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp(h_FmMac);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_Disable1588TimeStamp (t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp)
-+ return p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp(h_FmMac);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_SetTxAutoPauseFrames(t_Handle h_FmMac,
-+ uint16_t pauseTime)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames)
-+ return p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames(h_FmMac,
-+ pauseTime);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_SetTxPauseFrames(t_Handle h_FmMac,
-+ uint8_t priority,
-+ uint16_t pauseTime,
-+ uint16_t threshTime)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames)
-+ return p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames(h_FmMac,
-+ priority,
-+ pauseTime,
-+ threshTime);
-+
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_SetRxIgnorePauseFrames (t_Handle h_FmMac, bool en)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames)
-+ return p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames(h_FmMac, en);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_SetWakeOnLan (t_Handle h_FmMac, bool en)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan)
-+ return p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan(h_FmMac, en);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ResetCounters (t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ResetCounters)
-+ return p_FmMacControllerDriver->f_FM_MAC_ResetCounters(h_FmMac);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_SetException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_SetException)
-+ return p_FmMacControllerDriver->f_FM_MAC_SetException(h_FmMac, ex, enable);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_SetStatistics (t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_SetStatistics)
-+ return p_FmMacControllerDriver->f_FM_MAC_SetStatistics(h_FmMac, statisticsLevel);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_GetStatistics (t_Handle h_FmMac, t_FmMacStatistics *p_Statistics)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_GetStatistics)
-+ return p_FmMacControllerDriver->f_FM_MAC_GetStatistics(h_FmMac, p_Statistics);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_GetFrameSizeCounters(t_Handle h_FmMac, t_FmMacFrameSizeCounters *p_FrameSizeCounters, e_CommMode type)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ memset(p_FrameSizeCounters, 0, sizeof(t_FmMacFrameSizeCounters));
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_GetFrameSizeCounters)
-+ return p_FmMacControllerDriver->f_FM_MAC_GetFrameSizeCounters(h_FmMac, p_FrameSizeCounters, type);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_ModifyMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr)
-+ return p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr(h_FmMac, p_EnetAddr);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_AddHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr)
-+ return p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr(h_FmMac, p_EnetAddr);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_RemoveHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr)
-+ return p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr(h_FmMac, p_EnetAddr);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_AddExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr)
-+ return p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr(h_FmMac, p_EnetAddr);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_RemovelExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr)
-+ return p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr(h_FmMac, p_EnetAddr);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_GetVesrion (t_Handle h_FmMac, uint32_t *macVresion)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_GetVersion)
-+ return p_FmMacControllerDriver->f_FM_MAC_GetVersion(h_FmMac, macVresion);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_GetId (t_Handle h_FmMac, uint32_t *macId)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_GetId)
-+ return p_FmMacControllerDriver->f_FM_MAC_GetId(h_FmMac, macId);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_SetPromiscuous (t_Handle h_FmMac, bool newVal)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous)
-+ return p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous(h_FmMac, newVal);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_AdjustLink(t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_AdjustLink)
-+ return p_FmMacControllerDriver->f_FM_MAC_AdjustLink(h_FmMac, speed, fullDuplex);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_RestartAutoneg(t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg)
-+ return p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg(h_FmMac);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_MII_WritePhyReg (t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg)
-+ return p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg(h_FmMac, phyAddr, reg, data);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+t_Error FM_MAC_MII_ReadPhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg)
-+ return p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg(h_FmMac, phyAddr, reg, p_Data);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+uint16_t FM_MAC_GetMaxFrameLength(t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacControllerDriver, E_INVALID_HANDLE, 0);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength)
-+ return p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength(h_FmMac);
-+
-+ REPORT_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+ return 0;
-+}
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/*****************************************************************************/
-+t_Error FM_MAC_DumpRegs(t_Handle h_FmMac)
-+{
-+ t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacControllerDriver->f_FM_MAC_DumpRegs)
-+ return p_FmMacControllerDriver->f_FM_MAC_DumpRegs(h_FmMac);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+#endif /* (defined(DEBUG_ERRORS) && ... */
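Every wrapper in this file follows the same dispatch contract: sanity-check the handle, call the MAC-specific callback if the dTSEC/tGEC/mEMAC Config path populated it, and otherwise report E_NOT_SUPPORTED. A minimal caller-side sketch of that contract; GET_ERROR_TYPE() from error_ext.h is assumed here to strip the module bits that RETURN_ERROR() folds into the returned code, and the helper itself is illustrative only:

static t_Error ExampleEnableWakeOnLan(t_Handle h_FmMac)
{
        /* h_FmMac is assumed to come from the MAC-specific
         * Config()/Init() sequence.                                    */
        t_Error err = FM_MAC_SetWakeOnLan(h_FmMac, TRUE);

        /* MACs that leave f_FM_MAC_SetWakeOnLan unset answer
         * E_NOT_SUPPORTED; treat that as "nothing to do" here.         */
        if ((err != E_OK) && (GET_ERROR_TYPE(err) == E_NOT_SUPPORTED))
                return E_OK;

        return err;
}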
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h
-@@ -0,0 +1,226 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_mac.h
-+
-+ @Description FM MAC ...
-+*//***************************************************************************/
-+#ifndef __FM_MAC_H
-+#define __FM_MAC_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+#include "fm_mac_ext.h"
-+#include "fm_common.h"
-+
-+
-+#define __ERR_MODULE__ MODULE_FM_MAC
-+
-+/**************************************************************************//**
-+ @Description defaults
-+*//***************************************************************************/
-+
-+
-+#define DEFAULT_halfDuplex FALSE
-+#define DEFAULT_padAndCrcEnable TRUE
-+#define DEFAULT_resetOnInit FALSE
-+
-+
-+typedef struct {
-+ uint64_t addr; /* Ethernet Address */
-+ t_List node;
-+} t_EthHashEntry;
-+#define ETH_HASH_ENTRY_OBJ(ptr) LIST_OBJECT(ptr, t_EthHashEntry, node)
-+
-+typedef struct {
-+ uint16_t size;
-+ t_List *p_Lsts;
-+} t_EthHash;
-+
-+typedef struct {
-+ t_Error (*f_FM_MAC_Init) (t_Handle h_FmMac);
-+ t_Error (*f_FM_MAC_Free) (t_Handle h_FmMac);
-+
-+ t_Error (*f_FM_MAC_SetStatistics) (t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel);
-+ t_Error (*f_FM_MAC_ConfigLoopback) (t_Handle h_FmMac, bool newVal);
-+ t_Error (*f_FM_MAC_ConfigMaxFrameLength) (t_Handle h_FmMac, uint16_t newVal);
-+ t_Error (*f_FM_MAC_ConfigWan) (t_Handle h_FmMac, bool flag);
-+ t_Error (*f_FM_MAC_ConfigPadAndCrc) (t_Handle h_FmMac, bool newVal);
-+ t_Error (*f_FM_MAC_ConfigHalfDuplex) (t_Handle h_FmMac, bool newVal);
-+ t_Error (*f_FM_MAC_ConfigLengthCheck) (t_Handle h_FmMac, bool newVal);
-+ t_Error (*f_FM_MAC_ConfigTbiPhyAddr) (t_Handle h_FmMac, uint8_t newVal);
-+ t_Error (*f_FM_MAC_ConfigException) (t_Handle h_FmMac, e_FmMacExceptions, bool enable);
-+ t_Error (*f_FM_MAC_ConfigResetOnInit) (t_Handle h_FmMac, bool enable);
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+ t_Error (*f_FM_MAC_ConfigSkipFman11Workaround) (t_Handle h_FmMac);
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+ t_Error (*f_FM_MAC_SetException) (t_Handle h_FmMac, e_FmMacExceptions ex, bool enable);
-+
-+ t_Error (*f_FM_MAC_Enable) (t_Handle h_FmMac, e_CommMode mode);
-+ t_Error (*f_FM_MAC_Disable) (t_Handle h_FmMac, e_CommMode mode);
-+ t_Error (*f_FM_MAC_Resume) (t_Handle h_FmMac);
-+ t_Error (*f_FM_MAC_Enable1588TimeStamp) (t_Handle h_FmMac);
-+ t_Error (*f_FM_MAC_Disable1588TimeStamp) (t_Handle h_FmMac);
-+ t_Error (*f_FM_MAC_Reset) (t_Handle h_FmMac, bool wait);
-+
-+ t_Error (*f_FM_MAC_SetTxAutoPauseFrames) (t_Handle h_FmMac,
-+ uint16_t pauseTime);
-+ t_Error (*f_FM_MAC_SetTxPauseFrames) (t_Handle h_FmMac,
-+ uint8_t priority,
-+ uint16_t pauseTime,
-+ uint16_t threshTime);
-+ t_Error (*f_FM_MAC_SetRxIgnorePauseFrames) (t_Handle h_FmMac, bool en);
-+
-+ t_Error (*f_FM_MAC_ResetCounters) (t_Handle h_FmMac);
-+ t_Error (*f_FM_MAC_GetStatistics) (t_Handle h_FmMac, t_FmMacStatistics *p_Statistics);
-+ t_Error (*f_FM_MAC_GetFrameSizeCounters) (t_Handle h_FmMac, t_FmMacFrameSizeCounters *p_FrameSizeCounters, e_CommMode type);
-+
-+ t_Error (*f_FM_MAC_ModifyMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+ t_Error (*f_FM_MAC_AddHashMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+ t_Error (*f_FM_MAC_RemoveHashMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+ t_Error (*f_FM_MAC_AddExactMatchMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+ t_Error (*f_FM_MAC_RemovelExactMatchMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+
-+ t_Error (*f_FM_MAC_SetPromiscuous) (t_Handle h_FmMac, bool newVal);
-+ t_Error (*f_FM_MAC_AdjustLink) (t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex);
-+ t_Error (*f_FM_MAC_RestartAutoneg) (t_Handle h_FmMac);
-+
-+ t_Error (*f_FM_MAC_SetWakeOnLan) (t_Handle h_FmMac, bool en);
-+
-+ t_Error (*f_FM_MAC_GetId) (t_Handle h_FmMac, uint32_t *macId);
-+
-+ t_Error (*f_FM_MAC_GetVersion) (t_Handle h_FmMac, uint32_t *macVersion);
-+
-+ uint16_t (*f_FM_MAC_GetMaxFrameLength) (t_Handle h_FmMac);
-+
-+ t_Error (*f_FM_MAC_MII_WritePhyReg)(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data);
-+ t_Error (*f_FM_MAC_MII_ReadPhyReg)(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_Error (*f_FM_MAC_DumpRegs) (t_Handle h_FmMac);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ t_Handle h_Fm;
-+ t_FmRevisionInfo fmRevInfo;
-+ e_EnetMode enetMode;
-+ uint8_t macId;
-+ bool resetOnInit;
-+ uint16_t clkFreq;
-+} t_FmMacControllerDriver;
-+
-+
-+#if (DPAA_VERSION == 10)
-+t_Handle DTSEC_Config(t_FmMacParams *p_FmMacParam);
-+t_Handle TGEC_Config(t_FmMacParams *p_FmMacParams);
-+#else
-+t_Handle MEMAC_Config(t_FmMacParams *p_FmMacParam);
-+#endif /* (DPAA_VERSION == 10) */
-+uint16_t FM_MAC_GetMaxFrameLength(t_Handle FmMac);
-+
-+
-+/* ........................................................................... */
-+
-+static __inline__ t_EthHashEntry *DequeueAddrFromHashEntry(t_List *p_AddrLst)
-+{
-+ t_EthHashEntry *p_HashEntry = NULL;
-+ if (!LIST_IsEmpty(p_AddrLst))
-+ {
-+ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_AddrLst->p_Next);
-+ LIST_DelAndInit(&p_HashEntry->node);
-+ }
-+ return p_HashEntry;
-+}
-+
-+/* ........................................................................... */
-+
-+static __inline__ void FreeHashTable(t_EthHash *p_Hash)
-+{
-+ t_EthHashEntry *p_HashEntry;
-+ int i = 0;
-+
-+ if (p_Hash)
-+ {
-+ if (p_Hash->p_Lsts)
-+ {
-+ for (i=0; i<p_Hash->size; i++)
-+ {
-+ p_HashEntry = DequeueAddrFromHashEntry(&p_Hash->p_Lsts[i]);
-+ while (p_HashEntry)
-+ {
-+ XX_Free(p_HashEntry);
-+ p_HashEntry = DequeueAddrFromHashEntry(&p_Hash->p_Lsts[i]);
-+ }
-+ }
-+
-+ XX_Free(p_Hash->p_Lsts);
-+ }
-+
-+ XX_Free(p_Hash);
-+ }
-+}
-+
-+/* ........................................................................... */
-+
-+static __inline__ t_EthHash * AllocHashTable(uint16_t size)
-+{
-+ uint32_t i;
-+ t_EthHash *p_Hash;
-+
-+ /* Allocate address hash table */
-+ p_Hash = (t_EthHash *)XX_Malloc(sizeof(t_EthHash));
-+ if (!p_Hash)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Address hash table"));
-+ return NULL;
-+ }
-+ p_Hash->size = size;
-+
-+ p_Hash->p_Lsts = (t_List *)XX_Malloc(p_Hash->size*sizeof(t_List));
-+ if (!p_Hash->p_Lsts)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Address hash table"));
-+ XX_Free(p_Hash);
-+ return NULL;
-+ }
-+
-+ for (i=0 ; i<p_Hash->size; i++)
-+ INIT_LIST(&p_Hash->p_Lsts[i]);
-+
-+ return p_Hash;
-+}
-+
-+
-+#endif /* __FM_MAC_H */
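The hash helpers above only allocate, drain and free the per-bucket lists; inserting an address is left to the MAC-specific code. A minimal sketch of that insert step, assuming the LIST_AddToTail() helper from list_ext.h and a bucket index already derived from the address CRC:

static __inline__ t_Error ExampleAddHashEntry(t_EthHash *p_Hash,
                                              uint64_t   addr,
                                              uint32_t   bucket)
{
        t_EthHashEntry *p_Entry;

        p_Entry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry));
        if (!p_Entry)
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("address hash entry"));

        p_Entry->addr = addr;
        INIT_LIST(&p_Entry->node);
        /* assumed list_ext.h API: queue the node on its bucket's list */
        LIST_AddToTail(&p_Entry->node, &p_Hash->p_Lsts[bucket % p_Hash->size]);

        return E_OK;
}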
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.c
-@@ -0,0 +1,119 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "fman_crc32.h"
-+#include "common/general.h"
-+
-+
-+/* precomputed CRC values for address hashing */
-+static const uint32_t crc_tbl[256] = {
-+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
-+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
-+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
-+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
-+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
-+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
-+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
-+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
-+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
-+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
-+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
-+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
-+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
-+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
-+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
-+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
-+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
-+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
-+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
-+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
-+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
-+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
-+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
-+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
-+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
-+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
-+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
-+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
-+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
-+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
-+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
-+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
-+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
-+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
-+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
-+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
-+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
-+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
-+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
-+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
-+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
-+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
-+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-+};
-+
-+/* Get the mirrored value of a byte size number. (0x11010011 --> 0x11001011) */
-+static inline uint8_t get_mirror8(uint8_t n)
-+{
-+ uint8_t mirror[16] = {
-+ 0x00, 0x08, 0x04, 0x0c, 0x02, 0x0a, 0x06, 0x0e,
-+ 0x01, 0x09, 0x05, 0x0d, 0x03, 0x0b, 0x07, 0x0f
-+ };
-+ return (uint8_t)(((mirror[n & 0x0f] << 4) | (mirror[n >> 4])));
-+}
-+
-+static inline uint32_t get_mirror32(uint32_t n)
-+{
-+ return ((uint32_t)get_mirror8((uint8_t)(n))<<24) |
-+ ((uint32_t)get_mirror8((uint8_t)(n>>8))<<16) |
-+ ((uint32_t)get_mirror8((uint8_t)(n>>16))<<8) |
-+ ((uint32_t)get_mirror8((uint8_t)(n>>24)));
-+}
-+
-+uint32_t get_mac_addr_crc(uint64_t _addr)
-+{
-+ uint32_t i;
-+ uint8_t data;
-+ uint32_t crc;
-+
-+ /* CRC calculation */
-+ crc = 0xffffffff;
-+ for (i = 0; i < 6; i++) {
-+ data = (uint8_t)(_addr >> ((5-i)*8));
-+ crc = crc ^ data;
-+ crc = crc_tbl[crc&0xff] ^ (crc>>8);
-+ }
-+
-+ crc = get_mirror32(crc);
-+ return crc;
-+}
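The 32-bit value returned above feeds the MAC hash filters; for dTSEC (see fman_dtsec_set_hash_table() later in this patch) only the top bits of the mirrored CRC select a bucket. A small sketch of that derivation, assuming the 48-bit station address sits in the low bytes of the uint64_t with the most significant octet fed to the CRC first, as the shift in the loop above implies:

static uint32_t example_dtsec_hash_bucket(uint64_t addr, int ghtx)
{
        uint32_t crc = get_mac_addr_crc(addr);

        /* GHTX set: 9 bits -> one of 512 buckets spread over IGADDR/GADDR.
         * GHTX clear: 8 bits -> a 256-entry table; the caller adds 0x100
         * for multicast so the bit lands in GADDR rather than IGADDR.   */
        if (ghtx)
                return (crc >> 23) & 0x1ff;
        return (crc >> 24) & 0xff;
}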
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_crc32.h
-@@ -0,0 +1,43 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __FMAN_CRC32_H
-+#define __FMAN_CRC32_H
-+
-+#include "common/general.h"
-+
-+
-+uint32_t get_mac_addr_crc(uint64_t _addr);
-+
-+
-+#endif /* __FMAN_CRC32_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec.c
-@@ -0,0 +1,847 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "fsl_fman_dtsec.h"
-+
-+
-+void fman_dtsec_stop_rx(struct dtsec_regs *regs)
-+{
-+ /* Assert the graceful stop bit */
-+ iowrite32be(ioread32be(&regs->rctrl) | RCTRL_GRS, &regs->rctrl);
-+}
-+
-+void fman_dtsec_stop_tx(struct dtsec_regs *regs)
-+{
-+ /* Assert the graceful stop bit */
-+ iowrite32be(ioread32be(&regs->tctrl) | DTSEC_TCTRL_GTS, &regs->tctrl);
-+}
-+
-+void fman_dtsec_start_tx(struct dtsec_regs *regs)
-+{
-+ /* clear the graceful stop bit */
-+ iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS, &regs->tctrl);
-+}
-+
-+void fman_dtsec_start_rx(struct dtsec_regs *regs)
-+{
-+ /* clear the graceful stop bit */
-+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
-+}
-+
-+void fman_dtsec_defconfig(struct dtsec_cfg *cfg)
-+{
-+ cfg->halfdup_on = DEFAULT_HALFDUP_ON;
-+ cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
-+ cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
-+ cfg->halfdup_excess_defer = DEFAULT_HALFDUP_EXCESS_DEFER;
-+ cfg->halfdup_no_backoff = DEFAULT_HALFDUP_NO_BACKOFF;
-+ cfg->halfdup_bp_no_backoff = DEFAULT_HALFDUP_BP_NO_BACKOFF;
-+ cfg->halfdup_alt_backoff_val = DEFAULT_HALFDUP_ALT_BACKOFF_VAL;
-+ cfg->halfdup_alt_backoff_en = DEFAULT_HALFDUP_ALT_BACKOFF_EN;
-+ cfg->rx_drop_bcast = DEFAULT_RX_DROP_BCAST;
-+ cfg->rx_short_frm = DEFAULT_RX_SHORT_FRM;
-+ cfg->rx_len_check = DEFAULT_RX_LEN_CHECK;
-+ cfg->tx_pad_crc = DEFAULT_TX_PAD_CRC;
-+ cfg->tx_crc = DEFAULT_TX_CRC;
-+ cfg->rx_ctrl_acc = DEFAULT_RX_CTRL_ACC;
-+ cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
-+ cfg->tbipa = DEFAULT_TBIPA; /* PHY address 0 is reserved (DPAA RM)*/
-+ cfg->rx_prepend = DEFAULT_RX_PREPEND;
-+ cfg->ptp_tsu_en = DEFAULT_PTP_TSU_EN;
-+ cfg->ptp_exception_en = DEFAULT_PTP_EXCEPTION_EN;
-+ cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
-+ cfg->rx_preamble = DEFAULT_RX_PREAMBLE;
-+ cfg->tx_preamble = DEFAULT_TX_PREAMBLE;
-+ cfg->loopback = DEFAULT_LOOPBACK;
-+ cfg->rx_time_stamp_en = DEFAULT_RX_TIME_STAMP_EN;
-+ cfg->tx_time_stamp_en = DEFAULT_TX_TIME_STAMP_EN;
-+ cfg->rx_flow = DEFAULT_RX_FLOW;
-+ cfg->tx_flow = DEFAULT_TX_FLOW;
-+ cfg->rx_group_hash_exd = DEFAULT_RX_GROUP_HASH_EXD;
-+ cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
-+ cfg->rx_promisc = DEFAULT_RX_PROMISC;
-+ cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
-+ cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
-+ cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
-+ cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
-+ cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
-+ cfg->tbi_phy_addr = DEFAULT_TBI_PHY_ADDR;
-+ cfg->wake_on_lan = DEFAULT_WAKE_ON_LAN;
-+}
-+
-+int fman_dtsec_init(struct dtsec_regs *regs, struct dtsec_cfg *cfg,
-+ enum enet_interface iface_mode,
-+ enum enet_speed iface_speed,
-+ uint8_t *macaddr,
-+ uint8_t fm_rev_maj,
-+ uint8_t fm_rev_min,
-+ uint32_t exception_mask)
-+{
-+ bool is_rgmii = FALSE;
-+ bool is_sgmii = FALSE;
-+ bool is_qsgmii = FALSE;
-+ int i;
-+ uint32_t tmp;
-+
-+ UNUSED(fm_rev_maj); UNUSED(fm_rev_min);
-+
-+ /* let's start with a soft reset */
-+ iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
-+ iowrite32be(0, &regs->maccfg1);
-+
-+ /*************dtsec_id2******************/
-+ tmp = ioread32be(&regs->tsec_id2);
-+
-+ /* check RGMII support */
-+ if (iface_mode == E_ENET_IF_RGMII ||
-+ iface_mode == E_ENET_IF_RMII)
-+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
-+ return -EINVAL;
-+
-+ if (iface_mode == E_ENET_IF_SGMII ||
-+ iface_mode == E_ENET_IF_MII)
-+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
-+ return -EINVAL;
-+
-+ /***************ECNTRL************************/
-+
-+ is_rgmii = (bool)((iface_mode == E_ENET_IF_RGMII) ? TRUE : FALSE);
-+ is_sgmii = (bool)((iface_mode == E_ENET_IF_SGMII) ? TRUE : FALSE);
-+ is_qsgmii = (bool)((iface_mode == E_ENET_IF_QSGMII) ? TRUE : FALSE);
-+
-+ tmp = 0;
-+ if (is_rgmii || iface_mode == E_ENET_IF_GMII)
-+ tmp |= DTSEC_ECNTRL_GMIIM;
-+ if (is_sgmii)
-+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
-+ if (is_qsgmii)
-+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
-+ DTSEC_ECNTRL_QSGMIIM);
-+ if (is_rgmii)
-+ tmp |= DTSEC_ECNTRL_RPM;
-+ if (iface_speed == E_ENET_SPEED_100)
-+ tmp |= DTSEC_ECNTRL_R100M;
-+
-+ iowrite32be(tmp, &regs->ecntrl);
-+ /***************ECNTRL************************/
-+
-+ /***************TCTRL************************/
-+ tmp = 0;
-+ if (cfg->halfdup_on)
-+ tmp |= DTSEC_TCTRL_THDF;
-+ if (cfg->tx_time_stamp_en)
-+ tmp |= DTSEC_TCTRL_TTSE;
-+
-+ iowrite32be(tmp, &regs->tctrl);
-+
-+ /***************TCTRL************************/
-+
-+ /***************PTV************************/
-+ tmp = 0;
-+
-+#ifdef FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1
-+ if ((fm_rev_maj == 1) && (fm_rev_min == 0))
-+ cfg->tx_pause_time += 2;
-+#endif /* FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 */
-+
-+ if (cfg->tx_pause_time)
-+ tmp |= cfg->tx_pause_time;
-+ if (cfg->tx_pause_time_extd)
-+ tmp |= cfg->tx_pause_time_extd << PTV_PTE_OFST;
-+ iowrite32be(tmp, &regs->ptv);
-+
-+ /***************RCTRL************************/
-+ tmp = 0;
-+ tmp |= ((uint32_t)(cfg->rx_prepend & 0x0000001f)) << 16;
-+ if (cfg->rx_ctrl_acc)
-+ tmp |= RCTRL_CFA;
-+ if (cfg->rx_group_hash_exd)
-+ tmp |= RCTRL_GHTX;
-+ if (cfg->rx_time_stamp_en)
-+ tmp |= RCTRL_RTSE;
-+ if (cfg->rx_drop_bcast)
-+ tmp |= RCTRL_BC_REJ;
-+ if (cfg->rx_short_frm)
-+ tmp |= RCTRL_RSF;
-+ if (cfg->rx_promisc)
-+ tmp |= RCTRL_PROM;
-+
-+ iowrite32be(tmp, &regs->rctrl);
-+ /***************RCTRL************************/
-+
-+ /*
-+ * Assign a Phy Address to the TBI (TBIPA).
-+ * Done also in cases where TBI is not selected to avoid conflict with
-+ * the external PHY's Physical address
-+ */
-+ iowrite32be(cfg->tbipa, &regs->tbipa);
-+
-+ /***************TMR_CTL************************/
-+ iowrite32be(0, &regs->tmr_ctrl);
-+
-+ if (cfg->ptp_tsu_en) {
-+ tmp = 0;
-+ tmp |= TMR_PEVENT_TSRE;
-+ iowrite32be(tmp, &regs->tmr_pevent);
-+
-+ if (cfg->ptp_exception_en) {
-+ tmp = 0;
-+ tmp |= TMR_PEMASK_TSREEN;
-+ iowrite32be(tmp, &regs->tmr_pemask);
-+ }
-+ }
-+
-+ /***************MACCFG1***********************/
-+ tmp = 0;
-+ if (cfg->loopback)
-+ tmp |= MACCFG1_LOOPBACK;
-+ if (cfg->rx_flow)
-+ tmp |= MACCFG1_RX_FLOW;
-+ if (cfg->tx_flow)
-+ tmp |= MACCFG1_TX_FLOW;
-+ iowrite32be(tmp, &regs->maccfg1);
-+
-+ /***************MACCFG1***********************/
-+
-+ /***************MACCFG2***********************/
-+ tmp = 0;
-+
-+ if (iface_speed < E_ENET_SPEED_1000)
-+ tmp |= MACCFG2_NIBBLE_MODE;
-+ else if (iface_speed == E_ENET_SPEED_1000)
-+ tmp |= MACCFG2_BYTE_MODE;
-+
-+ tmp |= ((uint32_t) cfg->preamble_len & 0x0000000f)
-+ << PREAMBLE_LENGTH_SHIFT;
-+
-+ if (cfg->rx_preamble)
-+ tmp |= MACCFG2_PRE_AM_Rx_EN;
-+ if (cfg->tx_preamble)
-+ tmp |= MACCFG2_PRE_AM_Tx_EN;
-+ if (cfg->rx_len_check)
-+ tmp |= MACCFG2_LENGTH_CHECK;
-+ if (cfg->tx_pad_crc)
-+ tmp |= MACCFG2_PAD_CRC_EN;
-+ if (cfg->tx_crc)
-+ tmp |= MACCFG2_CRC_EN;
-+ if (!cfg->halfdup_on)
-+ tmp |= MACCFG2_FULL_DUPLEX;
-+ iowrite32be(tmp, &regs->maccfg2);
-+
-+ /***************MACCFG2***********************/
-+
-+ /***************IPGIFG************************/
-+ tmp = (((cfg->non_back_to_back_ipg1 <<
-+ IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
-+ & IPGIFG_NON_BACK_TO_BACK_IPG_1)
-+ | ((cfg->non_back_to_back_ipg2 <<
-+ IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
-+ & IPGIFG_NON_BACK_TO_BACK_IPG_2)
-+ | ((cfg->min_ifg_enforcement <<
-+ IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
-+ & IPGIFG_MIN_IFG_ENFORCEMENT)
-+ | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
-+ iowrite32be(tmp, &regs->ipgifg);
-+
-+ /***************IPGIFG************************/
-+
-+ /***************HAFDUP************************/
-+ tmp = 0;
-+
-+ if (cfg->halfdup_alt_backoff_en)
-+ tmp = (uint32_t)(HAFDUP_ALT_BEB |
-+ ((cfg->halfdup_alt_backoff_val & 0x0000000f)
-+ << HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT));
-+ if (cfg->halfdup_bp_no_backoff)
-+ tmp |= HAFDUP_BP_NO_BACKOFF;
-+ if (cfg->halfdup_no_backoff)
-+ tmp |= HAFDUP_NO_BACKOFF;
-+ if (cfg->halfdup_excess_defer)
-+ tmp |= HAFDUP_EXCESS_DEFER;
-+ tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
-+ & HAFDUP_RETRANSMISSION_MAX);
-+ tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
-+
-+ iowrite32be(tmp, &regs->hafdup);
-+ /***************HAFDUP************************/
-+
-+ /***************MAXFRM************************/
-+ /* Initialize MAXFRM */
-+ iowrite32be(cfg->maximum_frame, &regs->maxfrm);
-+
-+ /***************MAXFRM************************/
-+
-+ /***************CAM1************************/
-+ iowrite32be(0xffffffff, &regs->cam1);
-+ iowrite32be(0xffffffff, &regs->cam2);
-+
-+ /***************IMASK************************/
-+ iowrite32be(exception_mask, &regs->imask);
-+ /***************IMASK************************/
-+
-+ /***************IEVENT************************/
-+ iowrite32be(0xffffffff, &regs->ievent);
-+
-+ /***************MACSTNADDR1/2*****************/
-+
-+ tmp = (uint32_t)((macaddr[5] << 24) |
-+ (macaddr[4] << 16) |
-+ (macaddr[3] << 8) |
-+ macaddr[2]);
-+ iowrite32be(tmp, &regs->macstnaddr1);
-+
-+ tmp = (uint32_t)((macaddr[1] << 24) |
-+ (macaddr[0] << 16));
-+ iowrite32be(tmp, &regs->macstnaddr2);
-+
-+ /***************MACSTNADDR1/2*****************/
-+
-+ /*****************HASH************************/
-+ for (i = 0; i < NUM_OF_HASH_REGS ; i++) {
-+ /* Initialize IADDRx */
-+ iowrite32be(0, &regs->igaddr[i]);
-+ /* Initialize GADDRx */
-+ iowrite32be(0, &regs->gaddr[i]);
-+ }
-+
-+ fman_dtsec_reset_stat(regs);
-+
-+ return 0;
-+}
-+
-+uint16_t fman_dtsec_get_max_frame_len(struct dtsec_regs *regs)
-+{
-+ return (uint16_t)ioread32be(&regs->maxfrm);
-+}
-+
-+void fman_dtsec_set_max_frame_len(struct dtsec_regs *regs, uint16_t length)
-+{
-+ iowrite32be(length, &regs->maxfrm);
-+}
-+
-+void fman_dtsec_set_mac_address(struct dtsec_regs *regs, uint8_t *adr)
-+{
-+ uint32_t tmp;
-+
-+ tmp = (uint32_t)((adr[5] << 24) |
-+ (adr[4] << 16) |
-+ (adr[3] << 8) |
-+ adr[2]);
-+ iowrite32be(tmp, &regs->macstnaddr1);
-+
-+ tmp = (uint32_t)((adr[1] << 24) |
-+ (adr[0] << 16));
-+ iowrite32be(tmp, &regs->macstnaddr2);
-+}
-+
-+void fman_dtsec_get_mac_address(struct dtsec_regs *regs, uint8_t *macaddr)
-+{
-+ uint32_t tmp1, tmp2;
-+
-+ tmp1 = ioread32be(&regs->macstnaddr1);
-+ tmp2 = ioread32be(&regs->macstnaddr2);
-+
-+ macaddr[0] = (uint8_t)((tmp2 & 0x00ff0000) >> 16);
-+ macaddr[1] = (uint8_t)((tmp2 & 0xff000000) >> 24);
-+ macaddr[2] = (uint8_t)(tmp1 & 0x000000ff);
-+ macaddr[3] = (uint8_t)((tmp1 & 0x0000ff00) >> 8);
-+ macaddr[4] = (uint8_t)((tmp1 & 0x00ff0000) >> 16);
-+ macaddr[5] = (uint8_t)((tmp1 & 0xff000000) >> 24);
-+}
-+
-+void fman_dtsec_set_hash_table(struct dtsec_regs *regs, uint32_t crc, bool mcast, bool ghtx)
-+{
-+ int32_t bucket;
-+ if (ghtx)
-+ bucket = (int32_t)((crc >> 23) & 0x1ff);
-+ else {
-+ bucket = (int32_t)((crc >> 24) & 0xff);
-+ /* if !ghtx and mcast the bit must be set in gaddr instead of igaddr. */
-+ if (mcast)
-+ bucket += 0x100;
-+ }
-+ fman_dtsec_set_bucket(regs, bucket, TRUE);
-+}
-+
-+void fman_dtsec_set_bucket(struct dtsec_regs *regs, int bucket, bool enable)
-+{
-+ int reg_idx = (bucket >> 5) & 0xf;
-+ int bit_idx = bucket & 0x1f;
-+ uint32_t bit_mask = 0x80000000 >> bit_idx;
-+ uint32_t *reg;
-+
-+ if (reg_idx > 7)
-+ reg = &regs->gaddr[reg_idx-8];
-+ else
-+ reg = &regs->igaddr[reg_idx];
-+
-+ if (enable)
-+ iowrite32be(ioread32be(reg) | bit_mask, reg);
-+ else
-+ iowrite32be(ioread32be(reg) & (~bit_mask), reg);
-+}
-+
-+void fman_dtsec_reset_filter_table(struct dtsec_regs *regs, bool mcast, bool ucast)
-+{
-+ int i;
-+ bool ghtx;
-+
-+ ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? TRUE : FALSE);
-+
-+ if (ucast || (ghtx && mcast)) {
-+ for (i = 0; i < NUM_OF_HASH_REGS; i++)
-+ iowrite32be(0, &regs->igaddr[i]);
-+ }
-+ if (mcast) {
-+ for (i = 0; i < NUM_OF_HASH_REGS; i++)
-+ iowrite32be(0, &regs->gaddr[i]);
-+ }
-+}
-+
-+int fman_dtsec_set_tbi_phy_addr(struct dtsec_regs *regs,
-+ uint8_t addr)
-+{
-+ if (addr > 0 && addr < 32)
-+ iowrite32be(addr, &regs->tbipa);
-+ else
-+ return -EINVAL;
-+
-+ return 0;
-+}
-+
-+void fman_dtsec_set_wol(struct dtsec_regs *regs, bool en)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->maccfg2);
-+ if (en)
-+ tmp |= MACCFG2_MAGIC_PACKET_EN;
-+ else
-+ tmp &= ~MACCFG2_MAGIC_PACKET_EN;
-+ iowrite32be(tmp, &regs->maccfg2);
-+}
-+
-+int fman_dtsec_adjust_link(struct dtsec_regs *regs,
-+ enum enet_interface iface_mode,
-+ enum enet_speed speed, bool full_dx)
-+{
-+ uint32_t tmp;
-+
-+ UNUSED(iface_mode);
-+
-+ if ((speed == E_ENET_SPEED_1000) && !full_dx)
-+ return -EINVAL;
-+
-+ tmp = ioread32be(&regs->maccfg2);
-+ if (!full_dx)
-+ tmp &= ~MACCFG2_FULL_DUPLEX;
-+ else
-+ tmp |= MACCFG2_FULL_DUPLEX;
-+
-+ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
-+ if (speed < E_ENET_SPEED_1000)
-+ tmp |= MACCFG2_NIBBLE_MODE;
-+ else if (speed == E_ENET_SPEED_1000)
-+ tmp |= MACCFG2_BYTE_MODE;
-+ iowrite32be(tmp, &regs->maccfg2);
-+
-+ tmp = ioread32be(&regs->ecntrl);
-+ if (speed == E_ENET_SPEED_100)
-+ tmp |= DTSEC_ECNTRL_R100M;
-+ else
-+ tmp &= ~DTSEC_ECNTRL_R100M;
-+ iowrite32be(tmp, &regs->ecntrl);
-+
-+ return 0;
-+}
-+
-+void fman_dtsec_set_uc_promisc(struct dtsec_regs *regs, bool enable)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->rctrl);
-+
-+ if (enable)
-+ tmp |= RCTRL_UPROM;
-+ else
-+ tmp &= ~RCTRL_UPROM;
-+
-+ iowrite32be(tmp, &regs->rctrl);
-+}
-+
-+void fman_dtsec_set_mc_promisc(struct dtsec_regs *regs, bool enable)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->rctrl);
-+
-+ if (enable)
-+ tmp |= RCTRL_MPROM;
-+ else
-+ tmp &= ~RCTRL_MPROM;
-+
-+ iowrite32be(tmp, &regs->rctrl);
-+}
-+
-+bool fman_dtsec_get_clear_carry_regs(struct dtsec_regs *regs,
-+ uint32_t *car1, uint32_t *car2)
-+{
-+ /* read carry registers */
-+ *car1 = ioread32be(&regs->car1);
-+ *car2 = ioread32be(&regs->car2);
-+ /* clear carry registers */
-+ if (*car1)
-+ iowrite32be(*car1, &regs->car1);
-+ if (*car2)
-+ iowrite32be(*car2, &regs->car2);
-+
-+ return (bool)((*car1 | *car2) ? TRUE : FALSE);
-+}
-+
-+void fman_dtsec_reset_stat(struct dtsec_regs *regs)
-+{
-+ /* clear HW counters */
-+ iowrite32be(ioread32be(&regs->ecntrl) |
-+ DTSEC_ECNTRL_CLRCNT, &regs->ecntrl);
-+}
-+
-+int fman_dtsec_set_stat_level(struct dtsec_regs *regs, enum dtsec_stat_level level)
-+{
-+ switch (level) {
-+ case E_MAC_STAT_NONE:
-+ iowrite32be(0xffffffff, &regs->cam1);
-+ iowrite32be(0xffffffff, &regs->cam2);
-+ iowrite32be(ioread32be(&regs->ecntrl) & ~DTSEC_ECNTRL_STEN,
-+ &regs->ecntrl);
-+ iowrite32be(ioread32be(&regs->imask) & ~DTSEC_IMASK_MSROEN,
-+ &regs->imask);
-+ break;
-+ case E_MAC_STAT_PARTIAL:
-+ iowrite32be(CAM1_ERRORS_ONLY, &regs->cam1);
-+ iowrite32be(CAM2_ERRORS_ONLY, &regs->cam2);
-+ iowrite32be(ioread32be(&regs->ecntrl) | DTSEC_ECNTRL_STEN,
-+ &regs->ecntrl);
-+ iowrite32be(ioread32be(&regs->imask) | DTSEC_IMASK_MSROEN,
-+ &regs->imask);
-+ break;
-+ case E_MAC_STAT_MIB_GRP1:
-+ iowrite32be((uint32_t)~CAM1_MIB_GRP_1, &regs->cam1);
-+ iowrite32be((uint32_t)~CAM2_MIB_GRP_1, &regs->cam2);
-+ iowrite32be(ioread32be(&regs->ecntrl) | DTSEC_ECNTRL_STEN,
-+ &regs->ecntrl);
-+ iowrite32be(ioread32be(&regs->imask) | DTSEC_IMASK_MSROEN,
-+ &regs->imask);
-+ break;
-+ case E_MAC_STAT_FULL:
-+ iowrite32be(0, &regs->cam1);
-+ iowrite32be(0, &regs->cam2);
-+ iowrite32be(ioread32be(&regs->ecntrl) | DTSEC_ECNTRL_STEN,
-+ &regs->ecntrl);
-+ iowrite32be(ioread32be(&regs->imask) | DTSEC_IMASK_MSROEN,
-+ &regs->imask);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+void fman_dtsec_set_ts(struct dtsec_regs *regs, bool en)
-+{
-+ if (en) {
-+ iowrite32be(ioread32be(&regs->rctrl) | RCTRL_RTSE,
-+ &regs->rctrl);
-+ iowrite32be(ioread32be(&regs->tctrl) | DTSEC_TCTRL_TTSE,
-+ &regs->tctrl);
-+ } else {
-+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_RTSE,
-+ &regs->rctrl);
-+ iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_TTSE,
-+ &regs->tctrl);
-+ }
-+}
-+
-+void fman_dtsec_enable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->maccfg1);
-+
-+ if (apply_rx)
-+ tmp |= MACCFG1_RX_EN ;
-+
-+ if (apply_tx)
-+ tmp |= MACCFG1_TX_EN ;
-+
-+ iowrite32be(tmp, &regs->maccfg1);
-+}
-+
-+void fman_dtsec_clear_addr_in_paddr(struct dtsec_regs *regs, uint8_t paddr_num)
-+{
-+ iowrite32be(0, &regs->macaddr[paddr_num].exact_match1);
-+ iowrite32be(0, &regs->macaddr[paddr_num].exact_match2);
-+}
-+
-+void fman_dtsec_add_addr_in_paddr(struct dtsec_regs *regs,
-+ uint64_t addr,
-+ uint8_t paddr_num)
-+{
-+ uint32_t tmp;
-+
-+ tmp = (uint32_t)(addr);
-+ /* swap */
-+ tmp = (((tmp & 0x000000FF) << 24) |
-+ ((tmp & 0x0000FF00) << 8) |
-+ ((tmp & 0x00FF0000) >> 8) |
-+ ((tmp & 0xFF000000) >> 24));
-+ iowrite32be(tmp, &regs->macaddr[paddr_num].exact_match1);
-+
-+ tmp = (uint32_t)(addr>>32);
-+ /* swap */
-+ tmp = (((tmp & 0x000000FF) << 24) |
-+ ((tmp & 0x0000FF00) << 8) |
-+ ((tmp & 0x00FF0000) >> 8) |
-+ ((tmp & 0xFF000000) >> 24));
-+ iowrite32be(tmp, &regs->macaddr[paddr_num].exact_match2);
-+}
-+
-+void fman_dtsec_disable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->maccfg1);
-+
-+ if (apply_rx)
-+ tmp &= ~MACCFG1_RX_EN;
-+
-+ if (apply_tx)
-+ tmp &= ~MACCFG1_TX_EN;
-+
-+ iowrite32be(tmp, &regs->maccfg1);
-+}
-+
-+void fman_dtsec_set_tx_pause_frames(struct dtsec_regs *regs, uint16_t time)
-+{
-+ uint32_t ptv = 0;
-+
-+ /* fixme: don't enable tx pause for half-duplex */
-+
-+ if (time) {
-+ ptv = ioread32be(&regs->ptv);
-+ ptv &= 0xffff0000;
-+ ptv |= time & 0x0000ffff;
-+ iowrite32be(ptv, &regs->ptv);
-+
-+ /* trigger the transmission of a flow-control pause frame */
-+ iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
-+ &regs->maccfg1);
-+ } else
-+ iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
-+ &regs->maccfg1);
-+}
-+
-+void fman_dtsec_handle_rx_pause(struct dtsec_regs *regs, bool en)
-+{
-+ uint32_t tmp;
-+
-+ /* todo: check if mac is set to full-duplex */
-+
-+ tmp = ioread32be(&regs->maccfg1);
-+ if (en)
-+ tmp |= MACCFG1_RX_FLOW;
-+ else
-+ tmp &= ~MACCFG1_RX_FLOW;
-+ iowrite32be(tmp, &regs->maccfg1);
-+}
-+
-+uint32_t fman_dtsec_get_rctrl(struct dtsec_regs *regs)
-+{
-+ return ioread32be(&regs->rctrl);
-+}
-+
-+uint32_t fman_dtsec_get_revision(struct dtsec_regs *regs)
-+{
-+ return ioread32be(&regs->tsec_id);
-+}
-+
-+uint32_t fman_dtsec_get_event(struct dtsec_regs *regs, uint32_t ev_mask)
-+{
-+ return ioread32be(&regs->ievent) & ev_mask;
-+}
-+
-+void fman_dtsec_ack_event(struct dtsec_regs *regs, uint32_t ev_mask)
-+{
-+ iowrite32be(ev_mask, &regs->ievent);
-+}
-+
-+uint32_t fman_dtsec_get_interrupt_mask(struct dtsec_regs *regs)
-+{
-+ return ioread32be(&regs->imask);
-+}
-+
-+uint32_t fman_dtsec_check_and_clear_tmr_event(struct dtsec_regs *regs)
-+{
-+ uint32_t event;
-+
-+ event = ioread32be(&regs->tmr_pevent);
-+ event &= ioread32be(&regs->tmr_pemask);
-+
-+ if (event)
-+ iowrite32be(event, &regs->tmr_pevent);
-+ return event;
-+}
-+
-+void fman_dtsec_enable_tmr_interrupt(struct dtsec_regs *regs)
-+{
-+ iowrite32be(ioread32be(&regs->tmr_pemask) | TMR_PEMASK_TSREEN,
-+ &regs->tmr_pemask);
-+}
-+
-+void fman_dtsec_disable_tmr_interrupt(struct dtsec_regs *regs)
-+{
-+ iowrite32be(ioread32be(&regs->tmr_pemask) & ~TMR_PEMASK_TSREEN,
-+ &regs->tmr_pemask);
-+}
-+
-+void fman_dtsec_enable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask)
-+{
-+ iowrite32be(ioread32be(&regs->imask) | ev_mask, &regs->imask);
-+}
-+
-+void fman_dtsec_disable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask)
-+{
-+ iowrite32be(ioread32be(&regs->imask) & ~ev_mask, &regs->imask);
-+}
-+
-+uint32_t fman_dtsec_get_stat_counter(struct dtsec_regs *regs,
-+ enum dtsec_stat_counters reg_name)
-+{
-+ uint32_t ret_val;
-+
-+ switch (reg_name) {
-+ case E_DTSEC_STAT_TR64:
-+ ret_val = ioread32be(&regs->tr64);
-+ break;
-+ case E_DTSEC_STAT_TR127:
-+ ret_val = ioread32be(&regs->tr127);
-+ break;
-+ case E_DTSEC_STAT_TR255:
-+ ret_val = ioread32be(&regs->tr255);
-+ break;
-+ case E_DTSEC_STAT_TR511:
-+ ret_val = ioread32be(&regs->tr511);
-+ break;
-+ case E_DTSEC_STAT_TR1K:
-+ ret_val = ioread32be(&regs->tr1k);
-+ break;
-+ case E_DTSEC_STAT_TRMAX:
-+ ret_val = ioread32be(&regs->trmax);
-+ break;
-+ case E_DTSEC_STAT_TRMGV:
-+ ret_val = ioread32be(&regs->trmgv);
-+ break;
-+ case E_DTSEC_STAT_RBYT:
-+ ret_val = ioread32be(&regs->rbyt);
-+ break;
-+ case E_DTSEC_STAT_RPKT:
-+ ret_val = ioread32be(&regs->rpkt);
-+ break;
-+ case E_DTSEC_STAT_RMCA:
-+ ret_val = ioread32be(&regs->rmca);
-+ break;
-+ case E_DTSEC_STAT_RBCA:
-+ ret_val = ioread32be(&regs->rbca);
-+ break;
-+ case E_DTSEC_STAT_RXPF:
-+ ret_val = ioread32be(&regs->rxpf);
-+ break;
-+ case E_DTSEC_STAT_RALN:
-+ ret_val = ioread32be(&regs->raln);
-+ break;
-+ case E_DTSEC_STAT_RFLR:
-+ ret_val = ioread32be(&regs->rflr);
-+ break;
-+ case E_DTSEC_STAT_RCDE:
-+ ret_val = ioread32be(&regs->rcde);
-+ break;
-+ case E_DTSEC_STAT_RCSE:
-+ ret_val = ioread32be(&regs->rcse);
-+ break;
-+ case E_DTSEC_STAT_RUND:
-+ ret_val = ioread32be(&regs->rund);
-+ break;
-+ case E_DTSEC_STAT_ROVR:
-+ ret_val = ioread32be(&regs->rovr);
-+ break;
-+ case E_DTSEC_STAT_RFRG:
-+ ret_val = ioread32be(&regs->rfrg);
-+ break;
-+ case E_DTSEC_STAT_RJBR:
-+ ret_val = ioread32be(&regs->rjbr);
-+ break;
-+ case E_DTSEC_STAT_RDRP:
-+ ret_val = ioread32be(&regs->rdrp);
-+ break;
-+ case E_DTSEC_STAT_TFCS:
-+ ret_val = ioread32be(&regs->tfcs);
-+ break;
-+ case E_DTSEC_STAT_TBYT:
-+ ret_val = ioread32be(&regs->tbyt);
-+ break;
-+ case E_DTSEC_STAT_TPKT:
-+ ret_val = ioread32be(&regs->tpkt);
-+ break;
-+ case E_DTSEC_STAT_TMCA:
-+ ret_val = ioread32be(&regs->tmca);
-+ break;
-+ case E_DTSEC_STAT_TBCA:
-+ ret_val = ioread32be(&regs->tbca);
-+ break;
-+ case E_DTSEC_STAT_TXPF:
-+ ret_val = ioread32be(&regs->txpf);
-+ break;
-+ case E_DTSEC_STAT_TNCL:
-+ ret_val = ioread32be(&regs->tncl);
-+ break;
-+ case E_DTSEC_STAT_TDRP:
-+ ret_val = ioread32be(&regs->tdrp);
-+ break;
-+ default:
-+ ret_val = 0;
-+ }
-+
-+ return ret_val;
-+}
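To make the bucket-to-register mapping in fman_dtsec_set_bucket() concrete, a worked example:

/* Worked example for fman_dtsec_set_bucket(): a multicast hash hit with
 * GHTX clear and an 8-bit bucket of 44 gets 0x100 added by
 * fman_dtsec_set_hash_table(), giving bucket 300:
 *
 *   reg_idx  = (300 >> 5) & 0xf  = 9   ->  9 > 7, so gaddr[9 - 8] = GADDR1
 *   bit_idx  =  300 & 0x1f       = 12
 *   bit_mask =  0x80000000 >> 12 = 0x00080000
 *
 * Enabling the bucket ORs 0x00080000 into GADDR1; disabling it clears
 * the same bit.                                                        */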
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_dtsec_mii_acc.c
-@@ -0,0 +1,165 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "common/general.h"
-+#include "fsl_fman_dtsec_mii_acc.h"
-+
-+
-+/**
-+ * dtsec_mii_get_div() - calculates the value of the dtsec mii divider
-+ * @dtsec_freq: dtsec clock frequency (in MHz)
-+ *
-+ * This function calculates the dtsec mii clock divider that determines
-+ * the MII MDC clock. The MII MDC clock will be set to work in the range
-+ * of 1.5 to 2.5 MHz.
-+ * The output of this function is the value of MIIMCFG[MgmtClk] which
-+ * implicitly determines the divider value.
-+ * Note: the dTSEC system clock is equal to 1/2 of the FMan clock.
-+ *
-+ * The table below, which reflects dtsec_mii_get_div() functionality,
-+ * shows the relations among dtsec_freq, MgmtClk, the actual divider
-+ * and the MII frequency:
-+ *
-+ * dtsec freq MgmtClk div MII freq MHz
-+ * [0.....80] 1 (1/4)(1/8) [0 to 2.5]
-+ * [81...120] 2 (1/6)(1/8) [1.6 to 2.5]
-+ * [121..160] 3 (1/8)(1/8) [1.8 to 2.5]
-+ * [161..200] 4 (1/10)(1/8) [2.0 to 2.5]
-+ * [201..280] 5 (1/14)(1/8) [1.8 to 2.5]
-+ * [281..400] 6 (1/20)(1/8) [1.1 to 2.5]
-+ * [401..560] 7 (1/28)(1/8) [1.8 to 2.5]
-+ * [560..frq] 7 (1/28)(1/8) [frq/224]
-+ *
-+ * Returns: the MIIMCFG[MgmtClk] appropriate value
-+ */
-+
-+static uint8_t dtsec_mii_get_div(uint16_t dtsec_freq)
-+{
-+ uint16_t mgmt_clk;
-+
-+ if (dtsec_freq < 80) mgmt_clk = 1;
-+ else if (dtsec_freq < 120) mgmt_clk = 2;
-+ else if (dtsec_freq < 160) mgmt_clk = 3;
-+ else if (dtsec_freq < 200) mgmt_clk = 4;
-+ else if (dtsec_freq < 280) mgmt_clk = 5;
-+ else if (dtsec_freq < 400) mgmt_clk = 6;
-+ else mgmt_clk = 7;
-+
-+ return (uint8_t)mgmt_clk;
-+}
-+
-+void fman_dtsec_mii_reset(struct dtsec_mii_reg *regs)
-+{
-+ /* Reset the management interface */
-+ iowrite32be(ioread32be(&regs->miimcfg) | MIIMCFG_RESET_MGMT,
-+ &regs->miimcfg);
-+ iowrite32be(ioread32be(&regs->miimcfg) & ~MIIMCFG_RESET_MGMT,
-+ &regs->miimcfg);
-+}
-+
-+
-+int fman_dtsec_mii_write_reg(struct dtsec_mii_reg *regs, uint8_t addr,
-+ uint8_t reg, uint16_t data, uint16_t dtsec_freq)
-+{
-+ uint32_t tmp;
-+
-+ /* Setup the MII Mgmt clock speed */
-+ iowrite32be((uint32_t)dtsec_mii_get_div(dtsec_freq), &regs->miimcfg);
-+ wmb();
-+
-+ /* Stop the MII management read cycle */
-+ iowrite32be(0, &regs->miimcom);
-+ /* Dummy read to make sure MIIMCOM is written */
-+ tmp = ioread32be(&regs->miimcom);
-+ wmb();
-+
-+ /* Setting up MII Management Address Register */
-+ tmp = (uint32_t)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
-+ iowrite32be(tmp, &regs->miimadd);
-+ wmb();
-+
-+ /* Setting up MII Management Control Register with data */
-+ iowrite32be((uint32_t)data, &regs->miimcon);
-+ /* Dummy read to make sure MIIMCON is written */
-+ tmp = ioread32be(&regs->miimcon);
-+ wmb();
-+
-+ /* Wait until MII management write is complete */
-+ /* todo: a timeout could be useful here */
-+ while ((ioread32be(&regs->miimind)) & MIIMIND_BUSY)
-+ /* busy wait */;
-+
-+ return 0;
-+}
-+
-+int fman_dtsec_mii_read_reg(struct dtsec_mii_reg *regs, uint8_t addr,
-+ uint8_t reg, uint16_t *data, uint16_t dtsec_freq)
-+{
-+ uint32_t tmp;
-+
-+ /* Setup the MII Mgmt clock speed */
-+ iowrite32be((uint32_t)dtsec_mii_get_div(dtsec_freq), &regs->miimcfg);
-+ wmb();
-+
-+ /* Setting up the MII Management Address Register */
-+ tmp = (uint32_t)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
-+ iowrite32be(tmp, &regs->miimadd);
-+ wmb();
-+
-+ /* Perform an MII management read cycle */
-+ iowrite32be(MIIMCOM_READ_CYCLE, &regs->miimcom);
-+ /* Dummy read to make sure MIIMCOM is written */
-+ tmp = ioread32be(&regs->miimcom);
-+ wmb();
-+
-+ /* Wait until MII management read is complete */
-+ /* todo: a timeout could be useful here */
-+ while ((ioread32be(&regs->miimind)) & MIIMIND_BUSY)
-+ /* busy wait */;
-+
-+ /* Read MII management status */
-+ *data = (uint16_t)ioread32be(&regs->miimstat);
-+ wmb();
-+
-+ iowrite32be(0, &regs->miimcom);
-+ /* Dummy read to make sure MIIMCOM is written */
-+ tmp = ioread32be(&regs->miimcom);
-+
-+ if (*data == 0xffff)
-+ return -ENXIO;
-+
-+ return 0;
-+}
-+
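A worked instance of the divider table above, together with the read sequence it feeds. This is a sketch only: the 666 MHz FMan clock, the PHY address and the register number are illustrative, and dtsec_mii_get_div() is resolved internally by the read/write helpers:

static int example_read_phy_status(struct dtsec_mii_reg *regs)
{
        /* Half of a 666 MHz FMan clock gives dtsec_freq = 333 MHz; that
         * falls in the [281..400] row, so MgmtClk = 6 and the MDC clock
         * is 333 / (20 * 8) ~= 2.08 MHz, inside the 1.5-2.5 MHz window. */
        uint16_t dtsec_freq = 333;
        uint16_t status;

        /* Read register 1 (basic status) of the PHY at address 0x10;
         * returns -ENXIO if the bus reads back all-ones.                */
        return fman_dtsec_mii_read_reg(regs, 0x10, 1, &status, dtsec_freq);
}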
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac.c
-@@ -0,0 +1,532 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "fsl_fman_memac.h"
-+
-+
-+uint32_t fman_memac_get_event(struct memac_regs *regs, uint32_t ev_mask)
-+{
-+ return ioread32be(&regs->ievent) & ev_mask;
-+}
-+
-+uint32_t fman_memac_get_interrupt_mask(struct memac_regs *regs)
-+{
-+ return ioread32be(&regs->imask);
-+}
-+
-+void fman_memac_ack_event(struct memac_regs *regs, uint32_t ev_mask)
-+{
-+ iowrite32be(ev_mask, &regs->ievent);
-+}
-+
-+void fman_memac_set_promiscuous(struct memac_regs *regs, bool val)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+
-+ if (val)
-+ tmp |= CMD_CFG_PROMIS_EN;
-+ else
-+ tmp &= ~CMD_CFG_PROMIS_EN;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+void fman_memac_clear_addr_in_paddr(struct memac_regs *regs,
-+ uint8_t paddr_num)
-+{
-+ if (paddr_num == 0) {
-+ iowrite32be(0, &regs->mac_addr0.mac_addr_l);
-+ iowrite32be(0, &regs->mac_addr0.mac_addr_u);
-+ } else {
-+ iowrite32be(0x0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
-+ iowrite32be(0x0, &regs->mac_addr[paddr_num - 1].mac_addr_u);
-+ }
-+}
-+
-+void fman_memac_add_addr_in_paddr(struct memac_regs *regs,
-+ uint8_t *adr,
-+ uint8_t paddr_num)
-+{
-+ uint32_t tmp0, tmp1;
-+
-+ tmp0 = (uint32_t)(adr[0] |
-+ adr[1] << 8 |
-+ adr[2] << 16 |
-+ adr[3] << 24);
-+ tmp1 = (uint32_t)(adr[4] | adr[5] << 8);
-+
-+ if (paddr_num == 0) {
-+ iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
-+ iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
-+ } else {
-+ iowrite32be(tmp0, &regs->mac_addr[paddr_num-1].mac_addr_l);
-+ iowrite32be(tmp1, &regs->mac_addr[paddr_num-1].mac_addr_u);
-+ }
-+}
-+
-+void fman_memac_enable(struct memac_regs *regs, bool apply_rx, bool apply_tx)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+
-+ if (apply_rx)
-+ tmp |= CMD_CFG_RX_EN;
-+
-+ if (apply_tx)
-+ tmp |= CMD_CFG_TX_EN;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+void fman_memac_disable(struct memac_regs *regs, bool apply_rx, bool apply_tx)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+
-+ if (apply_rx)
-+ tmp &= ~CMD_CFG_RX_EN;
-+
-+ if (apply_tx)
-+ tmp &= ~CMD_CFG_TX_EN;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+void fman_memac_reset_stat(struct memac_regs *regs)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->statn_config);
-+
-+ tmp |= STATS_CFG_CLR;
-+
-+ iowrite32be(tmp, &regs->statn_config);
-+
-+ while (ioread32be(&regs->statn_config) & STATS_CFG_CLR);
-+}
-+
-+void fman_memac_reset(struct memac_regs *regs)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+
-+ tmp |= CMD_CFG_SW_RESET;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+
-+ while (ioread32be(&regs->command_config) & CMD_CFG_SW_RESET);
-+}
-+
-+int fman_memac_init(struct memac_regs *regs,
-+ struct memac_cfg *cfg,
-+ enum enet_interface enet_interface,
-+ enum enet_speed enet_speed,
-+ bool slow_10g_if,
-+ uint32_t exceptions)
-+{
-+ uint32_t tmp;
-+
-+ /* Config */
-+ tmp = 0;
-+ if (cfg->wan_mode_enable)
-+ tmp |= CMD_CFG_WAN_MODE;
-+ if (cfg->promiscuous_mode_enable)
-+ tmp |= CMD_CFG_PROMIS_EN;
-+ if (cfg->pause_forward_enable)
-+ tmp |= CMD_CFG_PAUSE_FWD;
-+ if (cfg->pause_ignore)
-+ tmp |= CMD_CFG_PAUSE_IGNORE;
-+ if (cfg->tx_addr_ins_enable)
-+ tmp |= CMD_CFG_TX_ADDR_INS;
-+ if (cfg->loopback_enable)
-+ tmp |= CMD_CFG_LOOPBACK_EN;
-+ if (cfg->cmd_frame_enable)
-+ tmp |= CMD_CFG_CNT_FRM_EN;
-+ if (cfg->send_idle_enable)
-+ tmp |= CMD_CFG_SEND_IDLE;
-+ if (cfg->no_length_check_enable)
-+ tmp |= CMD_CFG_NO_LEN_CHK;
-+ if (cfg->rx_sfd_any)
-+ tmp |= CMD_CFG_SFD_ANY;
-+ if (cfg->pad_enable)
-+ tmp |= CMD_CFG_TX_PAD_EN;
-+ if (cfg->wake_on_lan)
-+ tmp |= CMD_CFG_MG;
-+
-+ tmp |= CMD_CFG_CRC_FWD;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+
-+ /* Max Frame Length */
-+ iowrite32be((uint32_t)cfg->max_frame_length, &regs->maxfrm);
-+
-+ /* Pause Time */
-+ iowrite32be((uint32_t)cfg->pause_quanta, &regs->pause_quanta[0]);
-+ iowrite32be((uint32_t)0, &regs->pause_thresh[0]);
-+
-+ /* IF_MODE */
-+ tmp = 0;
-+ switch (enet_interface) {
-+ case E_ENET_IF_XGMII:
-+ case E_ENET_IF_XFI:
-+ tmp |= IF_MODE_XGMII;
-+ break;
-+ default:
-+ tmp |= IF_MODE_GMII;
-+ if (enet_interface == E_ENET_IF_RGMII && !cfg->loopback_enable)
-+ tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
-+ }
-+ iowrite32be(tmp, &regs->if_mode);
-+
-+ /* TX_FIFO_SECTIONS */
-+ tmp = 0;
-+ if (enet_interface == E_ENET_IF_XGMII ||
-+ enet_interface == E_ENET_IF_XFI) {
-+ if(slow_10g_if) {
-+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
-+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
-+ } else {
-+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
-+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
-+ }
-+ } else {
-+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
-+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
-+ }
-+ iowrite32be(tmp, &regs->tx_fifo_sections);
-+
-+ /* clear all pending events and set-up interrupts */
-+ fman_memac_ack_event(regs, 0xffffffff);
-+ fman_memac_set_exception(regs, exceptions, TRUE);
-+
-+ return 0;
-+}
-+
-+void fman_memac_set_exception(struct memac_regs *regs, uint32_t val, bool enable)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->imask);
-+ if (enable)
-+ tmp |= val;
-+ else
-+ tmp &= ~val;
-+
-+ iowrite32be(tmp, &regs->imask);
-+}
-+
-+void fman_memac_reset_filter_table(struct memac_regs *regs)
-+{
-+ uint32_t i;
-+ for (i = 0; i < 64; i++)
-+ iowrite32be(i & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
-+}
-+
-+void fman_memac_set_hash_table_entry(struct memac_regs *regs, uint32_t crc)
-+{
-+ iowrite32be(crc | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
-+}
-+
-+void fman_memac_set_hash_table(struct memac_regs *regs, uint32_t val)
-+{
-+ iowrite32be(val, &regs->hashtable_ctrl);
-+}
-+
-+uint16_t fman_memac_get_max_frame_len(struct memac_regs *regs)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->maxfrm);
-+
-+ return(uint16_t)tmp;
-+}
-+
-+
-+void fman_memac_set_tx_pause_frames(struct memac_regs *regs,
-+ uint8_t priority,
-+ uint16_t pause_time,
-+ uint16_t thresh_time)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->tx_fifo_sections);
-+
-+ if (priority == 0xff) {
-+ GET_TX_EMPTY_DEFAULT_VALUE(tmp);
-+ iowrite32be(tmp, &regs->tx_fifo_sections);
-+
-+ tmp = ioread32be(&regs->command_config);
-+ tmp &= ~CMD_CFG_PFC_MODE;
-+ priority = 0;
-+ } else {
-+ GET_TX_EMPTY_PFC_VALUE(tmp);
-+ iowrite32be(tmp, &regs->tx_fifo_sections);
-+
-+ tmp = ioread32be(&regs->command_config);
-+ tmp |= CMD_CFG_PFC_MODE;
-+ }
-+
-+ iowrite32be(tmp, &regs->command_config);
-+
-+ tmp = ioread32be(&regs->pause_quanta[priority / 2]);
-+ if (priority % 2)
-+ tmp &= 0x0000FFFF;
-+ else
-+ tmp &= 0xFFFF0000;
-+ tmp |= ((uint32_t)pause_time << (16 * (priority % 2)));
-+ iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
-+
-+ tmp = ioread32be(&regs->pause_thresh[priority / 2]);
-+ if (priority % 2)
-+ tmp &= 0x0000FFFF;
-+ else
-+ tmp &= 0xFFFF0000;
-+ tmp |= ((uint32_t)thresh_time<<(16 * (priority % 2)));
-+ iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
-+}
-+
-+void fman_memac_set_rx_ignore_pause_frames(struct memac_regs *regs,bool enable)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+ if (enable)
-+ tmp |= CMD_CFG_PAUSE_IGNORE;
-+ else
-+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+void fman_memac_set_wol(struct memac_regs *regs, bool enable)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+
-+ if (enable)
-+ tmp |= CMD_CFG_MG;
-+ else
-+ tmp &= ~CMD_CFG_MG;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+#define GET_MEMAC_CNTR_64(bn) \
-+ (ioread32be(&regs->bn ## _l) | \
-+ ((uint64_t)ioread32be(&regs->bn ## _u) << 32))
-+
-+uint64_t fman_memac_get_counter(struct memac_regs *regs,
-+ enum memac_counters reg_name)
-+{
-+ uint64_t ret_val;
-+
-+ switch (reg_name) {
-+ case E_MEMAC_COUNTER_R64:
-+ ret_val = GET_MEMAC_CNTR_64(r64);
-+ break;
-+ case E_MEMAC_COUNTER_T64:
-+ ret_val = GET_MEMAC_CNTR_64(t64);
-+ break;
-+ case E_MEMAC_COUNTER_R127:
-+ ret_val = GET_MEMAC_CNTR_64(r127);
-+ break;
-+ case E_MEMAC_COUNTER_T127:
-+ ret_val = GET_MEMAC_CNTR_64(t127);
-+ break;
-+ case E_MEMAC_COUNTER_R255:
-+ ret_val = GET_MEMAC_CNTR_64(r255);
-+ break;
-+ case E_MEMAC_COUNTER_T255:
-+ ret_val = GET_MEMAC_CNTR_64(t255);
-+ break;
-+ case E_MEMAC_COUNTER_R511:
-+ ret_val = GET_MEMAC_CNTR_64(r511);
-+ break;
-+ case E_MEMAC_COUNTER_T511:
-+ ret_val = GET_MEMAC_CNTR_64(t511);
-+ break;
-+ case E_MEMAC_COUNTER_R1023:
-+ ret_val = GET_MEMAC_CNTR_64(r1023);
-+ break;
-+ case E_MEMAC_COUNTER_T1023:
-+ ret_val = GET_MEMAC_CNTR_64(t1023);
-+ break;
-+ case E_MEMAC_COUNTER_R1518:
-+ ret_val = GET_MEMAC_CNTR_64(r1518);
-+ break;
-+ case E_MEMAC_COUNTER_T1518:
-+ ret_val = GET_MEMAC_CNTR_64(t1518);
-+ break;
-+ case E_MEMAC_COUNTER_R1519X:
-+ ret_val = GET_MEMAC_CNTR_64(r1519x);
-+ break;
-+ case E_MEMAC_COUNTER_T1519X:
-+ ret_val = GET_MEMAC_CNTR_64(t1519x);
-+ break;
-+ case E_MEMAC_COUNTER_RFRG:
-+ ret_val = GET_MEMAC_CNTR_64(rfrg);
-+ break;
-+ case E_MEMAC_COUNTER_RJBR:
-+ ret_val = GET_MEMAC_CNTR_64(rjbr);
-+ break;
-+ case E_MEMAC_COUNTER_RDRP:
-+ ret_val = GET_MEMAC_CNTR_64(rdrp);
-+ break;
-+ case E_MEMAC_COUNTER_RALN:
-+ ret_val = GET_MEMAC_CNTR_64(raln);
-+ break;
-+ case E_MEMAC_COUNTER_TUND:
-+ ret_val = GET_MEMAC_CNTR_64(tund);
-+ break;
-+ case E_MEMAC_COUNTER_ROVR:
-+ ret_val = GET_MEMAC_CNTR_64(rovr);
-+ break;
-+ case E_MEMAC_COUNTER_RXPF:
-+ ret_val = GET_MEMAC_CNTR_64(rxpf);
-+ break;
-+ case E_MEMAC_COUNTER_TXPF:
-+ ret_val = GET_MEMAC_CNTR_64(txpf);
-+ break;
-+ case E_MEMAC_COUNTER_ROCT:
-+ ret_val = GET_MEMAC_CNTR_64(roct);
-+ break;
-+ case E_MEMAC_COUNTER_RMCA:
-+ ret_val = GET_MEMAC_CNTR_64(rmca);
-+ break;
-+ case E_MEMAC_COUNTER_RBCA:
-+ ret_val = GET_MEMAC_CNTR_64(rbca);
-+ break;
-+ case E_MEMAC_COUNTER_RPKT:
-+ ret_val = GET_MEMAC_CNTR_64(rpkt);
-+ break;
-+ case E_MEMAC_COUNTER_RUCA:
-+ ret_val = GET_MEMAC_CNTR_64(ruca);
-+ break;
-+ case E_MEMAC_COUNTER_RERR:
-+ ret_val = GET_MEMAC_CNTR_64(rerr);
-+ break;
-+ case E_MEMAC_COUNTER_TOCT:
-+ ret_val = GET_MEMAC_CNTR_64(toct);
-+ break;
-+ case E_MEMAC_COUNTER_TMCA:
-+ ret_val = GET_MEMAC_CNTR_64(tmca);
-+ break;
-+ case E_MEMAC_COUNTER_TBCA:
-+ ret_val = GET_MEMAC_CNTR_64(tbca);
-+ break;
-+ case E_MEMAC_COUNTER_TUCA:
-+ ret_val = GET_MEMAC_CNTR_64(tuca);
-+ break;
-+ case E_MEMAC_COUNTER_TERR:
-+ ret_val = GET_MEMAC_CNTR_64(terr);
-+ break;
-+ default:
-+ ret_val = 0;
-+ }
-+
-+ return ret_val;
-+}
-+
-+void fman_memac_adjust_link(struct memac_regs *regs,
-+ enum enet_interface iface_mode,
-+ enum enet_speed speed, bool full_dx)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->if_mode);
-+
-+ if (full_dx)
-+ tmp &= ~IF_MODE_HD;
-+ else
-+ tmp |= IF_MODE_HD;
-+
-+ if (iface_mode == E_ENET_IF_RGMII) {
-+ /* Configure RGMII in manual mode */
-+ tmp &= ~IF_MODE_RGMII_AUTO;
-+ tmp &= ~IF_MODE_RGMII_SP_MASK;
-+
-+ if (full_dx)
-+ tmp |= IF_MODE_RGMII_FD;
-+ else
-+ tmp &= ~IF_MODE_RGMII_FD;
-+
-+ switch (speed) {
-+ case E_ENET_SPEED_1000:
-+ tmp |= IF_MODE_RGMII_1000;
-+ break;
-+ case E_ENET_SPEED_100:
-+ tmp |= IF_MODE_RGMII_100;
-+ break;
-+ case E_ENET_SPEED_10:
-+ tmp |= IF_MODE_RGMII_10;
-+ break;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ iowrite32be(tmp, &regs->if_mode);
-+}
-+
-+void fman_memac_defconfig(struct memac_cfg *cfg)
-+{
-+ cfg->reset_on_init = FALSE;
-+ cfg->wan_mode_enable = FALSE;
-+ cfg->promiscuous_mode_enable = FALSE;
-+ cfg->pause_forward_enable = FALSE;
-+ cfg->pause_ignore = FALSE;
-+ cfg->tx_addr_ins_enable = FALSE;
-+ cfg->loopback_enable = FALSE;
-+ cfg->cmd_frame_enable = FALSE;
-+ cfg->rx_error_discard = FALSE;
-+ cfg->send_idle_enable = FALSE;
-+ cfg->no_length_check_enable = TRUE;
-+ cfg->lgth_check_nostdr = FALSE;
-+ cfg->time_stamp_enable = FALSE;
-+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
-+ cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
-+ cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
-+ cfg->pad_enable = TRUE;
-+ cfg->phy_tx_ena_on = FALSE;
-+ cfg->rx_sfd_any = FALSE;
-+ cfg->rx_pbl_fwd = FALSE;
-+ cfg->tx_pbl_fwd = FALSE;
-+ cfg->debug_mode = FALSE;
-+ cfg->wake_on_lan = FALSE;
-+}
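GET_MEMAC_CNTR_64 above builds each 64-bit statistics counter from two 32-bit reads, low word first. If the hardware does not latch both halves on the first access, a roll-over between the two reads could produce a torn value; the conventional guard for that situation is sketched below purely as an illustration, since whether mEMAC actually needs it is not established by this code.

#include <linux/io.h>
#include <linux/types.h>

/* Illustration only: re-read the upper half until it is stable so a carry
 * between the two 32-bit accesses cannot yield a torn 64-bit result.
 * 'lo' and 'hi' stand for the _l/_u register halves used by the macro. */
static uint64_t read_counter64_stable(void __iomem *lo, void __iomem *hi)
{
	uint32_t h1, l, h2;

	do {
		h1 = ioread32be(hi);
		l  = ioread32be(lo);
		h2 = ioread32be(hi);
	} while (h1 != h2);

	return ((uint64_t)h1 << 32) | l;
}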
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_memac_mii_acc.c
-@@ -0,0 +1,215 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "fsl_fman_memac_mii_acc.h"
-+
-+static void write_phy_reg_10g(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t data)
-+{
-+ uint32_t tmp_reg;
-+
-+ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
-+ /* Leave only MDIO_CLK_DIV bits set on */
-+ tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
-+ /* Set maximum MDIO_HOLD value to allow phy to see
-+ change of data signal */
-+ tmp_reg |= MDIO_CFG_HOLD_MASK;
-+ /* Add 10G interface mode */
-+ tmp_reg |= MDIO_CFG_ENC45;
-+ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-+
-+ /* Wait for command completion */
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ /* Specify phy and register to be accessed */
-+ iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
-+ iowrite32be(reg, &mii_regs->mdio_addr);
-+ wmb();
-+
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ /* Write data */
-+ iowrite32be(data, &mii_regs->mdio_data);
-+ wmb();
-+
-+ /* Wait for write transaction end */
-+ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
-+ udelay(1);
-+}
-+
-+static uint32_t read_phy_reg_10g(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t *data)
-+{
-+ uint32_t tmp_reg;
-+
-+ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
-+ /* Leave only MDIO_CLK_DIV bits set on */
-+ tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
-+ /* Set maximum MDIO_HOLD value to allow phy to see
-+ change of data signal */
-+ tmp_reg |= MDIO_CFG_HOLD_MASK;
-+ /* Add 10G interface mode */
-+ tmp_reg |= MDIO_CFG_ENC45;
-+ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-+
-+ /* Wait for command completion */
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ /* Specify phy and register to be accessed */
-+ iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
-+ iowrite32be(reg, &mii_regs->mdio_addr);
-+ wmb();
-+
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ /* Read cycle */
-+ tmp_reg = phy_addr;
-+ tmp_reg |= MDIO_CTL_READ;
-+ iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
-+ wmb();
-+
-+ /* Wait for data to be available */
-+ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
-+ udelay(1);
-+
-+ *data = (uint16_t)ioread32be(&mii_regs->mdio_data);
-+
-+ /* Check if there was an error */
-+ return ioread32be(&mii_regs->mdio_cfg);
-+}
-+
-+static void write_phy_reg_1g(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t data)
-+{
-+ uint32_t tmp_reg;
-+
-+ /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
-+ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
-+ tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
-+ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-+
-+ /* Wait for command completion */
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ /* Write transaction */
-+ tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
-+ tmp_reg |= reg;
-+ iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
-+
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ iowrite32be(data, &mii_regs->mdio_data);
-+
-+ wmb();
-+
-+ /* Wait for write transaction to end */
-+ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
-+ udelay(1);
-+}
-+
-+static uint32_t read_phy_reg_1g(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t *data)
-+{
-+ uint32_t tmp_reg;
-+
-+ /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
-+ tmp_reg = ioread32be(&mii_regs->mdio_cfg);
-+ tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
-+ iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
-+
-+ /* Wait for command completion */
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ /* Read transaction */
-+ tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
-+ tmp_reg |= reg;
-+ tmp_reg |= MDIO_CTL_READ;
-+ iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
-+
-+ while ((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY)
-+ udelay(1);
-+
-+ /* Wait for data to be available */
-+ while ((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY)
-+ udelay(1);
-+
-+ *data = (uint16_t)ioread32be(&mii_regs->mdio_data);
-+
-+ /* Check error */
-+ return ioread32be(&mii_regs->mdio_cfg);
-+}
-+
-+/*****************************************************************************/
-+int fman_memac_mii_write_phy_reg(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t data,
-+ enum enet_speed enet_speed)
-+{
-+ /* Figure out the interface type - 10G vs 1G.
-+ In a 10G interface both phy_addr and devAddr are present. */
-+ if (enet_speed == E_ENET_SPEED_10000)
-+ write_phy_reg_10g(mii_regs, phy_addr, reg, data);
-+ else
-+ write_phy_reg_1g(mii_regs, phy_addr, reg, data);
-+
-+ return 0;
-+}
-+
-+/*****************************************************************************/
-+int fman_memac_mii_read_phy_reg(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t *data,
-+ enum enet_speed enet_speed)
-+{
-+ uint32_t ans;
-+ /* Figure out the interface type - 10G vs 1G.
-+ In a 10G interface both phy_addr and devAddr are present. */
-+ if (enet_speed == E_ENET_SPEED_10000)
-+ ans = read_phy_reg_10g(mii_regs, phy_addr, reg, data);
-+ else
-+ ans = read_phy_reg_1g(mii_regs, phy_addr, reg, data);
-+
-+ if (ans & MDIO_CFG_READ_ERR)
-+ return -EINVAL;
-+ return 0;
-+}
-+
-+/* ......................................................................... */
-+
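fman_memac_mii_read_phy_reg() and its write counterpart above pick the MDIO access method from the link speed alone: 10 Gbit/s ports go through the Clause 45 path (MDIO_CFG_ENC45), everything else through the Clause 22 path. A caller therefore only passes the speed it negotiated; the PHY address and register number in the sketch below are made up for the example.

/* Hypothetical usage sketch for the wrappers above; mii_regs is assumed to
 * point at the MAC's mapped MDIO register block. */
static int example_read_phy_status(struct memac_mii_access_mem_map *mii_regs)
{
	uint16_t status;
	int err;

	err = fman_memac_mii_read_phy_reg(mii_regs, 2 /* phy_addr */,
					  1 /* reg */, &status,
					  E_ENET_SPEED_1000);
	if (err)
		return err;	/* MDIO_CFG_READ_ERR was set */

	return status;
}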
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/fman_tgec.c
-@@ -0,0 +1,367 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "fsl_fman_tgec.h"
-+
-+
-+void fman_tgec_set_mac_address(struct tgec_regs *regs, uint8_t *adr)
-+{
-+ uint32_t tmp0, tmp1;
-+
-+ tmp0 = (uint32_t)(adr[0] |
-+ adr[1] << 8 |
-+ adr[2] << 16 |
-+ adr[3] << 24);
-+ tmp1 = (uint32_t)(adr[4] | adr[5] << 8);
-+ iowrite32be(tmp0, &regs->mac_addr_0);
-+ iowrite32be(tmp1, &regs->mac_addr_1);
-+}
-+
-+void fman_tgec_reset_stat(struct tgec_regs *regs)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+
-+ tmp |= CMD_CFG_STAT_CLR;
-+
-+ iowrite32be(tmp, &regs->command_config);
-+
-+ while (ioread32be(&regs->command_config) & CMD_CFG_STAT_CLR) ;
-+}
-+
-+#define GET_TGEC_CNTR_64(bn) \
-+ (((uint64_t)ioread32be(&regs->bn ## _u) << 32) | \
-+ ioread32be(&regs->bn ## _l))
-+
-+uint64_t fman_tgec_get_counter(struct tgec_regs *regs, enum tgec_counters reg_name)
-+{
-+ uint64_t ret_val;
-+
-+ switch (reg_name) {
-+ case E_TGEC_COUNTER_R64:
-+ ret_val = GET_TGEC_CNTR_64(r64);
-+ break;
-+ case E_TGEC_COUNTER_R127:
-+ ret_val = GET_TGEC_CNTR_64(r127);
-+ break;
-+ case E_TGEC_COUNTER_R255:
-+ ret_val = GET_TGEC_CNTR_64(r255);
-+ break;
-+ case E_TGEC_COUNTER_R511:
-+ ret_val = GET_TGEC_CNTR_64(r511);
-+ break;
-+ case E_TGEC_COUNTER_R1023:
-+ ret_val = GET_TGEC_CNTR_64(r1023);
-+ break;
-+ case E_TGEC_COUNTER_R1518:
-+ ret_val = GET_TGEC_CNTR_64(r1518);
-+ break;
-+ case E_TGEC_COUNTER_R1519X:
-+ ret_val = GET_TGEC_CNTR_64(r1519x);
-+ break;
-+ case E_TGEC_COUNTER_TRFRG:
-+ ret_val = GET_TGEC_CNTR_64(trfrg);
-+ break;
-+ case E_TGEC_COUNTER_TRJBR:
-+ ret_val = GET_TGEC_CNTR_64(trjbr);
-+ break;
-+ case E_TGEC_COUNTER_RDRP:
-+ ret_val = GET_TGEC_CNTR_64(rdrp);
-+ break;
-+ case E_TGEC_COUNTER_RALN:
-+ ret_val = GET_TGEC_CNTR_64(raln);
-+ break;
-+ case E_TGEC_COUNTER_TRUND:
-+ ret_val = GET_TGEC_CNTR_64(trund);
-+ break;
-+ case E_TGEC_COUNTER_TROVR:
-+ ret_val = GET_TGEC_CNTR_64(trovr);
-+ break;
-+ case E_TGEC_COUNTER_RXPF:
-+ ret_val = GET_TGEC_CNTR_64(rxpf);
-+ break;
-+ case E_TGEC_COUNTER_TXPF:
-+ ret_val = GET_TGEC_CNTR_64(txpf);
-+ break;
-+ case E_TGEC_COUNTER_ROCT:
-+ ret_val = GET_TGEC_CNTR_64(roct);
-+ break;
-+ case E_TGEC_COUNTER_RMCA:
-+ ret_val = GET_TGEC_CNTR_64(rmca);
-+ break;
-+ case E_TGEC_COUNTER_RBCA:
-+ ret_val = GET_TGEC_CNTR_64(rbca);
-+ break;
-+ case E_TGEC_COUNTER_RPKT:
-+ ret_val = GET_TGEC_CNTR_64(rpkt);
-+ break;
-+ case E_TGEC_COUNTER_RUCA:
-+ ret_val = GET_TGEC_CNTR_64(ruca);
-+ break;
-+ case E_TGEC_COUNTER_RERR:
-+ ret_val = GET_TGEC_CNTR_64(rerr);
-+ break;
-+ case E_TGEC_COUNTER_TOCT:
-+ ret_val = GET_TGEC_CNTR_64(toct);
-+ break;
-+ case E_TGEC_COUNTER_TMCA:
-+ ret_val = GET_TGEC_CNTR_64(tmca);
-+ break;
-+ case E_TGEC_COUNTER_TBCA:
-+ ret_val = GET_TGEC_CNTR_64(tbca);
-+ break;
-+ case E_TGEC_COUNTER_TUCA:
-+ ret_val = GET_TGEC_CNTR_64(tuca);
-+ break;
-+ case E_TGEC_COUNTER_TERR:
-+ ret_val = GET_TGEC_CNTR_64(terr);
-+ break;
-+ default:
-+ ret_val = 0;
-+ }
-+
-+ return ret_val;
-+}
-+
-+void fman_tgec_enable(struct tgec_regs *regs, bool apply_rx, bool apply_tx)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+ if (apply_rx)
-+ tmp |= CMD_CFG_RX_EN;
-+ if (apply_tx)
-+ tmp |= CMD_CFG_TX_EN;
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+void fman_tgec_disable(struct tgec_regs *regs, bool apply_rx, bool apply_tx)
-+{
-+ uint32_t tmp_reg_32;
-+
-+ tmp_reg_32 = ioread32be(&regs->command_config);
-+ if (apply_rx)
-+ tmp_reg_32 &= ~CMD_CFG_RX_EN;
-+ if (apply_tx)
-+ tmp_reg_32 &= ~CMD_CFG_TX_EN;
-+ iowrite32be(tmp_reg_32, &regs->command_config);
-+}
-+
-+void fman_tgec_set_promiscuous(struct tgec_regs *regs, bool val)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+ if (val)
-+ tmp |= CMD_CFG_PROMIS_EN;
-+ else
-+ tmp &= ~CMD_CFG_PROMIS_EN;
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+void fman_tgec_reset_filter_table(struct tgec_regs *regs)
-+{
-+ uint32_t i;
-+ for (i = 0; i < 512; i++)
-+ iowrite32be(i & ~TGEC_HASH_MCAST_EN, &regs->hashtable_ctrl);
-+}
-+
-+void fman_tgec_set_hash_table_entry(struct tgec_regs *regs, uint32_t crc)
-+{
-+ uint32_t hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* Take 9 MSB bits */
-+ iowrite32be(hash | TGEC_HASH_MCAST_EN, &regs->hashtable_ctrl);
-+}
-+
-+void fman_tgec_set_hash_table(struct tgec_regs *regs, uint32_t value)
-+{
-+ iowrite32be(value, &regs->hashtable_ctrl);
-+}
-+
-+void fman_tgec_set_tx_pause_frames(struct tgec_regs *regs, uint16_t pause_time)
-+{
-+ iowrite32be((uint32_t)pause_time, &regs->pause_quant);
-+}
-+
-+void fman_tgec_set_rx_ignore_pause_frames(struct tgec_regs *regs, bool en)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+ if (en)
-+ tmp |= CMD_CFG_PAUSE_IGNORE;
-+ else
-+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+void fman_tgec_enable_1588_time_stamp(struct tgec_regs *regs, bool en)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->command_config);
-+ if (en)
-+ tmp |= CMD_CFG_EN_TIMESTAMP;
-+ else
-+ tmp &= ~CMD_CFG_EN_TIMESTAMP;
-+ iowrite32be(tmp, &regs->command_config);
-+}
-+
-+uint32_t fman_tgec_get_event(struct tgec_regs *regs, uint32_t ev_mask)
-+{
-+ return ioread32be(&regs->ievent) & ev_mask;
-+}
-+
-+void fman_tgec_ack_event(struct tgec_regs *regs, uint32_t ev_mask)
-+{
-+ iowrite32be(ev_mask, &regs->ievent);
-+}
-+
-+uint32_t fman_tgec_get_interrupt_mask(struct tgec_regs *regs)
-+{
-+ return ioread32be(&regs->imask);
-+}
-+
-+void fman_tgec_add_addr_in_paddr(struct tgec_regs *regs, uint8_t *adr)
-+{
-+ uint32_t tmp0, tmp1;
-+
-+ tmp0 = (uint32_t)(adr[0] |
-+ adr[1] << 8 |
-+ adr[2] << 16 |
-+ adr[3] << 24);
-+ tmp1 = (uint32_t)(adr[4] | adr[5] << 8);
-+ iowrite32be(tmp0, &regs->mac_addr_2);
-+ iowrite32be(tmp1, &regs->mac_addr_3);
-+}
-+
-+void fman_tgec_clear_addr_in_paddr(struct tgec_regs *regs)
-+{
-+ iowrite32be(0, &regs->mac_addr_2);
-+ iowrite32be(0, &regs->mac_addr_3);
-+}
-+
-+uint32_t fman_tgec_get_revision(struct tgec_regs *regs)
-+{
-+ return ioread32be(&regs->tgec_id);
-+}
-+
-+void fman_tgec_enable_interrupt(struct tgec_regs *regs, uint32_t ev_mask)
-+{
-+ iowrite32be(ioread32be(&regs->imask) | ev_mask, &regs->imask);
-+}
-+
-+void fman_tgec_disable_interrupt(struct tgec_regs *regs, uint32_t ev_mask)
-+{
-+ iowrite32be(ioread32be(&regs->imask) & ~ev_mask, &regs->imask);
-+}
-+
-+uint16_t fman_tgec_get_max_frame_len(struct tgec_regs *regs)
-+{
-+ return (uint16_t) ioread32be(&regs->maxfrm);
-+}
-+
-+void fman_tgec_defconfig(struct tgec_cfg *cfg)
-+{
-+ cfg->wan_mode_enable = DEFAULT_WAN_MODE_ENABLE;
-+ cfg->promiscuous_mode_enable = DEFAULT_PROMISCUOUS_MODE_ENABLE;
-+ cfg->pause_forward_enable = DEFAULT_PAUSE_FORWARD_ENABLE;
-+ cfg->pause_ignore = DEFAULT_PAUSE_IGNORE;
-+ cfg->tx_addr_ins_enable = DEFAULT_TX_ADDR_INS_ENABLE;
-+ cfg->loopback_enable = DEFAULT_LOOPBACK_ENABLE;
-+ cfg->cmd_frame_enable = DEFAULT_CMD_FRAME_ENABLE;
-+ cfg->rx_error_discard = DEFAULT_RX_ERROR_DISCARD;
-+ cfg->send_idle_enable = DEFAULT_SEND_IDLE_ENABLE;
-+ cfg->no_length_check_enable = DEFAULT_NO_LENGTH_CHECK_ENABLE;
-+ cfg->lgth_check_nostdr = DEFAULT_LGTH_CHECK_NOSTDR;
-+ cfg->time_stamp_enable = DEFAULT_TIME_STAMP_ENABLE;
-+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
-+ cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
-+ cfg->pause_quant = DEFAULT_PAUSE_QUANT;
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+ cfg->skip_fman11_workaround = FALSE;
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+}
-+
-+int fman_tgec_init(struct tgec_regs *regs, struct tgec_cfg *cfg,
-+ uint32_t exception_mask)
-+{
-+ uint32_t tmp;
-+
-+ /* Config */
-+ tmp = 0x40; /* CRC forward */
-+ if (cfg->wan_mode_enable)
-+ tmp |= CMD_CFG_WAN_MODE;
-+ if (cfg->promiscuous_mode_enable)
-+ tmp |= CMD_CFG_PROMIS_EN;
-+ if (cfg->pause_forward_enable)
-+ tmp |= CMD_CFG_PAUSE_FWD;
-+ if (cfg->pause_ignore)
-+ tmp |= CMD_CFG_PAUSE_IGNORE;
-+ if (cfg->tx_addr_ins_enable)
-+ tmp |= CMD_CFG_TX_ADDR_INS;
-+ if (cfg->loopback_enable)
-+ tmp |= CMD_CFG_LOOPBACK_EN;
-+ if (cfg->cmd_frame_enable)
-+ tmp |= CMD_CFG_CMD_FRM_EN;
-+ if (cfg->rx_error_discard)
-+ tmp |= CMD_CFG_RX_ER_DISC;
-+ if (cfg->send_idle_enable)
-+ tmp |= CMD_CFG_SEND_IDLE;
-+ if (cfg->no_length_check_enable)
-+ tmp |= CMD_CFG_NO_LEN_CHK;
-+ if (cfg->time_stamp_enable)
-+ tmp |= CMD_CFG_EN_TIMESTAMP;
-+ iowrite32be(tmp, &regs->command_config);
-+
-+ /* Max Frame Length */
-+ iowrite32be((uint32_t)cfg->max_frame_length, &regs->maxfrm);
-+ /* Pause Time */
-+ iowrite32be(cfg->pause_quant, &regs->pause_quant);
-+
-+ /* clear all pending events and set-up interrupts */
-+ fman_tgec_ack_event(regs, 0xffffffff);
-+ fman_tgec_enable_interrupt(regs, exception_mask);
-+
-+ return 0;
-+}
-+
-+void fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007(struct tgec_regs *regs)
-+{
-+ uint32_t tmp;
-+
-+ /* restore the default tx ipg Length */
-+ tmp = (ioread32be(&regs->tx_ipg_len) & ~TGEC_TX_IPG_LENGTH_MASK) | 12;
-+
-+ iowrite32be(tmp, &regs->tx_ipg_len);
-+}
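fman_tgec_set_hash_table_entry() above keeps only the 9 most significant bits of the CRC it is given (TGEC_HASH_MCAST_SHIFT and TGEC_HASH_ADR_MSK), which is why fman_tgec_reset_filter_table() clears 512 buckets. How the caller derives that CRC lives outside this file; the sketch below assumes a plain CRC-32 over the six destination-address bytes and is only meant to illustrate the indexing, not to describe what the higher layer actually does.

#include <linux/crc32.h>

/* Illustration: derive a 9-bit multicast hash bucket the same way
 * fman_tgec_set_hash_table_entry() does, assuming (for this example only)
 * that the CRC handed in is a CRC-32 of the destination MAC address. */
static uint32_t tgec_hash_bucket(const uint8_t mac[6])
{
	uint32_t crc = crc32_le(~0U, mac, 6);

	return (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
}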
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
-@@ -0,0 +1,1166 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File memac.c
-+
-+ @Description FM mEMAC driver
-+*//***************************************************************************/
-+#include <../../../../sdk_dpaa/mac.h>
-+#include <linux/phy_fixed.h>
-+
-+#include "std_ext.h"
-+#include "string_ext.h"
-+#include "error_ext.h"
-+#include "xx_ext.h"
-+#include "endian_ext.h"
-+#include "debug_ext.h"
-+
-+#include "fm_common.h"
-+#include "memac.h"
-+
-+
-+static t_Error MemacAdjustLink(t_Handle h_Memac, e_EnetSpeed speed, bool fullDuplex);
-+
-+/*****************************************************************************/
-+/* Internal routines */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+static uint32_t GetMacAddrHashCode(uint64_t ethAddr)
-+{
-+ uint64_t mask1, mask2;
-+ uint32_t xorVal = 0;
-+ uint8_t i, j;
-+
-+ for (i=0; i<6; i++)
-+ {
-+ mask1 = ethAddr & (uint64_t)0x01;
-+ ethAddr >>= 1;
-+
-+ for (j=0; j<7; j++)
-+ {
-+ mask2 = ethAddr & (uint64_t)0x01;
-+ mask1 ^= mask2;
-+ ethAddr >>= 1;
-+ }
-+
-+ xorVal |= (mask1 << (5-i));
-+ }
-+
-+ return xorVal;
-+}
-+
-+/* ......................................................................... */
-+
-+static void SetupSgmiiInternalPhy(t_Memac *p_Memac, uint8_t phyAddr)
-+{
-+ uint16_t tmpReg16;
-+ e_EnetMode enetMode;
-+
-+ /* In case the higher MACs are used (i.e. the MACs that should support 10G),
-+ speed=10000 is provided for SGMII ports. Temporarily modify the enet
-+ mode to a 1G one so the MII functions work correctly. */
-+ enetMode = p_Memac->enetMode;
-+
-+ /* SGMII mode + AN enable */
-+ tmpReg16 = PHY_SGMII_IF_MODE_AN | PHY_SGMII_IF_MODE_SGMII;
-+ if ((p_Memac->enetMode) == e_ENET_MODE_SGMII_2500)
-+ tmpReg16 = PHY_SGMII_CR_PHY_RESET | PHY_SGMII_IF_SPEED_GIGABIT | PHY_SGMII_IF_MODE_SGMII;
-+
-+ p_Memac->enetMode = MAKE_ENET_MODE(ENET_INTERFACE_FROM_MODE(p_Memac->enetMode), e_ENET_SPEED_1000);
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x14, tmpReg16);
-+
-+ /* Device ability according to SGMII specification */
-+ tmpReg16 = PHY_SGMII_DEV_ABILITY_SGMII;
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x4, tmpReg16);
-+
-+ /* Adjust link timer for SGMII -
-+ According to Cisco SGMII specification the timer should be 1.6 ms.
-+ The link_timer register is configured in units of the clock.
-+ - When running as 1G SGMII, Serdes clock is 125 MHz, so
-+ unit = 1 / (125*10^6 Hz) = 8 ns.
-+ 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2 * 10^5 = 0x30d40
-+ - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
-+ unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
-+ 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5 * 10^5 = 0x7a120.
-+ Since the 1G SGMII link_timer value would be too short for 2.5G SGMII,
-+ we always program the 2.5G SGMII value here. */
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x13, 0x0007);
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x12, 0xa120);
-+
-+ /* Restart AN */
-+ tmpReg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x0, tmpReg16);
-+
-+ /* Restore original enet mode */
-+ p_Memac->enetMode = enetMode;
-+}
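The two writes to registers 0x13 and 0x12 at the end of the routine above encode the value derived in its comment: 1.6 ms at the 2.5G SGMII SerDes tick of 3.2 ns is 500000 = 0x7a120 ticks, split into 0x0007 (upper bits) and 0xa120 (lower 16 bits). A small check of that arithmetic, kept in integer math by working in picoseconds:

/* Worked check of the link-timer value used above (illustration only). */
static uint32_t sgmii_link_timer_2g5(void)
{
	uint32_t ticks = 1600000000UL / 3200;	/* 1.6 ms / 3.2 ns = 500000 = 0x7a120 */

	/* programmed above as 0x0007 -> reg 0x13 and 0xa120 -> reg 0x12 */
	return ticks;
}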
-+
-+/* ......................................................................... */
-+
-+static void SetupSgmiiInternalPhyBaseX(t_Memac *p_Memac, uint8_t phyAddr)
-+{
-+ uint16_t tmpReg16;
-+ e_EnetMode enetMode;
-+
-+ /* In case the higher MACs are used (i.e. the MACs that should support 10G),
-+ speed=10000 is provided for SGMII ports. Temporarily modify the enet
-+ mode to a 1G one so the MII functions work correctly. */
-+ enetMode = p_Memac->enetMode;
-+ p_Memac->enetMode = MAKE_ENET_MODE(ENET_INTERFACE_FROM_MODE(p_Memac->enetMode), e_ENET_SPEED_1000);
-+
-+ /* 1000BaseX mode */
-+ tmpReg16 = PHY_SGMII_IF_MODE_1000X;
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x14, tmpReg16);
-+
-+ /* AN Device capability */
-+ tmpReg16 = PHY_SGMII_DEV_ABILITY_1000X;
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x4, tmpReg16);
-+
-+ /* Adjust link timer for SGMII -
-+ For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
-+ The link_timer register is configured in units of the clock.
-+ - When running as 1G SGMII, Serdes clock is 125 MHz, so
-+ unit = 1 / (125*10^6 Hz) = 8 ns.
-+ 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
-+ - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
-+ unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
-+ 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
-+ Since the 1G SGMII link_timer value would be too short for 2.5G SGMII,
-+ we always program the 2.5G SGMII value here. */
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x13, 0x002f);
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x12, 0xaf08);
-+
-+ /* Restart AN */
-+ tmpReg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
-+ MEMAC_MII_WritePhyReg(p_Memac, phyAddr, 0x0, tmpReg16);
-+
-+ /* Restore original enet mode */
-+ p_Memac->enetMode = enetMode;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error CheckInitParameters(t_Memac *p_Memac)
-+{
-+ e_FmMacType portType;
-+
-+ portType = ((ENET_SPEED_FROM_MODE(p_Memac->enetMode) < e_ENET_SPEED_10000) ? e_FM_MAC_1G : e_FM_MAC_10G);
-+
-+#if (FM_MAX_NUM_OF_10G_MACS > 0)
-+ if ((portType == e_FM_MAC_10G) && (p_Memac->macId >= FM_MAX_NUM_OF_10G_MACS))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("10G MAC ID must be less than %d", FM_MAX_NUM_OF_10G_MACS));
-+#endif /* (FM_MAX_NUM_OF_10G_MACS > 0) */
-+
-+ if ((portType == e_FM_MAC_1G) && (p_Memac->macId >= FM_MAX_NUM_OF_1G_MACS))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("1G MAC ID must be less than %d", FM_MAX_NUM_OF_1G_MACS));
-+ if (p_Memac->addr == 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC must have a valid MAC address"));
-+ if (!p_Memac->f_Exception)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Uninitialized f_Exception"));
-+ if (!p_Memac->f_Event)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Uninitialized f_Event"));
-+#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002
-+ if (!p_Memac->p_MemacDriverParam->no_length_check_enable)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!"));
-+#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */
-+
-+ return E_OK;
-+}
-+
-+/* ........................................................................... */
-+
-+static void MemacErrException(t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ uint32_t event, imask;
-+
-+ event = fman_memac_get_event(p_Memac->p_MemMap, 0xffffffff);
-+ imask = fman_memac_get_interrupt_mask(p_Memac->p_MemMap);
-+
-+ /* Imask includes both error and notification/event bits.
-+ Keep only the error bits that are enabled by imask.
-+ The imask error bits sit 16 bits above their corresponding
-+ location in ievent - hence the >> 16. */
-+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
-+
-+ fman_memac_ack_event(p_Memac->p_MemMap, event);
-+
-+ if (event & MEMAC_IEVNT_TS_ECC_ER)
-+ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_TS_FIFO_ECC_ERR);
-+ if (event & MEMAC_IEVNT_TX_ECC_ER)
-+ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_10G_1TX_ECC_ER);
-+ if (event & MEMAC_IEVNT_RX_ECC_ER)
-+ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_10G_RX_ECC_ER);
-+}
-+
-+static void MemacException(t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ uint32_t event, imask;
-+
-+ event = fman_memac_get_event(p_Memac->p_MemMap, 0xffffffff);
-+ imask = fman_memac_get_interrupt_mask(p_Memac->p_MemMap);
-+
-+ /* Imask includes both error and notification/event bits.
-+ Keep only the error bits that are enabled by imask.
-+ The imask error bits sit 16 bits above their corresponding
-+ location in ievent - hence the >> 16. */
-+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
-+
-+ fman_memac_ack_event(p_Memac->p_MemMap, event);
-+
-+ if (event & MEMAC_IEVNT_MGI)
-+ p_Memac->f_Exception(p_Memac->h_App, e_FM_MAC_EX_MAGIC_PACKET_INDICATION);
-+}
-+
-+/* ......................................................................... */
-+
-+static void FreeInitResources(t_Memac *p_Memac)
-+{
-+ e_FmMacType portType;
-+
-+ portType =
-+ ((ENET_SPEED_FROM_MODE(p_Memac->enetMode) < e_ENET_SPEED_10000) ? e_FM_MAC_1G : e_FM_MAC_10G);
-+
-+ if (portType == e_FM_MAC_10G)
-+ FmUnregisterIntr(p_Memac->fmMacControllerDriver.h_Fm, e_FM_MOD_10G_MAC, p_Memac->macId, e_FM_INTR_TYPE_ERR);
-+ else
-+ FmUnregisterIntr(p_Memac->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Memac->macId, e_FM_INTR_TYPE_ERR);
-+
-+ /* release the driver's group hash table */
-+ FreeHashTable(p_Memac->p_MulticastAddrHash);
-+ p_Memac->p_MulticastAddrHash = NULL;
-+
-+ /* release the driver's individual hash table */
-+ FreeHashTable(p_Memac->p_UnicastAddrHash);
-+ p_Memac->p_UnicastAddrHash = NULL;
-+}
-+
-+
-+/*****************************************************************************/
-+/* mEMAC API routines */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacEnable(t_Handle h_Memac, e_CommMode mode)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ struct mac_device *mac_dev = (struct mac_device *)p_Memac->h_App;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ fman_memac_enable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
-+
-+ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_RGMII) {
-+ if (mac_dev->fixed_link) {
-+ printk(KERN_INFO "This is a fixed-link, forcing speed %d duplex %d\n",mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
-+ MemacAdjustLink(h_Memac,mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacDisable (t_Handle h_Memac, e_CommMode mode)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ fman_memac_disable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacSetPromiscuous(t_Handle h_Memac, bool newVal)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ fman_memac_set_promiscuous(p_Memac->p_MemMap, newVal);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error MemacAdjustLink(t_Handle h_Memac, e_EnetSpeed speed, bool fullDuplex)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ if ((speed >= e_ENET_SPEED_1000) && (!fullDuplex))
-+ RETURN_ERROR(MAJOR, E_CONFLICT,
-+ ("Ethernet MAC 1G or 10G does not support half-duplex"));
-+
-+ fman_memac_adjust_link(p_Memac->p_MemMap,
-+ (enum enet_interface)ENET_INTERFACE_FROM_MODE(p_Memac->enetMode),
-+ (enum enet_speed)speed,
-+ fullDuplex);
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* Memac Configs modification functions */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacConfigLoopback(t_Handle h_Memac, bool newVal)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ p_Memac->p_MemacDriverParam->loopback_enable = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacConfigWan(t_Handle h_Memac, bool newVal)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ p_Memac->p_MemacDriverParam->wan_mode_enable = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacConfigMaxFrameLength(t_Handle h_Memac, uint16_t newVal)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ p_Memac->p_MemacDriverParam->max_frame_length = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacConfigPad(t_Handle h_Memac, bool newVal)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ p_Memac->p_MemacDriverParam->pad_enable = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacConfigLengthCheck(t_Handle h_Memac, bool newVal)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ p_Memac->p_MemacDriverParam->no_length_check_enable = !newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacConfigException(t_Handle h_Memac, e_FmMacExceptions exception, bool enable)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Memac->exceptions |= bitMask;
-+ else
-+ p_Memac->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacConfigResetOnInit(t_Handle h_Memac, bool enable)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ p_Memac->p_MemacDriverParam->reset_on_init = enable;
-+
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* Memac Run Time API functions */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacSetTxPauseFrames(t_Handle h_Memac,
-+ uint8_t priority,
-+ uint16_t pauseTime,
-+ uint16_t threshTime)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ if (priority != 0xFF)
-+ {
-+ bool PortConfigured, PreFetchEnabled;
-+
-+ if (FmGetTnumAgingPeriod(p_Memac->fmMacControllerDriver.h_Fm) == 0)
-+ RETURN_ERROR(MAJOR, E_CONFLICT, ("For PFC operation, TNUM aging must be enabled"));
-+
-+ FmGetPortPreFetchConfiguration(p_Memac->fmMacControllerDriver.h_Fm,
-+ p_Memac->fmMacControllerDriver.macId,
-+ &PortConfigured,
-+ &PreFetchEnabled);
-+
-+ if ((ENET_SPEED_FROM_MODE(p_Memac->fmMacControllerDriver.enetMode) == e_ENET_SPEED_1000) && !PortConfigured)
-+ DBG(INFO, ("For PFC correct operation, prefetch must be configured on the FM Tx PORT"));
-+
-+ if ((ENET_SPEED_FROM_MODE(p_Memac->fmMacControllerDriver.enetMode) == e_ENET_SPEED_1000) && PortConfigured && !PreFetchEnabled)
-+ DBG(WARNING, ("For PFC correct operation, prefetch must be configured on the FM Tx PORT"));
-+ }
-+
-+ fman_memac_set_tx_pause_frames(p_Memac->p_MemMap, priority, pauseTime, threshTime);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacSetTxAutoPauseFrames(t_Handle h_Memac,
-+ uint16_t pauseTime)
-+{
-+ return MemacSetTxPauseFrames(h_Memac, FM_MAC_NO_PFC, pauseTime, 0);
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacSetRxIgnorePauseFrames(t_Handle h_Memac, bool en)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ fman_memac_set_rx_ignore_pause_frames(p_Memac->p_MemMap, en);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacSetWakeOnLan(t_Handle h_Memac, bool en)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ fman_memac_set_wol(p_Memac->p_MemMap, en);
-+
-+ return E_OK;
-+}
-+
-+/* .............................................................................. */
-+
-+static t_Error MemacEnable1588TimeStamp(t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+ UNUSED(p_Memac);
-+ DBG(WARNING, ("mEMAC has 1588 always enabled!"));
-+
-+ return E_OK;
-+}
-+
-+/* Counters handling */
-+/* ......................................................................... */
-+
-+static t_Error MemacGetStatistics(t_Handle h_Memac, t_FmMacStatistics *p_Statistics)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER);
-+
-+ p_Statistics->eStatPkts64 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R64);
-+ p_Statistics->eStatPkts65to127 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R127);
-+ p_Statistics->eStatPkts128to255 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R255);
-+ p_Statistics->eStatPkts256to511 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R511);
-+ p_Statistics->eStatPkts512to1023 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1023);
-+ p_Statistics->eStatPkts1024to1518 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1518);
-+ p_Statistics->eStatPkts1519to1522 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1519X);
-+/* */
-+ p_Statistics->eStatFragments = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RFRG);
-+ p_Statistics->eStatJabbers = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RJBR);
-+
-+ p_Statistics->eStatsDropEvents = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RDRP);
-+ p_Statistics->eStatCRCAlignErrors = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RALN);
-+
-+ p_Statistics->eStatUndersizePkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TUND);
-+ p_Statistics->eStatOversizePkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_ROVR);
-+/* Pause */
-+ p_Statistics->reStatPause = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RXPF);
-+ p_Statistics->teStatPause = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TXPF);
-+
-+/* MIB II */
-+ p_Statistics->ifInOctets = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_ROCT);
-+ p_Statistics->ifInUcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RUCA);
-+ p_Statistics->ifInMcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RMCA);
-+ p_Statistics->ifInBcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RBCA);
-+ p_Statistics->ifInPkts = p_Statistics->ifInUcastPkts
-+ + p_Statistics->ifInMcastPkts
-+ + p_Statistics->ifInBcastPkts;
-+ p_Statistics->ifInDiscards = 0;
-+ p_Statistics->ifInErrors = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_RERR);
-+
-+ p_Statistics->ifOutOctets = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TOCT);
-+ p_Statistics->ifOutUcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TUCA);
-+ p_Statistics->ifOutMcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TMCA);
-+ p_Statistics->ifOutBcastPkts = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TBCA);
-+ p_Statistics->ifOutPkts = p_Statistics->ifOutUcastPkts
-+ + p_Statistics->ifOutMcastPkts
-+ + p_Statistics->ifOutBcastPkts;
-+ p_Statistics->ifOutDiscards = 0;
-+ p_Statistics->ifOutErrors = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_TERR);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacGetFrameSizeCounters(t_Handle h_Memac, t_FmMacFrameSizeCounters *p_FrameSizeCounters, e_CommMode type)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FrameSizeCounters, E_NULL_POINTER);
-+
-+ switch (type)
-+ {
-+ case e_COMM_MODE_NONE:
-+ break;
-+
-+ case e_COMM_MODE_RX:
-+ p_FrameSizeCounters->count_pkts_64 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R64);
-+ p_FrameSizeCounters->count_pkts_65_to_127 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R127);
-+ p_FrameSizeCounters->count_pkts_128_to_255 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R255);
-+ p_FrameSizeCounters->count_pkts_256_to_511 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R511);
-+ p_FrameSizeCounters->count_pkts_512_to_1023 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1023);
-+ p_FrameSizeCounters->count_pkts_1024_to_1518 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1518);
-+ p_FrameSizeCounters->count_pkts_1519_to_1522 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1519X);
-+ break;
-+
-+ case e_COMM_MODE_TX:
-+ p_FrameSizeCounters->count_pkts_64 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T64);
-+ p_FrameSizeCounters->count_pkts_65_to_127 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T127);
-+ p_FrameSizeCounters->count_pkts_128_to_255 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T255);
-+ p_FrameSizeCounters->count_pkts_256_to_511 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T511);
-+ p_FrameSizeCounters->count_pkts_512_to_1023 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T1023);
-+ p_FrameSizeCounters->count_pkts_1024_to_1518 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T1518);
-+ p_FrameSizeCounters->count_pkts_1519_to_1522 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T1519X);
-+ break;
-+
-+ case e_COMM_MODE_RX_AND_TX:
-+ p_FrameSizeCounters->count_pkts_64 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R64)
-+ + fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T64);
-+ p_FrameSizeCounters->count_pkts_65_to_127 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R127)
-+ + fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T127);
-+ p_FrameSizeCounters->count_pkts_128_to_255 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R255)
-+ + fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T255);
-+ p_FrameSizeCounters->count_pkts_256_to_511 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R511)
-+ + fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T511);
-+ p_FrameSizeCounters->count_pkts_512_to_1023 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1023)
-+ + fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T1023);
-+ p_FrameSizeCounters->count_pkts_1024_to_1518 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1518)
-+ + fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T1518);
-+ p_FrameSizeCounters->count_pkts_1519_to_1522 = fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_R1519X)
-+ + fman_memac_get_counter(p_Memac->p_MemMap, E_MEMAC_COUNTER_T1519X);
-+ break;
-+ }
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacModifyMacAddress (t_Handle h_Memac, t_EnetAddr *p_EnetAddr)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ fman_memac_add_addr_in_paddr(p_Memac->p_MemMap, (uint8_t *)(*p_EnetAddr), 0);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacResetCounters (t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ fman_memac_reset_stat(p_Memac->p_MemMap);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacAddExactMatchMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
-+{
-+ t_Memac *p_Memac = (t_Memac *) h_Memac;
-+ uint64_t ethAddr;
-+ uint8_t paddrNum;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ if (ethAddr & GROUP_ADDRESS)
-+ /* Multicast address has no effect in PADDR */
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address"));
-+
-+ /* Make sure no PADDR contains this address */
-+ for (paddrNum = 0; paddrNum < MEMAC_NUM_OF_PADDRS; paddrNum++)
-+ if (p_Memac->indAddrRegUsed[paddrNum])
-+ if (p_Memac->paddr[paddrNum] == ethAddr)
-+ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
-+
-+ /* Find first unused PADDR */
-+ for (paddrNum = 0; paddrNum < MEMAC_NUM_OF_PADDRS; paddrNum++)
-+ if (!(p_Memac->indAddrRegUsed[paddrNum]))
-+ {
-+ /* mark this PADDR as used */
-+ p_Memac->indAddrRegUsed[paddrNum] = TRUE;
-+ /* store address */
-+ p_Memac->paddr[paddrNum] = ethAddr;
-+
-+ /* put in hardware */
-+ fman_memac_add_addr_in_paddr(p_Memac->p_MemMap, (uint8_t*)(*p_EthAddr), paddrNum);
-+ p_Memac->numOfIndAddrInRegs++;
-+
-+ return E_OK;
-+ }
-+
-+ /* No free PADDR */
-+ RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacDelExactMatchMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
-+{
-+ t_Memac *p_Memac = (t_Memac *) h_Memac;
-+ uint64_t ethAddr;
-+ uint8_t paddrNum;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ /* Find used PADDR containing this address */
-+ for (paddrNum = 0; paddrNum < MEMAC_NUM_OF_PADDRS; paddrNum++)
-+ {
-+ if ((p_Memac->indAddrRegUsed[paddrNum]) &&
-+ (p_Memac->paddr[paddrNum] == ethAddr))
-+ {
-+ /* mark this PADDR as not used */
-+ p_Memac->indAddrRegUsed[paddrNum] = FALSE;
-+ /* clear in hardware */
-+ fman_memac_clear_addr_in_paddr(p_Memac->p_MemMap, paddrNum);
-+ p_Memac->numOfIndAddrInRegs--;
-+
-+ return E_OK;
-+ }
-+ }
-+
-+ RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacGetId(t_Handle h_Memac, uint32_t *macId)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ *macId = p_Memac->macId;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+
-+static t_Error MemacAddHashMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ t_EthHashEntry *p_HashEntry;
-+ uint32_t hash;
-+ uint64_t ethAddr;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ if (!(ethAddr & GROUP_ADDRESS))
-+ /* Unicast addresses not supported in hash */
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unicast Address"));
-+
-+ hash = GetMacAddrHashCode(ethAddr) & HASH_CTRL_ADDR_MASK;
-+
-+ /* Create element to be added to the driver hash table */
-+ p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry));
-+ p_HashEntry->addr = ethAddr;
-+ INIT_LIST(&p_HashEntry->node);
-+
-+ LIST_AddToTail(&(p_HashEntry->node), &(p_Memac->p_MulticastAddrHash->p_Lsts[hash]));
-+ fman_memac_set_hash_table(p_Memac->p_MemMap, (hash | HASH_CTRL_MCAST_EN));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacDelHashMacAddress(t_Handle h_Memac, t_EnetAddr *p_EthAddr)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ t_EthHashEntry *p_HashEntry = NULL;
-+ t_List *p_Pos;
-+ uint32_t hash;
-+ uint64_t ethAddr;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ hash = GetMacAddrHashCode(ethAddr) & HASH_CTRL_ADDR_MASK;
-+
-+ LIST_FOR_EACH(p_Pos, &(p_Memac->p_MulticastAddrHash->p_Lsts[hash]))
-+ {
-+ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
-+ if (p_HashEntry->addr == ethAddr)
-+ {
-+ LIST_DelAndInit(&p_HashEntry->node);
-+ XX_Free(p_HashEntry);
-+ break;
-+ }
-+ }
-+ if (LIST_IsEmpty(&p_Memac->p_MulticastAddrHash->p_Lsts[hash]))
-+ fman_memac_set_hash_table(p_Memac->p_MemMap, (hash & ~HASH_CTRL_MCAST_EN));
-+
-+ return E_OK;
-+}
-+
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacSetException(t_Handle h_Memac, e_FmMacExceptions exception, bool enable)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Memac->exceptions |= bitMask;
-+ else
-+ p_Memac->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ fman_memac_set_exception(p_Memac->p_MemMap, bitMask, enable);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static uint16_t MemacGetMaxFrameLength(t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Memac, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_Memac->p_MemacDriverParam, E_INVALID_STATE, 0);
-+
-+ return fman_memac_get_max_frame_len(p_Memac->p_MemMap);
-+}
-+
-+static t_Error MemacInitInternalPhy(t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ uint8_t i, phyAddr;
-+
-+ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_SGMII)
-+ {
-+ /* Configure internal SGMII PHY */
-+ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX)
-+ SetupSgmiiInternalPhyBaseX(p_Memac, PHY_MDIO_ADDR);
-+ else
-+ SetupSgmiiInternalPhy(p_Memac, PHY_MDIO_ADDR);
-+ }
-+ else if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_QSGMII)
-+ {
-+ /* Configure 4 internal SGMII PHYs */
-+ for (i = 0; i < 4; i++)
-+ {
-+ /* QSGMII PHY address occupies 3 upper bits of 5-bit
-+ phyAddress; the lower 2 bits are used to extend
-+ register address space and access each one of 4
-+ ports inside QSGMII. */
-+ phyAddr = (uint8_t)((PHY_MDIO_ADDR << 2) | i);
-+ if (p_Memac->enetMode & ENET_IF_SGMII_BASEX)
-+ SetupSgmiiInternalPhyBaseX(p_Memac, phyAddr);
-+ else
-+ SetupSgmiiInternalPhy(p_Memac, phyAddr);
-+ }
-+ }
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+/* mEMAC Init & Free API */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+void *g_MemacRegs;
-+static t_Error MemacInit(t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+ struct memac_cfg *p_MemacDriverParam;
-+ enum enet_interface enet_interface;
-+ enum enet_speed enet_speed;
-+ t_EnetAddr ethAddr;
-+ e_FmMacType portType;
-+ t_Error err;
-+ bool slow_10g_if = FALSE;
-+ if (p_Memac->macId == 3) /* This is a quick WA */
-+ g_MemacRegs = p_Memac->p_MemMap;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->fmMacControllerDriver.h_Fm, E_INVALID_HANDLE);
-+
-+ FM_GetRevision(p_Memac->fmMacControllerDriver.h_Fm, &p_Memac->fmMacControllerDriver.fmRevInfo);
-+ if (p_Memac->fmMacControllerDriver.fmRevInfo.majorRev == 6 &&
-+ p_Memac->fmMacControllerDriver.fmRevInfo.minorRev == 4)
-+ slow_10g_if = TRUE;
-+
-+ CHECK_INIT_PARAMETERS(p_Memac, CheckInitParameters);
-+
-+ p_MemacDriverParam = p_Memac->p_MemacDriverParam;
-+
-+ portType =
-+ ((ENET_SPEED_FROM_MODE(p_Memac->enetMode) < e_ENET_SPEED_10000) ? e_FM_MAC_1G : e_FM_MAC_10G);
-+
-+ /* First, reset the MAC if desired. */
-+ if (p_MemacDriverParam->reset_on_init)
-+ fman_memac_reset(p_Memac->p_MemMap);
-+
-+ /* MAC Address */
-+ MAKE_ENET_ADDR_FROM_UINT64(p_Memac->addr, ethAddr);
-+ fman_memac_add_addr_in_paddr(p_Memac->p_MemMap, (uint8_t*)ethAddr, 0);
-+
-+ enet_interface = (enum enet_interface) ENET_INTERFACE_FROM_MODE(p_Memac->enetMode);
-+ enet_speed = (enum enet_speed) ENET_SPEED_FROM_MODE(p_Memac->enetMode);
-+
-+ fman_memac_init(p_Memac->p_MemMap,
-+ p_Memac->p_MemacDriverParam,
-+ enet_interface,
-+ enet_speed,
-+ slow_10g_if,
-+ p_Memac->exceptions);
-+
-+#ifdef FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
-+ {
-+ uint32_t tmpReg = 0;
-+
-+ FM_GetRevision(p_Memac->fmMacControllerDriver.h_Fm, &p_Memac->fmMacControllerDriver.fmRevInfo);
-+ /* check the FMAN version - the bug exists only in rev1 */
-+ if ((p_Memac->fmMacControllerDriver.fmRevInfo.majorRev == 6) &&
-+ (p_Memac->fmMacControllerDriver.fmRevInfo.minorRev == 0))
-+ {
-+ /* MAC strips CRC from received frames - this workaround should
-+ decrease the likelihood of bug appearance
-+ */
-+ tmpReg = GET_UINT32(p_Memac->p_MemMap->command_config);
-+ tmpReg &= ~CMD_CFG_CRC_FWD;
-+ WRITE_UINT32(p_Memac->p_MemMap->command_config, tmpReg);
-+ /* DBG(WARNING, ("mEMAC strips CRC from received frames as part of A006320 errata workaround"));*/
-+ }
-+ }
-+#endif /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 */
-+
-+ MemacInitInternalPhy(h_Memac);
-+
-+ /* Max Frame Length */
-+ err = FmSetMacMaxFrame(p_Memac->fmMacControllerDriver.h_Fm,
-+ portType,
-+ p_Memac->fmMacControllerDriver.macId,
-+ p_MemacDriverParam->max_frame_length);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("settings Mac max frame length is FAILED"));
-+
-+ p_Memac->p_MulticastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
-+ if (!p_Memac->p_MulticastAddrHash)
-+ {
-+ FreeInitResources(p_Memac);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED"));
-+ }
-+
-+ p_Memac->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
-+ if (!p_Memac->p_UnicastAddrHash)
-+ {
-+ FreeInitResources(p_Memac);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED"));
-+ }
-+
-+ FmRegisterIntr(p_Memac->fmMacControllerDriver.h_Fm,
-+ (portType == e_FM_MAC_10G) ? e_FM_MOD_10G_MAC : e_FM_MOD_1G_MAC,
-+ p_Memac->macId,
-+ e_FM_INTR_TYPE_ERR,
-+ MemacErrException,
-+ p_Memac);
-+
-+ FmRegisterIntr(p_Memac->fmMacControllerDriver.h_Fm,
-+ (portType == e_FM_MAC_10G) ? e_FM_MOD_10G_MAC : e_FM_MOD_1G_MAC,
-+ p_Memac->macId,
-+ e_FM_INTR_TYPE_NORMAL,
-+ MemacException,
-+ p_Memac);
-+
-+ XX_Free(p_MemacDriverParam);
-+ p_Memac->p_MemacDriverParam = NULL;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error MemacFree(t_Handle h_Memac)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+
-+ if (p_Memac->p_MemacDriverParam)
-+ {
-+ /* Called after config */
-+ XX_Free(p_Memac->p_MemacDriverParam);
-+ p_Memac->p_MemacDriverParam = NULL;
-+ }
-+ else
-+ /* Called after init */
-+ FreeInitResources(p_Memac);
-+
-+ XX_Free(p_Memac);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver)
-+{
-+ p_FmMacControllerDriver->f_FM_MAC_Init = MemacInit;
-+ p_FmMacControllerDriver->f_FM_MAC_Free = MemacFree;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetStatistics = NULL;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = MemacConfigLoopback;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = MemacConfigMaxFrameLength;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigWan = MemacConfigWan;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = MemacConfigPad;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = NULL; /* half-duplex is detected automatically */
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = MemacConfigLengthCheck;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigException = MemacConfigException;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit = MemacConfigResetOnInit;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetException = MemacSetException;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = MemacEnable1588TimeStamp; /* always enabled */
-+ p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = NULL;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = MemacSetPromiscuous;
-+ p_FmMacControllerDriver->f_FM_MAC_AdjustLink = MemacAdjustLink;
-+ p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg = NULL;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_Enable = MemacEnable;
-+ p_FmMacControllerDriver->f_FM_MAC_Disable = MemacDisable;
-+ p_FmMacControllerDriver->f_FM_MAC_Resume = MemacInitInternalPhy;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = MemacSetTxAutoPauseFrames;
-+ p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = MemacSetTxPauseFrames;
-+ p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = MemacSetRxIgnorePauseFrames;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan = MemacSetWakeOnLan;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ResetCounters = MemacResetCounters;
-+ p_FmMacControllerDriver->f_FM_MAC_GetStatistics = MemacGetStatistics;
-+ p_FmMacControllerDriver->f_FM_MAC_GetFrameSizeCounters = MemacGetFrameSizeCounters;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = MemacModifyMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = MemacAddHashMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = MemacDelHashMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = MemacAddExactMatchMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = MemacDelExactMatchMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_GetId = MemacGetId;
-+ p_FmMacControllerDriver->f_FM_MAC_GetVersion = NULL;
-+ p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = MemacGetMaxFrameLength;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = MEMAC_MII_WritePhyReg;
-+ p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = MEMAC_MII_ReadPhyReg;
-+}
-+
-+
-+/*****************************************************************************/
-+/* mEMAC Config Main Entry */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+t_Handle MEMAC_Config(t_FmMacParams *p_FmMacParam)
-+{
-+ t_Memac *p_Memac;
-+ struct memac_cfg *p_MemacDriverParam;
-+ uintptr_t baseAddr;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL);
-+
-+ baseAddr = p_FmMacParam->baseAddr;
-+ /* Allocate memory for the mEMAC data structure */
-+ p_Memac = (t_Memac *)XX_Malloc(sizeof(t_Memac));
-+ if (!p_Memac)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("mEMAC driver structure"));
-+ return NULL;
-+ }
-+ memset(p_Memac, 0, sizeof(t_Memac));
-+ InitFmMacControllerDriver(&p_Memac->fmMacControllerDriver);
-+
-+ /* Allocate memory for the mEMAC driver parameters data structure */
-+ p_MemacDriverParam = (struct memac_cfg *)XX_Malloc(sizeof(struct memac_cfg));
-+ if (!p_MemacDriverParam)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("mEMAC driver parameters"));
-+ XX_Free(p_Memac);
-+ return NULL;
-+ }
-+ memset(p_MemacDriverParam, 0, sizeof(struct memac_cfg));
-+
-+ /* Plant parameter structure pointer */
-+ p_Memac->p_MemacDriverParam = p_MemacDriverParam;
-+
-+ fman_memac_defconfig(p_MemacDriverParam);
-+
-+ p_Memac->addr = ENET_ADDR_TO_UINT64(p_FmMacParam->addr);
-+
-+ p_Memac->p_MemMap = (struct memac_regs *)UINT_TO_PTR(baseAddr);
-+ p_Memac->p_MiiMemMap = (struct memac_mii_access_mem_map*)UINT_TO_PTR(baseAddr + MEMAC_TO_MII_OFFSET);
-+
-+ p_Memac->enetMode = p_FmMacParam->enetMode;
-+ p_Memac->macId = p_FmMacParam->macId;
-+ p_Memac->exceptions = MEMAC_default_exceptions;
-+ p_Memac->f_Exception = p_FmMacParam->f_Exception;
-+ p_Memac->f_Event = p_FmMacParam->f_Event;
-+ p_Memac->h_App = p_FmMacParam->h_App;
-+
-+ return p_Memac;
-+}
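
The memac.c hunk above follows the SDK's two-phase bring-up: MEMAC_Config() only allocates the t_Memac object plus a struct memac_cfg pre-filled by fman_memac_defconfig(), while the hardware is actually programmed in MemacInit(), reached through the f_FM_MAC_Init pointer installed by InitFmMacControllerDriver(); on success MemacInit() frees the config structure and clears p_MemacDriverParam, which is the state the runtime-path SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, ...) guards test for. Below is a minimal caller-side sketch of that flow, not taken from the patch: it assumes "memac.h" resolves on the include path, that MEMAC_Config() is visible to the caller, and it dispatches through the function-pointer table directly because the public FM_MAC_* wrappers are outside this hunk.

    #include "memac.h"   /* assumption: pulls in the fm_mac.h types used below */

    /* Hypothetical helper: configure and initialize one mEMAC instance. */
    static t_Error bring_up_memac(t_FmMacParams *p_Params)
    {
        t_Handle                 h_Memac = MEMAC_Config(p_Params); /* alloc t_Memac + defaults */
        t_FmMacControllerDriver *p_Drv;
        t_Error                  err;

        if (!h_Memac)
            return E_NO_MEMORY;

        /* fmMacControllerDriver is the first member of t_Memac, so the handle
         * also points at the function-pointer table filled in by
         * InitFmMacControllerDriver(). */
        p_Drv = (t_FmMacControllerDriver *)h_Memac;

        err = p_Drv->f_FM_MAC_Init(h_Memac);        /* dispatches to MemacInit() */
        if (err != E_OK)
            p_Drv->f_FM_MAC_Free(h_Memac);          /* dispatches to MemacFree() */

        return err;
    }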
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.h
-@@ -0,0 +1,110 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File memac.h
-+
-+ @Description FM Multirate Ethernet MAC (mEMAC)
-+*//***************************************************************************/
-+#ifndef __MEMAC_H
-+#define __MEMAC_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+
-+#include "fsl_fman_memac_mii_acc.h"
-+#include "fm_mac.h"
-+#include "fsl_fman_memac.h"
-+
-+
-+#define MEMAC_default_exceptions \
-+ ((uint32_t)(MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI))
-+
-+#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
-+ case e_FM_MAC_EX_10G_1TX_ECC_ER: \
-+ bitMask = MEMAC_IMASK_TECC_ER; break; \
-+ case e_FM_MAC_EX_10G_RX_ECC_ER: \
-+ bitMask = MEMAC_IMASK_RECC_ER; break; \
-+ case e_FM_MAC_EX_TS_FIFO_ECC_ERR: \
-+ bitMask = MEMAC_IMASK_TSECC_ER; break; \
-+ case e_FM_MAC_EX_MAGIC_PACKET_INDICATION: \
-+ bitMask = MEMAC_IMASK_MGI; break; \
-+ default: bitMask = 0;break;}
-+
-+
-+typedef struct
-+{
-+ t_FmMacControllerDriver fmMacControllerDriver; /**< Upper Mac control block */
-+ t_Handle h_App; /**< Handle to the upper layer application */
-+ struct memac_regs *p_MemMap; /**< Pointer to MAC memory mapped registers */
-+ struct memac_mii_access_mem_map *p_MiiMemMap; /**< Pointer to MII memory mapped registers */
-+ uint64_t addr; /**< MAC address of device */
-+ e_EnetMode enetMode; /**< Ethernet physical interface */
-+ t_FmMacExceptionCallback *f_Exception;
-+ int mdioIrq;
-+ t_FmMacExceptionCallback *f_Event;
-+ bool indAddrRegUsed[MEMAC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */
-+ uint64_t paddr[MEMAC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */
-+ uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. */
-+ t_EthHash *p_MulticastAddrHash; /**< Pointer to driver's global address hash table */
-+ t_EthHash *p_UnicastAddrHash; /**< Pointer to driver's individual address hash table */
-+ bool debugMode;
-+ uint8_t macId;
-+ uint32_t exceptions;
-+ struct memac_cfg *p_MemacDriverParam;
-+} t_Memac;
-+
-+
-+/* Internal PHY access */
-+#define PHY_MDIO_ADDR 0
-+
-+/* Internal PHY Registers - SGMII */
-+#define PHY_SGMII_CR_PHY_RESET 0x8000
-+#define PHY_SGMII_CR_RESET_AN 0x0200
-+#define PHY_SGMII_CR_DEF_VAL 0x1140
-+#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
-+#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
-+#define PHY_SGMII_IF_SPEED_GIGABIT 0x0008
-+#define PHY_SGMII_IF_MODE_AN 0x0002
-+#define PHY_SGMII_IF_MODE_SGMII 0x0001
-+#define PHY_SGMII_IF_MODE_1000X 0x0000
-+
-+
-+#define MEMAC_TO_MII_OFFSET 0x030 /* Offset from the MEM map to the MDIO mem map */
-+
-+t_Error MEMAC_MII_WritePhyReg(t_Handle h_Memac, uint8_t phyAddr, uint8_t reg, uint16_t data);
-+t_Error MEMAC_MII_ReadPhyReg(t_Handle h_Memac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
-+
-+
-+#endif /* __MEMAC_H */
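
PHY_MDIO_ADDR above is the base address MemacInitInternalPhy() shifts for QSGMII: the configured address supplies the three upper bits of the 5-bit MDIO address, and the two low bits select one of the four ports behind the QSGMII PCS. The following standalone snippet (plain C, no SDK headers, not part of the patch) simply prints the four addresses that encoding produces:

    #include <stdint.h>
    #include <stdio.h>

    /* Same value memac.h defines for the internal PHY. */
    #define PHY_MDIO_ADDR 0

    int main(void)
    {
        for (uint8_t port = 0; port < 4; port++) {
            /* Mirrors MemacInitInternalPhy(): upper 3 bits = base address,
             * lower 2 bits = QSGMII port index. */
            uint8_t phy_addr = (uint8_t)((PHY_MDIO_ADDR << 2) | port);

            printf("QSGMII port %u -> MDIO address 0x%02x\n",
                   (unsigned)port, (unsigned)phy_addr);
        }
        return 0;
    }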
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c
-@@ -0,0 +1,78 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_mac.h"
-+#include "memac.h"
-+#include "xx_ext.h"
-+
-+#include "fm_common.h"
-+#include "memac_mii_acc.h"
-+
-+
-+/*****************************************************************************/
-+t_Error MEMAC_MII_WritePhyReg(t_Handle h_Memac,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t data)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MiiMemMap, E_INVALID_HANDLE);
-+
-+ return (t_Error)fman_memac_mii_write_phy_reg(p_Memac->p_MiiMemMap,
-+ phyAddr,
-+ reg,
-+ data,
-+ (enum enet_speed)ENET_SPEED_FROM_MODE(p_Memac->enetMode));
-+}
-+
-+/*****************************************************************************/
-+t_Error MEMAC_MII_ReadPhyReg(t_Handle h_Memac,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t *p_Data)
-+{
-+ t_Memac *p_Memac = (t_Memac *)h_Memac;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Memac->p_MiiMemMap, E_INVALID_HANDLE);
-+
-+ return fman_memac_mii_read_phy_reg(p_Memac->p_MiiMemMap,
-+ phyAddr,
-+ reg,
-+ p_Data,
-+ (enum enet_speed)ENET_SPEED_FROM_MODE(p_Memac->enetMode));
-+}
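
These two wrappers are what f_FM_MAC_MII_WritePhyReg and f_FM_MAC_MII_ReadPhyReg point at; they only add sanity checks and the speed hint derived from enetMode before delegating to the fman_memac_mii_* helpers. A hedged usage sketch follows, assuming an already initialized h_Memac handle, assuming MII register 0 is the standard clause-22 control register (an assumption, not something this patch states), and reusing the PHY_MDIO_ADDR and PHY_SGMII_CR_* definitions from memac.h:

    #include "memac.h"   /* assumption: provides the prototypes and PHY_SGMII_CR_* bits */

    /* Hypothetical: restart the internal SGMII PHY through the MII wrappers. */
    static t_Error restart_internal_sgmii_phy(t_Handle h_Memac)
    {
        uint16_t bmcr;
        t_Error  err;

        /* Read the control register (register 0 in clause-22 numbering). */
        err = MEMAC_MII_ReadPhyReg(h_Memac, PHY_MDIO_ADDR, 0, &bmcr);
        if (err != E_OK)
            return err;

        /* Request a PHY reset and an autonegotiation restart. */
        bmcr |= (uint16_t)(PHY_SGMII_CR_PHY_RESET | PHY_SGMII_CR_RESET_AN);
        return MEMAC_MII_WritePhyReg(h_Memac, PHY_MDIO_ADDR, 0, bmcr);
    }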
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h
-@@ -0,0 +1,73 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __MEMAC_MII_ACC_H
-+#define __MEMAC_MII_ACC_H
-+
-+#include "std_ext.h"
-+
-+
-+/* MII Management Registers */
-+#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
-+#define MDIO_CFG_CLK_DIV_SHIFT 7
-+#define MDIO_CFG_HOLD_MASK 0x0000001c
-+#define MDIO_CFG_ENC45 0x00000040
-+#define MDIO_CFG_READ_ERR 0x00000002
-+#define MDIO_CFG_BSY 0x00000001
-+
-+#define MDIO_CTL_PHY_ADDR_SHIFT 5
-+#define MDIO_CTL_READ 0x00008000
-+
-+#define MDIO_DATA_BSY 0x80000000
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/*----------------------------------------------------*/
-+/* MII Configuration Control Memory Map Registers */
-+/*----------------------------------------------------*/
-+typedef struct t_MemacMiiAccessMemMap
-+{
-+ volatile uint32_t mdio_cfg; /* 0x030 */
-+ volatile uint32_t mdio_ctrl; /* 0x034 */
-+ volatile uint32_t mdio_data; /* 0x038 */
-+ volatile uint32_t mdio_addr; /* 0x03c */
-+} t_MemacMiiAccessMemMap ;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+#endif /* __MEMAC_MII_ACC_H */
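
This register block sits at MEMAC_TO_MII_OFFSET (0x030) from the mEMAC base, which is why the offset comments start at 0x030. MDIO_CFG_BSY is the flag the low-level access code is expected to poll between transactions; that code is not part of this hunk, so the following is only a standalone sketch under the assumption that the bit reads back as 1 while a transaction is in flight:

    #include <stdbool.h>
    #include <stdint.h>

    #define MDIO_CFG_BSY 0x00000001   /* same value as in the header above */

    /* Hypothetical bounded busy-wait on the mdio_cfg busy flag. */
    static bool mdio_wait_idle(const volatile uint32_t *mdio_cfg)
    {
        unsigned int i;

        for (i = 0; i < 10000; i++) {
            if (!(*mdio_cfg & MDIO_CFG_BSY))
                return true;          /* controller idle, safe to issue a command */
        }
        return false;                 /* still busy; the caller decides how to fail */
    }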
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.c
-@@ -0,0 +1,1017 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File tgec.c
-+
-+ @Description FM 10G MAC ...
-+*//***************************************************************************/
-+
-+#include "std_ext.h"
-+#include "string_ext.h"
-+#include "error_ext.h"
-+#include "xx_ext.h"
-+#include "endian_ext.h"
-+#include "debug_ext.h"
-+#include "crc_mac_addr_ext.h"
-+
-+#include "fm_common.h"
-+#include "fsl_fman_tgec.h"
-+#include "tgec.h"
-+
-+
-+/*****************************************************************************/
-+/* Internal routines */
-+/*****************************************************************************/
-+
-+static t_Error CheckInitParameters(t_Tgec *p_Tgec)
-+{
-+ if (ENET_SPEED_FROM_MODE(p_Tgec->enetMode) < e_ENET_SPEED_10000)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 10G MAC driver only support 10G speed"));
-+#if (FM_MAX_NUM_OF_10G_MACS > 0)
-+ if (p_Tgec->macId >= FM_MAX_NUM_OF_10G_MACS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("macId of 10G can not be greater than 0"));
-+#endif /* (FM_MAX_NUM_OF_10G_MACS > 0) */
-+
-+ if (p_Tgec->addr == 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 10G MAC Must have a valid MAC Address"));
-+ if (!p_Tgec->f_Exception)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("uninitialized f_Exception"));
-+ if (!p_Tgec->f_Event)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("uninitialized f_Event"));
-+#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002
-+ if (!p_Tgec->p_TgecDriverParam->no_length_check_enable)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!"));
-+#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static uint32_t GetMacAddrHashCode(uint64_t ethAddr)
-+{
-+ uint32_t crc;
-+
-+ /* CRC calculation */
-+ GET_MAC_ADDR_CRC(ethAddr, crc);
-+
-+ crc = GetMirror32(crc);
-+
-+ return crc;
-+}
-+
-+/* ......................................................................... */
-+
-+static void TgecErrException(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ uint32_t event;
-+ struct tgec_regs *p_TgecMemMap = p_Tgec->p_MemMap;
-+
-+ /* do not handle MDIO events */
-+ event = fman_tgec_get_event(p_TgecMemMap, ~(TGEC_IMASK_MDIO_SCAN_EVENT | TGEC_IMASK_MDIO_CMD_CMPL));
-+ event &= fman_tgec_get_interrupt_mask(p_TgecMemMap);
-+
-+ fman_tgec_ack_event(p_TgecMemMap, event);
-+
-+ if (event & TGEC_IMASK_REM_FAULT)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_REM_FAULT);
-+ if (event & TGEC_IMASK_LOC_FAULT)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_LOC_FAULT);
-+ if (event & TGEC_IMASK_TX_ECC_ER)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_1TX_ECC_ER);
-+ if (event & TGEC_IMASK_TX_FIFO_UNFL)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_FIFO_UNFL);
-+ if (event & TGEC_IMASK_TX_FIFO_OVFL)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_FIFO_OVFL);
-+ if (event & TGEC_IMASK_TX_ER)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_ER);
-+ if (event & TGEC_IMASK_RX_FIFO_OVFL)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_FIFO_OVFL);
-+ if (event & TGEC_IMASK_RX_ECC_ER)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_ECC_ER);
-+ if (event & TGEC_IMASK_RX_JAB_FRM)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_JAB_FRM);
-+ if (event & TGEC_IMASK_RX_OVRSZ_FRM)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_OVRSZ_FRM);
-+ if (event & TGEC_IMASK_RX_RUNT_FRM)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_RUNT_FRM);
-+ if (event & TGEC_IMASK_RX_FRAG_FRM)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_FRAG_FRM);
-+ if (event & TGEC_IMASK_RX_LEN_ER)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_LEN_ER);
-+ if (event & TGEC_IMASK_RX_CRC_ER)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_CRC_ER);
-+ if (event & TGEC_IMASK_RX_ALIGN_ER)
-+ p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_ALIGN_ER);
-+}
-+
-+/* ......................................................................... */
-+
-+static void TgecException(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ uint32_t event;
-+ struct tgec_regs *p_TgecMemMap = p_Tgec->p_MemMap;
-+
-+ /* handle only MDIO events */
-+ event = fman_tgec_get_event(p_TgecMemMap, (TGEC_IMASK_MDIO_SCAN_EVENT | TGEC_IMASK_MDIO_CMD_CMPL));
-+ event &= fman_tgec_get_interrupt_mask(p_TgecMemMap);
-+
-+ fman_tgec_ack_event(p_TgecMemMap, event);
-+
-+ if (event & TGEC_IMASK_MDIO_SCAN_EVENT)
-+ p_Tgec->f_Event(p_Tgec->h_App, e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO);
-+ if (event & TGEC_IMASK_MDIO_CMD_CMPL)
-+ p_Tgec->f_Event(p_Tgec->h_App, e_FM_MAC_EX_10G_MDIO_CMD_CMPL);
-+}
-+
-+/* ......................................................................... */
-+
-+static void FreeInitResources(t_Tgec *p_Tgec)
-+{
-+ if (p_Tgec->mdioIrq != NO_IRQ)
-+ {
-+ XX_DisableIntr(p_Tgec->mdioIrq);
-+ XX_FreeIntr(p_Tgec->mdioIrq);
-+ }
-+
-+ FmUnregisterIntr(p_Tgec->fmMacControllerDriver.h_Fm, e_FM_MOD_10G_MAC, p_Tgec->macId, e_FM_INTR_TYPE_ERR);
-+
-+ /* release the driver's group hash table */
-+ FreeHashTable(p_Tgec->p_MulticastAddrHash);
-+ p_Tgec->p_MulticastAddrHash = NULL;
-+
-+ /* release the driver's individual hash table */
-+ FreeHashTable(p_Tgec->p_UnicastAddrHash);
-+ p_Tgec->p_UnicastAddrHash = NULL;
-+}
-+
-+
-+/*****************************************************************************/
-+/* 10G MAC API routines */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecEnable(t_Handle h_Tgec, e_CommMode mode)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ fman_tgec_enable(p_Tgec->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecDisable (t_Handle h_Tgec, e_CommMode mode)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ fman_tgec_disable(p_Tgec->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecSetPromiscuous(t_Handle h_Tgec, bool newVal)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ fman_tgec_set_promiscuous(p_Tgec->p_MemMap, newVal);
-+
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* Tgec Configs modification functions */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecConfigLoopback(t_Handle h_Tgec, bool newVal)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ p_Tgec->p_TgecDriverParam->loopback_enable = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecConfigWan(t_Handle h_Tgec, bool newVal)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ p_Tgec->p_TgecDriverParam->wan_mode_enable = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecConfigMaxFrameLength(t_Handle h_Tgec, uint16_t newVal)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ p_Tgec->p_TgecDriverParam->max_frame_length = newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecConfigLengthCheck(t_Handle h_Tgec, bool newVal)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ UNUSED(newVal);
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ p_Tgec->p_TgecDriverParam->no_length_check_enable = !newVal;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecConfigException(t_Handle h_Tgec, e_FmMacExceptions exception, bool enable)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Tgec->exceptions |= bitMask;
-+ else
-+ p_Tgec->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ return E_OK;
-+}
-+
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+/* ......................................................................... */
-+
-+static t_Error TgecConfigSkipFman11Workaround(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ p_Tgec->p_TgecDriverParam->skip_fman11_workaround = TRUE;
-+
-+ return E_OK;
-+}
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+
-+/*****************************************************************************/
-+/* Tgec Run Time API functions */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+/* backward compatibility. will be removed in the future. */
-+static t_Error TgecTxMacPause(t_Handle h_Tgec, uint16_t pauseTime)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+ fman_tgec_set_tx_pause_frames(p_Tgec->p_MemMap, pauseTime);
-+
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecSetTxPauseFrames(t_Handle h_Tgec,
-+ uint8_t priority,
-+ uint16_t pauseTime,
-+ uint16_t threshTime)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ UNUSED(priority); UNUSED(threshTime);
-+
-+ fman_tgec_set_tx_pause_frames(p_Tgec->p_MemMap, pauseTime);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecRxIgnoreMacPause(t_Handle h_Tgec, bool en)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ fman_tgec_set_rx_ignore_pause_frames(p_Tgec->p_MemMap, en);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecGetStatistics(t_Handle h_Tgec, t_FmMacStatistics *p_Statistics)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ struct tgec_regs *p_TgecMemMap;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER);
-+
-+ p_TgecMemMap = p_Tgec->p_MemMap;
-+
-+ p_Statistics->eStatPkts64 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R64);
-+ p_Statistics->eStatPkts65to127 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R127);
-+ p_Statistics->eStatPkts128to255 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R255);
-+ p_Statistics->eStatPkts256to511 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R511);
-+ p_Statistics->eStatPkts512to1023 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1023);
-+ p_Statistics->eStatPkts1024to1518 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1518);
-+ p_Statistics->eStatPkts1519to1522 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1519X);
-+/* */
-+ p_Statistics->eStatFragments = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TRFRG);
-+ p_Statistics->eStatJabbers = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TRJBR);
-+
-+ p_Statistics->eStatsDropEvents = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RDRP);
-+ p_Statistics->eStatCRCAlignErrors = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RALN);
-+
-+ p_Statistics->eStatUndersizePkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TRUND);
-+ p_Statistics->eStatOversizePkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TROVR);
-+/* Pause */
-+ p_Statistics->reStatPause = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RXPF);
-+ p_Statistics->teStatPause = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TXPF);
-+
-+/* MIB II */
-+ p_Statistics->ifInOctets = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_ROCT);
-+ p_Statistics->ifInUcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RUCA);
-+ p_Statistics->ifInMcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RMCA);
-+ p_Statistics->ifInBcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RBCA);
-+ p_Statistics->ifInPkts = p_Statistics->ifInUcastPkts
-+ + p_Statistics->ifInMcastPkts
-+ + p_Statistics->ifInBcastPkts;
-+ p_Statistics->ifInDiscards = 0;
-+ p_Statistics->ifInErrors = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_RERR);
-+
-+ p_Statistics->ifOutOctets = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TOCT);
-+ p_Statistics->ifOutUcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TUCA);
-+ p_Statistics->ifOutMcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TMCA);
-+ p_Statistics->ifOutBcastPkts = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TBCA);
-+ p_Statistics->ifOutPkts = p_Statistics->ifOutUcastPkts
-+ + p_Statistics->ifOutMcastPkts
-+ + p_Statistics->ifOutBcastPkts;
-+ p_Statistics->ifOutDiscards = 0;
-+ p_Statistics->ifOutErrors = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_TERR);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecGetFrameSizeCounters(t_Handle h_Tgec, t_FmMacFrameSizeCounters *p_FrameSizeCounters, e_CommMode type)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ struct tgec_regs *p_TgecMemMap;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FrameSizeCounters, E_NULL_POINTER);
-+
-+ p_TgecMemMap = p_Tgec->p_MemMap;
-+
-+ switch (type)
-+ {
-+ case e_COMM_MODE_NONE:
-+ break;
-+
-+ case e_COMM_MODE_RX:
-+ p_FrameSizeCounters->count_pkts_64 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R64);
-+ p_FrameSizeCounters->count_pkts_65_to_127 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R127);
-+ p_FrameSizeCounters->count_pkts_128_to_255 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R255);
-+ p_FrameSizeCounters->count_pkts_256_to_511 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R511);
-+ p_FrameSizeCounters->count_pkts_512_to_1023 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1023);
-+ p_FrameSizeCounters->count_pkts_1024_to_1518 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1518);
-+ p_FrameSizeCounters->count_pkts_1519_to_1522 = fman_tgec_get_counter(p_TgecMemMap, E_TGEC_COUNTER_R1519X);
-+ break;
-+
-+ case e_COMM_MODE_TX:
-+ //Tx counters not supported
-+ break;
-+
-+ case e_COMM_MODE_RX_AND_TX:
-+ //Tx counters not supported
-+ break;
-+ }
-+
-+ return E_OK;
-+}
-+
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecEnable1588TimeStamp(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ fman_tgec_enable_1588_time_stamp(p_Tgec->p_MemMap, 1);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecDisable1588TimeStamp(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ fman_tgec_enable_1588_time_stamp(p_Tgec->p_MemMap, 0);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecModifyMacAddress (t_Handle h_Tgec, t_EnetAddr *p_EnetAddr)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ p_Tgec->addr = ENET_ADDR_TO_UINT64(*p_EnetAddr);
-+ fman_tgec_set_mac_address(p_Tgec->p_MemMap, (uint8_t *)(*p_EnetAddr));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecResetCounters (t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ fman_tgec_reset_stat(p_Tgec->p_MemMap);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecAddExactMatchMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *) h_Tgec;
-+ uint64_t ethAddr;
-+ uint8_t paddrNum;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ if (ethAddr & GROUP_ADDRESS)
-+ /* Multicast address has no effect in PADDR */
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address"));
-+
-+ /* Make sure no PADDR contains this address */
-+ for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
-+ if (p_Tgec->indAddrRegUsed[paddrNum])
-+ if (p_Tgec->paddr[paddrNum] == ethAddr)
-+ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
-+
-+ /* Find first unused PADDR */
-+ for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
-+ {
-+ if (!(p_Tgec->indAddrRegUsed[paddrNum]))
-+ {
-+ /* mark this PADDR as used */
-+ p_Tgec->indAddrRegUsed[paddrNum] = TRUE;
-+ /* store address */
-+ p_Tgec->paddr[paddrNum] = ethAddr;
-+
-+ /* put in hardware */
-+ fman_tgec_add_addr_in_paddr(p_Tgec->p_MemMap, (uint8_t*)(*p_EthAddr)/* , paddrNum */);
-+ p_Tgec->numOfIndAddrInRegs++;
-+
-+ return E_OK;
-+ }
-+ }
-+
-+ /* No free PADDR */
-+ RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecDelExactMatchMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *) h_Tgec;
-+ uint64_t ethAddr;
-+ uint8_t paddrNum;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ /* Find used PADDR containing this address */
-+ for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
-+ {
-+ if ((p_Tgec->indAddrRegUsed[paddrNum]) &&
-+ (p_Tgec->paddr[paddrNum] == ethAddr))
-+ {
-+ /* mark this PADDR as not used */
-+ p_Tgec->indAddrRegUsed[paddrNum] = FALSE;
-+ /* clear in hardware */
-+ fman_tgec_clear_addr_in_paddr(p_Tgec->p_MemMap /*, paddrNum */);
-+ p_Tgec->numOfIndAddrInRegs--;
-+
-+ return E_OK;
-+ }
-+ }
-+
-+ RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG);
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecAddHashMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ t_EthHashEntry *p_HashEntry;
-+ uint32_t crc;
-+ uint32_t hash;
-+ uint64_t ethAddr;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ENET_ADDR_TO_UINT64(*p_EthAddr);
-+
-+ if (!(ethAddr & GROUP_ADDRESS))
-+ /* Unicast addresses not supported in hash */
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unicast Address"));
-+
-+ /* CRC calculation */
-+ crc = GetMacAddrHashCode(ethAddr);
-+
-+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* Take 9 MSB bits */
-+
-+ /* Create element to be added to the driver hash table */
-+ p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry));
-+ p_HashEntry->addr = ethAddr;
-+ INIT_LIST(&p_HashEntry->node);
-+
-+ LIST_AddToTail(&(p_HashEntry->node), &(p_Tgec->p_MulticastAddrHash->p_Lsts[hash]));
-+ fman_tgec_set_hash_table(p_Tgec->p_MemMap, (hash | TGEC_HASH_MCAST_EN));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecDelHashMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ t_EthHashEntry *p_HashEntry = NULL;
-+ t_List *p_Pos;
-+ uint32_t crc;
-+ uint32_t hash;
-+ uint64_t ethAddr;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ ethAddr = ((*(uint64_t *)p_EthAddr) >> 16);
-+
-+ /* CRC calculation */
-+ crc = GetMacAddrHashCode(ethAddr);
-+
-+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* Take 9 MSB bits */
-+
-+ LIST_FOR_EACH(p_Pos, &(p_Tgec->p_MulticastAddrHash->p_Lsts[hash]))
-+ {
-+ p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos);
-+ if (p_HashEntry->addr == ethAddr)
-+ {
-+ LIST_DelAndInit(&p_HashEntry->node);
-+ XX_Free(p_HashEntry);
-+ break;
-+ }
-+ }
-+ if (LIST_IsEmpty(&p_Tgec->p_MulticastAddrHash->p_Lsts[hash]))
-+ fman_tgec_set_hash_table(p_Tgec->p_MemMap, (hash & ~TGEC_HASH_MCAST_EN));
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecGetId(t_Handle h_Tgec, uint32_t *macId)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ UNUSED(p_Tgec);
-+ UNUSED(macId);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("TgecGetId Not Supported"));
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecGetVersion(t_Handle h_Tgec, uint32_t *macVersion)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ *macVersion = fman_tgec_get_revision(p_Tgec->p_MemMap);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecSetExcpetion(t_Handle h_Tgec, e_FmMacExceptions exception, bool enable)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Tgec->exceptions |= bitMask;
-+ else
-+ p_Tgec->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ if (enable)
-+ fman_tgec_enable_interrupt(p_Tgec->p_MemMap, bitMask);
-+ else
-+ fman_tgec_disable_interrupt(p_Tgec->p_MemMap, bitMask);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static uint16_t TgecGetMaxFrameLength(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Tgec, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE, 0);
-+
-+ return fman_tgec_get_max_frame_len(p_Tgec->p_MemMap);
-+}
-+
-+/* ......................................................................... */
-+
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+static t_Error TgecTxEccWorkaround(t_Tgec *p_Tgec)
-+{
-+ t_Error err;
-+
-+#if defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)
-+ XX_Print("Applying 10G TX ECC workaround (10GMAC-A004) ... ");
-+#endif /* (DEBUG_ERRORS > 0) */
-+ /* enable and set promiscuous */
-+ fman_tgec_enable(p_Tgec->p_MemMap, TRUE, TRUE);
-+ fman_tgec_set_promiscuous(p_Tgec->p_MemMap, TRUE);
-+ err = Fm10GTxEccWorkaround(p_Tgec->fmMacControllerDriver.h_Fm, p_Tgec->macId);
-+ /* disable */
-+ fman_tgec_set_promiscuous(p_Tgec->p_MemMap, FALSE);
-+ fman_tgec_enable(p_Tgec->p_MemMap, FALSE, FALSE);
-+ fman_tgec_reset_stat(p_Tgec->p_MemMap);
-+ fman_tgec_ack_event(p_Tgec->p_MemMap, 0xffffffff);
-+#if defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)
-+ if (err)
-+ XX_Print("FAILED!\n");
-+ else
-+ XX_Print("done.\n");
-+#endif /* (DEBUG_ERRORS > 0) */
-+
-+ return err;
-+}
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+/*****************************************************************************/
-+/* FM Init & Free API */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecInit(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ struct tgec_cfg *p_TgecDriverParam;
-+ t_EnetAddr ethAddr;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->fmMacControllerDriver.h_Fm, E_INVALID_HANDLE);
-+
-+ FM_GetRevision(p_Tgec->fmMacControllerDriver.h_Fm, &p_Tgec->fmMacControllerDriver.fmRevInfo);
-+ CHECK_INIT_PARAMETERS(p_Tgec, CheckInitParameters);
-+
-+ p_TgecDriverParam = p_Tgec->p_TgecDriverParam;
-+
-+ MAKE_ENET_ADDR_FROM_UINT64(p_Tgec->addr, ethAddr);
-+ fman_tgec_set_mac_address(p_Tgec->p_MemMap, (uint8_t *)ethAddr);
-+
-+ /* interrupts */
-+#ifdef FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005
-+ {
-+ if (p_Tgec->fmMacControllerDriver.fmRevInfo.majorRev <=2)
-+ p_Tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT | TGEC_IMASK_LOC_FAULT);
-+ }
-+#endif /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 */
-+
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+ if (!p_Tgec->p_TgecDriverParam->skip_fman11_workaround &&
-+ ((err = TgecTxEccWorkaround(p_Tgec)) != E_OK))
-+ {
-+ FreeInitResources(p_Tgec);
-+ REPORT_ERROR(MINOR, err, ("TgecTxEccWorkaround FAILED"));
-+ }
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+ err = fman_tgec_init(p_Tgec->p_MemMap, p_TgecDriverParam, p_Tgec->exceptions);
-+ if (err)
-+ {
-+ FreeInitResources(p_Tgec);
-+ RETURN_ERROR(MAJOR, err, ("This TGEC version does not support the required i/f mode"));
-+ }
-+
-+ /* Max Frame Length */
-+ err = FmSetMacMaxFrame(p_Tgec->fmMacControllerDriver.h_Fm,
-+ e_FM_MAC_10G,
-+ p_Tgec->fmMacControllerDriver.macId,
-+ p_TgecDriverParam->max_frame_length);
-+ if (err != E_OK)
-+ {
-+ FreeInitResources(p_Tgec);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+/* Running without IPC is not treated as a fatal error here. */
-+
-+#ifdef FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007
-+ if (p_Tgec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
-+ fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007(p_Tgec->p_MemMap);
-+#endif /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 */
-+
-+ p_Tgec->p_MulticastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
-+ if (!p_Tgec->p_MulticastAddrHash)
-+ {
-+ FreeInitResources(p_Tgec);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED"));
-+ }
-+
-+ p_Tgec->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
-+ if (!p_Tgec->p_UnicastAddrHash)
-+ {
-+ FreeInitResources(p_Tgec);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED"));
-+ }
-+
-+ FmRegisterIntr(p_Tgec->fmMacControllerDriver.h_Fm,
-+ e_FM_MOD_10G_MAC,
-+ p_Tgec->macId,
-+ e_FM_INTR_TYPE_ERR,
-+ TgecErrException,
-+ p_Tgec);
-+ if (p_Tgec->mdioIrq != NO_IRQ)
-+ {
-+ XX_SetIntr(p_Tgec->mdioIrq, TgecException, p_Tgec);
-+ XX_EnableIntr(p_Tgec->mdioIrq);
-+ }
-+
-+ XX_Free(p_TgecDriverParam);
-+ p_Tgec->p_TgecDriverParam = NULL;
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static t_Error TgecFree(t_Handle h_Tgec)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+
-+ if (p_Tgec->p_TgecDriverParam)
-+ {
-+ /* Called after config */
-+ XX_Free(p_Tgec->p_TgecDriverParam);
-+ p_Tgec->p_TgecDriverParam = NULL;
-+ }
-+ else
-+ /* Called after init */
-+ FreeInitResources(p_Tgec);
-+
-+ XX_Free(p_Tgec);
-+
-+ return E_OK;
-+}
-+
-+/* ......................................................................... */
-+
-+static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver)
-+{
-+ p_FmMacControllerDriver->f_FM_MAC_Init = TgecInit;
-+ p_FmMacControllerDriver->f_FM_MAC_Free = TgecFree;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetStatistics = NULL;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = TgecConfigLoopback;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = TgecConfigMaxFrameLength;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigWan = TgecConfigWan;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = NULL; /* TGEC always works with pad+crc */
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = NULL; /* half-duplex is not supported by the 10G MAC (TGEC) */
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = TgecConfigLengthCheck;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigException = TgecConfigException;
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigResetOnInit = NULL;
-+
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+ p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround= TgecConfigSkipFman11Workaround;
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetException = TgecSetException;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = TgecEnable1588TimeStamp;
-+ p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = TgecDisable1588TimeStamp;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = TgecSetPromiscuous;
-+ p_FmMacControllerDriver->f_FM_MAC_AdjustLink = NULL;
-+ p_FmMacControllerDriver->f_FM_MAC_SetWakeOnLan = NULL;
-+ p_FmMacControllerDriver->f_FM_MAC_RestartAutoneg = NULL;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_Enable = TgecEnable;
-+ p_FmMacControllerDriver->f_FM_MAC_Disable = TgecDisable;
-+ p_FmMacControllerDriver->f_FM_MAC_Resume = NULL;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = TgecTxMacPause;
-+ p_FmMacControllerDriver->f_FM_MAC_SetTxPauseFrames = TgecSetTxPauseFrames;
-+ p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = TgecRxIgnoreMacPause;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ResetCounters = TgecResetCounters;
-+ p_FmMacControllerDriver->f_FM_MAC_GetStatistics = TgecGetStatistics;
-+ p_FmMacControllerDriver->f_FM_MAC_GetFrameSizeCounters = TgecGetFrameSizeCounters;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = TgecModifyMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = TgecAddHashMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = TgecDelHashMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = TgecAddExactMatchMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = TgecDelExactMatchMacAddress;
-+ p_FmMacControllerDriver->f_FM_MAC_GetId = TgecGetId;
-+ p_FmMacControllerDriver->f_FM_MAC_GetVersion = TgecGetVersion;
-+ p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = TgecGetMaxFrameLength;
-+
-+ p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = TGEC_MII_WritePhyReg;
-+ p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = TGEC_MII_ReadPhyReg;
-+}
-+
-+
-+/*****************************************************************************/
-+/* Tgec Config Main Entry */
-+/*****************************************************************************/
-+
-+/* ......................................................................... */
-+
-+t_Handle TGEC_Config(t_FmMacParams *p_FmMacParam)
-+{
-+ t_Tgec *p_Tgec;
-+ struct tgec_cfg *p_TgecDriverParam;
-+ uintptr_t baseAddr;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL);
-+
-+ baseAddr = p_FmMacParam->baseAddr;
-+ /* allocate memory for the TGEC (10G MAC) driver data structure. */
-+ p_Tgec = (t_Tgec *)XX_Malloc(sizeof(t_Tgec));
-+ if (!p_Tgec)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("10G MAC driver structure"));
-+ return NULL;
-+ }
-+ memset(p_Tgec, 0, sizeof(t_Tgec));
-+ InitFmMacControllerDriver(&p_Tgec->fmMacControllerDriver);
-+
-+ /* allocate memory for the 10G MAC driver parameters data structure. */
-+ p_TgecDriverParam = (struct tgec_cfg *) XX_Malloc(sizeof(struct tgec_cfg));
-+ if (!p_TgecDriverParam)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("10G MAC driver parameters"));
-+ XX_Free(p_Tgec);
-+ return NULL;
-+ }
-+ memset(p_TgecDriverParam, 0, sizeof(struct tgec_cfg));
-+
-+ /* Plant parameter structure pointer */
-+ p_Tgec->p_TgecDriverParam = p_TgecDriverParam;
-+
-+ fman_tgec_defconfig(p_TgecDriverParam);
-+
-+ p_Tgec->p_MemMap = (struct tgec_regs *)UINT_TO_PTR(baseAddr);
-+ p_Tgec->p_MiiMemMap = (t_TgecMiiAccessMemMap *)UINT_TO_PTR(baseAddr + TGEC_TO_MII_OFFSET);
-+ p_Tgec->addr = ENET_ADDR_TO_UINT64(p_FmMacParam->addr);
-+ p_Tgec->enetMode = p_FmMacParam->enetMode;
-+ p_Tgec->macId = p_FmMacParam->macId;
-+ p_Tgec->exceptions = DEFAULT_exceptions;
-+ p_Tgec->mdioIrq = p_FmMacParam->mdioIrq;
-+ p_Tgec->f_Exception = p_FmMacParam->f_Exception;
-+ p_Tgec->f_Event = p_FmMacParam->f_Event;
-+ p_Tgec->h_App = p_FmMacParam->h_App;
-+
-+ return p_Tgec;
-+}
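-+
-+/* Typical life cycle (sketch): TGEC_Config() allocates the t_Tgec object with
-+ * default tgec_cfg values; optional tuning goes through the f_FM_MAC_Config*
-+ * callbacks set in InitFmMacControllerDriver(); TgecInit() then programs the
-+ * hardware and releases p_TgecDriverParam; TgecFree() releases the rest. */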
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec.h
-@@ -0,0 +1,151 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File tgec.h
-+
-+ @Description FM 10G MAC ...
-+*//***************************************************************************/
-+#ifndef __TGEC_H
-+#define __TGEC_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+#include "enet_ext.h"
-+
-+#include "tgec_mii_acc.h"
-+#include "fm_mac.h"
-+
-+
-+#define DEFAULT_exceptions \
-+ ((uint32_t)(TGEC_IMASK_MDIO_SCAN_EVENT | \
-+ TGEC_IMASK_REM_FAULT | \
-+ TGEC_IMASK_LOC_FAULT | \
-+ TGEC_IMASK_TX_ECC_ER | \
-+ TGEC_IMASK_TX_FIFO_UNFL | \
-+ TGEC_IMASK_TX_FIFO_OVFL | \
-+ TGEC_IMASK_TX_ER | \
-+ TGEC_IMASK_RX_FIFO_OVFL | \
-+ TGEC_IMASK_RX_ECC_ER | \
-+ TGEC_IMASK_RX_JAB_FRM | \
-+ TGEC_IMASK_RX_OVRSZ_FRM | \
-+ TGEC_IMASK_RX_RUNT_FRM | \
-+ TGEC_IMASK_RX_FRAG_FRM | \
-+ TGEC_IMASK_RX_CRC_ER | \
-+ TGEC_IMASK_RX_ALIGN_ER))
-+
-+#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
-+ case e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO: \
-+ bitMask = TGEC_IMASK_MDIO_SCAN_EVENT ; break; \
-+ case e_FM_MAC_EX_10G_MDIO_CMD_CMPL: \
-+ bitMask = TGEC_IMASK_MDIO_CMD_CMPL ; break; \
-+ case e_FM_MAC_EX_10G_REM_FAULT: \
-+ bitMask = TGEC_IMASK_REM_FAULT ; break; \
-+ case e_FM_MAC_EX_10G_LOC_FAULT: \
-+ bitMask = TGEC_IMASK_LOC_FAULT ; break; \
-+ case e_FM_MAC_EX_10G_1TX_ECC_ER: \
-+ bitMask = TGEC_IMASK_TX_ECC_ER ; break; \
-+ case e_FM_MAC_EX_10G_TX_FIFO_UNFL: \
-+ bitMask = TGEC_IMASK_TX_FIFO_UNFL ; break; \
-+ case e_FM_MAC_EX_10G_TX_FIFO_OVFL: \
-+ bitMask = TGEC_IMASK_TX_FIFO_OVFL ; break; \
-+ case e_FM_MAC_EX_10G_TX_ER: \
-+ bitMask = TGEC_IMASK_TX_ER ; break; \
-+ case e_FM_MAC_EX_10G_RX_FIFO_OVFL: \
-+ bitMask = TGEC_IMASK_RX_FIFO_OVFL ; break; \
-+ case e_FM_MAC_EX_10G_RX_ECC_ER: \
-+ bitMask = TGEC_IMASK_RX_ECC_ER ; break; \
-+ case e_FM_MAC_EX_10G_RX_JAB_FRM: \
-+ bitMask = TGEC_IMASK_RX_JAB_FRM ; break; \
-+ case e_FM_MAC_EX_10G_RX_OVRSZ_FRM: \
-+ bitMask = TGEC_IMASK_RX_OVRSZ_FRM ; break; \
-+ case e_FM_MAC_EX_10G_RX_RUNT_FRM: \
-+ bitMask = TGEC_IMASK_RX_RUNT_FRM ; break; \
-+ case e_FM_MAC_EX_10G_RX_FRAG_FRM: \
-+ bitMask = TGEC_IMASK_RX_FRAG_FRM ; break; \
-+ case e_FM_MAC_EX_10G_RX_LEN_ER: \
-+ bitMask = TGEC_IMASK_RX_LEN_ER ; break; \
-+ case e_FM_MAC_EX_10G_RX_CRC_ER: \
-+ bitMask = TGEC_IMASK_RX_CRC_ER ; break; \
-+ case e_FM_MAC_EX_10G_RX_ALIGN_ER: \
-+ bitMask = TGEC_IMASK_RX_ALIGN_ER ; break; \
-+ default: bitMask = 0;break;}
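-+
-+/* Usage sketch: GET_EXCEPTION_FLAG maps an e_FmMacExceptions value to its
-+ * TGEC_IMASK_* bit, e.g. e_FM_MAC_EX_10G_RX_CRC_ER -> TGEC_IMASK_RX_CRC_ER;
-+ * any value not listed above leaves bitMask = 0, which callers such as
-+ * TgecSetException report as "Undefined exception". */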
-+
-+#define MAX_PACKET_ALIGNMENT 31
-+#define MAX_INTER_PACKET_GAP 0x7f
-+#define MAX_INTER_PALTERNATE_BEB 0x0f
-+#define MAX_RETRANSMISSION 0x0f
-+#define MAX_COLLISION_WINDOW 0x03ff
-+
-+#define TGEC_NUM_OF_PADDRS 1 /* number of pattern match registers (entries) */
-+
-+#define GROUP_ADDRESS 0x0000010000000000LL /* Group address bit indication */
-+
-+#define HASH_TABLE_SIZE 512 /* Hash table size (= 32 bits * 8 regs) */
-+
-+#define TGEC_TO_MII_OFFSET 0x1030 /* Offset from the MEM map to the MDIO mem map */
-+
-+/* 10-gigabit Ethernet MAC Controller ID (10GEC_ID) */
-+#define TGEC_ID_ID 0xffff0000
-+#define TGEC_ID_MAC_VERSION 0x0000FF00
-+#define TGEC_ID_MAC_REV 0x000000ff
-+
-+
-+typedef struct {
-+ t_FmMacControllerDriver fmMacControllerDriver; /**< Upper Mac control block */
-+ t_Handle h_App; /**< Handle to the upper layer application */
-+ struct tgec_regs *p_MemMap; /**< pointer to 10G memory mapped registers. */
-+ t_TgecMiiAccessMemMap *p_MiiMemMap; /**< pointer to MII memory mapped registers. */
-+ uint64_t addr; /**< MAC address of device; */
-+ e_EnetMode enetMode; /**< Ethernet physical interface */
-+ t_FmMacExceptionCallback *f_Exception;
-+ int mdioIrq;
-+ t_FmMacExceptionCallback *f_Event;
-+ bool indAddrRegUsed[TGEC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */
-+ uint64_t paddr[TGEC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */
-+ uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. */
-+ t_EthHash *p_MulticastAddrHash; /**< pointer to driver's global address hash table */
-+ t_EthHash *p_UnicastAddrHash; /**< pointer to driver's individual address hash table */
-+ bool debugMode;
-+ uint8_t macId;
-+ uint32_t exceptions;
-+ struct tgec_cfg *p_TgecDriverParam;
-+} t_Tgec;
-+
-+
-+t_Error TGEC_MII_WritePhyReg(t_Handle h_Tgec, uint8_t phyAddr, uint8_t reg, uint16_t data);
-+t_Error TGEC_MII_ReadPhyReg(t_Handle h_Tgec, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
-+
-+
-+#endif /* __TGEC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c
-@@ -0,0 +1,139 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_mac.h"
-+#include "tgec.h"
-+#include "xx_ext.h"
-+
-+#include "fm_common.h"
-+
-+
-+/*****************************************************************************/
-+t_Error TGEC_MII_WritePhyReg(t_Handle h_Tgec,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t data)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ t_TgecMiiAccessMemMap *p_MiiAccess;
-+ uint32_t cfgStatusReg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MiiMemMap, E_INVALID_HANDLE);
-+
-+ p_MiiAccess = p_Tgec->p_MiiMemMap;
-+
-+ /* Configure MII */
-+ cfgStatusReg = GET_UINT32(p_MiiAccess->mdio_cfg_status);
-+ cfgStatusReg &= ~MIIMCOM_DIV_MASK;
-+ /* (divide half of the FM clock down to a 2.5 MHz MDC) */
-+ cfgStatusReg |=((((p_Tgec->fmMacControllerDriver.clkFreq*10)/2)/25) << MIIMCOM_DIV_SHIFT);
-+ WRITE_UINT32(p_MiiAccess->mdio_cfg_status, cfgStatusReg);
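-+ /* Worked example, assuming clkFreq holds the FM clock in MHz (e.g. 500):
-+ * ((500*10)/2)/25 = 100, so MDC = (500/2) MHz / 100 = 2.5 MHz. */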
-+
-+ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
-+ XX_UDelay (1);
-+
-+ WRITE_UINT32(p_MiiAccess->mdio_command, phyAddr);
-+
-+ WRITE_UINT32(p_MiiAccess->mdio_regaddr, reg);
-+
-+ CORE_MemoryBarrier();
-+
-+ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
-+ XX_UDelay (1);
-+
-+ WRITE_UINT32(p_MiiAccess->mdio_data, data);
-+
-+ CORE_MemoryBarrier();
-+
-+ while ((GET_UINT32(p_MiiAccess->mdio_data)) & MIIDATA_BUSY)
-+ XX_UDelay (1);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error TGEC_MII_ReadPhyReg(t_Handle h_Tgec,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t *p_Data)
-+{
-+ t_Tgec *p_Tgec = (t_Tgec *)h_Tgec;
-+ t_TgecMiiAccessMemMap *p_MiiAccess;
-+ uint32_t cfgStatusReg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MiiMemMap, E_INVALID_HANDLE);
-+
-+ p_MiiAccess = p_Tgec->p_MiiMemMap;
-+
-+ /* Configure MII */
-+ cfgStatusReg = GET_UINT32(p_MiiAccess->mdio_cfg_status);
-+ cfgStatusReg &= ~MIIMCOM_DIV_MASK;
-+ /* (divide half of the FM clock down to a 2.5 MHz MDC) */
-+ cfgStatusReg |=((((p_Tgec->fmMacControllerDriver.clkFreq*10)/2)/25) << MIIMCOM_DIV_SHIFT);
-+ WRITE_UINT32(p_MiiAccess->mdio_cfg_status, cfgStatusReg);
-+
-+ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
-+ XX_UDelay (1);
-+
-+ WRITE_UINT32(p_MiiAccess->mdio_command, phyAddr);
-+
-+ WRITE_UINT32(p_MiiAccess->mdio_regaddr, reg);
-+
-+ CORE_MemoryBarrier();
-+
-+ while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY)
-+ XX_UDelay (1);
-+
-+ WRITE_UINT32(p_MiiAccess->mdio_command, (uint32_t)(phyAddr | MIIMCOM_READ_CYCLE));
-+
-+ CORE_MemoryBarrier();
-+
-+ while ((GET_UINT32(p_MiiAccess->mdio_data)) & MIIDATA_BUSY)
-+ XX_UDelay (1);
-+
-+ *p_Data = (uint16_t)GET_UINT32(p_MiiAccess->mdio_data);
-+
-+ cfgStatusReg = GET_UINT32(p_MiiAccess->mdio_cfg_status);
-+
-+ if (cfgStatusReg & MIIMIND_READ_ERROR)
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE,
-+ ("Read Error: phyAddr 0x%x, dev 0x%x, reg 0x%x, cfgStatusReg 0x%x",
-+ ((phyAddr & 0xe0)>>5), (phyAddr & 0x1f), reg, cfgStatusReg));
-+
-+ return E_OK;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h
-@@ -0,0 +1,80 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __TGEC_MII_ACC_H
-+#define __TGEC_MII_ACC_H
-+
-+#include "std_ext.h"
-+
-+
-+/* MII Management Command Register */
-+#define MIIMCOM_READ_POST_INCREMENT 0x00004000
-+#define MIIMCOM_READ_CYCLE 0x00008000
-+#define MIIMCOM_SCAN_CYCLE 0x00000800
-+#define MIIMCOM_PREAMBLE_DISABLE 0x00000400
-+
-+#define MIIMCOM_MDIO_HOLD_1_REG_CLK 0
-+#define MIIMCOM_MDIO_HOLD_2_REG_CLK 1
-+#define MIIMCOM_MDIO_HOLD_3_REG_CLK 2
-+#define MIIMCOM_MDIO_HOLD_4_REG_CLK 3
-+
-+#define MIIMCOM_DIV_MASK 0x0000ff00
-+#define MIIMCOM_DIV_SHIFT 8
-+
-+/* MII Management Indicator Register */
-+#define MIIMIND_BUSY 0x00000001
-+#define MIIMIND_READ_ERROR 0x00000002
-+
-+#define MIIDATA_BUSY 0x80000000
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/*----------------------------------------------------*/
-+/* MII Configuration Control Memory Map Registers */
-+/*----------------------------------------------------*/
-+typedef _Packed struct t_TgecMiiAccessMemMap
-+{
-+ volatile uint32_t mdio_cfg_status; /* 0x030 */
-+ volatile uint32_t mdio_command; /* 0x034 */
-+ volatile uint32_t mdio_data; /* 0x038 */
-+ volatile uint32_t mdio_regaddr; /* 0x03c */
-+} _PackedType t_TgecMiiAccessMemMap ;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+#endif /* __TGEC_MII_ACC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/Makefile
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+obj-y += fsl-ncsw-macsec.o
-+
-+fsl-ncsw-macsec-objs := fm_macsec.o fm_macsec_guest.o fm_macsec_master.o fm_macsec_secy.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c
-@@ -0,0 +1,237 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+/******************************************************************************
-+
-+ @File fm_macsec.c
-+
-+ @Description FM MACSEC driver routines implementation.
-+*//***************************************************************************/
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "xx_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+#include "debug_ext.h"
-+
-+#include "fm_macsec.h"
-+
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+t_Handle FM_MACSEC_Config(t_FmMacsecParams *p_FmMacsecParam)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacsecParam, E_INVALID_HANDLE, NULL);
-+
-+ if (p_FmMacsecParam->guestMode)
-+ p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)FM_MACSEC_GUEST_Config(p_FmMacsecParam);
-+ else
-+ p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)FM_MACSEC_MASTER_Config(p_FmMacsecParam);
-+
-+ if (!p_FmMacsecControllerDriver)
-+ return NULL;
-+
-+ return (t_Handle)p_FmMacsecControllerDriver;
-+}
-+
-+t_Error FM_MACSEC_Init(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Init)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_Init(h_FmMacsec);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_Free(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Free)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_Free(h_FmMacsec);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigUnknownSciFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUnknownSciFrameTreatment)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUnknownSciFrameTreatment(h_FmMacsec, treatMode);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigInvalidTagsFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigInvalidTagsFrameTreatment)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigInvalidTagsFrameTreatment(h_FmMacsec, deliverUncontrolled);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(t_Handle h_FmMacsec, bool discardUncontrolled)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(h_FmMacsec, discardUncontrolled);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigUntagFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUntagFrameTreatment)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUntagFrameTreatment(h_FmMacsec, treatMode);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigPnExhaustionThreshold(t_Handle h_FmMacsec, uint32_t pnExhThr)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigPnExhaustionThreshold)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigPnExhaustionThreshold(h_FmMacsec, pnExhThr);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigKeysUnreadable(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigKeysUnreadable)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigKeysUnreadable(h_FmMacsec);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigSectagWithoutSCI(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigSectagWithoutSCI)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigSectagWithoutSCI(h_FmMacsec);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_ConfigException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigException)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigException(h_FmMacsec, exception, enable);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_GetRevision(t_Handle h_FmMacsec, uint32_t *p_MacsecRevision)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_GetRevision)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_GetRevision(h_FmMacsec, p_MacsecRevision);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+
-+t_Error FM_MACSEC_Enable(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Enable)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_Enable(h_FmMacsec);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_Disable(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_Disable)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_Disable(h_FmMacsec);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_SetException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
-+{
-+ t_FmMacsecControllerDriver *p_FmMacsecControllerDriver = (t_FmMacsecControllerDriver *)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecControllerDriver, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecControllerDriver->f_FM_MACSEC_SetException)
-+ return p_FmMacsecControllerDriver->f_FM_MACSEC_SetException(h_FmMacsec, exception, enable);
-+
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h
-@@ -0,0 +1,203 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fm_macsec.h
-+
-+ @Description FM MACSEC internal structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_MACSEC_H
-+#define __FM_MACSEC_H
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_macsec_ext.h"
-+
-+#include "fm_common.h"
-+
-+
-+#define __ERR_MODULE__ MODULE_FM_MACSEC
-+
-+
-+typedef struct
-+{
-+ t_Error (*f_FM_MACSEC_Init) (t_Handle h_FmMacsec);
-+ t_Error (*f_FM_MACSEC_Free) (t_Handle h_FmMacsec);
-+
-+ t_Error (*f_FM_MACSEC_ConfigUnknownSciFrameTreatment) (t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode);
-+ t_Error (*f_FM_MACSEC_ConfigInvalidTagsFrameTreatment) (t_Handle h_FmMacsec, bool deliverUncontrolled);
-+ t_Error (*f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment) (t_Handle h_FmMacsec, bool discardUncontrolled);
-+ t_Error (*f_FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment) (t_Handle h_FmMacsec, bool deliverUncontrolled);
-+ t_Error (*f_FM_MACSEC_ConfigUntagFrameTreatment) (t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode);
-+ t_Error (*f_FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment) (t_Handle h_FmMacsec, bool deliverUncontrolled);
-+ t_Error (*f_FM_MACSEC_ConfigPnExhaustionThreshold) (t_Handle h_FmMacsec, uint32_t pnExhThr);
-+ t_Error (*f_FM_MACSEC_ConfigKeysUnreadable) (t_Handle h_FmMacsec);
-+ t_Error (*f_FM_MACSEC_ConfigSectagWithoutSCI) (t_Handle h_FmMacsec);
-+ t_Error (*f_FM_MACSEC_ConfigException) (t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
-+
-+ t_Error (*f_FM_MACSEC_GetRevision) (t_Handle h_FmMacsec, uint32_t *p_MacsecRevision);
-+ t_Error (*f_FM_MACSEC_Enable) (t_Handle h_FmMacsec);
-+ t_Error (*f_FM_MACSEC_Disable) (t_Handle h_FmMacsec);
-+ t_Error (*f_FM_MACSEC_SetException) (t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
-+
-+} t_FmMacsecControllerDriver;
-+
-+t_Handle FM_MACSEC_GUEST_Config(t_FmMacsecParams *p_FmMacsecParam);
-+t_Handle FM_MACSEC_MASTER_Config(t_FmMacsecParams *p_FmMacsecParams);
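-+
-+/* Dispatch sketch: FM_MACSEC_Config() (fm_macsec.c) picks one of the two
-+ * configurators above based on p_FmMacsecParam->guestMode; every FM_MACSEC_*
-+ * API call is then routed through the matching t_FmMacsecControllerDriver
-+ * function pointer and returns E_NOT_SUPPORTED when that pointer is NULL. */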
-+
-+/***********************************************************************/
-+/* MACSEC internal routines */
-+/***********************************************************************/
-+
-+/**************************************************************************//**
-+
-+ @Group FM_MACSEC_InterModule_grp FM MACSEC Inter-Module Unit
-+
-+ @Description FM MACSEC Inter Module functions -
-+ These are not User API routines but routines that may be called
-+ from other modules. This will be the case in a single core environment,
-+ where instead of using the XX messaging mechanism, the routines may be
-+ called from other modules. In a multicore environment, the other modules may
-+ be run by other cores and therefore these routines may not be called directly.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define MAX_NUM_OF_SA_PER_SC 4
-+
-+typedef enum
-+{
-+ e_SC_RX = 0,
-+ e_SC_TX
-+} e_ScType;
-+
-+typedef enum
-+{
-+ e_SC_SA_A = 0,
-+ e_SC_SA_B ,
-+ e_SC_SA_C ,
-+ e_SC_SA_D
-+} e_ScSaId;
-+
-+typedef struct
-+{
-+ uint32_t scId;
-+ macsecSCI_t sci;
-+ bool replayProtect;
-+ uint32_t replayWindow;
-+ e_FmMacsecValidFrameBehavior validateFrames;
-+ uint16_t confidentialityOffset;
-+ e_FmMacsecSecYCipherSuite cipherSuite;
-+} t_RxScParams;
-+
-+typedef struct
-+{
-+ uint32_t scId;
-+ macsecSCI_t sci;
-+ bool protectFrames;
-+ e_FmMacsecSciInsertionMode sciInsertionMode;
-+ bool confidentialityEnable;
-+ uint16_t confidentialityOffset;
-+ e_FmMacsecSecYCipherSuite cipherSuite;
-+} t_TxScParams;
-+
-+typedef enum e_FmMacsecGlobalExceptions {
-+ e_FM_MACSEC_EX_TX_SC, /**< Tx Sc 0 frame discarded error. */
-+ e_FM_MACSEC_EX_ECC /**< MACSEC memory ECC multiple-bit error. */
-+} e_FmMacsecGlobalExceptions;
-+
-+typedef enum e_FmMacsecGlobalEvents {
-+ e_FM_MACSEC_EV_TX_SC_NEXT_PN /**< Tx Sc 0 Next Pn exhaustion threshold reached. */
-+} e_FmMacsecGlobalEvents;
-+
-+/**************************************************************************//**
-+ @Description Enum for inter-module interrupts registration
-+*//***************************************************************************/
-+typedef enum e_FmMacsecEventModules{
-+ e_FM_MACSEC_MOD_SC_TX,
-+ e_FM_MACSEC_MOD_DUMMY_LAST
-+} e_FmMacsecEventModules;
-+
-+typedef enum e_FmMacsecInterModuleEvent {
-+ e_FM_MACSEC_EV_SC_TX,
-+ e_FM_MACSEC_EV_ERR_SC_TX,
-+ e_FM_MACSEC_EV_DUMMY_LAST
-+} e_FmMacsecInterModuleEvent;
-+
-+#define NUM_OF_INTER_MODULE_EVENTS (NUM_OF_TX_SC * 2)
-+
-+#define GET_MACSEC_MODULE_EVENT(mod, id, intrType, event) \
-+ switch(mod){ \
-+ case e_FM_MACSEC_MOD_SC_TX: \
-+ event = (intrType == e_FM_INTR_TYPE_ERR) ? \
-+ e_FM_MACSEC_EV_ERR_SC_TX: \
-+ e_FM_MACSEC_EV_SC_TX; \
-+ event += (uint8_t)(2 * id); break; \
-+ default:event = e_FM_MACSEC_EV_DUMMY_LAST; \
-+ break;}
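-+
-+/* Example expansion (illustrative): for mod = e_FM_MACSEC_MOD_SC_TX, id = 1 and
-+ * intrType = e_FM_INTR_TYPE_ERR, event starts at e_FM_MACSEC_EV_ERR_SC_TX (1)
-+ * and becomes 1 + 2*1 = 3; the normal event of the same SC maps to 0 + 2 = 2,
-+ * consistent with the NUM_OF_TX_SC * 2 sizing above. */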
-+
-+void FmMacsecRegisterIntr(t_Handle h_FmMacsec,
-+ e_FmMacsecEventModules module,
-+ uint8_t modId,
-+ e_FmIntrType intrType,
-+ void (*f_Isr) (t_Handle h_Arg, uint32_t id),
-+ t_Handle h_Arg);
-+
-+void FmMacsecUnregisterIntr(t_Handle h_FmMacsec,
-+ e_FmMacsecEventModules module,
-+ uint8_t modId,
-+ e_FmIntrType intrType);
-+
-+t_Error FmMacsecAllocScs(t_Handle h_FmMacsec, e_ScType type, bool isPtp, uint32_t numOfScs, uint32_t *p_ScIds);
-+t_Error FmMacsecFreeScs(t_Handle h_FmMacsec, e_ScType type, uint32_t numOfScs, uint32_t *p_ScIds);
-+t_Error FmMacsecCreateRxSc(t_Handle h_FmMacsec, t_RxScParams *p_RxScParams);
-+t_Error FmMacsecDeleteRxSc(t_Handle h_FmMacsec, uint32_t scId);
-+t_Error FmMacsecCreateTxSc(t_Handle h_FmMacsec, t_TxScParams *p_TxScParams);
-+t_Error FmMacsecDeleteTxSc(t_Handle h_FmMacsec, uint32_t scId);
-+t_Error FmMacsecCreateRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key);
-+t_Error FmMacsecCreateTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecSAKey_t key);
-+t_Error FmMacsecDeleteRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId);
-+t_Error FmMacsecDeleteTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId);
-+t_Error FmMacsecRxSaSetReceive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, bool enableReceive);
-+t_Error FmMacsecRxSaUpdateNextPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtNextPN);
-+t_Error FmMacsecRxSaUpdateLowestPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtLowestPN);
-+t_Error FmMacsecTxSaSetActive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an);
-+t_Error FmMacsecTxSaGetActive(t_Handle h_FmMacsec, uint32_t scId, macsecAN_t *p_An);
-+t_Error FmMacsecSetPTP(t_Handle h_FmMacsec, bool enable);
-+
-+t_Error FmMacsecSetException(t_Handle h_FmMacsec, e_FmMacsecGlobalExceptions exception, uint32_t scId, bool enable);
-+t_Error FmMacsecSetEvent(t_Handle h_FmMacsec, e_FmMacsecGlobalEvents event, uint32_t scId, bool enable);
-+
-+
-+
-+#endif /* __FM_MACSEC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_guest.c
-@@ -0,0 +1,59 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fm_macsec.c
-+
-+ @Description FM MACSEC driver routines implementation.
-+*//***************************************************************************/
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "xx_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+#include "debug_ext.h"
-+#include "fm_macsec.h"
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+t_Handle FM_MACSEC_GUEST_Config(t_FmMacsecParams *p_FmMacsecParam)
-+{
-+ UNUSED(p_FmMacsecParam);
-+ return NULL;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.c
-@@ -0,0 +1,1031 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fm_macsec.c
-+
-+ @Description FM MACSEC driver routines implementation.
-+*//***************************************************************************/
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "xx_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+#include "fm_mac_ext.h"
-+
-+#include "fm_macsec_master.h"
-+
-+
-+extern uint16_t FM_MAC_GetMaxFrameLength(t_Handle FmMac);
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+static t_Error CheckFmMacsecParameters(t_FmMacsec *p_FmMacsec)
-+{
-+ if (!p_FmMacsec->f_Exception)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
-+
-+ return E_OK;
-+}
-+
-+static void UnimplementedIsr(t_Handle h_Arg, uint32_t id)
-+{
-+ UNUSED(h_Arg); UNUSED(id);
-+
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented Isr!"));
-+}
-+
-+static void MacsecEventIsr(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t events,event,i;
-+
-+ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
-+
-+ events = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->evr);
-+ events |= GET_UINT32(p_FmMacsec->p_FmMacsecRegs->ever);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->evr,events);
-+
-+ for (i=0; i<NUM_OF_TX_SC; i++)
-+ if (events & FM_MACSEC_EV_TX_SC_NEXT_PN(i))
-+ {
-+ GET_MACSEC_MODULE_EVENT(e_FM_MACSEC_MOD_SC_TX, i, e_FM_INTR_TYPE_NORMAL, event);
-+ p_FmMacsec->intrMng[event].f_Isr(p_FmMacsec->intrMng[event].h_SrcHandle, i);
-+ }
-+}
-+
-+static void MacsecErrorIsr(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t errors,error,i;
-+
-+ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
-+
-+ errors = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->err);
-+ errors |= GET_UINT32(p_FmMacsec->p_FmMacsecRegs->erer);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->err,errors);
-+
-+ for (i=0; i<NUM_OF_TX_SC; i++)
-+ if (errors & FM_MACSEC_EX_TX_SC(i))
-+ {
-+ GET_MACSEC_MODULE_EVENT(e_FM_MACSEC_MOD_SC_TX, i, e_FM_INTR_TYPE_ERR, error);
-+ p_FmMacsec->intrMng[error].f_Isr(p_FmMacsec->intrMng[error].h_SrcHandle, i);
-+ }
-+
-+ if (errors & FM_MACSEC_EX_ECC)
-+ {
-+ uint8_t eccType;
-+ uint32_t tmpReg;
-+
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->meec);
-+ ASSERT_COND(tmpReg & MECC_CAP);
-+ eccType = (uint8_t)((tmpReg & MECC_CET) >> MECC_CET_SHIFT);
-+
-+ if (!eccType && (p_FmMacsec->userExceptions & FM_MACSEC_USER_EX_SINGLE_BIT_ECC))
-+ p_FmMacsec->f_Exception(p_FmMacsec->h_App,e_FM_MACSEC_EX_SINGLE_BIT_ECC);
-+ else if (eccType && (p_FmMacsec->userExceptions & FM_MACSEC_USER_EX_MULTI_BIT_ECC))
-+ p_FmMacsec->f_Exception(p_FmMacsec->h_App,e_FM_MACSEC_EX_MULTI_BIT_ECC);
-+ else
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->meec,tmpReg);
-+ }
-+}
-+
-+static t_Error MacsecInit(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_FmMacsecDriverParam *p_FmMacsecDriverParam = NULL;
-+ uint32_t tmpReg,i,macId;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ CHECK_INIT_PARAMETERS(p_FmMacsec, CheckFmMacsecParameters);
-+
-+ p_FmMacsecDriverParam = p_FmMacsec->p_FmMacsecDriverParam;
-+
-+ for (i=0;i<e_FM_MACSEC_EV_DUMMY_LAST;i++)
-+ p_FmMacsec->intrMng[i].f_Isr = UnimplementedIsr;
-+
-+ tmpReg = 0;
-+ tmpReg |= (p_FmMacsecDriverParam->changedTextWithNoEncryptDeliverUncontrolled << CFG_UECT_SHIFT)|
-+ (p_FmMacsecDriverParam->onlyScbIsSetDeliverUncontrolled << CFG_ESCBT_SHIFT) |
-+ (p_FmMacsecDriverParam->unknownSciTreatMode << CFG_USFT_SHIFT) |
-+ (p_FmMacsecDriverParam->invalidTagsDeliverUncontrolled << CFG_ITT_SHIFT) |
-+ (p_FmMacsecDriverParam->encryptWithNoChangedTextDiscardUncontrolled << CFG_KFT_SHIFT) |
-+ (p_FmMacsecDriverParam->untagTreatMode << CFG_UFT_SHIFT) |
-+ (p_FmMacsecDriverParam->keysUnreadable << CFG_KSS_SHIFT) |
-+ (p_FmMacsecDriverParam->reservedSc0 << CFG_S0I_SHIFT) |
-+ (p_FmMacsecDriverParam->byPassMode << CFG_BYPN_SHIFT);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg, tmpReg);
-+
-+ tmpReg = FM_MAC_GetMaxFrameLength(p_FmMacsec->h_FmMac);
-+ /* At least Ethernet FCS (4 bytes) overhead must be subtracted from MFL.
-+ * In addition, the SCI (8 bytes) overhead might be subtracted as well. */
-+ tmpReg -= p_FmMacsecDriverParam->mflSubtract;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->mfl, tmpReg);
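-+ /* Illustrative arithmetic (frame size assumed): with a MAC max frame length
-+ * of 1518, subtracting only the FCS gives MFL = 1514; if the SCI overhead is
-+ * also subtracted (see MacsecConfigSectagWithoutSCI), MFL = 1506. */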
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->tpnet, p_FmMacsecDriverParam->pnExhThr);
-+
-+ if (!p_FmMacsec->userExceptions)
-+ p_FmMacsec->exceptions &= ~FM_MACSEC_EX_ECC;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->erer, p_FmMacsec->exceptions);
-+
-+ p_FmMacsec->numRxScAvailable = NUM_OF_RX_SC;
-+ if (p_FmMacsecDriverParam->reservedSc0)
-+ p_FmMacsec->numRxScAvailable --;
-+ p_FmMacsec->numTxScAvailable = NUM_OF_TX_SC;
-+
-+ XX_Free(p_FmMacsecDriverParam);
-+ p_FmMacsec->p_FmMacsecDriverParam = NULL;
-+
-+ FM_MAC_GetId(p_FmMacsec->h_FmMac, &macId);
-+ FmRegisterIntr(p_FmMacsec->h_Fm,
-+ e_FM_MOD_MACSEC,
-+ (uint8_t)macId,
-+ e_FM_INTR_TYPE_NORMAL,
-+ MacsecEventIsr,
-+ p_FmMacsec);
-+
-+ FmRegisterIntr(p_FmMacsec->h_Fm,
-+ e_FM_MOD_MACSEC,
-+ 0,
-+ e_FM_INTR_TYPE_ERR,
-+ MacsecErrorIsr,
-+ p_FmMacsec);
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecFree(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t macId;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ FM_MAC_GetId(p_FmMacsec->h_FmMac, &macId);
-+ FmUnregisterIntr(p_FmMacsec->h_Fm,
-+ e_FM_MOD_MACSEC,
-+ (uint8_t)macId,
-+ e_FM_INTR_TYPE_NORMAL);
-+
-+ FmUnregisterIntr(p_FmMacsec->h_Fm,
-+ e_FM_MOD_MACSEC,
-+ 0,
-+ e_FM_INTR_TYPE_ERR);
-+
-+ if (p_FmMacsec->rxScSpinLock)
-+ XX_FreeSpinlock(p_FmMacsec->rxScSpinLock);
-+ if (p_FmMacsec->txScSpinLock)
-+ XX_FreeSpinlock(p_FmMacsec->txScSpinLock);
-+
-+ XX_Free(p_FmMacsec);
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigUnknownSciFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->unknownSciTreatMode = treatMode;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigInvalidTagsFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->invalidTagsDeliverUncontrolled = deliverUncontrolled;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigChangedTextWithNoEncryptFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->changedTextWithNoEncryptDeliverUncontrolled = deliverUncontrolled;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigOnlyScbIsSetFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->onlyScbIsSetDeliverUncontrolled = deliverUncontrolled;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigEncryptWithNoChangedTextFrameTreatment(t_Handle h_FmMacsec, bool discardUncontrolled)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->encryptWithNoChangedTextDiscardUncontrolled = discardUncontrolled;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigUntagFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->untagTreatMode = treatMode;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigPnExhaustionThreshold(t_Handle h_FmMacsec, uint32_t pnExhThr)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->pnExhThr = pnExhThr;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigKeysUnreadable(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->keysUnreadable = TRUE;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigSectagWithoutSCI(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmMacsec->p_FmMacsecDriverParam->sectagOverhead -= MACSEC_SCI_SIZE;
-+ p_FmMacsec->p_FmMacsecDriverParam->mflSubtract += MACSEC_SCI_SIZE;
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecConfigException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ GET_USER_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmMacsec->userExceptions |= bitMask;
-+ else
-+ p_FmMacsec->userExceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecGetRevision(t_Handle h_FmMacsec, uint32_t *p_MacsecRevision)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ *p_MacsecRevision = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->ip_rev1);
-+
-+ return E_OK;
-+}
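The 32-bit word read back here packs an IP id plus major and minor revision numbers; the matching IP_REV_1_* masks and shifts are defined in fm_macsec_master.h further down in this patch. A minimal standalone sketch of decoding such a word, assuming only that layout (labels and the print format are illustrative):

    /* Decode the value returned through MacsecGetRevision() using the
     * IP_REV_1_* field layout from fm_macsec_master.h (ID in bits 31-16,
     * major in bits 15-8, minor in bits 7-0). Illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    static void print_macsec_revision(uint32_t ip_rev1)
    {
        unsigned id    = (ip_rev1 & 0xffff0000u) >> 16;  /* IP_REV_1_IP_ID */
        unsigned major = (ip_rev1 & 0x0000ff00u) >> 8;   /* IP_REV_1_IP_MJ */
        unsigned minor = (ip_rev1 & 0x000000ffu);        /* IP_REV_1_IP_MM */

        printf("MACsec IP id 0x%04x, revision %u.%u\n", id, major, minor);
    }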
-+
-+static t_Error MacsecEnable(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t tmpReg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg);
-+ tmpReg |= CFG_BYPN;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg,tmpReg);
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecDisable(t_Handle h_FmMacsec)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t tmpReg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg);
-+ tmpReg &= ~CFG_BYPN;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg,tmpReg);
-+
-+ return E_OK;
-+}
-+
-+static t_Error MacsecSetException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t bitMask;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ GET_USER_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmMacsec->userExceptions |= bitMask;
-+ else
-+ p_FmMacsec->userExceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ if (!p_FmMacsec->userExceptions)
-+ p_FmMacsec->exceptions &= ~FM_MACSEC_EX_ECC;
-+ else
-+ p_FmMacsec->exceptions |= FM_MACSEC_EX_ECC;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->erer, p_FmMacsec->exceptions);
-+
-+ return E_OK;
-+}
-+
-+static void InitFmMacsecControllerDriver(t_FmMacsecControllerDriver *p_FmMacsecControllerDriver)
-+{
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_Init = MacsecInit;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_Free = MacsecFree;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUnknownSciFrameTreatment = MacsecConfigUnknownSciFrameTreatment;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigInvalidTagsFrameTreatment = MacsecConfigInvalidTagsFrameTreatment;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment = MacsecConfigEncryptWithNoChangedTextFrameTreatment;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigUntagFrameTreatment = MacsecConfigUntagFrameTreatment;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment = MacsecConfigChangedTextWithNoEncryptFrameTreatment;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment = MacsecConfigOnlyScbIsSetFrameTreatment;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigPnExhaustionThreshold = MacsecConfigPnExhaustionThreshold;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigKeysUnreadable = MacsecConfigKeysUnreadable;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigSectagWithoutSCI = MacsecConfigSectagWithoutSCI;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_ConfigException = MacsecConfigException;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_GetRevision = MacsecGetRevision;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_Enable = MacsecEnable;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_Disable = MacsecDisable;
-+ p_FmMacsecControllerDriver->f_FM_MACSEC_SetException = MacsecSetException;
-+}
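InitFmMacsecControllerDriver() populates a table of function pointers; t_FmMacsec embeds this t_FmMacsecControllerDriver as its first member (see fm_macsec_master.h below), so the handle returned by FM_MACSEC_MASTER_Config() can also be viewed as a controller-driver object. A hypothetical sketch of that forwarding pattern, assuming the fm_macsec headers for t_Handle/t_Error; the wrapper name and its checks are illustrative, not the driver's actual code:

    /* Sketch: forward an exported call through the dispatch table filled
     * in above. Assumes the handle starts with t_FmMacsecControllerDriver,
     * as t_FmMacsec does. */
    t_Error fm_macsec_enable_sketch(t_Handle h_FmMacsec)
    {
        t_FmMacsecControllerDriver *p_Drv = (t_FmMacsecControllerDriver *)h_FmMacsec;

        if (!p_Drv || !p_Drv->f_FM_MACSEC_Enable)
            return ERROR_CODE(E_INVALID_HANDLE);

        return p_Drv->f_FM_MACSEC_Enable(h_FmMacsec);
    }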
-+
-+/****************************************/
-+/* Inter-Module functions */
-+/****************************************/
-+
-+void FmMacsecRegisterIntr(t_Handle h_FmMacsec,
-+ e_FmMacsecEventModules module,
-+ uint8_t modId,
-+ e_FmIntrType intrType,
-+ void (*f_Isr) (t_Handle h_Arg, uint32_t id),
-+ t_Handle h_Arg)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint8_t event= 0;
-+
-+ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
-+
-+ GET_MACSEC_MODULE_EVENT(module, modId, intrType, event);
-+
-+ ASSERT_COND(event != e_FM_MACSEC_EV_DUMMY_LAST);
-+ p_FmMacsec->intrMng[event].f_Isr = f_Isr;
-+ p_FmMacsec->intrMng[event].h_SrcHandle = h_Arg;
-+}
-+
-+void FmMacsecUnregisterIntr(t_Handle h_FmMacsec,
-+ e_FmMacsecEventModules module,
-+ uint8_t modId,
-+ e_FmIntrType intrType)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint8_t event= 0;
-+
-+ SANITY_CHECK_RETURN(p_FmMacsec, E_INVALID_HANDLE);
-+
-+ GET_MACSEC_MODULE_EVENT(module, modId,intrType, event);
-+
-+ ASSERT_COND(event != e_FM_MACSEC_EV_DUMMY_LAST);
-+ p_FmMacsec->intrMng[event].f_Isr = NULL;
-+ p_FmMacsec->intrMng[event].h_SrcHandle = NULL;
-+}
-+
-+t_Error FmMacsecAllocScs(t_Handle h_FmMacsec, e_ScType type, bool isPtp, uint32_t numOfScs, uint32_t *p_ScIds)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ bool *p_ScTable;
-+ uint32_t *p_ScAvailable,i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_ScIds, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(numOfScs, E_INVALID_HANDLE);
-+
-+ if (type == e_SC_RX)
-+ {
-+ p_ScTable = (bool *)p_FmMacsec->rxScTable;
-+ p_ScAvailable = &p_FmMacsec->numRxScAvailable;
-+ i = (NUM_OF_RX_SC - 1);
-+ }
-+ else
-+ {
-+ p_ScTable = (bool *)p_FmMacsec->txScTable;
-+ p_ScAvailable = &p_FmMacsec->numTxScAvailable;
-+ i = (NUM_OF_TX_SC - 1);
-+
-+ }
-+ if (*p_ScAvailable < numOfScs)
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Not enough SCs available"));
-+
-+ if (isPtp)
-+ {
-+ i = 0;
-+ if (p_ScTable[i])
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Sc 0 Not available"));
-+ }
-+
-+ for (;numOfScs;i--)
-+ {
-+ if (p_ScTable[i])
-+ continue;
-+ numOfScs --;
-+ (*p_ScAvailable)--;
-+ p_ScIds[numOfScs] = i;
-+ p_ScTable[i] = TRUE;
-+ }
-+
-+ return err;
-+}
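FmMacsecAllocScs() hands out SC indices by walking the bool table from the highest index downward, so index 0 stays free for the point-to-point (PTP) channel unless it is requested explicitly. A self-contained sketch of that allocation strategy for a single SC; the table size and all names are local to the example:

    #include <stdbool.h>
    #include <stdint.h>

    #define EXAMPLE_NUM_SC 16

    /* Returns 0 and writes the allocated index to *out_id, or -1 on failure. */
    static int alloc_sc(bool table[EXAMPLE_NUM_SC], uint32_t *available,
                        bool want_ptp, uint32_t *out_id)
    {
        uint32_t i;

        if (*available == 0)
            return -1;                      /* no SC left */

        if (want_ptp) {
            if (table[0])
                return -1;                  /* SC 0 already taken */
            i = 0;
        } else {
            i = EXAMPLE_NUM_SC - 1;         /* scan from the top down */
        }

        for (;;) {
            if (!table[i]) {
                table[i] = true;            /* mark the slot in use */
                (*available)--;
                *out_id = i;
                return 0;
            }
            if (i == 0)
                return -1;
            i--;
        }
    }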
-+
-+t_Error FmMacsecFreeScs(t_Handle h_FmMacsec, e_ScType type, uint32_t numOfScs, uint32_t *p_ScIds)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ bool *p_ScTable;
-+ uint32_t *p_ScAvailable,maxNumOfSc,i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_ScIds, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(numOfScs, E_INVALID_HANDLE);
-+
-+ if (type == e_SC_RX)
-+ {
-+ p_ScTable = (bool *)p_FmMacsec->rxScTable;
-+ p_ScAvailable = &p_FmMacsec->numRxScAvailable;
-+ maxNumOfSc = NUM_OF_RX_SC;
-+ }
-+ else
-+ {
-+ p_ScTable = (bool *)p_FmMacsec->txScTable;
-+ p_ScAvailable = &p_FmMacsec->numTxScAvailable;
-+ maxNumOfSc = NUM_OF_TX_SC;
-+ }
-+
-+ if ((*p_ScAvailable + numOfScs) > maxNumOfSc)
-+ RETURN_ERROR(MINOR, E_FULL, ("Too many SCs"));
-+
-+ for (i=0;i<numOfScs;i++)
-+ {
-+ p_ScTable[p_ScIds[i]] = FALSE;
-+ (*p_ScAvailable)++;
-+ }
-+
-+ return err;
-+
-+}
-+
-+t_Error FmMacsecSetPTP(t_Handle h_FmMacsec, bool enable)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t tmpReg = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg);
-+ if (enable && (tmpReg & CFG_S0I))
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("MACSEC already in point-to-point mode"));
-+
-+ if (enable)
-+ tmpReg |= CFG_S0I;
-+ else
-+ tmpReg &= ~CFG_S0I;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->cfg, tmpReg);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmMacsecCreateRxSc(t_Handle h_FmMacsec, t_RxScParams *p_RxScParams)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_RxScParams, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_RxScParams->scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, p_RxScParams->scId);
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsccfg);
-+ if (tmpReg & RX_SCCFG_SCI_EN_MASK)
-+ {
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Rx Sc %d must be disabled", p_RxScParams->scId));
-+ }
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsci1h, GET_SCI_FIRST_HALF(p_RxScParams->sci));
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsci2h, GET_SCI_SECOND_HALF(p_RxScParams->sci));
-+ tmpReg |= ((p_RxScParams->replayProtect << RX_SCCFG_RP_SHIFT) & RX_SCCFG_RP_MASK);
-+ tmpReg |= ((p_RxScParams->validateFrames << RX_SCCFG_VF_SHIFT) & RX_SCCFG_VF_MASK);
-+ tmpReg |= ((p_RxScParams->confidentialityOffset << RX_SCCFG_CO_SHIFT) & RX_SCCFG_CO_MASK);
-+ tmpReg |= RX_SCCFG_SCI_EN_MASK;
-+ tmpReg |= (p_RxScParams->cipherSuite << RX_SCCFG_CS_SHIFT);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsccfg, tmpReg);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rpw, p_RxScParams->replayWindow);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecDeleteRxSc(t_Handle h_FmMacsec, uint32_t scId)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
-+
-+ tmpReg &= ~RX_SCCFG_SCI_EN_MASK;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsccfg, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecCreateTxSc(t_Handle h_FmMacsec, t_TxScParams *p_TxScParams)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+ bool alwaysIncludeSCI = FALSE, useES = FALSE, useSCB = FALSE;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_TxScParams, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_TxScParams->scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, p_TxScParams->scId);
-+
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg);
-+ if (tmpReg & TX_SCCFG_SCE_MASK)
-+ {
-+ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tx Sc %d must be disabled", p_TxScParams->scId));
-+ }
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsci1h, GET_SCI_FIRST_HALF(p_TxScParams->sci));
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsci2h, GET_SCI_SECOND_HALF(p_TxScParams->sci));
-+ alwaysIncludeSCI = (p_TxScParams->sciInsertionMode == e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG);
-+ useES = (p_TxScParams->sciInsertionMode == e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_MAC_SA);
-+
-+ tmpReg |= ((p_TxScParams->protectFrames << TX_SCCFG_PF_SHIFT) & TX_SCCFG_PF_MASK);
-+ tmpReg |= ((alwaysIncludeSCI << TX_SCCFG_AIS_SHIFT) & TX_SCCFG_AIS_MASK);
-+ tmpReg |= ((useES << TX_SCCFG_UES_SHIFT) & TX_SCCFG_UES_MASK);
-+ tmpReg |= ((useSCB << TX_SCCFG_USCB_SHIFT) & TX_SCCFG_USCB_MASK);
-+ tmpReg |= ((p_TxScParams->confidentialityEnable << TX_SCCFG_CE_SHIFT) & TX_SCCFG_CE_MASK);
-+ tmpReg |= ((p_TxScParams->confidentialityOffset << TX_SCCFG_CO_SHIFT) & TX_SCCFG_CO_MASK);
-+ tmpReg |= TX_SCCFG_SCE_MASK;
-+ tmpReg |= (p_TxScParams->cipherSuite << TX_SCCFG_CS_SHIFT);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecDeleteTxSc(t_Handle h_FmMacsec, uint32_t scId)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_TX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
-+
-+ tmpReg &= ~TX_SCCFG_SCE_MASK;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecCreateRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsanpn, DEFAULT_initNextPn);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsalpn, lowestPn);
-+ MemCpy8((void*)p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsak, key, sizeof(macsecSAKey_t));
-+
-+ tmpReg |= RX_SACFG_ACTIVE;
-+ tmpReg |= ((an << RX_SACFG_AN_SHIFT) & RX_SACFG_AN_MASK);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecCreateTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecSAKey_t key)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_TX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsanpn, DEFAULT_initNextPn);
-+ MemCpy8((void*)p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsak, key, sizeof(macsecSAKey_t));
-+
-+ tmpReg |= TX_SACFG_ACTIVE;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsacs, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecDeleteRxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, i, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsanpn, 0x0);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsalpn, 0x0);
-+ for (i=0; i<4; i++)
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsak[i], 0x0);
-+
-+ tmpReg |= RX_SACFG_ACTIVE;
-+ tmpReg &= ~RX_SACFG_EN_MASK;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecDeleteTxSa(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, i, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_TX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsanpn, 0x0);
-+ for (i=0; i<4; i++)
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsak[i], 0x0);
-+
-+ tmpReg |= TX_SACFG_ACTIVE;
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecTxScSa[saId].txsacs, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecRxSaSetReceive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, bool enableReceive)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs);
-+ if (enableReceive)
-+ tmpReg |= RX_SACFG_EN_MASK;
-+ else
-+ tmpReg &= ~RX_SACFG_EN_MASK;
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsacs, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecRxSaUpdateNextPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtNextPN)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsanpn, updtNextPN);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecRxSaUpdateLowestPn(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, uint32_t updtLowestPN)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_RX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->rxScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->rxsca, scId);
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->fmMacsecRxScSa[saId].rxsalpn, updtLowestPN);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->rxScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecTxSaSetActive(t_Handle h_FmMacsec, uint32_t scId, e_ScSaId saId, macsecAN_t an)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(saId < NUM_OF_SA_PER_TX_SC, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
-+
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg);
-+
-+ tmpReg |= ((an << TX_SCCFG_AN_SHIFT) & TX_SCCFG_AN_MASK);
-+ tmpReg |= ((saId << TX_SCCFG_ASA_SHIFT) & TX_SCCFG_ASA_MASK);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg, tmpReg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecTxSaGetActive(t_Handle h_FmMacsec, uint32_t scId, macsecAN_t *p_An)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg = 0, intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(scId < NUM_OF_RX_SC, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_An, E_INVALID_HANDLE);
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmMacsec->txScSpinLock);
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->txsca, scId);
-+
-+ tmpReg = GET_UINT32(p_FmMacsec->p_FmMacsecRegs->txsccfg);
-+
-+ XX_UnlockIntrSpinlock(p_FmMacsec->txScSpinLock, intFlags);
-+
-+ *p_An = (macsecAN_t)((tmpReg & TX_SCCFG_AN_MASK) >> TX_SCCFG_AN_SHIFT);
-+
-+ return err;
-+}
-+
-+t_Error FmMacsecSetException(t_Handle h_FmMacsec, e_FmMacsecGlobalExceptions exception, uint32_t scId, bool enable)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t bitMask;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception, scId);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmMacsec->exceptions |= bitMask;
-+ else
-+ p_FmMacsec->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->erer, p_FmMacsec->exceptions);
-+
-+ return E_OK;
-+}
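FmMacsecSetException() above and FmMacsecSetEvent() below share one pattern: keep a software copy of the enable mask, flip the requested bit, then mirror the whole word into the hardware enable register (erer or ever). A stripped-down sketch of that pattern with the register replaced by a plain variable:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t sw_mask;    /* software copy, like p_FmMacsec->exceptions */
    static uint32_t fake_erer;  /* stands in for the erer/ever register */

    static int set_mask_bit(uint32_t bit, bool enable)
    {
        if (!bit)
            return -1;          /* undefined exception/event */
        if (enable)
            sw_mask |= bit;
        else
            sw_mask &= ~bit;
        fake_erer = sw_mask;    /* like WRITE_UINT32(regs->erer, exceptions) */
        return 0;
    }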
-+
-+t_Error FmMacsecSetEvent(t_Handle h_FmMacsec, e_FmMacsecGlobalEvents event, uint32_t scId, bool enable)
-+{
-+ t_FmMacsec *p_FmMacsec = (t_FmMacsec*)h_FmMacsec;
-+ uint32_t bitMask;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsec->p_FmMacsecDriverParam, E_INVALID_HANDLE);
-+
-+ GET_EVENT_FLAG(bitMask, event, scId);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmMacsec->events |= bitMask;
-+ else
-+ p_FmMacsec->events &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined event"));
-+
-+ WRITE_UINT32(p_FmMacsec->p_FmMacsecRegs->ever, p_FmMacsec->events);
-+
-+ return E_OK;
-+}
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+t_Handle FM_MACSEC_MASTER_Config(t_FmMacsecParams *p_FmMacsecParam)
-+{
-+ t_FmMacsec *p_FmMacsec;
-+ uint32_t macId;
-+
-+ /* Allocate FM MACSEC structure */
-+ p_FmMacsec = (t_FmMacsec *) XX_Malloc(sizeof(t_FmMacsec));
-+ if (!p_FmMacsec)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC driver structure"));
-+ return NULL;
-+ }
-+ memset(p_FmMacsec, 0, sizeof(t_FmMacsec));
-+ InitFmMacsecControllerDriver(&p_FmMacsec->fmMacsecControllerDriver);
-+
-+ /* Allocate the FM MACSEC driver's parameters structure */
-+ p_FmMacsec->p_FmMacsecDriverParam = (t_FmMacsecDriverParam *)XX_Malloc(sizeof(t_FmMacsecDriverParam));
-+ if (!p_FmMacsec->p_FmMacsecDriverParam)
-+ {
-+ XX_Free(p_FmMacsec);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC driver parameters"));
-+ return NULL;
-+ }
-+ memset(p_FmMacsec->p_FmMacsecDriverParam, 0, sizeof(t_FmMacsecDriverParam));
-+
-+ /* Initialize FM MACSEC parameters which will be kept by the driver */
-+ p_FmMacsec->h_Fm = p_FmMacsecParam->h_Fm;
-+ p_FmMacsec->h_FmMac = p_FmMacsecParam->nonGuestParams.h_FmMac;
-+ p_FmMacsec->p_FmMacsecRegs = (t_FmMacsecRegs *)UINT_TO_PTR(p_FmMacsecParam->nonGuestParams.baseAddr);
-+ p_FmMacsec->f_Exception = p_FmMacsecParam->nonGuestParams.f_Exception;
-+ p_FmMacsec->h_App = p_FmMacsecParam->nonGuestParams.h_App;
-+ p_FmMacsec->userExceptions = DEFAULT_userExceptions;
-+ p_FmMacsec->exceptions = DEFAULT_exceptions;
-+ p_FmMacsec->events = DEFAULT_events;
-+ p_FmMacsec->rxScSpinLock = XX_InitSpinlock();
-+ p_FmMacsec->txScSpinLock = XX_InitSpinlock();
-+
-+ /* Initialize FM MACSEC driver parameters (for initialization phase only) */
-+ p_FmMacsec->p_FmMacsecDriverParam->unknownSciTreatMode = DEFAULT_unknownSciFrameTreatment;
-+ p_FmMacsec->p_FmMacsecDriverParam->invalidTagsDeliverUncontrolled = DEFAULT_invalidTagsFrameTreatment;
-+ p_FmMacsec->p_FmMacsecDriverParam->encryptWithNoChangedTextDiscardUncontrolled = DEFAULT_encryptWithNoChangedTextFrameTreatment;
-+ p_FmMacsec->p_FmMacsecDriverParam->untagTreatMode = DEFAULT_untagFrameTreatment;
-+ p_FmMacsec->p_FmMacsecDriverParam->keysUnreadable = DEFAULT_keysUnreadable;
-+ p_FmMacsec->p_FmMacsecDriverParam->reservedSc0 = DEFAULT_sc0ReservedForPTP;
-+ p_FmMacsec->p_FmMacsecDriverParam->byPassMode = !DEFAULT_normalMode;
-+ p_FmMacsec->p_FmMacsecDriverParam->pnExhThr = DEFAULT_pnExhThr;
-+ p_FmMacsec->p_FmMacsecDriverParam->sectagOverhead = DEFAULT_sectagOverhead;
-+ p_FmMacsec->p_FmMacsecDriverParam->mflSubtract = DEFAULT_mflSubtract;
-+ /* build the FM MACSEC master IPC address */
-+ memset(p_FmMacsec->fmMacsecModuleName, 0, (sizeof(char))*MODULE_NAME_SIZE);
-+ FM_MAC_GetId(p_FmMacsec->h_FmMac,&macId);
-+ if (Sprint (p_FmMacsec->fmMacsecModuleName, "FM-%d-MAC-%d-MACSEC-Master",
-+ FmGetId(p_FmMacsec->h_Fm),macId) != 24)
-+ {
-+ XX_Free(p_FmMacsec->p_FmMacsecDriverParam);
-+ XX_Free(p_FmMacsec);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+ return NULL;
-+ }
-+ return p_FmMacsec;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_master.h
-@@ -0,0 +1,479 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fm_macsec_master.h
-+
-+ @Description FM MACSEC internal structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_MACSEC_MASTER_H
-+#define __FM_MACSEC_MASTER_H
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+
-+#include "fm_macsec.h"
-+
-+
-+#define MACSEC_ICV_SIZE 16
-+#define MACSEC_SECTAG_SIZE 16
-+#define MACSEC_SCI_SIZE 8
-+#define MACSEC_FCS_SIZE 4
-+
-+/**************************************************************************//**
-+ @Description Exceptions
-+*//***************************************************************************/
-+
-+#define FM_MACSEC_EX_TX_SC_0 0x80000000
-+#define FM_MACSEC_EX_TX_SC(sc) (FM_MACSEC_EX_TX_SC_0 >> (sc))
-+#define FM_MACSEC_EX_ECC 0x00000001
-+
-+#define GET_EXCEPTION_FLAG(bitMask, exception, id) switch (exception){ \
-+ case e_FM_MACSEC_EX_TX_SC: \
-+ bitMask = FM_MACSEC_EX_TX_SC(id); break; \
-+ case e_FM_MACSEC_EX_ECC: \
-+ bitMask = FM_MACSEC_EX_ECC; break; \
-+ default: bitMask = 0;break;}
-+
-+#define FM_MACSEC_USER_EX_SINGLE_BIT_ECC 0x80000000
-+#define FM_MACSEC_USER_EX_MULTI_BIT_ECC 0x40000000
-+
-+#define GET_USER_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
-+ case e_FM_MACSEC_EX_SINGLE_BIT_ECC: \
-+ bitMask = FM_MACSEC_USER_EX_SINGLE_BIT_ECC; break; \
-+ case e_FM_MACSEC_EX_MULTI_BIT_ECC: \
-+ bitMask = FM_MACSEC_USER_EX_MULTI_BIT_ECC; break; \
-+ default: bitMask = 0;break;}
-+
-+/**************************************************************************//**
-+ @Description Events
-+*//***************************************************************************/
-+
-+#define FM_MACSEC_EV_TX_SC_0_NEXT_PN 0x80000000
-+#define FM_MACSEC_EV_TX_SC_NEXT_PN(sc) (FM_MACSEC_EV_TX_SC_0_NEXT_PN >> (sc))
-+
-+#define GET_EVENT_FLAG(bitMask, event, id) switch (event){ \
-+ case e_FM_MACSEC_EV_TX_SC_NEXT_PN: \
-+ bitMask = FM_MACSEC_EV_TX_SC_NEXT_PN(id); break; \
-+ default: bitMask = 0;break;}
-+
-+/**************************************************************************//**
-+ @Description Defaults
-+*//***************************************************************************/
-+#define DEFAULT_userExceptions (FM_MACSEC_USER_EX_SINGLE_BIT_ECC |\
-+ FM_MACSEC_USER_EX_MULTI_BIT_ECC)
-+
-+#define DEFAULT_exceptions (FM_MACSEC_EX_TX_SC(0) |\
-+ FM_MACSEC_EX_TX_SC(1) |\
-+ FM_MACSEC_EX_TX_SC(2) |\
-+ FM_MACSEC_EX_TX_SC(3) |\
-+ FM_MACSEC_EX_TX_SC(4) |\
-+ FM_MACSEC_EX_TX_SC(5) |\
-+ FM_MACSEC_EX_TX_SC(6) |\
-+ FM_MACSEC_EX_TX_SC(7) |\
-+ FM_MACSEC_EX_TX_SC(8) |\
-+ FM_MACSEC_EX_TX_SC(9) |\
-+ FM_MACSEC_EX_TX_SC(10) |\
-+ FM_MACSEC_EX_TX_SC(11) |\
-+ FM_MACSEC_EX_TX_SC(12) |\
-+ FM_MACSEC_EX_TX_SC(13) |\
-+ FM_MACSEC_EX_TX_SC(14) |\
-+ FM_MACSEC_EX_TX_SC(15) |\
-+ FM_MACSEC_EX_ECC )
-+
-+#define DEFAULT_events (FM_MACSEC_EV_TX_SC_NEXT_PN(0) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(1) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(2) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(3) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(4) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(5) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(6) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(7) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(8) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(9) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(10) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(11) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(12) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(13) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(14) |\
-+ FM_MACSEC_EV_TX_SC_NEXT_PN(15) )
-+
-+#define DEFAULT_unknownSciFrameTreatment e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_BOTH
-+#define DEFAULT_invalidTagsFrameTreatment FALSE
-+#define DEFAULT_encryptWithNoChangedTextFrameTreatment FALSE
-+#define DEFAULT_untagFrameTreatment e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED
-+#define DEFAULT_changedTextWithNoEncryptFrameTreatment FALSE
-+#define DEFAULT_onlyScbIsSetFrameTreatment FALSE
-+#define DEFAULT_keysUnreadable FALSE
-+#define DEFAULT_normalMode TRUE
-+#define DEFAULT_sc0ReservedForPTP FALSE
-+#define DEFAULT_initNextPn 1
-+#define DEFAULT_pnExhThr 0xffffffff
-+#define DEFAULT_sectagOverhead (MACSEC_ICV_SIZE + MACSEC_SECTAG_SIZE)
-+#define DEFAULT_mflSubtract MACSEC_FCS_SIZE
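Worked numbers for the two size defaults above, together with the adjustment MacsecConfigSectagWithoutSCI() applies earlier in this patch (a reading of the code, not additional documentation):

    sectagOverhead = MACSEC_ICV_SIZE + MACSEC_SECTAG_SIZE = 16 + 16 = 32 bytes
    mflSubtract    = MACSEC_FCS_SIZE                      =            4 bytes

    after MacsecConfigSectagWithoutSCI():
    sectagOverhead = 32 - MACSEC_SCI_SIZE = 32 - 8 = 24 bytes
    mflSubtract    =  4 + MACSEC_SCI_SIZE =  4 + 8 = 12 bytes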
-+
-+
-+/**************************************************************************//**
-+ @Description Memory Mapped Registers
-+*//***************************************************************************/
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+typedef _Packed struct
-+{
-+ /* MACsec configuration */
-+ volatile uint32_t cfg; /**< MACsec configuration */
-+ volatile uint32_t et; /**< MACsec EtherType */
-+ volatile uint8_t res1[56]; /**< reserved */
-+ volatile uint32_t mfl; /**< Maximum Frame Length */
-+ volatile uint32_t tpnet; /**< TX Packet Number exhaustion threshold */
-+ volatile uint8_t res2[56]; /**< reserved */
-+ volatile uint32_t rxsca; /**< RX SC access select */
-+ volatile uint8_t res3[60]; /**< reserved */
-+ volatile uint32_t txsca; /**< TX SC access select */
-+ volatile uint8_t res4[60]; /**< reserved */
-+
-+ /* RX configuration, status and statistic */
-+ volatile uint32_t rxsci1h; /**< RX Secure Channel Identifier first half */
-+ volatile uint32_t rxsci2h; /**< RX Secure Channel Identifier second half */
-+ volatile uint8_t res5[8]; /**< reserved */
-+ volatile uint32_t ifio1hs; /**< ifInOctets first half Statistic */
-+ volatile uint32_t ifio2hs; /**< ifInOctets second half Statistic */
-+ volatile uint32_t ifiups; /**< ifInUcastPkts Statistic */
-+ volatile uint8_t res6[4]; /**< reserved */
-+ volatile uint32_t ifimps; /**< ifInMulticastPkts Statistic */
-+ volatile uint32_t ifibps; /**< ifInBroadcastPkts Statistic */
-+ volatile uint32_t rxsccfg; /**< RX Secure Channel configuration */
-+ volatile uint32_t rpw; /**< replayWindow */
-+ volatile uint8_t res7[16]; /**< reserved */
-+ volatile uint32_t inov1hs; /**< InOctetsValidated first half Statistic */
-+ volatile uint32_t inov2hs; /**< InOctetsValidated second half Statistic */
-+ volatile uint32_t inod1hs; /**< InOctetsDecrypted first half Statistic */
-+ volatile uint32_t inod2hs; /**< InOctetsDecrypted second half Statistic */
-+ volatile uint32_t rxscipus; /**< RX Secure Channel InPktsUnchecked Statistic */
-+ volatile uint32_t rxscipds; /**< RX Secure Channel InPktsDelayed Statistic */
-+ volatile uint32_t rxscipls; /**< RX Secure Channel InPktsLate Statistic */
-+ volatile uint8_t res8[4]; /**< reserved */
-+ volatile uint32_t rxaninuss[MAX_NUM_OF_SA_PER_SC]; /**< RX AN 0-3 InNotUsingSA Statistic */
-+ volatile uint32_t rxanipuss[MAX_NUM_OF_SA_PER_SC]; /**< RX AN 0-3 InPktsUnusedSA Statistic */
-+ _Packed struct
-+ {
-+ volatile uint32_t rxsacs; /**< RX Security Association configuration and status */
-+ volatile uint32_t rxsanpn; /**< RX Security Association nextPN */
-+ volatile uint32_t rxsalpn; /**< RX Security Association lowestPN */
-+ volatile uint32_t rxsaipos; /**< RX Security Association InPktsOK Statistic */
-+ volatile uint32_t rxsak[4]; /**< RX Security Association key (128 bit) */
-+ volatile uint32_t rxsah[4]; /**< RX Security Association hash (128 bit) */
-+ volatile uint32_t rxsaipis; /**< RX Security Association InPktsInvalid Statistic */
-+ volatile uint32_t rxsaipnvs; /**< RX Security Association InPktsNotValid Statistic */
-+ volatile uint8_t res9[8]; /**< reserved */
-+ } _PackedType fmMacsecRxScSa[NUM_OF_SA_PER_RX_SC];
-+
-+ /* TX configuration, status and statistic */
-+ volatile uint32_t txsci1h; /**< TX Secure Channel Identifier first half */
-+ volatile uint32_t txsci2h; /**< TX Secure Channel Identifier second half */
-+ volatile uint8_t res10[8]; /**< reserved */
-+ volatile uint32_t ifoo1hs; /**< ifOutOctets first half Statistic */
-+ volatile uint32_t ifoo2hs; /**< ifOutOctets second half Statistic */
-+ volatile uint32_t ifoups; /**< ifOutUcastPkts Statistic */
-+ volatile uint32_t opus; /**< OutPktsUntagged Statistic */
-+ volatile uint32_t ifomps; /**< ifOutMulticastPkts Statistic */
-+ volatile uint32_t ifobps; /**< ifOutBroadcastPkts Statistic */
-+ volatile uint32_t txsccfg; /**< TX Secure Channel configuration */
-+ volatile uint32_t optls; /**< OutPktsTooLong Statistic */
-+ volatile uint8_t res11[16]; /**< reserved */
-+ volatile uint32_t oop1hs; /**< OutOctetsProtected first half Statistic */
-+ volatile uint32_t oop2hs; /**< OutOctetsProtected second half Statistic */
-+ volatile uint32_t ooe1hs; /**< OutOctetsEncrypted first half Statistic */
-+ volatile uint32_t ooe2hs; /**< OutOctetsEncrypted second half Statistic */
-+ volatile uint8_t res12[48]; /**< reserved */
-+ _Packed struct
-+ {
-+ volatile uint32_t txsacs; /**< TX Security Association configuration and status */
-+ volatile uint32_t txsanpn; /**< TX Security Association nextPN */
-+ volatile uint32_t txsaopps; /**< TX Security Association OutPktsProtected Statistic */
-+ volatile uint32_t txsaopes; /**< TX Security Association OutPktsEncrypted Statistic */
-+ volatile uint32_t txsak[4]; /**< TX Security Association key (128 bit) */
-+ volatile uint32_t txsah[4]; /**< TX Security Association hash (128 bit) */
-+ volatile uint8_t res13[16]; /**< reserved */
-+ } _PackedType fmMacsecTxScSa[NUM_OF_SA_PER_TX_SC];
-+ volatile uint8_t res14[248]; /**< reserved */
-+
-+ /* Global configuration and status */
-+ volatile uint32_t ip_rev1; /**< MACsec IP Block Revision 1 register */
-+ volatile uint32_t ip_rev2; /**< MACsec IP Block Revision 2 register */
-+ volatile uint32_t evr; /**< MACsec Event Register */
-+ volatile uint32_t ever; /**< MACsec Event Enable Register */
-+ volatile uint32_t evfr; /**< MACsec Event Force Register */
-+ volatile uint32_t err; /**< MACsec Error Register */
-+ volatile uint32_t erer; /**< MACsec Error Enable Register */
-+ volatile uint32_t erfr; /**< MACsec Error Force Register */
-+ volatile uint8_t res15[40]; /**< reserved */
-+ volatile uint32_t meec; /**< MACsec Memory ECC Error Capture Register */
-+ volatile uint32_t idle; /**< MACsec Idle status Register */
-+ volatile uint8_t res16[184]; /**< reserved */
-+ /* DEBUG */
-+ volatile uint32_t rxec; /**< MACsec RX error capture Register */
-+ volatile uint8_t res17[28]; /**< reserved */
-+ volatile uint32_t txec; /**< MACsec TX error capture Register */
-+ volatile uint8_t res18[220]; /**< reserved */
-+
-+ /* Macsec Rx global statistic */
-+ volatile uint32_t ifiocp1hs; /**< ifInOctetsCp first half Statistic */
-+ volatile uint32_t ifiocp2hs; /**< ifInOctetsCp second half Statistic */
-+ volatile uint32_t ifiupcps; /**< ifInUcastPktsCp Statistic */
-+ volatile uint8_t res19[4]; /**< reserved */
-+ volatile uint32_t ifioup1hs; /**< ifInOctetsUp first half Statistic */
-+ volatile uint32_t ifioup2hs; /**< ifInOctetsUp second half Statistic */
-+ volatile uint32_t ifiupups; /**< ifInUcastPktsUp Statistic */
-+ volatile uint8_t res20[4]; /**< reserved */
-+ volatile uint32_t ifimpcps; /**< ifInMulticastPktsCp Statistic */
-+ volatile uint32_t ifibpcps; /**< ifInBroadcastPktsCp Statistic */
-+ volatile uint32_t ifimpups; /**< ifInMulticastPktsUp Statistic */
-+ volatile uint32_t ifibpups; /**< ifInBroadcastPktsUp Statistic */
-+ volatile uint32_t ipwts; /**< InPktsWithoutTag Statistic */
-+ volatile uint32_t ipkays; /**< InPktsKaY Statistic */
-+ volatile uint32_t ipbts; /**< InPktsBadTag Statistic */
-+ volatile uint32_t ipsnfs; /**< InPktsSCINotFound Statistic */
-+ volatile uint32_t ipuecs; /**< InPktsUnsupportedEC Statistic */
-+ volatile uint32_t ipescbs; /**< InPktsEponSingleCopyBroadcast Statistic */
-+ volatile uint32_t iptls; /**< InPktsTooLong Statistic */
-+ volatile uint8_t res21[52]; /**< reserved */
-+
-+ /* Macsec Tx global statistic */
-+ volatile uint32_t opds; /**< OutPktsDiscarded Statistic */
-+#if (DPAA_VERSION >= 11)
-+ volatile uint8_t res22[124]; /**< reserved */
-+ _Packed struct
-+ {
-+ volatile uint32_t rxsak[8]; /**< RX Security Association key (128/256 bit) */
-+ volatile uint8_t res23[32]; /**< reserved */
-+ } _PackedType rxScSaKey[NUM_OF_SA_PER_RX_SC];
-+ _Packed struct
-+ {
-+ volatile uint32_t txsak[8]; /**< TX Security Association key (128/256 bit) */
-+ volatile uint8_t res24[32]; /**< reserved */
-+ } _PackedType txScSaKey[NUM_OF_SA_PER_TX_SC];
-+#endif /* (DPAA_VERSION >= 11) */
-+} _PackedType t_FmMacsecRegs;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/**************************************************************************//**
-+ @Description General defines
-+*//***************************************************************************/
-+
-+#define SCI_HIGH_MASK 0xffffffff00000000LL
-+#define SCI_LOW_MASK 0x00000000ffffffffLL
-+
-+#define LONG_SHIFT 32
-+
-+#define GET_SCI_FIRST_HALF(sci) (uint32_t)((macsecSCI_t)((macsecSCI_t)(sci) & SCI_HIGH_MASK) >> LONG_SHIFT)
-+#define GET_SCI_SECOND_HALF(sci) (uint32_t)((macsecSCI_t)(sci) & SCI_LOW_MASK)
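These two macros split the 64-bit Secure Channel Identifier into the two 32-bit halves written to the rxsci1h/rxsci2h and txsci1h/txsci2h registers. An equivalent standalone helper, shown with a sample value; the local macsecSCI_t typedef is a stand-in for the driver's type:

    #include <stdint.h>

    typedef uint64_t macsecSCI_t;   /* stand-in for the driver typedef */

    /* For sci = 0x1122334455667788ULL this yields first_half = 0x11223344
     * and second_half = 0x55667788. */
    static void split_sci(macsecSCI_t sci, uint32_t *first_half, uint32_t *second_half)
    {
        *first_half  = (uint32_t)((sci & 0xffffffff00000000ULL) >> 32);
        *second_half = (uint32_t)( sci & 0x00000000ffffffffULL);
    }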
-+
-+/**************************************************************************//**
-+ @Description Configuration defines
-+*//***************************************************************************/
-+
-+/* masks */
-+#define CFG_UECT 0x00000800
-+#define CFG_ESCBT 0x00000400
-+#define CFG_USFT 0x00000300
-+#define CFG_ITT 0x00000080
-+#define CFG_KFT 0x00000040
-+#define CFG_UFT 0x00000030
-+#define CFG_KSS 0x00000004
-+#define CFG_BYPN 0x00000002
-+#define CFG_S0I 0x00000001
-+
-+#define ET_TYPE 0x0000ffff
-+
-+#define MFL_MAX_LEN 0x0000ffff
-+
-+#define RXSCA_SC_SEL 0x0000000f
-+
-+#define TXSCA_SC_SEL 0x0000000f
-+
-+#define IP_REV_1_IP_ID 0xffff0000
-+#define IP_REV_1_IP_MJ 0x0000ff00
-+#define IP_REV_1_IP_MM 0x000000ff
-+
-+#define IP_REV_2_IP_INT 0x00ff0000
-+#define IP_REV_2_IP_ERR 0x0000ff00
-+#define IP_REV_2_IP_CFG 0x000000ff
-+
-+#define MECC_CAP 0x80000000
-+#define MECC_CET 0x40000000
-+#define MECC_SERCNT 0x00ff0000
-+#define MECC_MEMADDR 0x000001ff
-+
-+/* shifts */
-+#define CFG_UECT_SHIFT (31-20)
-+#define CFG_ESCBT_SHIFT (31-21)
-+#define CFG_USFT_SHIFT (31-23)
-+#define CFG_ITT_SHIFT (31-24)
-+#define CFG_KFT_SHIFT (31-25)
-+#define CFG_UFT_SHIFT (31-27)
-+#define CFG_KSS_SHIFT (31-29)
-+#define CFG_BYPN_SHIFT (31-30)
-+#define CFG_S0I_SHIFT (31-31)
-+
-+#define IP_REV_1_IP_ID_SHIFT (31-15)
-+#define IP_REV_1_IP_MJ_SHIFT (31-23)
-+#define IP_REV_1_IP_MM_SHIFT (31-31)
-+
-+#define IP_REV_2_IP_INT_SHIFT (31-15)
-+#define IP_REV_2_IP_ERR_SHIFT (31-23)
-+#define IP_REV_2_IP_CFG_SHIFT (31-31)
-+
-+#define MECC_CAP_SHIFT (31-0)
-+#define MECC_CET_SHIFT (31-1)
-+#define MECC_SERCNT_SHIFT (31-15)
-+#define MECC_MEMADDR_SHIFT (31-31)
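The (31 - n) shift values translate the register documentation's big-endian bit numbering, where bit 0 is the most significant bit of the 32-bit word, into ordinary left-shift amounts. Two spot checks against the masks defined above, purely illustrative:

    #include <assert.h>

    static void check_shift_convention(void)
    {
        /* CFG_BYPN = 0x00000002 and CFG_BYPN_SHIFT = (31 - 30) = 1 */
        assert((1u << (31 - 30)) == 0x00000002u);
        /* CFG_S0I = 0x00000001 and CFG_S0I_SHIFT = (31 - 31) = 0 */
        assert((1u << (31 - 31)) == 0x00000001u);
    }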
-+
-+/**************************************************************************//**
-+ @Description RX SC defines
-+*//***************************************************************************/
-+
-+/* masks */
-+#define RX_SCCFG_SCI_EN_MASK 0x00000800
-+#define RX_SCCFG_RP_MASK 0x00000400
-+#define RX_SCCFG_VF_MASK 0x00000300
-+#define RX_SCCFG_CO_MASK 0x0000003f
-+
-+/* shifts */
-+#define RX_SCCFG_SCI_EN_SHIFT (31-20)
-+#define RX_SCCFG_RP_SHIFT (31-21)
-+#define RX_SCCFG_VF_SHIFT (31-23)
-+#define RX_SCCFG_CO_SHIFT (31-31)
-+#define RX_SCCFG_CS_SHIFT (31-7)
-+
-+/**************************************************************************//**
-+ @Description RX SA defines
-+*//***************************************************************************/
-+
-+/* masks */
-+#define RX_SACFG_ACTIVE 0x80000000
-+#define RX_SACFG_AN_MASK 0x00000006
-+#define RX_SACFG_EN_MASK 0x00000001
-+
-+/* shifts */
-+#define RX_SACFG_AN_SHIFT (31-30)
-+#define RX_SACFG_EN_SHIFT (31-31)
-+
-+/**************************************************************************//**
-+ @Description TX SC defines
-+*//***************************************************************************/
-+
-+/* masks */
-+#define TX_SCCFG_AN_MASK 0x000c0000
-+#define TX_SCCFG_ASA_MASK 0x00020000
-+#define TX_SCCFG_SCE_MASK 0x00010000
-+#define TX_SCCFG_CO_MASK 0x00003f00
-+#define TX_SCCFG_CE_MASK 0x00000010
-+#define TX_SCCFG_PF_MASK 0x00000008
-+#define TX_SCCFG_AIS_MASK 0x00000004
-+#define TX_SCCFG_UES_MASK 0x00000002
-+#define TX_SCCFG_USCB_MASK 0x00000001
-+
-+/* shifts */
-+#define TX_SCCFG_AN_SHIFT (31-13)
-+#define TX_SCCFG_ASA_SHIFT (31-14)
-+#define TX_SCCFG_SCE_SHIFT (31-15)
-+#define TX_SCCFG_CO_SHIFT (31-23)
-+#define TX_SCCFG_CE_SHIFT (31-27)
-+#define TX_SCCFG_PF_SHIFT (31-28)
-+#define TX_SCCFG_AIS_SHIFT (31-29)
-+#define TX_SCCFG_UES_SHIFT (31-30)
-+#define TX_SCCFG_USCB_SHIFT (31-31)
-+#define TX_SCCFG_CS_SHIFT (31-7)
-+
-+/**************************************************************************//**
-+ @Description TX SA defines
-+*//***************************************************************************/
-+
-+/* masks */
-+#define TX_SACFG_ACTIVE 0x80000000
-+
-+
-+typedef struct
-+{
-+ void (*f_Isr) (t_Handle h_Arg, uint32_t id);
-+ t_Handle h_SrcHandle;
-+} t_FmMacsecIntrSrc;
-+
-+typedef struct
-+{
-+ e_FmMacsecUnknownSciFrameTreatment unknownSciTreatMode;
-+ bool invalidTagsDeliverUncontrolled;
-+ bool changedTextWithNoEncryptDeliverUncontrolled;
-+ bool onlyScbIsSetDeliverUncontrolled;
-+ bool encryptWithNoChangedTextDiscardUncontrolled;
-+ e_FmMacsecUntagFrameTreatment untagTreatMode;
-+ uint32_t pnExhThr;
-+ bool keysUnreadable;
-+ bool byPassMode;
-+ bool reservedSc0;
-+ uint32_t sectagOverhead;
-+ uint32_t mflSubtract;
-+} t_FmMacsecDriverParam;
-+
-+typedef struct
-+{
-+ t_FmMacsecControllerDriver fmMacsecControllerDriver;
-+ t_Handle h_Fm;
-+ t_FmMacsecRegs *p_FmMacsecRegs;
-+ t_Handle h_FmMac; /**< A handle to the FM MAC object related to this MACsec */
-+ char fmMacsecModuleName[MODULE_NAME_SIZE];
-+ t_FmMacsecIntrSrc intrMng[NUM_OF_INTER_MODULE_EVENTS];
-+ uint32_t events;
-+ uint32_t exceptions;
-+ uint32_t userExceptions;
-+ t_FmMacsecExceptionsCallback *f_Exception; /**< Exception Callback Routine */
-+ t_Handle h_App; /**< A handle to an application layer object; This handle will
-+ be passed by the driver upon calling the above callbacks */
-+ bool rxScTable[NUM_OF_RX_SC];
-+ uint32_t numRxScAvailable;
-+ bool txScTable[NUM_OF_TX_SC];
-+ uint32_t numTxScAvailable;
-+ t_Handle rxScSpinLock;
-+ t_Handle txScSpinLock;
-+ t_FmMacsecDriverParam *p_FmMacsecDriverParam;
-+} t_FmMacsec;
-+
-+
-+#endif /* __FM_MACSEC_MASTER_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.c
-@@ -0,0 +1,883 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fm_macsec_secy.c
-+
-+ @Description FM MACSEC SECY driver routines implementation.
-+*//***************************************************************************/
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "xx_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+
-+#include "fm_macsec_secy.h"
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+static void FmMacsecSecYExceptionsIsr(t_Handle h_FmMacsecSecY, uint32_t id)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ UNUSED(id);
-+ SANITY_CHECK_RETURN(p_FmMacsecSecY, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecSecY->exceptions & FM_MACSEC_SECY_EX_FRAME_DISCARDED)
-+ p_FmMacsecSecY->f_Exception(p_FmMacsecSecY->h_App, e_FM_MACSEC_SECY_EX_FRAME_DISCARDED);
-+}
-+
-+static void FmMacsecSecYEventsIsr(t_Handle h_FmMacsecSecY, uint32_t id)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ UNUSED(id);
-+ SANITY_CHECK_RETURN(p_FmMacsecSecY, E_INVALID_HANDLE);
-+
-+ if (p_FmMacsecSecY->events & FM_MACSEC_SECY_EV_NEXT_PN)
-+ p_FmMacsecSecY->f_Event(p_FmMacsecSecY->h_App, e_FM_MACSEC_SECY_EV_NEXT_PN);
-+}
-+
-+static t_Error CheckFmMacsecSecYParameters(t_FmMacsecSecY *p_FmMacsecSecY)
-+{
-+ if (!p_FmMacsecSecY->f_Exception)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
-+
-+ if (!p_FmMacsecSecY->f_Event)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Events callback not provided"));
-+
-+ if (!p_FmMacsecSecY->numOfRxSc)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Num of Rx Scs must be greater than '0'"));
-+
-+
-+ return E_OK;
-+}
-+
-+static t_Handle FmMacsecSecYCreateSc(t_FmMacsecSecY *p_FmMacsecSecY,
-+ macsecSCI_t sci,
-+ e_FmMacsecSecYCipherSuite cipherSuite,
-+ e_ScType type)
-+{
-+ t_SecYSc *p_ScTable;
-+ void *p_Params;
-+ uint32_t numOfSc,i;
-+ t_Error err = E_OK;
-+ t_RxScParams rxScParams;
-+ t_TxScParams txScParams;
-+
-+ ASSERT_COND(p_FmMacsecSecY);
-+ ASSERT_COND(p_FmMacsecSecY->h_FmMacsec);
-+
-+ if (type == e_SC_RX)
-+ {
-+ memset(&rxScParams, 0, sizeof(rxScParams));
-+ i = (NUM_OF_RX_SC - 1);
-+ p_ScTable = p_FmMacsecSecY->p_RxSc;
-+ numOfSc = p_FmMacsecSecY->numOfRxSc;
-+ rxScParams.confidentialityOffset = p_FmMacsecSecY->confidentialityOffset;
-+ rxScParams.replayProtect = p_FmMacsecSecY->replayProtect;
-+ rxScParams.replayWindow = p_FmMacsecSecY->replayWindow;
-+ rxScParams.validateFrames = p_FmMacsecSecY->validateFrames;
-+ rxScParams.cipherSuite = cipherSuite;
-+ p_Params = &rxScParams;
-+ }
-+ else
-+ {
-+ memset(&txScParams, 0, sizeof(txScParams));
-+ i = (NUM_OF_TX_SC - 1);
-+ p_ScTable = p_FmMacsecSecY->p_TxSc;
-+ numOfSc = p_FmMacsecSecY->numOfTxSc;
-+ txScParams.sciInsertionMode = p_FmMacsecSecY->sciInsertionMode;
-+ txScParams.protectFrames = p_FmMacsecSecY->protectFrames;
-+ txScParams.confidentialityEnable = p_FmMacsecSecY->confidentialityEnable;
-+ txScParams.confidentialityOffset = p_FmMacsecSecY->confidentialityOffset;
-+ txScParams.cipherSuite = cipherSuite;
-+ p_Params = &txScParams;
-+ }
-+
-+ for (i=0;i<numOfSc;i++)
-+ if (!p_ScTable[i].inUse)
-+ break;
-+ if (i == numOfSc)
-+ {
-+ REPORT_ERROR(MAJOR, E_FULL, ("FM MACSEC SECY SC"));
-+ return NULL;
-+ }
-+
-+ if (type == e_SC_RX)
-+ {
-+ ((t_RxScParams *)p_Params)->scId = p_ScTable[i].scId;
-+ ((t_RxScParams *)p_Params)->sci = sci;
-+ if ((err = FmMacsecCreateRxSc(p_FmMacsecSecY->h_FmMacsec, (t_RxScParams *)p_Params)) != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY RX SC"));
-+ return NULL;
-+ }
-+ }
-+ else
-+ {
-+ ((t_TxScParams *)p_Params)->scId = p_ScTable[i].scId;
-+ ((t_TxScParams *)p_Params)->sci = sci;
-+ if ((err = FmMacsecCreateTxSc(p_FmMacsecSecY->h_FmMacsec, (t_TxScParams *)p_Params)) != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY TX SC"));
-+ return NULL;
-+ }
-+ }
-+
-+ p_ScTable[i].inUse = TRUE;
-+ return &p_ScTable[i];
-+}
-+
-+static t_Error FmMacsecSecYDeleteSc(t_FmMacsecSecY *p_FmMacsecSecY, t_SecYSc *p_FmSecYSc, e_ScType type)
-+{
-+ t_Error err = E_OK;
-+
-+ ASSERT_COND(p_FmMacsecSecY);
-+ ASSERT_COND(p_FmMacsecSecY->h_FmMacsec);
-+ ASSERT_COND(p_FmSecYSc);
-+
-+ if (type == e_SC_RX)
-+ {
-+ if ((err = FmMacsecDeleteRxSc(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+ else
-+ if ((err = FmMacsecDeleteTxSc(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ p_FmSecYSc->inUse = FALSE;
-+
-+ return err;
-+}
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+t_Handle FM_MACSEC_SECY_Config(t_FmMacsecSecYParams *p_FmMacsecSecYParam)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY;
-+
-+ /* Allocate FM MACSEC structure */
-+ p_FmMacsecSecY = (t_FmMacsecSecY *) XX_Malloc(sizeof(t_FmMacsecSecY));
-+ if (!p_FmMacsecSecY)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY driver structure"));
-+ return NULL;
-+ }
-+ memset(p_FmMacsecSecY, 0, sizeof(t_FmMacsecSecY));
-+
-+ /* Allocate the FM MACSEC driver's parameters structure */
-+ p_FmMacsecSecY->p_FmMacsecSecYDriverParam = (t_FmMacsecSecYDriverParam *)XX_Malloc(sizeof(t_FmMacsecSecYDriverParam));
-+ if (!p_FmMacsecSecY->p_FmMacsecSecYDriverParam)
-+ {
-+ XX_Free(p_FmMacsecSecY);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY driver parameters"));
-+ return NULL;
-+ }
-+ memset(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, 0, sizeof(t_FmMacsecSecYDriverParam));
-+
-+ /* Initialize FM MACSEC SECY parameters which will be kept by the driver */
-+ p_FmMacsecSecY->h_FmMacsec = p_FmMacsecSecYParam->h_FmMacsec;
-+ p_FmMacsecSecY->f_Event = p_FmMacsecSecYParam->f_Event;
-+ p_FmMacsecSecY->f_Exception = p_FmMacsecSecYParam->f_Exception;
-+ p_FmMacsecSecY->h_App = p_FmMacsecSecYParam->h_App;
-+ p_FmMacsecSecY->confidentialityEnable = DEFAULT_confidentialityEnable;
-+ p_FmMacsecSecY->confidentialityOffset = DEFAULT_confidentialityOffset;
-+ p_FmMacsecSecY->validateFrames = DEFAULT_validateFrames;
-+ p_FmMacsecSecY->replayProtect = DEFAULT_replayEnable;
-+ p_FmMacsecSecY->replayWindow = DEFAULT_replayWindow;
-+ p_FmMacsecSecY->protectFrames = DEFAULT_protectFrames;
-+ p_FmMacsecSecY->sciInsertionMode = DEFAULT_sciInsertionMode;
-+ p_FmMacsecSecY->isPointToPoint = DEFAULT_ptp;
-+ p_FmMacsecSecY->numOfRxSc = p_FmMacsecSecYParam->numReceiveChannels;
-+ p_FmMacsecSecY->numOfTxSc = DEFAULT_numOfTxSc;
-+ p_FmMacsecSecY->exceptions = DEFAULT_exceptions;
-+ p_FmMacsecSecY->events = DEFAULT_events;
-+
-+ memcpy(&p_FmMacsecSecY->p_FmMacsecSecYDriverParam->txScParams,
-+ &p_FmMacsecSecYParam->txScParams,
-+ sizeof(t_FmMacsecSecYSCParams));
-+ return p_FmMacsecSecY;
-+}
-+
-+t_Error FM_MACSEC_SECY_Init(t_Handle h_FmMacsecSecY)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_FmMacsecSecYDriverParam *p_FmMacsecSecYDriverParam = NULL;
-+ uint32_t rxScIds[NUM_OF_RX_SC], txScIds[NUM_OF_TX_SC], i, j;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_HANDLE);
-+
-+ CHECK_INIT_PARAMETERS(p_FmMacsecSecY, CheckFmMacsecSecYParameters);
-+
-+ p_FmMacsecSecYDriverParam = p_FmMacsecSecY->p_FmMacsecSecYDriverParam;
-+
-+ if ((p_FmMacsecSecY->isPointToPoint) &&
-+ ((err = FmMacsecSetPTP(p_FmMacsecSecY->h_FmMacsec, TRUE)) != E_OK))
-+ RETURN_ERROR(MAJOR, err, ("Can't set Point-to-Point"));
-+
-+ /* Rx Sc Allocation */
-+ p_FmMacsecSecY->p_RxSc = (t_SecYSc *)XX_Malloc(sizeof(t_SecYSc) * p_FmMacsecSecY->numOfRxSc);
-+ if (!p_FmMacsecSecY->p_RxSc)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY RX SC"));
-+ memset(p_FmMacsecSecY->p_RxSc, 0, sizeof(t_SecYSc) * p_FmMacsecSecY->numOfRxSc);
-+ if ((err = FmMacsecAllocScs(p_FmMacsecSecY->h_FmMacsec, e_SC_RX, p_FmMacsecSecY->isPointToPoint, p_FmMacsecSecY->numOfRxSc, rxScIds)) != E_OK)
-+ {
-+ if (p_FmMacsecSecY->p_TxSc)
-+ XX_Free(p_FmMacsecSecY->p_TxSc);
-+ if (p_FmMacsecSecY->p_RxSc)
-+ XX_Free(p_FmMacsecSecY->p_RxSc);
-+ return ERROR_CODE(err);
-+ }
-+ for (i=0; i<p_FmMacsecSecY->numOfRxSc; i++)
-+ {
-+ p_FmMacsecSecY->p_RxSc[i].scId = rxScIds[i];
-+ p_FmMacsecSecY->p_RxSc[i].type = e_SC_RX;
-+ for (j=0; j<MAX_NUM_OF_SA_PER_SC;j++)
-+ p_FmMacsecSecY->p_RxSc[i].sa[j].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
-+ }
-+
-+ /* Tx Sc Allocation */
-+ p_FmMacsecSecY->p_TxSc = (t_SecYSc *)XX_Malloc(sizeof(t_SecYSc) * p_FmMacsecSecY->numOfTxSc);
-+ if (!p_FmMacsecSecY->p_TxSc)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM MACSEC SECY TX SC"));
-+ memset(p_FmMacsecSecY->p_TxSc, 0, sizeof(t_SecYSc) * p_FmMacsecSecY->numOfTxSc);
-+
-+ if ((err = FmMacsecAllocScs(p_FmMacsecSecY->h_FmMacsec, e_SC_TX, p_FmMacsecSecY->isPointToPoint, p_FmMacsecSecY->numOfTxSc, txScIds)) != E_OK)
-+ {
-+ if (p_FmMacsecSecY->p_TxSc)
-+ XX_Free(p_FmMacsecSecY->p_TxSc);
-+ if (p_FmMacsecSecY->p_RxSc)
-+ XX_Free(p_FmMacsecSecY->p_RxSc);
-+ return ERROR_CODE(err);
-+ }
-+ for (i=0; i<p_FmMacsecSecY->numOfTxSc; i++)
-+ {
-+ p_FmMacsecSecY->p_TxSc[i].scId = txScIds[i];
-+ p_FmMacsecSecY->p_TxSc[i].type = e_SC_TX;
-+ for (j=0; j<MAX_NUM_OF_SA_PER_SC;j++)
-+ p_FmMacsecSecY->p_TxSc[i].sa[j].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
-+ FmMacsecRegisterIntr(p_FmMacsecSecY->h_FmMacsec,
-+ e_FM_MACSEC_MOD_SC_TX,
-+ (uint8_t)txScIds[i],
-+ e_FM_INTR_TYPE_ERR,
-+ FmMacsecSecYExceptionsIsr,
-+ p_FmMacsecSecY);
-+ FmMacsecRegisterIntr(p_FmMacsecSecY->h_FmMacsec,
-+ e_FM_MACSEC_MOD_SC_TX,
-+ (uint8_t)txScIds[i],
-+ e_FM_INTR_TYPE_NORMAL,
-+ FmMacsecSecYEventsIsr,
-+ p_FmMacsecSecY);
-+
-+ if (p_FmMacsecSecY->exceptions & FM_MACSEC_SECY_EX_FRAME_DISCARDED)
-+ FmMacsecSetException(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EX_TX_SC, txScIds[i], TRUE);
-+ if (p_FmMacsecSecY->events & FM_MACSEC_SECY_EV_NEXT_PN)
-+ FmMacsecSetEvent(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EV_TX_SC_NEXT_PN, txScIds[i], TRUE);
-+ }
-+
-+ FmMacsecSecYCreateSc(p_FmMacsecSecY,
-+ p_FmMacsecSecYDriverParam->txScParams.sci,
-+ p_FmMacsecSecYDriverParam->txScParams.cipherSuite,
-+ e_SC_TX);
-+ XX_Free(p_FmMacsecSecYDriverParam);
-+ p_FmMacsecSecY->p_FmMacsecSecYDriverParam = NULL;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_Free(t_Handle h_FmMacsecSecY)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_Error err = E_OK;
-+ uint32_t rxScIds[NUM_OF_RX_SC], txScIds[NUM_OF_TX_SC], i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmMacsecSecY->isPointToPoint)
-+ FmMacsecSetPTP(p_FmMacsecSecY->h_FmMacsec, FALSE);
-+ if (p_FmMacsecSecY->p_RxSc)
-+ {
-+ for (i=0; i<p_FmMacsecSecY->numOfRxSc; i++)
-+ rxScIds[i] = p_FmMacsecSecY->p_RxSc[i].scId;
-+ if ((err = FmMacsecFreeScs(p_FmMacsecSecY->h_FmMacsec, e_SC_RX, p_FmMacsecSecY->numOfRxSc, rxScIds)) != E_OK)
-+ return ERROR_CODE(err);
-+ XX_Free(p_FmMacsecSecY->p_RxSc);
-+ }
-+ if (p_FmMacsecSecY->p_TxSc)
-+ {
-+ FmMacsecSecYDeleteSc(p_FmMacsecSecY, &p_FmMacsecSecY->p_TxSc[0], e_SC_TX);
-+
-+ for (i=0; i<p_FmMacsecSecY->numOfTxSc; i++) {
-+ txScIds[i] = p_FmMacsecSecY->p_TxSc[i].scId;
-+ FmMacsecUnregisterIntr(p_FmMacsecSecY->h_FmMacsec,
-+ e_FM_MACSEC_MOD_SC_TX,
-+ (uint8_t)txScIds[i],
-+ e_FM_INTR_TYPE_ERR);
-+ FmMacsecUnregisterIntr(p_FmMacsecSecY->h_FmMacsec,
-+ e_FM_MACSEC_MOD_SC_TX,
-+ (uint8_t)txScIds[i],
-+ e_FM_INTR_TYPE_NORMAL);
-+
-+ if (p_FmMacsecSecY->exceptions & FM_MACSEC_SECY_EX_FRAME_DISCARDED)
-+ FmMacsecSetException(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EX_TX_SC, txScIds[i], FALSE);
-+ if (p_FmMacsecSecY->events & FM_MACSEC_SECY_EV_NEXT_PN)
-+ FmMacsecSetEvent(p_FmMacsecSecY->h_FmMacsec, e_FM_MACSEC_EV_TX_SC_NEXT_PN, txScIds[i], FALSE);
-+ }
-+
-+ if ((err = FmMacsecFreeScs(p_FmMacsecSecY->h_FmMacsec, e_SC_TX, p_FmMacsecSecY->numOfTxSc, txScIds)) != E_OK)
-+ return ERROR_CODE(err);
-+ XX_Free(p_FmMacsecSecY->p_TxSc);
-+ }
-+
-+ XX_Free(p_FmMacsecSecY);
-+
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigSciInsertionMode(t_Handle h_FmMacsecSecY, e_FmMacsecSciInsertionMode sciInsertionMode)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ p_FmMacsecSecY->sciInsertionMode = sciInsertionMode;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigProtectFrames(t_Handle h_FmMacsecSecY, bool protectFrames)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ p_FmMacsecSecY->protectFrames = protectFrames;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigReplayWindow(t_Handle h_FmMacsecSecY, bool replayProtect, uint32_t replayWindow)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ p_FmMacsecSecY->replayProtect = replayProtect;
-+ p_FmMacsecSecY->replayWindow = replayWindow;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigValidationMode(t_Handle h_FmMacsecSecY, e_FmMacsecValidFrameBehavior validateFrames)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ p_FmMacsecSecY->validateFrames = validateFrames;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigConfidentiality(t_Handle h_FmMacsecSecY, bool confidentialityEnable, uint16_t confidentialityOffset)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ p_FmMacsecSecY->confidentialityEnable = confidentialityEnable;
-+ p_FmMacsecSecY->confidentialityOffset = confidentialityOffset;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigPointToPoint(t_Handle h_FmMacsecSecY)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ p_FmMacsecSecY->numOfRxSc = 1;
-+ p_FmMacsecSecY->isPointToPoint = TRUE;
-+ p_FmMacsecSecY->sciInsertionMode = e_FM_MACSEC_SCI_INSERTION_MODE_IMPLICT_PTP;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigException(t_Handle h_FmMacsecSecY, e_FmMacsecSecYExceptions exception, bool enable)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmMacsecSecY->exceptions |= bitMask;
-+ else
-+ p_FmMacsecSecY->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_MACSEC_SECY_ConfigEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+
-+ GET_EVENT_FLAG(bitMask, event);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmMacsecSecY->events |= bitMask;
-+ else
-+ p_FmMacsecSecY->events &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined event"));
-+
-+ return E_OK;
-+}
-+
-+t_Handle FM_MACSEC_SECY_CreateRxSc(t_Handle h_FmMacsecSecY, t_FmMacsecSecYSCParams *p_ScParams)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacsecSecY, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_ScParams, E_NULL_POINTER, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE, NULL);
-+
-+ return FmMacsecSecYCreateSc(p_FmMacsecSecY, p_ScParams->sci, p_ScParams->cipherSuite, e_SC_RX);
-+}
-+
-+t_Error FM_MACSEC_SECY_DeleteRxSc(t_Handle h_FmMacsecSecY, t_Handle h_Sc)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+
-+ return FmMacsecSecYDeleteSc(p_FmMacsecSecY, p_FmSecYSc, e_SC_RX);
-+}
-+
-+t_Error FM_MACSEC_SECY_CreateRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId != SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is already assigned",an));
-+
-+ if ((err = FmMacsecCreateRxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, (e_ScSaId)p_FmSecYSc->numOfSa, an, lowestPn, key)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ p_FmSecYSc->sa[an].saId = (e_ScSaId)p_FmSecYSc->numOfSa++;
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_DeleteRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is already deleted",an));
-+
-+ if ((err = FmMacsecDeleteRxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ p_FmSecYSc->numOfSa--;
-+ p_FmSecYSc->sa[an].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
-+ /* TODO - check if statistics need to be read*/
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_RxSaEnableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
-+
-+ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, TRUE)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ p_FmSecYSc->sa[an].active = TRUE;
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_RxSaDisableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
-+
-+ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, FALSE)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ p_FmSecYSc->sa[an].active = FALSE;
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_RxSaUpdateNextPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtNextPN)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
-+
-+ if ((err = FmMacsecRxSaUpdateNextPn(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, updtNextPN)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_RxSaUpdateLowestPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtLowestPN)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
-+
-+ if ((err = FmMacsecRxSaUpdateLowestPn(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, updtLowestPN)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_RxSaModifyKey(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, macsecSAKey_t key)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
-+
-+ if (p_FmSecYSc->sa[an].active)
-+ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, FALSE)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ /* TODO - statistics should be read */
-+
-+ if ((err = FmMacsecCreateRxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, an, 1, key)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ if (p_FmSecYSc->sa[an].active)
-+ if ((err = FmMacsecRxSaSetReceive(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId, TRUE)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ return err;
-+}
-+
-+
-+t_Error FM_MACSEC_SECY_CreateTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an, macsecSAKey_t key)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId != SECY_AN_FREE_VALUE)
-+        RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is already assigned",an));
-+
-+ if ((err = FmMacsecCreateTxSa(p_FmMacsecSecY->h_FmMacsec,p_FmSecYSc->scId, (e_ScSaId)p_FmSecYSc->numOfSa, key)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ p_FmSecYSc->sa[an].saId = (e_ScSaId)p_FmSecYSc->numOfSa++;
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_DeleteTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is already deleted",an));
-+
-+ if ((err = FmMacsecDeleteTxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[an].saId)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ p_FmSecYSc->numOfSa--;
-+ p_FmSecYSc->sa[an].saId = (e_ScSaId)SECY_AN_FREE_VALUE;
-+ /* TODO - check if statistics need to be read*/
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_TxSaModifyKey(t_Handle h_FmMacsecSecY, macsecAN_t nextActiveAn, macsecSAKey_t key)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc;
-+ macsecAN_t currentAn;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(nextActiveAn < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if ((err = FmMacsecTxSaGetActive(p_FmMacsecSecY->h_FmMacsec,
-+ p_FmSecYSc->scId,
-+ &currentAn)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ if ((err = FmMacsecTxSaSetActive(p_FmMacsecSecY->h_FmMacsec,
-+ p_FmSecYSc->scId,
-+ p_FmSecYSc->sa[nextActiveAn].saId,
-+ nextActiveAn)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ /* TODO - statistics should be read */
-+
-+ if ((err = FmMacsecCreateTxSa(p_FmMacsecSecY->h_FmMacsec, p_FmSecYSc->scId, p_FmSecYSc->sa[currentAn].saId, key)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_TxSaSetActive(t_Handle h_FmMacsecSecY, macsecAN_t an)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(an < MAX_NUM_OF_SA_PER_SC, E_INVALID_STATE);
-+
-+ if (p_FmSecYSc->sa[an].saId == SECY_AN_FREE_VALUE)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("An %d is not configured",an));
-+
-+ if ((err = FmMacsecTxSaSetActive(p_FmMacsecSecY->h_FmMacsec,
-+ p_FmSecYSc->scId,
-+ p_FmSecYSc->sa[an].saId,
-+ an)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_TxSaGetActive(t_Handle h_FmMacsecSecY, macsecAN_t *p_An)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_An, E_INVALID_HANDLE);
-+
-+    /* if (p_AdNewPtr == NULL) --> Done. (case (3)) */
-+ p_FmSecYSc->scId,
-+ p_An)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_GetRxScPhysId(t_Handle h_FmMacsecSecY, t_Handle h_Sc, uint32_t *p_ScPhysId)
-+{
-+ t_SecYSc *p_FmSecYSc = (t_SecYSc *)h_Sc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((t_FmMacsecSecY *)h_FmMacsecSecY)->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!((t_FmMacsecSecY *)h_FmMacsecSecY)->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+#ifdef DISABLE_SANITY_CHECKS
-+ UNUSED(h_FmMacsecSecY);
-+#endif /* DISABLE_SANITY_CHECKS */
-+
-+ *p_ScPhysId = p_FmSecYSc->scId;
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_GetTxScPhysId(t_Handle h_FmMacsecSecY, uint32_t *p_ScPhysId)
-+{
-+ t_FmMacsecSecY *p_FmMacsecSecY = (t_FmMacsecSecY *)h_FmMacsecSecY;
-+ t_SecYSc *p_FmSecYSc;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMacsecSecY->h_FmMacsec, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmMacsecSecY->p_FmMacsecSecYDriverParam, E_INVALID_STATE);
-+ p_FmSecYSc = &p_FmMacsecSecY->p_TxSc[0];
-+ SANITY_CHECK_RETURN_ERROR(p_FmSecYSc, E_INVALID_HANDLE);
-+
-+ *p_ScPhysId = p_FmSecYSc->scId;
-+ return err;
-+}
-+
-+t_Error FM_MACSEC_SECY_SetException(t_Handle h_FmMacsecSecY, e_FmMacsecExceptions exception, bool enable)
-+{
-+ UNUSED(h_FmMacsecSecY);UNUSED(exception);UNUSED(enable);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_SECY_SetEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable)
-+{
-+ UNUSED(h_FmMacsecSecY);UNUSED(event);UNUSED(enable);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_SECY_GetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYStatistics *p_Statistics)
-+{
-+ UNUSED(h_FmMacsecSecY);UNUSED(p_Statistics);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_SECY_RxScGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, t_FmMacsecSecYRxScStatistics *p_Statistics)
-+{
-+ UNUSED(h_FmMacsecSecY);UNUSED(h_Sc);UNUSED(p_Statistics);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_SECY_RxSaGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, t_FmMacsecSecYRxSaStatistics *p_Statistics)
-+{
-+ UNUSED(h_FmMacsecSecY);UNUSED(h_Sc);UNUSED(an);UNUSED(p_Statistics);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_SECY_TxScGetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYTxScStatistics *p_Statistics)
-+{
-+ UNUSED(h_FmMacsecSecY);UNUSED(p_Statistics);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
-+t_Error FM_MACSEC_SECY_TxSaGetStatistics(t_Handle h_FmMacsecSecY, macsecAN_t an, t_FmMacsecSecYTxSaStatistics *p_Statistics)
-+{
-+ UNUSED(h_FmMacsecSecY);UNUSED(an);UNUSED(p_Statistics);
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+}
-+
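The fm_macsec_secy.c hunk above removes a small configure/init/teardown API. For orientation only, and not part of the removed patch, the sketch below strings the calls visible above together in their expected order. It assumes h_SecY is a configured but not yet initialized SecY handle returned by the configuration routine whose tail appears at the top of this hunk, that the SDK's SecY public header is on the include path, and that secy_bringup_sketch, h_SecY, p_RxScParams and key are illustrative names supplied by the caller.

/*
 * Hedged usage sketch (editor's illustration, not SDK code): bring up one
 * Rx SC with a single SA on AN 0 and activate a Tx SA on AN 0, using only
 * the FM_MACSEC_SECY_* calls implemented in the hunk above.
 */
static t_Error secy_bringup_sketch(t_Handle h_SecY,
                                   t_FmMacsecSecYSCParams *p_RxScParams,
                                   macsecSAKey_t key)
{
    t_Handle h_RxSc;
    t_Error  err;

    /* Optional pre-init tuning: enable replay protection, window of 32 frames */
    err = FM_MACSEC_SECY_ConfigReplayWindow(h_SecY, TRUE, 32);
    if (err != E_OK)
        return err;

    /* Commit the configuration; this allocates the Rx/Tx SCs in hardware */
    err = FM_MACSEC_SECY_Init(h_SecY);
    if (err != E_OK)
        return err;

    /* Receive path: create an SC, attach an SA on AN 0, then enable it */
    h_RxSc = FM_MACSEC_SECY_CreateRxSc(h_SecY, p_RxScParams);
    if (!h_RxSc)
        return E_INVALID_HANDLE;
    err = FM_MACSEC_SECY_CreateRxSa(h_SecY, h_RxSc, 0, 1, key);
    if (err != E_OK)
        return err;
    err = FM_MACSEC_SECY_RxSaEnableReceive(h_SecY, h_RxSc, 0);
    if (err != E_OK)
        return err;

    /* Transmit path: create a Tx SA on AN 0 and make it the active one */
    err = FM_MACSEC_SECY_CreateTxSa(h_SecY, 0, key);
    if (err != E_OK)
        return err;
    return FM_MACSEC_SECY_TxSaSetActive(h_SecY, 0);
}

Teardown would mirror this with FM_MACSEC_SECY_DeleteRxSa/FM_MACSEC_SECY_DeleteRxSc and finally FM_MACSEC_SECY_Free, as implemented above.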
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MACSEC/fm_macsec_secy.h
-@@ -0,0 +1,144 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fm_macsec_secy.h
-+
-+ @Description FM MACSEC SecY internal structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_MACSEC_SECY_H
-+#define __FM_MACSEC_SECY_H
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+
-+#include "fm_macsec.h"
-+
-+
-+/**************************************************************************//**
-+ @Description Exceptions
-+*//***************************************************************************/
-+
-+#define FM_MACSEC_SECY_EX_FRAME_DISCARDED 0x80000000
-+
-+#define GET_EXCEPTION_FLAG(bitMask, exception) switch (exception){ \
-+ case e_FM_MACSEC_SECY_EX_FRAME_DISCARDED: \
-+ bitMask = FM_MACSEC_SECY_EX_FRAME_DISCARDED; break; \
-+ default: bitMask = 0;break;}
-+
-+/**************************************************************************//**
-+ @Description Events
-+*//***************************************************************************/
-+
-+#define FM_MACSEC_SECY_EV_NEXT_PN 0x80000000
-+
-+#define GET_EVENT_FLAG(bitMask, event) switch (event){ \
-+ case e_FM_MACSEC_SECY_EV_NEXT_PN: \
-+ bitMask = FM_MACSEC_SECY_EV_NEXT_PN; break; \
-+ default: bitMask = 0;break;}
-+
-+/**************************************************************************//**
-+ @Description Defaults
-+*//***************************************************************************/
-+
-+#define DEFAULT_exceptions (FM_MACSEC_SECY_EX_FRAME_DISCARDED)
-+#define DEFAULT_events (FM_MACSEC_SECY_EV_NEXT_PN)
-+#define DEFAULT_numOfTxSc 1
-+#define DEFAULT_confidentialityEnable FALSE
-+#define DEFAULT_confidentialityOffset 0
-+#define DEFAULT_sciInsertionMode e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG
-+#define DEFAULT_validateFrames e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT
-+#define DEFAULT_replayEnable FALSE
-+#define DEFAULT_replayWindow 0
-+#define DEFAULT_protectFrames TRUE
-+#define DEFAULT_ptp FALSE
-+
-+/**************************************************************************//**
-+ @Description General defines
-+*//***************************************************************************/
-+
-+#define SECY_AN_FREE_VALUE MAX_NUM_OF_SA_PER_SC
-+
-+
-+typedef struct {
-+ e_ScSaId saId;
-+ bool active;
-+ union {
-+ t_FmMacsecSecYRxSaStatistics rxSaStatistics;
-+ t_FmMacsecSecYTxSaStatistics txSaStatistics;
-+ };
-+} t_SecYSa;
-+
-+typedef struct {
-+ bool inUse;
-+ uint32_t scId;
-+ e_ScType type;
-+ uint8_t numOfSa;
-+ t_SecYSa sa[MAX_NUM_OF_SA_PER_SC];
-+ union {
-+ t_FmMacsecSecYRxScStatistics rxScStatistics;
-+ t_FmMacsecSecYTxScStatistics txScStatistics;
-+ };
-+} t_SecYSc;
-+
-+typedef struct {
-+ t_FmMacsecSecYSCParams txScParams; /**< Tx SC Params */
-+} t_FmMacsecSecYDriverParam;
-+
-+typedef struct {
-+ t_Handle h_FmMacsec;
-+ bool confidentialityEnable; /**< TRUE - confidentiality protection and integrity protection
-+ FALSE - no confidentiality protection, only integrity protection*/
-+ uint16_t confidentialityOffset; /**< The number of initial octets of each MSDU without confidentiality protection
-+ common values are 0, 30, and 50 */
-+ bool replayProtect; /**< replay protection function mode */
-+ uint32_t replayWindow; /**< the size of the replay window */
-+ e_FmMacsecValidFrameBehavior validateFrames; /**< validation function mode */
-+ e_FmMacsecSciInsertionMode sciInsertionMode;
-+ bool protectFrames;
-+ bool isPointToPoint;
-+ e_FmMacsecSecYCipherSuite cipherSuite; /**< Cipher suite to be used for this SecY */
-+ uint32_t numOfRxSc; /**< Number of receive channels */
-+ uint32_t numOfTxSc; /**< Number of transmit channels */
-+ t_SecYSc *p_RxSc;
-+ t_SecYSc *p_TxSc;
-+ uint32_t events;
-+ uint32_t exceptions;
-+ t_FmMacsecSecYExceptionsCallback *f_Exception; /**< TODO */
-+ t_FmMacsecSecYEventsCallback *f_Event; /**< TODO */
-+ t_Handle h_App;
-+ t_FmMacsecSecYStatistics statistics;
-+ t_FmMacsecSecYDriverParam *p_FmMacsecSecYDriverParam;
-+} t_FmMacsecSecY;
-+
-+
-+#endif /* __FM_MACSEC_SECY_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Makefile
-@@ -0,0 +1,23 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+
-+obj-y += fsl-ncsw-PFM1.o
-+
-+fsl-ncsw-PFM1-objs := fm.o fm_muram.o fman.o
-+
-+obj-y += MAC/
-+obj-y += Pcd/
-+obj-y += SP/
-+obj-y += Port/
-+obj-y += HC/
-+obj-y += Rtc/
-+obj-y += MACSEC/
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/Makefile
-@@ -0,0 +1,26 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+obj-y += fsl-ncsw-Pcd.o
-+
-+fsl-ncsw-Pcd-objs := fman_kg.o fman_prs.o fm_cc.o fm_kg.o fm_pcd.o fm_plcr.o fm_prs.o fm_manip.o
-+
-+ifeq ($(CONFIG_FMAN_V3H),y)
-+fsl-ncsw-Pcd-objs += fm_replic.o
-+endif
-+ifeq ($(CONFIG_FMAN_V3L),y)
-+fsl-ncsw-Pcd-objs += fm_replic.o
-+endif
-+ifeq ($(CONFIG_FMAN_ARM),y)
-+fsl-ncsw-Pcd-objs += fm_replic.o
-+endif
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h
-@@ -0,0 +1,360 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+ /**************************************************************************//**
-+ @File crc64.h
-+
-+ @Description   This file contains the CRC64 table and the __inline__
-+                functions used for calculating the CRC.
-+*//***************************************************************************/
-+#ifndef __CRC64_H
-+#define __CRC64_H
-+
-+#include "std_ext.h"
-+
-+
-+#define BITS_PER_BYTE 8
-+
-+#define CRC64_EXPON_ECMA_182 0xC96C5795D7870F42ULL
-+#define CRC64_DEFAULT_INITVAL 0xFFFFFFFFFFFFFFFFULL
-+
-+#define CRC64_BYTE_MASK 0xFF
-+#define CRC64_TABLE_ENTRIES ( 1 << BITS_PER_BYTE )
-+#define CRC64_ODD_MASK 1
-+
-+
-+/**
-+ \brief '64 bit crc' Table
-+ */
-+struct crc64_t {
-+ uint64_t initial; /**< Initial seed */
-+ uint64_t table[CRC64_TABLE_ENTRIES]; /**< CRC table entries */
-+};
-+
-+
-+static struct crc64_t CRC64_ECMA_182 = {
-+ CRC64_DEFAULT_INITVAL,
-+ {
-+ 0x0000000000000000ULL,
-+ 0xb32e4cbe03a75f6fULL,
-+ 0xf4843657a840a05bULL,
-+ 0x47aa7ae9abe7ff34ULL,
-+ 0x7bd0c384ff8f5e33ULL,
-+ 0xc8fe8f3afc28015cULL,
-+ 0x8f54f5d357cffe68ULL,
-+ 0x3c7ab96d5468a107ULL,
-+ 0xf7a18709ff1ebc66ULL,
-+ 0x448fcbb7fcb9e309ULL,
-+ 0x0325b15e575e1c3dULL,
-+ 0xb00bfde054f94352ULL,
-+ 0x8c71448d0091e255ULL,
-+ 0x3f5f08330336bd3aULL,
-+ 0x78f572daa8d1420eULL,
-+ 0xcbdb3e64ab761d61ULL,
-+ 0x7d9ba13851336649ULL,
-+ 0xceb5ed8652943926ULL,
-+ 0x891f976ff973c612ULL,
-+ 0x3a31dbd1fad4997dULL,
-+ 0x064b62bcaebc387aULL,
-+ 0xb5652e02ad1b6715ULL,
-+ 0xf2cf54eb06fc9821ULL,
-+ 0x41e11855055bc74eULL,
-+ 0x8a3a2631ae2dda2fULL,
-+ 0x39146a8fad8a8540ULL,
-+ 0x7ebe1066066d7a74ULL,
-+ 0xcd905cd805ca251bULL,
-+ 0xf1eae5b551a2841cULL,
-+ 0x42c4a90b5205db73ULL,
-+ 0x056ed3e2f9e22447ULL,
-+ 0xb6409f5cfa457b28ULL,
-+ 0xfb374270a266cc92ULL,
-+ 0x48190ecea1c193fdULL,
-+ 0x0fb374270a266cc9ULL,
-+ 0xbc9d3899098133a6ULL,
-+ 0x80e781f45de992a1ULL,
-+ 0x33c9cd4a5e4ecdceULL,
-+ 0x7463b7a3f5a932faULL,
-+ 0xc74dfb1df60e6d95ULL,
-+ 0x0c96c5795d7870f4ULL,
-+ 0xbfb889c75edf2f9bULL,
-+ 0xf812f32ef538d0afULL,
-+ 0x4b3cbf90f69f8fc0ULL,
-+ 0x774606fda2f72ec7ULL,
-+ 0xc4684a43a15071a8ULL,
-+ 0x83c230aa0ab78e9cULL,
-+ 0x30ec7c140910d1f3ULL,
-+ 0x86ace348f355aadbULL,
-+ 0x3582aff6f0f2f5b4ULL,
-+ 0x7228d51f5b150a80ULL,
-+ 0xc10699a158b255efULL,
-+ 0xfd7c20cc0cdaf4e8ULL,
-+ 0x4e526c720f7dab87ULL,
-+ 0x09f8169ba49a54b3ULL,
-+ 0xbad65a25a73d0bdcULL,
-+ 0x710d64410c4b16bdULL,
-+ 0xc22328ff0fec49d2ULL,
-+ 0x85895216a40bb6e6ULL,
-+ 0x36a71ea8a7ace989ULL,
-+ 0x0adda7c5f3c4488eULL,
-+ 0xb9f3eb7bf06317e1ULL,
-+ 0xfe5991925b84e8d5ULL,
-+ 0x4d77dd2c5823b7baULL,
-+ 0x64b62bcaebc387a1ULL,
-+ 0xd7986774e864d8ceULL,
-+ 0x90321d9d438327faULL,
-+ 0x231c512340247895ULL,
-+ 0x1f66e84e144cd992ULL,
-+ 0xac48a4f017eb86fdULL,
-+ 0xebe2de19bc0c79c9ULL,
-+ 0x58cc92a7bfab26a6ULL,
-+ 0x9317acc314dd3bc7ULL,
-+ 0x2039e07d177a64a8ULL,
-+ 0x67939a94bc9d9b9cULL,
-+ 0xd4bdd62abf3ac4f3ULL,
-+ 0xe8c76f47eb5265f4ULL,
-+ 0x5be923f9e8f53a9bULL,
-+ 0x1c4359104312c5afULL,
-+ 0xaf6d15ae40b59ac0ULL,
-+ 0x192d8af2baf0e1e8ULL,
-+ 0xaa03c64cb957be87ULL,
-+ 0xeda9bca512b041b3ULL,
-+ 0x5e87f01b11171edcULL,
-+ 0x62fd4976457fbfdbULL,
-+ 0xd1d305c846d8e0b4ULL,
-+ 0x96797f21ed3f1f80ULL,
-+ 0x2557339fee9840efULL,
-+ 0xee8c0dfb45ee5d8eULL,
-+ 0x5da24145464902e1ULL,
-+ 0x1a083bacedaefdd5ULL,
-+ 0xa9267712ee09a2baULL,
-+ 0x955cce7fba6103bdULL,
-+ 0x267282c1b9c65cd2ULL,
-+ 0x61d8f8281221a3e6ULL,
-+ 0xd2f6b4961186fc89ULL,
-+ 0x9f8169ba49a54b33ULL,
-+ 0x2caf25044a02145cULL,
-+ 0x6b055fede1e5eb68ULL,
-+ 0xd82b1353e242b407ULL,
-+ 0xe451aa3eb62a1500ULL,
-+ 0x577fe680b58d4a6fULL,
-+ 0x10d59c691e6ab55bULL,
-+ 0xa3fbd0d71dcdea34ULL,
-+ 0x6820eeb3b6bbf755ULL,
-+ 0xdb0ea20db51ca83aULL,
-+ 0x9ca4d8e41efb570eULL,
-+ 0x2f8a945a1d5c0861ULL,
-+ 0x13f02d374934a966ULL,
-+ 0xa0de61894a93f609ULL,
-+ 0xe7741b60e174093dULL,
-+ 0x545a57dee2d35652ULL,
-+ 0xe21ac88218962d7aULL,
-+ 0x5134843c1b317215ULL,
-+ 0x169efed5b0d68d21ULL,
-+ 0xa5b0b26bb371d24eULL,
-+ 0x99ca0b06e7197349ULL,
-+ 0x2ae447b8e4be2c26ULL,
-+ 0x6d4e3d514f59d312ULL,
-+ 0xde6071ef4cfe8c7dULL,
-+ 0x15bb4f8be788911cULL,
-+ 0xa6950335e42fce73ULL,
-+ 0xe13f79dc4fc83147ULL,
-+ 0x521135624c6f6e28ULL,
-+ 0x6e6b8c0f1807cf2fULL,
-+ 0xdd45c0b11ba09040ULL,
-+ 0x9aefba58b0476f74ULL,
-+ 0x29c1f6e6b3e0301bULL,
-+ 0xc96c5795d7870f42ULL,
-+ 0x7a421b2bd420502dULL,
-+ 0x3de861c27fc7af19ULL,
-+ 0x8ec62d7c7c60f076ULL,
-+ 0xb2bc941128085171ULL,
-+ 0x0192d8af2baf0e1eULL,
-+ 0x4638a2468048f12aULL,
-+ 0xf516eef883efae45ULL,
-+ 0x3ecdd09c2899b324ULL,
-+ 0x8de39c222b3eec4bULL,
-+ 0xca49e6cb80d9137fULL,
-+ 0x7967aa75837e4c10ULL,
-+ 0x451d1318d716ed17ULL,
-+ 0xf6335fa6d4b1b278ULL,
-+ 0xb199254f7f564d4cULL,
-+ 0x02b769f17cf11223ULL,
-+ 0xb4f7f6ad86b4690bULL,
-+ 0x07d9ba1385133664ULL,
-+ 0x4073c0fa2ef4c950ULL,
-+ 0xf35d8c442d53963fULL,
-+ 0xcf273529793b3738ULL,
-+ 0x7c0979977a9c6857ULL,
-+ 0x3ba3037ed17b9763ULL,
-+ 0x888d4fc0d2dcc80cULL,
-+ 0x435671a479aad56dULL,
-+ 0xf0783d1a7a0d8a02ULL,
-+ 0xb7d247f3d1ea7536ULL,
-+ 0x04fc0b4dd24d2a59ULL,
-+ 0x3886b22086258b5eULL,
-+ 0x8ba8fe9e8582d431ULL,
-+ 0xcc0284772e652b05ULL,
-+ 0x7f2cc8c92dc2746aULL,
-+ 0x325b15e575e1c3d0ULL,
-+ 0x8175595b76469cbfULL,
-+ 0xc6df23b2dda1638bULL,
-+ 0x75f16f0cde063ce4ULL,
-+ 0x498bd6618a6e9de3ULL,
-+ 0xfaa59adf89c9c28cULL,
-+ 0xbd0fe036222e3db8ULL,
-+ 0x0e21ac88218962d7ULL,
-+ 0xc5fa92ec8aff7fb6ULL,
-+ 0x76d4de52895820d9ULL,
-+ 0x317ea4bb22bfdfedULL,
-+ 0x8250e80521188082ULL,
-+ 0xbe2a516875702185ULL,
-+ 0x0d041dd676d77eeaULL,
-+ 0x4aae673fdd3081deULL,
-+ 0xf9802b81de97deb1ULL,
-+ 0x4fc0b4dd24d2a599ULL,
-+ 0xfceef8632775faf6ULL,
-+ 0xbb44828a8c9205c2ULL,
-+ 0x086ace348f355aadULL,
-+ 0x34107759db5dfbaaULL,
-+ 0x873e3be7d8faa4c5ULL,
-+ 0xc094410e731d5bf1ULL,
-+ 0x73ba0db070ba049eULL,
-+ 0xb86133d4dbcc19ffULL,
-+ 0x0b4f7f6ad86b4690ULL,
-+ 0x4ce50583738cb9a4ULL,
-+ 0xffcb493d702be6cbULL,
-+ 0xc3b1f050244347ccULL,
-+ 0x709fbcee27e418a3ULL,
-+ 0x3735c6078c03e797ULL,
-+ 0x841b8ab98fa4b8f8ULL,
-+ 0xadda7c5f3c4488e3ULL,
-+ 0x1ef430e13fe3d78cULL,
-+ 0x595e4a08940428b8ULL,
-+ 0xea7006b697a377d7ULL,
-+ 0xd60abfdbc3cbd6d0ULL,
-+ 0x6524f365c06c89bfULL,
-+ 0x228e898c6b8b768bULL,
-+ 0x91a0c532682c29e4ULL,
-+ 0x5a7bfb56c35a3485ULL,
-+ 0xe955b7e8c0fd6beaULL,
-+ 0xaeffcd016b1a94deULL,
-+ 0x1dd181bf68bdcbb1ULL,
-+ 0x21ab38d23cd56ab6ULL,
-+ 0x9285746c3f7235d9ULL,
-+ 0xd52f0e859495caedULL,
-+ 0x6601423b97329582ULL,
-+ 0xd041dd676d77eeaaULL,
-+ 0x636f91d96ed0b1c5ULL,
-+ 0x24c5eb30c5374ef1ULL,
-+ 0x97eba78ec690119eULL,
-+ 0xab911ee392f8b099ULL,
-+ 0x18bf525d915feff6ULL,
-+ 0x5f1528b43ab810c2ULL,
-+ 0xec3b640a391f4fadULL,
-+ 0x27e05a6e926952ccULL,
-+ 0x94ce16d091ce0da3ULL,
-+ 0xd3646c393a29f297ULL,
-+ 0x604a2087398eadf8ULL,
-+ 0x5c3099ea6de60cffULL,
-+ 0xef1ed5546e415390ULL,
-+ 0xa8b4afbdc5a6aca4ULL,
-+ 0x1b9ae303c601f3cbULL,
-+ 0x56ed3e2f9e224471ULL,
-+ 0xe5c372919d851b1eULL,
-+ 0xa26908783662e42aULL,
-+ 0x114744c635c5bb45ULL,
-+ 0x2d3dfdab61ad1a42ULL,
-+ 0x9e13b115620a452dULL,
-+ 0xd9b9cbfcc9edba19ULL,
-+ 0x6a978742ca4ae576ULL,
-+ 0xa14cb926613cf817ULL,
-+ 0x1262f598629ba778ULL,
-+ 0x55c88f71c97c584cULL,
-+ 0xe6e6c3cfcadb0723ULL,
-+ 0xda9c7aa29eb3a624ULL,
-+ 0x69b2361c9d14f94bULL,
-+ 0x2e184cf536f3067fULL,
-+ 0x9d36004b35545910ULL,
-+ 0x2b769f17cf112238ULL,
-+ 0x9858d3a9ccb67d57ULL,
-+ 0xdff2a94067518263ULL,
-+ 0x6cdce5fe64f6dd0cULL,
-+ 0x50a65c93309e7c0bULL,
-+ 0xe388102d33392364ULL,
-+ 0xa4226ac498dedc50ULL,
-+ 0x170c267a9b79833fULL,
-+ 0xdcd7181e300f9e5eULL,
-+ 0x6ff954a033a8c131ULL,
-+ 0x28532e49984f3e05ULL,
-+ 0x9b7d62f79be8616aULL,
-+ 0xa707db9acf80c06dULL,
-+ 0x14299724cc279f02ULL,
-+ 0x5383edcd67c06036ULL,
-+ 0xe0ada17364673f59ULL
-+ }
-+};
-+
-+
-+/**
-+ \brief Initializes the crc seed
-+ */
-+static __inline__ uint64_t crc64_init(void)
-+{
-+ return CRC64_ECMA_182.initial;
-+}
-+
-+/**
-+ \brief Computes the 64-bit crc
-+ \param[in] data Pointer to the Data in the frame
-+ \param[in] len Length of the Data
-+ \param[in] crc seed
-+ \return calculated crc
-+ */
-+static __inline__ uint64_t crc64_compute(void const *data,
-+ uint32_t len,
-+ uint64_t seed)
-+{
-+ uint32_t i;
-+ uint64_t crc = seed;
-+ uint8_t *bdata = (uint8_t *) data;
-+
-+ for (i = 0; i < len; i++)
-+ crc =
-+ CRC64_ECMA_182.
-+ table[(crc ^ *bdata++) & CRC64_BYTE_MASK] ^ (crc >> 8);
-+
-+ return crc;
-+}
-+
-+
-+#endif /* __CRC64_H */
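For orientation only, and not part of the removed patch: crc64_compute() above is a plain table-driven CRC that folds one input byte per iteration into the running value seeded by crc64_init(). A minimal standalone usage sketch, assuming the NetCommSw integer types from std_ext.h resolve to their <stdint.h> equivalents and that crc64.h is reachable on the include path:

/* Editor's illustration, not SDK code. */
#include <stdint.h>
#include <stdio.h>
#include "crc64.h"                 /* the header shown in the hunk above */

int main(void)
{
    const uint8_t frame[] = { 0xde, 0xad, 0xbe, 0xef };

    uint64_t crc = crc64_init();                                /* CRC64_DEFAULT_INITVAL */
    crc = crc64_compute(frame, (uint32_t)sizeof(frame), crc);   /* one table lookup per byte */

    printf("crc64 = 0x%016llx\n", (unsigned long long)crc);
    return 0;
}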
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c
-@@ -0,0 +1,7582 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_cc.c
-+
-+ @Description FM Coarse Classifier implementation
-+ *//***************************************************************************/
-+#include <linux/math64.h>
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_muram_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_pcd.h"
-+#include "fm_hc.h"
-+#include "fm_cc.h"
-+#include "crc64.h"
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+
-+
-+static t_Error CcRootTryLock(t_Handle h_FmPcdCcTree)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
-+
-+ ASSERT_COND(h_FmPcdCcTree);
-+
-+ if (FmPcdLockTryLock(p_FmPcdCcTree->p_Lock))
-+ return E_OK;
-+
-+ return ERROR_CODE(E_BUSY);
-+}
-+
-+static void CcRootReleaseLock(t_Handle h_FmPcdCcTree)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
-+
-+ ASSERT_COND(h_FmPcdCcTree);
-+
-+ FmPcdLockUnlock(p_FmPcdCcTree->p_Lock);
-+}
-+
-+static void UpdateNodeOwner(t_FmPcdCcNode *p_CcNode, bool add)
-+{
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_CcNode);
-+
-+ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
-+
-+ if (add)
-+ p_CcNode->owners++;
-+ else
-+ {
-+ ASSERT_COND(p_CcNode->owners);
-+ p_CcNode->owners--;
-+ }
-+
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+}
-+
-+static __inline__ t_FmPcdStatsObj* DequeueStatsObj(t_List *p_List)
-+{
-+ t_FmPcdStatsObj *p_StatsObj = NULL;
-+ t_List *p_Next;
-+
-+ if (!LIST_IsEmpty(p_List))
-+ {
-+ p_Next = LIST_FIRST(p_List);
-+ p_StatsObj = LIST_OBJECT(p_Next, t_FmPcdStatsObj, node);
-+ ASSERT_COND(p_StatsObj);
-+ LIST_DelAndInit(p_Next);
-+ }
-+
-+ return p_StatsObj;
-+}
-+
-+static __inline__ void EnqueueStatsObj(t_List *p_List,
-+ t_FmPcdStatsObj *p_StatsObj)
-+{
-+ LIST_AddToTail(&p_StatsObj->node, p_List);
-+}
-+
-+static void FreeStatObjects(t_List *p_List, t_Handle h_FmMuram)
-+{
-+ t_FmPcdStatsObj *p_StatsObj;
-+
-+ while (!LIST_IsEmpty(p_List))
-+ {
-+ p_StatsObj = DequeueStatsObj(p_List);
-+ ASSERT_COND(p_StatsObj);
-+
-+ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd);
-+ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters);
-+
-+ XX_Free(p_StatsObj);
-+ }
-+}
-+
-+static t_FmPcdStatsObj* GetStatsObj(t_FmPcdCcNode *p_CcNode)
-+{
-+ t_FmPcdStatsObj* p_StatsObj;
-+ t_Handle h_FmMuram;
-+
-+ ASSERT_COND(p_CcNode);
-+
-+    /* If 'maxNumOfKeys' was passed, all statistics objects were preallocated
-+ upon node initialization */
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ p_StatsObj = DequeueStatsObj(&p_CcNode->availableStatsLst);
-+
-+ /* Clean statistics counters & ADs */
-+ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize);
-+ }
-+ else
-+ {
-+ h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram;
-+ ASSERT_COND(h_FmMuram);
-+
-+ p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj));
-+ if (!p_StatsObj)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("statistics object"));
-+ return NULL;
-+ }
-+
-+ p_StatsObj->h_StatsAd = (t_Handle)FM_MURAM_AllocMem(
-+ h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_StatsObj->h_StatsAd)
-+ {
-+ XX_Free(p_StatsObj);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics ADs"));
-+ return NULL;
-+ }
-+ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ p_StatsObj->h_StatsCounters = (t_Handle)FM_MURAM_AllocMem(
-+ h_FmMuram, p_CcNode->countersArraySize,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_StatsObj->h_StatsCounters)
-+ {
-+ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd);
-+ XX_Free(p_StatsObj);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics counters"));
-+ return NULL;
-+ }
-+ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize);
-+ }
-+
-+ return p_StatsObj;
-+}
-+
-+static void PutStatsObj(t_FmPcdCcNode *p_CcNode, t_FmPcdStatsObj *p_StatsObj)
-+{
-+ t_Handle h_FmMuram;
-+
-+ ASSERT_COND(p_CcNode);
-+ ASSERT_COND(p_StatsObj);
-+
-+    /* If 'maxNumOfKeys' was passed, all statistics objects were preallocated
-+ upon node initialization and now will be enqueued back to the list */
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ /* Clean statistics counters */
-+ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize);
-+
-+ /* Clean statistics ADs */
-+ MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj);
-+ }
-+ else
-+ {
-+ h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram;
-+ ASSERT_COND(h_FmMuram);
-+
-+ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd);
-+ FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters);
-+
-+ XX_Free(p_StatsObj);
-+ }
-+}
-+
-+static void SetStatsCounters(t_AdOfTypeStats *p_StatsAd,
-+ uint32_t statsCountersAddr)
-+{
-+ uint32_t tmp = (statsCountersAddr & FM_PCD_AD_STATS_COUNTERS_ADDR_MASK);
-+
-+ WRITE_UINT32(p_StatsAd->statsTableAddr, tmp);
-+}
-+
-+
-+static void UpdateStatsAd(t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
-+ t_Handle h_Ad, uint64_t physicalMuramBase)
-+{
-+ t_AdOfTypeStats *p_StatsAd;
-+ uint32_t statsCountersAddr, nextActionAddr, tmp;
-+#if (DPAA_VERSION >= 11)
-+ uint32_t frameLengthRangesAddr;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ p_StatsAd = (t_AdOfTypeStats *)p_FmPcdCcStatsParams->h_StatsAd;
-+
-+ tmp = FM_PCD_AD_STATS_TYPE;
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_FmPcdCcStatsParams->h_StatsFLRs)
-+ {
-+ frameLengthRangesAddr = (uint32_t)((XX_VirtToPhys(
-+ p_FmPcdCcStatsParams->h_StatsFLRs) - physicalMuramBase));
-+ tmp |= (frameLengthRangesAddr & FM_PCD_AD_STATS_FLR_ADDR_MASK);
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+ WRITE_UINT32(p_StatsAd->profileTableAddr, tmp);
-+
-+ nextActionAddr = (uint32_t)((XX_VirtToPhys(h_Ad) - physicalMuramBase));
-+ tmp = 0;
-+ tmp |= (uint32_t)((nextActionAddr << FM_PCD_AD_STATS_NEXT_ACTION_SHIFT)
-+ & FM_PCD_AD_STATS_NEXT_ACTION_MASK);
-+ tmp |= (FM_PCD_AD_STATS_NAD_EN | FM_PCD_AD_STATS_OP_CODE);
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_FmPcdCcStatsParams->h_StatsFLRs)
-+ tmp |= FM_PCD_AD_STATS_FLR_EN;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ WRITE_UINT32(p_StatsAd->nextActionIndx, tmp);
-+
-+ statsCountersAddr = (uint32_t)((XX_VirtToPhys(
-+ p_FmPcdCcStatsParams->h_StatsCounters) - physicalMuramBase));
-+ SetStatsCounters(p_StatsAd, statsCountersAddr);
-+}
-+
-+static void FillAdOfTypeContLookup(t_Handle h_Ad,
-+ t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
-+ t_Handle h_FmPcd, t_Handle p_CcNode,
-+ t_Handle h_Manip, t_Handle h_FrmReplic)
-+{
-+ t_FmPcdCcNode *p_Node = (t_FmPcdCcNode *)p_CcNode;
-+ t_AdOfTypeContLookup *p_AdContLookup = (t_AdOfTypeContLookup *)h_Ad;
-+ t_Handle h_TmpAd;
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t tmpReg32;
-+ t_Handle p_AdNewPtr = NULL;
-+
-+ UNUSED(h_Manip);
-+ UNUSED(h_FrmReplic);
-+
-+ /* there are 3 cases handled in this routine of building a "Continue lookup" type AD.
-+ * Case 1: No Manip. The action descriptor is built within the match table.
-+ * p_AdResult = p_AdNewPtr;
-+ * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized
-+ * either in the FmPcdManipUpdateAdResultForCc routine or it was already
-+ * initialized and returned here.
-+ * p_AdResult (within the match table) will be initialized after
-+ * this routine returns and point to the existing AD.
-+ * Case 3: Manip exists. The action descriptor is built within the match table.
-+ * FmPcdManipUpdateAdContLookupForCc returns a NULL p_AdNewPtr.
-+ */
-+
-+ /* As default, the "new" ptr is the current one. i.e. the content of the result
-+ * AD will be written into the match table itself (case (1))*/
-+ p_AdNewPtr = p_AdContLookup;
-+
-+ /* Initialize an action descriptor, if current statistics mode requires an Ad */
-+ if (p_FmPcdCcStatsParams)
-+ {
-+ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd);
-+ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters);
-+
-+ /* Swapping addresses between statistics Ad and the current lookup AD */
-+ h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd;
-+ p_FmPcdCcStatsParams->h_StatsAd = h_Ad;
-+ h_Ad = h_TmpAd;
-+
-+ p_AdNewPtr = h_Ad;
-+ p_AdContLookup = h_Ad;
-+
-+ /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */
-+ UpdateStatsAd(p_FmPcdCcStatsParams, h_Ad, p_FmPcd->physicalMuramBase);
-+ }
-+
-+#if DPAA_VERSION >= 11
-+ if (h_Manip && h_FrmReplic)
-+ FmPcdManipUpdateAdContLookupForCc(
-+ h_Manip,
-+ h_Ad,
-+ &p_AdNewPtr,
-+ (uint32_t)((XX_VirtToPhys(
-+ FrmReplicGroupGetSourceTableDescriptor(h_FrmReplic))
-+ - p_FmPcd->physicalMuramBase)));
-+ else
-+ if (h_FrmReplic)
-+ FrmReplicGroupUpdateAd(h_FrmReplic, h_Ad, &p_AdNewPtr);
-+ else
-+#endif /* (DPAA_VERSION >= 11) */
-+ if (h_Manip)
-+ FmPcdManipUpdateAdContLookupForCc(
-+ h_Manip,
-+ h_Ad,
-+ &p_AdNewPtr,
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+ /*no check for opcode of manip - this step can be reached only with capwap_applic_specific*/
-+ (uint32_t)((XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase))
-+#else /* not FM_CAPWAP_SUPPORT */
-+ (uint32_t)((XX_VirtToPhys(p_Node->h_Ad)
-+ - p_FmPcd->physicalMuramBase))
-+#endif /* not FM_CAPWAP_SUPPORT */
-+ );
-+
-+ /* if (p_AdNewPtr = NULL) --> Done. (case (3)) */
-+ if (p_AdNewPtr)
-+ {
-+ /* cases (1) & (2) */
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ tmpReg32 |=
-+ p_Node->sizeOfExtraction ? ((p_Node->sizeOfExtraction - 1) << 24) :
-+ 0;
-+ tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Node->h_AdTable)
-+ - p_FmPcd->physicalMuramBase);
-+ WRITE_UINT32(p_AdContLookup->ccAdBase, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= p_Node->numOfKeys << 24;
-+ tmpReg32 |= (p_Node->lclMask ? FM_PCD_AD_CONT_LOOKUP_LCL_MASK : 0);
-+ tmpReg32 |=
-+ p_Node->h_KeysMatchTable ? (uint32_t)(XX_VirtToPhys(
-+ p_Node->h_KeysMatchTable) - p_FmPcd->physicalMuramBase) :
-+ 0;
-+ WRITE_UINT32(p_AdContLookup->matchTblPtr, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= p_Node->prsArrayOffset << 24;
-+ tmpReg32 |= p_Node->offset << 16;
-+ tmpReg32 |= p_Node->parseCode;
-+ WRITE_UINT32(p_AdContLookup->pcAndOffsets, tmpReg32);
-+
-+ MemCpy8((void*)&p_AdContLookup->gmask, p_Node->p_GlblMask,
-+ CC_GLBL_MASK_SIZE);
-+ }
-+}
-+
-+static t_Error AllocAndFillAdForContLookupManip(t_Handle h_CcNode)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_CcNode);
-+
-+ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
-+
-+ if (!p_CcNode->h_Ad)
-+ {
-+ if (p_CcNode->maxNumOfKeys)
-+ p_CcNode->h_Ad = p_CcNode->h_TmpAd;
-+ else
-+ p_CcNode->h_Ad = (t_Handle)FM_MURAM_AllocMem(
-+ ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram,
-+ FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN);
-+
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+
-+ if (!p_CcNode->h_Ad)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC action descriptor"));
-+
-+ MemSet8(p_CcNode->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ FillAdOfTypeContLookup(p_CcNode->h_Ad, NULL, p_CcNode->h_FmPcd,
-+ p_CcNode, NULL, NULL);
-+ }
-+ else
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+
-+ return E_OK;
-+}
-+
-+static t_Error SetRequiredAction1(
-+ t_Handle h_FmPcd, uint32_t requiredAction,
-+ t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParamsTmp,
-+ t_Handle h_AdTmp, uint16_t numOfEntries, t_Handle h_Tree)
-+{
-+ t_AdOfTypeResult *p_AdTmp = (t_AdOfTypeResult *)h_AdTmp;
-+ uint32_t tmpReg32;
-+ t_Error err;
-+ t_FmPcdCcNode *p_CcNode;
-+ int i = 0;
-+ uint16_t tmp = 0;
-+ uint16_t profileId;
-+ uint8_t relativeSchemeId, physicalSchemeId;
-+ t_CcNodeInformation ccNodeInfo;
-+
-+ for (i = 0; i < numOfEntries; i++)
-+ {
-+ if (i == 0)
-+ h_AdTmp = PTR_MOVE(h_AdTmp, i*FM_PCD_CC_AD_ENTRY_SIZE);
-+ else
-+ h_AdTmp = PTR_MOVE(h_AdTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ switch (p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.nextEngine)
-+ {
-+ case (e_FM_PCD_CC):
-+ if (requiredAction)
-+ {
-+ p_CcNode =
-+ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.ccParams.h_CcNode;
-+ ASSERT_COND(p_CcNode);
-+ if (p_CcNode->shadowAction == requiredAction)
-+ break;
-+ if ((requiredAction & UPDATE_CC_WITH_TREE)
-+ && !(p_CcNode->shadowAction & UPDATE_CC_WITH_TREE))
-+ {
-+
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = h_Tree;
-+ EnqueueNodeInfoToRelevantLst(&p_CcNode->ccTreesLst,
-+ &ccNodeInfo, NULL);
-+ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
-+ UPDATE_CC_WITH_TREE;
-+ }
-+ if ((requiredAction & UPDATE_CC_SHADOW_CLEAR)
-+ && !(p_CcNode->shadowAction & UPDATE_CC_SHADOW_CLEAR))
-+ {
-+
-+ p_CcNode->shadowAction = 0;
-+ }
-+
-+ if ((requiredAction & UPDATE_CC_WITH_DELETE_TREE)
-+ && !(p_CcNode->shadowAction
-+ & UPDATE_CC_WITH_DELETE_TREE))
-+ {
-+ DequeueNodeInfoFromRelevantLst(&p_CcNode->ccTreesLst,
-+ h_Tree, NULL);
-+ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
-+ UPDATE_CC_WITH_DELETE_TREE;
-+ }
-+ if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
-+ != e_FM_PCD_INVALID)
-+ tmp = (uint8_t)(p_CcNode->numOfKeys + 1);
-+ else
-+ tmp = p_CcNode->numOfKeys;
-+ err = SetRequiredAction1(h_FmPcd, requiredAction,
-+ p_CcNode->keyAndNextEngineParams,
-+ p_CcNode->h_AdTable, tmp, h_Tree);
-+ if (err != E_OK)
-+ return err;
-+ if (requiredAction != UPDATE_CC_SHADOW_CLEAR)
-+ p_CcNode->shadowAction |= requiredAction;
-+ }
-+ break;
-+
-+ case (e_FM_PCD_KG):
-+ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
-+ && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction
-+ & UPDATE_NIA_ENQ_WITHOUT_DMA))
-+ {
-+ physicalSchemeId =
-+ FmPcdKgGetSchemeId(
-+ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme);
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(
-+ h_FmPcd, physicalSchemeId);
-+ if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+ if (!FmPcdKgIsSchemeValidSw(
-+ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Invalid direct scheme."));
-+ if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+ ("For this action scheme has to be direct."));
-+ err =
-+ FmPcdKgCcGetSetParams(
-+ h_FmPcd,
-+ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme,
-+ requiredAction, 0);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
-+ requiredAction;
-+ }
-+ break;
-+
-+ case (e_FM_PCD_PLCR):
-+ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
-+ && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction
-+ & UPDATE_NIA_ENQ_WITHOUT_DMA))
-+ {
-+ if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.overrideParams)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NOT_SUPPORTED,
-+                            ("In this initialization, only overrideFqid can be set"));
-+ if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.sharedProfile)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NOT_SUPPORTED,
-+                            ("In this initialization, only overrideFqid can be set"));
-+ err =
-+ FmPcdPlcrGetAbsoluteIdByProfileParams(
-+ h_FmPcd,
-+ e_FM_PCD_PLCR_SHARED,
-+ NULL,
-+ p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.newRelativeProfileId,
-+ &profileId);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ err = FmPcdPlcrCcGetSetParams(h_FmPcd, profileId,
-+ requiredAction);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
-+ requiredAction;
-+ }
-+ break;
-+
-+ case (e_FM_PCD_DONE):
-+ if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
-+ && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction
-+ & UPDATE_NIA_ENQ_WITHOUT_DMA))
-+ {
-+ tmpReg32 = GET_UINT32(p_AdTmp->nia);
-+ if ((tmpReg32 & GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd))
-+ != GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+                                ("Next engine was not previously assigned as PCD_DONE"));
-+ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+ WRITE_UINT32(p_AdTmp->nia, tmpReg32);
-+ p_CcKeyAndNextEngineParamsTmp[i].shadowAction |=
-+ requiredAction;
-+ }
-+ break;
-+
-+ default:
-+ break;
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
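-+/* SetRequiredAction() runs two passes over the same entries (sketch of the flow, based on
-+ * the shadowAction handling above):
-+ *   1. SetRequiredAction1(requiredAction)         - applies the action recursively and marks
-+ *                                                   visited CC nodes through shadowAction;
-+ *   2. SetRequiredAction1(UPDATE_CC_SHADOW_CLEAR) - clears those markers so that later
-+ *                                                   updates can traverse the nodes again. */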
-+static t_Error SetRequiredAction(
-+ t_Handle h_FmPcd, uint32_t requiredAction,
-+ t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParamsTmp,
-+ t_Handle h_AdTmp, uint16_t numOfEntries, t_Handle h_Tree)
-+{
-+ t_Error err = SetRequiredAction1(h_FmPcd, requiredAction,
-+ p_CcKeyAndNextEngineParamsTmp, h_AdTmp,
-+ numOfEntries, h_Tree);
-+ if (err != E_OK)
-+ return err;
-+ return SetRequiredAction1(h_FmPcd, UPDATE_CC_SHADOW_CLEAR,
-+ p_CcKeyAndNextEngineParamsTmp, h_AdTmp,
-+ numOfEntries, h_Tree);
-+}
-+
-+static t_Error ReleaseModifiedDataStructure(
-+ t_Handle h_FmPcd, t_List *h_FmPcdOldPointersLst,
-+ t_List *h_FmPcdNewPointersLst,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams,
-+ bool useShadowStructs)
-+{
-+ t_List *p_Pos;
-+ t_Error err = E_OK;
-+ t_CcNodeInformation ccNodeInfo, *p_CcNodeInformation;
-+ t_Handle h_Muram;
-+ t_FmPcdCcNode *p_FmPcdCcNextNode, *p_FmPcdCcWorkingOnNode;
-+ t_List *p_UpdateLst;
-+ uint32_t intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_AdditionalParams->h_CurrentNode,
-+ E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcdOldPointersLst, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcdNewPointersLst, E_INVALID_HANDLE);
-+
-+    /* We don't update the subtree of the new node with the new tree, because that was done in the previous stage */
-+ if (p_AdditionalParams->h_NodeForAdd)
-+ {
-+ p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForAdd;
-+
-+ if (!p_AdditionalParams->tree)
-+ p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst;
-+ else
-+ p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst;
-+
-+ p_CcNodeInformation = FindNodeInfoInReleventLst(
-+ p_UpdateLst, p_AdditionalParams->h_CurrentNode,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+
-+ if (p_CcNodeInformation)
-+ p_CcNodeInformation->index++;
-+ else
-+ {
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = (t_Handle)p_AdditionalParams->h_CurrentNode;
-+ ccNodeInfo.index = 1;
-+ EnqueueNodeInfoToRelevantLst(p_UpdateLst, &ccNodeInfo,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+ }
-+ if (p_AdditionalParams->h_ManipForAdd)
-+ {
-+ p_CcNodeInformation = FindNodeInfoInReleventLst(
-+ FmPcdManipGetNodeLstPointedOnThisManip(
-+ p_AdditionalParams->h_ManipForAdd),
-+ p_AdditionalParams->h_CurrentNode,
-+ FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForAdd));
-+
-+ if (p_CcNodeInformation)
-+ p_CcNodeInformation->index++;
-+ else
-+ {
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode =
-+ (t_Handle)p_AdditionalParams->h_CurrentNode;
-+ ccNodeInfo.index = 1;
-+ EnqueueNodeInfoToRelevantLst(
-+ FmPcdManipGetNodeLstPointedOnThisManip(
-+ p_AdditionalParams->h_ManipForAdd),
-+ &ccNodeInfo,
-+ FmPcdManipGetSpinlock(
-+ p_AdditionalParams->h_ManipForAdd));
-+ }
-+ }
-+ }
-+
-+ if (p_AdditionalParams->h_NodeForRmv)
-+ {
-+ p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForRmv;
-+
-+ if (!p_AdditionalParams->tree)
-+ {
-+ p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst;
-+ p_FmPcdCcWorkingOnNode =
-+ (t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode);
-+
-+ for (p_Pos = LIST_FIRST(&p_FmPcdCcWorkingOnNode->ccTreesLst);
-+ p_Pos != (&p_FmPcdCcWorkingOnNode->ccTreesLst); p_Pos =
-+ LIST_NEXT(p_Pos))
-+ {
-+ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
-+
-+ ASSERT_COND(p_CcNodeInformation->h_CcNode);
-+
-+ err =
-+ SetRequiredAction(
-+ h_FmPcd,
-+ UPDATE_CC_WITH_DELETE_TREE,
-+ &((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex],
-+ PTR_MOVE(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable, p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
-+ 1, p_CcNodeInformation->h_CcNode);
-+ }
-+ }
-+ else
-+ {
-+ p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst;
-+
-+ err =
-+ SetRequiredAction(
-+ h_FmPcd,
-+ UPDATE_CC_WITH_DELETE_TREE,
-+ &((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex],
-+ UINT_TO_PTR(((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->ccTreeBaseAddr + p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
-+ 1, p_AdditionalParams->h_CurrentNode);
-+ }
-+ if (err)
-+ return err;
-+
-+        /* Remove the tree from the subtree of the removed node, since it wasn't done in the previous stage.
-+         Update ccPrevNodesLst or ccTreeIdLst of the removed node,
-+         and update the node owner. */
-+ p_CcNodeInformation = FindNodeInfoInReleventLst(
-+ p_UpdateLst, p_AdditionalParams->h_CurrentNode,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+
-+ ASSERT_COND(p_CcNodeInformation);
-+ ASSERT_COND(p_CcNodeInformation->index);
-+
-+ p_CcNodeInformation->index--;
-+
-+ if (p_CcNodeInformation->index == 0)
-+ DequeueNodeInfoFromRelevantLst(p_UpdateLst,
-+ p_AdditionalParams->h_CurrentNode,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+
-+ UpdateNodeOwner(p_FmPcdCcNextNode, FALSE);
-+
-+ if (p_AdditionalParams->h_ManipForRmv)
-+ {
-+ p_CcNodeInformation = FindNodeInfoInReleventLst(
-+ FmPcdManipGetNodeLstPointedOnThisManip(
-+ p_AdditionalParams->h_ManipForRmv),
-+ p_AdditionalParams->h_CurrentNode,
-+ FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForRmv));
-+
-+ ASSERT_COND(p_CcNodeInformation);
-+ ASSERT_COND(p_CcNodeInformation->index);
-+
-+ p_CcNodeInformation->index--;
-+
-+ if (p_CcNodeInformation->index == 0)
-+ DequeueNodeInfoFromRelevantLst(
-+ FmPcdManipGetNodeLstPointedOnThisManip(
-+ p_AdditionalParams->h_ManipForRmv),
-+ p_AdditionalParams->h_CurrentNode,
-+ FmPcdManipGetSpinlock(
-+ p_AdditionalParams->h_ManipForRmv));
-+ }
-+ }
-+
-+ if (p_AdditionalParams->h_ManipForRmv)
-+ FmPcdManipUpdateOwner(p_AdditionalParams->h_ManipForRmv, FALSE);
-+
-+ if (p_AdditionalParams->p_StatsObjForRmv)
-+ PutStatsObj((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode),
-+ p_AdditionalParams->p_StatsObjForRmv);
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_AdditionalParams->h_FrmReplicForRmv)
-+ FrmReplicGroupUpdateOwner(p_AdditionalParams->h_FrmReplicForRmv,
-+ FALSE/* remove */);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (!useShadowStructs)
-+ {
-+ h_Muram = FmPcdGetMuramHandle(h_FmPcd);
-+ ASSERT_COND(h_Muram);
-+
-+ if ((p_AdditionalParams->tree && !((t_FmPcd *)h_FmPcd)->p_CcShadow)
-+ || (!p_AdditionalParams->tree
-+ && !((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->maxNumOfKeys))
-+ {
-+            /* Release the new AD that was allocated and updated only to be copied into the actual AD */
-+ for (p_Pos = LIST_FIRST(h_FmPcdNewPointersLst);
-+ p_Pos != (h_FmPcdNewPointersLst); p_Pos = LIST_NEXT(p_Pos))
-+ {
-+
-+ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
-+ ASSERT_COND(p_CcNodeInformation->h_CcNode);
-+ FM_MURAM_FreeMem(h_Muram, p_CcNodeInformation->h_CcNode);
-+ }
-+ }
-+
-+        /* Free the old data structures if required - new data structures were allocated */
-+ if (p_AdditionalParams->p_AdTableOld)
-+ FM_MURAM_FreeMem(h_Muram, p_AdditionalParams->p_AdTableOld);
-+
-+ if (p_AdditionalParams->p_KeysMatchTableOld)
-+ FM_MURAM_FreeMem(h_Muram, p_AdditionalParams->p_KeysMatchTableOld);
-+ }
-+
-+ /* Update current modified node with changed fields if it's required*/
-+ if (!p_AdditionalParams->tree)
-+ {
-+ if (p_AdditionalParams->p_AdTableNew)
-+ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable =
-+ p_AdditionalParams->p_AdTableNew;
-+
-+ if (p_AdditionalParams->p_KeysMatchTableNew)
-+ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_KeysMatchTable =
-+ p_AdditionalParams->p_KeysMatchTableNew;
-+
-+        /* Lock the node's spinlock before updating the 'keys and next engine' structure,
-+         as it may be used to retrieve key statistics */
-+ intFlags =
-+ XX_LockIntrSpinlock(
-+ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock);
-+
-+ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->numOfKeys =
-+ p_AdditionalParams->numOfKeys;
-+
-+ memcpy(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams,
-+ &p_AdditionalParams->keyAndNextEngineParams,
-+ sizeof(t_FmPcdCcKeyAndNextEngineParams) * (CC_MAX_NUM_OF_KEYS));
-+
-+ XX_UnlockIntrSpinlock(
-+ ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock,
-+ intFlags);
-+ }
-+ else
-+ {
-+ uint8_t numEntries =
-+ ((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->numOfEntries;
-+ ASSERT_COND(numEntries < FM_PCD_MAX_NUM_OF_CC_GROUPS);
-+ memcpy(&((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams,
-+ &p_AdditionalParams->keyAndNextEngineParams,
-+ sizeof(t_FmPcdCcKeyAndNextEngineParams) * numEntries);
-+ }
-+
-+ ReleaseLst(h_FmPcdOldPointersLst);
-+ ReleaseLst(h_FmPcdNewPointersLst);
-+
-+ XX_Free(p_AdditionalParams);
-+
-+ return E_OK;
-+}
-+
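-+/* BuildNewAd() fills h_Ad as a 'continue lookup' descriptor pointing at the new keys-match
-+ * and AD tables; the temporary node object is used only as a parameter carrier for
-+ * FillAdOfTypeContLookup() and is freed before returning. */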
-+static t_Handle BuildNewAd(
-+ t_Handle h_Ad,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams,
-+ t_FmPcdCcNode *p_CcNode,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcNode *p_FmPcdCcNodeTmp;
-+ t_Handle h_OrigAd = NULL;
-+
-+ p_FmPcdCcNodeTmp = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode));
-+ if (!p_FmPcdCcNodeTmp)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcNodeTmp"));
-+ return NULL;
-+ }
-+ memset(p_FmPcdCcNodeTmp, 0, sizeof(t_FmPcdCcNode));
-+
-+ p_FmPcdCcNodeTmp->numOfKeys = p_FmPcdModifyCcKeyAdditionalParams->numOfKeys;
-+ p_FmPcdCcNodeTmp->h_KeysMatchTable =
-+ p_FmPcdModifyCcKeyAdditionalParams->p_KeysMatchTableNew;
-+ p_FmPcdCcNodeTmp->h_AdTable =
-+ p_FmPcdModifyCcKeyAdditionalParams->p_AdTableNew;
-+
-+ p_FmPcdCcNodeTmp->lclMask = p_CcNode->lclMask;
-+ p_FmPcdCcNodeTmp->parseCode = p_CcNode->parseCode;
-+ p_FmPcdCcNodeTmp->offset = p_CcNode->offset;
-+ p_FmPcdCcNodeTmp->prsArrayOffset = p_CcNode->prsArrayOffset;
-+ p_FmPcdCcNodeTmp->ctrlFlow = p_CcNode->ctrlFlow;
-+ p_FmPcdCcNodeTmp->ccKeySizeAccExtraction = p_CcNode->ccKeySizeAccExtraction;
-+ p_FmPcdCcNodeTmp->sizeOfExtraction = p_CcNode->sizeOfExtraction;
-+ p_FmPcdCcNodeTmp->glblMaskSize = p_CcNode->glblMaskSize;
-+ p_FmPcdCcNodeTmp->p_GlblMask = p_CcNode->p_GlblMask;
-+
-+ if (p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC)
-+ {
-+ if (p_FmPcdCcNextEngineParams->h_Manip)
-+ {
-+ h_OrigAd = p_CcNode->h_Ad;
-+ if (AllocAndFillAdForContLookupManip(
-+ p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)
-+ != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+ XX_Free(p_FmPcdCcNodeTmp);
-+ return NULL;
-+ }
-+ }
-+ FillAdOfTypeContLookup(h_Ad, NULL, p_CcNode->h_FmPcd, p_FmPcdCcNodeTmp,
-+ h_OrigAd ? NULL : p_FmPcdCcNextEngineParams->h_Manip, NULL);
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_FR)
-+ && (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic))
-+ {
-+ FillAdOfTypeContLookup(
-+ h_Ad, NULL, p_CcNode->h_FmPcd, p_FmPcdCcNodeTmp,
-+ p_FmPcdCcNextEngineParams->h_Manip,
-+ p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic);
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ XX_Free(p_FmPcdCcNodeTmp);
-+
-+    return h_Ad;
-+}
-+
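-+/* DynamicChangeHc(): the single new AD (first entry of h_NewPointersLst) is copied, via
-+ * host command, over every old AD listed in h_OldPointersLst; on any failure the modified
-+ * data structures are released. */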
-+static t_Error DynamicChangeHc(
-+ t_Handle h_FmPcd, t_List *h_OldPointersLst, t_List *h_NewPointersLst,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams,
-+ bool useShadowStructs)
-+{
-+ t_List *p_PosOld, *p_PosNew;
-+ uint32_t oldAdAddrOffset, newAdAddrOffset;
-+ uint16_t i = 0;
-+ t_Error err = E_OK;
-+ uint8_t numOfModifiedPtr;
-+
-+ ASSERT_COND(h_FmPcd);
-+ ASSERT_COND(h_OldPointersLst);
-+ ASSERT_COND(h_NewPointersLst);
-+
-+ numOfModifiedPtr = (uint8_t)LIST_NumOfObjs(h_OldPointersLst);
-+
-+ if (numOfModifiedPtr)
-+ {
-+ p_PosNew = LIST_FIRST(h_NewPointersLst);
-+ p_PosOld = LIST_FIRST(h_OldPointersLst);
-+
-+ /* Retrieve address of new AD */
-+ newAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd,
-+ p_PosNew);
-+ if (newAdAddrOffset == (uint32_t)ILLEGAL_BASE)
-+ {
-+ ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
-+ h_NewPointersLst,
-+ p_AdditionalParams, useShadowStructs);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("New AD address"));
-+ }
-+
-+ for (i = 0; i < numOfModifiedPtr; i++)
-+ {
-+ /* Retrieve address of current AD */
-+ oldAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd,
-+ p_PosOld);
-+ if (oldAdAddrOffset == (uint32_t)ILLEGAL_BASE)
-+ {
-+ ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
-+ h_NewPointersLst,
-+ p_AdditionalParams,
-+ useShadowStructs);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Old AD address"));
-+ }
-+
-+ /* Invoke host command to copy from new AD to old AD */
-+ err = FmHcPcdCcDoDynamicChange(((t_FmPcd *)h_FmPcd)->h_Hc,
-+ oldAdAddrOffset, newAdAddrOffset);
-+ if (err)
-+ {
-+ ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
-+ h_NewPointersLst,
-+ p_AdditionalParams,
-+ useShadowStructs);
-+ RETURN_ERROR(
-+ MAJOR,
-+ err,
-+                        ("Changes were applied to only some of the nodes - inconsistent state"));
-+ }
-+
-+ p_PosOld = LIST_NEXT(p_PosOld);
-+ }
-+ }
-+ return E_OK;
-+}
-+
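-+/* DoDynamicChange() with useShadowStructs (sketch of the two-stage update done below):
-+ *   1. a host command copies the new AD (pointing at the shadow tables) over the old ADs;
-+ *   2. the original MURAM tables are refreshed from the shadow copies, a new AD pointing
-+ *      back at the refreshed original tables is built in the same slot, and a second host
-+ *      command copy moves the lookups off the shadow structures again. */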
-+static t_Error DoDynamicChange(
-+ t_Handle h_FmPcd, t_List *h_OldPointersLst, t_List *h_NewPointersLst,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams,
-+ bool useShadowStructs)
-+{
-+ t_FmPcdCcNode *p_CcNode =
-+ (t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode);
-+ t_List *p_PosNew;
-+ t_CcNodeInformation *p_CcNodeInfo;
-+ t_FmPcdCcNextEngineParams nextEngineParams;
-+ t_Handle h_Ad;
-+ uint32_t keySize;
-+ t_Error err = E_OK;
-+ uint8_t numOfModifiedPtr;
-+
-+ ASSERT_COND(h_FmPcd);
-+
-+ memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
-+
-+ numOfModifiedPtr = (uint8_t)LIST_NumOfObjs(h_OldPointersLst);
-+
-+ if (numOfModifiedPtr)
-+ {
-+
-+ p_PosNew = LIST_FIRST(h_NewPointersLst);
-+
-+ /* Invoke host-command to copy from the new Ad to existing Ads */
-+ err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst,
-+ p_AdditionalParams, useShadowStructs);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (useShadowStructs)
-+ {
-+            /* When the host command above has completed, the old structures are 'free' and we can
-+             update them by copying from the new shadow structures. */
-+ if (p_CcNode->lclMask)
-+ keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction);
-+ else
-+ keySize = p_CcNode->ccKeySizeAccExtraction;
-+
-+ MemCpy8(p_AdditionalParams->p_KeysMatchTableOld,
-+ p_AdditionalParams->p_KeysMatchTableNew,
-+ p_CcNode->maxNumOfKeys * keySize * sizeof(uint8_t));
-+
-+ MemCpy8(
-+ p_AdditionalParams->p_AdTableOld,
-+ p_AdditionalParams->p_AdTableNew,
-+ (uint32_t)((p_CcNode->maxNumOfKeys + 1)
-+ * FM_PCD_CC_AD_ENTRY_SIZE));
-+
-+ /* Retrieve the address of the allocated Ad */
-+ p_CcNodeInfo = CC_NODE_F_OBJECT(p_PosNew);
-+ h_Ad = p_CcNodeInfo->h_CcNode;
-+
-+ /* Build a new Ad that holds the old (now updated) structures */
-+ p_AdditionalParams->p_KeysMatchTableNew =
-+ p_AdditionalParams->p_KeysMatchTableOld;
-+ p_AdditionalParams->p_AdTableNew = p_AdditionalParams->p_AdTableOld;
-+
-+ nextEngineParams.nextEngine = e_FM_PCD_CC;
-+ nextEngineParams.params.ccParams.h_CcNode = (t_Handle)p_CcNode;
-+
-+ BuildNewAd(h_Ad, p_AdditionalParams, p_CcNode, &nextEngineParams);
-+
-+            /* Host command to copy from the new AD (now pointing at the updated old structures) over the current ADs (which point at the shadow structures) */
-+ err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst,
-+ p_AdditionalParams, useShadowStructs);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+
-+ err = ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst,
-+ h_NewPointersLst,
-+ p_AdditionalParams, useShadowStructs);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+static bool IsCapwapApplSpecific(t_Handle h_Node)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_Node;
-+ bool isManipForCapwapApplSpecificBuild = FALSE;
-+ int i = 0;
-+
-+ ASSERT_COND(h_Node);
-+    /* Assumption: this function is called only for INDEXED_FLOW_ID, so there is no miss entry */
-+ for (i = 0; i < p_CcNode->numOfKeys; i++)
-+ {
-+ if ( p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip &&
-+ FmPcdManipIsCapwapApplSpecific(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip))
-+ {
-+ isManipForCapwapApplSpecificBuild = TRUE;
-+ break;
-+ }
-+ }
-+ return isManipForCapwapApplSpecificBuild;
-+
-+}
-+#endif /* FM_CAPWAP_SUPPORT */
-+
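-+/* CcUpdateParam() walks a keys/next-engine array recursively: the tree's reassembly manips
-+ * and each key's manip are updated, and every e_FM_PCD_CC next engine recurses into its
-+ * node's own table, with h_Ad advancing one FM_PCD_CC_AD_ENTRY_SIZE entry per key. */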
-+static t_Error CcUpdateParam(
-+ t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_FmPort,
-+ t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParams,
-+ uint16_t numOfEntries, t_Handle h_Ad, bool validate, uint16_t level,
-+ t_Handle h_FmTree, bool modify)
-+{
-+ t_FmPcdCcNode *p_CcNode;
-+ t_Error err;
-+ uint16_t tmp = 0;
-+ int i = 0;
-+ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_FmTree;
-+
-+ level++;
-+
-+ if (p_CcTree->h_IpReassemblyManip)
-+ {
-+ err = FmPcdManipUpdate(h_FmPcd, h_PcdParams, h_FmPort,
-+ p_CcTree->h_IpReassemblyManip, NULL, validate,
-+ level, h_FmTree, modify);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (p_CcTree->h_CapwapReassemblyManip)
-+ {
-+ err = FmPcdManipUpdate(h_FmPcd, h_PcdParams, h_FmPort,
-+ p_CcTree->h_CapwapReassemblyManip, NULL, validate,
-+ level, h_FmTree, modify);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (numOfEntries)
-+ {
-+ for (i = 0; i < numOfEntries; i++)
-+ {
-+ if (i == 0)
-+ h_Ad = PTR_MOVE(h_Ad, i*FM_PCD_CC_AD_ENTRY_SIZE);
-+ else
-+ h_Ad = PTR_MOVE(h_Ad, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ if (p_CcKeyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ {
-+ p_CcNode =
-+ p_CcKeyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
-+ ASSERT_COND(p_CcNode);
-+
-+ if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ FmPcdManipUpdate(
-+ h_FmPcd,
-+ NULL,
-+ h_FmPort,
-+ p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip,
-+ h_Ad, validate, level, h_FmTree, modify);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
-+ != e_FM_PCD_INVALID)
-+                    tmp = (uint16_t)(p_CcNode->numOfKeys + 1);
-+ else
-+ tmp = p_CcNode->numOfKeys;
-+
-+ err = CcUpdateParam(h_FmPcd, h_PcdParams, h_FmPort,
-+ p_CcNode->keyAndNextEngineParams, tmp,
-+ p_CcNode->h_AdTable, validate, level,
-+ h_FmTree, modify);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ else
-+ {
-+ if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ FmPcdManipUpdate(
-+ h_FmPcd,
-+ NULL,
-+ h_FmPort,
-+ p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip,
-+ h_Ad, validate, level, h_FmTree, modify);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+static ccPrivateInfo_t IcDefineCode(t_FmPcdCcNodeParams *p_CcNodeParam)
-+{
-+ switch (p_CcNodeParam->extractCcParams.extractNonHdr.action)
-+ {
-+ case (e_FM_PCD_ACTION_EXACT_MATCH):
-+ switch (p_CcNodeParam->extractCcParams.extractNonHdr.src)
-+ {
-+ case (e_FM_PCD_EXTRACT_FROM_KEY):
-+ return CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH;
-+ case (e_FM_PCD_EXTRACT_FROM_HASH):
-+ return CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH;
-+ default:
-+ return CC_PRIVATE_INFO_NONE;
-+ }
-+
-+ case (e_FM_PCD_ACTION_INDEXED_LOOKUP):
-+ switch (p_CcNodeParam->extractCcParams.extractNonHdr.src)
-+ {
-+ case (e_FM_PCD_EXTRACT_FROM_HASH):
-+ return CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP;
-+ case (e_FM_PCD_EXTRACT_FROM_FLOW_ID):
-+ return CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP;
-+ default:
-+ return CC_PRIVATE_INFO_NONE;
-+ }
-+
-+ default:
-+ break;
-+ }
-+
-+ return CC_PRIVATE_INFO_NONE;
-+}
-+
-+static t_CcNodeInformation * DequeueAdditionalInfoFromRelevantLst(
-+ t_List *p_List)
-+{
-+ t_CcNodeInformation *p_CcNodeInfo = NULL;
-+
-+ if (!LIST_IsEmpty(p_List))
-+ {
-+ p_CcNodeInfo = CC_NODE_F_OBJECT(p_List->p_Next);
-+ LIST_DelAndInit(&p_CcNodeInfo->node);
-+ }
-+
-+ return p_CcNodeInfo;
-+}
-+
-+void ReleaseLst(t_List *p_List)
-+{
-+ t_CcNodeInformation *p_CcNodeInfo = NULL;
-+
-+ if (!LIST_IsEmpty(p_List))
-+ {
-+ p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List);
-+ while (p_CcNodeInfo)
-+ {
-+ XX_Free(p_CcNodeInfo);
-+ p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List);
-+ }
-+ }
-+
-+ LIST_Del(p_List);
-+}
-+
-+static void DeleteNode(t_FmPcdCcNode *p_CcNode)
-+{
-+ uint32_t i;
-+
-+ if (!p_CcNode)
-+ return;
-+
-+ if (p_CcNode->p_GlblMask)
-+ {
-+ XX_Free(p_CcNode->p_GlblMask);
-+ p_CcNode->p_GlblMask = NULL;
-+ }
-+
-+ if (p_CcNode->h_KeysMatchTable)
-+ {
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
-+ p_CcNode->h_KeysMatchTable);
-+ p_CcNode->h_KeysMatchTable = NULL;
-+ }
-+
-+ if (p_CcNode->h_AdTable)
-+ {
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
-+ p_CcNode->h_AdTable);
-+ p_CcNode->h_AdTable = NULL;
-+ }
-+
-+ if (p_CcNode->h_Ad)
-+ {
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
-+ p_CcNode->h_Ad);
-+ p_CcNode->h_Ad = NULL;
-+ p_CcNode->h_TmpAd = NULL;
-+ }
-+
-+ if (p_CcNode->h_StatsFLRs)
-+ {
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
-+ p_CcNode->h_StatsFLRs);
-+ p_CcNode->h_StatsFLRs = NULL;
-+ }
-+
-+ if (p_CcNode->h_Spinlock)
-+ {
-+ XX_FreeSpinlock(p_CcNode->h_Spinlock);
-+ p_CcNode->h_Spinlock = NULL;
-+ }
-+
-+    /* Restore the original counters pointer instead of the shared pointer (common to all hash buckets) */
-+ if (p_CcNode->isHashBucket
-+ && (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE))
-+ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].p_StatsObj->h_StatsCounters =
-+ p_CcNode->h_PrivMissStatsCounters;
-+
-+ /* Releasing all currently used statistics objects, including 'miss' entry */
-+ for (i = 0; i < p_CcNode->numOfKeys + 1; i++)
-+ if (p_CcNode->keyAndNextEngineParams[i].p_StatsObj)
-+ PutStatsObj(p_CcNode,
-+ p_CcNode->keyAndNextEngineParams[i].p_StatsObj);
-+
-+ if (!LIST_IsEmpty(&p_CcNode->availableStatsLst))
-+ {
-+ t_Handle h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd);
-+ ASSERT_COND(h_FmMuram);
-+
-+ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
-+ }
-+
-+ LIST_Del(&p_CcNode->availableStatsLst);
-+
-+ ReleaseLst(&p_CcNode->availableStatsLst);
-+ ReleaseLst(&p_CcNode->ccPrevNodesLst);
-+ ReleaseLst(&p_CcNode->ccTreeIdLst);
-+ ReleaseLst(&p_CcNode->ccTreesLst);
-+
-+ XX_Free(p_CcNode);
-+}
-+
-+static void DeleteTree(t_FmPcdCcTree *p_FmPcdTree, t_FmPcd *p_FmPcd)
-+{
-+ if (p_FmPcdTree)
-+ {
-+ if (p_FmPcdTree->ccTreeBaseAddr)
-+ {
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd),
-+ UINT_TO_PTR(p_FmPcdTree->ccTreeBaseAddr));
-+ p_FmPcdTree->ccTreeBaseAddr = 0;
-+ }
-+
-+ ReleaseLst(&p_FmPcdTree->fmPortsLst);
-+
-+ XX_Free(p_FmPcdTree);
-+ }
-+}
-+
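-+/* GetCcExtractKeySize() rounds the real extraction size up to the nearest key size the
-+ * match table supports: 1, 2, 4, 8, 16, 24, 32, 40, 48 or 56 bytes (0 if out of range). */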
-+static void GetCcExtractKeySize(uint8_t parseCodeRealSize,
-+ uint8_t *parseCodeCcSize)
-+{
-+    if ((parseCodeRealSize > 0) && (parseCodeRealSize < 2))
-+        *parseCodeCcSize = 1;
-+    else if (parseCodeRealSize == 2)
-+        *parseCodeCcSize = 2;
-+    else if ((parseCodeRealSize > 2) && (parseCodeRealSize <= 4))
-+        *parseCodeCcSize = 4;
-+    else if ((parseCodeRealSize > 4) && (parseCodeRealSize <= 8))
-+        *parseCodeCcSize = 8;
-+    else if ((parseCodeRealSize > 8) && (parseCodeRealSize <= 16))
-+        *parseCodeCcSize = 16;
-+    else if ((parseCodeRealSize > 16) && (parseCodeRealSize <= 24))
-+        *parseCodeCcSize = 24;
-+    else if ((parseCodeRealSize > 24) && (parseCodeRealSize <= 32))
-+        *parseCodeCcSize = 32;
-+    else if ((parseCodeRealSize > 32) && (parseCodeRealSize <= 40))
-+        *parseCodeCcSize = 40;
-+    else if ((parseCodeRealSize > 40) && (parseCodeRealSize <= 48))
-+        *parseCodeCcSize = 48;
-+    else if ((parseCodeRealSize > 48) && (parseCodeRealSize <= 56))
-+        *parseCodeCcSize = 56;
-+    else
-+        *parseCodeCcSize = 0;
-+}
-+
-+static void GetSizeHeaderField(e_NetHeaderType hdr, t_FmPcdFields field,
-+ uint8_t *parseCodeRealSize)
-+{
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_ETH):
-+ switch (field.eth)
-+ {
-+ case (NET_HEADER_FIELD_ETH_DA):
-+ *parseCodeRealSize = 6;
-+ break;
-+
-+ case (NET_HEADER_FIELD_ETH_SA):
-+ *parseCodeRealSize = 6;
-+ break;
-+
-+ case (NET_HEADER_FIELD_ETH_TYPE):
-+ *parseCodeRealSize = 2;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_PPPoE):
-+ switch (field.pppoe)
-+ {
-+ case (NET_HEADER_FIELD_PPPoE_PID):
-+ *parseCodeRealSize = 2;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_VLAN):
-+ switch (field.vlan)
-+ {
-+ case (NET_HEADER_FIELD_VLAN_TCI):
-+ *parseCodeRealSize = 2;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported2"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_MPLS):
-+ switch (field.mpls)
-+ {
-+ case (NET_HEADER_FIELD_MPLS_LABEL_STACK):
-+ *parseCodeRealSize = 4;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported3"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_IPv4):
-+ switch (field.ipv4)
-+ {
-+ case (NET_HEADER_FIELD_IPv4_DST_IP):
-+ case (NET_HEADER_FIELD_IPv4_SRC_IP):
-+ *parseCodeRealSize = 4;
-+ break;
-+
-+ case (NET_HEADER_FIELD_IPv4_TOS):
-+ case (NET_HEADER_FIELD_IPv4_PROTO):
-+ *parseCodeRealSize = 1;
-+ break;
-+
-+ case (NET_HEADER_FIELD_IPv4_DST_IP
-+ | NET_HEADER_FIELD_IPv4_SRC_IP):
-+ *parseCodeRealSize = 8;
-+ break;
-+
-+ case (NET_HEADER_FIELD_IPv4_TTL):
-+ *parseCodeRealSize = 1;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported4"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_IPv6):
-+ switch (field.ipv6)
-+ {
-+ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL
-+ | NET_HEADER_FIELD_IPv6_TC):
-+ *parseCodeRealSize = 4;
-+ break;
-+
-+ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
-+ case (NET_HEADER_FIELD_IPv6_HOP_LIMIT):
-+ *parseCodeRealSize = 1;
-+ break;
-+
-+ case (NET_HEADER_FIELD_IPv6_DST_IP):
-+ case (NET_HEADER_FIELD_IPv6_SRC_IP):
-+ *parseCodeRealSize = 16;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_IP):
-+ switch (field.ip)
-+ {
-+ case (NET_HEADER_FIELD_IP_DSCP):
-+ case (NET_HEADER_FIELD_IP_PROTO):
-+ *parseCodeRealSize = 1;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_GRE):
-+ switch (field.gre)
-+ {
-+ case (NET_HEADER_FIELD_GRE_TYPE):
-+ *parseCodeRealSize = 2;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported6"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_MINENCAP):
-+ switch (field.minencap)
-+ {
-+ case (NET_HEADER_FIELD_MINENCAP_TYPE):
-+ *parseCodeRealSize = 1;
-+ break;
-+
-+ case (NET_HEADER_FIELD_MINENCAP_DST_IP):
-+ case (NET_HEADER_FIELD_MINENCAP_SRC_IP):
-+ *parseCodeRealSize = 4;
-+ break;
-+
-+ case (NET_HEADER_FIELD_MINENCAP_SRC_IP
-+ | NET_HEADER_FIELD_MINENCAP_DST_IP):
-+ *parseCodeRealSize = 8;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported7"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_TCP):
-+ switch (field.tcp)
-+ {
-+ case (NET_HEADER_FIELD_TCP_PORT_SRC):
-+ case (NET_HEADER_FIELD_TCP_PORT_DST):
-+ *parseCodeRealSize = 2;
-+ break;
-+
-+ case (NET_HEADER_FIELD_TCP_PORT_SRC
-+ | NET_HEADER_FIELD_TCP_PORT_DST):
-+ *parseCodeRealSize = 4;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported8"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_UDP):
-+ switch (field.udp)
-+ {
-+ case (NET_HEADER_FIELD_UDP_PORT_SRC):
-+ case (NET_HEADER_FIELD_UDP_PORT_DST):
-+ *parseCodeRealSize = 2;
-+ break;
-+
-+ case (NET_HEADER_FIELD_UDP_PORT_SRC
-+ | NET_HEADER_FIELD_UDP_PORT_DST):
-+ *parseCodeRealSize = 4;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported9"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported10"));
-+ *parseCodeRealSize = CC_SIZE_ILLEGAL;
-+ break;
-+ }
-+}
-+
-+t_Error ValidateNextEngineParams(
-+ t_Handle h_FmPcd, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
-+ e_FmPcdCcStatsMode statsMode)
-+{
-+ uint16_t absoluteProfileId;
-+ t_Error err = E_OK;
-+ uint8_t relativeSchemeId;
-+
-+ if ((statsMode == e_FM_PCD_CC_STATS_MODE_NONE)
-+ && (p_FmPcdCcNextEngineParams->statisticsEn))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_CONFLICT,
-+                ("Statistics are requested for a key, but statistics mode was set "
-+                 "to 'NONE' upon initialization"));
-+
-+ switch (p_FmPcdCcNextEngineParams->nextEngine)
-+ {
-+ case (e_FM_PCD_INVALID):
-+ err = E_NOT_SUPPORTED;
-+ break;
-+
-+ case (e_FM_PCD_DONE):
-+ if ((p_FmPcdCcNextEngineParams->params.enqueueParams.action
-+ == e_FM_PCD_ENQ_FRAME)
-+ && p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
-+ {
-+ if (!p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_CONFLICT,
-+ ("When overrideFqid is set, newFqid must not be zero"));
-+ if (p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid
-+ & ~0x00FFFFFF)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+                        ("newFqid must be between 1 and 2^24-1"));
-+ }
-+ break;
-+
-+ case (e_FM_PCD_KG):
-+ relativeSchemeId =
-+ FmPcdKgGetRelativeSchemeId(
-+ h_FmPcd,
-+ FmPcdKgGetSchemeId(
-+ p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme));
-+ if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+ if (!FmPcdKgIsSchemeValidSw(
-+ p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("not valid schemeIndex in KG next engine param"));
-+ if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("CC Node may point only to a scheme that is always direct."));
-+ break;
-+
-+ case (e_FM_PCD_PLCR):
-+ if (p_FmPcdCcNextEngineParams->params.plcrParams.overrideParams)
-+ {
-+            /* If this is a private policer profile, it may not be initialized yet, so no checks are done at this stage */
-+ if (p_FmPcdCcNextEngineParams->params.plcrParams.sharedProfile)
-+ {
-+ err =
-+ FmPcdPlcrGetAbsoluteIdByProfileParams(
-+ h_FmPcd,
-+ e_FM_PCD_PLCR_SHARED,
-+ NULL,
-+ p_FmPcdCcNextEngineParams->params.plcrParams.newRelativeProfileId,
-+ &absoluteProfileId);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err,
-+ ("Shared profile offset is out of range"));
-+ if (!FmPcdPlcrIsProfileValid(h_FmPcd, absoluteProfileId))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Invalid profile"));
-+ }
-+ }
-+ break;
-+
-+ case (e_FM_PCD_HASH):
-+ p_FmPcdCcNextEngineParams->nextEngine = e_FM_PCD_CC;
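-+            /* fall through - a HASH node is handled like a CC node */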
-+ case (e_FM_PCD_CC):
-+ if (!p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)
-+ RETURN_ERROR(MAJOR, E_NULL_POINTER,
-+ ("handler to next Node is NULL"));
-+ break;
-+
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_FR):
-+ if (!p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic)
-+ err = E_NOT_SUPPORTED;
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Next engine is not correct"));
-+ }
-+
-+
-+ return err;
-+}
-+
-+static uint8_t GetGenParseCode(e_FmPcdExtractFrom src,
-+ uint32_t offset, bool glblMask,
-+ uint8_t *parseArrayOffset, bool fromIc,
-+ ccPrivateInfo_t icCode)
-+{
-+ if (!fromIc)
-+ {
-+ switch (src)
-+ {
-+ case (e_FM_PCD_EXTRACT_FROM_FRAME_START):
-+ if (glblMask)
-+ return CC_PC_GENERIC_WITH_MASK;
-+ else
-+ return CC_PC_GENERIC_WITHOUT_MASK;
-+
-+ case (e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE):
-+ *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET;
-+ if (offset)
-+ return CC_PR_OFFSET;
-+ else
-+ return CC_PR_WITHOUT_OFFSET;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src"));
-+ return CC_PC_ILLEGAL;
-+ }
-+ }
-+ else
-+ {
-+ switch (icCode)
-+ {
-+ case (CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH):
-+ *parseArrayOffset = 0x50;
-+ return CC_PC_GENERIC_IC_GMASK;
-+
-+ case (CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH):
-+ *parseArrayOffset = 0x48;
-+ return CC_PC_GENERIC_IC_GMASK;
-+
-+ case (CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP):
-+ *parseArrayOffset = 0x48;
-+ return CC_PC_GENERIC_IC_HASH_INDEXED;
-+
-+ case (CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP):
-+ *parseArrayOffset = 0x16;
-+ return CC_PC_GENERIC_IC_HASH_INDEXED;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src"));
-+ break;
-+ }
-+ }
-+
-+ return CC_PC_ILLEGAL;
-+}
-+
-+static uint8_t GetFullFieldParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex index,
-+ t_FmPcdFields field)
-+{
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ return CC_PC_ILLEGAL;
-+
-+ case (HEADER_TYPE_ETH):
-+ switch (field.eth)
-+ {
-+ case (NET_HEADER_FIELD_ETH_DA):
-+ return CC_PC_FF_MACDST;
-+ case (NET_HEADER_FIELD_ETH_SA):
-+ return CC_PC_FF_MACSRC;
-+ case (NET_HEADER_FIELD_ETH_TYPE):
-+ return CC_PC_FF_ETYPE;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_VLAN):
-+ switch (field.vlan)
-+ {
-+ case (NET_HEADER_FIELD_VLAN_TCI):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_TCI1;
-+ if (index == e_FM_PCD_HDR_INDEX_LAST)
-+ return CC_PC_FF_TCI2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_MPLS):
-+ switch (field.mpls)
-+ {
-+ case (NET_HEADER_FIELD_MPLS_LABEL_STACK):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_MPLS1;
-+ if (index == e_FM_PCD_HDR_INDEX_LAST)
-+ return CC_PC_FF_MPLS_LAST;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index"));
-+ return CC_PC_ILLEGAL;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_IPv4):
-+ switch (field.ipv4)
-+ {
-+ case (NET_HEADER_FIELD_IPv4_DST_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV4DST1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV4DST2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return CC_PC_ILLEGAL;
-+ case (NET_HEADER_FIELD_IPv4_TOS):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV4IPTOS_TC1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV4IPTOS_TC2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return CC_PC_ILLEGAL;
-+ case (NET_HEADER_FIELD_IPv4_PROTO):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV4PTYPE1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV4PTYPE2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return CC_PC_ILLEGAL;
-+ case (NET_HEADER_FIELD_IPv4_SRC_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV4SRC1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV4SRC2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return CC_PC_ILLEGAL;
-+ case (NET_HEADER_FIELD_IPv4_SRC_IP
-+ | NET_HEADER_FIELD_IPv4_DST_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV4SRC1_IPV4DST1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV4SRC2_IPV4DST2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return CC_PC_ILLEGAL;
-+ case (NET_HEADER_FIELD_IPv4_TTL):
-+ return CC_PC_FF_IPV4TTL;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_IPv6):
-+ switch (field.ipv6)
-+ {
-+ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL
-+ | NET_HEADER_FIELD_IPv6_TC):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return CC_PC_ILLEGAL;
-+
-+ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV6PTYPE1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV6PTYPE2;
-+ if (index == e_FM_PCD_HDR_INDEX_LAST)
-+ return CC_PC_FF_IPPID;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return CC_PC_ILLEGAL;
-+
-+ case (NET_HEADER_FIELD_IPv6_DST_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV6DST1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV6DST2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return CC_PC_ILLEGAL;
-+
-+ case (NET_HEADER_FIELD_IPv6_SRC_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPV6SRC1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return CC_PC_FF_IPV6SRC2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return CC_PC_ILLEGAL;
-+
-+ case (NET_HEADER_FIELD_IPv6_HOP_LIMIT):
-+ return CC_PC_FF_IPV6HOP_LIMIT;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_IP):
-+ switch (field.ip)
-+ {
-+ case (NET_HEADER_FIELD_IP_DSCP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE)
-+ || (index == e_FM_PCD_HDR_INDEX_1))
-+ return CC_PC_FF_IPDSCP;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index"));
-+ return CC_PC_ILLEGAL;
-+
-+ case (NET_HEADER_FIELD_IP_PROTO):
-+ if (index == e_FM_PCD_HDR_INDEX_LAST)
-+ return CC_PC_FF_IPPID;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index"));
-+ return CC_PC_ILLEGAL;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_GRE):
-+ switch (field.gre)
-+ {
-+ case (NET_HEADER_FIELD_GRE_TYPE):
-+ return CC_PC_FF_GREPTYPE;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_MINENCAP):
-+ switch (field.minencap)
-+ {
-+ case (NET_HEADER_FIELD_MINENCAP_TYPE):
-+ return CC_PC_FF_MINENCAP_PTYPE;
-+
-+ case (NET_HEADER_FIELD_MINENCAP_DST_IP):
-+ return CC_PC_FF_MINENCAP_IPDST;
-+
-+ case (NET_HEADER_FIELD_MINENCAP_SRC_IP):
-+ return CC_PC_FF_MINENCAP_IPSRC;
-+
-+ case (NET_HEADER_FIELD_MINENCAP_SRC_IP
-+ | NET_HEADER_FIELD_MINENCAP_DST_IP):
-+ return CC_PC_FF_MINENCAP_IPSRC_IPDST;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_TCP):
-+ switch (field.tcp)
-+ {
-+ case (NET_HEADER_FIELD_TCP_PORT_SRC):
-+ return CC_PC_FF_L4PSRC;
-+
-+ case (NET_HEADER_FIELD_TCP_PORT_DST):
-+ return CC_PC_FF_L4PDST;
-+
-+ case (NET_HEADER_FIELD_TCP_PORT_DST
-+ | NET_HEADER_FIELD_TCP_PORT_SRC):
-+ return CC_PC_FF_L4PSRC_L4PDST;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_PPPoE):
-+ switch (field.pppoe)
-+ {
-+ case (NET_HEADER_FIELD_PPPoE_PID):
-+ return CC_PC_FF_PPPPID;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ case (HEADER_TYPE_UDP):
-+ switch (field.udp)
-+ {
-+ case (NET_HEADER_FIELD_UDP_PORT_SRC):
-+ return CC_PC_FF_L4PSRC;
-+
-+ case (NET_HEADER_FIELD_UDP_PORT_DST):
-+ return CC_PC_FF_L4PDST;
-+
-+ case (NET_HEADER_FIELD_UDP_PORT_DST
-+ | NET_HEADER_FIELD_UDP_PORT_SRC):
-+ return CC_PC_FF_L4PSRC_L4PDST;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+}
-+
-+static uint8_t GetPrParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex hdrIndex,
-+ uint32_t offset, bool glblMask,
-+ uint8_t *parseArrayOffset)
-+{
-+ bool offsetRelevant = FALSE;
-+
-+ if (offset)
-+ offsetRelevant = TRUE;
-+
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ return CC_PC_ILLEGAL;
-+
-+ case (HEADER_TYPE_ETH):
-+ *parseArrayOffset = (uint8_t)CC_PC_PR_ETH_OFFSET;
-+ break;
-+
-+ case (HEADER_TYPE_USER_DEFINED_SHIM1):
-+ if (offset || glblMask)
-+ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET;
-+ else
-+ return CC_PC_PR_SHIM1;
-+ break;
-+
-+ case (HEADER_TYPE_USER_DEFINED_SHIM2):
-+ if (offset || glblMask)
-+ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET;
-+ else
-+ return CC_PC_PR_SHIM2;
-+ break;
-+
-+ case (HEADER_TYPE_LLC_SNAP):
-+ *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET;
-+ break;
-+
-+ case (HEADER_TYPE_PPPoE):
-+ *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET;
-+ break;
-+
-+ case (HEADER_TYPE_MPLS):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
-+ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET;
-+ else
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET;
-+ else
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index"));
-+ return CC_PC_ILLEGAL;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_IPv4):
-+ case (HEADER_TYPE_IPv6):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
-+ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ *parseArrayOffset = CC_PC_PR_IP1_OFFSET;
-+ else
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_2)
-+ *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET;
-+ else
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index"));
-+ return CC_PC_ILLEGAL;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_MINENCAP):
-+ *parseArrayOffset = CC_PC_PR_MINENC_OFFSET;
-+ break;
-+
-+ case (HEADER_TYPE_GRE):
-+ *parseArrayOffset = CC_PC_PR_GRE_OFFSET;
-+ break;
-+
-+ case (HEADER_TYPE_TCP):
-+ case (HEADER_TYPE_UDP):
-+ case (HEADER_TYPE_IPSEC_AH):
-+ case (HEADER_TYPE_IPSEC_ESP):
-+ case (HEADER_TYPE_DCCP):
-+ case (HEADER_TYPE_SCTP):
-+ *parseArrayOffset = CC_PC_PR_L4_OFFSET;
-+ break;
-+
-+ default:
-+            REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal header for this type of operation"));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ if (offsetRelevant)
-+ return CC_PR_OFFSET;
-+ else
-+ return CC_PR_WITHOUT_OFFSET;
-+}
-+
-+static uint8_t GetFieldParseCode(e_NetHeaderType hdr, t_FmPcdFields field,
-+ uint32_t offset, uint8_t *parseArrayOffset,
-+ e_FmPcdHdrIndex hdrIndex)
-+{
-+ bool offsetRelevant = FALSE;
-+
-+ if (offset)
-+ offsetRelevant = TRUE;
-+
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ break;
-+ case (HEADER_TYPE_ETH):
-+ switch (field.eth)
-+ {
-+ case (NET_HEADER_FIELD_ETH_TYPE):
-+ *parseArrayOffset = CC_PC_PR_ETYPE_LAST_OFFSET;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+ break;
-+
-+ case (HEADER_TYPE_VLAN):
-+ switch (field.vlan)
-+ {
-+ case (NET_HEADER_FIELD_VLAN_TCI):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
-+ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET;
-+ else
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET;
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return CC_PC_ILLEGAL;
-+ }
-+ break;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal header "));
-+ return CC_PC_ILLEGAL;
-+ }
-+
-+ if (offsetRelevant)
-+ return CC_PR_OFFSET;
-+ else
-+ return CC_PR_WITHOUT_OFFSET;
-+}
-+
-+static void FillAdOfTypeResult(t_Handle h_Ad,
-+ t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
-+ t_FmPcd *p_FmPcd,
-+ t_FmPcdCcNextEngineParams *p_CcNextEngineParams)
-+{
-+ t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult *)h_Ad;
-+ t_Handle h_TmpAd;
-+ uint32_t tmp = 0, tmpNia = 0;
-+ uint16_t profileId;
-+ t_Handle p_AdNewPtr = NULL;
-+ t_Error err = E_OK;
-+
-+    /* This routine handles three cases of building a "result" type AD.
-+ * Case 1: No Manip. The action descriptor is built within the match table.
-+ * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized
-+ * either in the FmPcdManipUpdateAdResultForCc routine or it was already
-+ * initialized and returned here.
-+ * p_AdResult (within the match table) will be initialized after
-+ * this routine returns and point to the existing AD.
-+ * Case 3: Manip exists. The action descriptor is built within the match table.
-+ * FmPcdManipUpdateAdResultForCc returns a NULL p_AdNewPtr.
-+ *
-+ * If statistics were enabled and the statistics mode of this node requires
-+ * a statistics Ad, it will be placed after the result Ad and before the
-+ * manip Ad, if manip Ad exists here.
-+ */
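-+    /* Resulting descriptor chains (sketch of the three cases listed above):
-+     *   case 1: lookup -> result AD inside the match table
-+     *   case 2: lookup -> result AD inside the match table -> external AD (p_AdNewPtr)
-+     *   case 3: lookup -> result AD inside the match table, built by the manip code
-+     * When statistics are enabled, the statistics AD is linked so that the current lookup
-+     * AD becomes its 'next action' (see the swap below). */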
-+
-+    /* By default, the "new" ptr is the current one, i.e. the content of the result
-+     * AD will be written into the match table itself (case (1)). */
-+ p_AdNewPtr = p_AdResult;
-+
-+ /* Initialize an action descriptor, if current statistics mode requires an Ad */
-+ if (p_FmPcdCcStatsParams)
-+ {
-+ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd);
-+ ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters);
-+
-+        /* Swap the addresses of the statistics AD and the current lookup AD */
-+ h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd;
-+ p_FmPcdCcStatsParams->h_StatsAd = h_Ad;
-+ h_Ad = h_TmpAd;
-+
-+ p_AdNewPtr = h_Ad;
-+ p_AdResult = h_Ad;
-+
-+ /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */
-+ UpdateStatsAd(p_FmPcdCcStatsParams, h_Ad, p_FmPcd->physicalMuramBase);
-+ }
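-+
-+    /* After the swap above (sketch): the match-table slot now holds the statistics AD,
-+     * and the result AD below is written into the slot originally allocated for
-+     * statistics, so a lookup hits the statistics AD first and then the result AD. */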
-+
-+ /* Create manip and return p_AdNewPtr to either a new descriptor or NULL */
-+ if (p_CcNextEngineParams->h_Manip)
-+ FmPcdManipUpdateAdResultForCc(p_CcNextEngineParams->h_Manip,
-+ p_CcNextEngineParams, h_Ad, &p_AdNewPtr);
-+
-+    /* if (p_AdNewPtr == NULL) --> Done (case (3)) */
-+ if (p_AdNewPtr)
-+ {
-+ /* case (1) and (2) */
-+ switch (p_CcNextEngineParams->nextEngine)
-+ {
-+ case (e_FM_PCD_DONE):
-+ if (p_CcNextEngineParams->params.enqueueParams.action
-+ == e_FM_PCD_ENQ_FRAME)
-+ {
-+ if (p_CcNextEngineParams->params.enqueueParams.overrideFqid)
-+ {
-+ tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
-+ tmp |=
-+ p_CcNextEngineParams->params.enqueueParams.newFqid;
-+#if (DPAA_VERSION >= 11)
-+ tmp |=
-+ (p_CcNextEngineParams->params.enqueueParams.newRelativeStorageProfileId
-+ & FM_PCD_AD_RESULT_VSP_MASK)
-+ << FM_PCD_AD_RESULT_VSP_SHIFT;
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+ else
-+ {
-+ tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
-+ tmp |= FM_PCD_AD_RESULT_PLCR_DIS;
-+ }
-+ }
-+
-+ if (p_CcNextEngineParams->params.enqueueParams.action
-+ == e_FM_PCD_DROP_FRAME)
-+ tmpNia |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
-+ else
-+ tmpNia |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
-+ break;
-+
-+ case (e_FM_PCD_KG):
-+ if (p_CcNextEngineParams->params.kgParams.overrideFqid)
-+ {
-+ tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
-+ tmp |= p_CcNextEngineParams->params.kgParams.newFqid;
-+#if (DPAA_VERSION >= 11)
-+ tmp |=
-+ (p_CcNextEngineParams->params.kgParams.newRelativeStorageProfileId
-+ & FM_PCD_AD_RESULT_VSP_MASK)
-+ << FM_PCD_AD_RESULT_VSP_SHIFT;
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+ else
-+ {
-+ tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
-+ tmp |= FM_PCD_AD_RESULT_PLCR_DIS;
-+ }
-+ tmpNia = NIA_KG_DIRECT;
-+ tmpNia |= NIA_ENG_KG;
-+ tmpNia |= NIA_KG_CC_EN;
-+ tmpNia |= FmPcdKgGetSchemeId(
-+ p_CcNextEngineParams->params.kgParams.h_DirectScheme);
-+ break;
-+
-+ case (e_FM_PCD_PLCR):
-+ if (p_CcNextEngineParams->params.plcrParams.overrideParams)
-+ {
-+ tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
-+
-+                    /* If this is a private policer profile, it may not be initialized yet, so no checks are done at this stage */
-+ if (p_CcNextEngineParams->params.plcrParams.sharedProfile)
-+ {
-+ tmpNia |= NIA_PLCR_ABSOLUTE;
-+ err = FmPcdPlcrGetAbsoluteIdByProfileParams(
-+ (t_Handle)p_FmPcd,
-+ e_FM_PCD_PLCR_SHARED,
-+ NULL,
-+ p_CcNextEngineParams->params.plcrParams.newRelativeProfileId,
-+ &profileId);
-+
-+ if (err != E_OK) {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return;
-+ }
-+
-+ }
-+ else
-+ profileId =
-+ p_CcNextEngineParams->params.plcrParams.newRelativeProfileId;
-+
-+ tmp |= p_CcNextEngineParams->params.plcrParams.newFqid;
-+#if (DPAA_VERSION >= 11)
-+ tmp |=
-+ (p_CcNextEngineParams->params.plcrParams.newRelativeStorageProfileId
-+ & FM_PCD_AD_RESULT_VSP_MASK)
-+ << FM_PCD_AD_RESULT_VSP_SHIFT;
-+#endif /* (DPAA_VERSION >= 11) */
-+ WRITE_UINT32(
-+ p_AdResult->plcrProfile,
-+ (uint32_t)((uint32_t)profileId << FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT));
-+ }
-+ else
-+ tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
-+
-+ tmpNia |=
-+ NIA_ENG_PLCR
-+ | p_CcNextEngineParams->params.plcrParams.newRelativeProfileId;
-+ break;
-+
-+ default:
-+ return;
-+        }
-+
-+        WRITE_UINT32(p_AdResult->fqid, tmp);
-+
-+ if (p_CcNextEngineParams->h_Manip)
-+ {
-+ tmp = GET_UINT32(p_AdResult->plcrProfile);
-+ tmp |= (uint32_t)(XX_VirtToPhys(p_AdNewPtr)
-+ - (p_FmPcd->physicalMuramBase)) >> 4;
-+ WRITE_UINT32(p_AdResult->plcrProfile, tmp);
-+
-+ tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE;
-+ tmpNia |= FM_PCD_AD_RESULT_NADEN;
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ tmpNia |= FM_PCD_AD_RESULT_NO_OM_VSPE;
-+#endif /* (DPAA_VERSION >= 11) */
-+ WRITE_UINT32(p_AdResult->nia, tmpNia);
-+ }
-+}
-+
-+static t_Error CcUpdateParams(t_Handle h_FmPcd, t_Handle h_PcdParams,
-+ t_Handle h_FmPort, t_Handle h_FmTree,
-+ bool validate)
-+{
-+ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_FmTree;
-+
-+ return CcUpdateParam(h_FmPcd, h_PcdParams, h_FmPort,
-+ p_CcTree->keyAndNextEngineParams,
-+ p_CcTree->numOfEntries,
-+ UINT_TO_PTR(p_CcTree->ccTreeBaseAddr), validate, 0,
-+ h_FmTree, FALSE);
-+}
-+
-+
-+static void ReleaseNewNodeCommonPart(
-+ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
-+{
-+ if (p_AdditionalInfo->p_AdTableNew)
-+ FM_MURAM_FreeMem(
-+ FmPcdGetMuramHandle(
-+ ((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd),
-+ p_AdditionalInfo->p_AdTableNew);
-+
-+ if (p_AdditionalInfo->p_KeysMatchTableNew)
-+ FM_MURAM_FreeMem(
-+ FmPcdGetMuramHandle(
-+ ((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd),
-+ p_AdditionalInfo->p_KeysMatchTableNew);
-+}
-+
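-+/* UpdateGblMask() decides between the node's single 4-byte global mask and per-key local
-+ * masks (sketch of the rules below): the first qualifying mask (<= 4 bytes, one of the
-+ * supported parse codes) becomes the global mask; a later key whose mask differs, or that
-+ * omits a mask while a non-trivial global mask exists, or that is masked while longer than
-+ * 4 bytes, forces lclMask. In static mode (maxNumOfKeys set) a local mask is rejected
-+ * unless maskSupport was enabled at initialization. */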
-+static t_Error UpdateGblMask(t_FmPcdCcNode *p_CcNode, uint8_t keySize,
-+ uint8_t *p_Mask)
-+{
-+ uint8_t prvGlblMaskSize = p_CcNode->glblMaskSize;
-+
-+ if (p_Mask && !p_CcNode->glblMaskUpdated && (keySize <= 4)
-+ && !p_CcNode->lclMask)
-+ {
-+ if (p_CcNode->parseCode && (p_CcNode->parseCode != CC_PC_FF_TCI1)
-+ && (p_CcNode->parseCode != CC_PC_FF_TCI2)
-+ && (p_CcNode->parseCode != CC_PC_FF_MPLS1)
-+ && (p_CcNode->parseCode != CC_PC_FF_MPLS_LAST)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPDSCP)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2))
-+ {
-+ p_CcNode->glblMaskSize = 0;
-+ p_CcNode->lclMask = TRUE;
-+ }
-+ else
-+ {
-+ memcpy(p_CcNode->p_GlblMask, p_Mask, (sizeof(uint8_t)) * keySize);
-+ p_CcNode->glblMaskUpdated = TRUE;
-+ p_CcNode->glblMaskSize = 4;
-+ }
-+ }
-+ else
-+ if (p_Mask && (keySize <= 4) && !p_CcNode->lclMask)
-+ {
-+ if (memcmp(p_CcNode->p_GlblMask, p_Mask, keySize) != 0)
-+ {
-+ p_CcNode->lclMask = TRUE;
-+ p_CcNode->glblMaskSize = 0;
-+ }
-+ }
-+ else
-+ if (!p_Mask && p_CcNode->glblMaskUpdated && (keySize <= 4))
-+ {
-+ uint32_t tmpMask = 0xffffffff;
-+ if (memcmp(p_CcNode->p_GlblMask, &tmpMask, 4) != 0)
-+ {
-+ p_CcNode->lclMask = TRUE;
-+ p_CcNode->glblMaskSize = 0;
-+ }
-+ }
-+ else
-+ if (p_Mask)
-+ {
-+ p_CcNode->lclMask = TRUE;
-+ p_CcNode->glblMaskSize = 0;
-+ }
-+
-+    /* In static mode (maxNumOfKeys > 0), a local mask is supported
-+     only if mask support was enabled at initialization */
-+ if (p_CcNode->maxNumOfKeys && (!p_CcNode->maskSupport) && p_CcNode->lclMask)
-+ {
-+ p_CcNode->lclMask = FALSE;
-+ p_CcNode->glblMaskSize = prvGlblMaskSize;
-+ return ERROR_CODE(E_NOT_SUPPORTED);
-+ }
-+
-+ return E_OK;
-+}
-+
-+static __inline__ t_Handle GetNewAd(t_Handle h_FmPcdCcNodeOrTree, bool isTree)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_Handle h_Ad;
-+
-+ if (isTree)
-+ p_FmPcd = (t_FmPcd *)(((t_FmPcdCcTree *)h_FmPcdCcNodeOrTree)->h_FmPcd);
-+ else
-+ p_FmPcd = (t_FmPcd *)(((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_FmPcd);
-+
-+ if ((isTree && p_FmPcd->p_CcShadow)
-+ || (!isTree && ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->maxNumOfKeys))
-+ {
-+ /* The allocated shadow is divided as follows:
-+ 0 . . . 16 . . .
-+ ---------------------------------------------------
-+ | Shadow | Shadow Keys | Shadow Next |
-+ | Ad | Match Table | Engine Table |
-+ | (16 bytes) | (maximal size) | (maximal size) |
-+ ---------------------------------------------------
-+ */
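-+        /* Here only the first 16 bytes (the shadow AD slot at offset 0) are used;
-+         * BuildNewNodeCommonPart() uses the two table areas that follow it. */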
-+ if (!p_FmPcd->p_CcShadow)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
-+ return NULL;
-+ }
-+
-+ h_Ad = p_FmPcd->p_CcShadow;
-+ }
-+ else
-+ {
-+ h_Ad = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
-+ FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!h_Ad)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptor"));
-+ return NULL;
-+ }
-+ }
-+
-+ return h_Ad;
-+}
-+
-+static t_Error BuildNewNodeCommonPart(
-+ t_FmPcdCcNode *p_CcNode, int *size,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+
-+ if (p_CcNode->lclMask)
-+ *size = 2 * p_CcNode->ccKeySizeAccExtraction;
-+ else
-+ *size = p_CcNode->ccKeySizeAccExtraction;
-+
-+ if (p_CcNode->maxNumOfKeys == 0)
-+ {
-+ p_AdditionalInfo->p_AdTableNew = (t_Handle)FM_MURAM_AllocMem(
-+ FmPcdGetMuramHandle(p_FmPcd),
-+ (uint32_t)((p_AdditionalInfo->numOfKeys + 1)
-+ * FM_PCD_CC_AD_ENTRY_SIZE),
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_AdditionalInfo->p_AdTableNew)
-+ RETURN_ERROR(
-+ MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC node action descriptors table"));
-+
-+ p_AdditionalInfo->p_KeysMatchTableNew = (t_Handle)FM_MURAM_AllocMem(
-+ FmPcdGetMuramHandle(p_FmPcd),
-+ (uint32_t)(*size * sizeof(uint8_t)
-+ * (p_AdditionalInfo->numOfKeys + 1)),
-+ FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN);
-+ if (!p_AdditionalInfo->p_KeysMatchTableNew)
-+ {
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
-+ p_AdditionalInfo->p_AdTableNew);
-+ p_AdditionalInfo->p_AdTableNew = NULL;
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC node key match table"));
-+ }
-+
-+ MemSet8(
-+ (uint8_t*)p_AdditionalInfo->p_AdTableNew,
-+ 0,
-+ (uint32_t)((p_AdditionalInfo->numOfKeys + 1)
-+ * FM_PCD_CC_AD_ENTRY_SIZE));
-+ MemSet8((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0,
-+ *size * sizeof(uint8_t) * (p_AdditionalInfo->numOfKeys + 1));
-+ }
-+ else
-+ {
-+ /* The allocated shadow is divided as follows:
-+ 0 . . . 16 . . .
-+ ---------------------------------------------------
-+ | Shadow | Shadow Keys | Shadow Next |
-+ | Ad | Match Table | Engine Table |
-+ | (16 bytes) | (maximal size) | (maximal size) |
-+ ---------------------------------------------------
-+ */
-+
-+ if (!p_FmPcd->p_CcShadow)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
-+
-+ p_AdditionalInfo->p_KeysMatchTableNew =
-+ PTR_MOVE(p_FmPcd->p_CcShadow, FM_PCD_CC_AD_ENTRY_SIZE);
-+ p_AdditionalInfo->p_AdTableNew =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, p_CcNode->keysMatchTableMaxSize);
-+
-+ MemSet8(
-+ (uint8_t*)p_AdditionalInfo->p_AdTableNew,
-+ 0,
-+ (uint32_t)((p_CcNode->maxNumOfKeys + 1)
-+ * FM_PCD_CC_AD_ENTRY_SIZE));
-+ MemSet8((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0,
-+ (*size) * sizeof(uint8_t) * (p_CcNode->maxNumOfKeys));
-+ }
-+
-+ p_AdditionalInfo->p_AdTableOld = p_CcNode->h_AdTable;
-+ p_AdditionalInfo->p_KeysMatchTableOld = p_CcNode->h_KeysMatchTable;
-+
-+ return E_OK;
-+}
-+
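-+/* Rebuilds the node's keys match table and AD table for an added or modified key: the
-+   new entry is written at keyIndex, all other entries are copied from the old tables,
-+   and handles that must be attached or released afterwards are recorded. */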
-+static t_Error BuildNewNodeAddOrMdfyKeyAndNextEngine(
-+ t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
-+ t_FmPcdCcKeyParams *p_KeyParams,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo, bool add)
-+{
-+ t_Error err = E_OK;
-+ t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
-+ t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
-+ int size;
-+ int i = 0, j = 0;
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t requiredAction = 0;
-+ bool prvLclMask;
-+ t_CcNodeInformation *p_CcNodeInformation;
-+ t_FmPcdCcStatsParams statsParams = { 0 };
-+ t_List *p_Pos;
-+ t_FmPcdStatsObj *p_StatsObj;
-+
-+ /* Check that new NIA is legal */
-+ err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams,
-+ p_CcNode->statisticsMode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ prvLclMask = p_CcNode->lclMask;
-+
-+    /* Check that the new key does not require an update of localMask */
-+ err = UpdateGblMask(p_CcNode, p_CcNode->ccKeySizeAccExtraction,
-+ p_KeyParams->p_Mask);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ /* Update internal data structure with new next engine for the given index */
-+ memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams,
-+ &p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams));
-+
-+ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key,
-+ p_KeyParams->p_Key, p_CcNode->userSizeOfExtraction);
-+
-+ if ((p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ && p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ AllocAndFillAdForContLookupManip(
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ if (p_KeyParams->p_Mask)
-+ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask,
-+ p_KeyParams->p_Mask, p_CcNode->userSizeOfExtraction);
-+ else
-+ memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, 0xFF,
-+ p_CcNode->userSizeOfExtraction);
-+
-+ /* Update numOfKeys */
-+ if (add)
-+ p_AdditionalInfo->numOfKeys = (uint8_t)(p_CcNode->numOfKeys + 1);
-+ else
-+ p_AdditionalInfo->numOfKeys = (uint8_t)p_CcNode->numOfKeys;
-+
-+ /* Allocate new tables in MURAM: keys match table and action descriptors table */
-+ err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+    /* Check that the manip is legal and determine the requiredAction necessary for this manip */
-+ if (p_KeyParams->ccNextEngineParams.h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(
-+ &p_KeyParams->ccNextEngineParams, &requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction =
-+ requiredAction;
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |=
-+ UPDATE_CC_WITH_TREE;
-+
-+    /* Update the new AD table and new keys match table according to the new requirements */
-+ i = 0;
-+ for (j = 0; j < p_AdditionalInfo->numOfKeys; j++)
-+ {
-+ p_AdTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ if (j == keyIndex)
-+ {
-+ if (p_KeyParams->ccNextEngineParams.statisticsEn)
-+ {
-+                /* Allocate a statistics object that holds a statistics AD and counters.
-+                   For an added key, a new statistics AD and counters pointer must be
-+                   allocated in a new statistics object. If statistics were already
-+                   enabled, the existing descriptor is replaced with a new descriptor
-+                   whose counters are nullified. */
-+ p_StatsObj = GetStatsObj(p_CcNode);
-+ ASSERT_COND(p_StatsObj);
-+
-+ /* Store allocated statistics object */
-+ ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
-+ p_StatsObj;
-+
-+ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
-+ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
-+#if (DPAA_VERSION >= 11)
-+ statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* Building action descriptor for the received new key */
-+ NextStepAd(p_AdTableNewTmp, &statsParams,
-+ &p_KeyParams->ccNextEngineParams, p_FmPcd);
-+ }
-+ else
-+ {
-+ /* Building action descriptor for the received new key */
-+ NextStepAd(p_AdTableNewTmp, NULL,
-+ &p_KeyParams->ccNextEngineParams, p_FmPcd);
-+ }
-+
-+ /* Copy the received new key into keys match table */
-+ p_KeysMatchTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j*size*sizeof(uint8_t));
-+
-+ MemCpy8((void*)p_KeysMatchTableNewTmp, p_KeyParams->p_Key,
-+ p_CcNode->userSizeOfExtraction);
-+
-+ /* Update mask for the received new key */
-+ if (p_CcNode->lclMask)
-+ {
-+ if (p_KeyParams->p_Mask)
-+ {
-+ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ p_KeyParams->p_Mask,
-+ p_CcNode->userSizeOfExtraction);
-+ }
-+ else
-+ if (p_CcNode->ccKeySizeAccExtraction > 4)
-+ {
-+ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ 0xff, p_CcNode->userSizeOfExtraction);
-+ }
-+ else
-+ {
-+ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ p_CcNode->p_GlblMask,
-+ p_CcNode->userSizeOfExtraction);
-+ }
-+ }
-+
-+ /* If key modification requested, the old entry is omitted and replaced by the new parameters */
-+ if (!add)
-+ i++;
-+ }
-+ else
-+ {
-+ /* Copy existing action descriptors to the newly allocated Ad table */
-+ p_AdTableOldTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE);
-+ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp,
-+ FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Copy existing keys and their masks to the newly allocated keys match table */
-+ p_KeysMatchTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
-+ p_KeysMatchTableOldTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, i * size * sizeof(uint8_t));
-+
-+ if (p_CcNode->lclMask)
-+ {
-+ if (prvLclMask)
-+ {
-+ MemCpy8(
-+ PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
-+ PTR_MOVE(p_KeysMatchTableOldTmp, p_CcNode->ccKeySizeAccExtraction),
-+ p_CcNode->ccKeySizeAccExtraction);
-+ }
-+ else
-+ {
-+ p_KeysMatchTableOldTmp =
-+ PTR_MOVE(p_CcNode->h_KeysMatchTable,
-+ i * (int)p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t));
-+
-+ if (p_CcNode->ccKeySizeAccExtraction > 4)
-+ {
-+ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ 0xff, p_CcNode->userSizeOfExtraction);
-+ }
-+ else
-+ {
-+ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ p_CcNode->p_GlblMask,
-+ p_CcNode->userSizeOfExtraction);
-+ }
-+ }
-+ }
-+
-+ MemCpy8(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
-+ p_CcNode->ccKeySizeAccExtraction);
-+
-+ i++;
-+ }
-+ }
-+
-+ /* Miss action descriptor */
-+ p_AdTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j * FM_PCD_CC_AD_ENTRY_SIZE);
-+ p_AdTableOldTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i * FM_PCD_CC_AD_ENTRY_SIZE);
-+ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ if (!LIST_IsEmpty(&p_CcNode->ccTreesLst))
-+ {
-+ LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst)
-+ {
-+ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
-+ ASSERT_COND(p_CcNodeInformation->h_CcNode);
-+            /* Update the manipulation that has to be updated from the port parameters */
-+            /* It has to be updated with the restrictions defined in this function */
-+ err =
-+ SetRequiredAction(
-+ p_CcNode->h_FmPcd,
-+ p_CcNode->shadowAction
-+ | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
-+ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
-+ 1, p_CcNodeInformation->h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ err =
-+ CcUpdateParam(
-+ p_CcNode->h_FmPcd,
-+ NULL,
-+ NULL,
-+ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
-+ 1,
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
-+ TRUE, p_CcNodeInformation->index,
-+ p_CcNodeInformation->h_CcNode, TRUE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+ }
-+
-+ if (p_CcNode->lclMask)
-+ memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
-+
-+ if (p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_CC)
-+ p_AdditionalInfo->h_NodeForAdd =
-+ p_KeyParams->ccNextEngineParams.params.ccParams.h_CcNode;
-+ if (p_KeyParams->ccNextEngineParams.h_Manip)
-+ p_AdditionalInfo->h_ManipForAdd =
-+ p_KeyParams->ccNextEngineParams.h_Manip;
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_FR)
-+ && (p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic))
-+ p_AdditionalInfo->h_FrmReplicForAdd =
-+ p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (!add)
-+ {
-+ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ p_AdditionalInfo->h_NodeForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
-+ p_AdditionalInfo->h_ManipForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
-+
-+ /* If statistics were previously enabled, store the old statistics object to be released */
-+ if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ {
-+ p_AdditionalInfo->p_StatsObjForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_FR)
-+ && (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
-+ p_AdditionalInfo->h_FrmReplicForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ return E_OK;
-+}
-+
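-+/* Rebuilds the keys match table and AD table with the entry at keyIndex removed; the
-+   remaining entries and the miss AD are copied over, and the removed entry's handles
-+   are recorded for release. */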
-+static t_Error BuildNewNodeRemoveKey(
-+ t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
-+{
-+ int i = 0, j = 0;
-+ t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
-+ t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
-+ int size;
-+ t_Error err = E_OK;
-+
-+    /* Save the new numOfKeys */
-+ p_AdditionalInfo->numOfKeys = (uint16_t)(p_CcNode->numOfKeys - 1);
-+
-+    /* Allocate the new keys match table and AD table in memory */
-+ err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+    /* Update the new AD table and new keys match table according to the new requirements */
-+ for (i = 0, j = 0; j < p_CcNode->numOfKeys; i++, j++)
-+ {
-+ if (j == keyIndex)
-+ j++;
-+
-+ if (j == p_CcNode->numOfKeys)
-+ break;
-+ p_AdTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i * FM_PCD_CC_AD_ENTRY_SIZE);
-+ p_AdTableOldTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j * FM_PCD_CC_AD_ENTRY_SIZE);
-+ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ p_KeysMatchTableOldTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, j * size * sizeof(uint8_t));
-+ p_KeysMatchTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, i * size * sizeof(uint8_t));
-+ MemCpy8(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
-+ size * sizeof(uint8_t));
-+ }
-+
-+ p_AdTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i * FM_PCD_CC_AD_ENTRY_SIZE);
-+ p_AdTableOldTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j * FM_PCD_CC_AD_ENTRY_SIZE);
-+ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ p_AdditionalInfo->h_NodeForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
-+ p_AdditionalInfo->h_ManipForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
-+
-+ /* If statistics were previously enabled, store the old statistics object to be released */
-+ if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ {
-+ p_AdditionalInfo->p_StatsObjForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_FR)
-+ && (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
-+ p_AdditionalInfo->h_FrmReplicForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ return E_OK;
-+}
-+
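-+/* Rebuilds the tables with a new key (and optional mask) at keyIndex while keeping the
-+   entry's next-engine settings; if statistics are enabled, the entry receives freshly
-+   nullified counters. */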
-+static t_Error BuildNewNodeModifyKey(
-+ t_FmPcdCcNode *p_CcNode, uint16_t keyIndex, uint8_t *p_Key,
-+ uint8_t *p_Mask, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ t_Error err = E_OK;
-+ t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
-+ t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
-+ int size;
-+ int i = 0, j = 0;
-+ bool prvLclMask;
-+ t_FmPcdStatsObj *p_StatsObj, tmpStatsObj;
-+ p_AdditionalInfo->numOfKeys = p_CcNode->numOfKeys;
-+
-+ prvLclMask = p_CcNode->lclMask;
-+
-+    /* Check that the new key does not require an update of localMask */
-+ err = UpdateGblMask(p_CcNode, p_CcNode->ccKeySizeAccExtraction, p_Mask);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ /* Update internal data structure with new next engine for the given index */
-+ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key, p_Key,
-+ p_CcNode->userSizeOfExtraction);
-+
-+ if (p_Mask)
-+ memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, p_Mask,
-+ p_CcNode->userSizeOfExtraction);
-+ else
-+ memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, 0xFF,
-+ p_CcNode->userSizeOfExtraction);
-+
-+    /* Build the new keys match table and AD table in memory */
-+ err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+    /* Fill the new AD table and new keys match table */
-+ for (j = 0, i = 0; j < p_AdditionalInfo->numOfKeys; j++, i++)
-+ {
-+ p_AdTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE);
-+ p_AdTableOldTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ if (j == keyIndex)
-+ {
-+ ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
-+ if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ {
-+                /* As statistics were enabled, we need to update the existing
-+                   statistics descriptor with new, nullified counters. */
-+ p_StatsObj = GetStatsObj(p_CcNode);
-+ ASSERT_COND(p_StatsObj);
-+
-+ SetStatsCounters(
-+ p_AdTableNewTmp,
-+ (uint32_t)((XX_VirtToPhys(p_StatsObj->h_StatsCounters)
-+ - p_FmPcd->physicalMuramBase)));
-+
-+ tmpStatsObj.h_StatsAd = p_StatsObj->h_StatsAd;
-+ tmpStatsObj.h_StatsCounters = p_StatsObj->h_StatsCounters;
-+
-+ /* As we need to replace only the counters, we build a new statistics
-+ object that holds the old AD and the new counters - this will be the
-+ currently used statistics object.
-+ The newly allocated AD is not required and may be released back to
-+ the available objects with the previous counters pointer. */
-+ p_StatsObj->h_StatsAd =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd;
-+
-+ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd =
-+ tmpStatsObj.h_StatsAd;
-+
-+ /* Store allocated statistics object */
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
-+ p_StatsObj;
-+
-+ /* As statistics were previously enabled, store the old statistics object to be released */
-+ p_AdditionalInfo->p_StatsObjForRmv =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
-+ }
-+
-+ p_KeysMatchTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
-+
-+ MemCpy8(p_KeysMatchTableNewTmp, p_Key,
-+ p_CcNode->userSizeOfExtraction);
-+
-+ if (p_CcNode->lclMask)
-+ {
-+ if (p_Mask)
-+ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ p_Mask, p_CcNode->userSizeOfExtraction);
-+ else
-+ if (p_CcNode->ccKeySizeAccExtraction > 4)
-+ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ 0xff, p_CcNode->userSizeOfExtraction);
-+ else
-+ MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ p_CcNode->p_GlblMask,
-+ p_CcNode->userSizeOfExtraction);
-+ }
-+ }
-+ else
-+ {
-+ p_KeysMatchTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
-+ p_KeysMatchTableOldTmp =
-+ PTR_MOVE(p_CcNode->h_KeysMatchTable, i * size * sizeof(uint8_t));
-+
-+ if (p_CcNode->lclMask)
-+ {
-+ if (prvLclMask)
-+ MemCpy8(
-+ PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
-+ PTR_MOVE(p_KeysMatchTableOldTmp, p_CcNode->ccKeySizeAccExtraction),
-+ p_CcNode->userSizeOfExtraction);
-+ else
-+ {
-+ p_KeysMatchTableOldTmp =
-+ PTR_MOVE(p_CcNode->h_KeysMatchTable,
-+ i * (int)p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t));
-+
-+ if (p_CcNode->ccKeySizeAccExtraction > 4)
-+ MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
-+ p_CcNode->ccKeySizeAccExtraction),
-+ 0xff, p_CcNode->userSizeOfExtraction);
-+ else
-+ MemCpy8(
-+ PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
-+ p_CcNode->p_GlblMask,
-+ p_CcNode->userSizeOfExtraction);
-+ }
-+ }
-+ MemCpy8((void*)p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
-+ p_CcNode->ccKeySizeAccExtraction);
-+ }
-+ }
-+
-+ p_AdTableNewTmp =
-+ PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j * FM_PCD_CC_AD_ENTRY_SIZE);
-+ p_AdTableOldTmp = PTR_MOVE(p_CcNode->h_AdTable, i * FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ return E_OK;
-+}
-+
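-+/* Replaces the next-engine parameters of a single entry in a node or tree: the old AD
-+   is queued on h_OldLst, a new AD is built and queued on h_NewLst, and the owning
-+   trees are updated with the required actions. */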
-+static t_Error BuildNewNodeModifyNextEngine(
-+ t_Handle h_FmPcd, t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex,
-+ t_FmPcdCcNextEngineParams *p_CcNextEngineParams, t_List *h_OldLst,
-+ t_List *h_NewLst, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
-+{
-+ t_Error err = E_OK;
-+ uint32_t requiredAction = 0;
-+ t_List *p_Pos;
-+ t_CcNodeInformation *p_CcNodeInformation, ccNodeInfo;
-+ t_Handle p_Ad;
-+ t_FmPcdCcNode *p_FmPcdCcNode1 = NULL;
-+ t_FmPcdCcTree *p_FmPcdCcTree = NULL;
-+ t_FmPcdStatsObj *p_StatsObj;
-+ t_FmPcdCcStatsParams statsParams = { 0 };
-+
-+ ASSERT_COND(p_CcNextEngineParams);
-+
-+ /* check that new NIA is legal */
-+ if (!p_AdditionalInfo->tree)
-+ err = ValidateNextEngineParams(
-+ h_FmPcd, p_CcNextEngineParams,
-+ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->statisticsMode);
-+ else
-+ /* Statistics are not supported for CC root */
-+ err = ValidateNextEngineParams(h_FmPcd, p_CcNextEngineParams,
-+ e_FM_PCD_CC_STATS_MODE_NONE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ /* Update internal data structure for next engine per index (index - key) */
-+ memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams,
-+ p_CcNextEngineParams, sizeof(t_FmPcdCcNextEngineParams));
-+
-+    /* Check that the manip is legal and determine the requiredAction necessary for this manip */
-+ if (p_CcNextEngineParams->h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(p_CcNextEngineParams,
-+ &requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ if (!p_AdditionalInfo->tree)
-+ {
-+ p_FmPcdCcNode1 = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree;
-+ p_AdditionalInfo->numOfKeys = p_FmPcdCcNode1->numOfKeys;
-+ p_Ad = p_FmPcdCcNode1->h_AdTable;
-+
-+ if (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ p_AdditionalInfo->h_NodeForRmv =
-+ p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ if (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
-+ p_AdditionalInfo->h_ManipForRmv =
-+ p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_FR)
-+ && (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
-+ p_AdditionalInfo->h_FrmReplicForRmv =
-+ p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+ else
-+ {
-+ p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree;
-+ p_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
-+
-+ if (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ p_AdditionalInfo->h_NodeForRmv =
-+ p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ if (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
-+ p_AdditionalInfo->h_ManipForRmv =
-+ p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
-+ == e_FM_PCD_FR)
-+ && (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
-+ p_AdditionalInfo->h_FrmReplicForRmv =
-+ p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_CC)
-+ && p_CcNextEngineParams->h_Manip)
-+ {
-+ err = AllocAndFillAdForContLookupManip(
-+ p_CcNextEngineParams->params.ccParams.h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ ASSERT_COND(p_Ad);
-+
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = PTR_MOVE(p_Ad, keyIndex * FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* If statistics were enabled, this Ad is the statistics Ad. Need to follow its
-+ nextAction to retrieve the actual Nia-Ad. If statistics should remain enabled,
-+ only the actual Nia-Ad should be modified. */
-+ if ((!p_AdditionalInfo->tree)
-+ && (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ && (p_CcNextEngineParams->statisticsEn))
-+ ccNodeInfo.h_CcNode =
-+ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd;
-+
-+ EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);
-+
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ p_Ad = GetNewAd(h_FmPcdCcNodeOrTree, p_AdditionalInfo->tree);
-+ if (!p_Ad)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC node action descriptor"));
-+ MemSet8((uint8_t *)p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* If statistics were not enabled before, but requested now - Allocate a statistics
-+ object that holds statistics AD and counters. */
-+ if ((!p_AdditionalInfo->tree)
-+ && (!((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ && (p_CcNextEngineParams->statisticsEn))
-+ {
-+ p_StatsObj = GetStatsObj((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree);
-+ ASSERT_COND(p_StatsObj);
-+
-+ /* Store allocated statistics object */
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
-+ p_StatsObj;
-+
-+ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
-+ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
-+
-+#if (DPAA_VERSION >= 11)
-+ statsParams.h_StatsFLRs =
-+ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_StatsFLRs;
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ NextStepAd(p_Ad, &statsParams, p_CcNextEngineParams, h_FmPcd);
-+ }
-+ else
-+ NextStepAd(p_Ad, NULL, p_CcNextEngineParams, h_FmPcd);
-+
-+ ccNodeInfo.h_CcNode = p_Ad;
-+ EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL);
-+
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction =
-+ requiredAction;
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |=
-+ UPDATE_CC_WITH_TREE;
-+
-+ if (!p_AdditionalInfo->tree)
-+ {
-+ ASSERT_COND(p_FmPcdCcNode1);
-+ if (!LIST_IsEmpty(&p_FmPcdCcNode1->ccTreesLst))
-+ {
-+ LIST_FOR_EACH(p_Pos, &p_FmPcdCcNode1->ccTreesLst)
-+ {
-+ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
-+
-+ ASSERT_COND(p_CcNodeInformation->h_CcNode);
-+                /* Update the manipulation that has to be updated from the port parameters;
-+                   it has to be updated with the restrictions defined in this function */
-+
-+ err =
-+ SetRequiredAction(
-+ p_FmPcdCcNode1->h_FmPcd,
-+ p_FmPcdCcNode1->shadowAction
-+ | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
-+ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
-+ p_Ad, 1, p_CcNodeInformation->h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ err = CcUpdateParam(
-+ p_FmPcdCcNode1->h_FmPcd, NULL, NULL,
-+ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], 1,
-+ p_Ad, TRUE, p_CcNodeInformation->index,
-+ p_CcNodeInformation->h_CcNode, TRUE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+ }
-+ }
-+ else
-+ {
-+ ASSERT_COND(p_FmPcdCcTree);
-+
-+ err =
-+ SetRequiredAction(
-+ h_FmPcd,
-+ p_FmPcdCcTree->requiredAction
-+ | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
-+ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
-+ p_Ad, 1, (t_Handle)p_FmPcdCcTree);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ err = CcUpdateParam(h_FmPcd, NULL, NULL,
-+ &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
-+ 1, p_Ad, TRUE, 0, (t_Handle)p_FmPcdCcTree, TRUE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ if (p_CcNextEngineParams->nextEngine == e_FM_PCD_CC)
-+ p_AdditionalInfo->h_NodeForAdd =
-+ p_CcNextEngineParams->params.ccParams.h_CcNode;
-+ if (p_CcNextEngineParams->h_Manip)
-+ p_AdditionalInfo->h_ManipForAdd = p_CcNextEngineParams->h_Manip;
-+
-+ /* If statistics were previously enabled, but now are disabled,
-+ store the old statistics object to be released */
-+ if ((!p_AdditionalInfo->tree)
-+ && (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ && (!p_CcNextEngineParams->statisticsEn))
-+ {
-+ p_AdditionalInfo->p_StatsObjForRmv =
-+ ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj;
-+
-+
-+ p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj = NULL;
-+ }
-+#if (DPAA_VERSION >= 11)
-+ if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_FR)
-+ && (p_CcNextEngineParams->params.frParams.h_FrmReplic))
-+ p_AdditionalInfo->h_FrmReplicForAdd =
-+ p_CcNextEngineParams->params.frParams.h_FrmReplic;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ return E_OK;
-+}
-+
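-+/* Walks the previous-nodes list, collects into h_OldLst every AD that points to the
-+   node currently being modified, and returns the first matching next-engine
-+   parameters through p_NextEngineParams. */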
-+static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(
-+ t_FmPcdCcNode *p_CrntMdfNode, t_List *h_OldLst,
-+ t_FmPcdCcNextEngineParams **p_NextEngineParams)
-+{
-+ t_CcNodeInformation *p_CcNodeInformation;
-+ t_FmPcdCcNode *p_NodePtrOnCurrentMdfNode = NULL;
-+ t_List *p_Pos;
-+ int i = 0;
-+ t_Handle p_AdTablePtOnCrntCurrentMdfNode/*, p_AdTableNewModified*/;
-+ t_CcNodeInformation ccNodeInfo;
-+
-+ LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccPrevNodesLst)
-+ {
-+ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
-+ p_NodePtrOnCurrentMdfNode =
-+ (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode;
-+
-+ ASSERT_COND(p_NodePtrOnCurrentMdfNode);
-+
-+        /* Search the previous node for the exact index that points to the currently modified node, to retrieve its AD */
-+ for (i = 0; i < p_NodePtrOnCurrentMdfNode->numOfKeys + 1; i++)
-+ {
-+ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ {
-+ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode
-+ == (t_Handle)p_CrntMdfNode)
-+ {
-+ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
-+ p_AdTablePtOnCrntCurrentMdfNode = p_CrntMdfNode->h_Ad;
-+ else
-+ if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj)
-+ p_AdTablePtOnCrntCurrentMdfNode =
-+ p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj->h_StatsAd;
-+ else
-+ p_AdTablePtOnCrntCurrentMdfNode =
-+ PTR_MOVE(p_NodePtrOnCurrentMdfNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode;
-+ EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);
-+
-+ if (!(*p_NextEngineParams))
-+ *p_NextEngineParams =
-+ &p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams;
-+ }
-+ }
-+ }
-+
-+ ASSERT_COND(i != p_NodePtrOnCurrentMdfNode->numOfKeys);
-+ }
-+}
-+
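-+/* Same as above, but walks the trees list: collects into h_OldLst every tree AD that
-+   points to the node currently being modified. */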
-+static void UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(
-+ t_FmPcdCcNode *p_CrntMdfNode, t_List *h_OldLst,
-+ t_FmPcdCcNextEngineParams **p_NextEngineParams)
-+{
-+ t_CcNodeInformation *p_CcNodeInformation;
-+ t_FmPcdCcTree *p_TreePtrOnCurrentMdfNode = NULL;
-+ t_List *p_Pos;
-+ int i = 0;
-+ t_Handle p_AdTableTmp;
-+ t_CcNodeInformation ccNodeInfo;
-+
-+ LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccTreeIdLst)
-+ {
-+ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
-+ p_TreePtrOnCurrentMdfNode =
-+ (t_FmPcdCcTree *)p_CcNodeInformation->h_CcNode;
-+
-+ ASSERT_COND(p_TreePtrOnCurrentMdfNode);
-+
-+        /* Search the trees for the exact index that points to the currently modified node, to retrieve its AD */
-+ for (i = 0; i < p_TreePtrOnCurrentMdfNode->numOfEntries; i++)
-+ {
-+ if (p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ {
-+ if (p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode
-+ == (t_Handle)p_CrntMdfNode)
-+ {
-+ p_AdTableTmp =
-+ UINT_TO_PTR(p_TreePtrOnCurrentMdfNode->ccTreeBaseAddr + i*FM_PCD_CC_AD_ENTRY_SIZE);
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = p_AdTableTmp;
-+ EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);
-+
-+ if (!(*p_NextEngineParams))
-+ *p_NextEngineParams =
-+ &p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams;
-+ }
-+ }
-+ }
-+
-+ ASSERT_COND(i == p_TreePtrOnCurrentMdfNode->numOfEntries);
-+ }
-+}
-+
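-+/* Allocates the modify-key additional-parameters structure and fills its key/next-engine
-+   array from the given node or tree, skipping (remove) or leaving room for (add) the
-+   entry at keyIndex; TTL and hash-indexed nodes are rejected when requested. */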
-+static t_FmPcdModifyCcKeyAdditionalParams * ModifyNodeCommonPart(
-+ t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex,
-+ e_ModifyState modifyState, bool ttlCheck, bool hashCheck, bool tree)
-+{
-+ t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams;
-+ int i = 0, j = 0;
-+ bool wasUpdate = FALSE;
-+ t_FmPcdCcNode *p_CcNode = NULL;
-+ t_FmPcdCcTree *p_FmPcdCcTree;
-+ uint16_t numOfKeys;
-+ t_FmPcdCcKeyAndNextEngineParams *p_KeyAndNextEngineParams;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcdCcNodeOrTree, E_INVALID_HANDLE, NULL);
-+
-+ if (!tree)
-+ {
-+ p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree;
-+ numOfKeys = p_CcNode->numOfKeys;
-+
-+        /* The node has to be pointed to by another node or tree */
-+
-+ p_KeyAndNextEngineParams = (t_FmPcdCcKeyAndNextEngineParams *)XX_Malloc(
-+ sizeof(t_FmPcdCcKeyAndNextEngineParams) * (numOfKeys + 1));
-+ if (!p_KeyAndNextEngineParams)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Next engine and required action structure"));
-+ return NULL;
-+ }
-+ memcpy(p_KeyAndNextEngineParams, p_CcNode->keyAndNextEngineParams,
-+ (numOfKeys + 1) * sizeof(t_FmPcdCcKeyAndNextEngineParams));
-+
-+ if (ttlCheck)
-+ {
-+ if ((p_CcNode->parseCode == CC_PC_FF_IPV4TTL)
-+ || (p_CcNode->parseCode == CC_PC_FF_IPV6HOP_LIMIT))
-+ {
-+ XX_Free(p_KeyAndNextEngineParams);
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("nodeId of CC_PC_FF_IPV4TTL or CC_PC_FF_IPV6HOP_LIMIT can not be used for this operation"));
-+ return NULL;
-+ }
-+ }
-+
-+ if (hashCheck)
-+ {
-+ if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)
-+ {
-+ XX_Free(p_KeyAndNextEngineParams);
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("nodeId of CC_PC_GENERIC_IC_HASH_INDEXED can not be used for this operation"));
-+ return NULL;
-+ }
-+ }
-+ }
-+ else
-+ {
-+ p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree;
-+ numOfKeys = p_FmPcdCcTree->numOfEntries;
-+
-+ p_KeyAndNextEngineParams = (t_FmPcdCcKeyAndNextEngineParams *)XX_Malloc(
-+ sizeof(t_FmPcdCcKeyAndNextEngineParams)
-+ * FM_PCD_MAX_NUM_OF_CC_GROUPS);
-+ if (!p_KeyAndNextEngineParams)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Next engine and required action structure"));
-+ return NULL;
-+ }
-+ memcpy(p_KeyAndNextEngineParams,
-+ p_FmPcdCcTree->keyAndNextEngineParams,
-+ FM_PCD_MAX_NUM_OF_CC_GROUPS
-+ * sizeof(t_FmPcdCcKeyAndNextEngineParams));
-+ }
-+
-+ p_FmPcdModifyCcKeyAdditionalParams =
-+ (t_FmPcdModifyCcKeyAdditionalParams *)XX_Malloc(
-+ sizeof(t_FmPcdModifyCcKeyAdditionalParams));
-+ if (!p_FmPcdModifyCcKeyAdditionalParams)
-+ {
-+ XX_Free(p_KeyAndNextEngineParams);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of internal data structure FAILED"));
-+ return NULL;
-+ }
-+ memset(p_FmPcdModifyCcKeyAdditionalParams, 0,
-+ sizeof(t_FmPcdModifyCcKeyAdditionalParams));
-+
-+ p_FmPcdModifyCcKeyAdditionalParams->h_CurrentNode = h_FmPcdCcNodeOrTree;
-+ p_FmPcdModifyCcKeyAdditionalParams->savedKeyIndex = keyIndex;
-+
-+ while (i < numOfKeys)
-+ {
-+ if ((j == keyIndex) && !wasUpdate)
-+ {
-+ if (modifyState == e_MODIFY_STATE_ADD)
-+ j++;
-+ else
-+ if (modifyState == e_MODIFY_STATE_REMOVE)
-+ i++;
-+ wasUpdate = TRUE;
-+ }
-+ else
-+ {
-+ memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j],
-+ p_KeyAndNextEngineParams + i,
-+ sizeof(t_FmPcdCcKeyAndNextEngineParams));
-+ i++;
-+ j++;
-+ }
-+ }
-+
-+ if (keyIndex == numOfKeys)
-+ {
-+ if (modifyState == e_MODIFY_STATE_ADD)
-+ j++;
-+ }
-+
-+ memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j],
-+ p_KeyAndNextEngineParams + numOfKeys,
-+ sizeof(t_FmPcdCcKeyAndNextEngineParams));
-+
-+ XX_Free(p_KeyAndNextEngineParams);
-+
-+ return p_FmPcdModifyCcKeyAdditionalParams;
-+}
-+
-+static t_Error UpdatePtrWhichPointOnCrntMdfNode(
-+ t_FmPcdCcNode *p_CcNode,
-+ t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams,
-+ t_List *h_OldLst, t_List *h_NewLst)
-+{
-+ t_FmPcdCcNextEngineParams *p_NextEngineParams = NULL;
-+ t_CcNodeInformation ccNodeInfo = { 0 };
-+ t_Handle h_NewAd;
-+ t_Handle h_OrigAd = NULL;
-+
-+    /* Build a list of all action descriptors, in previous nodes and trees, that point to the node being modified */
-+ if (!LIST_IsEmpty(&p_CcNode->ccPrevNodesLst))
-+ UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst,
-+ &p_NextEngineParams);
-+
-+ if (!LIST_IsEmpty(&p_CcNode->ccTreeIdLst))
-+ UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst,
-+ &p_NextEngineParams);
-+
-+    /* This node must be found as the next engine of one of its previous nodes or trees */
-+ if (p_NextEngineParams)
-+ {
-+ /* Building a new action descriptor that points to the modified node */
-+ h_NewAd = GetNewAd(p_CcNode, FALSE);
-+ if (!h_NewAd)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ MemSet8(h_NewAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ h_OrigAd = p_CcNode->h_Ad;
-+ BuildNewAd(h_NewAd, p_FmPcdModifyCcKeyAdditionalParams, p_CcNode,
-+ p_NextEngineParams);
-+
-+ ccNodeInfo.h_CcNode = h_NewAd;
-+ EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL);
-+
-+ if (p_NextEngineParams->h_Manip && !h_OrigAd)
-+ FmPcdManipUpdateOwner(p_NextEngineParams->h_Manip, FALSE);
-+ }
-+ return E_OK;
-+}
-+
-+static void UpdateCcRootOwner(t_FmPcdCcTree *p_FmPcdCcTree, bool add)
-+{
-+ ASSERT_COND(p_FmPcdCcTree);
-+
-+ /* this routine must be protected by the calling routine! */
-+
-+ if (add)
-+ p_FmPcdCcTree->owners++;
-+ else
-+ {
-+ ASSERT_COND(p_FmPcdCcTree->owners);
-+ p_FmPcdCcTree->owners--;
-+ }
-+}
-+
-+static t_Error CheckAndSetManipParamsWithCcNodeParams(t_FmPcdCcNode *p_CcNode)
-+{
-+ t_Error err = E_OK;
-+ int i = 0;
-+
-+ for (i = 0; i < p_CcNode->numOfKeys; i++)
-+ {
-+ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ FmPcdManipCheckParamsWithCcNodeParams(
-+ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip,
-+ (t_Handle)p_CcNode);
-+ if (err)
-+ return err;
-+ }
-+ }
-+
-+ return err;
-+}
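-+
-+/* Validates the statistics configuration against the selected statistics mode and
-+   computes the number of frame-length ranges and the size of the counters array. */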
-+static t_Error ValidateAndCalcStatsParams(t_FmPcdCcNode *p_CcNode,
-+ t_FmPcdCcNodeParams *p_CcNodeParam,
-+ uint32_t *p_NumOfRanges,
-+ uint32_t *p_CountersArraySize)
-+{
-+ e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode;
-+ uint32_t i;
-+
-+ UNUSED(p_CcNodeParam);
-+
-+ switch (statisticsMode)
-+ {
-+ case e_FM_PCD_CC_STATS_MODE_NONE:
-+ for (i = 0; i < p_CcNode->numOfKeys; i++)
-+ if (p_CcNodeParam->keysParams.keyParams[i].ccNextEngineParams.statisticsEn)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("Statistics cannot be enabled for key %d when statistics mode was set to 'NONE'", i));
-+ return E_OK;
-+
-+ case e_FM_PCD_CC_STATS_MODE_FRAME:
-+ case e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME:
-+ *p_NumOfRanges = 1;
-+ *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE;
-+ return E_OK;
-+
-+#if (DPAA_VERSION >= 11)
-+ case e_FM_PCD_CC_STATS_MODE_RMON:
-+ {
-+ uint16_t *p_FrameLengthRanges =
-+ p_CcNodeParam->keysParams.frameLengthRanges;
-+ uint32_t i;
-+
-+ if (p_FrameLengthRanges[0] <= 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Statistics mode"));
-+
-+ if (p_FrameLengthRanges[0] == 0xFFFF)
-+ {
-+ *p_NumOfRanges = 1;
-+ *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE;
-+ return E_OK;
-+ }
-+
-+ for (i = 1; i < FM_PCD_CC_STATS_MAX_NUM_OF_FLR; i++)
-+ {
-+ if (p_FrameLengthRanges[i - 1] >= p_FrameLengthRanges[i])
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("Frame length range must be larger at least by 1 from preceding range"));
-+
-+ /* Stop when last range is reached */
-+ if (p_FrameLengthRanges[i] == 0xFFFF)
-+ break;
-+ }
-+
-+ if ((i >= FM_PCD_CC_STATS_MAX_NUM_OF_FLR)
-+ || (p_FrameLengthRanges[i] != 0xFFFF))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("Last Frame length range must be 0xFFFF"));
-+
-+ *p_NumOfRanges = i + 1;
-+
-+ /* Allocate an extra counter for byte count, as counters
-+ array always begins with byte count */
-+ *p_CountersArraySize = (*p_NumOfRanges + 1)
-+ * FM_PCD_CC_STATS_COUNTER_SIZE;
-+
-+ }
-+ return E_OK;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Statistics mode"));
-+ }
-+}
-+
-+static t_Error CheckParams(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam,
-+ t_FmPcdCcNode *p_CcNode, bool *isKeyTblAlloc)
-+{
-+ int tmp = 0;
-+ t_FmPcdCcKeyParams *p_KeyParams;
-+ t_Error err;
-+ uint32_t requiredAction = 0;
-+
-+ /* Validate statistics parameters */
-+ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam,
-+ &(p_CcNode->numOfStatsFLRs),
-+ &(p_CcNode->countersArraySize));
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters"));
-+
-+ /* Validate next engine parameters on Miss */
-+ err = ValidateNextEngineParams(
-+ h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ p_CcNode->statisticsMode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err,
-+ ("For this node MissNextEngineParams are not valid"));
-+
-+ if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(
-+ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ &requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams,
-+ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction =
-+ requiredAction;
-+
-+ if ((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ AllocAndFillAdForContLookupManip(
-+ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
-+ {
-+ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
-+
-+ if (!p_KeyParams->p_Key)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_Key is not initialized"));
-+
-+ err = ValidateNextEngineParams(h_FmPcd,
-+ &p_KeyParams->ccNextEngineParams,
-+ p_CcNode->statisticsMode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ err = UpdateGblMask(p_CcNode, p_CcNodeParam->keysParams.keySize,
-+ p_KeyParams->p_Mask);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ if (p_KeyParams->ccNextEngineParams.h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(
-+ &p_KeyParams->ccNextEngineParams, &requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ /* Store 'key' parameters - key, mask (if passed by the user) */
-+ memcpy(p_CcNode->keyAndNextEngineParams[tmp].key, p_KeyParams->p_Key,
-+ p_CcNodeParam->keysParams.keySize);
-+
-+ if (p_KeyParams->p_Mask)
-+ memcpy(p_CcNode->keyAndNextEngineParams[tmp].mask,
-+ p_KeyParams->p_Mask, p_CcNodeParam->keysParams.keySize);
-+ else
-+ memset((void *)(p_CcNode->keyAndNextEngineParams[tmp].mask), 0xFF,
-+ p_CcNodeParam->keysParams.keySize);
-+
-+ /* Store next engine parameters */
-+ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams,
-+ &p_KeyParams->ccNextEngineParams,
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction;
-+
-+ if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ AllocAndFillAdForContLookupManip(
-+ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+ }
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ if (p_CcNode->maxNumOfKeys < p_CcNode->numOfKeys)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("Number of keys exceed the provided maximal number of keys"));
-+ }
-+
-+ *isKeyTblAlloc = TRUE;
-+
-+ return E_OK;
-+}
-+
-+static t_Error Ipv4TtlOrIpv6HopLimitCheckParams(
-+ t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam,
-+ t_FmPcdCcNode *p_CcNode, bool *isKeyTblAlloc)
-+{
-+ int tmp = 0;
-+ t_FmPcdCcKeyParams *p_KeyParams;
-+ t_Error err;
-+ uint8_t key = 0x01;
-+ uint32_t requiredAction = 0;
-+
-+ if (p_CcNode->numOfKeys != 1)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'numOfKeys' is 1"));
-+
-+ if ((p_CcNodeParam->keysParams.maxNumOfKeys)
-+ && (p_CcNodeParam->keysParams.maxNumOfKeys != 1))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'maxNumOfKeys' is 1"));
-+
-+ /* Validate statistics parameters */
-+ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam,
-+ &(p_CcNode->numOfStatsFLRs),
-+ &(p_CcNode->countersArraySize));
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters"));
-+
-+ err = ValidateNextEngineParams(
-+ h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ p_CcNodeParam->keysParams.statisticsMode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err,
-+ ("For this node MissNextEngineParams are not valid"));
-+
-+ if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(
-+ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ &requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams,
-+ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction =
-+ requiredAction;
-+
-+ if ((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ AllocAndFillAdForContLookupManip(
-+ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
-+ {
-+ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
-+
-+ if (p_KeyParams->p_Mask)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Mask can not be initialized"));
-+
-+ if (memcmp(p_KeyParams->p_Key, &key, 1) != 0)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Key has to be 1"));
-+
-+ err = ValidateNextEngineParams(h_FmPcd,
-+ &p_KeyParams->ccNextEngineParams,
-+ p_CcNode->statisticsMode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+
-+ if (p_KeyParams->ccNextEngineParams.h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(
-+ &p_KeyParams->ccNextEngineParams, &requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+
-+ /* Store 'key' parameters - key (fixed to 0x01), key size of 1 byte and full mask */
-+ p_CcNode->keyAndNextEngineParams[tmp].key[0] = key;
-+ p_CcNode->keyAndNextEngineParams[tmp].mask[0] = 0xFF;
-+
-+ /* Store NextEngine parameters */
-+ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams,
-+ &p_KeyParams->ccNextEngineParams,
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ AllocAndFillAdForContLookupManip(
-+ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+ p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction;
-+ }
-+
-+ *isKeyTblAlloc = FALSE;
-+
-+ return E_OK;
-+}
-+
-+static t_Error IcHashIndexedCheckParams(t_Handle h_FmPcd,
-+ t_FmPcdCcNodeParams *p_CcNodeParam,
-+ t_FmPcdCcNode *p_CcNode,
-+ bool *isKeyTblAlloc)
-+{
-+ int tmp = 0, countOnes = 0;
-+ t_FmPcdCcKeyParams *p_KeyParams;
-+ t_Error err;
-+ uint16_t glblMask = p_CcNodeParam->extractCcParams.extractNonHdr.icIndxMask;
-+ uint16_t countMask = (uint16_t)(glblMask >> 4);
-+ uint32_t requiredAction = 0;
-+
-+ if (glblMask & 0x000f)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("icIndxMask has to be with last nibble 0"));
-+
-+ while (countMask)
-+ {
-+ countOnes++;
-+ countMask = (uint16_t)(countMask >> 1);
-+ }
-+
-+ if (!POWER_OF_2(p_CcNode->numOfKeys))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For Node of the type INDEXED numOfKeys has to be powerOfTwo"));
-+
-+ if (p_CcNode->numOfKeys != ((uint32_t)1 << countOnes))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For Node of the type IC_HASH_INDEXED numOfKeys has to be powerOfTwo"));
-+
-+ if (p_CcNodeParam->keysParams.maxNumOfKeys
-+ && (p_CcNodeParam->keysParams.maxNumOfKeys != p_CcNode->numOfKeys))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For Node of the type INDEXED 'maxNumOfKeys' should be 0 or equal 'numOfKeys'"));
-+
-+ /* Validate statistics parameters */
-+ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam,
-+ &(p_CcNode->numOfStatsFLRs),
-+ &(p_CcNode->countersArraySize));
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters"));
-+
-+ err = ValidateNextEngineParams(
-+ h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ p_CcNode->statisticsMode);
-+ if (GET_ERROR_TYPE(err) != E_NOT_SUPPORTED)
-+ RETURN_ERROR(
-+ MAJOR,
-+ err,
-+ ("MissNextEngineParams for the node of the type IC_INDEX_HASH has to be UnInitialized"));
-+
-+ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
-+ {
-+ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
-+
-+ if (p_KeyParams->p_Mask || p_KeyParams->p_Key)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For Node of the type IC_HASH_INDEXED p_Key or p_Mask has to be NULL"));
-+
-+ if ((glblMask & (tmp * 16)) == (tmp * 16))
-+ {
-+ err = ValidateNextEngineParams(h_FmPcd,
-+ &p_KeyParams->ccNextEngineParams,
-+ p_CcNode->statisticsMode);
-+ if (err)
-+ RETURN_ERROR(
-+ MAJOR,
-+ err,
-+ ("This index has to be initialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask "));
-+
-+ if (p_KeyParams->ccNextEngineParams.h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(
-+ &p_KeyParams->ccNextEngineParams, &requiredAction);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ p_CcNode->keyAndNextEngineParams[tmp].requiredAction =
-+ requiredAction;
-+ }
-+
-+ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams,
-+ &p_KeyParams->ccNextEngineParams,
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
-+ {
-+ err =
-+ AllocAndFillAdForContLookupManip(
-+ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, (NO_MSG));
-+ }
-+ }
-+ else
-+ {
-+ err = ValidateNextEngineParams(h_FmPcd,
-+ &p_KeyParams->ccNextEngineParams,
-+ p_CcNode->statisticsMode);
-+ if (GET_ERROR_TYPE(err) != E_NOT_SUPPORTED)
-+ RETURN_ERROR(
-+ MAJOR,
-+ err,
-+ ("This index has to be UnInitialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask"));
-+ }
-+ }
-+
-+ *isKeyTblAlloc = FALSE;
-+ cpu_to_be16s(&glblMask);
-+ memcpy(PTR_MOVE(p_CcNode->p_GlblMask, 2), &glblMask, 2);
-+
-+ return E_OK;
-+}
-+
-+static t_Error ModifyNextEngineParamNode(
-+ t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint16_t keyIndex,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
-+ t_FmPcd *p_FmPcd;
-+ t_List h_OldPointersLst, h_NewPointersLst;
-+ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+
-+ if (keyIndex >= p_CcNode->numOfKeys)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("keyIndex > previously cleared last index + 1"));
-+
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+
-+ INIT_LIST(&h_OldPointersLst);
-+ INIT_LIST(&h_NewPointersLst);
-+
-+ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
-+ e_MODIFY_STATE_CHANGE, FALSE,
-+ FALSE, FALSE);
-+ if (!p_ModifyKeyParams)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (p_CcNode->maxNumOfKeys
-+ && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = BuildNewNodeModifyNextEngine(h_FmPcd, p_CcNode, keyIndex,
-+ p_FmPcdCcNextEngineParams,
-+ &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams);
-+ if (err)
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams, FALSE);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ ReleaseLst(&h_OldPointersLst);
-+ ReleaseLst(&h_NewPointersLst);
-+
-+ return err;
-+}
-+
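-+/* Performs a linear search for a key (and, if supplied, its mask) in the node's key
-+   table; returns the matching index or E_NOT_FOUND. */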
-+static t_Error FindKeyIndex(t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key,
-+ uint8_t *p_Mask, uint16_t *p_KeyIndex)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint8_t tmpMask[FM_PCD_MAX_SIZE_OF_KEY];
-+ uint16_t i;
-+
-+ ASSERT_COND(p_Key);
-+ ASSERT_COND(p_KeyIndex);
-+ ASSERT_COND(keySize < FM_PCD_MAX_SIZE_OF_KEY);
-+
-+ if (keySize != p_CcNode->userSizeOfExtraction)
-+ RETURN_ERROR(
-+ MINOR, E_INVALID_VALUE,
-+ ("Key size doesn't match the extraction size of the node"));
-+
-+ /* If user didn't pass a mask for this key, we'll look for full extraction mask */
-+ if (!p_Mask)
-+ memset(tmpMask, 0xFF, keySize);
-+
-+ for (i = 0; i < p_CcNode->numOfKeys; i++)
-+ {
-+ /* Comparing received key */
-+ if (memcmp(p_Key, p_CcNode->keyAndNextEngineParams[i].key, keySize)
-+ == 0)
-+ {
-+ if (p_Mask)
-+ {
-+                /* If the user passed a mask for this key, it must match the existing key's mask for a correct match */
-+ if (memcmp(p_Mask, p_CcNode->keyAndNextEngineParams[i].mask,
-+ keySize) == 0)
-+ {
-+ *p_KeyIndex = i;
-+ return E_OK;
-+ }
-+ }
-+ else
-+ {
-+ /* If user didn't pass a mask for this key, check if the existing key mask is full extraction */
-+ if (memcmp(tmpMask, p_CcNode->keyAndNextEngineParams[i].mask,
-+ keySize) == 0)
-+ {
-+ *p_KeyIndex = i;
-+ return E_OK;
-+ }
-+ }
-+ }
-+ }
-+
-+ return ERROR_CODE(E_NOT_FOUND);
-+}
-+
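-+/* Computes the maximal keys match table and AD table sizes for this node and enlarges
-+   the PCD-wide CC shadow accordingly. */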
-+static t_Error CalcAndUpdateCcShadow(t_FmPcdCcNode *p_CcNode,
-+ bool isKeyTblAlloc,
-+ uint32_t *p_MatchTableSize,
-+ uint32_t *p_AdTableSize)
-+{
-+ uint32_t shadowSize;
-+ t_Error err;
-+
-+    /* Calculate the maximal keys table size - each entry consists of a key and,
-+       if local mask support is requested, a mask */
-+ *p_MatchTableSize = p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t)
-+ * p_CcNode->maxNumOfKeys;
-+
-+ if (p_CcNode->maskSupport)
-+ *p_MatchTableSize *= 2;
-+
-+ /* Calculate next action descriptors table, including one more entry for miss */
-+ *p_AdTableSize = (uint32_t)((p_CcNode->maxNumOfKeys + 1)
-+ * FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+    /* Calculate the maximal shadow size of this node.
-+       All shadow structures are used by the runtime-modification host commands. If a
-+       keys table was allocated for this node, the keys table and next engines table may
-+       be modified at run time (entries added or removed), so shadow tables are required.
-+       Otherwise, the only supported runtime modification is a specific next engine update,
-+       which requires shadow memory of a single AD */
-+
-+ /* Shadow size should be enough to hold the following 3 structures:
-+ * 1 - an action descriptor */
-+ shadowSize = FM_PCD_CC_AD_ENTRY_SIZE;
-+
-+ /* 2 - keys match table, if was allocated for the current node */
-+ if (isKeyTblAlloc)
-+ shadowSize += *p_MatchTableSize;
-+
-+ /* 3 - next action descriptors table */
-+ shadowSize += *p_AdTableSize;
-+
-+ /* Update shadow to the calculated size */
-+ err = FmPcdUpdateCcShadow(p_CcNode->h_FmPcd, (uint32_t)shadowSize,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (err != E_OK)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node shadow"));
-+ }
-+
-+ return E_OK;
-+}
-+
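-+/* Pre-allocates statistics objects (each holding a MURAM statistics AD and a MURAM
-+   counters array) and enqueues them on the node's available-statistics list. */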
-+static t_Error AllocStatsObjs(t_FmPcdCcNode *p_CcNode)
-+{
-+ t_FmPcdStatsObj *p_StatsObj;
-+ t_Handle h_FmMuram, h_StatsAd, h_StatsCounters;
-+ uint32_t i;
-+
-+ h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd);
-+ if (!h_FmMuram)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM"));
-+
-+    /* Allocate statistics ADs and statistics counters. An extra pair (AD + counters)
-+       is allocated to support runtime modifications */
-+ for (i = 0; i < p_CcNode->maxNumOfKeys + 2; i++)
-+ {
-+ /* Allocate list object structure */
-+ p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj));
-+ if (!p_StatsObj)
-+ {
-+ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Statistics object"));
-+ }
-+ memset(p_StatsObj, 0, sizeof(t_FmPcdStatsObj));
-+
-+ /* Allocate statistics AD from MURAM */
-+ h_StatsAd = (t_Handle)FM_MURAM_AllocMem(h_FmMuram,
-+ FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!h_StatsAd)
-+ {
-+ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
-+ XX_Free(p_StatsObj);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for statistics ADs"));
-+ }
-+ MemSet8(h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Allocate statistics counters from MURAM */
-+ h_StatsCounters = (t_Handle)FM_MURAM_AllocMem(
-+ h_FmMuram, p_CcNode->countersArraySize,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!h_StatsCounters)
-+ {
-+ FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
-+ FM_MURAM_FreeMem(h_FmMuram, h_StatsAd);
-+ XX_Free(p_StatsObj);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for statistics counters"));
-+ }
-+ MemSet8(h_StatsCounters, 0, p_CcNode->countersArraySize);
-+
-+ p_StatsObj->h_StatsAd = h_StatsAd;
-+ p_StatsObj->h_StatsCounters = h_StatsCounters;
-+
-+ EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj);
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error MatchTableGetKeyStatistics(
-+ t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
-+ t_FmPcdCcKeyStatistics *p_KeyStatistics)
-+{
-+ uint32_t *p_StatsCounters, i;
-+
-+ if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Statistics were not enabled for this match table"));
-+
-+ if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Statistics were not enabled for this key"));
-+
-+ memset(p_KeyStatistics, 0, sizeof(t_FmPcdCcKeyStatistics));
-+
-+ p_StatsCounters =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters;
-+ ASSERT_COND(p_StatsCounters);
-+
-+ p_KeyStatistics->byteCount = GET_UINT32(*p_StatsCounters);
-+
-+ for (i = 1; i <= p_CcNode->numOfStatsFLRs; i++)
-+ {
-+ p_StatsCounters =
-+ PTR_MOVE(p_StatsCounters, FM_PCD_CC_STATS_COUNTER_SIZE);
-+
-+ p_KeyStatistics->frameCount += GET_UINT32(*p_StatsCounters);
-+
-+#if (DPAA_VERSION >= 11)
-+ p_KeyStatistics->frameLengthRangeCount[i - 1] =
-+ GET_UINT32(*p_StatsCounters);
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ return E_OK;
-+}
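
/* Editor's note (illustrative sketch, not part of the removed patch): the counter
 * walk in MatchTableGetKeyStatistics() above reads the per-key statistics block as
 * a packed array of 32-bit counters: the first holds the byte count and the next
 * numOfStatsFLRs entries hold per frame-length-range frame counts, also summed into
 * a total. COUNTER_SIZE/MAX_FLRS are assumptions; endianness handling is omitted. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define COUNTER_SIZE 4u   /* assumed stand-in for FM_PCD_CC_STATS_COUNTER_SIZE */
#define MAX_FLRS     8u   /* illustrative cap on frame-length ranges */

struct key_stats {
    uint32_t byte_count;
    uint32_t frame_count;
    uint32_t flr_count[MAX_FLRS];
};

static void read_key_stats(const uint8_t *raw, unsigned num_flrs,
                           struct key_stats *out)
{
    uint32_t v;
    unsigned i;

    memset(out, 0, sizeof(*out));
    memcpy(&v, raw, COUNTER_SIZE);          /* counter 0: byte count */
    out->byte_count = v;

    for (i = 1; i <= num_flrs && i <= MAX_FLRS; i++) {
        memcpy(&v, raw + i * COUNTER_SIZE, COUNTER_SIZE);
        out->frame_count += v;              /* running total of frames */
        out->flr_count[i - 1] = v;          /* per-range frame count */
    }
}

int main(void)
{
    uint8_t raw[5 * COUNTER_SIZE] = { 0 };
    struct key_stats s;

    read_key_stats(raw, 4, &s);
    printf("%u %u\n", (unsigned)s.byte_count, (unsigned)s.frame_count);
    return 0;
}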
-+
-+static t_Error MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode,
-+ t_FmPcdCcNodeParams *p_CcNodeParam)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdCcNode *p_FmPcdCcNextNode;
-+ t_Error err = E_OK;
-+ uint32_t tmp, keySize;
-+ bool glblMask = FALSE;
-+ t_FmPcdCcKeyParams *p_KeyParams;
-+ t_Handle h_FmMuram, p_KeysMatchTblTmp, p_AdTableTmp;
-+#if (DPAA_VERSION >= 11)
-+ t_Handle h_StatsFLRs;
-+#endif /* (DPAA_VERSION >= 11) */
-+ bool fullField = FALSE;
-+ ccPrivateInfo_t icCode = CC_PRIVATE_INFO_NONE;
-+ bool isKeyTblAlloc, fromIc = FALSE;
-+ uint32_t matchTableSize, adTableSize;
-+ t_CcNodeInformation ccNodeInfo, *p_CcInformation;
-+ t_FmPcdStatsObj *p_StatsObj;
-+ t_FmPcdCcStatsParams statsParams = { 0 };
-+ t_Handle h_Manip;
-+
-+ ASSERT_COND(h_FmPcd);
-+ ASSERT_COND(p_CcNode);
-+ ASSERT_COND(p_CcNodeParam);
-+
-+ p_CcNode->p_GlblMask = (t_Handle)XX_Malloc(
-+ CC_GLBL_MASK_SIZE * sizeof(uint8_t));
-+ memset(p_CcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
-+
-+ p_CcNode->h_FmPcd = h_FmPcd;
-+ p_CcNode->numOfKeys = p_CcNodeParam->keysParams.numOfKeys;
-+ p_CcNode->maxNumOfKeys = p_CcNodeParam->keysParams.maxNumOfKeys;
-+ p_CcNode->maskSupport = p_CcNodeParam->keysParams.maskSupport;
-+ p_CcNode->statisticsMode = p_CcNodeParam->keysParams.statisticsMode;
-+
-+ /* For backward compatibility - even if the statistics mode is 'none',
-+ force it to frame mode so that per-key statistics can still be requested
-+ via 'statisticsEn' in the next-engine parameters */
-+ if (!p_CcNode->maxNumOfKeys
-+ && (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE))
-+ p_CcNode->statisticsMode = e_FM_PCD_CC_STATS_MODE_FRAME;
-+
-+ h_FmMuram = FmPcdGetMuramHandle(h_FmPcd);
-+ if (!h_FmMuram)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM"));
-+
-+ INIT_LIST(&p_CcNode->ccPrevNodesLst);
-+ INIT_LIST(&p_CcNode->ccTreeIdLst);
-+ INIT_LIST(&p_CcNode->ccTreesLst);
-+ INIT_LIST(&p_CcNode->availableStatsLst);
-+
-+ p_CcNode->h_Spinlock = XX_InitSpinlock();
-+ if (!p_CcNode->h_Spinlock)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock"));
-+ }
-+
-+ if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR)
-+ && ((p_CcNodeParam->extractCcParams.extractByHdr.hdr
-+ == HEADER_TYPE_IPv4)
-+ || (p_CcNodeParam->extractCcParams.extractByHdr.hdr
-+ == HEADER_TYPE_IPv6))
-+ && (p_CcNodeParam->extractCcParams.extractByHdr.type
-+ == e_FM_PCD_EXTRACT_FULL_FIELD)
-+ && ((p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv6
-+ == NET_HEADER_FIELD_IPv6_HOP_LIMIT)
-+ || (p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv4
-+ == NET_HEADER_FIELD_IPv4_TTL)))
-+ {
-+ err = Ipv4TtlOrIpv6HopLimitCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode,
-+ &isKeyTblAlloc);
-+ glblMask = FALSE;
-+ }
-+ else
-+ if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_NON_HDR)
-+ && ((p_CcNodeParam->extractCcParams.extractNonHdr.src
-+ == e_FM_PCD_EXTRACT_FROM_KEY)
-+ || (p_CcNodeParam->extractCcParams.extractNonHdr.src
-+ == e_FM_PCD_EXTRACT_FROM_HASH)
-+ || (p_CcNodeParam->extractCcParams.extractNonHdr.src
-+ == e_FM_PCD_EXTRACT_FROM_FLOW_ID)))
-+ {
-+ if ((p_CcNodeParam->extractCcParams.extractNonHdr.src
-+ == e_FM_PCD_EXTRACT_FROM_FLOW_ID)
-+ && (p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0))
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0"));
-+ }
-+
-+ icCode = IcDefineCode(p_CcNodeParam);
-+ fromIc = TRUE;
-+ if (icCode == CC_PRIVATE_INFO_NONE)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("user asked extraction from IC and field in internal context or action wasn't initialized in the right way"));
-+ }
-+
-+ if ((icCode == CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP)
-+ || (icCode == CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP))
-+ {
-+ err = IcHashIndexedCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode,
-+ &isKeyTblAlloc);
-+ glblMask = TRUE;
-+ }
-+ else
-+ {
-+ err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode,
-+ &isKeyTblAlloc);
-+ if (p_CcNode->glblMaskSize)
-+ glblMask = TRUE;
-+ }
-+ }
-+ else
-+ {
-+ err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc);
-+ if (p_CcNode->glblMaskSize)
-+ glblMask = TRUE;
-+ }
-+
-+ if (err)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ switch (p_CcNodeParam->extractCcParams.type)
-+ {
-+ case (e_FM_PCD_EXTRACT_BY_HDR):
-+ switch (p_CcNodeParam->extractCcParams.extractByHdr.type)
-+ {
-+ case (e_FM_PCD_EXTRACT_FULL_FIELD):
-+ p_CcNode->parseCode =
-+ GetFullFieldParseCode(
-+ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
-+ p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex,
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField);
-+ GetSizeHeaderField(
-+ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField,
-+ &p_CcNode->sizeOfExtraction);
-+ fullField = TRUE;
-+ if ((p_CcNode->parseCode != CC_PC_FF_TCI1)
-+ && (p_CcNode->parseCode != CC_PC_FF_TCI2)
-+ && (p_CcNode->parseCode != CC_PC_FF_MPLS1)
-+ && (p_CcNode->parseCode != CC_PC_FF_MPLS_LAST)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2)
-+ && (p_CcNode->parseCode
-+ != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1)
-+ && (p_CcNode->parseCode != CC_PC_FF_IPDSCP)
-+ && (p_CcNode->parseCode
-+ != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2)
-+ && glblMask)
-+ {
-+ glblMask = FALSE;
-+ p_CcNode->glblMaskSize = 4;
-+ p_CcNode->lclMask = TRUE;
-+ }
-+ break;
-+
-+ case (e_FM_PCD_EXTRACT_FROM_HDR):
-+ p_CcNode->sizeOfExtraction =
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.size;
-+ p_CcNode->offset =
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset;
-+ p_CcNode->userOffset =
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset;
-+ p_CcNode->parseCode =
-+ GetPrParseCode(
-+ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
-+ p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex,
-+ p_CcNode->offset, glblMask,
-+ &p_CcNode->prsArrayOffset);
-+ break;
-+
-+ case (e_FM_PCD_EXTRACT_FROM_FIELD):
-+ p_CcNode->offset =
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset;
-+ p_CcNode->userOffset =
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset;
-+ p_CcNode->sizeOfExtraction =
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.size;
-+ p_CcNode->parseCode =
-+ GetFieldParseCode(
-+ p_CcNodeParam->extractCcParams.extractByHdr.hdr,
-+ p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.field,
-+ p_CcNode->offset,
-+ &p_CcNode->prsArrayOffset,
-+ p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex);
-+ break;
-+
-+ default:
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ break;
-+
-+ case (e_FM_PCD_EXTRACT_NON_HDR):
-+ /* get the field code for the generic extract */
-+ p_CcNode->sizeOfExtraction =
-+ p_CcNodeParam->extractCcParams.extractNonHdr.size;
-+ p_CcNode->offset =
-+ p_CcNodeParam->extractCcParams.extractNonHdr.offset;
-+ p_CcNode->userOffset =
-+ p_CcNodeParam->extractCcParams.extractNonHdr.offset;
-+ p_CcNode->parseCode = GetGenParseCode(
-+ p_CcNodeParam->extractCcParams.extractNonHdr.src,
-+ p_CcNode->offset, glblMask, &p_CcNode->prsArrayOffset,
-+ fromIc, icCode);
-+
-+ if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)
-+ {
-+ if ((p_CcNode->offset + p_CcNode->sizeOfExtraction) > 8)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_SELECTION,
-+ ("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)"));
-+ }
-+ }
-+ if ((p_CcNode->parseCode == CC_PC_GENERIC_IC_GMASK)
-+ || (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED))
-+ {
-+ p_CcNode->offset += p_CcNode->prsArrayOffset;
-+ p_CcNode->prsArrayOffset = 0;
-+ }
-+ break;
-+
-+ default:
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ if (p_CcNode->parseCode == CC_PC_ILLEGAL)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type"));
-+ }
-+
-+ if ((p_CcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY)
-+ || !p_CcNode->sizeOfExtraction)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("sizeOfExatrction can not be greater than 56 and not 0"));
-+ }
-+
-+ if (p_CcNodeParam->keysParams.keySize != p_CcNode->sizeOfExtraction)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("keySize has to be equal to sizeOfExtraction"));
-+ }
-+
-+ p_CcNode->userSizeOfExtraction = p_CcNode->sizeOfExtraction;
-+
-+ if (!glblMask)
-+ memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
-+
-+ err = CheckAndSetManipParamsWithCcNodeParams(p_CcNode);
-+ if (err != E_OK)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("keySize has to be equal to sizeOfExtraction"));
-+ }
-+
-+ /* Calculating matching table entry size by rounding up the user-defined size of extraction to valid entry size */
-+ GetCcExtractKeySize(p_CcNode->sizeOfExtraction,
-+ &p_CcNode->ccKeySizeAccExtraction);
-+
-+ /* If local mask is used, it is stored next to each key in the keys match table */
-+ if (p_CcNode->lclMask)
-+ keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction);
-+ else
-+ keySize = p_CcNode->ccKeySizeAccExtraction;
-+
-+ /* Update CC shadow with maximal size required by this node */
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ err = CalcAndUpdateCcShadow(p_CcNode, isKeyTblAlloc, &matchTableSize,
-+ &adTableSize);
-+ if (err != E_OK)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ p_CcNode->keysMatchTableMaxSize = matchTableSize;
-+
-+ if (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE)
-+ {
-+ err = AllocStatsObjs(p_CcNode);
-+ if (err != E_OK)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+
-+ /* If manipulation will be initialized before this node, it will use the table
-+ descriptor in the AD table of previous node and this node will need an extra
-+ AD as its table descriptor. */
-+ p_CcNode->h_TmpAd = (t_Handle)FM_MURAM_AllocMem(
-+ h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_CcNode->h_TmpAd)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC action descriptor"));
-+ }
-+ }
-+ else
-+ {
-+ matchTableSize = (uint32_t)(keySize * sizeof(uint8_t)
-+ * (p_CcNode->numOfKeys + 1));
-+ adTableSize = (uint32_t)(FM_PCD_CC_AD_ENTRY_SIZE
-+ * (p_CcNode->numOfKeys + 1));
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ switch (p_CcNode->statisticsMode)
-+ {
-+
-+ case e_FM_PCD_CC_STATS_MODE_RMON:
-+ /* If RMON statistics or RMON conditional statistics modes are requested,
-+ allocate frame length ranges array */
-+ p_CcNode->h_StatsFLRs = FM_MURAM_AllocMem(
-+ h_FmMuram,
-+ (uint32_t)(p_CcNode->numOfStatsFLRs)
-+ * FM_PCD_CC_STATS_FLR_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+
-+ if (!p_CcNode->h_StatsFLRs)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(
-+ MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC frame length ranges array"));
-+ }
-+
-+ /* Initialize using value received from the user */
-+ for (tmp = 0; tmp < p_CcNode->numOfStatsFLRs; tmp++)
-+ {
-+ uint16_t flr =
-+ cpu_to_be16(p_CcNodeParam->keysParams.frameLengthRanges[tmp]);
-+
-+ h_StatsFLRs =
-+ PTR_MOVE(p_CcNode->h_StatsFLRs, tmp * FM_PCD_CC_STATS_FLR_SIZE);
-+
-+ MemCpy8(h_StatsFLRs,
-+ &flr,
-+ FM_PCD_CC_STATS_FLR_SIZE);
-+ }
-+ break;
-+
-+ default:
-+ break;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* Allocate keys match table. Not required for some CC nodes, for example for IPv4 TTL
-+ identification, IPv6 hop limit identification, etc. */
-+ if (isKeyTblAlloc)
-+ {
-+ p_CcNode->h_KeysMatchTable = (t_Handle)FM_MURAM_AllocMem(
-+ h_FmMuram, matchTableSize, FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN);
-+ if (!p_CcNode->h_KeysMatchTable)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC node key match table"));
-+ }
-+ MemSet8((uint8_t *)p_CcNode->h_KeysMatchTable, 0, matchTableSize);
-+ }
-+
-+ /* Allocate action descriptors table */
-+ p_CcNode->h_AdTable = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, adTableSize,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_CcNode->h_AdTable)
-+ {
-+ DeleteNode(p_CcNode);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC node action descriptors table"));
-+ }
-+ MemSet8((uint8_t *)p_CcNode->h_AdTable, 0, adTableSize);
-+
-+ p_KeysMatchTblTmp = p_CcNode->h_KeysMatchTable;
-+ p_AdTableTmp = p_CcNode->h_AdTable;
-+
-+ /* For each key, create the key and the next step AD */
-+ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++)
-+ {
-+ p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp];
-+
-+ if (p_KeysMatchTblTmp)
-+ {
-+ /* Copy the key */
-+ MemCpy8((void*)p_KeysMatchTblTmp, p_KeyParams->p_Key,
-+ p_CcNode->sizeOfExtraction);
-+
-+ /* Copy the key mask or initialize it to 0xFF..F */
-+ if (p_CcNode->lclMask && p_KeyParams->p_Mask)
-+ {
-+ MemCpy8(PTR_MOVE(p_KeysMatchTblTmp,
-+ p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */
-+ p_KeyParams->p_Mask, p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */
-+ }
-+ else
-+ if (p_CcNode->lclMask)
-+ {
-+ MemSet8(PTR_MOVE(p_KeysMatchTblTmp,
-+ p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */
-+ 0xff, p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */
-+ }
-+
-+ p_KeysMatchTblTmp =
-+ PTR_MOVE(p_KeysMatchTblTmp, keySize * sizeof(uint8_t));
-+ }
-+
-+ /* Create the next action descriptor in the match table */
-+ if (p_KeyParams->ccNextEngineParams.statisticsEn)
-+ {
-+ p_StatsObj = GetStatsObj(p_CcNode);
-+ ASSERT_COND(p_StatsObj);
-+
-+ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
-+ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
-+#if (DPAA_VERSION >= 11)
-+ statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+ NextStepAd(p_AdTableTmp, &statsParams,
-+ &p_KeyParams->ccNextEngineParams, p_FmPcd);
-+
-+ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj;
-+ }
-+ else
-+ {
-+ NextStepAd(p_AdTableTmp, NULL, &p_KeyParams->ccNextEngineParams,
-+ p_FmPcd);
-+
-+ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL;
-+ }
-+
-+ p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+ }
-+
-+ /* Update next engine for the 'miss' entry */
-+ if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.statisticsEn)
-+ {
-+ p_StatsObj = GetStatsObj(p_CcNode);
-+ ASSERT_COND(p_StatsObj);
-+
-+ /* All 'bucket' nodes of a hash table should share the same statistics counters,
-+ allocated by the hash table. So, if this node is a bucket of a hash table,
-+ we'll replace the locally allocated counters with the shared counters. */
-+ if (p_CcNode->isHashBucket)
-+ {
-+ ASSERT_COND(p_CcNode->h_MissStatsCounters);
-+
-+ /* Store original counters pointer and replace it with mutual preallocated pointer */
-+ p_CcNode->h_PrivMissStatsCounters = p_StatsObj->h_StatsCounters;
-+ p_StatsObj->h_StatsCounters = p_CcNode->h_MissStatsCounters;
-+ }
-+
-+ statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
-+ statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
-+#if (DPAA_VERSION >= 11)
-+ statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ NextStepAd(p_AdTableTmp, &statsParams,
-+ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ p_FmPcd);
-+
-+ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj;
-+ }
-+ else
-+ {
-+ NextStepAd(p_AdTableTmp, NULL,
-+ &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss,
-+ p_FmPcd);
-+
-+ p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL;
-+ }
-+
-+ /* This parameter will be used to initialize the "key length" field in the action descriptor
-+ that points to this node and it should be 0 for full field extraction */
-+ if (fullField == TRUE)
-+ p_CcNode->sizeOfExtraction = 0;
-+
-+ for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++)
-+ {
-+ if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ {
-+ p_FmPcdCcNextNode =
-+ (t_FmPcdCcNode*)p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode;
-+ p_CcInformation = FindNodeInfoInReleventLst(
-+ &p_FmPcdCcNextNode->ccPrevNodesLst, (t_Handle)p_CcNode,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+ if (!p_CcInformation)
-+ {
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = (t_Handle)p_CcNode;
-+ ccNodeInfo.index = 1;
-+ EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccPrevNodesLst,
-+ &ccNodeInfo,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+ }
-+ else
-+ p_CcInformation->index++;
-+
-+ if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip)
-+ {
-+ h_Manip =
-+ p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip;
-+ p_CcInformation = FindNodeInfoInReleventLst(
-+ FmPcdManipGetNodeLstPointedOnThisManip(h_Manip),
-+ (t_Handle)p_CcNode, FmPcdManipGetSpinlock(h_Manip));
-+ if (!p_CcInformation)
-+ {
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = (t_Handle)p_CcNode;
-+ ccNodeInfo.index = 1;
-+ EnqueueNodeInfoToRelevantLst(
-+ FmPcdManipGetNodeLstPointedOnThisManip(h_Manip),
-+ &ccNodeInfo, FmPcdManipGetSpinlock(h_Manip));
-+ }
-+ else
-+ p_CcInformation->index++;
-+ }
-+ }
-+ }
-+
-+ p_AdTableTmp = p_CcNode->h_AdTable;
-+
-+ if (!FmPcdLockTryLockAll(h_FmPcd))
-+ {
-+ FM_PCD_MatchTableDelete((t_Handle)p_CcNode);
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ /* Required action for each next engine */
-+ for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++)
-+ {
-+ if (p_CcNode->keyAndNextEngineParams[tmp].requiredAction)
-+ {
-+ err = SetRequiredAction(
-+ h_FmPcd,
-+ p_CcNode->keyAndNextEngineParams[tmp].requiredAction,
-+ &p_CcNode->keyAndNextEngineParams[tmp], p_AdTableTmp, 1,
-+ NULL);
-+ if (err)
-+ {
-+ FmPcdLockUnlockAll(h_FmPcd);
-+ FM_PCD_MatchTableDelete((t_Handle)p_CcNode);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+ }
-+ }
-+
-+ FmPcdLockUnlockAll(h_FmPcd);
-+
-+ return E_OK;
-+}
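
/* Editor's note (illustrative sketch, not part of the removed patch): when a local
 * mask is in use, the per-key loop in MatchTableSet() above lays out each match-table
 * entry as the key, padded to the rounded-up extraction size, immediately followed by
 * its mask (all-ones when the caller supplied none). The sizes below are assumptions. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void write_match_entry(uint8_t *entry, uint32_t rounded_key_size,
                              const uint8_t *key, const uint8_t *mask,
                              uint32_t extract_size)
{
    /* key occupies the first rounded_key_size bytes of the entry */
    memcpy(entry, key, extract_size);
    /* mask (or the all-ones default) occupies the second half of the entry */
    if (mask)
        memcpy(entry + rounded_key_size, mask, extract_size);
    else
        memset(entry + rounded_key_size, 0xff, extract_size);
}

int main(void)
{
    uint8_t entry[8] = { 0 };
    uint8_t key[3] = { 0xc0, 0xa8, 0x01 };

    /* 3-byte key rounded up to a 4-byte slot, no user mask supplied */
    write_match_entry(entry, 4u, key, NULL, 3u);
    printf("%02x %02x\n", entry[0], entry[4]);
    return 0;
}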
-+/************************** End of static functions **************************/
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+
-+t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info,
-+ t_Handle h_Spinlock)
-+{
-+ t_CcNodeInformation *p_CcInformation;
-+ t_List *p_Pos;
-+ uint32_t intFlags;
-+
-+ intFlags = XX_LockIntrSpinlock(h_Spinlock);
-+
-+ for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List);
-+ p_Pos = LIST_NEXT(p_Pos))
-+ {
-+ p_CcInformation = CC_NODE_F_OBJECT(p_Pos);
-+
-+ ASSERT_COND(p_CcInformation->h_CcNode);
-+
-+ if (p_CcInformation->h_CcNode == h_Info)
-+ {
-+ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
-+ return p_CcInformation;
-+ }
-+ }
-+
-+ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
-+
-+ return NULL;
-+}
-+
-+void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo,
-+ t_Handle h_Spinlock)
-+{
-+ t_CcNodeInformation *p_CcInformation;
-+ uint32_t intFlags = 0;
-+
-+ p_CcInformation = (t_CcNodeInformation *)XX_Malloc(
-+ sizeof(t_CcNodeInformation));
-+
-+ if (p_CcInformation)
-+ {
-+ memset(p_CcInformation, 0, sizeof(t_CcNodeInformation));
-+ memcpy(p_CcInformation, p_CcInfo, sizeof(t_CcNodeInformation));
-+ INIT_LIST(&p_CcInformation->node);
-+
-+ if (h_Spinlock)
-+ intFlags = XX_LockIntrSpinlock(h_Spinlock);
-+
-+ LIST_AddToTail(&p_CcInformation->node, p_List);
-+
-+ if (h_Spinlock)
-+ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
-+ }
-+ else
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Node Information"));
-+}
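
/* Editor's note (illustrative sketch, not part of the removed patch): callers such
 * as MatchTableSet() combine FindNodeInfoInReleventLst() and
 * EnqueueNodeInfoToRelevantLst() above into a find-or-add pattern where 'index'
 * acts as a reference count. A minimal version over a plain singly linked list,
 * without spinlocks and ignoring list order, could look like this. */
#include <stdlib.h>
#include <stdio.h>

struct node_info {
    void *cc_node;             /* the referenced node (cf. h_CcNode) */
    unsigned index;            /* used by callers as a reference count */
    struct node_info *next;
};

static struct node_info *find_or_add(struct node_info **head, void *cc_node)
{
    struct node_info *p;

    for (p = *head; p; p = p->next) {
        if (p->cc_node == cc_node) {
            p->index++;        /* already referenced: bump the count */
            return p;
        }
    }

    p = calloc(1, sizeof(*p)); /* first reference: add a new entry */
    if (!p)
        return NULL;
    p->cc_node = cc_node;
    p->index = 1;
    p->next = *head;
    *head = p;
    return p;
}

int main(void)
{
    struct node_info *head = NULL;
    int some_node;

    find_or_add(&head, &some_node);
    printf("%u\n", find_or_add(&head, &some_node)->index); /* prints 2 */
    return 0;
}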
-+
-+void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info,
-+ t_Handle h_Spinlock)
-+{
-+ t_CcNodeInformation *p_CcInformation = NULL;
-+ uint32_t intFlags = 0;
-+ t_List *p_Pos;
-+
-+ if (h_Spinlock)
-+ intFlags = XX_LockIntrSpinlock(h_Spinlock);
-+
-+ if (LIST_IsEmpty(p_List))
-+ {
-+ XX_RestoreAllIntr(intFlags);
-+ return;
-+ }
-+
-+ for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List);
-+ p_Pos = LIST_NEXT(p_Pos))
-+ {
-+ p_CcInformation = CC_NODE_F_OBJECT(p_Pos);
-+ ASSERT_COND(p_CcInformation);
-+ ASSERT_COND(p_CcInformation->h_CcNode);
-+ if (p_CcInformation->h_CcNode == h_Info)
-+ break;
-+ }
-+
-+ if (p_CcInformation)
-+ {
-+ LIST_DelAndInit(&p_CcInformation->node);
-+ XX_Free(p_CcInformation);
-+ }
-+
-+ if (h_Spinlock)
-+ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
-+}
-+
-+void NextStepAd(t_Handle h_Ad, t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
-+ t_FmPcd *p_FmPcd)
-+{
-+ switch (p_FmPcdCcNextEngineParams->nextEngine)
-+ {
-+ case (e_FM_PCD_KG):
-+ case (e_FM_PCD_PLCR):
-+ case (e_FM_PCD_DONE):
-+ /* if NIA is not CC, create a "result" type AD */
-+ FillAdOfTypeResult(h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
-+ p_FmPcdCcNextEngineParams);
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_FR):
-+ if (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic)
-+ {
-+ FillAdOfTypeContLookup(
-+ h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
-+ p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
-+ p_FmPcdCcNextEngineParams->h_Manip,
-+ p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic);
-+ FrmReplicGroupUpdateOwner(
-+ p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic,
-+ TRUE/* add */);
-+ }
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ case (e_FM_PCD_CC):
-+ /* if NIA is CC, create a TD to continue the CC lookup */
-+ FillAdOfTypeContLookup(
-+ h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
-+ p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
-+ p_FmPcdCcNextEngineParams->h_Manip, NULL);
-+
-+ UpdateNodeOwner(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
-+ TRUE);
-+ break;
-+
-+ default:
-+ return;
-+ }
-+}
-+
-+t_Error FmPcdCcTreeAddIPR(t_Handle h_FmPcd, t_Handle h_FmTree,
-+ t_Handle h_NetEnv, t_Handle h_IpReassemblyManip,
-+ bool createSchemes)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
-+ t_FmPcdCcNextEngineParams nextEngineParams;
-+ t_NetEnvParams netEnvParams;
-+ t_Handle h_Ad;
-+ bool isIpv6Present;
-+ uint8_t ipv4GroupId, ipv6GroupId;
-+ t_Error err;
-+
-+ ASSERT_COND(p_FmPcdCcTree);
-+
-+ /* this routine must be protected by the calling routine! */
-+
-+ memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
-+ memset(&netEnvParams, 0, sizeof(t_NetEnvParams));
-+
-+ h_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
-+
-+ isIpv6Present = FmPcdManipIpReassmIsIpv6Hdr(h_IpReassemblyManip);
-+
-+ if (isIpv6Present
-+ && (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 2)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR"));
-+
-+ if (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR"));
-+
-+ nextEngineParams.nextEngine = e_FM_PCD_DONE;
-+ nextEngineParams.h_Manip = h_IpReassemblyManip;
-+
-+ /* Lock tree */
-+ err = CcRootTryLock(p_FmPcdCcTree);
-+ if (err)
-+ return ERROR_CODE(E_BUSY);
-+
-+ if (p_FmPcdCcTree->h_IpReassemblyManip == h_IpReassemblyManip)
-+ {
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+ return E_OK;
-+ }
-+
-+ if ((p_FmPcdCcTree->h_IpReassemblyManip)
-+ && (p_FmPcdCcTree->h_IpReassemblyManip != h_IpReassemblyManip))
-+ {
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("This tree was previously updated with different IPR"));
-+ }
-+
-+ /* Initialize IPR for the first time for this tree */
-+ if (isIpv6Present)
-+ {
-+ ipv6GroupId = p_FmPcdCcTree->numOfGrps++;
-+ p_FmPcdCcTree->fmPcdGroupParam[ipv6GroupId].baseGroupEntry =
-+ (FM_PCD_MAX_NUM_OF_CC_GROUPS - 2);
-+
-+ if (createSchemes)
-+ {
-+ err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv,
-+ p_FmPcdCcTree,
-+ h_IpReassemblyManip, FALSE,
-+ ipv6GroupId);
-+ if (err)
-+ {
-+ p_FmPcdCcTree->numOfGrps--;
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+
-+ NextStepAd(
-+ PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-2) * FM_PCD_CC_AD_ENTRY_SIZE),
-+ NULL, &nextEngineParams, h_FmPcd);
-+ }
-+
-+ ipv4GroupId = p_FmPcdCcTree->numOfGrps++;
-+ p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].totalBitsMask = 0;
-+ p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].baseGroupEntry =
-+ (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1);
-+
-+ if (createSchemes)
-+ {
-+ err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv, p_FmPcdCcTree,
-+ h_IpReassemblyManip, TRUE,
-+ ipv4GroupId);
-+ if (err)
-+ {
-+ p_FmPcdCcTree->numOfGrps--;
-+ if (isIpv6Present)
-+ {
-+ p_FmPcdCcTree->numOfGrps--;
-+ FmPcdManipDeleteIpReassmSchemes(h_IpReassemblyManip);
-+ }
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+
-+ NextStepAd(
-+ PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-1) * FM_PCD_CC_AD_ENTRY_SIZE),
-+ NULL, &nextEngineParams, h_FmPcd);
-+
-+ p_FmPcdCcTree->h_IpReassemblyManip = h_IpReassemblyManip;
-+
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdCcTreeAddCPR(t_Handle h_FmPcd, t_Handle h_FmTree,
-+ t_Handle h_NetEnv, t_Handle h_ReassemblyManip,
-+ bool createSchemes)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
-+ t_FmPcdCcNextEngineParams nextEngineParams;
-+ t_NetEnvParams netEnvParams;
-+ t_Handle h_Ad;
-+ uint8_t groupId;
-+ t_Error err;
-+
-+ ASSERT_COND(p_FmPcdCcTree);
-+
-+ /* this routine must be protected by the calling routine! */
-+ memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
-+ memset(&netEnvParams, 0, sizeof(t_NetEnvParams));
-+
-+ h_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
-+
-+ if (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need one free entries for CPR"));
-+
-+ nextEngineParams.nextEngine = e_FM_PCD_DONE;
-+ nextEngineParams.h_Manip = h_ReassemblyManip;
-+
-+ /* Lock tree */
-+ err = CcRootTryLock(p_FmPcdCcTree);
-+ if (err)
-+ return ERROR_CODE(E_BUSY);
-+
-+ if (p_FmPcdCcTree->h_CapwapReassemblyManip == h_ReassemblyManip)
-+ {
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+ return E_OK;
-+ }
-+
-+ if ((p_FmPcdCcTree->h_CapwapReassemblyManip)
-+ && (p_FmPcdCcTree->h_CapwapReassemblyManip != h_ReassemblyManip))
-+ {
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("This tree was previously updated with different CPR"));
-+ }
-+
-+ groupId = p_FmPcdCcTree->numOfGrps++;
-+ p_FmPcdCcTree->fmPcdGroupParam[groupId].baseGroupEntry =
-+ (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1);
-+
-+ if (createSchemes)
-+ {
-+ err = FmPcdManipBuildCapwapReassmScheme(h_FmPcd, h_NetEnv,
-+ p_FmPcdCcTree,
-+ h_ReassemblyManip, groupId);
-+ if (err)
-+ {
-+ p_FmPcdCcTree->numOfGrps--;
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+
-+ NextStepAd(
-+ PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-1) * FM_PCD_CC_AD_ENTRY_SIZE),
-+ NULL, &nextEngineParams, h_FmPcd);
-+
-+ p_FmPcdCcTree->h_CapwapReassemblyManip = h_ReassemblyManip;
-+
-+ CcRootReleaseLock(p_FmPcdCcTree);
-+
-+ return E_OK;
-+}
-+
-+t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
-+
-+ ASSERT_COND(p_FmPcdCcTree);
-+
-+ return p_FmPcdCcTree->h_FmPcdCcSavedManipParams;
-+}
-+
-+void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree,
-+ t_Handle h_SavedManipParams)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
-+
-+ ASSERT_COND(p_FmPcdCcTree);
-+
-+ p_FmPcdCcTree->h_FmPcdCcSavedManipParams = h_SavedManipParams;
-+}
-+
-+uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+
-+ ASSERT_COND(p_CcNode);
-+
-+ return p_CcNode->parseCode;
-+}
-+
-+uint8_t FmPcdCcGetOffset(t_Handle h_CcNode)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+
-+ ASSERT_COND(p_CcNode);
-+
-+ return p_CcNode->offset;
-+}
-+
-+uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+
-+ ASSERT_COND(p_CcNode);
-+
-+ return p_CcNode->numOfKeys;
-+}
-+
-+t_Error FmPcdCcModifyNextEngineParamTree(
-+ t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint8_t grpId, uint8_t index,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
-+ t_FmPcd *p_FmPcd;
-+ t_List h_OldPointersLst, h_NewPointersLst;
-+ uint16_t keyIndex;
-+ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((grpId <= 7), E_INVALID_VALUE);
-+
-+ if (grpId >= p_FmPcdCcTree->numOfGrps)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
-+ ("grpId you asked > numOfGroup of relevant tree"));
-+
-+ if (index >= p_FmPcdCcTree->fmPcdGroupParam[grpId].numOfEntriesInGroup)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("index > numOfEntriesInGroup"));
-+
-+ p_FmPcd = (t_FmPcd *)h_FmPcd;
-+
-+ INIT_LIST(&h_OldPointersLst);
-+ INIT_LIST(&h_NewPointersLst);
-+
-+ keyIndex = (uint16_t)(p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry
-+ + index);
-+
-+ p_ModifyKeyParams = ModifyNodeCommonPart(p_FmPcdCcTree, keyIndex,
-+ e_MODIFY_STATE_CHANGE, FALSE,
-+ FALSE, TRUE);
-+ if (!p_ModifyKeyParams)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ p_ModifyKeyParams->tree = TRUE;
-+
-+ if (p_FmPcd->p_CcShadow
-+ && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = BuildNewNodeModifyNextEngine(p_FmPcd, p_FmPcdCcTree, keyIndex,
-+ p_FmPcdCcNextEngineParams,
-+ &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams);
-+ if (err)
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams, FALSE);
-+
-+ if (p_FmPcd->p_CcShadow)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ ReleaseLst(&h_OldPointersLst);
-+ ReleaseLst(&h_NewPointersLst);
-+
-+ return err;
-+
-+}
-+
-+t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
-+ uint16_t keyIndex)
-+{
-+
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
-+ t_List h_OldPointersLst, h_NewPointersLst;
-+ bool useShadowStructs = FALSE;
-+ t_Error err = E_OK;
-+
-+ if (keyIndex >= p_CcNode->numOfKeys)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("impossible to remove key when numOfKeys <= keyIndex"));
-+
-+ if (p_CcNode->h_FmPcd != h_FmPcd)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("handler to FmPcd is different from the handle provided at node initialization time"));
-+
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+
-+ INIT_LIST(&h_OldPointersLst);
-+ INIT_LIST(&h_NewPointersLst);
-+
-+ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
-+ e_MODIFY_STATE_REMOVE, TRUE, TRUE,
-+ FALSE);
-+ if (!p_ModifyKeyParams)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ useShadowStructs = TRUE;
-+ }
-+
-+ err = BuildNewNodeRemoveKey(p_CcNode, keyIndex, p_ModifyKeyParams);
-+ if (err)
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
-+ &h_OldPointersLst,
-+ &h_NewPointersLst);
-+ if (err)
-+ {
-+ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams, useShadowStructs);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ ReleaseLst(&h_OldPointersLst);
-+ ReleaseLst(&h_NewPointersLst);
-+
-+ return err;
-+}
-+
-+t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
-+ uint16_t keyIndex, uint8_t keySize, uint8_t *p_Key,
-+ uint8_t *p_Mask)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
-+ t_FmPcd *p_FmPcd;
-+ t_List h_OldPointersLst, h_NewPointersLst;
-+ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
-+ uint16_t tmpKeyIndex;
-+ bool useShadowStructs = FALSE;
-+ t_Error err = E_OK;
-+
-+ if (keyIndex >= p_CcNode->numOfKeys)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("keyIndex > previously cleared last index + 1"));
-+
-+ if (keySize != p_CcNode->userSizeOfExtraction)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("size for ModifyKey has to be the same as defined in SetNode"));
-+
-+ if (p_CcNode->h_FmPcd != h_FmPcd)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("handler to FmPcd is different from the handle provided at node initialization time"));
-+
-+ err = FindKeyIndex(h_FmPcdCcNode, keySize, p_Key, p_Mask, &tmpKeyIndex);
-+ if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
-+ RETURN_ERROR(
-+ MINOR,
-+ E_ALREADY_EXISTS,
-+ ("The received key and mask pair was already found in the match table of the provided node"));
-+
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+
-+ INIT_LIST(&h_OldPointersLst);
-+ INIT_LIST(&h_NewPointersLst);
-+
-+ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
-+ e_MODIFY_STATE_CHANGE, TRUE, TRUE,
-+ FALSE);
-+ if (!p_ModifyKeyParams)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ useShadowStructs = TRUE;
-+ }
-+
-+ err = BuildNewNodeModifyKey(p_CcNode, keyIndex, p_Key, p_Mask,
-+ p_ModifyKeyParams);
-+ if (err)
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
-+ &h_OldPointersLst,
-+ &h_NewPointersLst);
-+ if (err)
-+ {
-+ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams, useShadowStructs);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ ReleaseLst(&h_OldPointersLst);
-+ ReleaseLst(&h_NewPointersLst);
-+
-+ return err;
-+}
-+
-+t_Error FmPcdCcModifyMissNextEngineParamNode(
-+ t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
-+ t_FmPcd *p_FmPcd;
-+ t_List h_OldPointersLst, h_NewPointersLst;
-+ uint16_t keyIndex;
-+ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_VALUE);
-+
-+ keyIndex = p_CcNode->numOfKeys;
-+
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+
-+ INIT_LIST(&h_OldPointersLst);
-+ INIT_LIST(&h_NewPointersLst);
-+
-+ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
-+ e_MODIFY_STATE_CHANGE, FALSE, TRUE,
-+ FALSE);
-+ if (!p_ModifyKeyParams)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (p_CcNode->maxNumOfKeys
-+ && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = BuildNewNodeModifyNextEngine(h_FmPcd, p_CcNode, keyIndex,
-+ p_FmPcdCcNextEngineParams,
-+ &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams);
-+ if (err)
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams, FALSE);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ ReleaseLst(&h_OldPointersLst);
-+ ReleaseLst(&h_NewPointersLst);
-+
-+ return err;
-+}
-+
-+t_Error FmPcdCcAddKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
-+ uint16_t keyIndex, uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_FmPcdCcKeyParams)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
-+ t_List h_OldPointersLst, h_NewPointersLst;
-+ bool useShadowStructs = FALSE;
-+ uint16_t tmpKeyIndex;
-+ t_Error err = E_OK;
-+
-+ if (keyIndex > p_CcNode->numOfKeys)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
-+ ("keyIndex > previously cleared last index + 1"));
-+
-+ if (keySize != p_CcNode->userSizeOfExtraction)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("keySize has to be defined as it was defined in initialization step"));
-+
-+ if (p_CcNode->h_FmPcd != h_FmPcd)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("handler to FmPcd is different from the handle provided at node initialization time"));
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ if (p_CcNode->numOfKeys == p_CcNode->maxNumOfKeys)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_FULL,
-+ ("number of keys exceeds the maximal number of keys provided at node initialization time"));
-+ }
-+ else
-+ if (p_CcNode->numOfKeys == FM_PCD_MAX_NUM_OF_KEYS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("number of keys can not be larger than %d", FM_PCD_MAX_NUM_OF_KEYS));
-+
-+ err = FindKeyIndex(h_FmPcdCcNode, keySize, p_FmPcdCcKeyParams->p_Key,
-+ p_FmPcdCcKeyParams->p_Mask, &tmpKeyIndex);
-+ if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_ALREADY_EXISTS,
-+ ("The received key and mask pair was already found in the match table of the provided node"));
-+
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+
-+ INIT_LIST(&h_OldPointersLst);
-+ INIT_LIST(&h_NewPointersLst);
-+
-+ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
-+ e_MODIFY_STATE_ADD, TRUE, TRUE,
-+ FALSE);
-+ if (!p_ModifyKeyParams)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ useShadowStructs = TRUE;
-+ }
-+
-+ err = BuildNewNodeAddOrMdfyKeyAndNextEngine(h_FmPcd, p_CcNode, keyIndex,
-+ p_FmPcdCcKeyParams,
-+ p_ModifyKeyParams, TRUE);
-+ if (err)
-+ {
-+ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
-+ &h_OldPointersLst,
-+ &h_NewPointersLst);
-+ if (err)
-+ {
-+ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams, useShadowStructs);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ ReleaseLst(&h_OldPointersLst);
-+ ReleaseLst(&h_NewPointersLst);
-+
-+ return err;
-+}
-+
-+t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
-+ uint16_t keyIndex, uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_FmPcdCcKeyParams)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
-+ t_FmPcd *p_FmPcd;
-+ t_List h_OldPointersLst, h_NewPointersLst;
-+ t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
-+ uint16_t tmpKeyIndex;
-+ bool useShadowStructs = FALSE;
-+ t_Error err = E_OK;
-+
-+ if (keyIndex > p_CcNode->numOfKeys)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("keyIndex > previously cleared last index + 1"));
-+
-+ if (keySize != p_CcNode->userSizeOfExtraction)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("keySize has to be defined as it was defined in initialization step"));
-+
-+ if (p_CcNode->h_FmPcd != h_FmPcd)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("handler to FmPcd is different from the handle provided at node initialization time"));
-+
-+ err = FindKeyIndex(h_FmPcdCcNode, keySize, p_FmPcdCcKeyParams->p_Key,
-+ p_FmPcdCcKeyParams->p_Mask, &tmpKeyIndex);
-+ if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
-+ RETURN_ERROR(
-+ MINOR,
-+ E_ALREADY_EXISTS,
-+ ("The received key and mask pair was already found in the match table of the provided node"));
-+
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+
-+ INIT_LIST(&h_OldPointersLst);
-+ INIT_LIST(&h_NewPointersLst);
-+
-+ p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
-+ e_MODIFY_STATE_CHANGE, TRUE, TRUE,
-+ FALSE);
-+ if (!p_ModifyKeyParams)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ {
-+ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ {
-+ XX_Free(p_ModifyKeyParams);
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ useShadowStructs = TRUE;
-+ }
-+
-+ err = BuildNewNodeAddOrMdfyKeyAndNextEngine(h_FmPcd, p_CcNode, keyIndex,
-+ p_FmPcdCcKeyParams,
-+ p_ModifyKeyParams, FALSE);
-+ if (err)
-+ {
-+ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
-+ &h_OldPointersLst,
-+ &h_NewPointersLst);
-+ if (err)
-+ {
-+ ReleaseNewNodeCommonPart(p_ModifyKeyParams);
-+ XX_Free(p_ModifyKeyParams);
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
-+ p_ModifyKeyParams, useShadowStructs);
-+
-+ if (p_CcNode->maxNumOfKeys)
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ ReleaseLst(&h_OldPointersLst);
-+ ReleaseLst(&h_NewPointersLst);
-+
-+ return err;
-+}
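
/* Editor's note (illustrative sketch, not part of the removed patch): the
 * remove/modify/add-key routines above (FmPcdCcRemoveKey, FmPcdCcModifyKey,
 * FmPcdCcAddKey, FmPcdCcModifyKeyAndNextEngine) share one transactional flow:
 * build replacement tables, collect the pointers that reference the old ones,
 * commit the swap, and take the shared shadow lock only when the node was created
 * with preallocated tables (maxNumOfKeys). The helper names below are placeholder
 * stubs, not FMan driver APIs. */
#include <stdio.h>

static int  build_new_tables(void)  { return 0; }
static int  update_references(void) { return 0; }
static int  commit_change(void)     { return 0; }
static int  try_lock_shadow(void)   { return 1; }
static void unlock_shadow(void)     { }

static int modify_key(int has_prealloc_tables)
{
    int err;

    /* nodes created with maxNumOfKeys share the PCD shadow, so take its lock */
    if (has_prealloc_tables && !try_lock_shadow())
        return -1;                    /* busy: shadow in use elsewhere */

    err = build_new_tables();         /* new match/AD tables              */
    if (!err)
        err = update_references();    /* collect ADs pointing at old node */
    if (!err)
        err = commit_change();        /* atomically swap old for new      */

    if (has_prealloc_tables)
        unlock_shadow();
    return err;
}

int main(void)
{
    printf("%d\n", modify_key(1));
    return 0;
}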
-+
-+uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd,
-+ t_Handle h_Pointer)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_CcNodeInformation *p_CcNodeInfo;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE,
-+ (uint32_t)ILLEGAL_BASE);
-+
-+ p_CcNodeInfo = CC_NODE_F_OBJECT(h_Pointer);
-+
-+ return (uint32_t)(XX_VirtToPhys(p_CcNodeInfo->h_CcNode)
-+ - p_FmPcd->physicalMuramBase);
-+}
-+
-+t_Error FmPcdCcGetGrpParams(t_Handle h_FmPcdCcTree, uint8_t grpId,
-+ uint32_t *p_GrpBits, uint8_t *p_GrpBase)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
-+
-+ if (grpId >= p_FmPcdCcTree->numOfGrps)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
-+ ("grpId you asked > numOfGroup of relevant tree"));
-+
-+ *p_GrpBits = p_FmPcdCcTree->fmPcdGroupParam[grpId].totalBitsMask;
-+ *p_GrpBase = p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry;
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdCcBindTree(t_Handle h_FmPcd, t_Handle h_PcdParams,
-+ t_Handle h_FmPcdCcTree, uint32_t *p_Offset,
-+ t_Handle h_FmPort)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
-+
-+ /* this routine must be protected by the calling routine by locking all PCD modules! */
-+
-+ err = CcUpdateParams(h_FmPcd, h_PcdParams, h_FmPort, h_FmPcdCcTree, TRUE);
-+
-+ if (err == E_OK)
-+ UpdateCcRootOwner(p_FmPcdCcTree, TRUE);
-+
-+ *p_Offset = (uint32_t)(XX_VirtToPhys(
-+ UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr))
-+ - p_FmPcd->physicalMuramBase);
-+
-+ return err;
-+}
-+
-+t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree)
-+{
-+ t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
-+
-+ /* this routine must be protected by the calling routine by locking all PCD modules! */
-+
-+ UNUSED(h_FmPcd);
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
-+
-+ UpdateCcRootOwner(p_FmPcdCcTree, FALSE);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
-+ t_List *p_List)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
-+ t_List *p_Pos, *p_Tmp;
-+ t_CcNodeInformation *p_CcNodeInfo, nodeInfo;
-+ uint32_t intFlags;
-+ t_Error err = E_OK;
-+
-+ intFlags = FmPcdLock(h_FmPcd);
-+
-+ LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst)
-+ {
-+ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
-+ ASSERT_COND(p_CcNodeInfo->h_CcNode);
-+
-+ err = CcRootTryLock(p_CcNodeInfo->h_CcNode);
-+
-+ if (err)
-+ {
-+ LIST_FOR_EACH(p_Tmp, &p_CcNode->ccTreesLst)
-+ {
-+ if (p_Tmp == p_Pos)
-+ break;
-+
-+ CcRootReleaseLock(p_CcNodeInfo->h_CcNode);
-+ }
-+ break;
-+ }
-+
-+ memset(&nodeInfo, 0, sizeof(t_CcNodeInformation));
-+ nodeInfo.h_CcNode = p_CcNodeInfo->h_CcNode;
-+ EnqueueNodeInfoToRelevantLst(p_List, &nodeInfo, NULL);
-+ }
-+
-+ FmPcdUnlock(h_FmPcd, intFlags);
-+ CORE_MemoryBarrier();
-+
-+ return err;
-+}
-+
-+void FmPcdCcNodeTreeReleaseLock(t_Handle h_FmPcd, t_List *p_List)
-+{
-+ t_List *p_Pos;
-+ t_CcNodeInformation *p_CcNodeInfo;
-+ t_Handle h_FmPcdCcTree;
-+ uint32_t intFlags;
-+
-+ intFlags = FmPcdLock(h_FmPcd);
-+
-+ LIST_FOR_EACH(p_Pos, p_List)
-+ {
-+ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
-+ h_FmPcdCcTree = p_CcNodeInfo->h_CcNode;
-+ CcRootReleaseLock(h_FmPcdCcTree);
-+ }
-+
-+ ReleaseLst(p_List);
-+
-+ FmPcdUnlock(h_FmPcd, intFlags);
-+ CORE_MemoryBarrier();
-+}
-+
-+t_Error FmPcdUpdateCcShadow(t_FmPcd *p_FmPcd, uint32_t size, uint32_t align)
-+{
-+ uint32_t intFlags;
-+ uint32_t newSize = 0, newAlign = 0;
-+ bool allocFail = FALSE;
-+
-+ ASSERT_COND(p_FmPcd);
-+
-+ if (!size)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("size must be larger then 0"));
-+
-+ if (!POWER_OF_2(align))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("alignment must be power of 2"));
-+
-+ newSize = p_FmPcd->ccShadowSize;
-+ newAlign = p_FmPcd->ccShadowAlign;
-+
-+ /* Check if current shadow is large enough to hold the requested size */
-+ if (size > p_FmPcd->ccShadowSize)
-+ newSize = size;
-+
-+ /* Check if current shadow matches the requested alignment */
-+ if (align > p_FmPcd->ccShadowAlign)
-+ newAlign = align;
-+
-+ /* If a bigger shadow size or bigger shadow alignment are required,
-+ a new shadow will be allocated */
-+ if ((newSize != p_FmPcd->ccShadowSize)
-+ || (newAlign != p_FmPcd->ccShadowAlign))
-+ {
-+ intFlags = FmPcdLock(p_FmPcd);
-+
-+ if (p_FmPcd->p_CcShadow)
-+ {
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd), p_FmPcd->p_CcShadow);
-+ p_FmPcd->ccShadowSize = 0;
-+ p_FmPcd->ccShadowAlign = 0;
-+ }
-+
-+ p_FmPcd->p_CcShadow = FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
-+ newSize, newAlign);
-+ if (!p_FmPcd->p_CcShadow)
-+ {
-+ allocFail = TRUE;
-+
-+ /* If new shadow size allocation failed,
-+ re-allocate with previous parameters */
-+ p_FmPcd->p_CcShadow = FM_MURAM_AllocMem(
-+ FmPcdGetMuramHandle(p_FmPcd), p_FmPcd->ccShadowSize,
-+ p_FmPcd->ccShadowAlign);
-+ }
-+
-+ FmPcdUnlock(p_FmPcd, intFlags);
-+
-+ if (allocFail)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for CC Shadow memory"));
-+
-+ p_FmPcd->ccShadowSize = newSize;
-+ p_FmPcd->ccShadowAlign = newAlign;
-+ }
-+
-+ return E_OK;
-+}
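
/* Editor's note (illustrative sketch, not part of the removed patch): a simplified
 * model of the grow-only policy in FmPcdUpdateCcShadow() above - the shared shadow
 * is only reallocated when a caller asks for more space, and on allocation failure
 * a buffer of the previous size is restored. malloc()/free() stand in for the MURAM
 * allocator; alignment handling is omitted. */
#include <stdlib.h>
#include <stdint.h>

struct shadow {
    void *buf;
    uint32_t size;
};

static int shadow_update(struct shadow *s, uint32_t req_size)
{
    uint32_t old_size = s->size;

    if (req_size <= s->size)
        return 0;                 /* current shadow is already large enough */

    free(s->buf);
    s->buf = malloc(req_size);
    if (!s->buf) {
        /* fall back to the previous size rather than losing the shadow */
        s->buf = malloc(old_size);
        s->size = s->buf ? old_size : 0;
        return -1;
    }
    s->size = req_size;
    return 0;
}

int main(void)
{
    struct shadow s = { 0, 0 };

    return shadow_update(&s, 256u);
}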
-+
-+#if (DPAA_VERSION >= 11)
-+void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node,
-+ t_Handle h_ReplicGroup,
-+ t_List *p_AdTables,
-+ uint32_t *p_NumOfAdTables)
-+{
-+ t_FmPcdCcNode *p_CurrentNode = (t_FmPcdCcNode *)h_Node;
-+ int i = 0;
-+ void * p_AdTable;
-+ t_CcNodeInformation ccNodeInfo;
-+
-+ ASSERT_COND(h_Node);
-+ *p_NumOfAdTables = 0;
-+
-+ /* search the current node for every index that points to this replicator group, to collect its AD */
-+ for (i = 0; i < p_CurrentNode->numOfKeys + 1; i++)
-+ {
-+ if ((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_FR)
-+ && ((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic
-+ == (t_Handle)h_ReplicGroup)))
-+ {
-+ /* save the current ad table in the list */
-+ /* this entry uses the input replicator group */
-+ p_AdTable =
-+ PTR_MOVE(p_CurrentNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE);
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = p_AdTable;
-+ EnqueueNodeInfoToRelevantLst(p_AdTables, &ccNodeInfo, NULL);
-+ (*p_NumOfAdTables)++;
-+ }
-+ }
-+
-+ ASSERT_COND(i != p_CurrentNode->numOfKeys);
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+/*********************** End of inter-module routines ************************/
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+
-+t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd,
-+ t_FmPcdCcTreeParams *p_PcdGroupsParam)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_Error err = E_OK;
-+ int i = 0, j = 0, k = 0;
-+ t_FmPcdCcTree *p_FmPcdCcTree;
-+ uint8_t numOfEntries;
-+ t_Handle p_CcTreeTmp;
-+ t_FmPcdCcGrpParams *p_FmPcdCcGroupParams;
-+ t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams;
-+ t_NetEnvParams netEnvParams;
-+ uint8_t lastOne = 0;
-+ uint32_t requiredAction = 0;
-+ t_FmPcdCcNode *p_FmPcdCcNextNode;
-+ t_CcNodeInformation ccNodeInfo, *p_CcInformation;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam, E_INVALID_HANDLE, NULL);
-+
-+ if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
-+ return NULL;
-+ }
-+
-+ p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree));
-+ if (!p_FmPcdCcTree)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure"));
-+ return NULL;
-+ }
-+ memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree));
-+ p_FmPcdCcTree->h_FmPcd = h_FmPcd;
-+
-+ p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc(
-+ FM_PCD_MAX_NUM_OF_CC_GROUPS
-+ * sizeof(t_FmPcdCcKeyAndNextEngineParams));
-+ memset(p_Params,
-+ 0,
-+ FM_PCD_MAX_NUM_OF_CC_GROUPS
-+ * sizeof(t_FmPcdCcKeyAndNextEngineParams));
-+
-+ INIT_LIST(&p_FmPcdCcTree->fmPortsLst);
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+ if ((p_PcdGroupsParam->numOfGrps == 1) &&
-+ (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) &&
-+ (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) &&
-+ p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode &&
-+ IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode))
-+ {
-+ p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild();
-+ if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip)
-+ {
-+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+ return NULL;
-+ }
-+ }
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+ numOfEntries = 0;
-+ p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv);
-+
-+ for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++)
-+ {
-+ p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i];
-+
-+ if (p_FmPcdCcGroupParams->numOfDistinctionUnits
-+ > FM_PCD_MAX_NUM_OF_CC_UNITS)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS));
-+ return NULL;
-+ }
-+
-+ p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries;
-+ p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup = (uint8_t)(0x01
-+ << p_FmPcdCcGroupParams->numOfDistinctionUnits);
-+ numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
-+ if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
-+ return NULL;
-+ }
-+
-+ if (lastOne)
-+ {
-+ if (p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order"));
-+ return NULL;
-+ }
-+ }
-+
-+ lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
-+
-+ netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId;
-+ netEnvParams.numOfDistinctionUnits =
-+ p_FmPcdCcGroupParams->numOfDistinctionUnits;
-+
-+ memcpy(netEnvParams.unitIds, &p_FmPcdCcGroupParams->unitIds,
-+ (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits);
-+
-+ err = PcdGetUnitsVector(p_FmPcd, &netEnvParams);
-+ if (err)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return NULL;
-+ }
-+
-+ p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector;
-+ for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
-+ j++)
-+ {
-+ err = ValidateNextEngineParams(
-+ h_FmPcd,
-+ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
-+ e_FM_PCD_CC_STATS_MODE_NONE);
-+ if (err)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, err, (NO_MSG));
-+ return NULL;
-+ }
-+
-+ if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip)
-+ {
-+ err = FmPcdManipCheckParamsForCcNextEngine(
-+ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
-+ &requiredAction);
-+ if (err)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+ return NULL;
-+ }
-+ }
-+ p_KeyAndNextEngineParams = p_Params + k;
-+
-+ memcpy(&p_KeyAndNextEngineParams->nextEngineParams,
-+ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ && p_KeyAndNextEngineParams->nextEngineParams.h_Manip)
-+ {
-+ err =
-+ AllocAndFillAdForContLookupManip(
-+ p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode);
-+ if (err)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
-+ return NULL;
-+ }
-+ }
-+
-+ requiredAction |= UPDATE_CC_WITH_TREE;
-+ p_KeyAndNextEngineParams->requiredAction = requiredAction;
-+
-+ k++;
-+ }
-+ }
-+
-+ p_FmPcdCcTree->numOfEntries = (uint8_t)k;
-+ p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps;
-+
-+ p_FmPcdCcTree->ccTreeBaseAddr =
-+ PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd),
-+ (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE),
-+ FM_PCD_CC_TREE_ADDR_ALIGN));
-+ if (!p_FmPcdCcTree->ccTreeBaseAddr)
-+ {
-+ DeleteTree(p_FmPcdCcTree, p_FmPcd);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
-+ return NULL;
-+ }
-+ MemSet8(
-+ UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0,
-+ (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE));
-+
-+ p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
-+
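-+    /* Write one FM_PCD_CC_AD_ENTRY_SIZE-byte action descriptor per tree entry into the MURAM area allocated above */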
-+ for (i = 0; i < numOfEntries; i++)
-+ {
-+ p_KeyAndNextEngineParams = p_Params + i;
-+
-+ NextStepAd(p_CcTreeTmp, NULL,
-+ &p_KeyAndNextEngineParams->nextEngineParams, p_FmPcd);
-+
-+ p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i],
-+ p_KeyAndNextEngineParams,
-+ sizeof(t_FmPcdCcKeyAndNextEngineParams));
-+
-+ if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ {
-+ p_FmPcdCcNextNode =
-+ (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
-+ p_CcInformation = FindNodeInfoInReleventLst(
-+ &p_FmPcdCcNextNode->ccTreeIdLst, (t_Handle)p_FmPcdCcTree,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+
-+ if (!p_CcInformation)
-+ {
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree;
-+ ccNodeInfo.index = 1;
-+ EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst,
-+ &ccNodeInfo,
-+ p_FmPcdCcNextNode->h_Spinlock);
-+ }
-+ else
-+ p_CcInformation->index++;
-+ }
-+ }
-+
-+ FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId);
-+ p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ FM_PCD_CcRootDelete(p_FmPcdCcTree);
-+ XX_Free(p_Params);
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return NULL;
-+ }
-+
-+ for (i = 0; i < numOfEntries; i++)
-+ {
-+ if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction)
-+ {
-+ err = SetRequiredAction(
-+ h_FmPcd,
-+ p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction,
-+ &p_FmPcdCcTree->keyAndNextEngineParams[i], p_CcTreeTmp, 1,
-+ p_FmPcdCcTree);
-+ if (err)
-+ {
-+ FmPcdLockUnlockAll(p_FmPcd);
-+ FM_PCD_CcRootDelete(p_FmPcdCcTree);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
-+ return NULL;
-+ }
-+ p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-+ }
-+ }
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+ p_FmPcdCcTree->p_Lock = FmPcdAcquireLock(p_FmPcd);
-+ if (!p_FmPcdCcTree->p_Lock)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPcdCcTree);
-+ XX_Free(p_Params);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock"));
-+ return NULL;
-+ }
-+
-+ XX_Free(p_Params);
-+
-+ return p_FmPcdCcTree;
-+}
-+
-+t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
-+ int i = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_CcTree, E_INVALID_STATE);
-+ p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId);
-+
-+ if (p_CcTree->owners)
-+        RETURN_ERROR(
-+                MAJOR,
-+                E_INVALID_SELECTION,
-+                ("This tree cannot be removed because it is occupied; first unbind this tree"));
-+
-+ /* Delete ip-reassembly schemes if exist */
-+ if (p_CcTree->h_IpReassemblyManip)
-+ {
-+ FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip);
-+ FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE);
-+ }
-+
-+ /* Delete capwap-reassembly schemes if exist */
-+ if (p_CcTree->h_CapwapReassemblyManip)
-+ {
-+ FmPcdManipDeleteCapwapReassmSchemes(p_CcTree->h_CapwapReassemblyManip);
-+ FmPcdManipUpdateOwner(p_CcTree->h_CapwapReassemblyManip, FALSE);
-+ }
-+
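-+    /* Drop the references this tree took on next-engine CC nodes, manip nodes and frame-replicator groups */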
-+ for (i = 0; i < p_CcTree->numOfEntries; i++)
-+ {
-+ if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ UpdateNodeOwner(
-+ p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
-+ FALSE);
-+
-+ if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
-+ FmPcdManipUpdateOwner(
-+ p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip,
-+ FALSE);
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+ if ((p_CcTree->numOfGrps == 1) &&
-+ (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) &&
-+ (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) &&
-+ p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode &&
-+ IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode))
-+ {
-+ if (FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK)
-+ return E_INVALID_STATE;
-+ }
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_FR)
-+ && (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic))
-+ FrmReplicGroupUpdateOwner(
-+ p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic,
-+ FALSE);
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ if (p_CcTree->p_Lock)
-+ FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock);
-+
-+ DeleteTree(p_CcTree, p_FmPcd);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_CcRootModifyNextEngine(
-+ t_Handle h_CcTree, uint8_t grpId, uint8_t index,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcTree, E_INVALID_STATE);
-+ p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdCcModifyNextEngineParamTree(p_FmPcd, p_CcTree, grpId, index,
-+ p_FmPcdCcNextEngineParams);
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+ if (err)
-+ {
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd,
-+ t_FmPcdCcNodeParams *p_CcNodeParam)
-+{
-+ t_FmPcdCcNode *p_CcNode;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_CcNodeParam, E_NULL_POINTER, NULL);
-+
-+ p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode));
-+ if (!p_CcNode)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
-+ return NULL;
-+ }
-+ memset(p_CcNode, 0, sizeof(t_FmPcdCcNode));
-+
-+ err = MatchTableSet(h_FmPcd, p_CcNode, p_CcNodeParam);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ break;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return NULL;
-+
-+ default:
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return NULL;
-+ }
-+
-+ return p_CcNode;
-+}
-+
-+t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ int i = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode->h_FmPcd, E_INVALID_HANDLE);
-+
-+ if (p_CcNode->owners)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("This node cannot be removed because it is occupied; first unbind this node"));
-+
-+ for (i = 0; i < p_CcNode->numOfKeys; i++)
-+ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ UpdateNodeOwner(
-+ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
-+ FALSE);
-+
-+ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ UpdateNodeOwner(
-+ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
-+ FALSE);
-+
-+ /* Handle also Miss entry */
-+ for (i = 0; i < p_CcNode->numOfKeys + 1; i++)
-+ {
-+ if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
-+ FmPcdManipUpdateOwner(
-+ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip,
-+ FALSE);
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_FR)
-+ && (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic))
-+ {
-+ FrmReplicGroupUpdateOwner(
-+ p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic,
-+ FALSE);
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ DeleteNode(p_CcNode);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_MatchTableAddKey(t_Handle h_CcNode, uint16_t keyIndex,
-+ uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_KeyParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
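-+    /* FM_PCD_LAST_KEY_INDEX means "append": the new key is placed after the last existing key */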
-+ if (keyIndex == FM_PCD_LAST_KEY_INDEX)
-+ keyIndex = p_CcNode->numOfKeys;
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdCcAddKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_KeyParams);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableRemoveKey(t_Handle h_CcNode, uint16_t keyIndex)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, uint16_t keyIndex,
-+ uint8_t keySize, uint8_t *p_Key,
-+ uint8_t *p_Mask)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_Key, p_Mask);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableModifyNextEngine(
-+ t_Handle h_CcNode, uint16_t keyIndex,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = ModifyNextEngineParamNode(p_FmPcd, p_CcNode, keyIndex,
-+ p_FmPcdCcNextEngineParams);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableModifyMissNextEngine(
-+ t_Handle h_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdCcModifyMissNextEngineParamNode(p_FmPcd, p_CcNode,
-+ p_FmPcdCcNextEngineParams);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableModifyKeyAndNextEngine(t_Handle h_CcNode,
-+ uint16_t keyIndex,
-+ uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_KeyParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, p_CcNode, keyIndex, keySize,
-+ p_KeyParams);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableFindNRemoveKey(t_Handle h_CcNode, uint8_t keySize,
-+ uint8_t *p_Key, uint8_t *p_Mask)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint16_t keyIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
-+ if (GET_ERROR_TYPE(err) != E_OK)
-+ {
-+ FmPcdLockUnlockAll(p_FmPcd);
-+ RETURN_ERROR(
-+ MAJOR,
-+ err,
-+ ("The received key and mask pair was not found in the match table of the provided node"));
-+ }
-+
-+ err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableFindNModifyNextEngine(
-+ t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint16_t keyIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
-+ if (GET_ERROR_TYPE(err) != E_OK)
-+ {
-+ FmPcdLockUnlockAll(p_FmPcd);
-+ RETURN_ERROR(
-+ MAJOR,
-+ err,
-+ ("The received key and mask pair was not found in the match table of the provided node"));
-+ }
-+
-+ err = ModifyNextEngineParamNode(p_FmPcd, p_CcNode, keyIndex,
-+ p_FmPcdCcNextEngineParams);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableFindNModifyKeyAndNextEngine(
-+ t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask,
-+ t_FmPcdCcKeyParams *p_KeyParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint16_t keyIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
-+ if (GET_ERROR_TYPE(err) != E_OK)
-+ {
-+ FmPcdLockUnlockAll(p_FmPcd);
-+ RETURN_ERROR(
-+ MAJOR,
-+ err,
-+ ("The received key and mask pair was not found in the match table of the provided node"));
-+ }
-+
-+ err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, h_CcNode, keyIndex, keySize,
-+ p_KeyParams);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableFindNModifyKey(t_Handle h_CcNode, uint8_t keySize,
-+ uint8_t *p_Key, uint8_t *p_Mask,
-+ uint8_t *p_NewKey, uint8_t *p_NewMask)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ t_List h_List;
-+ uint16_t keyIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_NewKey, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ INIT_LIST(&h_List);
-+
-+ err = FmPcdCcNodeTreeTryLock(p_FmPcd, p_CcNode, &h_List);
-+ if (err)
-+ {
-+ DBG(TRACE, ("Node's trees lock failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
-+ if (GET_ERROR_TYPE(err) != E_OK)
-+ {
-+ FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List);
-+ RETURN_ERROR(MAJOR, err,
-+ ("The received key and mask pair was not found in the "
-+ "match table of the provided node"));
-+ }
-+
-+ err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_NewKey,
-+ p_NewMask);
-+
-+ FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List);
-+
-+    switch (GET_ERROR_TYPE(err))
-+    {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+t_Error FM_PCD_MatchTableGetNextEngine(
-+ t_Handle h_CcNode, uint16_t keyIndex,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+
-+ if (keyIndex >= p_CcNode->numOfKeys)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("keyIndex exceeds current number of keys"));
-+
-+ if (keyIndex > (FM_PCD_MAX_NUM_OF_KEYS - 1))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("keyIndex can not be larger than %d", (FM_PCD_MAX_NUM_OF_KEYS - 1)));
-+
-+ memcpy(p_FmPcdCcNextEngineParams,
-+ &p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams,
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ return E_OK;
-+}
-+
-+
-+uint32_t FM_PCD_MatchTableGetKeyCounter(t_Handle h_CcNode, uint16_t keyIndex)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint32_t *p_StatsCounters, frameCount;
-+ uint32_t intFlags;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_CcNode, E_INVALID_HANDLE, 0);
-+
-+ if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this match table"));
-+ return 0;
-+ }
-+
-+ if ((p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_FRAME)
-+ && (p_CcNode->statisticsMode
-+ != e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Frame count is not supported in the statistics mode of this match table"));
-+ return 0;
-+ }
-+
-+ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
-+
-+ if (keyIndex >= p_CcNode->numOfKeys)
-+ {
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table"));
-+ return 0;
-+ }
-+
-+ if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
-+ {
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this key"));
-+ return 0;
-+ }
-+
-+ p_StatsCounters =
-+ p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters;
-+ ASSERT_COND(p_StatsCounters);
-+
-+    /* The first counter is the byte counter, so advance to the frame counter that follows it */
-+ frameCount = GET_UINT32(*(uint32_t *)(PTR_MOVE(p_StatsCounters,
-+ FM_PCD_CC_STATS_COUNTER_SIZE)));
-+
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+
-+ return frameCount;
-+}
-+
-+t_Error FM_PCD_MatchTableGetKeyStatistics(
-+ t_Handle h_CcNode, uint16_t keyIndex,
-+ t_FmPcdCcKeyStatistics *p_KeyStatistics)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint32_t intFlags;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER);
-+
-+ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
-+
-+ if (keyIndex >= p_CcNode->numOfKeys)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("The provided keyIndex exceeds the number of keys in this match table"));
-+
-+ err = MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics);
-+
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_MatchTableGetMissStatistics(
-+ t_Handle h_CcNode, t_FmPcdCcKeyStatistics *p_MissStatistics)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint32_t intFlags;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER);
-+
-+ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
-+
-+ err = MatchTableGetKeyStatistics(p_CcNode, p_CcNode->numOfKeys,
-+ p_MissStatistics);
-+
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_MatchTableFindNGetKeyStatistics(
-+ t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask,
-+ t_FmPcdCcKeyStatistics *p_KeyStatistics)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint16_t keyIndex;
-+ uint32_t intFlags;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER);
-+
-+ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
-+
-+ err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex);
-+ if (GET_ERROR_TYPE(err) != E_OK)
-+ {
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, err,
-+ ("The received key and mask pair was not found in the "
-+ "match table of the provided node"));
-+ }
-+
-+ ASSERT_COND(keyIndex < p_CcNode->numOfKeys);
-+
-+ err = MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics);
-+
-+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_MatchTableGetIndexedHashBucket(t_Handle h_CcNode,
-+ uint8_t keySize, uint8_t *p_Key,
-+ uint8_t hashShift,
-+ t_Handle *p_CcNodeBucketHandle,
-+ uint8_t *p_BucketIndex,
-+ uint16_t *p_LastIndex)
-+{
-+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
-+ uint16_t glblMask;
-+ uint64_t crc64 = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED,
-+ E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_CcNodeBucketHandle, E_NULL_POINTER);
-+
-+ memcpy(&glblMask, PTR_MOVE(p_CcNode->p_GlblMask, 2), 2);
-+ be16_to_cpus(&glblMask);
-+
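-+    /* Hash the key with CRC64, then take the bucket index from the bits selected by the node's 16-bit global mask at the configured offset */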
-+ crc64 = crc64_init();
-+ crc64 = crc64_compute(p_Key, keySize, crc64);
-+ crc64 >>= hashShift;
-+
-+ *p_BucketIndex = (uint8_t)(((crc64 >> (8 * (6 - p_CcNode->userOffset)))
-+ & glblMask) >> 4);
-+ if (*p_BucketIndex >= p_CcNode->numOfKeys)
-+ RETURN_ERROR(MINOR, E_NOT_IN_RANGE, ("bucket index!"));
-+
-+ *p_CcNodeBucketHandle =
-+ p_CcNode->keyAndNextEngineParams[*p_BucketIndex].nextEngineParams.params.ccParams.h_CcNode;
-+ if (!*p_CcNodeBucketHandle)
-+ RETURN_ERROR(MINOR, E_NOT_FOUND, ("bucket!"));
-+
-+ *p_LastIndex = ((t_FmPcdCcNode *)*p_CcNodeBucketHandle)->numOfKeys;
-+
-+ return E_OK;
-+}
-+
-+t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
-+{
-+ t_FmPcdCcNode *p_CcNodeHashTbl;
-+ t_FmPcdCcNodeParams *p_IndxHashCcNodeParam, *p_ExactMatchCcNodeParam;
-+ t_FmPcdCcNode *p_CcNode;
-+ t_Handle h_MissStatsCounters = NULL;
-+ t_FmPcdCcKeyParams *p_HashKeyParams;
-+ int i;
-+ uint16_t numOfSets, numOfWays, countMask, onesCount = 0;
-+ bool statsEnForMiss = FALSE;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_Param, E_NULL_POINTER, NULL);
-+
-+ if (p_Param->maxNumOfKeys == 0)
-+ {
-+        REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Max number of keys must be greater than 0"));
-+ return NULL;
-+ }
-+
-+ if (p_Param->hashResMask == 0)
-+ {
-+        REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Hash result mask must not be 0"));
-+ return NULL;
-+ }
-+
-+ /*Fix: QorIQ SDK / QSDK-2131*/
-+ if (p_Param->ccNextEngineParamsForMiss.nextEngine == e_FM_PCD_INVALID)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Next PCD Engine for on-miss entry is invalid. On-miss entry is always required. You can use e_FM_PCD_DONE."));
-+ return NULL;
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_RMON)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("RMON statistics mode is not supported for hash table"));
-+ return NULL;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ p_ExactMatchCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc(
-+ sizeof(t_FmPcdCcNodeParams));
-+ if (!p_ExactMatchCcNodeParam)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_ExactMatchCcNodeParam"));
-+ return NULL;
-+ }
-+ memset(p_ExactMatchCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams));
-+
-+ p_IndxHashCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc(
-+ sizeof(t_FmPcdCcNodeParams));
-+ if (!p_IndxHashCcNodeParam)
-+ {
-+ XX_Free(p_ExactMatchCcNodeParam);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_IndxHashCcNodeParam"));
-+ return NULL;
-+ }
-+ memset(p_IndxHashCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams));
-+
-+ /* Calculate number of sets and number of ways of the hash table */
-+ countMask = (uint16_t)(p_Param->hashResMask >> 4);
-+ while (countMask)
-+ {
-+ onesCount++;
-+ countMask = (uint16_t)(countMask >> 1);
-+ }
-+
-+ numOfSets = (uint16_t)(1 << onesCount);
-+ numOfWays = (uint16_t)DIV_CEIL(p_Param->maxNumOfKeys, numOfSets);
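-+    /* numOfSets is 2 to the power of the bit width of (hashResMask >> 4); numOfWays is the per-bucket capacity needed so the sets can hold maxNumOfKeys keys */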
-+
-+ if (p_Param->maxNumOfKeys % numOfSets)
-+        DBG(INFO, ("'maxNumOfKeys' is not a multiple of the number of hash sets, so the number of ways will be rounded up"));
-+
-+ if ((p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_FRAME)
-+ || (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME))
-+ {
-+ /* Allocating a statistics counters table that will be used by all
-+ 'miss' entries of the hash table */
-+ h_MissStatsCounters = (t_Handle)FM_MURAM_AllocMem(
-+ FmPcdGetMuramHandle(h_FmPcd), 2 * FM_PCD_CC_STATS_COUNTER_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!h_MissStatsCounters)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics table for hash miss"));
-+ XX_Free(p_IndxHashCcNodeParam);
-+ XX_Free(p_ExactMatchCcNodeParam);
-+ return NULL;
-+ }
-+ memset(h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE));
-+
-+ /* Always enable statistics for 'miss', so that a statistics AD will be
-+ initialized from the start. We'll store the requested 'statistics enable'
-+ value and it will be used when statistics are read by the user. */
-+ statsEnForMiss = p_Param->ccNextEngineParamsForMiss.statisticsEn;
-+ p_Param->ccNextEngineParamsForMiss.statisticsEn = TRUE;
-+ }
-+
-+ /* Building exact-match node params, will be used to create the hash buckets */
-+ p_ExactMatchCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR;
-+
-+ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.src =
-+ e_FM_PCD_EXTRACT_FROM_KEY;
-+ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.action =
-+ e_FM_PCD_ACTION_EXACT_MATCH;
-+ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.offset = 0;
-+ p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.size =
-+ p_Param->matchKeySize;
-+
-+ p_ExactMatchCcNodeParam->keysParams.maxNumOfKeys = numOfWays;
-+ p_ExactMatchCcNodeParam->keysParams.maskSupport = FALSE;
-+ p_ExactMatchCcNodeParam->keysParams.statisticsMode =
-+ p_Param->statisticsMode;
-+ p_ExactMatchCcNodeParam->keysParams.numOfKeys = 0;
-+ p_ExactMatchCcNodeParam->keysParams.keySize = p_Param->matchKeySize;
-+ p_ExactMatchCcNodeParam->keysParams.ccNextEngineParamsForMiss =
-+ p_Param->ccNextEngineParamsForMiss;
-+
-+ p_HashKeyParams = p_IndxHashCcNodeParam->keysParams.keyParams;
-+
-+ for (i = 0; i < numOfSets; i++)
-+ {
-+ /* Each exact-match node will be marked as a 'bucket' and provided with
-+ a pointer to statistics counters, to be used for 'miss' entry
-+ statistics */
-+ p_CcNode = (t_FmPcdCcNode *)XX_Malloc(sizeof(t_FmPcdCcNode));
-+ if (!p_CcNode)
-+ break;
-+ memset(p_CcNode, 0, sizeof(t_FmPcdCcNode));
-+
-+ p_CcNode->isHashBucket = TRUE;
-+ p_CcNode->h_MissStatsCounters = h_MissStatsCounters;
-+
-+ err = MatchTableSet(h_FmPcd, p_CcNode, p_ExactMatchCcNodeParam);
-+ if (err)
-+ break;
-+
-+ p_HashKeyParams[i].ccNextEngineParams.nextEngine = e_FM_PCD_CC;
-+ p_HashKeyParams[i].ccNextEngineParams.statisticsEn = FALSE;
-+ p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode =
-+ p_CcNode;
-+ }
-+
-+ if (i < numOfSets)
-+ {
-+ for (i = i - 1; i >= 0; i--)
-+ FM_PCD_MatchTableDelete(
-+ p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode);
-+
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters);
-+
-+ REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG);
-+ XX_Free(p_IndxHashCcNodeParam);
-+ XX_Free(p_ExactMatchCcNodeParam);
-+ return NULL;
-+ }
-+
-+ /* Creating indexed-hash CC node */
-+ p_IndxHashCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR;
-+ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.src =
-+ e_FM_PCD_EXTRACT_FROM_HASH;
-+ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.action =
-+ e_FM_PCD_ACTION_INDEXED_LOOKUP;
-+ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.icIndxMask =
-+ p_Param->hashResMask;
-+ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.offset =
-+ p_Param->hashShift;
-+ p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.size = 2;
-+
-+ p_IndxHashCcNodeParam->keysParams.maxNumOfKeys = numOfSets;
-+ p_IndxHashCcNodeParam->keysParams.maskSupport = FALSE;
-+ p_IndxHashCcNodeParam->keysParams.statisticsMode =
-+ e_FM_PCD_CC_STATS_MODE_NONE;
-+ /* Number of keys of this node is number of sets of the hash */
-+ p_IndxHashCcNodeParam->keysParams.numOfKeys = numOfSets;
-+ p_IndxHashCcNodeParam->keysParams.keySize = 2;
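-+    /* This node extracts 2 bytes of the hash result at offset 'hashShift', masks them with 'hashResMask' and uses the result as a direct index into the buckets */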
-+
-+ p_CcNodeHashTbl = FM_PCD_MatchTableSet(h_FmPcd, p_IndxHashCcNodeParam);
-+
-+ if (p_CcNodeHashTbl)
-+ {
-+ p_CcNodeHashTbl->kgHashShift = p_Param->kgHashShift;
-+
-+        /* Store the counters allocated for the buckets' 'miss' entries in the hash
-+           table, together with whether 'miss' statistics were requested. */
-+ p_CcNodeHashTbl->h_MissStatsCounters = h_MissStatsCounters;
-+ p_CcNodeHashTbl->statsEnForMiss = statsEnForMiss;
-+ }
-+
-+ XX_Free(p_IndxHashCcNodeParam);
-+ XX_Free(p_ExactMatchCcNodeParam);
-+
-+ return p_CcNodeHashTbl;
-+}
-+
-+t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_Handle h_FmPcd;
-+ t_Handle *p_HashBuckets, h_MissStatsCounters;
-+ uint16_t i, numOfBuckets;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
-+
-+ /* Store all hash buckets before the hash is freed */
-+ numOfBuckets = p_HashTbl->numOfKeys;
-+
-+ p_HashBuckets = (t_Handle *)XX_Malloc(numOfBuckets * sizeof(t_Handle));
-+ if (!p_HashBuckets)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+
-+ for (i = 0; i < numOfBuckets; i++)
-+ p_HashBuckets[i] =
-+ p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ h_FmPcd = p_HashTbl->h_FmPcd;
-+ h_MissStatsCounters = p_HashTbl->h_MissStatsCounters;
-+
-+ /* Free the hash */
-+ err = FM_PCD_MatchTableDelete(p_HashTbl);
-+
-+ /* Free each hash bucket */
-+ for (i = 0; i < numOfBuckets; i++)
-+ err |= FM_PCD_MatchTableDelete(p_HashBuckets[i]);
-+
-+ XX_Free(p_HashBuckets);
-+
-+ /* Free statistics counters for 'miss', if these were allocated */
-+ if (h_MissStatsCounters)
-+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters);
-+
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_HashTableAddKey(t_Handle h_HashTbl, uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_KeyParams)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_Handle h_HashBucket;
-+ uint8_t bucketIndex;
-+ uint16_t lastIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_KeyParams->p_Key, E_NULL_POINTER);
-+
-+ if (p_KeyParams->p_Mask)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("Keys masks not supported for hash table"));
-+
-+ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize,
-+ p_KeyParams->p_Key,
-+ p_HashTbl->kgHashShift,
-+ &h_HashBucket, &bucketIndex,
-+ &lastIndex);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return FM_PCD_MatchTableAddKey(h_HashBucket, FM_PCD_LAST_KEY_INDEX, keySize,
-+ p_KeyParams);
-+}
-+
-+t_Error FM_PCD_HashTableRemoveKey(t_Handle h_HashTbl, uint8_t keySize,
-+ uint8_t *p_Key)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_Handle h_HashBucket;
-+ uint8_t bucketIndex;
-+ uint16_t lastIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+
-+ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key,
-+ p_HashTbl->kgHashShift,
-+ &h_HashBucket, &bucketIndex,
-+ &lastIndex);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return FM_PCD_MatchTableFindNRemoveKey(h_HashBucket, keySize, p_Key, NULL);
-+}
-+
-+t_Error FM_PCD_HashTableModifyNextEngine(
-+ t_Handle h_HashTbl, uint8_t keySize, uint8_t *p_Key,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_Handle h_HashBucket;
-+ uint8_t bucketIndex;
-+ uint16_t lastIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+
-+ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key,
-+ p_HashTbl->kgHashShift,
-+ &h_HashBucket, &bucketIndex,
-+ &lastIndex);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return FM_PCD_MatchTableFindNModifyNextEngine(h_HashBucket, keySize, p_Key,
-+ NULL,
-+ p_FmPcdCcNextEngineParams);
-+}
-+
-+t_Error FM_PCD_HashTableModifyMissNextEngine(
-+ t_Handle h_HashTbl,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_Handle h_HashBucket;
-+ uint8_t i;
-+ bool nullifyMissStats = FALSE;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_HashTbl, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+
-+ if ((!p_HashTbl->h_MissStatsCounters)
-+ && (p_FmPcdCcNextEngineParams->statisticsEn))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_CONFLICT,
-+                ("Statistics are requested for a key, but statistics mode was set "
-+                 "to 'NONE' upon initialization"));
-+
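-+    /* Statistics ADs for 'miss' remain enabled at all times; only the statsEnForMiss flag and the shared counters change when the caller toggles statisticsEn */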
-+ if (p_HashTbl->h_MissStatsCounters)
-+ {
-+ if ((!p_HashTbl->statsEnForMiss)
-+ && (p_FmPcdCcNextEngineParams->statisticsEn))
-+ nullifyMissStats = TRUE;
-+
-+ if ((p_HashTbl->statsEnForMiss)
-+ && (!p_FmPcdCcNextEngineParams->statisticsEn))
-+ {
-+ p_HashTbl->statsEnForMiss = FALSE;
-+ p_FmPcdCcNextEngineParams->statisticsEn = TRUE;
-+ }
-+ }
-+
-+ for (i = 0; i < p_HashTbl->numOfKeys; i++)
-+ {
-+ h_HashBucket =
-+ p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ err = FM_PCD_MatchTableModifyMissNextEngine(h_HashBucket,
-+ p_FmPcdCcNextEngineParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (nullifyMissStats)
-+ {
-+        memset(p_HashTbl->h_MissStatsCounters, 0,
-+               (2 * FM_PCD_CC_STATS_COUNTER_SIZE));
-+ p_HashTbl->statsEnForMiss = TRUE;
-+ }
-+
-+ return E_OK;
-+}
-+
-+
-+t_Error FM_PCD_HashTableGetMissNextEngine(
-+ t_Handle h_HashTbl,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_FmPcdCcNode *p_HashBucket;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
-+
-+ /* Miss next engine of each bucket was initialized with the next engine of the hash table */
-+ p_HashBucket =
-+ p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ memcpy(p_FmPcdCcNextEngineParams,
-+ &p_HashBucket->keyAndNextEngineParams[p_HashBucket->numOfKeys].nextEngineParams,
-+ sizeof(t_FmPcdCcNextEngineParams));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_HashTableFindNGetKeyStatistics(
-+ t_Handle h_HashTbl, uint8_t keySize, uint8_t *p_Key,
-+ t_FmPcdCcKeyStatistics *p_KeyStatistics)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_Handle h_HashBucket;
-+ uint8_t bucketIndex;
-+ uint16_t lastIndex;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER);
-+
-+ err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key,
-+ p_HashTbl->kgHashShift,
-+ &h_HashBucket, &bucketIndex,
-+ &lastIndex);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return FM_PCD_MatchTableFindNGetKeyStatistics(h_HashBucket, keySize, p_Key,
-+ NULL, p_KeyStatistics);
-+}
-+
-+t_Error FM_PCD_HashTableGetMissStatistics(
-+ t_Handle h_HashTbl, t_FmPcdCcKeyStatistics *p_MissStatistics)
-+{
-+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
-+ t_Handle h_HashBucket;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER);
-+
-+ if (!p_HashTbl->statsEnForMiss)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Statistics were not enabled for miss"));
-+
-+ h_HashBucket =
-+ p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode;
-+
-+ return FM_PCD_MatchTableGetMissStatistics(h_HashBucket, p_MissStatistics);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h
-@@ -0,0 +1,399 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_cc.h
-+
-+ @Description FM PCD CC ...
-+*//***************************************************************************/
-+#ifndef __FM_CC_H
-+#define __FM_CC_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+
-+#include "fm_pcd.h"
-+
-+
-+/***********************************************************************/
-+/* Coarse classification defines */
-+/***********************************************************************/
-+
-+#define CC_MAX_NUM_OF_KEYS (FM_PCD_MAX_NUM_OF_KEYS + 1)
-+
-+#define CC_PC_FF_MACDST 0x00
-+#define CC_PC_FF_MACSRC 0x01
-+#define CC_PC_FF_ETYPE 0x02
-+
-+#define CC_PC_FF_TCI1 0x03
-+#define CC_PC_FF_TCI2 0x04
-+
-+#define CC_PC_FF_MPLS1 0x06
-+#define CC_PC_FF_MPLS_LAST 0x07
-+
-+#define CC_PC_FF_IPV4DST1 0x08
-+#define CC_PC_FF_IPV4DST2 0x16
-+#define CC_PC_FF_IPV4IPTOS_TC1 0x09
-+#define CC_PC_FF_IPV4IPTOS_TC2 0x17
-+#define CC_PC_FF_IPV4PTYPE1 0x0A
-+#define CC_PC_FF_IPV4PTYPE2 0x18
-+#define CC_PC_FF_IPV4SRC1 0x0b
-+#define CC_PC_FF_IPV4SRC2 0x19
-+#define CC_PC_FF_IPV4SRC1_IPV4DST1 0x0c
-+#define CC_PC_FF_IPV4SRC2_IPV4DST2 0x1a
-+#define CC_PC_FF_IPV4TTL 0x29
-+
-+
-+#define CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1 0x0d /*TODO - CLASS - what is it? TOS*/
-+#define CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2 0x1b
-+#define CC_PC_FF_IPV6PTYPE1 0x0e
-+#define CC_PC_FF_IPV6PTYPE2 0x1c
-+#define CC_PC_FF_IPV6DST1 0x0f
-+#define CC_PC_FF_IPV6DST2 0x1d
-+#define CC_PC_FF_IPV6SRC1 0x10
-+#define CC_PC_FF_IPV6SRC2 0x1e
-+#define CC_PC_FF_IPV6HOP_LIMIT 0x2a
-+#define CC_PC_FF_IPPID 0x24
-+#define CC_PC_FF_IPDSCP 0x76
-+
-+#define CC_PC_FF_GREPTYPE 0x11
-+
-+#define CC_PC_FF_MINENCAP_PTYPE 0x12
-+#define CC_PC_FF_MINENCAP_IPDST 0x13
-+#define CC_PC_FF_MINENCAP_IPSRC 0x14
-+#define CC_PC_FF_MINENCAP_IPSRC_IPDST 0x15
-+
-+#define CC_PC_FF_L4PSRC 0x1f
-+#define CC_PC_FF_L4PDST 0x20
-+#define CC_PC_FF_L4PSRC_L4PDST 0x21
-+
-+#define CC_PC_FF_PPPPID 0x05
-+
-+#define CC_PC_PR_SHIM1 0x22
-+#define CC_PC_PR_SHIM2 0x23
-+
-+#define CC_PC_GENERIC_WITHOUT_MASK 0x27
-+#define CC_PC_GENERIC_WITH_MASK 0x28
-+#define CC_PC_GENERIC_IC_GMASK 0x2B
-+#define CC_PC_GENERIC_IC_HASH_INDEXED 0x2C
-+#define CC_PC_GENERIC_IC_AGING_MASK 0x2D
-+
-+#define CC_PR_OFFSET 0x25
-+#define CC_PR_WITHOUT_OFFSET 0x26
-+
-+#define CC_PC_PR_ETH_OFFSET 19
-+#define CC_PC_PR_USER_DEFINED_SHIM1_OFFSET 16
-+#define CC_PC_PR_USER_DEFINED_SHIM2_OFFSET 17
-+#define CC_PC_PR_USER_LLC_SNAP_OFFSET 20
-+#define CC_PC_PR_VLAN1_OFFSET 21
-+#define CC_PC_PR_VLAN2_OFFSET 22
-+#define CC_PC_PR_PPPOE_OFFSET 24
-+#define CC_PC_PR_MPLS1_OFFSET 25
-+#define CC_PC_PR_MPLS_LAST_OFFSET 26
-+#define CC_PC_PR_IP1_OFFSET 27
-+#define CC_PC_PR_IP_LAST_OFFSET 28
-+#define CC_PC_PR_MINENC_OFFSET 28
-+#define CC_PC_PR_L4_OFFSET 30
-+#define CC_PC_PR_GRE_OFFSET 29
-+#define CC_PC_PR_ETYPE_LAST_OFFSET 23
-+#define CC_PC_PR_NEXT_HEADER_OFFSET 31
-+
-+#define CC_PC_ILLEGAL 0xff
-+#define CC_SIZE_ILLEGAL 0
-+
-+#define FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN 16
-+#define FM_PCD_CC_AD_TABLE_ALIGN 16
-+#define FM_PCD_CC_AD_ENTRY_SIZE 16
-+#define FM_PCD_CC_NUM_OF_KEYS 255
-+#define FM_PCD_CC_TREE_ADDR_ALIGN 256
-+
-+#define FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE 0x00000000
-+#define FM_PCD_AD_RESULT_DATA_FLOW_TYPE 0x80000000
-+#define FM_PCD_AD_RESULT_PLCR_DIS 0x20000000
-+#define FM_PCD_AD_RESULT_EXTENDED_MODE 0x80000000
-+#define FM_PCD_AD_RESULT_NADEN 0x20000000
-+#define FM_PCD_AD_RESULT_STATISTICS_EN 0x40000000
-+
-+#define FM_PCD_AD_CONT_LOOKUP_TYPE 0x40000000
-+#define FM_PCD_AD_CONT_LOOKUP_LCL_MASK 0x00800000
-+
-+#define FM_PCD_AD_STATS_TYPE 0x40000000
-+#define FM_PCD_AD_STATS_FLR_ADDR_MASK 0x00FFFFFF
-+#define FM_PCD_AD_STATS_COUNTERS_ADDR_MASK 0x00FFFFFF
-+#define FM_PCD_AD_STATS_NEXT_ACTION_MASK 0xFFFF0000
-+#define FM_PCD_AD_STATS_NEXT_ACTION_SHIFT 12
-+#define FM_PCD_AD_STATS_NAD_EN 0x00008000
-+#define FM_PCD_AD_STATS_OP_CODE 0x00000036
-+#define FM_PCD_AD_STATS_FLR_EN 0x00004000
-+#define FM_PCD_AD_STATS_COND_EN 0x00002000
-+
-+
-+
-+#define FM_PCD_AD_BYPASS_TYPE 0xc0000000
-+
-+#define FM_PCD_AD_TYPE_MASK 0xc0000000
-+#define FM_PCD_AD_OPCODE_MASK 0x0000000f
-+
-+#define FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT 16
-+#if (DPAA_VERSION >= 11)
-+#define FM_PCD_AD_RESULT_VSP_SHIFT 24
-+#define FM_PCD_AD_RESULT_NO_OM_VSPE 0x02000000
-+#define FM_PCD_AD_RESULT_VSP_MASK 0x3f
-+#define FM_PCD_AD_NCSPFQIDM_MASK 0x80000000
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#define GLBL_MASK_FOR_HASH_INDEXED 0xfff00000
-+#define CC_GLBL_MASK_SIZE 4
-+#define CC_AGING_MASK_SIZE 4
-+
-+typedef uint32_t ccPrivateInfo_t; /**< private info of CC: */
-+
-+#define CC_PRIVATE_INFO_NONE 0
-+#define CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP 0x80000000
-+#define CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH 0x40000000
-+#define CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH 0x20000000
-+#define CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP 0x10000000
-+
-+#define CC_BUILD_AGING_MASK(numOfKeys) ((((1LL << ((numOfKeys) + 1)) - 1)) << (31 - (numOfKeys)))
-+/***********************************************************************/
-+/* Memory map */
-+/***********************************************************************/
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+typedef struct
-+{
-+ volatile uint32_t fqid;
-+ volatile uint32_t plcrProfile;
-+ volatile uint32_t nia;
-+ volatile uint32_t res;
-+} t_AdOfTypeResult;
-+
-+typedef struct
-+{
-+ volatile uint32_t ccAdBase;
-+ volatile uint32_t matchTblPtr;
-+ volatile uint32_t pcAndOffsets;
-+ volatile uint32_t gmask;
-+} t_AdOfTypeContLookup;
-+
-+typedef struct
-+{
-+ volatile uint32_t profileTableAddr;
-+ volatile uint32_t reserved;
-+ volatile uint32_t nextActionIndx;
-+ volatile uint32_t statsTableAddr;
-+} t_AdOfTypeStats;
-+
-+typedef union
-+{
-+ volatile t_AdOfTypeResult adResult;
-+ volatile t_AdOfTypeContLookup adContLookup;
-+} t_Ad;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/***********************************************************************/
-+/* Driver's internal structures */
-+/***********************************************************************/
-+
-+typedef struct t_FmPcdStatsObj
-+{
-+ t_Handle h_StatsAd;
-+ t_Handle h_StatsCounters;
-+ t_List node;
-+} t_FmPcdStatsObj;
-+
-+typedef struct
-+{
-+ uint8_t key[FM_PCD_MAX_SIZE_OF_KEY];
-+ uint8_t mask[FM_PCD_MAX_SIZE_OF_KEY];
-+
-+ t_FmPcdCcNextEngineParams nextEngineParams;
-+ uint32_t requiredAction;
-+ uint32_t shadowAction;
-+
-+ t_FmPcdStatsObj *p_StatsObj;
-+
-+} t_FmPcdCcKeyAndNextEngineParams;
-+
-+typedef struct
-+{
-+ t_Handle p_Ad;
-+ e_FmPcdEngine fmPcdEngine;
-+ bool adAllocated;
-+ bool isTree;
-+
-+ uint32_t myInfo;
-+ t_List *h_CcNextNodesLst;
-+ t_Handle h_AdditionalInfo;
-+ t_Handle h_Node;
-+} t_FmPcdModifyCcAdditionalParams;
-+
-+typedef struct
-+{
-+ t_Handle p_AdTableNew;
-+ t_Handle p_KeysMatchTableNew;
-+ t_Handle p_AdTableOld;
-+ t_Handle p_KeysMatchTableOld;
-+ uint16_t numOfKeys;
-+ t_Handle h_CurrentNode;
-+ uint16_t savedKeyIndex;
-+ t_Handle h_NodeForAdd;
-+ t_Handle h_NodeForRmv;
-+ t_Handle h_ManipForRmv;
-+ t_Handle h_ManipForAdd;
-+ t_FmPcdStatsObj *p_StatsObjForRmv;
-+#if (DPAA_VERSION >= 11)
-+ t_Handle h_FrmReplicForAdd;
-+ t_Handle h_FrmReplicForRmv;
-+#endif /* (DPAA_VERSION >= 11) */
-+ bool tree;
-+
-+ t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[CC_MAX_NUM_OF_KEYS];
-+} t_FmPcdModifyCcKeyAdditionalParams;
-+
-+typedef struct
-+{
-+ t_Handle h_Manip;
-+ t_Handle h_CcNode;
-+} t_CcNextEngineInfo;
-+
-+typedef struct
-+{
-+ uint16_t numOfKeys;
-+ uint16_t maxNumOfKeys;
-+
-+ bool maskSupport;
-+ uint32_t keysMatchTableMaxSize;
-+
-+ e_FmPcdCcStatsMode statisticsMode;
-+ uint32_t numOfStatsFLRs;
-+ uint32_t countersArraySize;
-+
-+ bool isHashBucket; /**< Valid for match table node that is a bucket of a hash table only */
-+    t_Handle h_MissStatsCounters;     /**< Valid for a hash table node and for a match table node that is a bucket;
-+                                           holds the statistics counters allocated by the hash table and
-+                                           shared by all hash table buckets; */
-+ t_Handle h_PrivMissStatsCounters; /**< Valid for match table node that is a bucket of a hash table only;
-+ Holds the statistics counters that were allocated for this node
-+ and replaced by the shared counters (allocated by the hash table); */
-+    bool statsEnForMiss;              /**< Valid for hash table node only; TRUE if statistics are currently
-+                                           enabled for hash 'miss', FALSE otherwise; this parameter affects the
-+                                           statistics count returned to the user; a statistics AD is always present
-+                                           for 'miss' in all hash buckets; */
-+ bool glblMaskUpdated;
-+ t_Handle p_GlblMask;
-+ bool lclMask;
-+ uint8_t parseCode;
-+ uint8_t offset;
-+ uint8_t prsArrayOffset;
-+ bool ctrlFlow;
-+ uint16_t owners;
-+
-+ uint8_t ccKeySizeAccExtraction;
-+ uint8_t sizeOfExtraction;
-+ uint8_t glblMaskSize;
-+
-+ t_Handle h_KeysMatchTable;
-+ t_Handle h_AdTable;
-+ t_Handle h_StatsAds;
-+ t_Handle h_TmpAd;
-+ t_Handle h_Ad;
-+ t_Handle h_StatsFLRs;
-+
-+ t_List availableStatsLst;
-+
-+ t_List ccPrevNodesLst;
-+
-+ t_List ccTreeIdLst;
-+ t_List ccTreesLst;
-+
-+ t_Handle h_FmPcd;
-+ uint32_t shadowAction;
-+ uint8_t userSizeOfExtraction;
-+ uint8_t userOffset;
-+ uint8_t kgHashShift; /* used in hash-table */
-+
-+ t_Handle h_Spinlock;
-+
-+ t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[CC_MAX_NUM_OF_KEYS];
-+} t_FmPcdCcNode;
-+
-+typedef struct
-+{
-+ t_FmPcdCcNode *p_FmPcdCcNode;
-+ bool occupied;
-+ uint16_t owners;
-+ volatile bool lock;
-+} t_FmPcdCcNodeArray;
-+
-+typedef struct
-+{
-+ uint8_t numOfEntriesInGroup;
-+ uint32_t totalBitsMask;
-+ uint8_t baseGroupEntry;
-+} t_FmPcdCcGroupParam;
-+
-+typedef struct
-+{
-+ t_Handle h_FmPcd;
-+ uint8_t netEnvId;
-+ uintptr_t ccTreeBaseAddr;
-+ uint8_t numOfGrps;
-+ t_FmPcdCcGroupParam fmPcdGroupParam[FM_PCD_MAX_NUM_OF_CC_GROUPS];
-+ t_List fmPortsLst;
-+ t_FmPcdLock *p_Lock;
-+ uint8_t numOfEntries;
-+ uint16_t owners;
-+ t_Handle h_FmPcdCcSavedManipParams;
-+ bool modifiedState;
-+ uint32_t requiredAction;
-+ t_Handle h_IpReassemblyManip;
-+ t_Handle h_CapwapReassemblyManip;
-+
-+ t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[FM_PCD_MAX_NUM_OF_CC_GROUPS];
-+} t_FmPcdCcTree;
-+
-+
-+t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_List *p_List);
-+void FmPcdCcNodeTreeReleaseLock(t_Handle h_FmPcd, t_List *p_List);
-+t_Error FmPcdUpdateCcShadow (t_FmPcd *p_FmPcd, uint32_t size, uint32_t align);
-+
-+
-+#endif /* __FM_CC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c
-@@ -0,0 +1,3242 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_kg.c
-+
-+ @Description FM PCD ...
-+*//***************************************************************************/
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+#include "net_ext.h"
-+#include "fm_port_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_pcd.h"
-+#include "fm_hc.h"
-+#include "fm_pcd_ipc.h"
-+#include "fm_kg.h"
-+#include "fsl_fman_kg.h"
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+
-+static uint32_t KgHwLock(t_Handle h_FmPcdKg)
-+{
-+ ASSERT_COND(h_FmPcdKg);
-+ return XX_LockIntrSpinlock(((t_FmPcdKg *)h_FmPcdKg)->h_HwSpinlock);
-+}
-+
-+static void KgHwUnlock(t_Handle h_FmPcdKg, uint32_t intFlags)
-+{
-+ ASSERT_COND(h_FmPcdKg);
-+ XX_UnlockIntrSpinlock(((t_FmPcdKg *)h_FmPcdKg)->h_HwSpinlock, intFlags);
-+}
-+
-+static uint32_t KgSchemeLock(t_Handle h_Scheme)
-+{
-+ ASSERT_COND(h_Scheme);
-+ return FmPcdLockSpinlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock);
-+}
-+
-+static void KgSchemeUnlock(t_Handle h_Scheme, uint32_t intFlags)
-+{
-+ ASSERT_COND(h_Scheme);
-+ FmPcdUnlockSpinlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock, intFlags);
-+}
-+
-+static bool KgSchemeFlagTryLock(t_Handle h_Scheme)
-+{
-+ ASSERT_COND(h_Scheme);
-+ return FmPcdLockTryLock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock);
-+}
-+
-+static void KgSchemeFlagUnlock(t_Handle h_Scheme)
-+{
-+ ASSERT_COND(h_Scheme);
-+ FmPcdLockUnlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock);
-+}
-+
-+static t_Error WriteKgarWait(t_FmPcd *p_FmPcd, uint32_t fmkg_ar)
-+{
-+
-+ struct fman_kg_regs *regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+
-+ if (fman_kg_write_ar_wait(regs, fmkg_ar))
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Keygen scheme access violation"));
-+
-+ return E_OK;
-+}
-+
-+static e_FmPcdKgExtractDfltSelect GetGenericSwDefault(t_FmPcdKgExtractDflt swDefaults[], uint8_t numOfSwDefaults, uint8_t code)
-+{
-+ int i;
-+
-+ switch (code)
-+ {
-+ case (KG_SCH_GEN_PARSE_RESULT_N_FQID):
-+ case (KG_SCH_GEN_DEFAULT):
-+ case (KG_SCH_GEN_NEXTHDR):
-+ for (i=0 ; i<numOfSwDefaults ; i++)
-+ if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_NOT_FROM_DATA)
-+ return swDefaults[i].dfltSelect;
-+ break;
-+ case (KG_SCH_GEN_SHIM1):
-+ case (KG_SCH_GEN_SHIM2):
-+ case (KG_SCH_GEN_IP_PID_NO_V):
-+ case (KG_SCH_GEN_ETH_NO_V):
-+ case (KG_SCH_GEN_SNAP_NO_V):
-+ case (KG_SCH_GEN_VLAN1_NO_V):
-+ case (KG_SCH_GEN_VLAN2_NO_V):
-+ case (KG_SCH_GEN_ETH_TYPE_NO_V):
-+ case (KG_SCH_GEN_PPP_NO_V):
-+ case (KG_SCH_GEN_MPLS1_NO_V):
-+ case (KG_SCH_GEN_MPLS_LAST_NO_V):
-+ case (KG_SCH_GEN_L3_NO_V):
-+ case (KG_SCH_GEN_IP2_NO_V):
-+ case (KG_SCH_GEN_GRE_NO_V):
-+ case (KG_SCH_GEN_L4_NO_V):
-+ for (i=0 ; i<numOfSwDefaults ; i++)
-+ if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V)
-+ return swDefaults[i].dfltSelect;
-+ break;
-+ case (KG_SCH_GEN_START_OF_FRM):
-+ case (KG_SCH_GEN_ETH):
-+ case (KG_SCH_GEN_SNAP):
-+ case (KG_SCH_GEN_VLAN1):
-+ case (KG_SCH_GEN_VLAN2):
-+ case (KG_SCH_GEN_ETH_TYPE):
-+ case (KG_SCH_GEN_PPP):
-+ case (KG_SCH_GEN_MPLS1):
-+ case (KG_SCH_GEN_MPLS2):
-+ case (KG_SCH_GEN_MPLS3):
-+ case (KG_SCH_GEN_MPLS_LAST):
-+ case (KG_SCH_GEN_IPV4):
-+ case (KG_SCH_GEN_IPV6):
-+ case (KG_SCH_GEN_IPV4_TUNNELED):
-+ case (KG_SCH_GEN_IPV6_TUNNELED):
-+ case (KG_SCH_GEN_MIN_ENCAP):
-+ case (KG_SCH_GEN_GRE):
-+ case (KG_SCH_GEN_TCP):
-+ case (KG_SCH_GEN_UDP):
-+ case (KG_SCH_GEN_IPSEC_AH):
-+ case (KG_SCH_GEN_SCTP):
-+ case (KG_SCH_GEN_DCCP):
-+ case (KG_SCH_GEN_IPSEC_ESP):
-+ for (i=0 ; i<numOfSwDefaults ; i++)
-+ if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_FROM_DATA)
-+ return swDefaults[i].dfltSelect;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return e_FM_PCD_KG_DFLT_ILLEGAL;
-+}
-+
-+static uint8_t GetGenCode(e_FmPcdExtractFrom src, uint8_t *p_Offset)
-+{
-+ *p_Offset = 0;
-+
-+ switch (src)
-+ {
-+ case (e_FM_PCD_EXTRACT_FROM_FRAME_START):
-+ return KG_SCH_GEN_START_OF_FRM;
-+ case (e_FM_PCD_EXTRACT_FROM_DFLT_VALUE):
-+ return KG_SCH_GEN_DEFAULT;
-+ case (e_FM_PCD_EXTRACT_FROM_PARSE_RESULT):
-+ return KG_SCH_GEN_PARSE_RESULT_N_FQID;
-+ case (e_FM_PCD_EXTRACT_FROM_ENQ_FQID):
-+ *p_Offset = 32;
-+ return KG_SCH_GEN_PARSE_RESULT_N_FQID;
-+ case (e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE):
-+ return KG_SCH_GEN_NEXTHDR;
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src"));
-+ return 0;
-+ }
-+}
-+
-+static uint8_t GetGenHdrCode(e_NetHeaderType hdr, e_FmPcdHdrIndex hdrIndex, bool ignoreProtocolValidation)
-+{
-+ if (!ignoreProtocolValidation)
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ case (HEADER_TYPE_ETH):
-+ return KG_SCH_GEN_ETH;
-+ case (HEADER_TYPE_LLC_SNAP):
-+ return KG_SCH_GEN_SNAP;
-+ case (HEADER_TYPE_PPPoE):
-+ return KG_SCH_GEN_PPP;
-+ case (HEADER_TYPE_MPLS):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_GEN_MPLS1;
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_2)
-+ return KG_SCH_GEN_MPLS2;
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_3)
-+ return KG_SCH_GEN_MPLS3;
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ return KG_SCH_GEN_MPLS_LAST;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index"));
-+ return 0;
-+ case (HEADER_TYPE_IPv4):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_GEN_IPV4;
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_GEN_IPV4_TUNNELED;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 header index"));
-+ return 0;
-+ case (HEADER_TYPE_IPv6):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_GEN_IPV6;
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_GEN_IPV6_TUNNELED;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 header index"));
-+ return 0;
-+ case (HEADER_TYPE_GRE):
-+ return KG_SCH_GEN_GRE;
-+ case (HEADER_TYPE_TCP):
-+ return KG_SCH_GEN_TCP;
-+ case (HEADER_TYPE_UDP):
-+ return KG_SCH_GEN_UDP;
-+ case (HEADER_TYPE_IPSEC_AH):
-+ return KG_SCH_GEN_IPSEC_AH;
-+ case (HEADER_TYPE_IPSEC_ESP):
-+ return KG_SCH_GEN_IPSEC_ESP;
-+ case (HEADER_TYPE_SCTP):
-+ return KG_SCH_GEN_SCTP;
-+ case (HEADER_TYPE_DCCP):
-+ return KG_SCH_GEN_DCCP;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ else
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ case (HEADER_TYPE_ETH):
-+ return KG_SCH_GEN_ETH_NO_V;
-+ case (HEADER_TYPE_LLC_SNAP):
-+ return KG_SCH_GEN_SNAP_NO_V;
-+ case (HEADER_TYPE_PPPoE):
-+ return KG_SCH_GEN_PPP_NO_V;
-+ case (HEADER_TYPE_MPLS):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_GEN_MPLS1_NO_V;
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ return KG_SCH_GEN_MPLS_LAST_NO_V;
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_3) )
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Indexed MPLS Extraction not supported"));
-+ else
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index"));
-+ return 0;
-+ case (HEADER_TYPE_IPv4):
-+ case (HEADER_TYPE_IPv6):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_GEN_L3_NO_V;
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_GEN_IP2_NO_V;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index"));
-+ case (HEADER_TYPE_MINENCAP):
-+ return KG_SCH_GEN_IP2_NO_V;
-+ case (HEADER_TYPE_USER_DEFINED_L3):
-+ return KG_SCH_GEN_L3_NO_V;
-+ case (HEADER_TYPE_GRE):
-+ return KG_SCH_GEN_GRE_NO_V;
-+ case (HEADER_TYPE_TCP):
-+ case (HEADER_TYPE_UDP):
-+ case (HEADER_TYPE_IPSEC_AH):
-+ case (HEADER_TYPE_IPSEC_ESP):
-+ case (HEADER_TYPE_SCTP):
-+ case (HEADER_TYPE_DCCP):
-+ return KG_SCH_GEN_L4_NO_V;
-+ case (HEADER_TYPE_USER_DEFINED_SHIM1):
-+ return KG_SCH_GEN_SHIM1;
-+ case (HEADER_TYPE_USER_DEFINED_SHIM2):
-+ return KG_SCH_GEN_SHIM2;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+}
-+static t_GenericCodes GetGenFieldCode(e_NetHeaderType hdr, t_FmPcdFields field, bool ignoreProtocolValidation, e_FmPcdHdrIndex hdrIndex)
-+{
-+ if (!ignoreProtocolValidation)
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ break;
-+ case (HEADER_TYPE_ETH):
-+ switch (field.eth)
-+ {
-+ case (NET_HEADER_FIELD_ETH_TYPE):
-+ return KG_SCH_GEN_ETH_TYPE;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ break;
-+ case (HEADER_TYPE_VLAN):
-+ switch (field.vlan)
-+ {
-+ case (NET_HEADER_FIELD_VLAN_TCI):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_GEN_VLAN1;
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ return KG_SCH_GEN_VLAN2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal VLAN header index"));
-+ return 0;
-+ }
-+ break;
-+ case (HEADER_TYPE_MPLS):
-+ case (HEADER_TYPE_IPSEC_AH):
-+ case (HEADER_TYPE_IPSEC_ESP):
-+ case (HEADER_TYPE_LLC_SNAP):
-+ case (HEADER_TYPE_PPPoE):
-+ case (HEADER_TYPE_IPv4):
-+ case (HEADER_TYPE_IPv6):
-+ case (HEADER_TYPE_GRE):
-+ case (HEADER_TYPE_MINENCAP):
-+ case (HEADER_TYPE_USER_DEFINED_L3):
-+ case (HEADER_TYPE_TCP):
-+ case (HEADER_TYPE_UDP):
-+ case (HEADER_TYPE_SCTP):
-+ case (HEADER_TYPE_DCCP):
-+ case (HEADER_TYPE_USER_DEFINED_L4):
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ default:
-+ break;
-+
-+ }
-+ else
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ break;
-+ case (HEADER_TYPE_ETH):
-+ switch (field.eth)
-+ {
-+ case (NET_HEADER_FIELD_ETH_TYPE):
-+ return KG_SCH_GEN_ETH_TYPE_NO_V;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ break;
-+ case (HEADER_TYPE_VLAN):
-+ switch (field.vlan)
-+ {
-+ case (NET_HEADER_FIELD_VLAN_TCI) :
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_GEN_VLAN1_NO_V;
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ return KG_SCH_GEN_VLAN2_NO_V;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal VLAN header index"));
-+ return 0;
-+ }
-+ break;
-+ case (HEADER_TYPE_IPv4):
-+ switch (field.ipv4)
-+ {
-+ case (NET_HEADER_FIELD_IPv4_PROTO):
-+ return KG_SCH_GEN_IP_PID_NO_V;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ break;
-+ case (HEADER_TYPE_IPv6):
-+ switch (field.ipv6)
-+ {
-+ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
-+ return KG_SCH_GEN_IP_PID_NO_V;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ break;
-+ case (HEADER_TYPE_MPLS):
-+ case (HEADER_TYPE_LLC_SNAP):
-+ case (HEADER_TYPE_PPPoE):
-+ case (HEADER_TYPE_GRE):
-+ case (HEADER_TYPE_MINENCAP):
-+ case (HEADER_TYPE_USER_DEFINED_L3):
-+ case (HEADER_TYPE_TCP):
-+ case (HEADER_TYPE_UDP):
-+ case (HEADER_TYPE_IPSEC_AH):
-+ case (HEADER_TYPE_IPSEC_ESP):
-+ case (HEADER_TYPE_SCTP):
-+ case (HEADER_TYPE_DCCP):
-+ case (HEADER_TYPE_USER_DEFINED_L4):
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ default:
-+ break;
-+ }
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header not supported"));
-+ return 0;
-+}
-+
-+static t_KnownFieldsMasks GetKnownProtMask(t_FmPcd *p_FmPcd, e_NetHeaderType hdr, e_FmPcdHdrIndex index, t_FmPcdFields field)
-+{
-+ UNUSED(p_FmPcd);
-+
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_NONE):
-+ ASSERT_COND(FALSE);
-+ break;
-+ case (HEADER_TYPE_ETH):
-+ switch (field.eth)
-+ {
-+ case (NET_HEADER_FIELD_ETH_DA):
-+ return KG_SCH_KN_MACDST;
-+ case (NET_HEADER_FIELD_ETH_SA):
-+ return KG_SCH_KN_MACSRC;
-+ case (NET_HEADER_FIELD_ETH_TYPE):
-+ return KG_SCH_KN_ETYPE;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_LLC_SNAP):
-+ switch (field.llcSnap)
-+ {
-+ case (NET_HEADER_FIELD_LLC_SNAP_TYPE):
-+ return KG_SCH_KN_ETYPE;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_VLAN):
-+ switch (field.vlan)
-+ {
-+ case (NET_HEADER_FIELD_VLAN_TCI):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_TCI1;
-+ if (index == e_FM_PCD_HDR_INDEX_LAST)
-+ return KG_SCH_KN_TCI2;
-+ else
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_MPLS):
-+ switch (field.mpls)
-+ {
-+ case (NET_HEADER_FIELD_MPLS_LABEL_STACK):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_MPLS1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return KG_SCH_KN_MPLS2;
-+ if (index == e_FM_PCD_HDR_INDEX_LAST)
-+ return KG_SCH_KN_MPLS_LAST;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index"));
-+ return 0;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_IPv4):
-+ switch (field.ipv4)
-+ {
-+ case (NET_HEADER_FIELD_IPv4_SRC_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_IPSRC1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_IPSRC2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv4_DST_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_IPDST1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_IPDST2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv4_PROTO):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_PTYPE1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_PTYPE2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv4_TOS):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_IPTOS_TC1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_IPTOS_TC2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index"));
-+ return 0;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_IPv6):
-+ switch (field.ipv6)
-+ {
-+ case (NET_HEADER_FIELD_IPv6_SRC_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_IPSRC1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_IPSRC2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv6_DST_IP):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_IPDST1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_IPDST2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv6_NEXT_HDR):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_PTYPE1;
-+ if (index == e_FM_PCD_HDR_INDEX_2)
-+ return KG_SCH_KN_PTYPE2;
-+ if (index == e_FM_PCD_HDR_INDEX_LAST)
-+#ifdef FM_KG_NO_IPPID_SUPPORT
-+ if (p_FmPcd->fmRevInfo.majorRev < 6)
-+ return KG_SCH_KN_PTYPE2;
-+#endif /* FM_KG_NO_IPPID_SUPPORT */
-+ return KG_SCH_KN_IPPID;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return (KG_SCH_KN_IPV6FL1 | KG_SCH_KN_IPTOS_TC1);
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return (KG_SCH_KN_IPV6FL2 | KG_SCH_KN_IPTOS_TC2);
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_TC):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_IPTOS_TC1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_IPTOS_TC2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return 0;
-+ case (NET_HEADER_FIELD_IPv6_FL):
-+ if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1))
-+ return KG_SCH_KN_IPV6FL1;
-+ if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST))
-+ return KG_SCH_KN_IPV6FL2;
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index"));
-+ return 0;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_GRE):
-+ switch (field.gre)
-+ {
-+ case (NET_HEADER_FIELD_GRE_TYPE):
-+ return KG_SCH_KN_GREPTYPE;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_MINENCAP):
-+ switch (field.minencap)
-+ {
-+ case (NET_HEADER_FIELD_MINENCAP_SRC_IP):
-+ return KG_SCH_KN_IPSRC2;
-+ case (NET_HEADER_FIELD_MINENCAP_DST_IP):
-+ return KG_SCH_KN_IPDST2;
-+ case (NET_HEADER_FIELD_MINENCAP_TYPE):
-+ return KG_SCH_KN_PTYPE2;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_TCP):
-+ switch (field.tcp)
-+ {
-+ case (NET_HEADER_FIELD_TCP_PORT_SRC):
-+ return KG_SCH_KN_L4PSRC;
-+ case (NET_HEADER_FIELD_TCP_PORT_DST):
-+ return KG_SCH_KN_L4PDST;
-+ case (NET_HEADER_FIELD_TCP_FLAGS):
-+ return KG_SCH_KN_TFLG;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_UDP):
-+ switch (field.udp)
-+ {
-+ case (NET_HEADER_FIELD_UDP_PORT_SRC):
-+ return KG_SCH_KN_L4PSRC;
-+ case (NET_HEADER_FIELD_UDP_PORT_DST):
-+ return KG_SCH_KN_L4PDST;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_IPSEC_AH):
-+ switch (field.ipsecAh)
-+ {
-+ case (NET_HEADER_FIELD_IPSEC_AH_SPI):
-+ return KG_SCH_KN_IPSEC_SPI;
-+ case (NET_HEADER_FIELD_IPSEC_AH_NH):
-+ return KG_SCH_KN_IPSEC_NH;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_IPSEC_ESP):
-+ switch (field.ipsecEsp)
-+ {
-+ case (NET_HEADER_FIELD_IPSEC_ESP_SPI):
-+ return KG_SCH_KN_IPSEC_SPI;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_SCTP):
-+ switch (field.sctp)
-+ {
-+ case (NET_HEADER_FIELD_SCTP_PORT_SRC):
-+ return KG_SCH_KN_L4PSRC;
-+ case (NET_HEADER_FIELD_SCTP_PORT_DST):
-+ return KG_SCH_KN_L4PDST;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_DCCP):
-+ switch (field.dccp)
-+ {
-+ case (NET_HEADER_FIELD_DCCP_PORT_SRC):
-+ return KG_SCH_KN_L4PSRC;
-+ case (NET_HEADER_FIELD_DCCP_PORT_DST):
-+ return KG_SCH_KN_L4PDST;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ case (HEADER_TYPE_PPPoE):
-+ switch (field.pppoe)
-+ {
-+ case (NET_HEADER_FIELD_PPPoE_PID):
-+ return KG_SCH_KN_PPPID;
-+ case (NET_HEADER_FIELD_PPPoE_SID):
-+ return KG_SCH_KN_PPPSID;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+ }
-+ default:
-+ break;
-+
-+ }
-+
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
-+ return 0;
-+}
-+
-+
-+static uint8_t GetKnownFieldId(uint32_t bitMask)
-+{
-+ uint8_t cnt = 0;
-+
-+ while (bitMask)
-+ if (bitMask & 0x80000000)
-+ break;
-+ else
-+ {
-+ cnt++;
-+ bitMask <<= 1;
-+ }
-+ return cnt;
-+
-+}
-+
-+static uint8_t GetExtractedOrMask(uint8_t bitOffset, bool fqid)
-+{
-+ uint8_t i, mask, numOfOnesToClear, walking1Mask = 1;
-+
-+ /* bitOffset 1-7 --> mask 0x1-0x7F */
-+ if (bitOffset<8)
-+ {
-+ mask = 0;
-+ for (i = 0 ; i < bitOffset ; i++, walking1Mask <<= 1)
-+ mask |= walking1Mask;
-+ }
-+ else
-+ {
-+ mask = 0xFF;
-+ numOfOnesToClear = 0;
-+ if (fqid && bitOffset>24)
-+ /* bitOffset 25-31 --> mask 0xFE-0x80 */
-+ numOfOnesToClear = (uint8_t)(bitOffset-24);
-+ else
-+ /* bitOffset 9-15 --> mask 0xFE-0x80 */
-+ if (!fqid && bitOffset>8)
-+ numOfOnesToClear = (uint8_t)(bitOffset-8);
-+ for (i = 0 ; i < numOfOnesToClear ; i++, walking1Mask <<= 1)
-+ mask &= ~walking1Mask;
-+ /* bitOffset 8-24 for FQID, 8 for PP --> no mask (0xFF)*/
-+ }
-+ return mask;
-+}
-+
-+static void IncSchemeOwners(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort)
-+{
-+ t_FmPcdKg *p_FmPcdKg;
-+ t_FmPcdKgScheme *p_Scheme;
-+ uint32_t intFlags;
-+ uint8_t relativeSchemeId;
-+ int i;
-+
-+ p_FmPcdKg = p_FmPcd->p_FmPcdKg;
-+
-+ /* for each scheme - update owners counters */
-+ for (i = 0; i < p_BindPort->numOfSchemes; i++)
-+ {
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]);
-+ ASSERT_COND(relativeSchemeId < FM_PCD_KG_NUM_OF_SCHEMES);
-+
-+ p_Scheme = &p_FmPcdKg->schemes[relativeSchemeId];
-+
-+ /* increment owners number */
-+ intFlags = KgSchemeLock(p_Scheme);
-+ p_Scheme->owners++;
-+ KgSchemeUnlock(p_Scheme, intFlags);
-+ }
-+}
-+
-+static void DecSchemeOwners(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort)
-+{
-+ t_FmPcdKg *p_FmPcdKg;
-+ t_FmPcdKgScheme *p_Scheme;
-+ uint32_t intFlags;
-+ uint8_t relativeSchemeId;
-+ int i;
-+
-+ p_FmPcdKg = p_FmPcd->p_FmPcdKg;
-+
-+ /* for each scheme - update owners counters */
-+ for (i = 0; i < p_BindPort->numOfSchemes; i++)
-+ {
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]);
-+ ASSERT_COND(relativeSchemeId < FM_PCD_KG_NUM_OF_SCHEMES);
-+
-+ p_Scheme = &p_FmPcdKg->schemes[relativeSchemeId];
-+
-+ /* decrement owners number */
-+ ASSERT_COND(p_Scheme->owners);
-+ intFlags = KgSchemeLock(p_Scheme);
-+ p_Scheme->owners--;
-+ KgSchemeUnlock(p_Scheme, intFlags);
-+ }
-+}
-+
-+static void UpdateRequiredActionFlag(t_FmPcdKgScheme *p_Scheme, bool set)
-+{
-+ /* this routine is locked by the calling routine */
-+ ASSERT_COND(p_Scheme);
-+ ASSERT_COND(p_Scheme->valid);
-+
-+ if (set)
-+ p_Scheme->requiredActionFlag = TRUE;
-+ else
-+ {
-+ p_Scheme->requiredAction = 0;
-+ p_Scheme->requiredActionFlag = FALSE;
-+ }
-+}
-+
-+static t_Error KgWriteSp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint32_t spReg, bool add)
-+{
-+ struct fman_kg_regs *p_KgRegs;
-+
-+ uint32_t tmpKgarReg = 0, intFlags;
-+ t_Error err = E_OK;
-+
-+ /* The calling routine has already locked the port, so only one core can access
-+ * each port (no additional lock is needed here) */
-+
-+ if (p_FmPcd->h_Hc)
-+ return FmHcKgWriteSp(p_FmPcd->h_Hc, hardwarePortId, spReg, add);
-+
-+ p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+
-+ tmpKgarReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId);
-+ /* lock a common KG reg */
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ err = WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ if (err)
-+ {
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ fman_kg_write_sp(p_KgRegs, spReg, add);
-+
-+ tmpKgarReg = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId);
-+
-+ err = WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ return err;
-+}
-+
-+static t_Error KgWriteCpp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint32_t cppReg)
-+{
-+ struct fman_kg_regs *p_KgRegs;
-+ uint32_t tmpKgarReg, intFlags;
-+ t_Error err;
-+
-+ p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcKgWriteCpp(p_FmPcd->h_Hc, hardwarePortId, cppReg);
-+ return err;
-+ }
-+
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ fman_kg_write_cpp(p_KgRegs, cppReg);
-+ tmpKgarReg = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId);
-+ err = WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+
-+ return err;
-+}
-+
-+static uint32_t BuildCppReg(t_FmPcd *p_FmPcd, uint8_t clsPlanGrpId)
-+{
-+ uint32_t tmpKgpeCpp;
-+
-+ tmpKgpeCpp = (uint32_t)(p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry / 8);
-+ tmpKgpeCpp |= (uint32_t)(((p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp / 8) - 1) << FM_KG_PE_CPP_MASK_SHIFT);
-+
-+ return tmpKgpeCpp;
-+}
-+
-+static t_Error BindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId)
-+{
-+ uint32_t tmpKgpeCpp = 0;
-+
-+ tmpKgpeCpp = BuildCppReg(p_FmPcd, clsPlanGrpId);
-+ return KgWriteCpp(p_FmPcd, hardwarePortId, tmpKgpeCpp);
-+}
-+
-+static void UnbindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId)
-+{
-+ KgWriteCpp(p_FmPcd, hardwarePortId, 0);
-+}
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+static uint32_t __attribute__((unused)) ReadClsPlanBlockActionReg(uint8_t grpId)
-+{
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ FM_KG_KGAR_READ |
-+ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
-+ DUMMY_PORT_ID |
-+ ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) |
-+ FM_PCD_KG_KGAR_WSEL_MASK);
-+
-+ /* if we ever want to write 1 by 1, use:
-+ sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP)));
-+ */
-+}
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+static void PcdKgErrorException(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint32_t event,schemeIndexes = 0, index = 0;
-+ struct fman_kg_regs *p_KgRegs;
-+
-+ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
-+ p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+ fman_kg_get_event(p_KgRegs, &event, &schemeIndexes);
-+
-+ if (event & FM_EX_KG_DOUBLE_ECC)
-+ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC);
-+ if (event & FM_EX_KG_KEYSIZE_OVERFLOW)
-+ {
-+ if (schemeIndexes)
-+ {
-+ while (schemeIndexes)
-+ {
-+ if (schemeIndexes & 0x1)
-+ p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, (uint16_t)(31 - index));
-+ schemeIndexes >>= 1;
-+ index+=1;
-+ }
-+ }
-+ else /* this should happen only when interrupt is forced. */
-+ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW);
-+ }
-+}
-+
-+static t_Error KgInitGuest(t_FmPcd *p_FmPcd)
-+{
-+ t_Error err = E_OK;
-+ t_FmPcdIpcKgSchemesParams kgAlloc;
-+ uint32_t replyLength;
-+ t_FmPcdIpcReply reply;
-+ t_FmPcdIpcMsg msg;
-+
-+ ASSERT_COND(p_FmPcd->guestId != NCSW_MASTER_ID);
-+
-+ /* in GUEST_PARTITION, we use the IPC */
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&kgAlloc, 0, sizeof(t_FmPcdIpcKgSchemesParams));
-+ kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes;
-+ kgAlloc.guestId = p_FmPcd->guestId;
-+ msg.msgId = FM_PCD_ALLOC_KG_SCHEMES;
-+ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
-+ replyLength = sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t);
-+ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(kgAlloc),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (replyLength != (sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ memcpy(p_FmPcd->p_FmPcdKg->schemesIds, (uint8_t*)(reply.replyBody),p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t));
-+
-+ return (t_Error)reply.error;
-+}
-+
-+static t_Error KgInitMaster(t_FmPcd *p_FmPcd)
-+{
-+ t_Error err = E_OK;
-+ struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+
-+ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
-+
-+ if (p_FmPcd->exceptions & FM_EX_KG_DOUBLE_ECC)
-+ FmEnableRamsEcc(p_FmPcd->h_Fm);
-+
-+ fman_kg_init(p_Regs, p_FmPcd->exceptions, GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd));
-+
-+ /* register even if no interrupts enabled, to allow future enablement */
-+ FmRegisterIntr(p_FmPcd->h_Fm,
-+ e_FM_MOD_KG,
-+ 0,
-+ e_FM_INTR_TYPE_ERR,
-+ PcdKgErrorException,
-+ p_FmPcd);
-+
-+ fman_kg_enable_scheme_interrupts(p_Regs);
-+
-+ if (p_FmPcd->p_FmPcdKg->numOfSchemes)
-+ {
-+ err = FmPcdKgAllocSchemes(p_FmPcd,
-+ p_FmPcd->p_FmPcdKg->numOfSchemes,
-+ p_FmPcd->guestId,
-+ p_FmPcd->p_FmPcdKg->schemesIds);
-+ if (err)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+
-+static void ValidateSchemeSw(t_FmPcdKgScheme *p_Scheme)
-+{
-+ ASSERT_COND(!p_Scheme->valid);
-+ if (p_Scheme->netEnvId != ILLEGAL_NETENV)
-+ FmPcdIncNetEnvOwners(p_Scheme->h_FmPcd, p_Scheme->netEnvId);
-+ p_Scheme->valid = TRUE;
-+}
-+
-+static t_Error InvalidateSchemeSw(t_FmPcdKgScheme *p_Scheme)
-+{
-+ if (p_Scheme->owners)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a scheme that has ports bound to"));
-+
-+ if (p_Scheme->netEnvId != ILLEGAL_NETENV)
-+ FmPcdDecNetEnvOwners(p_Scheme->h_FmPcd, p_Scheme->netEnvId);
-+ p_Scheme->valid = FALSE;
-+
-+ return E_OK;
-+}
-+
-+static t_Error BuildSchemeRegs(t_FmPcdKgScheme *p_Scheme,
-+ t_FmPcdKgSchemeParams *p_SchemeParams,
-+ struct fman_kg_scheme_regs *p_SchemeRegs)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)(p_Scheme->h_FmPcd);
-+ uint32_t grpBits = 0;
-+ uint8_t grpBase;
-+ bool direct=TRUE, absolute=FALSE;
-+ uint16_t profileId=0, numOfProfiles=0, relativeProfileId;
-+ t_Error err = E_OK;
-+ int i = 0;
-+ t_NetEnvParams netEnvParams;
-+ uint32_t tmpReg, fqbTmp = 0, ppcTmp = 0, selectTmp, maskTmp, knownTmp, genTmp;
-+ t_FmPcdKgKeyExtractAndHashParams *p_KeyAndHash = NULL;
-+ uint8_t j, curr, idx;
-+ uint8_t id, shift=0, code=0, offset=0, size=0;
-+ t_FmPcdExtractEntry *p_Extract = NULL;
-+ t_FmPcdKgExtractedOrParams *p_ExtractOr;
-+ bool generic = FALSE;
-+ t_KnownFieldsMasks bitMask;
-+ e_FmPcdKgExtractDfltSelect swDefault = (e_FmPcdKgExtractDfltSelect)0;
-+ t_FmPcdKgSchemesExtracts *p_LocalExtractsArray;
-+ uint8_t numOfSwDefaults = 0;
-+ t_FmPcdKgExtractDflt swDefaults[NUM_OF_SW_DEFAULTS];
-+ uint8_t currGenId = 0;
-+
-+ memset(swDefaults, 0, NUM_OF_SW_DEFAULTS*sizeof(t_FmPcdKgExtractDflt));
-+ memset(p_SchemeRegs, 0, sizeof(struct fman_kg_scheme_regs));
-+
-+ if (p_SchemeParams->netEnvParams.numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("numOfDistinctionUnits should not exceed %d", FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS));
-+
-+ /* by netEnv parameters, get match vector */
-+ if (!p_SchemeParams->alwaysDirect)
-+ {
-+ p_Scheme->netEnvId = FmPcdGetNetEnvId(p_SchemeParams->netEnvParams.h_NetEnv);
-+ netEnvParams.netEnvId = p_Scheme->netEnvId;
-+ netEnvParams.numOfDistinctionUnits = p_SchemeParams->netEnvParams.numOfDistinctionUnits;
-+ memcpy(netEnvParams.unitIds, p_SchemeParams->netEnvParams.unitIds, (sizeof(uint8_t))*p_SchemeParams->netEnvParams.numOfDistinctionUnits);
-+ err = PcdGetUnitsVector(p_FmPcd, &netEnvParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+ p_Scheme->matchVector = netEnvParams.vector;
-+ }
-+ else
-+ {
-+ p_Scheme->matchVector = SCHEME_ALWAYS_DIRECT;
-+ p_Scheme->netEnvId = ILLEGAL_NETENV;
-+ }
-+
-+ if (p_SchemeParams->nextEngine == e_FM_PCD_INVALID)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next Engine of the scheme is not Valid"));
-+
-+ if (p_SchemeParams->bypassFqidGeneration)
-+ {
-+#ifdef FM_KG_NO_BYPASS_FQID_GEN
-+ if ((p_FmPcd->fmRevInfo.majorRev != 4) && (p_FmPcd->fmRevInfo.majorRev < 6))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassFqidGeneration."));
-+#endif /* FM_KG_NO_BYPASS_FQID_GEN */
-+ if (p_SchemeParams->baseFqid)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid set for a scheme that does not generate an FQID"));
-+ }
-+ else
-+ if (!p_SchemeParams->baseFqid)
-+ DBG(WARNING, ("baseFqid is 0."));
-+
-+ if (p_SchemeParams->nextEngine == e_FM_PCD_PLCR)
-+ {
-+ direct = p_SchemeParams->kgNextEngineParams.plcrProfile.direct;
-+ p_Scheme->directPlcr = direct;
-+ absolute = (bool)(p_SchemeParams->kgNextEngineParams.plcrProfile.sharedProfile ? TRUE : FALSE);
-+ if (!direct && absolute)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Indirect policing is not available when profile is shared."));
-+
-+ if (direct)
-+ {
-+ profileId = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.directRelativeProfileId;
-+ numOfProfiles = 1;
-+ }
-+ else
-+ {
-+ profileId = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase;
-+ shift = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift;
-+ numOfProfiles = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.numOfProfiles;
-+ }
-+ }
-+
-+ if (p_SchemeParams->nextEngine == e_FM_PCD_CC)
-+ {
-+#ifdef FM_KG_NO_BYPASS_PLCR_PROFILE_GEN
-+ if ((p_SchemeParams->kgNextEngineParams.cc.plcrNext) && (p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration))
-+ {
-+ if ((p_FmPcd->fmRevInfo.majorRev != 4) && (p_FmPcd->fmRevInfo.majorRev < 6))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassPlcrProfileGeneration."));
-+ }
-+#endif /* FM_KG_NO_BYPASS_PLCR_PROFILE_GEN */
-+
-+ err = FmPcdCcGetGrpParams(p_SchemeParams->kgNextEngineParams.cc.h_CcTree,
-+ p_SchemeParams->kgNextEngineParams.cc.grpId,
-+ &grpBits,
-+ &grpBase);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ p_Scheme->ccUnits = grpBits;
-+
-+ if ((p_SchemeParams->kgNextEngineParams.cc.plcrNext) &&
-+ (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration))
-+ {
-+ if (p_SchemeParams->kgNextEngineParams.cc.plcrProfile.sharedProfile)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Shared profile may not be used after Coarse classification."));
-+ absolute = FALSE;
-+ direct = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.direct;
-+ if (direct)
-+ {
-+ profileId = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.directRelativeProfileId;
-+ numOfProfiles = 1;
-+ }
-+ else
-+ {
-+ profileId = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase;
-+ shift = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift;
-+ numOfProfiles = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.numOfProfiles;
-+ }
-+ }
-+ }
-+
-+ /* if policer is used directly after KG, or after CC */
-+ if ((p_SchemeParams->nextEngine == e_FM_PCD_PLCR) ||
-+ ((p_SchemeParams->nextEngine == e_FM_PCD_CC) &&
-+ (p_SchemeParams->kgNextEngineParams.cc.plcrNext) &&
-+ (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration)))
-+ {
-+ /* a private policer profile may still be uninitialized, therefore no checks are done at this stage */
-+ if (absolute)
-+ {
-+ /* for absolute direct policy only, */
-+ relativeProfileId = profileId;
-+ err = FmPcdPlcrGetAbsoluteIdByProfileParams((t_Handle)p_FmPcd,e_FM_PCD_PLCR_SHARED,NULL, relativeProfileId, &profileId);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("Shared profile not valid offset"));
-+ if (!FmPcdPlcrIsProfileValid(p_FmPcd, profileId))
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Shared profile not valid."));
-+ p_Scheme->relativeProfileId = profileId;
-+ }
-+ else
-+ {
-+ /* save relative profile id's for later check */
-+ p_Scheme->nextRelativePlcrProfile = TRUE;
-+ p_Scheme->relativeProfileId = profileId;
-+ p_Scheme->numOfProfiles = numOfProfiles;
-+ }
-+ }
-+ else
-+ {
-+ /* if the policer is NOT going to be used after KG at all, then if bypassFqidGeneration
-+ is set, we do not need numOfUsedExtractedOrs and hashDistributionNumOfFqids */
-+ if (p_SchemeParams->bypassFqidGeneration && p_SchemeParams->numOfUsedExtractedOrs)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("numOfUsedExtractedOrs is set in a scheme that does not generate FQID or policer profile ID"));
-+ if (p_SchemeParams->bypassFqidGeneration &&
-+ p_SchemeParams->useHash &&
-+ p_SchemeParams->keyExtractAndHashParams.hashDistributionNumOfFqids)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("hashDistributionNumOfFqids is set in a scheme that does not generate FQID or policer profile ID"));
-+ }
-+
-+ /* configure all 21 scheme registers */
-+ tmpReg = KG_SCH_MODE_EN;
-+ switch (p_SchemeParams->nextEngine)
-+ {
-+ case (e_FM_PCD_PLCR):
-+ /* add to mode register - NIA */
-+ tmpReg |= KG_SCH_MODE_NIA_PLCR;
-+ tmpReg |= NIA_ENG_PLCR;
-+ tmpReg |= (uint32_t)(p_SchemeParams->kgNextEngineParams.plcrProfile.sharedProfile ? NIA_PLCR_ABSOLUTE:0);
-+ /* initialize policer profile command - */
-+ /* configure kgse_ppc */
-+ if (direct)
-+ /* use profileId as base, other fields are 0 */
-+ p_SchemeRegs->kgse_ppc = (uint32_t)profileId;
-+ else
-+ {
-+ if (shift > MAX_PP_SHIFT)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT));
-+
-+ if (!numOfProfiles || !POWER_OF_2(numOfProfiles))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2"));
-+
-+ ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH;
-+ ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW;
-+ ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT);
-+ ppcTmp |= (uint32_t)profileId;
-+
-+ p_SchemeRegs->kgse_ppc = ppcTmp;
-+ }
-+ break;
-+ case (e_FM_PCD_CC):
-+ /* mode reg - define NIA */
-+ tmpReg |= (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC);
-+
-+ p_SchemeRegs->kgse_ccbs = grpBits;
-+ tmpReg |= (uint32_t)(grpBase << KG_SCH_MODE_CCOBASE_SHIFT);
-+
-+ if (p_SchemeParams->kgNextEngineParams.cc.plcrNext)
-+ {
-+ if (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration)
-+ {
-+ /* find out if absolute or relative */
-+ if (absolute)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("It is illegal to request a shared profile in a scheme that is in a KG->CC->PLCR flow"));
-+ if (direct)
-+ {
-+ /* mask = 0, base = directProfileId */
-+ p_SchemeRegs->kgse_ppc = (uint32_t)profileId;
-+ }
-+ else
-+ {
-+ if (shift > MAX_PP_SHIFT)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT));
-+ if (!numOfProfiles || !POWER_OF_2(numOfProfiles))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2"));
-+
-+ ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH;
-+ ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW;
-+ ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT);
-+ ppcTmp |= (uint32_t)profileId;
-+
-+ p_SchemeRegs->kgse_ppc = ppcTmp;
-+ }
-+ }
-+ }
-+ break;
-+ case (e_FM_PCD_DONE):
-+ if (p_SchemeParams->kgNextEngineParams.doneAction == e_FM_PCD_DROP_FRAME)
-+ tmpReg |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
-+ else
-+ tmpReg |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Next engine not supported"));
-+ }
-+ p_SchemeRegs->kgse_mode = tmpReg;
-+
-+ p_SchemeRegs->kgse_mv = p_Scheme->matchVector;
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_SchemeParams->overrideStorageProfile)
-+ {
-+ p_SchemeRegs->kgse_om |= KG_SCH_OM_VSPE;
-+
-+ if (p_SchemeParams->storageProfile.direct)
-+ {
-+ profileId = p_SchemeParams->storageProfile.profileSelect.directRelativeProfileId;
-+ shift = 0;
-+ numOfProfiles = 1;
-+ }
-+ else
-+ {
-+ profileId = p_SchemeParams->storageProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase;
-+ shift = p_SchemeParams->storageProfile.profileSelect.indirectProfile.fqidOffsetShift;
-+ numOfProfiles = p_SchemeParams->storageProfile.profileSelect.indirectProfile.numOfProfiles;
-+ }
-+ if (shift > MAX_SP_SHIFT)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_SP_SHIFT));
-+
-+ if (!numOfProfiles || !POWER_OF_2(numOfProfiles))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2"));
-+
-+ tmpReg = (uint32_t)shift << KG_SCH_VSP_SHIFT;
-+ tmpReg |= ((uint32_t)(numOfProfiles-1) << KG_SCH_VSP_MASK_SHIFT);
-+ tmpReg |= (uint32_t)profileId;
-+
-+
-+ p_SchemeRegs->kgse_vsp = tmpReg;
-+
-+ p_Scheme->vspe = TRUE;
-+
-+ }
-+ else
-+ p_SchemeRegs->kgse_vsp = KG_SCH_VSP_NO_KSP_EN;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (p_SchemeParams->useHash)
-+ {
-+ p_KeyAndHash = &p_SchemeParams->keyExtractAndHashParams;
-+
-+ if (p_KeyAndHash->numOfUsedExtracts >= FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfUsedExtracts out of range"));
-+
-+ /* configure kgse_dv0 */
-+ p_SchemeRegs->kgse_dv0 = p_KeyAndHash->privateDflt0;
-+
-+ /* configure kgse_dv1 */
-+ p_SchemeRegs->kgse_dv1 = p_KeyAndHash->privateDflt1;
-+
-+ if (!p_SchemeParams->bypassFqidGeneration)
-+ {
-+ if (!p_KeyAndHash->hashDistributionNumOfFqids || !POWER_OF_2(p_KeyAndHash->hashDistributionNumOfFqids))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionNumOfFqids must not be 0 and must be a power of 2"));
-+ if ((p_KeyAndHash->hashDistributionNumOfFqids-1) & p_SchemeParams->baseFqid)
-+ DBG(WARNING, ("baseFqid unaligned. Distribution may result in less than hashDistributionNumOfFqids queues."));
-+ }
-+
-+ /* configure kgse_ekdv */
-+ tmpReg = 0;
-+ for ( i=0 ;i<p_KeyAndHash->numOfUsedDflts ; i++)
-+ {
-+ switch (p_KeyAndHash->dflts[i].type)
-+ {
-+ case (e_FM_PCD_KG_MAC_ADDR):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MAC_ADDR_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_TCI):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCI_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_ENET_TYPE):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_ENET_TYPE_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_PPP_SESSION_ID):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_SESSION_ID_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_PPP_PROTOCOL_ID):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_MPLS_LABEL):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MPLS_LABEL_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_IP_ADDR):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_ADDR_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_PROTOCOL_TYPE):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PROTOCOL_TYPE_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_IP_TOS_TC):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_TOS_TC_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_IPV6_FLOW_LABEL):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_IPSEC_SPI):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IPSEC_SPI_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_L4_PORT):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_TCP_FLAG):
-+ tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCP_FLAG_SHIFT);
-+ break;
-+ case (e_FM_PCD_KG_GENERIC_FROM_DATA):
-+ swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA;
-+ swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect;
-+ numOfSwDefaults ++;
-+ break;
-+ case (e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V):
-+ swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V;
-+ swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect;
-+ numOfSwDefaults ++;
-+ break;
-+ case (e_FM_PCD_KG_GENERIC_NOT_FROM_DATA):
-+ swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_NOT_FROM_DATA;
-+ swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect;
-+ numOfSwDefaults ++;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ }
-+ p_SchemeRegs->kgse_ekdv = tmpReg;
-+
-+ p_LocalExtractsArray = (t_FmPcdKgSchemesExtracts *)XX_Malloc(sizeof(t_FmPcdKgSchemesExtracts));
-+ if (!p_LocalExtractsArray)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
-+
-+ /* configure kgse_ekfc and kgse_gec */
-+ knownTmp = 0;
-+ for ( i=0 ;i<p_KeyAndHash->numOfUsedExtracts ; i++)
-+ {
-+ p_Extract = &p_KeyAndHash->extractArray[i];
-+ switch (p_Extract->type)
-+ {
-+ case (e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO):
-+ knownTmp |= KG_SCH_KN_PORT_ID;
-+ /* save in driver structure */
-+ p_LocalExtractsArray->extractsArray[i].id = GetKnownFieldId(KG_SCH_KN_PORT_ID);
-+ p_LocalExtractsArray->extractsArray[i].known = TRUE;
-+ break;
-+ case (e_FM_PCD_EXTRACT_BY_HDR):
-+ switch (p_Extract->extractByHdr.hdr)
-+ {
-+#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ case (HEADER_TYPE_UDP_LITE):
-+ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
-+ break;
-+#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+ case (HEADER_TYPE_UDP_ENCAP_ESP):
-+ switch (p_Extract->extractByHdr.type)
-+ {
-+ case (e_FM_PCD_EXTRACT_FROM_HDR):
-+ /* case where extraction from ESP only */
-+ if (p_Extract->extractByHdr.extractByHdrType.fromHdr.offset >= UDP_HEADER_SIZE)
-+ {
-+ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
-+ p_Extract->extractByHdr.extractByHdrType.fromHdr.offset -= UDP_HEADER_SIZE;
-+ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
-+ }
-+ else
-+ {
-+ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
-+ p_Extract->extractByHdr.ignoreProtocolValidation = FALSE;
-+ }
-+ break;
-+ case (e_FM_PCD_EXTRACT_FROM_FIELD):
-+ switch (p_Extract->extractByHdr.extractByHdrType.fromField.field.udpEncapEsp)
-+ {
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC):
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST):
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN):
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM):
-+ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
-+ break;
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI):
-+ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
-+ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
-+ /*p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SPI_OFFSET;*/
-+ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
-+ break;
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM):
-+ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
-+ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
-+ p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SEQ_NUM_OFFSET;
-+ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
-+ break;
-+ }
-+ break;
-+ case (e_FM_PCD_EXTRACT_FULL_FIELD):
-+ switch (p_Extract->extractByHdr.extractByHdrType.fullField.udpEncapEsp)
-+ {
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC):
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST):
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN):
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM):
-+ p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP;
-+ break;
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI):
-+ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
-+ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
-+ p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SPI_SIZE;
-+ p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SPI_OFFSET;
-+ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
-+ break;
-+ case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM):
-+ p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR;
-+ p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP);
-+ p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SEQ_NUM_SIZE;
-+ p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SEQ_NUM_OFFSET;
-+ p_Extract->extractByHdr.ignoreProtocolValidation = TRUE;
-+ break;
-+ }
-+ break;
-+ }
-+ break;
-+ default:
-+ break;
-+ }
-+ switch (p_Extract->extractByHdr.type)
-+ {
-+ case (e_FM_PCD_EXTRACT_FROM_HDR):
-+ generic = TRUE;
-+ /* get the header code for the generic extract */
-+ code = GetGenHdrCode(p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex, p_Extract->extractByHdr.ignoreProtocolValidation);
-+ /* set generic register fields */
-+ offset = p_Extract->extractByHdr.extractByHdrType.fromHdr.offset;
-+ size = p_Extract->extractByHdr.extractByHdrType.fromHdr.size;
-+ break;
-+ case (e_FM_PCD_EXTRACT_FROM_FIELD):
-+ generic = TRUE;
-+ /* get the field code for the generic extract */
-+ code = GetGenFieldCode(p_Extract->extractByHdr.hdr,
-+ p_Extract->extractByHdr.extractByHdrType.fromField.field, p_Extract->extractByHdr.ignoreProtocolValidation,p_Extract->extractByHdr.hdrIndex);
-+ offset = p_Extract->extractByHdr.extractByHdrType.fromField.offset;
-+ size = p_Extract->extractByHdr.extractByHdrType.fromField.size;
-+ break;
-+ case (e_FM_PCD_EXTRACT_FULL_FIELD):
-+ if (!p_Extract->extractByHdr.ignoreProtocolValidation)
-+ {
-+ /* if we have a known field for it - use it, otherwise use generic */
-+ bitMask = GetKnownProtMask(p_FmPcd, p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex,
-+ p_Extract->extractByHdr.extractByHdrType.fullField);
-+ if (bitMask)
-+ {
-+ knownTmp |= bitMask;
-+ /* save in driver structure */
-+ p_LocalExtractsArray->extractsArray[i].id = GetKnownFieldId(bitMask);
-+ p_LocalExtractsArray->extractsArray[i].known = TRUE;
-+ }
-+ else
-+ generic = TRUE;
-+ }
-+ else
-+ generic = TRUE;
-+ if (generic)
-+ {
-+ /* tmp - till we cover more headers under generic */
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Full header selection not supported"));
-+ }
-+ break;
-+ default:
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ break;
-+ case (e_FM_PCD_EXTRACT_NON_HDR):
-+ /* use generic */
-+ generic = TRUE;
-+ offset = 0;
-+ /* get the field code for the generic extract */
-+ code = GetGenCode(p_Extract->extractNonHdr.src, &offset);
-+ offset += p_Extract->extractNonHdr.offset;
-+ size = p_Extract->extractNonHdr.size;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ if (generic)
-+ {
-+ /* set generic register fields */
-+ if (currGenId >= FM_KG_NUM_OF_GENERIC_REGS)
-+ {
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used"));
-+ }
-+ if (!code)
-+ {
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
-+ }
-+
-+ genTmp = KG_SCH_GEN_VALID;
-+ genTmp |= (uint32_t)(code << KG_SCH_GEN_HT_SHIFT);
-+ genTmp |= offset;
-+ if ((size > MAX_KG_SCH_SIZE) || (size < 1))
-+ {
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal extraction (size out of range)"));
-+ }
-+ genTmp |= (uint32_t)((size - 1) << KG_SCH_GEN_SIZE_SHIFT);
-+ swDefault = GetGenericSwDefault(swDefaults, numOfSwDefaults, code);
-+ if (swDefault == e_FM_PCD_KG_DFLT_ILLEGAL)
-+ DBG(WARNING, ("No sw default configured"));
-+ else
-+ genTmp |= swDefault << KG_SCH_GEN_DEF_SHIFT;
-+
-+ genTmp |= KG_SCH_GEN_MASK;
-+ p_SchemeRegs->kgse_gec[currGenId] = genTmp;
-+ /* save in driver structure */
-+ p_LocalExtractsArray->extractsArray[i].id = currGenId++;
-+ p_LocalExtractsArray->extractsArray[i].known = FALSE;
-+ generic = FALSE;
-+ }
-+ }
-+ p_SchemeRegs->kgse_ekfc = knownTmp;
-+
-+ selectTmp = 0;
-+ maskTmp = 0xFFFFFFFF;
-+ /* configure kgse_bmch, kgse_bmcl and kgse_fqb */
-+
-+ if (p_KeyAndHash->numOfUsedMasks > FM_PCD_KG_NUM_OF_EXTRACT_MASKS)
-+ {
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Only %d masks supported", FM_PCD_KG_NUM_OF_EXTRACT_MASKS));
-+ }
-+ for ( i=0 ;i<p_KeyAndHash->numOfUsedMasks ; i++)
-+ {
-+ /* Get the relative id of the extract (for known 0-0x1f, for generic 0-7) */
-+ id = p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].id;
-+ /* Get the shift of the select field (depending on i) */
-+ GET_MASK_SEL_SHIFT(shift,i);
-+ if (p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].known)
-+ selectTmp |= id << shift;
-+ else
-+ selectTmp |= (id + MASK_FOR_GENERIC_BASE_ID) << shift;
-+
-+ /* Get the shift of the offset field (depending on i) - may
-+ be in kgse_bmch or in kgse_fqb */
-+ GET_MASK_OFFSET_SHIFT(shift,i);
-+ if (i<=1)
-+ selectTmp |= p_KeyAndHash->masks[i].offset << shift;
-+ else
-+ fqbTmp |= p_KeyAndHash->masks[i].offset << shift;
-+
-+ /* Get the shift of the mask field (depending on i) */
-+ GET_MASK_SHIFT(shift,i);
-+ /* pass all bits */
-+ maskTmp |= KG_SCH_BITMASK_MASK << shift;
-+ /* clear bits that need masking */
-+ maskTmp &= ~(0xFF << shift) ;
-+ /* set mask bits */
-+ maskTmp |= (p_KeyAndHash->masks[i].mask << shift) ;
-+ }
-+ p_SchemeRegs->kgse_bmch = selectTmp;
-+ p_SchemeRegs->kgse_bmcl = maskTmp;
-+ /* kgse_fqb will be written at the end of the routine */
-+
-+ /* configure kgse_hc */
-+ if (p_KeyAndHash->hashShift > MAX_HASH_SHIFT)
-+ {
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashShift must not be larger than %d", MAX_HASH_SHIFT));
-+ }
-+ if (p_KeyAndHash->hashDistributionFqidsShift > MAX_DIST_FQID_SHIFT)
-+ {
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionFqidsShift must not be larger than %d", MAX_DIST_FQID_SHIFT));
-+ }
-+
-+ tmpReg = 0;
-+
-+ tmpReg |= ((p_KeyAndHash->hashDistributionNumOfFqids - 1) << p_KeyAndHash->hashDistributionFqidsShift);
-+ tmpReg |= p_KeyAndHash->hashShift << KG_SCH_HASH_CONFIG_SHIFT_SHIFT;
-+
-+ if (p_KeyAndHash->symmetricHash)
-+ {
-+ if ((!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACDST)) ||
-+ (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC1) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST1)) ||
-+ (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC2) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST2)) ||
-+ (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PDST)))
-+ {
-+ XX_Free(p_LocalExtractsArray);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("symmetricHash set but src/dest extractions missing"));
-+ }
-+ tmpReg |= KG_SCH_HASH_CONFIG_SYM;
-+ }
-+ p_SchemeRegs->kgse_hc = tmpReg;
-+
-+ /* build the return array describing the order of the extractions */
-+
-+ /* The last currGenId places of the array
-+ are for generic extracts, which always come last.
-+ To calculate the order of the known extractions, we sort
-+ the known extracts between orderedArray[0] and
-+ orderedArray[p_KeyAndHash->numOfUsedExtracts - currGenId - 1].
-+ For the order of the generic extractions we use:
-+ num_of_generic = currGenId
-+ num_of_known = p_KeyAndHash->numOfUsedExtracts - currGenId
-+ first_generic_index = num_of_known */
-+ curr = 0;
-+ for (i=0;i<p_KeyAndHash->numOfUsedExtracts ; i++)
-+ {
-+ if (p_LocalExtractsArray->extractsArray[i].known)
-+ {
-+ ASSERT_COND(curr<(p_KeyAndHash->numOfUsedExtracts - currGenId));
-+ j = curr;
-+ /* id is the extract id (port id = 0, mac src = 1 etc.). the value in the array is the original
-+ index in the user's extractions array */
-+ /* we compare the id of the current extract with the id of the extract in the orderedArray[j-1]
-+ location */
-+ while ((j > 0) && (p_LocalExtractsArray->extractsArray[i].id <
-+ p_LocalExtractsArray->extractsArray[p_Scheme->orderedArray[j-1]].id))
-+ {
-+ p_Scheme->orderedArray[j] =
-+ p_Scheme->orderedArray[j-1];
-+ j--;
-+ }
-+ p_Scheme->orderedArray[j] = (uint8_t)i;
-+ curr++;
-+ }
-+ else
-+ {
-+ /* index is first_generic_index + generic index (id) */
-+ idx = (uint8_t)(p_KeyAndHash->numOfUsedExtracts - currGenId + p_LocalExtractsArray->extractsArray[i].id);
-+ ASSERT_COND(idx < FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY);
-+ p_Scheme->orderedArray[idx]= (uint8_t)i;
-+ }
-+ }
-+ XX_Free(p_LocalExtractsArray);
-+ }
-+ else
-+ {
-+ /* clear all unused registers: */
-+ p_SchemeRegs->kgse_ekfc = 0;
-+ p_SchemeRegs->kgse_ekdv = 0;
-+ p_SchemeRegs->kgse_bmch = 0;
-+ p_SchemeRegs->kgse_bmcl = 0;
-+ p_SchemeRegs->kgse_hc = 0;
-+ p_SchemeRegs->kgse_dv0 = 0;
-+ p_SchemeRegs->kgse_dv1 = 0;
-+ }
-+
-+ if (p_SchemeParams->bypassFqidGeneration)
-+ p_SchemeRegs->kgse_hc |= KG_SCH_HASH_CONFIG_NO_FQID;
-+
-+ /* configure kgse_spc */
-+ if ( p_SchemeParams->schemeCounter.update)
-+ p_SchemeRegs->kgse_spc = p_SchemeParams->schemeCounter.value;
-+
-+
-+ /* check that there are enough generic registers */
-+ if (p_SchemeParams->numOfUsedExtractedOrs + currGenId > FM_KG_NUM_OF_GENERIC_REGS)
-+ RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used"));
-+
-+ /* extracted OR mask on Qid */
-+ for ( i=0 ;i<p_SchemeParams->numOfUsedExtractedOrs ; i++)
-+ {
-+
-+ p_Scheme->extractedOrs = TRUE;
-+ /* configure kgse_gec[i] */
-+ p_ExtractOr = &p_SchemeParams->extractedOrs[i];
-+ switch (p_ExtractOr->type)
-+ {
-+ case (e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO):
-+ code = KG_SCH_GEN_PARSE_RESULT_N_FQID;
-+ offset = 0;
-+ break;
-+ case (e_FM_PCD_EXTRACT_BY_HDR):
-+ /* get the header code for the generic extract */
-+ code = GetGenHdrCode(p_ExtractOr->extractByHdr.hdr, p_ExtractOr->extractByHdr.hdrIndex, p_ExtractOr->extractByHdr.ignoreProtocolValidation);
-+ /* set generic register fields */
-+ offset = p_ExtractOr->extractionOffset;
-+ break;
-+ case (e_FM_PCD_EXTRACT_NON_HDR):
-+ /* get the field code for the generic extract */
-+ offset = 0;
-+ code = GetGenCode(p_ExtractOr->src, &offset);
-+ offset += p_ExtractOr->extractionOffset;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ /* set generic register fields */
-+ if (!code)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
-+ genTmp = KG_SCH_GEN_EXTRACT_TYPE | KG_SCH_GEN_VALID;
-+ genTmp |= (uint32_t)(code << KG_SCH_GEN_HT_SHIFT);
-+ genTmp |= offset;
-+ if (!!p_ExtractOr->bitOffsetInFqid == !!p_ExtractOr->bitOffsetInPlcrProfile)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extracted byte must affect either FQID or Policer profile"));
-+
-+ /************************************************************************************
-+ bitOffsetInFqid and bitOffsetInPlcrProfile are translated to the rotate parameter
-+ in the following way:
-+
-+ Driver API and implementation:
-+ ==============================
-+ FQID: the extracted OR byte may be shifted right 1-31 bits to affect parts of the FQID.
-+ If shifted less than 8 bits or more than 24 bits, a mask is set on the bits that
-+ do not overlap the FQID.
-+ ------------------------
-+ | FQID (24) |
-+ ------------------------
-+ --------
-+ | | extracted OR byte
-+ --------
-+
-+         Policer Profile: the extracted OR byte may be shifted right 1-15 bits to affect parts of the
-+ PP id. Unless shifted exactly 8 bits to overlap the PP id, a mask is set on the bits that
-+ are not overlapping PP id.
-+
-+ --------
-+ | PP (8) |
-+ --------
-+ --------
-+ | | extracted OR byte
-+ --------
-+
-+ HW implementation
-+ =================
-+         FQID and PP form a 32-bit word as described below. The extracted byte is located
-+         as the highest byte of that word and may be rotated to affect any part of the FQID or
-+         the PP.
-+ ------------------------ --------
-+ | FQID (24) || PP (8) |
-+ ------------------------ --------
-+ --------
-+ | | extracted OR byte
-+ --------
-+
-+ ************************************************************************************/
-+
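-+        /* Worked example of the translation implemented below: bitOffsetInFqid = 4 is
-+           programmed as a rotate of 4+24 = 28, bitOffsetInFqid = 12 as 12-8 = 4, and
-+           bitOffsetInPlcrProfile = 8 as 8+16 = 24. */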
-+ if (p_ExtractOr->bitOffsetInFqid)
-+ {
-+ if (p_ExtractOr->bitOffsetInFqid > MAX_KG_SCH_FQID_BIT_OFFSET )
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInFqid out of range)"));
-+ if (p_ExtractOr->bitOffsetInFqid<8)
-+ genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid+24) << KG_SCH_GEN_SIZE_SHIFT);
-+ else
-+ genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid-8) << KG_SCH_GEN_SIZE_SHIFT);
-+ p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInFqid, TRUE);
-+ }
-+ else /* effect policer profile */
-+ {
-+ if (p_ExtractOr->bitOffsetInPlcrProfile > MAX_KG_SCH_PP_BIT_OFFSET )
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInPlcrProfile out of range)"));
-+ p_Scheme->bitOffsetInPlcrProfile = p_ExtractOr->bitOffsetInPlcrProfile;
-+ genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInPlcrProfile+16) << KG_SCH_GEN_SIZE_SHIFT);
-+ p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInPlcrProfile, FALSE);
-+ }
-+
-+ genTmp |= (uint32_t)(p_ExtractOr->extractionOffset << KG_SCH_GEN_DEF_SHIFT);
-+ /* clear bits that need masking */
-+ genTmp &= ~KG_SCH_GEN_MASK ;
-+ /* set mask bits */
-+ genTmp |= (uint32_t)(p_ExtractOr->mask << KG_SCH_GEN_MASK_SHIFT);
-+ p_SchemeRegs->kgse_gec[currGenId++] = genTmp;
-+
-+ }
-+ /* clear all unused GEC registers */
-+ for ( i=currGenId ;i<FM_KG_NUM_OF_GENERIC_REGS ; i++)
-+ p_SchemeRegs->kgse_gec[i] = 0;
-+
-+ /* add base Qid for this scheme */
-+ /* add configuration for kgse_fqb */
-+ if (p_SchemeParams->baseFqid & ~0x00FFFFFF)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid must be between 1 and 2^24-1"));
-+
-+ fqbTmp |= p_SchemeParams->baseFqid;
-+ p_SchemeRegs->kgse_fqb = fqbTmp;
-+
-+ p_Scheme->nextEngine = p_SchemeParams->nextEngine;
-+ p_Scheme->doneAction = p_SchemeParams->kgNextEngineParams.doneAction;
-+
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+
-+t_Error FmPcdKgBuildClsPlanGrp(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_Grp, t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdKgClsPlanGrp *p_ClsPlanGrp;
-+ t_FmPcdIpcKgClsPlanParams kgAlloc;
-+ t_Error err = E_OK;
-+ uint32_t oredVectors = 0;
-+ int i, j;
-+
-+ /* this routine is protected by the calling routine ! */
-+ if (p_Grp->numOfOptions >= FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Too many classification plan basic options selected."));
-+
-+ /* find a new clsPlan group */
-+ for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++)
-+ if (!p_FmPcd->p_FmPcdKg->clsPlanGrps[i].used)
-+ break;
-+ if (i == FM_MAX_NUM_OF_PORTS)
-+ RETURN_ERROR(MAJOR, E_FULL,("No classification plan groups available."));
-+
-+ p_FmPcd->p_FmPcdKg->clsPlanGrps[i].used = TRUE;
-+
-+ p_Grp->clsPlanGrpId = (uint8_t)i;
-+
-+ if (p_Grp->numOfOptions == 0)
-+ p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = (uint8_t)i;
-+
-+ p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[i];
-+ p_ClsPlanGrp->netEnvId = p_Grp->netEnvId;
-+ p_ClsPlanGrp->owners = 0;
-+ FmPcdSetClsPlanGrpId(p_FmPcd, p_Grp->netEnvId, p_Grp->clsPlanGrpId);
-+ if (p_Grp->numOfOptions != 0)
-+ FmPcdIncNetEnvOwners(p_FmPcd, p_Grp->netEnvId);
-+
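-+    /* the group holds 2^numOfOptions entries, e.g. 2 options -> 4 entries
-+       (rounded up below to the minimal group of 8), 4 options -> 16 entries */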
-+ p_ClsPlanGrp->sizeOfGrp = (uint16_t)(1 << p_Grp->numOfOptions);
-+ /* a minimal group of 8 is required */
-+ if (p_ClsPlanGrp->sizeOfGrp < CLS_PLAN_NUM_PER_GRP)
-+ p_ClsPlanGrp->sizeOfGrp = CLS_PLAN_NUM_PER_GRP;
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ {
-+ err = KgAllocClsPlanEntries(h_FmPcd, p_ClsPlanGrp->sizeOfGrp, p_FmPcd->guestId, &p_ClsPlanGrp->baseEntry);
-+
-+ if (err)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG);
-+ }
-+ else
-+ {
-+ t_FmPcdIpcMsg msg;
-+ uint32_t replyLength;
-+ t_FmPcdIpcReply reply;
-+
-+        /* in GUEST_PARTITION, we use the IPC to also set a private driver group if required */
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&kgAlloc, 0, sizeof(kgAlloc));
-+ kgAlloc.guestId = p_FmPcd->guestId;
-+ kgAlloc.numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp;
-+ msg.msgId = FM_PCD_ALLOC_KG_CLSPLAN;
-+ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
-+ replyLength = (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry));
-+ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(kgAlloc),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (replyLength != (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ if ((t_Error)reply.error != E_OK)
-+ RETURN_ERROR(MINOR, (t_Error)reply.error, NO_MSG);
-+
-+ p_ClsPlanGrp->baseEntry = *(uint8_t*)(reply.replyBody);
-+ }
-+
-+ /* build classification plan entries parameters */
-+ p_ClsPlanSet->baseEntry = p_ClsPlanGrp->baseEntry;
-+ p_ClsPlanSet->numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp;
-+
-+ oredVectors = 0;
-+ for (i = 0; i<p_Grp->numOfOptions; i++)
-+ {
-+ oredVectors |= p_Grp->optVectors[i];
-+        /* save an array of used options - array index i corresponds to clsPlan entry 2^i */
-+ p_ClsPlanGrp->optArray[i] = p_Grp->options[i];
-+ }
-+    /* initialize the relevant classification plan entries so that all bits
-+     * relevant to the list of options are cleared
-+     */
-+ for (j = 0; j<p_ClsPlanGrp->sizeOfGrp; j++)
-+ p_ClsPlanSet->vectors[j] = ~oredVectors;
-+
-+ for (i = 0; i<p_Grp->numOfOptions; i++)
-+ {
-+        /* option i owns entry 2^i in the clsPlan array; every entry whose index
-+         * has bit i set should have that option's vector bits set. So each option
-+         * has one location that is exclusively its own (1,2,4,8...) and represents
-+         * the presence of that option only, and other locations that represent a
-+         * combination of options.
-+         * e.g.:
-+         * If ethernet-BC is option 1 it gets entry 2 in the table. Entry 2
-+         * now represents a frame with an ethernet-BC header - so the bits
-+         * representing ethernet-BC should be set and all other option bits
-+         * should be cleared.
-+         * Entries 2,3,6,7,10... also have ethernet-BC and therefore have the
-+         * option-1 vector bits set, but they also have other bits set:
-+         * 3=1+2, options 0 and 1
-+         * 6=2+4, options 1 and 2
-+         * 7=1+2+4, options 0, 1 and 2
-+         * 10=2+8, options 1 and 3
-+         * etc.
-+         */
-+
-+ /* now for each option (i), we set their bits in all entries (j)
-+ * that contain bit 2^i.
-+ */
-+ for (j = 0; j<p_ClsPlanGrp->sizeOfGrp; j++)
-+ {
-+ if (j & (1<<i))
-+ p_ClsPlanSet->vectors[j] |= p_Grp->optVectors[i];
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+void FmPcdKgDestroyClsPlanGrp(t_Handle h_FmPcd, uint8_t grpId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdIpcKgClsPlanParams kgAlloc;
-+ t_Error err;
-+ t_FmPcdIpcMsg msg;
-+ uint32_t replyLength;
-+ t_FmPcdIpcReply reply;
-+
-+ /* check that no port is bound to this clsPlan */
-+ if (p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].owners)
-+ {
-+        REPORT_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a clsPlan grp that still has ports bound to it"));
-+ return;
-+ }
-+
-+ FmPcdSetClsPlanGrpId(p_FmPcd, p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].netEnvId, ILLEGAL_CLS_PLAN);
-+
-+ if (grpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId)
-+ p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN;
-+ else
-+ FmPcdDecNetEnvOwners(p_FmPcd, p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].netEnvId);
-+
-+ /* free blocks */
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ KgFreeClsPlanEntries(h_FmPcd,
-+ p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp,
-+ p_FmPcd->guestId,
-+ p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry);
-+    else /* in GUEST_PARTITION, we use the IPC to also set a private driver group if required */
-+ {
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&msg, 0, sizeof(msg));
-+ kgAlloc.guestId = p_FmPcd->guestId;
-+ kgAlloc.numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp;
-+ kgAlloc.clsPlanBase = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry;
-+ msg.msgId = FM_PCD_FREE_KG_CLSPLAN;
-+ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
-+ replyLength = sizeof(uint32_t);
-+ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(kgAlloc),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return;
-+ }
-+ if (replyLength != sizeof(uint32_t))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return;
-+ }
-+ if ((t_Error)reply.error != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Free KG clsPlan failed"));
-+ return;
-+ }
-+ }
-+
-+ /* clear clsPlan driver structure */
-+ memset(&p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId], 0, sizeof(t_FmPcdKgClsPlanGrp));
-+}
-+
-+t_Error FmPcdKgBuildBindPortToSchemes(t_Handle h_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort, uint32_t *p_SpReg, bool add)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t j, schemesPerPortVector = 0;
-+ t_FmPcdKgScheme *p_Scheme;
-+ uint8_t i, relativeSchemeId;
-+ uint32_t tmp, walking1Mask;
-+ uint8_t swPortIndex = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
-+
-+ /* for each scheme */
-+ for (i = 0; i<p_BindPort->numOfSchemes; i++)
-+ {
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]);
-+ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+
-+ if (add)
-+ {
-+ p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId];
-+ if (!FmPcdKgIsSchemeValidSw(p_Scheme))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested scheme is invalid."));
-+ /* check netEnvId of the port against the scheme netEnvId */
-+ if ((p_Scheme->netEnvId != p_BindPort->netEnvId) && (p_Scheme->netEnvId != ILLEGAL_NETENV))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port may not be bound to requested scheme - differ in netEnvId"));
-+
-+ /* if next engine is private port policer profile, we need to check that it is valid */
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, p_BindPort->hardwarePortId);
-+ if (p_Scheme->nextRelativePlcrProfile)
-+ {
-+ for (j = 0;j<p_Scheme->numOfProfiles;j++)
-+ {
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort);
-+ if (p_Scheme->relativeProfileId+j >= p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Relative profile not in range"));
-+ if (!FmPcdPlcrIsProfileValid(p_FmPcd, (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase + p_Scheme->relativeProfileId + j)))
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Relative profile not valid."));
-+ }
-+ }
-+ if (!p_BindPort->useClsPlan)
-+ {
-+            /* This check may be redundant as the port is assigned to the whole NetEnv */
-+
-+            /* if this port does not use clsPlan, it may not be bound to schemes with units that contain
-+               clsPlan options. Schemes that are used only directly should not be checked.
-+               It also may not be bound to schemes that go to CC with units that are options - so we OR
-+               the match vector and the grpBits (= ccUnits) */
-+ if ((p_Scheme->matchVector != SCHEME_ALWAYS_DIRECT) || p_Scheme->ccUnits)
-+ {
-+ uint8_t netEnvId;
-+ walking1Mask = 0x80000000;
-+ netEnvId = (p_Scheme->netEnvId == ILLEGAL_NETENV)? p_BindPort->netEnvId:p_Scheme->netEnvId;
-+ tmp = (p_Scheme->matchVector == SCHEME_ALWAYS_DIRECT)? 0:p_Scheme->matchVector;
-+ tmp |= p_Scheme->ccUnits;
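-+                    /* walk a single set bit from the MSB downward, checking every network
-+                       environment unit referenced by the scheme's match vector or CC units */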
-+ while (tmp)
-+ {
-+ if (tmp & walking1Mask)
-+ {
-+ tmp &= ~walking1Mask;
-+ if (!PcdNetEnvIsUnitWithoutOpts(p_FmPcd, netEnvId, walking1Mask))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port (without clsPlan) may not be bound to requested scheme - uses clsPlan options"));
-+ }
-+ walking1Mask >>= 1;
-+ }
-+ }
-+ }
-+ }
-+ /* build vector */
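-+        /* physical scheme N occupies bit (31 - N), i.e. scheme 0 maps to the MSB of the
-+           port's schemes-per-port register */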
-+ schemesPerPortVector |= 1 << (31 - p_BindPort->schemesIds[i]);
-+ }
-+
-+ *p_SpReg = schemesPerPortVector;
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdKgBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t spReg;
-+ t_Error err = E_OK;
-+
-+ err = FmPcdKgBuildBindPortToSchemes(h_FmPcd, p_SchemeBind, &spReg, TRUE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, TRUE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ IncSchemeOwners(p_FmPcd, p_SchemeBind);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdKgUnbindPortToSchemes(t_Handle h_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t spReg;
-+ t_Error err = E_OK;
-+
-+ err = FmPcdKgBuildBindPortToSchemes(p_FmPcd, p_SchemeBind, &spReg, FALSE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, FALSE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ DecSchemeOwners(p_FmPcd, p_SchemeBind);
-+
-+ return E_OK;
-+}
-+
-+bool FmPcdKgIsSchemeValidSw(t_Handle h_Scheme)
-+{
-+ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme*)h_Scheme;
-+
-+ return p_Scheme->valid;
-+}
-+
-+bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ if (p_FmPcd->p_FmPcdKg->schemes[schemeId].matchVector == SCHEME_ALWAYS_DIRECT)
-+ return TRUE;
-+ else
-+ return FALSE;
-+}
-+
-+t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint8_t i, j;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE);
-+
-+ /* This routine is issued only on master core of master partition -
-+ either directly or through IPC, so no need for lock */
-+
-+ for (j = 0, i = 0; i < FM_PCD_KG_NUM_OF_SCHEMES && j < numOfSchemes; i++)
-+ {
-+ if (!p_FmPcd->p_FmPcdKg->schemesMng[i].allocated)
-+ {
-+ p_FmPcd->p_FmPcdKg->schemesMng[i].allocated = TRUE;
-+ p_FmPcd->p_FmPcdKg->schemesMng[i].ownerId = guestId;
-+ p_SchemesIds[j] = i;
-+ j++;
-+ }
-+ }
-+
-+ if (j != numOfSchemes)
-+ {
-+ /* roll back */
-+ for (j--; j; j--)
-+ {
-+ p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].allocated = FALSE;
-+ p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].ownerId = 0;
-+ p_SchemesIds[j] = 0;
-+ }
-+
-+        RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Not enough free schemes available"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint8_t i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE);
-+
-+ /* This routine is issued only on master core of master partition -
-+ either directly or through IPC */
-+
-+ for (i = 0; i < numOfSchemes; i++)
-+ {
-+ if (!p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme was not previously allocated"));
-+ }
-+ if (p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId != guestId)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme is not owned by caller. "));
-+ }
-+ p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated = FALSE;
-+ p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId = 0;
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint8_t numOfBlocks, blocksFound=0, first=0;
-+ uint8_t i, j;
-+
-+ /* This routine is issued only on master core of master partition -
-+ either directly or through IPC, so no need for lock */
-+
-+ if (!numOfClsPlanEntries)
-+ return E_OK;
-+
-+ if ((numOfClsPlanEntries % CLS_PLAN_NUM_PER_GRP) || (!POWER_OF_2(numOfClsPlanEntries)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfClsPlanEntries must be a power of 2 and divisible by 8"));
-+
-+ numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP);
-+
-+    /* try to find consecutive blocks */
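-+    /* candidate start positions advance in steps of numOfBlocks, so the group always
-+       starts on a numOfBlocks-aligned block boundary (e.g. 32 entries -> 4 blocks of 8) */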
-+ first = 0;
-+ for (i = 0; i < FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP;)
-+ {
-+ if (!p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated)
-+ {
-+ blocksFound++;
-+ i++;
-+ if (blocksFound == numOfBlocks)
-+ break;
-+ }
-+ else
-+ {
-+ blocksFound = 0;
-+ /* advance i to the next aligned address */
-+ first = i = (uint8_t)(first + numOfBlocks);
-+ }
-+ }
-+
-+ if (blocksFound == numOfBlocks)
-+ {
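-+        /* convert the first free block index back to a classification plan entry index
-+           and mark the blocks as allocated for this guest */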
-+ *p_First = (uint8_t)(first * CLS_PLAN_NUM_PER_GRP);
-+ for (j = first; j < (first + numOfBlocks); j++)
-+ {
-+ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[j].allocated = TRUE;
-+ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[j].ownerId = guestId;
-+ }
-+ return E_OK;
-+ }
-+ else
-+ RETURN_ERROR(MINOR, E_FULL, ("No resources for clsPlan"));
-+}
-+
-+void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint8_t numOfBlocks;
-+ uint8_t i, baseBlock;
-+
-+#ifdef DISABLE_ASSERTIONS
-+UNUSED(guestId);
-+#endif /* DISABLE_ASSERTIONS */
-+
-+ /* This routine is issued only on master core of master partition -
-+ either directly or through IPC, so no need for lock */
-+
-+ numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP);
-+ ASSERT_COND(!(base%CLS_PLAN_NUM_PER_GRP));
-+
-+ baseBlock = (uint8_t)(base/CLS_PLAN_NUM_PER_GRP);
-+ for (i=baseBlock;i<baseBlock+numOfBlocks;i++)
-+ {
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated);
-+ ASSERT_COND(guestId == p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId);
-+ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated = FALSE;
-+ p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId = 0;
-+ }
-+}
-+
-+void KgEnable(t_FmPcd *p_FmPcd)
-+{
-+ struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+
-+ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
-+ fman_kg_enable(p_Regs);
-+}
-+
-+void KgDisable(t_FmPcd *p_FmPcd)
-+{
-+ struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+
-+ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
-+ fman_kg_disable(p_Regs);
-+}
-+
-+void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ struct fman_kg_cp_regs *p_FmPcdKgPortRegs;
-+ uint32_t tmpKgarReg = 0, intFlags;
-+ uint16_t i, j;
-+
-+ /* This routine is protected by the calling routine ! */
-+ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
-+ p_FmPcdKgPortRegs = &p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->clsPlanRegs;
-+
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
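-+    /* classification plan entries are written one block (CLS_PLAN_NUM_PER_GRP = 8 entries)
-+       at a time: the eight kgcpe registers are filled and flushed with a single KGAR command */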
-+ for (i=p_Set->baseEntry;i<p_Set->baseEntry+p_Set->numOfClsPlanEntries;i+=8)
-+ {
-+ tmpKgarReg = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP));
-+
-+ for (j = i; j < i+8; j++)
-+ {
-+ ASSERT_COND(IN_RANGE(0, (j - p_Set->baseEntry), FM_PCD_MAX_NUM_OF_CLS_PLANS-1));
-+ WRITE_UINT32(p_FmPcdKgPortRegs->kgcpe[j % CLS_PLAN_NUM_PER_GRP],p_Set->vectors[j - p_Set->baseEntry]);
-+ }
-+
-+ if (WriteKgarWait(p_FmPcd, tmpKgarReg) != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("WriteKgarWait FAILED"));
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ return;
-+ }
-+ }
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+}
-+
-+t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams)
-+{
-+ t_FmPcdKg *p_FmPcdKg;
-+
-+ UNUSED(p_FmPcd);
-+
-+ if (p_FmPcdParams->numOfSchemes > FM_PCD_KG_NUM_OF_SCHEMES)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("numOfSchemes should not exceed %d", FM_PCD_KG_NUM_OF_SCHEMES));
-+ return NULL;
-+ }
-+
-+ p_FmPcdKg = (t_FmPcdKg *)XX_Malloc(sizeof(t_FmPcdKg));
-+ if (!p_FmPcdKg)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Keygen allocation FAILED"));
-+ return NULL;
-+ }
-+ memset(p_FmPcdKg, 0, sizeof(t_FmPcdKg));
-+
-+
-+ if (FmIsMaster(p_FmPcd->h_Fm))
-+ {
-+ p_FmPcdKg->p_FmPcdKgRegs = (struct fman_kg_regs *)UINT_TO_PTR(FmGetPcdKgBaseAddr(p_FmPcdParams->h_Fm));
-+ p_FmPcd->exceptions |= DEFAULT_fmPcdKgErrorExceptions;
-+ p_FmPcdKg->p_IndirectAccessRegs = (u_FmPcdKgIndirectAccessRegs *)&p_FmPcdKg->p_FmPcdKgRegs->fmkg_indirect[0];
-+ }
-+
-+ p_FmPcdKg->numOfSchemes = p_FmPcdParams->numOfSchemes;
-+ if ((p_FmPcd->guestId == NCSW_MASTER_ID) && !p_FmPcdKg->numOfSchemes)
-+ {
-+ p_FmPcdKg->numOfSchemes = FM_PCD_KG_NUM_OF_SCHEMES;
-+        DBG(WARNING, ("numOfSchemes was defined as 0 by the user; redefined by the driver to FM_PCD_KG_NUM_OF_SCHEMES"));
-+ }
-+
-+ p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN;
-+
-+ return p_FmPcdKg;
-+}
-+
-+t_Error KgInit(t_FmPcd *p_FmPcd)
-+{
-+ t_Error err = E_OK;
-+
-+ p_FmPcd->p_FmPcdKg->h_HwSpinlock = XX_InitSpinlock();
-+ if (!p_FmPcd->p_FmPcdKg->h_HwSpinlock)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM KG HW spinlock"));
-+
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ err = KgInitMaster(p_FmPcd);
-+ else
-+ err = KgInitGuest(p_FmPcd);
-+
-+ if (err != E_OK)
-+ {
-+ if (p_FmPcd->p_FmPcdKg->h_HwSpinlock)
-+ XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock);
-+ }
-+
-+ return err;
-+}
-+
-+t_Error KgFree(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdIpcKgSchemesParams kgAlloc;
-+ t_Error err = E_OK;
-+ t_FmPcdIpcMsg msg;
-+ uint32_t replyLength;
-+ t_FmPcdIpcReply reply;
-+
-+ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_KG, 0, e_FM_INTR_TYPE_ERR);
-+
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ {
-+ err = FmPcdKgFreeSchemes(p_FmPcd,
-+ p_FmPcd->p_FmPcdKg->numOfSchemes,
-+ p_FmPcd->guestId,
-+ p_FmPcd->p_FmPcdKg->schemesIds);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (p_FmPcd->p_FmPcdKg->h_HwSpinlock)
-+ XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock);
-+
-+ return E_OK;
-+ }
-+
-+ /* guest */
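-+    /* a guest partition returns its scheme allocation to the master through an
-+       FM_PCD_FREE_KG_SCHEMES IPC request */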
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&msg, 0, sizeof(msg));
-+ kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes;
-+ kgAlloc.guestId = p_FmPcd->guestId;
-+ ASSERT_COND(kgAlloc.numOfSchemes < FM_PCD_KG_NUM_OF_SCHEMES);
-+ memcpy(kgAlloc.schemesIds, p_FmPcd->p_FmPcdKg->schemesIds, (sizeof(uint8_t))*kgAlloc.numOfSchemes);
-+ msg.msgId = FM_PCD_FREE_KG_SCHEMES;
-+ memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc));
-+ replyLength = sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(kgAlloc),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+
-+ if (p_FmPcd->p_FmPcdKg->h_HwSpinlock)
-+ XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock);
-+
-+ return (t_Error)reply.error;
-+}
-+
-+t_Error FmPcdKgSetOrBindToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t netEnvId, protocolOpt_t *p_OptArray, uint8_t *p_ClsPlanGrpId, bool *p_IsEmptyClsPlanGrp)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdKgInterModuleClsPlanGrpParams grpParams, *p_GrpParams;
-+ t_FmPcdKgClsPlanGrp *p_ClsPlanGrp;
-+ t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet;
-+ t_Error err;
-+
-+ /* This function is issued only from FM_PORT_SetPcd which locked all PCD modules,
-+ so no need for lock here */
-+
-+ memset(&grpParams, 0, sizeof(grpParams));
-+ grpParams.clsPlanGrpId = ILLEGAL_CLS_PLAN;
-+ p_GrpParams = &grpParams;
-+
-+ p_GrpParams->netEnvId = netEnvId;
-+
-+    /* Get the clsPlan information from the NetEnv (it may already exist,
-+     * or may still need to be built) */
-+ err = PcdGetClsPlanGrpParams(h_FmPcd, p_GrpParams);
-+ if (err)
-+ RETURN_ERROR(MINOR,err,NO_MSG);
-+
-+ if (p_GrpParams->grpExists)
-+ {
-+ /* this group was already updated (at least) in SW */
-+ *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId;
-+ }
-+ else
-+ {
-+ p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
-+ if (!p_ClsPlanSet)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
-+ memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));
-+ /* Build (in SW) the clsPlan parameters, including the vectors to be written to HW */
-+ err = FmPcdKgBuildClsPlanGrp(h_FmPcd, p_GrpParams, p_ClsPlanSet);
-+ if (err)
-+ {
-+ XX_Free(p_ClsPlanSet);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+ *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId;
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ /* write clsPlan entries to memory */
-+ err = FmHcPcdKgSetClsPlan(p_FmPcd->h_Hc, p_ClsPlanSet);
-+ if (err)
-+ {
-+ XX_Free(p_ClsPlanSet);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+ else
-+ /* write clsPlan entries to memory */
-+ KgSetClsPlan(p_FmPcd, p_ClsPlanSet);
-+
-+ XX_Free(p_ClsPlanSet);
-+ }
-+
-+ /* Set caller parameters */
-+
-+ /* mark if this is an empty classification group */
-+ if (*p_ClsPlanGrpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId)
-+ *p_IsEmptyClsPlanGrp = TRUE;
-+ else
-+ *p_IsEmptyClsPlanGrp = FALSE;
-+
-+ p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId];
-+
-+ /* increment owners number */
-+ p_ClsPlanGrp->owners++;
-+
-+ /* copy options array for port */
-+ memcpy(p_OptArray, &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId].optArray, FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)*sizeof(protocolOpt_t));
-+
-+ /* bind port to the new or existing group */
-+ err = BindPortToClsPlanGrp(p_FmPcd, hardwarePortId, p_GrpParams->clsPlanGrpId);
-+ if (err)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdKgDeleteOrUnbindPortToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdKgClsPlanGrp *p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId];
-+ t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet;
-+ t_Error err;
-+
-+ /* This function is issued only from FM_PORT_DeletePcd which locked all PCD modules,
-+ so no need for lock here */
-+
-+ UnbindPortToClsPlanGrp(p_FmPcd, hardwarePortId);
-+
-+ /* decrement owners number */
-+ ASSERT_COND(p_ClsPlanGrp->owners);
-+ p_ClsPlanGrp->owners--;
-+
-+ if (!p_ClsPlanGrp->owners)
-+ {
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcPcdKgDeleteClsPlan(p_FmPcd->h_Hc, clsPlanGrpId);
-+ return err;
-+ }
-+ else
-+ {
-+ /* clear clsPlan entries in memory */
-+ p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
-+ if (!p_ClsPlanSet)
-+ {
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
-+ }
-+ memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));
-+
-+ p_ClsPlanSet->baseEntry = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry;
-+ p_ClsPlanSet->numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp;
-+ KgSetClsPlan(p_FmPcd, p_ClsPlanSet);
-+ XX_Free(p_ClsPlanSet);
-+
-+ FmPcdKgDestroyClsPlanGrp(h_FmPcd, clsPlanGrpId);
-+ }
-+ }
-+ return E_OK;
-+}
-+
-+uint32_t FmPcdKgGetRequiredAction(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
-+
-+ return p_FmPcd->p_FmPcdKg->schemes[schemeId].requiredAction;
-+}
-+
-+uint32_t FmPcdKgGetRequiredActionFlag(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
-+
-+ return p_FmPcd->p_FmPcdKg->schemes[schemeId].requiredActionFlag;
-+}
-+
-+bool FmPcdKgIsDirectPlcr(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
-+
-+ return p_FmPcd->p_FmPcdKg->schemes[schemeId].directPlcr;
-+}
-+
-+
-+uint16_t FmPcdKgGetRelativeProfileId(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
-+
-+ return p_FmPcd->p_FmPcdKg->schemes[schemeId].relativeProfileId;
-+}
-+
-+bool FmPcdKgIsDistrOnPlcrProfile(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
-+
-+ if ((p_FmPcd->p_FmPcdKg->schemes[schemeId].extractedOrs &&
-+ p_FmPcd->p_FmPcdKg->schemes[schemeId].bitOffsetInPlcrProfile) ||
-+ p_FmPcd->p_FmPcdKg->schemes[schemeId].nextRelativePlcrProfile)
-+ return TRUE;
-+ else
-+ return FALSE;
-+
-+}
-+
-+e_FmPcdEngine FmPcdKgGetNextEngine(t_Handle h_FmPcd, uint8_t relativeSchemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].valid);
-+
-+ return p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine;
-+}
-+
-+e_FmPcdDoneAction FmPcdKgGetDoneAction(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);
-+
-+ return p_FmPcd->p_FmPcdKg->schemes[schemeId].doneAction;
-+}
-+
-+void FmPcdKgUpdateRequiredAction(t_Handle h_Scheme, uint32_t requiredAction)
-+{
-+ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme *)h_Scheme;
-+
-+ /* this routine is protected by calling routine */
-+
-+ ASSERT_COND(p_Scheme->valid);
-+
-+ p_Scheme->requiredAction |= requiredAction;
-+}
-+
-+bool FmPcdKgHwSchemeIsValid(uint32_t schemeModeReg)
-+{
-+ return (bool)!!(schemeModeReg & KG_SCH_MODE_EN);
-+}
-+
-+uint32_t FmPcdKgBuildWriteSchemeActionReg(uint8_t schemeId, bool updateCounter)
-+{
-+ return (uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT) |
-+ FM_KG_KGAR_GO |
-+ FM_KG_KGAR_WRITE |
-+ FM_KG_KGAR_SEL_SCHEME_ENTRY |
-+ DUMMY_PORT_ID |
-+ (updateCounter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT:0));
-+}
-+
-+uint32_t FmPcdKgBuildReadSchemeActionReg(uint8_t schemeId)
-+{
-+ return (uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT) |
-+ FM_KG_KGAR_GO |
-+ FM_KG_KGAR_READ |
-+ FM_KG_KGAR_SEL_SCHEME_ENTRY |
-+ DUMMY_PORT_ID |
-+ FM_KG_KGAR_SCM_WSEL_UPDATE_CNT);
-+
-+}
-+
-+uint32_t FmPcdKgBuildWriteClsPlanBlockActionReg(uint8_t grpId)
-+{
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ FM_KG_KGAR_WRITE |
-+ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
-+ DUMMY_PORT_ID |
-+ ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) |
-+ FM_PCD_KG_KGAR_WSEL_MASK);
-+
-+ /* if we ever want to write 1 by 1, use:
-+ sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP)));
-+ */
-+}
-+
-+uint32_t FmPcdKgBuildWritePortSchemeBindActionReg(uint8_t hardwarePortId)
-+{
-+
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ FM_KG_KGAR_WRITE |
-+ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
-+ hardwarePortId |
-+ FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
-+}
-+
-+uint32_t FmPcdKgBuildReadPortSchemeBindActionReg(uint8_t hardwarePortId)
-+{
-+
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ FM_KG_KGAR_READ |
-+ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
-+ hardwarePortId |
-+ FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
-+}
-+
-+uint32_t FmPcdKgBuildWritePortClsPlanBindActionReg(uint8_t hardwarePortId)
-+{
-+
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ FM_KG_KGAR_WRITE |
-+ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
-+ hardwarePortId |
-+ FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP);
-+}
-+
-+uint8_t FmPcdKgGetClsPlanGrpBase(t_Handle h_FmPcd, uint8_t clsPlanGrp)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].baseEntry;
-+}
-+
-+uint16_t FmPcdKgGetClsPlanGrpSize(t_Handle h_FmPcd, uint8_t clsPlanGrp)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].sizeOfGrp;
-+}
-+
-+
-+uint8_t FmPcdKgGetSchemeId(t_Handle h_Scheme)
-+{
-+ return ((t_FmPcdKgScheme*)h_Scheme)->schemeId;
-+
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+bool FmPcdKgGetVspe(t_Handle h_Scheme)
-+{
-+ return ((t_FmPcdKgScheme*)h_Scheme)->vspe;
-+
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+uint8_t FmPcdKgGetRelativeSchemeId(t_Handle h_FmPcd, uint8_t schemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint8_t i;
-+
-+ for (i = 0;i<p_FmPcd->p_FmPcdKg->numOfSchemes;i++)
-+ if (p_FmPcd->p_FmPcdKg->schemesIds[i] == schemeId)
-+ return i;
-+
-+ if (i == p_FmPcd->p_FmPcdKg->numOfSchemes)
-+ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("Scheme is out of partition range"));
-+
-+ return FM_PCD_KG_NUM_OF_SCHEMES;
-+}
-+
-+t_Handle FmPcdKgGetSchemeHandle(t_Handle h_FmPcd, uint8_t relativeSchemeId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd);
-+
-+ /* check that schemeId is in range */
-+ if (relativeSchemeId >= p_FmPcd->p_FmPcdKg->numOfSchemes)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("relative-scheme-id %d!", relativeSchemeId));
-+ return NULL;
-+ }
-+
-+ if (!FmPcdKgIsSchemeValidSw(&p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId]))
-+ return NULL;
-+
-+ return &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId];
-+}
-+
-+bool FmPcdKgIsSchemeHasOwners(t_Handle h_Scheme)
-+{
-+ return (((t_FmPcdKgScheme*)h_Scheme)->owners == 0)?FALSE:TRUE;
-+}
-+
-+t_Error FmPcdKgCcGetSetParams(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint8_t relativeSchemeId, physicalSchemeId;
-+ uint32_t tmpKgarReg, tmpReg32 = 0, intFlags;
-+ t_Error err;
-+ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme*)h_Scheme;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0);
-+
-+ /* Calling function locked all PCD modules, so no need to lock here */
-+
-+ if (!FmPcdKgIsSchemeValidSw(h_Scheme))
-+ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid"));
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcPcdKgCcGetSetParams(p_FmPcd->h_Hc, h_Scheme, requiredAction, value);
-+
-+ UpdateRequiredActionFlag(h_Scheme,TRUE);
-+ FmPcdKgUpdateRequiredAction(h_Scheme,requiredAction);
-+ return err;
-+ }
-+
-+ physicalSchemeId = p_Scheme->schemeId;
-+
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId);
-+ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+
-+ if (!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].requiredActionFlag ||
-+ !(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].requiredAction & requiredAction))
-+ {
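-+        /* each update below follows the same read-modify-write pattern: a KGAR read command
-+           loads the scheme into the indirect-access registers, the relevant field is modified,
-+           and a KGAR write command flushes it back to the scheme entry */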
-+ if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
-+ {
-+ switch (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine)
-+ {
-+ case (e_FM_PCD_DONE):
-+ if (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].doneAction == e_FM_PCD_ENQ_FRAME)
-+ {
-+ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
-+ ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA);
-+ /* call indirect command for scheme write */
-+ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ }
-+ break;
-+ case (e_FM_PCD_PLCR):
-+ if (!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].directPlcr ||
-+ (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].extractedOrs &&
-+ p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].bitOffsetInPlcrProfile) ||
-+ p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextRelativePlcrProfile)
-+ {
-+                        RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this situation the policer profile cannot be used with distribution and has to be shared"));
-+ }
-+ err = FmPcdPlcrCcGetSetParams(h_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].relativeProfileId, requiredAction);
-+ if (err)
-+ {
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ break;
-+ default:
-+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("in this situation the next engine after the scheme must be either PLCR or ENQ_FRAME"));
-+ }
-+ }
-+ if (requiredAction & UPDATE_KG_NIA_CC_WA)
-+ {
-+ if (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine == e_FM_PCD_CC)
-+ {
-+ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
-+ ASSERT_COND(tmpReg32 & (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
-+ tmpReg32 &= ~NIA_FM_CTL_AC_CC;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32 | NIA_FM_CTL_AC_PRE_CC);
-+ /* call indirect command for scheme write */
-+ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ }
-+ }
-+ if (requiredAction & UPDATE_KG_OPT_MODE)
-+ {
-+ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_om, value);
-+ /* call indirect command for scheme write */
-+ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ }
-+ if (requiredAction & UPDATE_KG_NIA)
-+ {
-+ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
-+ tmpReg32 &= ~(NIA_ENG_MASK | NIA_AC_MASK);
-+ tmpReg32 |= value;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32);
-+ /* call indirect command for scheme write */
-+ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ }
-+ }
-+
-+ UpdateRequiredActionFlag(h_Scheme, TRUE);
-+ FmPcdKgUpdateRequiredAction(h_Scheme, requiredAction);
-+
-+ return E_OK;
-+}
-+/*********************** End of inter-module routines ************************/
-+
-+
-+/****************************************/
-+/* API routines */
-+/****************************************/
-+
-+t_Handle FM_PCD_KgSchemeSet(t_Handle h_FmPcd, t_FmPcdKgSchemeParams *p_SchemeParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ struct fman_kg_scheme_regs schemeRegs;
-+ struct fman_kg_scheme_regs *p_MemRegs;
-+ uint8_t i;
-+ t_Error err = E_OK;
-+ uint32_t tmpKgarReg;
-+ uint32_t intFlags;
-+ uint8_t physicalSchemeId, relativeSchemeId = 0;
-+ t_FmPcdKgScheme *p_Scheme;
-+
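-+    /* modify: reuse an existing valid scheme, taking its flag lock first;
-+       create: claim the requested free relative scheme id and acquire a lock object for it */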
-+ if (p_SchemeParams->modify)
-+ {
-+ p_Scheme = (t_FmPcdKgScheme *)p_SchemeParams->id.h_Scheme;
-+ p_FmPcd = p_Scheme->h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, NULL);
-+
-+ if (!FmPcdKgIsSchemeValidSw(p_Scheme))
-+ {
-+ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS,
-+ ("Scheme is invalid"));
-+ return NULL;
-+ }
-+
-+ if (!KgSchemeFlagTryLock(p_Scheme))
-+ {
-+ DBG(TRACE, ("Scheme Try Lock - BUSY"));
-+ /* Signal to caller BUSY condition */
-+ p_SchemeParams->id.h_Scheme = NULL;
-+ return NULL;
-+ }
-+ }
-+ else
-+ {
-+ p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, NULL);
-+
-+ relativeSchemeId = p_SchemeParams->id.relativeSchemeId;
-+ /* check that schemeId is in range */
-+ if (relativeSchemeId >= p_FmPcd->p_FmPcdKg->numOfSchemes)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("relative-scheme-id %d!", relativeSchemeId));
-+ return NULL;
-+ }
-+
-+ p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId];
-+ if (FmPcdKgIsSchemeValidSw(p_Scheme))
-+ {
-+ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS,
-+ ("Scheme id (%d)!", relativeSchemeId));
-+ return NULL;
-+ }
-+        /* Clear all fields, the scheme may have been previously used */
-+ memset(p_Scheme, 0, sizeof(t_FmPcdKgScheme));
-+
-+ p_Scheme->schemeId = p_FmPcd->p_FmPcdKg->schemesIds[relativeSchemeId];
-+ p_Scheme->h_FmPcd = p_FmPcd;
-+
-+ p_Scheme->p_Lock = FmPcdAcquireLock(p_FmPcd);
-+ if (!p_Scheme->p_Lock)
-+ REPORT_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM KG Scheme lock obj!"));
-+ }
-+
-+ err = BuildSchemeRegs((t_Handle)p_Scheme, p_SchemeParams, &schemeRegs);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ if (p_SchemeParams->modify)
-+ KgSchemeFlagUnlock(p_Scheme);
-+ if (!p_SchemeParams->modify &&
-+ p_Scheme->p_Lock)
-+ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
-+ return NULL;
-+ }
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcPcdKgSetScheme(p_FmPcd->h_Hc,
-+ (t_Handle)p_Scheme,
-+ &schemeRegs,
-+ p_SchemeParams->schemeCounter.update);
-+ if (p_SchemeParams->modify)
-+ KgSchemeFlagUnlock(p_Scheme);
-+ if (err)
-+ {
-+ if (!p_SchemeParams->modify &&
-+ p_Scheme->p_Lock)
-+ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
-+ return NULL;
-+ }
-+ if (!p_SchemeParams->modify)
-+ ValidateSchemeSw(p_Scheme);
-+ return (t_Handle)p_Scheme;
-+ }
-+
-+ physicalSchemeId = p_Scheme->schemeId;
-+
-+ /* configure all 21 scheme registers */
-+ p_MemRegs = &p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs;
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ WRITE_UINT32(p_MemRegs->kgse_ppc, schemeRegs.kgse_ppc);
-+ WRITE_UINT32(p_MemRegs->kgse_ccbs, schemeRegs.kgse_ccbs);
-+ WRITE_UINT32(p_MemRegs->kgse_mode, schemeRegs.kgse_mode);
-+ WRITE_UINT32(p_MemRegs->kgse_mv, schemeRegs.kgse_mv);
-+ WRITE_UINT32(p_MemRegs->kgse_dv0, schemeRegs.kgse_dv0);
-+ WRITE_UINT32(p_MemRegs->kgse_dv1, schemeRegs.kgse_dv1);
-+ WRITE_UINT32(p_MemRegs->kgse_ekdv, schemeRegs.kgse_ekdv);
-+ WRITE_UINT32(p_MemRegs->kgse_ekfc, schemeRegs.kgse_ekfc);
-+ WRITE_UINT32(p_MemRegs->kgse_bmch, schemeRegs.kgse_bmch);
-+ WRITE_UINT32(p_MemRegs->kgse_bmcl, schemeRegs.kgse_bmcl);
-+ WRITE_UINT32(p_MemRegs->kgse_hc, schemeRegs.kgse_hc);
-+ WRITE_UINT32(p_MemRegs->kgse_spc, schemeRegs.kgse_spc);
-+ WRITE_UINT32(p_MemRegs->kgse_fqb, schemeRegs.kgse_fqb);
-+ WRITE_UINT32(p_MemRegs->kgse_om, schemeRegs.kgse_om);
-+ WRITE_UINT32(p_MemRegs->kgse_vsp, schemeRegs.kgse_vsp);
-+ for (i=0 ; i<FM_KG_NUM_OF_GENERIC_REGS ; i++)
-+ WRITE_UINT32(p_MemRegs->kgse_gec[i], schemeRegs.kgse_gec[i]);
-+
-+ /* call indirect command for scheme write */
-+ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, p_SchemeParams->schemeCounter.update);
-+
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+
-+ if (!p_SchemeParams->modify)
-+ ValidateSchemeSw(p_Scheme);
-+ else
-+ KgSchemeFlagUnlock(p_Scheme);
-+
-+ return (t_Handle)p_Scheme;
-+}
-+
-+t_Error FM_PCD_KgSchemeDelete(t_Handle h_Scheme)
-+{
-+ t_FmPcd *p_FmPcd;
-+ uint8_t physicalSchemeId;
-+ uint32_t tmpKgarReg, intFlags;
-+ t_Error err = E_OK;
-+ t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme *)h_Scheme;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_Scheme, E_INVALID_HANDLE);
-+
-+ p_FmPcd = (t_FmPcd*)(p_Scheme->h_FmPcd);
-+
-+ UpdateRequiredActionFlag(h_Scheme, FALSE);
-+
-+ /* check that no port is bound to this scheme */
-+ err = InvalidateSchemeSw(h_Scheme);
-+ if (err)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcPcdKgDeleteScheme(p_FmPcd->h_Hc, h_Scheme);
-+ if (p_Scheme->p_Lock)
-+ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
-+ return err;
-+ }
-+
-+ physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;
-+
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ /* clear mode register, including enable bit */
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, 0);
-+
-+ /* call indirect command for scheme write */
-+ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
-+
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+
-+ if (p_Scheme->p_Lock)
-+ FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
-+
-+ return E_OK;
-+}
-+
-+uint32_t FM_PCD_KgSchemeGetCounter(t_Handle h_Scheme)
-+{
-+ t_FmPcd *p_FmPcd;
-+ uint32_t tmpKgarReg, spc, intFlags;
-+ uint8_t physicalSchemeId;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_Scheme, E_INVALID_HANDLE, 0);
-+
-+ p_FmPcd = (t_FmPcd*)(((t_FmPcdKgScheme *)h_Scheme)->h_FmPcd);
-+ if (p_FmPcd->h_Hc)
-+ return FmHcPcdKgGetSchemeCounter(p_FmPcd->h_Hc, h_Scheme);
-+
-+ physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;
-+
-+ if (FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES)
-+ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+
-+ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode) & KG_SCH_MODE_EN))
-+ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid"));
-+ spc = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_spc);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+
-+ return spc;
-+}
-+
-+t_Error FM_PCD_KgSchemeSetCounter(t_Handle h_Scheme, uint32_t value)
-+{
-+ t_FmPcd *p_FmPcd;
-+ uint32_t tmpKgarReg, intFlags;
-+ uint8_t physicalSchemeId;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_Scheme, E_INVALID_HANDLE, 0);
-+
-+ p_FmPcd = (t_FmPcd*)(((t_FmPcdKgScheme *)h_Scheme)->h_FmPcd);
-+
-+ if (!FmPcdKgIsSchemeValidSw(h_Scheme))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested scheme is invalid."));
-+
-+ if (p_FmPcd->h_Hc)
-+ return FmHcPcdKgSetSchemeCounter(p_FmPcd->h_Hc, h_Scheme, value);
-+
-+ physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;
-+ /* check that schemeId is in range */
-+ if (FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES)
-+ REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+
-+ /* read specified scheme into scheme registers */
-+ tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
-+ intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode) & KG_SCH_MODE_EN))
-+ {
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid"));
-+ }
-+
-+ /* change counter value */
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_spc, value);
-+
-+ /* call indirect command for scheme write */
-+ tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE);
-+
-+ WriteKgarWait(p_FmPcd, tmpKgarReg);
-+ KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_KgSetAdditionalDataAfterParsing(t_Handle h_FmPcd, uint8_t payloadOffset)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ struct fman_kg_regs *p_Regs;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER);
-+
-+ p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+ if (!FmIsMaster(p_FmPcd->h_Fm))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetAdditionalDataAfterParsing - guest mode!"));
-+
-+ WRITE_UINT32(p_Regs->fmkg_fdor,payloadOffset);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_KgSetDfltValue(t_Handle h_FmPcd, uint8_t valueId, uint32_t value)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ struct fman_kg_regs *p_Regs;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((valueId == 0) || (valueId == 1)), E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER);
-+
-+ p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs;
-+
-+ if (!FmIsMaster(p_FmPcd->h_Fm))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetDfltValue - guest mode!"));
-+
-+ if (valueId == 0)
-+ WRITE_UINT32(p_Regs->fmkg_gdv0r,value);
-+ else
-+ WRITE_UINT32(p_Regs->fmkg_gdv1r,value);
-+ return E_OK;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h
-@@ -0,0 +1,206 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_kg.h
-+
-+ @Description FM KG private header
-+*//***************************************************************************/
-+#ifndef __FM_KG_H
-+#define __FM_KG_H
-+
-+#include "std_ext.h"
-+
-+/***********************************************************************/
-+/* Keygen defines */
-+/***********************************************************************/
-+/* masks */
-+#if (DPAA_VERSION >= 11)
-+#define KG_SCH_VSP_SHIFT_MASK 0x0003f000
-+#define KG_SCH_OM_VSPE 0x00000001
-+#define KG_SCH_VSP_NO_KSP_EN 0x80000000
-+
-+#define MAX_SP_SHIFT 23
-+#define KG_SCH_VSP_MASK_SHIFT 12
-+#define KG_SCH_VSP_SHIFT 24
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+typedef uint32_t t_KnownFieldsMasks;
-+#define KG_SCH_KN_PORT_ID 0x80000000
-+#define KG_SCH_KN_MACDST 0x40000000
-+#define KG_SCH_KN_MACSRC 0x20000000
-+#define KG_SCH_KN_TCI1 0x10000000
-+#define KG_SCH_KN_TCI2 0x08000000
-+#define KG_SCH_KN_ETYPE 0x04000000
-+#define KG_SCH_KN_PPPSID 0x02000000
-+#define KG_SCH_KN_PPPID 0x01000000
-+#define KG_SCH_KN_MPLS1 0x00800000
-+#define KG_SCH_KN_MPLS2 0x00400000
-+#define KG_SCH_KN_MPLS_LAST 0x00200000
-+#define KG_SCH_KN_IPSRC1 0x00100000
-+#define KG_SCH_KN_IPDST1 0x00080000
-+#define KG_SCH_KN_PTYPE1 0x00040000
-+#define KG_SCH_KN_IPTOS_TC1 0x00020000
-+#define KG_SCH_KN_IPV6FL1 0x00010000
-+#define KG_SCH_KN_IPSRC2 0x00008000
-+#define KG_SCH_KN_IPDST2 0x00004000
-+#define KG_SCH_KN_PTYPE2 0x00002000
-+#define KG_SCH_KN_IPTOS_TC2 0x00001000
-+#define KG_SCH_KN_IPV6FL2 0x00000800
-+#define KG_SCH_KN_GREPTYPE 0x00000400
-+#define KG_SCH_KN_IPSEC_SPI 0x00000200
-+#define KG_SCH_KN_IPSEC_NH 0x00000100
-+#define KG_SCH_KN_IPPID 0x00000080
-+#define KG_SCH_KN_L4PSRC 0x00000004
-+#define KG_SCH_KN_L4PDST 0x00000002
-+#define KG_SCH_KN_TFLG 0x00000001
-+
-+typedef uint8_t t_GenericCodes;
-+#define KG_SCH_GEN_SHIM1 0x70
-+#define KG_SCH_GEN_DEFAULT 0x10
-+#define KG_SCH_GEN_PARSE_RESULT_N_FQID 0x20
-+#define KG_SCH_GEN_START_OF_FRM 0x40
-+#define KG_SCH_GEN_SHIM2 0x71
-+#define KG_SCH_GEN_IP_PID_NO_V 0x72
-+#define KG_SCH_GEN_ETH 0x03
-+#define KG_SCH_GEN_ETH_NO_V 0x73
-+#define KG_SCH_GEN_SNAP 0x04
-+#define KG_SCH_GEN_SNAP_NO_V 0x74
-+#define KG_SCH_GEN_VLAN1 0x05
-+#define KG_SCH_GEN_VLAN1_NO_V 0x75
-+#define KG_SCH_GEN_VLAN2 0x06
-+#define KG_SCH_GEN_VLAN2_NO_V 0x76
-+#define KG_SCH_GEN_ETH_TYPE 0x07
-+#define KG_SCH_GEN_ETH_TYPE_NO_V 0x77
-+#define KG_SCH_GEN_PPP 0x08
-+#define KG_SCH_GEN_PPP_NO_V 0x78
-+#define KG_SCH_GEN_MPLS1 0x09
-+#define KG_SCH_GEN_MPLS2 0x19
-+#define KG_SCH_GEN_MPLS3 0x29
-+#define KG_SCH_GEN_MPLS1_NO_V 0x79
-+#define KG_SCH_GEN_MPLS_LAST 0x0a
-+#define KG_SCH_GEN_MPLS_LAST_NO_V 0x7a
-+#define KG_SCH_GEN_IPV4 0x0b
-+#define KG_SCH_GEN_IPV6 0x1b
-+#define KG_SCH_GEN_L3_NO_V 0x7b
-+#define KG_SCH_GEN_IPV4_TUNNELED 0x0c
-+#define KG_SCH_GEN_IPV6_TUNNELED 0x1c
-+#define KG_SCH_GEN_MIN_ENCAP 0x2c
-+#define KG_SCH_GEN_IP2_NO_V 0x7c
-+#define KG_SCH_GEN_GRE 0x0d
-+#define KG_SCH_GEN_GRE_NO_V 0x7d
-+#define KG_SCH_GEN_TCP 0x0e
-+#define KG_SCH_GEN_UDP 0x1e
-+#define KG_SCH_GEN_IPSEC_AH 0x2e
-+#define KG_SCH_GEN_SCTP 0x3e
-+#define KG_SCH_GEN_DCCP 0x4e
-+#define KG_SCH_GEN_IPSEC_ESP 0x6e
-+#define KG_SCH_GEN_L4_NO_V 0x7e
-+#define KG_SCH_GEN_NEXTHDR 0x7f
-+/* shifts */
-+#define KG_SCH_PP_SHIFT_HIGH_SHIFT 27
-+#define KG_SCH_PP_SHIFT_LOW_SHIFT 12
-+#define KG_SCH_PP_MASK_SHIFT 16
-+#define KG_SCH_MODE_CCOBASE_SHIFT 24
-+#define KG_SCH_DEF_MAC_ADDR_SHIFT 30
-+#define KG_SCH_DEF_TCI_SHIFT 28
-+#define KG_SCH_DEF_ENET_TYPE_SHIFT 26
-+#define KG_SCH_DEF_PPP_SESSION_ID_SHIFT 24
-+#define KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT 22
-+#define KG_SCH_DEF_MPLS_LABEL_SHIFT 20
-+#define KG_SCH_DEF_IP_ADDR_SHIFT 18
-+#define KG_SCH_DEF_PROTOCOL_TYPE_SHIFT 16
-+#define KG_SCH_DEF_IP_TOS_TC_SHIFT 14
-+#define KG_SCH_DEF_IPV6_FLOW_LABEL_SHIFT 12
-+#define KG_SCH_DEF_IPSEC_SPI_SHIFT 10
-+#define KG_SCH_DEF_L4_PORT_SHIFT 8
-+#define KG_SCH_DEF_TCP_FLAG_SHIFT 6
-+#define KG_SCH_HASH_CONFIG_SHIFT_SHIFT 24
-+#define KG_SCH_GEN_MASK_SHIFT 16
-+#define KG_SCH_GEN_HT_SHIFT 8
-+#define KG_SCH_GEN_SIZE_SHIFT 24
-+#define KG_SCH_GEN_DEF_SHIFT 29
-+#define FM_PCD_KG_KGAR_NUM_SHIFT 16
-+
-+/* others */
-+#define NUM_OF_SW_DEFAULTS 3
-+#define MAX_PP_SHIFT 23
-+#define MAX_KG_SCH_SIZE 16
-+#define MASK_FOR_GENERIC_BASE_ID 0x20
-+#define MAX_HASH_SHIFT 40
-+#define MAX_KG_SCH_FQID_BIT_OFFSET 31
-+#define MAX_KG_SCH_PP_BIT_OFFSET 15
-+#define MAX_DIST_FQID_SHIFT 23
-+
-+#define GET_MASK_SEL_SHIFT(shift,i) \
-+switch (i) { \
-+ case (0):shift = 26;break; \
-+ case (1):shift = 20;break; \
-+ case (2):shift = 10;break; \
-+ case (3):shift = 4;break; \
-+ default: \
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \
-+}
-+
-+#define GET_MASK_OFFSET_SHIFT(shift,i) \
-+switch (i) { \
-+ case (0):shift = 16;break; \
-+ case (1):shift = 0;break; \
-+ case (2):shift = 28;break; \
-+ case (3):shift = 24;break; \
-+ default: \
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \
-+}
-+
-+#define GET_MASK_SHIFT(shift,i) \
-+switch (i) { \
-+ case (0):shift = 24;break; \
-+ case (1):shift = 16;break; \
-+ case (2):shift = 8;break; \
-+ case (3):shift = 0;break; \
-+ default: \
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \
-+}
-+
-+/***********************************************************************/
-+/* Keygen defines */
-+/***********************************************************************/
-+
-+#define KG_DOUBLE_MEANING_REGS_OFFSET 0x100
-+#define NO_VALIDATION 0x70
-+#define KG_ACTION_REG_TO 1024
-+#define KG_MAX_PROFILE 255
-+#define SCHEME_ALWAYS_DIRECT 0xFFFFFFFF
-+
-+
-+#endif /* __FM_KG_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.c
-@@ -0,0 +1,5571 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_manip.c
-+
-+ @Description FM PCD manip ...
-+ *//***************************************************************************/
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_port_ext.h"
-+#include "fm_muram_ext.h"
-+#include "memcpy_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_hc.h"
-+#include "fm_manip.h"
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+static t_Handle GetManipInfo(t_FmPcdManip *p_Manip, e_ManipInfo manipInfo)
-+{
-+ t_FmPcdManip *p_CurManip = p_Manip;
-+
-+ if (!MANIP_IS_UNIFIED(p_Manip))
-+ p_CurManip = p_Manip;
-+ else
-+ {
-+ /* go to first unified */
-+ while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip))
-+ p_CurManip = p_CurManip->h_PrevManip;
-+ }
-+
-+ switch (manipInfo)
-+ {
-+ case (e_MANIP_HMCT):
-+ return p_CurManip->p_Hmct;
-+ case (e_MANIP_HMTD):
-+ return p_CurManip->h_Ad;
-+ case (e_MANIP_HANDLER_TABLE_OWNER):
-+ return (t_Handle)p_CurManip;
-+ default:
-+ return NULL;
-+ }
-+}
-+
-+static uint16_t GetHmctSize(t_FmPcdManip *p_Manip)
-+{
-+ uint16_t size = 0;
-+ t_FmPcdManip *p_CurManip = p_Manip;
-+
-+ if (!MANIP_IS_UNIFIED(p_Manip))
-+ return p_Manip->tableSize;
-+
-+ /* accumulate sizes, starting with the first node */
-+ while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip))
-+ p_CurManip = p_CurManip->h_PrevManip;
-+
-+ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
-+ {
-+ size += p_CurManip->tableSize;
-+ p_CurManip = (t_FmPcdManip *)p_CurManip->h_NextManip;
-+ }
-+ size += p_CurManip->tableSize; /* add last size */
-+
-+ return (size);
-+}
-+
-+static uint16_t GetDataSize(t_FmPcdManip *p_Manip)
-+{
-+ uint16_t size = 0;
-+ t_FmPcdManip *p_CurManip = p_Manip;
-+
-+ if (!MANIP_IS_UNIFIED(p_Manip))
-+ return p_Manip->dataSize;
-+
-+ /* accumulate sizes, starting with the first node */
-+ while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip))
-+ p_CurManip = p_CurManip->h_PrevManip;
-+
-+ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
-+ {
-+ size += p_CurManip->dataSize;
-+ p_CurManip = (t_FmPcdManip *)p_CurManip->h_NextManip;
-+ }
-+ size += p_CurManip->dataSize; /* add last size */
-+
-+ return (size);
-+}
-+
-+static t_Error CalculateTableSize(t_FmPcdManipParams *p_FmPcdManipParams,
-+ uint16_t *p_TableSize, uint8_t *p_DataSize)
-+{
-+ uint8_t localDataSize, remain, tableSize = 0, dataSize = 0;
-+
-+ if (p_FmPcdManipParams->u.hdr.rmv)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.rmvParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_RMV_GENERIC):
-+ tableSize += HMCD_BASIC_SIZE;
-+ break;
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR):
-+ switch (p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.type)
-+ {
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2):
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP):
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START):
-+#endif /* (DPAA_VERSION >= 11) */
-+ tableSize += HMCD_BASIC_SIZE;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown byHdr.type"));
-+ }
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown rmvParams.type"));
-+ }
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.insrt)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.insrtParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_INSRT_GENERIC):
-+ remain =
-+ (uint8_t)(p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size
-+ % 4);
-+ if (remain)
-+ localDataSize =
-+ (uint8_t)(p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size
-+ + 4 - remain);
-+ else
-+ localDataSize =
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size;
-+ tableSize += (uint8_t)(HMCD_BASIC_SIZE + localDataSize);
-+ break;
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR):
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.type)
-+ {
-+
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2):
-+ tableSize += HMCD_BASIC_SIZE + HMCD_PTR_SIZE;
-+ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.specificL2)
-+ {
-+ case (e_FM_PCD_MANIP_HDR_INSRT_MPLS):
-+ case (e_FM_PCD_MANIP_HDR_INSRT_PPPOE):
-+ dataSize +=
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.size;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+ }
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_IP):
-+ tableSize +=
-+ (HMCD_BASIC_SIZE + HMCD_PTR_SIZE
-+ + HMCD_PARAM_SIZE
-+ + p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size);
-+ dataSize += 2;
-+ break;
-+
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP):
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE):
-+ tableSize += (HMCD_BASIC_SIZE + HMCD_L4_HDR_SIZE);
-+
-+ break;
-+
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP):
-+ tableSize +=
-+ (HMCD_BASIC_SIZE
-+ + p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size);
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown byHdr.type"));
-+ }
-+ }
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown insrtParams.type"));
-+ }
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdate)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.fieldUpdateParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN):
-+ tableSize += HMCD_BASIC_SIZE;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
-+ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN)
-+ {
-+ tableSize += HMCD_PTR_SIZE;
-+ dataSize += DSCP_TO_VLAN_TABLE_SIZE;
-+ }
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4):
-+ tableSize += HMCD_BASIC_SIZE;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_ID)
-+ {
-+ tableSize += HMCD_PARAM_SIZE;
-+ dataSize += 2;
-+ }
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_SRC)
-+ tableSize += HMCD_IPV4_ADDR_SIZE;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_DST)
-+ tableSize += HMCD_IPV4_ADDR_SIZE;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6):
-+ tableSize += HMCD_BASIC_SIZE;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV6_SRC)
-+ tableSize += HMCD_IPV6_ADDR_SIZE;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV6_DST)
-+ tableSize += HMCD_IPV6_ADDR_SIZE;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP):
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
-+ == HDR_MANIP_TCP_UDP_CHECKSUM)
-+ /* we implement this case with the update-checksum descriptor */
-+ tableSize += HMCD_BASIC_SIZE;
-+ else
-+ /* we implement this case with the TCP/UDP-update descriptor */
-+ tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown fieldUpdateParams.type"));
-+ }
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.custom)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.customParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE):
-+ {
-+ tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE + HMCD_PARAM_SIZE;
-+ dataSize +=
-+ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdrSize;
-+ if ((p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
-+ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4)
-+ && (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id))
-+ dataSize += 2;
-+ }
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE):
-+ tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown customParams.type"));
-+ }
-+ }
-+
-+ *p_TableSize = tableSize;
-+ *p_DataSize = dataSize;
-+
-+ return E_OK;
-+}
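CalculateTableSize above pre-computes how much MURAM the command table (HMCT) and its data section will need; in particular, a generic insert's payload is rounded up to a multiple of 4 bytes before being added to the table size. A minimal sketch of that rounding rule, with a hypothetical helper name:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the generic-insert padding rule above:
 * sizes that are not a multiple of 4 are rounded up to the next multiple. */
static uint8_t round_up_to_4(uint8_t size)
{
    uint8_t remain = (uint8_t)(size % 4);
    return remain ? (uint8_t)(size + 4 - remain) : size;
}

int main(void)
{
    for (uint8_t s = 1; s <= 8; s++)
        printf("insert size %u -> padded size %u\n",
               (unsigned)s, (unsigned)round_up_to_4(s));
    return 0;
}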
-+
-+static t_Error GetPrOffsetByHeaderOrField(t_FmManipHdrInfo *p_HdrInfo,
-+ uint8_t *parseArrayOffset)
-+{
-+ e_NetHeaderType hdr = p_HdrInfo->hdr;
-+ e_FmPcdHdrIndex hdrIndex = p_HdrInfo->hdrIndex;
-+ bool byField = p_HdrInfo->byField;
-+ t_FmPcdFields field;
-+
-+ if (byField)
-+ field = p_HdrInfo->fullField;
-+
-+ if (byField)
-+ {
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_ETH):
-+ switch (field.eth)
-+ {
-+ case (NET_HEADER_FIELD_ETH_TYPE):
-+ *parseArrayOffset = CC_PC_PR_ETYPE_LAST_OFFSET;
-+ break;
-+ default:
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NOT_SUPPORTED,
-+ ("Header manipulation of the type Ethernet with this field not supported"));
-+ }
-+ break;
-+ case (HEADER_TYPE_VLAN):
-+ switch (field.vlan)
-+ {
-+ case (NET_HEADER_FIELD_VLAN_TCI):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
-+ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET;
-+ else
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET;
-+ break;
-+ default:
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NOT_SUPPORTED,
-+ ("Header manipulation of the type VLAN with this field not supported"));
-+ }
-+ break;
-+ default:
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NOT_SUPPORTED,
-+ ("Header manipulation of this header by field not supported"));
-+ }
-+ }
-+ else
-+ {
-+ switch (hdr)
-+ {
-+ case (HEADER_TYPE_ETH):
-+ *parseArrayOffset = (uint8_t)CC_PC_PR_ETH_OFFSET;
-+ break;
-+ case (HEADER_TYPE_USER_DEFINED_SHIM1):
-+ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET;
-+ break;
-+ case (HEADER_TYPE_USER_DEFINED_SHIM2):
-+ *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET;
-+ break;
-+ case (HEADER_TYPE_LLC_SNAP):
-+ *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET;
-+ break;
-+ case (HEADER_TYPE_PPPoE):
-+ *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET;
-+ break;
-+ case (HEADER_TYPE_MPLS):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
-+ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET;
-+ else
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
-+ *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET;
-+ break;
-+ case (HEADER_TYPE_IPv4):
-+ case (HEADER_TYPE_IPv6):
-+ if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE)
-+ || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
-+ *parseArrayOffset = CC_PC_PR_IP1_OFFSET;
-+ else
-+ if (hdrIndex == e_FM_PCD_HDR_INDEX_2)
-+ *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET;
-+ break;
-+ case (HEADER_TYPE_MINENCAP):
-+ *parseArrayOffset = CC_PC_PR_MINENC_OFFSET;
-+ break;
-+ case (HEADER_TYPE_GRE):
-+ *parseArrayOffset = CC_PC_PR_GRE_OFFSET;
-+ break;
-+ case (HEADER_TYPE_TCP):
-+ case (HEADER_TYPE_UDP):
-+ case (HEADER_TYPE_IPSEC_AH):
-+ case (HEADER_TYPE_IPSEC_ESP):
-+ case (HEADER_TYPE_DCCP):
-+ case (HEADER_TYPE_SCTP):
-+ *parseArrayOffset = CC_PC_PR_L4_OFFSET;
-+ break;
-+ case (HEADER_TYPE_CAPWAP):
-+ case (HEADER_TYPE_CAPWAP_DTLS):
-+ *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET;
-+ break;
-+ default:
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NOT_SUPPORTED,
-+ ("Header manipulation of this header is not supported"));
-+ }
-+ }
-+ return E_OK;
-+}
-+
-+static t_Error BuildHmct(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipParams *p_FmPcdManipParams,
-+ uint8_t *p_DestHmct, uint8_t *p_DestData, bool new)
-+{
-+ uint32_t *p_TmpHmct = (uint32_t*)p_DestHmct, *p_LocalData;
-+ uint32_t tmpReg = 0, *p_Last = NULL, tmp_ipv6_addr;
-+ uint8_t remain, i, size = 0, origSize, *p_UsrData = NULL, *p_TmpData =
-+ p_DestData;
-+ t_Handle h_FmPcd = p_Manip->h_FmPcd;
-+ uint8_t j = 0;
-+
-+ if (p_FmPcdManipParams->u.hdr.rmv)
-+ {
-+ if (p_FmPcdManipParams->u.hdr.rmvParams.type
-+ == e_FM_PCD_MANIP_RMV_GENERIC)
-+ {
-+ /* initialize HMCD */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_RMV) << HMCD_OC_SHIFT;
-+ /* tmp, should be conditional */
-+ tmpReg |= p_FmPcdManipParams->u.hdr.rmvParams.u.generic.offset
-+ << HMCD_RMV_OFFSET_SHIFT;
-+ tmpReg |= p_FmPcdManipParams->u.hdr.rmvParams.u.generic.size
-+ << HMCD_RMV_SIZE_SHIFT;
-+ }
-+ else
-+ if (p_FmPcdManipParams->u.hdr.rmvParams.type
-+ == e_FM_PCD_MANIP_RMV_BY_HDR)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.type)
-+ {
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2):
-+ {
-+ uint8_t hmcdOpt;
-+
-+ /* initialize HMCD */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_L2_RMV) << HMCD_OC_SHIFT;
-+
-+ switch (p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.u.specificL2)
-+ {
-+ case (e_FM_PCD_MANIP_HDR_RMV_ETHERNET):
-+ hmcdOpt = HMCD_RMV_L2_ETHERNET;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS):
-+ hmcdOpt = HMCD_RMV_L2_STACKED_QTAGS;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS):
-+ hmcdOpt = HMCD_RMV_L2_ETHERNET_AND_MPLS;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_RMV_MPLS):
-+ hmcdOpt = HMCD_RMV_L2_MPLS;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_RMV_PPPOE):
-+ hmcdOpt = HMCD_RMV_L2_PPPOE;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+ }
-+ tmpReg |= hmcdOpt << HMCD_L2_MODE_SHIFT;
-+ break;
-+ }
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP):
-+ tmpReg = (uint32_t)(HMCD_OPCODE_CAPWAP_RMV)
-+ << HMCD_OC_SHIFT;
-+ break;
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START):
-+ {
-+ uint8_t prsArrayOffset;
-+ t_Error err = E_OK;
-+
-+ tmpReg = (uint32_t)(HMCD_OPCODE_RMV_TILL)
-+ << HMCD_OC_SHIFT;
-+
-+ err =
-+ GetPrOffsetByHeaderOrField(
-+ &p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.u.hdrInfo,
-+ &prsArrayOffset);
-+ ASSERT_COND(!err);
-+ /* was previously checked */
-+
-+ tmpReg |= ((uint32_t)prsArrayOffset << 16);
-+ }
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("manip header remove by hdr type!"));
-+ }
-+ }
-+
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+ /* advance to next command */
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.insrt)
-+ {
-+ if (p_FmPcdManipParams->u.hdr.insrtParams.type
-+ == e_FM_PCD_MANIP_INSRT_GENERIC)
-+ {
-+ /* initialize HMCD */
-+ if (p_FmPcdManipParams->u.hdr.insrtParams.u.generic.replace)
-+ tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_REPLACE)
-+ << HMCD_OC_SHIFT;
-+ else
-+ tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_INSRT) << HMCD_OC_SHIFT;
-+
-+ tmpReg |= p_FmPcdManipParams->u.hdr.insrtParams.u.generic.offset
-+ << HMCD_INSRT_OFFSET_SHIFT;
-+ tmpReg |= p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size
-+ << HMCD_INSRT_SIZE_SHIFT;
-+
-+ size = p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size;
-+ p_UsrData = p_FmPcdManipParams->u.hdr.insrtParams.u.generic.p_Data;
-+
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ /* initialize data to be inserted */
-+            /* if size is not a multiple of 4, pad with 0's */
-+ origSize = size;
-+ remain = (uint8_t)(size % 4);
-+ if (remain)
-+ {
-+ size += (uint8_t)(4 - remain);
-+ p_LocalData = (uint32_t *)XX_Malloc(size);
-+ memset((uint8_t *)p_LocalData, 0, size);
-+ memcpy((uint8_t *)p_LocalData, p_UsrData, origSize);
-+ }
-+ else
-+ p_LocalData = (uint32_t*)p_UsrData;
-+
-+ /* initialize data and advance pointer to next command */
-+ MemCpy8(p_TmpHmct, p_LocalData, size);
-+ p_TmpHmct += size / sizeof(uint32_t);
-+
-+ if (remain)
-+ XX_Free(p_LocalData);
-+ }
-+
-+ else
-+ if (p_FmPcdManipParams->u.hdr.insrtParams.type
-+ == e_FM_PCD_MANIP_INSRT_BY_HDR)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.type)
-+ {
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2):
-+ {
-+ uint8_t hmcdOpt;
-+
-+ /* initialize HMCD */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_L2_INSRT)
-+ << HMCD_OC_SHIFT;
-+
-+ switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.specificL2)
-+ {
-+ case (e_FM_PCD_MANIP_HDR_INSRT_MPLS):
-+ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.update)
-+ hmcdOpt = HMCD_INSRT_N_UPDATE_L2_MPLS;
-+ else
-+ hmcdOpt = HMCD_INSRT_L2_MPLS;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_INSRT_PPPOE):
-+ hmcdOpt = HMCD_INSRT_L2_PPPOE;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG);
-+ }
-+ tmpReg |= hmcdOpt << HMCD_L2_MODE_SHIFT;
-+
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ /* set size and pointer of user's data */
-+ size =
-+ (uint8_t)p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.size;
-+
-+ ASSERT_COND(p_TmpData);
-+ MemCpy8(
-+ p_TmpData,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.p_Data,
-+ size);
-+ tmpReg =
-+ (size << HMCD_INSRT_L2_SIZE_SHIFT)
-+ | (uint32_t)(XX_VirtToPhys(p_TmpData)
-+ - (((t_FmPcd*)h_FmPcd)->physicalMuramBase));
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+ p_TmpData += size;
-+ }
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_IP):
-+ tmpReg = (uint32_t)(HMCD_OPCODE_IP_INSRT)
-+ << HMCD_OC_SHIFT;
-+ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.calcL4Checksum)
-+ tmpReg |= HMCD_IP_L4_CS_CALC;
-+ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.mappingMode
-+ == e_FM_PCD_MANIP_HDR_QOS_MAPPING_AS_IS)
-+ tmpReg |= HMCD_IP_OR_QOS;
-+ tmpReg |=
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.lastPidOffset
-+ & HMCD_IP_LAST_PID_MASK;
-+ tmpReg |=
-+ ((p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size
-+ << HMCD_IP_SIZE_SHIFT)
-+ & HMCD_IP_SIZE_MASK);
-+ if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.dontFragOverwrite)
-+ tmpReg |= HMCD_IP_DF_MODE;
-+
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ /* set IP id */
-+ ASSERT_COND(p_TmpData);
-+ WRITE_UINT16(
-+ *(uint16_t*)p_TmpData,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.id);
-+ WRITE_UINT32(
-+ *p_TmpHmct,
-+ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)));
-+ p_TmpData += 2;
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+
-+ WRITE_UINT8(*p_TmpHmct, p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.lastDstOffset);
-+ p_TmpHmct += HMCD_PARAM_SIZE / 4;
-+
-+ MemCpy8(
-+ p_TmpHmct,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.p_Data,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size);
-+ p_TmpHmct +=
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size
-+ / 4;
-+ break;
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE):
-+ tmpReg = HMCD_INSRT_UDP_LITE;
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP):
-+ tmpReg |= (uint32_t)(HMCD_OPCODE_UDP_INSRT)
-+ << HMCD_OC_SHIFT;
-+
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ MemCpy8(
-+ p_TmpHmct,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.p_Data,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size);
-+ p_TmpHmct +=
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
-+ / 4;
-+ break;
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP):
-+ tmpReg = (uint32_t)(HMCD_OPCODE_CAPWAP_INSRT)
-+ << HMCD_OC_SHIFT;
-+ tmpReg |= HMCD_CAPWAP_INSRT;
-+
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ MemCpy8(
-+ p_TmpHmct,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.p_Data,
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size);
-+ p_TmpHmct +=
-+ p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
-+ / 4;
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("manip header insert by header type!"));
-+
-+ }
-+ }
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdate)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.fieldUpdateParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN):
-+ /* set opcode */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_VLAN_PRI_UPDATE)
-+ << HMCD_OC_SHIFT;
-+
-+ /* set mode & table pointer */
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
-+ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN)
-+ {
-+ /* set Mode */
-+ tmpReg |= (uint32_t)(HMCD_VLAN_PRI_UPDATE_DSCP_TO_VPRI)
-+ << HMCD_VLAN_PRI_REP_MODE_SHIFT;
-+ /* set VPRI default */
-+ tmpReg |=
-+ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.vpriDefVal;
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+ /* write the table pointer into the Manip descriptor */
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ tmpReg = 0;
-+ ASSERT_COND(p_TmpData);
-+ for (i = 0; i < HMCD_DSCP_VALUES; i++)
-+ {
-+ /* first we build from each 8 values a 32bit register */
-+ tmpReg |=
-+ (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.dscpToVpriTable[i])
-+ << (32 - 4 * (j + 1));
-+ j++;
-+                        /* Then we write this register to the next table word
-+ * (i=7-->word 0, i=15-->word 1,... i=63-->word 7) */
-+ if ((i % 8) == 7)
-+ {
-+ WRITE_UINT32(*((uint32_t*)p_TmpData + (i+1)/8-1),
-+ tmpReg);
-+ tmpReg = 0;
-+ j = 0;
-+ }
-+ }
-+
-+ WRITE_UINT32(
-+ *p_TmpHmct,
-+ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase)));
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+
-+ p_TmpData += DSCP_TO_VLAN_TABLE_SIZE;
-+ }
-+ else
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
-+ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI)
-+ {
-+ /* set Mode */
-+ /* line commented out as it has no-side-effect ('0' value). */
-+ /*tmpReg |= HMCD_VLAN_PRI_UPDATE << HMCD_VLAN_PRI_REP_MODE_SHIFT*/;
-+ /* set VPRI parameter */
-+ tmpReg |=
-+ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.vpri;
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+ }
-+ break;
-+
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4):
-+ /* set opcode */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_IPV4_UPDATE) << HMCD_OC_SHIFT;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_TTL)
-+ tmpReg |= HMCD_IPV4_UPDATE_TTL;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_TOS)
-+ {
-+ tmpReg |= HMCD_IPV4_UPDATE_TOS;
-+ tmpReg |=
-+ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.tos
-+ << HMCD_IPV4_UPDATE_TOS_SHIFT;
-+ }
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_ID)
-+ tmpReg |= HMCD_IPV4_UPDATE_ID;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_SRC)
-+ tmpReg |= HMCD_IPV4_UPDATE_SRC;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_DST)
-+ tmpReg |= HMCD_IPV4_UPDATE_DST;
-+ /* write the first 4 bytes of the descriptor */
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_ID)
-+ {
-+ ASSERT_COND(p_TmpData);
-+ WRITE_UINT16(
-+ *(uint16_t*)p_TmpData,
-+ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.id);
-+ WRITE_UINT32(
-+ *p_TmpHmct,
-+ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)));
-+ p_TmpData += 2;
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_SRC)
-+ {
-+ WRITE_UINT32(
-+ *p_TmpHmct,
-+ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.src);
-+ p_TmpHmct += HMCD_IPV4_ADDR_SIZE / 4;
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates
-+ & HDR_MANIP_IPV4_DST)
-+ {
-+ WRITE_UINT32(
-+ *p_TmpHmct,
-+ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.dst);
-+ p_TmpHmct += HMCD_IPV4_ADDR_SIZE / 4;
-+ }
-+ break;
-+
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6):
-+ /* set opcode */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_IPV6_UPDATE) << HMCD_OC_SHIFT;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
-+ & HDR_MANIP_IPV6_HL)
-+ tmpReg |= HMCD_IPV6_UPDATE_HL;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
-+ & HDR_MANIP_IPV6_TC)
-+ {
-+ tmpReg |= HMCD_IPV6_UPDATE_TC;
-+ tmpReg |=
-+ p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.trafficClass
-+ << HMCD_IPV6_UPDATE_TC_SHIFT;
-+ }
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
-+ & HDR_MANIP_IPV6_SRC)
-+ tmpReg |= HMCD_IPV6_UPDATE_SRC;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
-+ & HDR_MANIP_IPV6_DST)
-+ tmpReg |= HMCD_IPV6_UPDATE_DST;
-+ /* write the first 4 bytes of the descriptor */
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
-+ & HDR_MANIP_IPV6_SRC)
-+ {
-+ for (i = 0; i < NET_HEADER_FIELD_IPv6_ADDR_SIZE; i += 4)
-+ {
-+ memcpy(&tmp_ipv6_addr,
-+ &p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.src[i],
-+ sizeof(uint32_t));
-+ WRITE_UINT32(*p_TmpHmct, tmp_ipv6_addr);
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+ }
-+ }
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates
-+ & HDR_MANIP_IPV6_DST)
-+ {
-+ for (i = 0; i < NET_HEADER_FIELD_IPv6_ADDR_SIZE; i += 4)
-+ {
-+ memcpy(&tmp_ipv6_addr,
-+ &p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.dst[i],
-+ sizeof(uint32_t));
-+ WRITE_UINT32(*p_TmpHmct, tmp_ipv6_addr);
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+ }
-+ }
-+ break;
-+
-+ case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP):
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
-+ == HDR_MANIP_TCP_UDP_CHECKSUM)
-+ {
-+ /* we implement this case with the update-checksum descriptor */
-+ /* set opcode */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_TCP_UDP_CHECKSUM)
-+ << HMCD_OC_SHIFT;
-+ /* write the first 4 bytes of the descriptor */
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+ }
-+ else
-+ {
-+ /* we implement this case with the TCP/UDP update descriptor */
-+ /* set opcode */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_TCP_UDP_UPDATE)
-+ << HMCD_OC_SHIFT;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
-+ & HDR_MANIP_TCP_UDP_DST)
-+ tmpReg |= HMCD_TCP_UDP_UPDATE_DST;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
-+ & HDR_MANIP_TCP_UDP_SRC)
-+ tmpReg |= HMCD_TCP_UDP_UPDATE_SRC;
-+ /* write the first 4 bytes of the descriptor */
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ tmpReg = 0;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
-+ & HDR_MANIP_TCP_UDP_SRC)
-+ tmpReg |=
-+ ((uint32_t)p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.src)
-+ << HMCD_TCP_UDP_UPDATE_SRC_SHIFT;
-+ if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates
-+ & HDR_MANIP_TCP_UDP_DST)
-+ tmpReg |=
-+ ((uint32_t)p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.dst);
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+ }
-+ break;
-+
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown fieldUpdateParams.type"));
-+ }
-+ }
-+
-+ if (p_FmPcdManipParams->u.hdr.custom)
-+ {
-+ switch (p_FmPcdManipParams->u.hdr.customParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE):
-+ /* set opcode */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_REPLACE_IP) << HMCD_OC_SHIFT;
-+
-+ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.decTtlHl)
-+ tmpReg |= HMCD_IP_REPLACE_TTL_HL;
-+ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
-+ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV4_BY_IPV6)
-+ /* line commented out as it has no-side-effect ('0' value). */
-+ /*tmpReg |= HMCD_IP_REPLACE_REPLACE_IPV4*/;
-+ else
-+ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
-+ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4)
-+ {
-+ tmpReg |= HMCD_IP_REPLACE_REPLACE_IPV6;
-+ if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id)
-+ tmpReg |= HMCD_IP_REPLACE_ID;
-+ }
-+ else
-+ RETURN_ERROR(
-+ MINOR,
-+ E_NOT_SUPPORTED,
-+ ("One flag out of HDR_MANIP_IP_REPLACE_IPV4, HDR_MANIP_IP_REPLACE_IPV6 - must be set."));
-+
-+ /* write the first 4 bytes of the descriptor */
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE / 4;
-+
-+ size =
-+ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdrSize;
-+ ASSERT_COND(p_TmpData);
-+ MemCpy8(
-+ p_TmpData,
-+ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdr,
-+ size);
-+ tmpReg = (uint32_t)(size << HMCD_IP_REPLACE_L3HDRSIZE_SHIFT);
-+ tmpReg |= (uint32_t)(XX_VirtToPhys(p_TmpData)
-+ - (((t_FmPcd*)h_FmPcd)->physicalMuramBase));
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+ p_TmpData += size;
-+
-+ if ((p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType
-+ == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4)
-+ && (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id))
-+ {
-+ WRITE_UINT16(
-+ *(uint16_t*)p_TmpData,
-+ p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.id);
-+ WRITE_UINT32(
-+ *p_TmpHmct,
-+ (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase)));
-+ p_TmpData += 2;
-+ }
-+ p_TmpHmct += HMCD_PTR_SIZE / 4;
-+ break;
-+ case (e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE):
-+ /* set opcode */
-+ tmpReg = (uint32_t)(HMCD_OPCODE_GEN_FIELD_REPLACE) << HMCD_OC_SHIFT;
-+ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.size << HMCD_GEN_FIELD_SIZE_SHIFT;
-+ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.srcOffset << HMCD_GEN_FIELD_SRC_OFF_SHIFT;
-+ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.dstOffset << HMCD_GEN_FIELD_DST_OFF_SHIFT;
-+ if (p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.mask)
-+ tmpReg |= HMCD_GEN_FIELD_MASK_EN;
-+
-+ /* write the first 4 bytes of the descriptor */
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ /* save a pointer to the "last" indication word */
-+ p_Last = p_TmpHmct;
-+
-+ p_TmpHmct += HMCD_BASIC_SIZE/4;
-+
-+ if (p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.mask)
-+ {
-+ tmpReg = p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.mask << HMCD_GEN_FIELD_MASK_SHIFT;
-+ tmpReg |= p_FmPcdManipParams->u.hdr.customParams.u.genFieldReplace.maskOffset << HMCD_GEN_FIELD_MASK_OFF_SHIFT;
-+ /* write the next 4 bytes of the descriptor */
-+ WRITE_UINT32(*p_TmpHmct, tmpReg);
-+ }
-+ p_TmpHmct += HMCD_PARAM_SIZE/4;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("Unknown customParams.type"));
-+ }
-+ }
-+
-+    /* If this node has a nextManip and no parsing is required, the old table
-+       must be copied into the new table and the old table should be freed */
-+ if (p_FmPcdManipParams->h_NextManip
-+ && (p_Manip->nextManipType == e_FM_PCD_MANIP_HDR)
-+ && (MANIP_DONT_REPARSE(p_Manip)))
-+ {
-+ if (new)
-+ {
-+ /* If this is the first time this manip is created we need to free unused memory. If it
-+ * is a dynamic changes case, the memory used is either the CC shadow or the existing
-+ * table - no allocation, no free */
-+ MANIP_UPDATE_UNIFIED_POSITION(p_FmPcdManipParams->h_NextManip);
-+
-+ p_Manip->unifiedPosition = e_MANIP_UNIFIED_FIRST;
-+ }
-+ }
-+ else
-+ {
-+ ASSERT_COND(p_Last);
-+ /* set the "last" indication on the last command of the current table */
-+ WRITE_UINT32(*p_Last, GET_UINT32(*p_Last) | HMCD_LAST);
-+ }
-+
-+ return E_OK;
-+}
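One detail of BuildHmct worth calling out is the DSCP-to-VPRI table packing: the 64 four-bit table entries are packed MSB-first, eight per 32-bit word, before the table is written out. A minimal standalone sketch of the same packing, using sample values only and no MURAM access:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Pack 64 four-bit DSCP-to-VPRI entries MSB-first, eight per 32-bit word,
 * matching the loop in BuildHmct (entry i % 8 == 0 lands in bits 31..28). */
int main(void)
{
    uint8_t  dscp_to_vpri[64];
    uint32_t words[8] = { 0 };
    int      i;

    for (i = 0; i < 64; i++)
        dscp_to_vpri[i] = (uint8_t)(i & 0xF);   /* sample values */

    for (i = 0; i < 64; i++)
        words[i / 8] |= (uint32_t)(dscp_to_vpri[i] & 0xF) << (28 - 4 * (i % 8));

    for (i = 0; i < 8; i++)
        printf("word %d = 0x%08" PRIX32 "\n", i, words[i]);
    return 0;
}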
-+
-+static t_Error CreateManipActionNew(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipParams *p_FmPcdManipParams)
-+{
-+ t_FmPcdManip *p_CurManip;
-+ t_Error err;
-+ uint32_t nextSize = 0, totalSize;
-+ uint16_t tmpReg;
-+ uint8_t *p_OldHmct, *p_TmpHmctPtr, *p_TmpDataPtr;
-+
-+ /* set Manip structure */
-+
-+ p_Manip->dontParseAfterManip =
-+ p_FmPcdManipParams->u.hdr.dontParseAfterManip;
-+
-+ if (p_FmPcdManipParams->h_NextManip)
-+ { /* Next Header manipulation exists */
-+ p_Manip->nextManipType = MANIP_GET_TYPE(p_FmPcdManipParams->h_NextManip);
-+
-+ if ((p_Manip->nextManipType == e_FM_PCD_MANIP_HDR) && p_Manip->dontParseAfterManip)
-+ nextSize = (uint32_t)(GetHmctSize(p_FmPcdManipParams->h_NextManip)
-+ + GetDataSize(p_FmPcdManipParams->h_NextManip));
-+ else /* either parsing is required or next manip is Frag; no table merging. */
-+ p_Manip->cascaded = TRUE;
-+ /* pass up the "cascaded" attribute. The whole chain is cascaded
-+ * if something is cascaded along the way. */
-+ if (MANIP_IS_CASCADED(p_FmPcdManipParams->h_NextManip))
-+ p_Manip->cascaded = TRUE;
-+ }
-+
-+ /* Allocate new table */
-+ /* calculate table size according to manip parameters */
-+ err = CalculateTableSize(p_FmPcdManipParams, &p_Manip->tableSize,
-+ &p_Manip->dataSize);
-+ if (err)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ totalSize = (uint16_t)(p_Manip->tableSize + p_Manip->dataSize + nextSize);
-+
-+ p_Manip->p_Hmct = (uint8_t*)FM_MURAM_AllocMem(
-+ ((t_FmPcd *)p_Manip->h_FmPcd)->h_FmMuram, totalSize, 4);
-+ if (!p_Manip->p_Hmct)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc failed"));
-+
-+ if (p_Manip->dataSize)
-+ p_Manip->p_Data =
-+ (uint8_t*)PTR_MOVE(p_Manip->p_Hmct, (p_Manip->tableSize + nextSize));
-+
-+ /* update shadow size to allow runtime replacement of Header manipulation */
-+ /* The allocated shadow is divided as follows:
-+ 0 . . . 16 . . .
-+ --------------------------------
-+ | Shadow | Shadow HMTD |
-+ | HMTD | Match Table |
-+ | (16 bytes) | (maximal size) |
-+ --------------------------------
-+ */
-+
-+ err = FmPcdUpdateCcShadow(p_Manip->h_FmPcd, (uint32_t)(totalSize + 16),
-+ (uint16_t)FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (err != E_OK)
-+ {
-+ FM_MURAM_FreeMem(p_Manip->h_FmPcd, p_Manip->p_Hmct);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM allocation for HdrManip node shadow"));
-+ }
-+
-+ if (p_FmPcdManipParams->h_NextManip
-+ && (p_Manip->nextManipType == e_FM_PCD_MANIP_HDR)
-+ && (MANIP_DONT_REPARSE(p_Manip)))
-+ {
-+ p_OldHmct = (uint8_t *)GetManipInfo(p_FmPcdManipParams->h_NextManip,
-+ e_MANIP_HMCT);
-+ p_CurManip = p_FmPcdManipParams->h_NextManip;
-+ /* Run till the last Manip (which is the first to configure) */
-+ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
-+ p_CurManip = p_CurManip->h_NextManip;
-+
-+ while (p_CurManip)
-+ {
-+ /* If this is a unified table, point to the part of the table
-+ * which is the relative offset in HMCT.
-+ */
-+ p_TmpHmctPtr = (uint8_t*)PTR_MOVE(p_Manip->p_Hmct,
-+ (p_Manip->tableSize +
-+ (PTR_TO_UINT(p_CurManip->p_Hmct) -
-+ PTR_TO_UINT(p_OldHmct))));
-+ if (p_CurManip->p_Data)
-+ p_TmpDataPtr = (uint8_t*)PTR_MOVE(p_Manip->p_Hmct,
-+ (p_Manip->tableSize +
-+ (PTR_TO_UINT(p_CurManip->p_Data) -
-+ PTR_TO_UINT(p_OldHmct))));
-+ else
-+ p_TmpDataPtr = NULL;
-+
-+ BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr,
-+ p_TmpDataPtr, FALSE);
-+ /* update old manip table pointer */
-+ MANIP_SET_HMCT_PTR(p_CurManip, p_TmpHmctPtr);
-+ MANIP_SET_DATA_PTR(p_CurManip, p_TmpDataPtr);
-+
-+ p_CurManip = p_CurManip->h_PrevManip;
-+ }
-+ /* We copied the HMCT to create a new large HMCT so we can free the old one */
-+ FM_MURAM_FreeMem(MANIP_GET_MURAM(p_FmPcdManipParams->h_NextManip),
-+ p_OldHmct);
-+ }
-+
-+ /* Fill table */
-+ err = BuildHmct(p_Manip, p_FmPcdManipParams, p_Manip->p_Hmct,
-+ p_Manip->p_Data, TRUE);
-+ if (err)
-+ {
-+ FM_MURAM_FreeMem(p_Manip->h_FmPcd, p_Manip->p_Hmct);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ /* Build HMTD (table descriptor) */
-+ tmpReg = HMTD_CFG_TYPE; /* NADEN = 0 */
-+
-+ /* add parseAfterManip */
-+ if (!p_Manip->dontParseAfterManip)
-+ tmpReg |= HMTD_CFG_PRS_AFTER_HM;
-+
-+ /* create cascade */
-+ /*if (p_FmPcdManipParams->h_NextManip
-+ && (!MANIP_DONT_REPARSE(p_Manip) || (p_Manip->nextManipType != e_FM_PCD_MANIP_HDR)))*/
-+ if (p_Manip->cascaded)
-+ {
-+ uint16_t nextAd;
-+ /* indicate that there's another HM table descriptor */
-+ tmpReg |= HMTD_CFG_NEXT_AD_EN;
-+ /* get address of next HMTD (table descriptor; h_Ad).
-+         * If the next HMTD was removed due to table unifying, get the address
-+ * of the "next next" as written in the h_Ad of the next h_Manip node.
-+ */
-+ if (p_Manip->unifiedPosition != e_MANIP_UNIFIED_FIRST)
-+ nextAd = (uint16_t)((uint32_t)(XX_VirtToPhys(MANIP_GET_HMTD_PTR(p_FmPcdManipParams->h_NextManip)) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4);
-+ else
-+ nextAd = ((t_Hmtd *)((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad)->nextAdIdx;
-+
-+ WRITE_UINT16(((t_Hmtd *)p_Manip->h_Ad)->nextAdIdx, nextAd);
-+ }
-+
-+ WRITE_UINT16(((t_Hmtd *)p_Manip->h_Ad)->cfg, tmpReg);
-+ WRITE_UINT32(
-+ ((t_Hmtd *)p_Manip->h_Ad)->hmcdBasePtr,
-+ (uint32_t)(XX_VirtToPhys(p_Manip->p_Hmct) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)));
-+
-+ WRITE_UINT8(((t_Hmtd *)p_Manip->h_Ad)->opCode, HMAN_OC);
-+
-+ if (p_Manip->unifiedPosition == e_MANIP_UNIFIED_FIRST)
-+ {
-+ /* The HMTD of the next Manip is never going to be used */
-+ if (((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->muramAllocate)
-+ FM_MURAM_FreeMem(
-+ ((t_FmPcd *)((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_FmPcd)->h_FmMuram,
-+ ((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad);
-+ else
-+ XX_Free(((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad);
-+ ((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad = NULL;
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error CreateManipActionShadow(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipParams *p_FmPcdManipParams)
-+{
-+ uint8_t *p_WholeHmct, *p_TmpHmctPtr, newDataSize, *p_TmpDataPtr = NULL;
-+ uint16_t newSize;
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
-+ t_Error err;
-+ t_FmPcdManip *p_CurManip = p_Manip;
-+
-+ err = CalculateTableSize(p_FmPcdManipParams, &newSize, &newDataSize);
-+ if (err)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ /* check coherency of new table parameters */
-+ if (newSize > p_Manip->tableSize)
-+ RETURN_ERROR(
-+ MINOR,
-+ E_INVALID_VALUE,
-+ ("New Hdr Manip configuration requires larger size than current one (command table)."));
-+ if (newDataSize > p_Manip->dataSize)
-+ RETURN_ERROR(
-+ MINOR,
-+ E_INVALID_VALUE,
-+ ("New Hdr Manip configuration requires larger size than current one (data)."));
-+ if (p_FmPcdManipParams->h_NextManip)
-+ RETURN_ERROR(
-+ MINOR, E_INVALID_VALUE,
-+ ("New Hdr Manip configuration can not contain h_NextManip."));
-+ if (MANIP_IS_UNIFIED(p_Manip) && (newSize != p_Manip->tableSize))
-+ RETURN_ERROR(
-+ MINOR,
-+ E_INVALID_VALUE,
-+ ("New Hdr Manip configuration in a chained manipulation requires different size than current one."));
-+ if (p_Manip->dontParseAfterManip
-+ != p_FmPcdManipParams->u.hdr.dontParseAfterManip)
-+ RETURN_ERROR(
-+ MINOR,
-+ E_INVALID_VALUE,
-+ ("New Hdr Manip configuration differs in dontParseAfterManip value."));
-+
-+ p_Manip->tableSize = newSize;
-+ p_Manip->dataSize = newDataSize;
-+
-+ /* Build the new table in the shadow */
-+ if (!MANIP_IS_UNIFIED(p_Manip))
-+ {
-+ p_TmpHmctPtr = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow, 16);
-+ if (p_Manip->p_Data)
-+ p_TmpDataPtr =
-+ (uint8_t*)PTR_MOVE(p_TmpHmctPtr,
-+ (PTR_TO_UINT(p_Manip->p_Data) - PTR_TO_UINT(p_Manip->p_Hmct)));
-+
-+ BuildHmct(p_Manip, p_FmPcdManipParams, p_TmpHmctPtr, p_Manip->p_Data,
-+ FALSE);
-+ }
-+ else
-+ {
-+ p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
-+ ASSERT_COND(p_WholeHmct);
-+
-+ /* Run till the last Manip (which is the first to configure) */
-+ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
-+ p_CurManip = p_CurManip->h_NextManip;
-+
-+ while (p_CurManip)
-+ {
-+ /* If this is a non-head node in a unified table, point to the part of the shadow
-+ * which is the relative offset in HMCT.
-+ * else, point to the beginning of the
-+             * shadow table (the first 16 bytes are reserved for the HMTD).
-+ */
-+ p_TmpHmctPtr =
-+ (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow,
-+ (16 + PTR_TO_UINT(p_CurManip->p_Hmct) - PTR_TO_UINT(p_WholeHmct)));
-+ if (p_CurManip->p_Data)
-+ p_TmpDataPtr =
-+ (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow,
-+ (16 + PTR_TO_UINT(p_CurManip->p_Data) - PTR_TO_UINT(p_WholeHmct)));
-+
-+ BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr,
-+ p_TmpDataPtr, FALSE);
-+ p_CurManip = p_CurManip->h_PrevManip;
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error CreateManipActionBackToOrig(
-+ t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_FmPcdManipParams)
-+{
-+ uint8_t *p_WholeHmct = NULL, *p_TmpHmctPtr, *p_TmpDataPtr;
-+ t_FmPcdManip *p_CurManip = p_Manip;
-+
-+ /* Build the new table in the shadow */
-+ if (!MANIP_IS_UNIFIED(p_Manip))
-+ BuildHmct(p_Manip, p_FmPcdManipParams, p_Manip->p_Hmct, p_Manip->p_Data,
-+ FALSE);
-+ else
-+ {
-+ p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
-+ ASSERT_COND(p_WholeHmct);
-+
-+ /* Run till the last Manip (which is the first to configure) */
-+ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
-+ p_CurManip = p_CurManip->h_NextManip;
-+
-+ while (p_CurManip)
-+ {
-+ /* If this is a unified table, point to the part of the table
-+ * which is the relative offset in HMCT.
-+ */
-+ p_TmpHmctPtr = p_CurManip->p_Hmct; /*- (uint32_t)p_WholeHmct*/
-+ p_TmpDataPtr = p_CurManip->p_Data; /*- (uint32_t)p_WholeHmct*/
-+
-+ BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr,
-+ p_TmpDataPtr, FALSE);
-+
-+ p_CurManip = p_CurManip->h_PrevManip;
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+static t_Error UpdateManipIc(t_Handle h_Manip, uint8_t icOffset)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+ t_Handle p_Ad;
-+ uint32_t tmpReg32 = 0;
-+ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
-+
-+ switch (p_Manip->opcode)
-+ {
-+ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+ if (p_Manip->updateParams & INTERNAL_CONTEXT_OFFSET)
-+ {
-+ tmpReg32 =
-+ *(uint32_t *)&((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets;
-+ tmpReg32 |= (uint32_t)((uint32_t)icOffset << 16);
-+ *(uint32_t *)&((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets =
-+ tmpReg32;
-+ p_Manip->updateParams &= ~INTERNAL_CONTEXT_OFFSET;
-+ p_Manip->icOffset = icOffset;
-+ }
-+ else
-+ {
-+ if (p_Manip->icOffset != icOffset)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("this manipulation was updated previously by different value"););
-+ }
-+ break;
-+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
-+ if (p_Manip->h_Frag)
-+ {
-+ if (p_Manip->updateParams & INTERNAL_CONTEXT_OFFSET)
-+ {
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+ tmpReg32 |= GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets);
-+ tmpReg32 |= (uint32_t)((uint32_t)icOffset << 16);
-+ WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets, tmpReg32);
-+ p_Manip->updateParams &= ~INTERNAL_CONTEXT_OFFSET;
-+ p_Manip->icOffset = icOffset;
-+ }
-+ else
-+ {
-+ if (p_Manip->icOffset != icOffset)
-+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("this manipulation was updated previously by different value"););
-+ }
-+ }
-+ break;
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error UpdateInitMvIntFrameHeaderFromFrameToBufferPrefix(
-+ t_Handle h_FmPort, t_FmPcdManip *p_Manip, t_Handle h_Ad, bool validate)
-+{
-+
-+ t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)h_Ad;
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+ t_Error err;
-+ uint32_t tmpReg32;
-+
-+ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ (p_Manip->opcode & HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX),
-+ E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Manip->muramAllocate, E_INVALID_STATE);
-+
-+ if (p_Manip->updateParams)
-+ {
-+ if ((!(p_Manip->updateParams & OFFSET_OF_PR))
-+ || (p_Manip->shadowUpdateParams & OFFSET_OF_PR))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+                ("at this stage parameters from the Port have not been updated"));
-+
-+ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams;
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_PSO;
-+ fmPortGetSetCcParams.setCcParams.psoSize = 16;
-+
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+                    ("Parser result offset wasn't configured previously"));
-+#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
-+ ASSERT_COND(!(fmPortGetSetCcParams.getCcParams.prOffset % 16));
-+#endif
-+ }
-+ else
-+ if (validate)
-+ {
-+ if ((!(p_Manip->shadowUpdateParams & OFFSET_OF_PR))
-+ || (p_Manip->updateParams & OFFSET_OF_PR))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+                        ("at this stage parameters from the Port should already have been updated"));
-+ fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams;
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_PSO;
-+ fmPortGetSetCcParams.setCcParams.psoSize = 16;
-+
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+                        ("Parser result offset wasn't configured previously"));
-+
-+ }
-+
-+ ASSERT_COND(p_Ad);
-+
-+ if (p_Manip->updateParams & OFFSET_OF_PR)
-+ {
-+ tmpReg32 = 0;
-+ tmpReg32 |= fmPortGetSetCcParams.getCcParams.prOffset;
-+ WRITE_UINT32(p_Ad->matchTblPtr,
-+ (GET_UINT32(p_Ad->matchTblPtr) | tmpReg32));
-+ p_Manip->updateParams &= ~OFFSET_OF_PR;
-+ p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
-+ }
-+ else
-+ if (validate)
-+ {
-+ tmpReg32 = GET_UINT32(p_Ad->matchTblPtr);
-+ if ((uint8_t)tmpReg32 != fmPortGetSetCcParams.getCcParams.prOffset)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+                        ("this manipulation was updated previously by different value"););
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error UpdateModifyCapwapFragmenation(t_FmPcdManip *p_Manip, t_Handle h_Ad, bool validate,t_Handle h_FmTree)
-+{
-+ t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)h_Ad;
-+ t_FmPcdCcSavedManipParams *p_SavedManipParams = NULL;
-+ uint32_t tmpReg32 = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->frag,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION) || (p_Manip->opcode == HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER)), E_INVALID_STATE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag;
-+
-+ if (p_Manip->updateParams)
-+ {
-+
-+ if ((!(p_Manip->updateParams & OFFSET_OF_DATA)) ||
-+ ((p_Manip->shadowUpdateParams & OFFSET_OF_DATA)))
-+        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("at this stage parameters from the Port have not been updated"));
-+ p_SavedManipParams = FmPcdCcTreeGetSavedManipParams(h_FmTree);
-+ if (!p_SavedManipParams)
-+        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("for this manipulation, the tree has to be configured previously with this type"));
-+ p_Manip->capwapFragParams.dataOffset = p_SavedManipParams->capwapParams.dataOffset;
-+
-+ tmpReg32 = GET_UINT32(p_Ad->pcAndOffsets);
-+ tmpReg32 |= ((uint32_t)p_Manip->capwapFragParams.dataOffset<< 16);
-+ WRITE_UINT32(p_Ad->pcAndOffsets,tmpReg32);
-+
-+ p_Manip->updateParams &= ~OFFSET_OF_DATA;
-+ p_Manip->shadowUpdateParams |= OFFSET_OF_DATA;
-+ }
-+ else if (validate)
-+ {
-+
-+ p_SavedManipParams = FmPcdCcTreeGetSavedManipParams(h_FmTree);
-+ if (!p_SavedManipParams)
-+        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("for this manipulation, the tree has to be configured previously with this type"));
-+ if (p_Manip->capwapFragParams.dataOffset != p_SavedManipParams->capwapParams.dataOffset)
-+        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("this manipulation was updated previously by different value"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error UpdateInitCapwapFragmentation(t_Handle h_FmPort,
-+ t_FmPcdManip *p_Manip,
-+ t_Handle h_Ad,
-+ bool validate,
-+ t_Handle h_FmTree)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+ t_Error err;
-+ uint32_t tmpReg32 = 0;
-+ t_FmPcdCcSavedManipParams *p_SavedManipParams;
-+
-+ UNUSED(h_Ad);
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->frag,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION) ||
-+ (p_Manip->opcode == HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER)), E_INVALID_STATE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag;
-+
-+ if (p_Manip->updateParams)
-+ {
-+ if ((!(p_Manip->updateParams & OFFSET_OF_DATA)) ||
-+ ((p_Manip->shadowUpdateParams & OFFSET_OF_DATA)))
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("at this stage parameters from the Port have not been updated"));
-+ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams;
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN | UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY;
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
-+        /* CAPWAP Reassembly uses FMAN_CTRL2 hardcoded, so for fragmentation it is better to use FMAN_CTRL1 */
-+ fmPortGetSetCcParams.setCcParams.orFmanCtrl = FPM_PORT_FM_CTL1;
-+
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Data offset wasn't configured previously"));
-+
-+ p_SavedManipParams = (t_FmPcdCcSavedManipParams *)XX_Malloc(sizeof(t_FmPcdCcSavedManipParams));
-+ p_SavedManipParams->capwapParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset;
-+
-+#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
-+ ASSERT_COND(!(p_SavedManipParams->capwapParams.dataOffset % 16));
-+#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */
-+
-+ FmPcdCcTreeSetSavedManipParams(h_FmTree, (t_Handle)p_SavedManipParams);
-+ }
-+ else if (validate)
-+ {
-+ if ((!(p_Manip->shadowUpdateParams & OFFSET_OF_DATA)) ||
-+ ((p_Manip->updateParams & OFFSET_OF_DATA)))
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("at this stage parameters from the Port should already have been updated"));
-+ fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams;
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN | UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY;
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Data offset wasn't configured previously"));
-+ }
-+
-+ if (p_Manip->updateParams)
-+ {
-+ tmpReg32 = GET_UINT32(p_Ad->pcAndOffsets);
-+ tmpReg32 |= ((uint32_t)fmPortGetSetCcParams.getCcParams.dataOffset<< 16);
-+ WRITE_UINT32(p_Ad->pcAndOffsets,tmpReg32);
-+
-+ p_Manip->updateParams &= ~OFFSET_OF_DATA;
-+ p_Manip->shadowUpdateParams |= OFFSET_OF_DATA;
-+ p_Manip->capwapFragParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset;
-+ }
-+ else if (validate)
-+ {
-+ if (p_Manip->capwapFragParams.dataOffset != fmPortGetSetCcParams.getCcParams.dataOffset)
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("this manipulation was updated previously by different value"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error UpdateInitCapwapReasm(t_Handle h_FmPcd,
-+ t_Handle h_FmPort,
-+ t_FmPcdManip *p_Manip,
-+ t_Handle h_Ad,
-+ bool validate)
-+{
-+ t_CapwapReasmPram *p_ReassmTbl;
-+ t_Error err;
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+ uint8_t i = 0;
-+ uint16_t size;
-+ uint32_t tmpReg32;
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdCcCapwapReassmTimeoutParams ccCapwapReassmTimeoutParams;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Manip->frag,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST), E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc,E_INVALID_HANDLE);
-+
-+ if (p_Manip->h_FmPcd != h_FmPcd)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+                     ("handle of PCD was previously initialized with a different value"));
-+
-+ UNUSED(h_Ad);
-+
-+ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
-+ p_ReassmTbl = (t_CapwapReasmPram *)p_Manip->h_Frag;
-+
-+ if (p_Manip->updateParams)
-+ {
-+ if ((!(p_Manip->updateParams & NUM_OF_TASKS) &&
-+ !(p_Manip->updateParams & OFFSET_OF_DATA) &&
-+ !(p_Manip->updateParams & OFFSET_OF_PR) &&
-+ !(p_Manip->updateParams & HW_PORT_ID)) ||
-+ ((p_Manip->shadowUpdateParams & NUM_OF_TASKS) ||
-+ (p_Manip->shadowUpdateParams & OFFSET_OF_DATA) || (p_Manip->shadowUpdateParams & OFFSET_OF_PR) ||
-+ (p_Manip->shadowUpdateParams & HW_PORT_ID)))
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("at this stage parameters from the Port have not been updated"));
-+
-+ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams;
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN;
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
-+
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (fmPortGetSetCcParams.getCcParams.type & NUM_OF_TASKS)
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Num of tasks wasn't configured previously"));
-+ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previously"));
-+ if (fmPortGetSetCcParams.getCcParams.type & HW_PORT_ID)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("hwPortId wasn't updated"));
-+#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
-+ ASSERT_COND((fmPortGetSetCcParams.getCcParams.dataOffset % 16) == 0);
-+#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */
-+ }
-+ else if (validate)
-+ {
-+ if ((!(p_Manip->shadowUpdateParams & NUM_OF_TASKS) &&
-+ !(p_Manip->shadowUpdateParams & OFFSET_OF_DATA) &&
-+ !(p_Manip->shadowUpdateParams & OFFSET_OF_PR) &&
-+ !(p_Manip->shadowUpdateParams & HW_PORT_ID)) &&
-+ ((p_Manip->updateParams & NUM_OF_TASKS) ||
-+ (p_Manip->updateParams & OFFSET_OF_DATA) || (p_Manip->updateParams & OFFSET_OF_PR) ||
-+ (p_Manip->updateParams & HW_PORT_ID)))
-+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("at this stage parameters from the Port should already have been updated"));
-+
-+ fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams;
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN;
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
-+
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (fmPortGetSetCcParams.getCcParams.type & NUM_OF_TASKS)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("NumOfTasks wasn't configured previously"));
-+ if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previously"));
-+ if (fmPortGetSetCcParams.getCcParams.type & HW_PORT_ID)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("hwPortId wasn't updated"));
-+ }
-+
-+ if (p_Manip->updateParams)
-+ {
-+ if (p_Manip->updateParams & NUM_OF_TASKS)
-+ {
-+ /*recommendation of Microcode team - (maxNumFramesInProcess * 2) */
-+ size = (uint16_t)(p_Manip->capwapFragParams.maxNumFramesInProcess*2 + fmPortGetSetCcParams.getCcParams.numOfTasks);
-+ if (size > 255)
-+                RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("numOfOpenReassmEntries + numOfTasks per port can not be greater than 255"));
-+
-+ p_Manip->capwapFragParams.numOfTasks = fmPortGetSetCcParams.getCcParams.numOfTasks;
-+
-+ /*p_ReassmFrmDescrIndxPoolTbl*/
-+ p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl =
-+ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)(size + 1),
-+ 4);
-+ if (!p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly frame buffer index pool table"));
-+
-+ MemSet8(p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl, 0, (uint32_t)(size + 1));
-+
-+ for ( i = 0; i < size; i++)
-+ WRITE_UINT8(*(uint8_t *)PTR_MOVE(p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl, i), (uint8_t)(i+1));
-+
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl) - p_FmPcd->physicalMuramBase);
-+
-+ WRITE_UINT32(p_ReassmTbl->reasmFrmDescIndexPoolTblPtr, tmpReg32);
-+
-+ /*p_ReassmFrmDescrPoolTbl*/
-+ p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl =
-+ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)((size + 1) * FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE),
-+ 4);
-+
-+ if (!p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly frame buffer pool table"));
-+
-+ MemSet8(p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl, 0, (uint32_t)((size +1)* FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE));
-+
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl) - p_FmPcd->physicalMuramBase);
-+
-+ WRITE_UINT32(p_ReassmTbl->reasmFrmDescPoolTblPtr, tmpReg32);
-+
-+ /*p_TimeOutTbl*/
-+
-+ p_Manip->capwapFragParams.p_TimeOutTbl =
-+ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)((size + 1)* FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE),
-+ 4);
-+
-+ if (!p_Manip->capwapFragParams.p_TimeOutTbl)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly timeout table"));
-+
-+ MemSet8(p_Manip->capwapFragParams.p_TimeOutTbl, 0, (uint16_t)((size + 1)*FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE));
-+
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_TimeOutTbl) - p_FmPcd->physicalMuramBase);
-+ WRITE_UINT32(p_ReassmTbl->timeOutTblPtr, tmpReg32);
-+
-+ p_Manip->updateParams &= ~NUM_OF_TASKS;
-+ p_Manip->shadowUpdateParams |= NUM_OF_TASKS;
-+ }
-+
-+ if (p_Manip->updateParams & OFFSET_OF_DATA)
-+ {
-+ p_Manip->capwapFragParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset;
-+ tmpReg32 = GET_UINT32(p_ReassmTbl->mode);
-+ tmpReg32|= p_Manip->capwapFragParams.dataOffset;
-+ WRITE_UINT32(p_ReassmTbl->mode, tmpReg32);
-+ p_Manip->updateParams &= ~OFFSET_OF_DATA;
-+ p_Manip->shadowUpdateParams |= OFFSET_OF_DATA;
-+ }
-+
-+ if (!(fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR))
-+ {
-+ p_Manip->capwapFragParams.prOffset = fmPortGetSetCcParams.getCcParams.prOffset;
-+
-+ tmpReg32 = GET_UINT32(p_ReassmTbl->mode);
-+ tmpReg32|= FM_PCD_MANIP_CAPWAP_REASM_PR_COPY;
-+ WRITE_UINT32(p_ReassmTbl->mode, tmpReg32);
-+
-+ tmpReg32 = GET_UINT32(p_ReassmTbl->intStatsTblPtr);
-+ tmpReg32 |= (uint32_t)p_Manip->capwapFragParams.prOffset << 24;
-+ WRITE_UINT32(p_ReassmTbl->intStatsTblPtr, tmpReg32);
-+ p_Manip->updateParams &= ~OFFSET_OF_PR;
-+ p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
-+ }
-+ else
-+ {
-+ p_Manip->capwapFragParams.prOffset = 0xff;
-+ p_Manip->updateParams &= ~OFFSET_OF_PR;
-+ p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
-+ }
-+
-+ p_Manip->capwapFragParams.hwPortId = fmPortGetSetCcParams.getCcParams.hardwarePortId;
-+ p_Manip->updateParams &= ~HW_PORT_ID;
-+ p_Manip->shadowUpdateParams |= HW_PORT_ID;
-+
-+ /*timeout hc */
-+ ccCapwapReassmTimeoutParams.fqidForTimeOutFrames = p_Manip->capwapFragParams.fqidForTimeOutFrames;
-+ ccCapwapReassmTimeoutParams.portIdAndCapwapReassmTbl = (uint32_t)p_Manip->capwapFragParams.hwPortId << 24;
-+ ccCapwapReassmTimeoutParams.portIdAndCapwapReassmTbl |= (uint32_t)((XX_VirtToPhys(p_ReassmTbl) - p_FmPcd->physicalMuramBase));
-+ ccCapwapReassmTimeoutParams.timeoutRequestTime = (((uint32_t)1<<p_Manip->capwapFragParams.bitFor1Micro) * p_Manip->capwapFragParams.timeoutRoutineRequestTime)/2;
-+ return FmHcPcdCcCapwapTimeoutReassm(p_FmPcd->h_Hc,&ccCapwapReassmTimeoutParams);
-+ }
-+
-+ else if (validate)
-+ {
-+ if (fmPortGetSetCcParams.getCcParams.hardwarePortId != p_Manip->capwapFragParams.hwPortId)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Reassembly manipulation previously was assigned to another port"));
-+ if (fmPortGetSetCcParams.getCcParams.numOfTasks != p_Manip->capwapFragParams.numOfTasks)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfTasks for this manipulation previously was defined by another value "));
-+
-+ if (!(fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR))
-+ {
-+ if (p_Manip->capwapFragParams.prOffset != fmPortGetSetCcParams.getCcParams.prOffset)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Parse result offset previously was defined by another value "));
-+ }
-+ else
-+ {
-+ if (p_Manip->capwapFragParams.prOffset != 0xff)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Parse result offset previously was defined by another value "));
-+ }
-+ if (fmPortGetSetCcParams.getCcParams.dataOffset != p_Manip->capwapFragParams.dataOffset)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Data offset previously was defined by another value "));
-+ }
-+
-+ return E_OK;
-+}
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+
-+t_Error FmPcdRegisterReassmPort(t_Handle h_FmPcd, t_Handle h_ReasmCommonPramTbl)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdCcReassmTimeoutParams ccReassmTimeoutParams = { 0 };
-+ t_Error err = E_OK;
-+ uint8_t result;
-+ uint32_t bitFor1Micro, tsbs, log2num;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(h_ReasmCommonPramTbl);
-+
-+ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
-+ if (bitFor1Micro == 0)
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
-+
-+ bitFor1Micro = 32 - bitFor1Micro;
-+ LOG2(FM_PCD_MANIP_REASM_TIMEOUT_THREAD_THRESH, log2num);
-+ tsbs = bitFor1Micro - log2num;
-+
-+ ccReassmTimeoutParams.iprcpt = (uint32_t)(XX_VirtToPhys(
-+ h_ReasmCommonPramTbl) - p_FmPcd->physicalMuramBase);
-+ ccReassmTimeoutParams.tsbs = (uint8_t)tsbs;
-+ ccReassmTimeoutParams.activate = TRUE;
-+ if ((err = FmHcPcdCcTimeoutReassm(p_FmPcd->h_Hc, &ccReassmTimeoutParams,
-+ &result)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ switch (result)
-+ {
-+ case (0):
-+ return E_OK;
-+ case (1):
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("failed to allocate TNUM"));
-+ case (2):
-+ RETURN_ERROR(
-+ MAJOR, E_NO_MEMORY,
-+ ("failed to allocate internal buffer from the HC-Port"));
-+ case (3):
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("'Disable Timeout Task' with invalid IPRCPT"));
-+ case (4):
-+ RETURN_ERROR(MAJOR, E_FULL, ("too many timeout tasks"));
-+ case (5):
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("invalid sub command"));
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+ }
-+ return E_OK;
-+}
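The tsbs value programmed by the routine above comes from simple bit arithmetic: the FMan timestamp scale gives the position of the ~1-microsecond bit, and the log2 of the timeout thread threshold is subtracted from it. The following standalone C sketch reproduces that arithmetic with placeholder inputs; the scale value and threshold below are illustrative, not values read from real hardware.

#include <stdint.h>
#include <stdio.h>

/* floor(log2(v)) for v > 0; stands in for the driver's LOG2() macro */
static uint32_t log2_u32(uint32_t v)
{
    uint32_t r = 0;
    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    uint32_t scale = 10;          /* placeholder for FmGetTimeStampScale() */
    uint32_t thread_thresh = 256; /* placeholder for FM_PCD_MANIP_REASM_TIMEOUT_THREAD_THRESH */

    uint32_t bitFor1Micro = 32 - scale;                     /* bit position of ~1us in the timestamp */
    uint32_t tsbs = bitFor1Micro - log2_u32(thread_thresh); /* timestamp bit select written to the HC command */

    printf("bitFor1Micro=%u tsbs=%u\n", bitFor1Micro, tsbs);
    return 0;
}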
-+
-+static t_Error CreateReassCommonTable(t_FmPcdManip *p_Manip)
-+{
-+ uint32_t tmpReg32 = 0, i, bitFor1Micro;
-+ uint64_t tmpReg64, size;
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
-+ t_Error err = E_OK;
-+
-+ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
-+ if (bitFor1Micro == 0)
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
-+
-+ /* Allocation of the Reassembly Common Parameters table. This table is located in the
-+ MURAM. Its size is 64 bytes and its base address should be 8-byte aligned. */
-+ p_Manip->reassmParams.p_ReassCommonTbl =
-+ (t_ReassCommonTbl *)FM_MURAM_AllocMem(
-+ p_FmPcd->h_FmMuram,
-+ FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_SIZE,
-+ FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_ALIGN);
-+
-+ if (!p_Manip->reassmParams.p_ReassCommonTbl)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM alloc for Reassembly common parameters table"));
-+
-+ MemSet8(p_Manip->reassmParams.p_ReassCommonTbl, 0,
-+ FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_SIZE);
-+
-+ /* Setting the TimeOut Mode.*/
-+ tmpReg32 = 0;
-+ if (p_Manip->reassmParams.timeOutMode
-+ == e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES)
-+ tmpReg32 |= FM_PCD_MANIP_REASM_TIME_OUT_BETWEEN_FRAMES;
-+
-+ /* Setting TimeOut FQID - Frames that time out are enqueued to this FQID.
-+ In order to cause TimeOut frames to be discarded, this queue should be configured accordingly*/
-+ tmpReg32 |= p_Manip->reassmParams.fqidForTimeOutFrames;
-+ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->timeoutModeAndFqid,
-+ tmpReg32);
-+
-+    /* Calculate the size of the IP Reassembly Frame Descriptor pool - the number of frames that are allowed to be reassembled simultaneously + 129.*/
-+ size = p_Manip->reassmParams.maxNumFramesInProcess + 129;
-+
-+ /*Allocation of IP Reassembly Frame Descriptor Indexes Pool - This pool resides in the MURAM */
-+ p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr =
-+ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)(size * 2),
-+ 256));
-+ if (!p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr)
-+ RETURN_ERROR(
-+ MAJOR, E_NO_MEMORY,
-+ ("MURAM alloc for Reassembly frame descriptor indexes pool"));
-+
-+ MemSet8(UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr),
-+ 0, (uint32_t)(size * 2));
-+
-+    /* The entries in the IP Reassembly Frame Descriptor Indexes Pool contain indexes starting with 1 up to
-+ the maximum number of frames that are allowed to be reassembled simultaneously + 128.
-+ The last entry in this pool must contain the index zero*/
-+ for (i = 0; i < (size - 1); i++)
-+ WRITE_UINT16(
-+ *(uint16_t *)PTR_MOVE(UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr), (i<<1)),
-+ (uint16_t)(i+1));
-+
-+ /* Sets the IP Reassembly Frame Descriptor Indexes Pool offset from MURAM */
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(
-+ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr))
-+ - p_FmPcd->physicalMuramBase);
-+ WRITE_UINT32(
-+ p_Manip->reassmParams.p_ReassCommonTbl->reassFrmDescIndexPoolTblPtr,
-+ tmpReg32);
-+
-+ /* Allocation of the Reassembly Frame Descriptors Pool - This pool resides in external memory.
-+ The number of entries in this pool should be equal to the number of entries in IP Reassembly Frame Descriptor Indexes Pool.*/
-+ p_Manip->reassmParams.reassFrmDescrPoolTblAddr =
-+ PTR_TO_UINT(XX_MallocSmart((uint32_t)(size * 64), p_Manip->reassmParams.dataMemId, 64));
-+
-+ if (!p_Manip->reassmParams.reassFrmDescrPoolTblAddr)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED"));
-+
-+ MemSet8(UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrPoolTblAddr), 0,
-+ (uint32_t)(size * 64));
-+
-+ /* Sets the Reassembly Frame Descriptors Pool and liodn offset*/
-+ tmpReg64 = (uint64_t)(XX_VirtToPhys(
-+ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrPoolTblAddr)));
-+ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
-+ & FM_PCD_MANIP_REASM_LIODN_MASK)
-+ << (uint64_t)FM_PCD_MANIP_REASM_LIODN_SHIFT);
-+ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
-+ & FM_PCD_MANIP_REASM_ELIODN_MASK)
-+ << (uint64_t)FM_PCD_MANIP_REASM_ELIODN_SHIFT);
-+ WRITE_UINT32(
-+ p_Manip->reassmParams.p_ReassCommonTbl->liodnAndReassFrmDescPoolPtrHi,
-+ (uint32_t)(tmpReg64 >> 32));
-+ WRITE_UINT32(
-+ p_Manip->reassmParams.p_ReassCommonTbl->reassFrmDescPoolPtrLow,
-+ (uint32_t)tmpReg64);
-+
-+ /*Allocation of the TimeOut table - This table resides in the MURAM.
-+ The number of entries in this table is identical to the number of entries in the Reassembly Frame Descriptors Pool*/
-+ p_Manip->reassmParams.timeOutTblAddr =
-+ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, (uint32_t)(size * 8),8));
-+
-+ if (!p_Manip->reassmParams.timeOutTblAddr)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM alloc for Reassembly timeout table"));
-+
-+ MemSet8(UINT_TO_PTR(p_Manip->reassmParams.timeOutTblAddr), 0,
-+ (uint16_t)(size * 8));
-+
-+ /* Sets the TimeOut table offset from MURAM */
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(
-+ UINT_TO_PTR(p_Manip->reassmParams.timeOutTblAddr))
-+ - p_FmPcd->physicalMuramBase);
-+ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->timeOutTblPtr,
-+ tmpReg32);
-+
-+ /* Sets the Expiration Delay */
-+ tmpReg32 = 0;
-+ tmpReg32 |= (((uint32_t)(1 << bitFor1Micro))
-+ * p_Manip->reassmParams.timeoutThresholdForReassmProcess);
-+ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->expirationDelay,
-+ tmpReg32);
-+
-+ err = FmPcdRegisterReassmPort(p_FmPcd,
-+ p_Manip->reassmParams.p_ReassCommonTbl);
-+ if (err != E_OK)
-+ {
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->reassmParams.p_ReassCommonTbl);
-+ RETURN_ERROR(MAJOR, err, ("port registration"));
-+ }
-+
-+ return err;
-+}
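The frame-descriptor index pool built above follows a simple free-list convention: 16-bit entries hold 1..N-1 and the final entry is left at zero as the terminator. A minimal host-side sketch of that layout; the pool size of 8 is an arbitrary example, not a driver constant.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint16_t pool[8];                        /* example size; the driver uses maxNumFramesInProcess + 129 */
    size_t size = sizeof(pool) / sizeof(pool[0]);
    size_t i;

    memset(pool, 0, sizeof(pool));           /* the MURAM region is zeroed first */
    for (i = 0; i < size - 1; i++)           /* the last entry stays 0 to close the chain */
        pool[i] = (uint16_t)(i + 1);

    for (i = 0; i < size; i++)
        printf("pool[%zu] = %u\n", i, pool[i]);
    return 0;
}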
-+
-+static t_Error CreateReassTable(t_FmPcdManip *p_Manip, e_NetHeaderType hdr)
-+{
-+ t_FmPcd *p_FmPcd = p_Manip->h_FmPcd;
-+ uint32_t tmpReg32, autoLearnHashTblSize;
-+ uint32_t numOfWays, setSize, setSizeCode, keySize;
-+ uint32_t waySize, numOfSets, numOfEntries;
-+ uint64_t tmpReg64;
-+ uint16_t minFragSize;
-+ uint16_t maxReassemSize;
-+ uintptr_t *p_AutoLearnHashTblAddr, *p_AutoLearnSetLockTblAddr;
-+ t_ReassTbl **p_ReassTbl;
-+
-+ switch (hdr)
-+ {
-+ case HEADER_TYPE_IPv4:
-+ p_ReassTbl = &p_Manip->reassmParams.ip.p_Ipv4ReassTbl;
-+ p_AutoLearnHashTblAddr =
-+ &p_Manip->reassmParams.ip.ipv4AutoLearnHashTblAddr;
-+ p_AutoLearnSetLockTblAddr =
-+ &p_Manip->reassmParams.ip.ipv4AutoLearnSetLockTblAddr;
-+ minFragSize = p_Manip->reassmParams.ip.minFragSize[0];
-+ maxReassemSize = 0;
-+ numOfWays = p_Manip->reassmParams.ip.numOfFramesPerHashEntry[0];
-+ keySize = 4 + 4 + 1 + 2; /* 3-tuple + IP-Id */
-+ break;
-+ case HEADER_TYPE_IPv6:
-+ p_ReassTbl = &p_Manip->reassmParams.ip.p_Ipv6ReassTbl;
-+ p_AutoLearnHashTblAddr =
-+ &p_Manip->reassmParams.ip.ipv6AutoLearnHashTblAddr;
-+ p_AutoLearnSetLockTblAddr =
-+ &p_Manip->reassmParams.ip.ipv6AutoLearnSetLockTblAddr;
-+ minFragSize = p_Manip->reassmParams.ip.minFragSize[1];
-+ maxReassemSize = 0;
-+ numOfWays = p_Manip->reassmParams.ip.numOfFramesPerHashEntry[1];
-+ keySize = 16 + 16 + 4; /* 2-tuple + IP-Id */
-+ if (numOfWays > e_FM_PCD_MANIP_SIX_WAYS_HASH)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("num of ways"));
-+ break;
-+ case HEADER_TYPE_CAPWAP:
-+ p_ReassTbl = &p_Manip->reassmParams.capwap.p_ReassTbl;
-+ p_AutoLearnHashTblAddr =
-+ &p_Manip->reassmParams.capwap.autoLearnHashTblAddr;
-+ p_AutoLearnSetLockTblAddr =
-+ &p_Manip->reassmParams.capwap.autoLearnSetLockTblAddr;
-+ minFragSize = 0;
-+ maxReassemSize = p_Manip->reassmParams.capwap.maxRessembledsSize;
-+ numOfWays = p_Manip->reassmParams.capwap.numOfFramesPerHashEntry;
-+ keySize = 4;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("header type"));
-+ }
-+ keySize += 2; /* 2 bytes reserved for RFDIndex */
-+#if (DPAA_VERSION >= 11)
-+ keySize += 2; /* 2 bytes reserved */
-+#endif /* (DPAA_VERSION >= 11) */
-+ waySize = ROUND_UP(keySize, 8);
-+
-+ /* Allocates the Reassembly Parameters Table - This table is located in the MURAM.*/
-+ *p_ReassTbl = (t_ReassTbl *)FM_MURAM_AllocMem(
-+ p_FmPcd->h_FmMuram, FM_PCD_MANIP_REASM_TABLE_SIZE,
-+ FM_PCD_MANIP_REASM_TABLE_ALIGN);
-+ if (!*p_ReassTbl)
-+ RETURN_ERROR( MAJOR, E_NO_MEMORY,
-+ ("MURAM alloc for Reassembly specific parameters table"));
-+ memset(*p_ReassTbl, 0, sizeof(t_ReassTbl));
-+
-+ /* Sets the Reassembly common Parameters table offset from MURAM in the Reassembly Table descriptor*/
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->reassmParams.p_ReassCommonTbl)
-+ - p_FmPcd->physicalMuramBase);
-+ WRITE_UINT32((*p_ReassTbl)->reassCommonPrmTblPtr, tmpReg32);
-+
-+ /* Calculate set size (set size is rounded-up to next power of 2) */
-+ NEXT_POWER_OF_2(numOfWays * waySize, setSize);
-+
-+ /* Get set size code */
-+ LOG2(setSize, setSizeCode);
-+
-+ /* Sets ways number and set size code */
-+ WRITE_UINT16((*p_ReassTbl)->waysNumAndSetSize,
-+ (uint16_t)((numOfWays << 8) | setSizeCode));
-+
-+ /* It is recommended that the total number of entries in this table
-+ (number of sets * number of ways) will be twice the number of frames that
-+ are expected to be reassembled simultaneously.*/
-+ numOfEntries = (uint32_t)(p_Manip->reassmParams.maxNumFramesInProcess * 2);
-+
-+ /* sets number calculation - number of entries = number of sets * number of ways */
-+ numOfSets = numOfEntries / numOfWays;
-+
-+ /* Sets AutoLearnHashKeyMask*/
-+ NEXT_POWER_OF_2(numOfSets, numOfSets);
-+
-+ WRITE_UINT16((*p_ReassTbl)->autoLearnHashKeyMask,
-+ (uint16_t)(numOfSets - 1));
-+
-+ /* Allocation of Reassembly Automatic Learning Hash Table - This table resides in external memory.
-+ The size of this table is determined by the number of sets and the set size.
-+ Table size = set size * number of sets
-+ This table base address should be aligned to SetSize.*/
-+ autoLearnHashTblSize = numOfSets * setSize;
-+
-+ *p_AutoLearnHashTblAddr =
-+ PTR_TO_UINT(XX_MallocSmart(autoLearnHashTblSize, p_Manip->reassmParams.dataMemId, setSize));
-+ if (!*p_AutoLearnHashTblAddr)
-+ {
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, *p_ReassTbl);
-+ *p_ReassTbl = NULL;
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED"));
-+ }
-+ MemSet8(UINT_TO_PTR(*p_AutoLearnHashTblAddr), 0, autoLearnHashTblSize);
-+
-+ /* Sets the Reassembly Automatic Learning Hash Table and liodn offset */
-+ tmpReg64 = ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
-+ & FM_PCD_MANIP_REASM_LIODN_MASK)
-+ << (uint64_t)FM_PCD_MANIP_REASM_LIODN_SHIFT);
-+ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
-+ & FM_PCD_MANIP_REASM_ELIODN_MASK)
-+ << (uint64_t)FM_PCD_MANIP_REASM_ELIODN_SHIFT);
-+ tmpReg64 |= XX_VirtToPhys(UINT_TO_PTR(*p_AutoLearnHashTblAddr));
-+ WRITE_UINT32( (*p_ReassTbl)->liodnAlAndAutoLearnHashTblPtrHi,
-+ (uint32_t)(tmpReg64 >> 32));
-+ WRITE_UINT32((*p_ReassTbl)->autoLearnHashTblPtrLow, (uint32_t)tmpReg64);
-+
-+ /* Allocation of the Set Lock table - This table resides in external memory
-+ The size of this table is (number of sets in the Reassembly Automatic Learning Hash table)*4 bytes.
-+ This table resides in external memory and its base address should be 4-byte aligned */
-+ *p_AutoLearnSetLockTblAddr =
-+ PTR_TO_UINT(XX_MallocSmart((uint32_t)(numOfSets * 4), p_Manip->reassmParams.dataMemId, 4));
-+ if (!*p_AutoLearnSetLockTblAddr)
-+ {
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, *p_ReassTbl);
-+ *p_ReassTbl = NULL;
-+ XX_FreeSmart(UINT_TO_PTR(*p_AutoLearnHashTblAddr));
-+ *p_AutoLearnHashTblAddr = 0;
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED"));
-+ }
-+ MemSet8(UINT_TO_PTR(*p_AutoLearnSetLockTblAddr), 0, (numOfSets * 4));
-+
-+ /* sets Set Lock table pointer and liodn offset*/
-+ tmpReg64 = ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
-+ & FM_PCD_MANIP_REASM_LIODN_MASK)
-+ << (uint64_t)FM_PCD_MANIP_REASM_LIODN_SHIFT);
-+ tmpReg64 |= ((uint64_t)(p_Manip->reassmParams.dataLiodnOffset
-+ & FM_PCD_MANIP_REASM_ELIODN_MASK)
-+ << (uint64_t)FM_PCD_MANIP_REASM_ELIODN_SHIFT);
-+ tmpReg64 |= XX_VirtToPhys(UINT_TO_PTR(*p_AutoLearnSetLockTblAddr));
-+ WRITE_UINT32( (*p_ReassTbl)->liodnSlAndAutoLearnSetLockTblPtrHi,
-+ (uint32_t)(tmpReg64 >> 32));
-+ WRITE_UINT32((*p_ReassTbl)->autoLearnSetLockTblPtrLow, (uint32_t)tmpReg64);
-+
-+ /* Sets user's requested minimum fragment size (in Bytes) for First/Middle fragment */
-+ WRITE_UINT16((*p_ReassTbl)->minFragSize, minFragSize);
-+
-+ WRITE_UINT16((*p_ReassTbl)->maxReassemblySize, maxReassemSize);
-+
-+ return E_OK;
-+}
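The auto-learn hash sizing above reduces to three steps: round the key up to an 8-byte way, round ways * waySize up to a power-of-two set size, and derive the hash key mask from the (power-of-two) number of sets. A standalone sketch with example inputs; the key size, way count, and frame count below are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* smallest power of two >= v; stands in for the driver's NEXT_POWER_OF_2() */
static uint32_t next_pow2(uint32_t v)
{
    uint32_t p = 1;
    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    uint32_t keySize   = 13;   /* e.g. IPv4 3-tuple + IP-Id + reserved bytes */
    uint32_t numOfWays = 8;
    uint32_t maxFrames = 64;   /* frames reassembled simultaneously */

    uint32_t waySize   = (keySize + 7) & ~7u;              /* ROUND_UP(keySize, 8) */
    uint32_t setSize   = next_pow2(numOfWays * waySize);
    uint32_t numOfSets = next_pow2((maxFrames * 2) / numOfWays);
    uint16_t hashMask  = (uint16_t)(numOfSets - 1);        /* autoLearnHashKeyMask */
    uint32_t tblSize   = numOfSets * setSize;              /* external hash table size */

    printf("waySize=%u setSize=%u sets=%u mask=0x%04x tbl=%u\n",
           waySize, setSize, numOfSets, hashMask, tblSize);
    return 0;
}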
-+
-+static t_Error UpdateInitReasm(t_Handle h_FmPcd, t_Handle h_PcdParams,
-+ t_Handle h_FmPort, t_FmPcdManip *p_Manip,
-+ t_Handle h_Ad, bool validate)
-+{
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+ uint32_t tmpReg32;
-+ t_Error err;
-+ t_FmPortPcdParams *p_PcdParams = (t_FmPortPcdParams *)h_PcdParams;
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdCtrlParamsPage *p_ParamsPage;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Manip->frag, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ (p_Manip->opcode == HMAN_OC_IP_REASSEMBLY) || (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY),
-+ E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Manip->updateParams || h_PcdParams,
-+ E_INVALID_HANDLE);
-+
-+ UNUSED(h_Ad);
-+
-+ if (!p_Manip->updateParams)
-+ return E_OK;
-+
-+ if (p_Manip->h_FmPcd != h_FmPcd)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+                ("the PCD handle differs from the one this manipulation was initialized with"));
-+
-+ if (p_Manip->updateParams)
-+ {
-+ if ((!(p_Manip->updateParams
-+ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK)))
-+ || ((p_Manip->shadowUpdateParams
-+ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK))))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+                    ("at this stage, parameters from the Port have not been updated"));
-+
-+ fmPortGetSetCcParams.setCcParams.type = 0;
-+ if (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY)
-+ {
-+ fmPortGetSetCcParams.setCcParams.type |= UPDATE_OFP_DPTE;
-+ fmPortGetSetCcParams.setCcParams.ofpDpde = 0xF;
-+ }
-+ fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams | FM_REV;
-+ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
-+ != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (fmPortGetSetCcParams.getCcParams.type
-+ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK | FM_REV))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("offset of the data wasn't configured previously"));
-+ if (p_Manip->updateParams
-+ & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK))
-+ {
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint8_t *p_Ptr, i, totalNumOfTnums;
-+
-+ totalNumOfTnums =
-+ (uint8_t)(fmPortGetSetCcParams.getCcParams.numOfTasks
-+ + fmPortGetSetCcParams.getCcParams.numOfExtraTasks);
-+
-+ p_Manip->reassmParams.internalBufferPoolAddr =
-+ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)(totalNumOfTnums * BMI_FIFO_UNITS),
-+ BMI_FIFO_UNITS));
-+ if (!p_Manip->reassmParams.internalBufferPoolAddr)
-+ RETURN_ERROR(
-+ MAJOR, E_NO_MEMORY,
-+ ("MURAM alloc for Reassembly internal buffers pool"));
-+ MemSet8(
-+ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolAddr),
-+ 0, (uint32_t)(totalNumOfTnums * BMI_FIFO_UNITS));
-+
-+ p_Manip->reassmParams.internalBufferPoolManagementIndexAddr =
-+ PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)(5 + totalNumOfTnums),
-+ 4));
-+ if (!p_Manip->reassmParams.internalBufferPoolManagementIndexAddr)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NO_MEMORY,
-+ ("MURAM alloc for Reassembly internal buffers management"));
-+
-+ p_Ptr =
-+ (uint8_t*)UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolManagementIndexAddr);
-+ WRITE_UINT32(
-+ *(uint32_t*)p_Ptr,
-+ (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolAddr)) - p_FmPcd->physicalMuramBase));
-+ for (i = 0, p_Ptr += 4; i < totalNumOfTnums; i++, p_Ptr++)
-+ WRITE_UINT8(*p_Ptr, i);
-+ WRITE_UINT8(*p_Ptr, 0xFF);
-+
-+ tmpReg32 =
-+ (4 << FM_PCD_MANIP_REASM_COMMON_INT_BUFFER_IDX_SHIFT)
-+ | ((uint32_t)(XX_VirtToPhys(
-+ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolManagementIndexAddr))
-+ - p_FmPcd->physicalMuramBase));
-+ WRITE_UINT32(
-+ p_Manip->reassmParams.p_ReassCommonTbl->internalBufferManagement,
-+ tmpReg32);
-+
-+ p_Manip->updateParams &= ~(NUM_OF_TASKS | NUM_OF_EXTRA_TASKS
-+ | DISCARD_MASK);
-+ p_Manip->shadowUpdateParams |= (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS
-+ | DISCARD_MASK);
-+ }
-+ }
-+
-+ if (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY)
-+ {
-+ if (p_Manip->reassmParams.capwap.h_Scheme)
-+ {
-+ p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] =
-+ p_Manip->reassmParams.capwap.h_Scheme;
-+ p_PcdParams->p_KgParams->numOfSchemes++;
-+ }
-+
-+ }
-+ else
-+ {
-+ if (p_Manip->reassmParams.ip.h_Ipv4Scheme)
-+ {
-+ p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] =
-+ p_Manip->reassmParams.ip.h_Ipv4Scheme;
-+ p_PcdParams->p_KgParams->numOfSchemes++;
-+ }
-+ if (p_Manip->reassmParams.ip.h_Ipv6Scheme)
-+ {
-+ p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] =
-+ p_Manip->reassmParams.ip.h_Ipv6Scheme;
-+ p_PcdParams->p_KgParams->numOfSchemes++;
-+ }
-+#if (DPAA_VERSION >= 11)
-+ if (fmPortGetSetCcParams.getCcParams.revInfo.majorRev >= 6)
-+ {
-+ if ((err = FmPortSetGprFunc(h_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
-+ (void**)&p_ParamsPage)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ tmpReg32 = NIA_ENG_KG;
-+ if (p_Manip->reassmParams.ip.h_Ipv4Scheme)
-+ {
-+ tmpReg32 |= NIA_KG_DIRECT;
-+ tmpReg32 |= NIA_KG_CC_EN;
-+ tmpReg32 |= FmPcdKgGetSchemeId(
-+ p_Manip->reassmParams.ip.h_Ipv4Scheme);
-+ WRITE_UINT32(p_ParamsPage->iprIpv4Nia, tmpReg32);
-+ }
-+ if (p_Manip->reassmParams.ip.h_Ipv6Scheme)
-+ {
-+ tmpReg32 &= ~NIA_AC_MASK;
-+ tmpReg32 |= NIA_KG_DIRECT;
-+ tmpReg32 |= NIA_KG_CC_EN;
-+ tmpReg32 |= FmPcdKgGetSchemeId(
-+ p_Manip->reassmParams.ip.h_Ipv6Scheme);
-+ WRITE_UINT32(p_ParamsPage->iprIpv6Nia, tmpReg32);
-+ }
-+ }
-+#else
-+ if (fmPortGetSetCcParams.getCcParams.revInfo.majorRev < 6)
-+ {
-+ WRITE_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->discardMask,
-+ fmPortGetSetCcParams.getCcParams.discardMask);
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+ return E_OK;
-+}
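The internal-buffer management area initialized above has a small fixed layout: a 32-bit MURAM offset of the buffer pool, one index byte per TNUM, and a 0xFF terminator. A host-side sketch of that layout follows; the task count and pool offset are made-up example values, and plain memcpy stands in for the driver's WRITE_UINT32()/WRITE_UINT8() accessors.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    enum { TASKS = 6 };                      /* numOfTasks + numOfExtraTasks in the driver */
    uint8_t area[4 + TASKS + 1];
    uint32_t pool_offset = 0x1000;           /* placeholder MURAM offset of the buffer pool */
    uint8_t *p = area;
    unsigned i;

    memcpy(p, &pool_offset, sizeof(pool_offset));  /* driver writes this with WRITE_UINT32() (big-endian I/O) */
    for (i = 0, p += 4; i < TASKS; i++, p++)
        *p = (uint8_t)i;                     /* one free-index byte per task */
    *p = 0xFF;                               /* end-of-list marker */

    for (i = 0; i < sizeof(area); i++)
        printf("%02x ", area[i]);
    printf("\n");
    return 0;
}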
-+
-+#if (DPAA_VERSION == 10)
-+static t_Error FmPcdFragHcScratchPoolFill(t_Handle h_FmPcd, uint8_t scratchBpid)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdCcFragScratchPoolCmdParams fmPcdCcFragScratchPoolCmdParams;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ memset(&fmPcdCcFragScratchPoolCmdParams, 0, sizeof(t_FmPcdCcFragScratchPoolCmdParams));
-+
-+ fmPcdCcFragScratchPoolCmdParams.numOfBuffers = NUM_OF_SCRATCH_POOL_BUFFERS;
-+ fmPcdCcFragScratchPoolCmdParams.bufferPoolId = scratchBpid;
-+ if ((err = FmHcPcdCcIpFragScratchPollCmd(p_FmPcd->h_Hc, TRUE, &fmPcdCcFragScratchPoolCmdParams)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (fmPcdCcFragScratchPoolCmdParams.numOfBuffers != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Fill scratch pool failed,"
-+ "Failed to release %d buffers to the BM (missing FBPRs)",
-+ fmPcdCcFragScratchPoolCmdParams.numOfBuffers));
-+
-+ return E_OK;
-+}
-+
-+static t_Error FmPcdFragHcScratchPoolEmpty(t_Handle h_FmPcd, uint8_t scratchBpid)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdCcFragScratchPoolCmdParams fmPcdCcFragScratchPoolCmdParams;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ memset(&fmPcdCcFragScratchPoolCmdParams, 0, sizeof(t_FmPcdCcFragScratchPoolCmdParams));
-+
-+ fmPcdCcFragScratchPoolCmdParams.bufferPoolId = scratchBpid;
-+ if ((err = FmHcPcdCcIpFragScratchPollCmd(p_FmPcd->h_Hc, FALSE, &fmPcdCcFragScratchPoolCmdParams)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+#endif /* (DPAA_VERSION == 10) */
-+
-+static void ReleaseManipHandler(t_FmPcdManip *p_Manip, t_FmPcd *p_FmPcd)
-+{
-+ if (p_Manip->h_Ad)
-+ {
-+ if (p_Manip->muramAllocate)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->h_Ad);
-+ else
-+ XX_Free(p_Manip->h_Ad);
-+ p_Manip->h_Ad = NULL;
-+ }
-+ if (p_Manip->p_Template)
-+ {
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->p_Template);
-+ p_Manip->p_Template = NULL;
-+ }
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ if (p_Manip->h_Frag)
-+ {
-+ if (p_Manip->capwapFragParams.p_AutoLearnHashTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->capwapFragParams.p_AutoLearnHashTbl);
-+ if (p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->capwapFragParams.p_ReassmFrmDescrPoolTbl);
-+ if (p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->capwapFragParams.p_ReassmFrmDescrIndxPoolTbl);
-+ if (p_Manip->capwapFragParams.p_TimeOutTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->capwapFragParams.p_TimeOutTbl);
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->h_Frag);
-+
-+ }
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ if (p_Manip->frag)
-+ {
-+ if (p_Manip->fragParams.p_Frag)
-+ {
-+#if (DPAA_VERSION == 10)
-+ FmPcdFragHcScratchPoolEmpty((t_Handle)p_FmPcd, p_Manip->fragParams.scratchBpid);
-+#endif /* (DPAA_VERSION == 10) */
-+
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_Frag);
-+ }
-+ }
-+ else
-+ if (p_Manip->reassm)
-+ {
-+ FmPcdUnregisterReassmPort(p_FmPcd,
-+ p_Manip->reassmParams.p_ReassCommonTbl);
-+
-+ if (p_Manip->reassmParams.timeOutTblAddr)
-+ FM_MURAM_FreeMem(
-+ p_FmPcd->h_FmMuram,
-+ UINT_TO_PTR(p_Manip->reassmParams.timeOutTblAddr));
-+ if (p_Manip->reassmParams.reassFrmDescrPoolTblAddr)
-+ XX_FreeSmart(
-+ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrPoolTblAddr));
-+ if (p_Manip->reassmParams.p_ReassCommonTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->reassmParams.p_ReassCommonTbl);
-+ if (p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr)
-+ FM_MURAM_FreeMem(
-+ p_FmPcd->h_FmMuram,
-+ UINT_TO_PTR(p_Manip->reassmParams.reassFrmDescrIndxPoolTblAddr));
-+ if (p_Manip->reassmParams.internalBufferPoolManagementIndexAddr)
-+ FM_MURAM_FreeMem(
-+ p_FmPcd->h_FmMuram,
-+ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolManagementIndexAddr));
-+ if (p_Manip->reassmParams.internalBufferPoolAddr)
-+ FM_MURAM_FreeMem(
-+ p_FmPcd->h_FmMuram,
-+ UINT_TO_PTR(p_Manip->reassmParams.internalBufferPoolAddr));
-+ if (p_Manip->reassmParams.hdr == HEADER_TYPE_CAPWAP)
-+ {
-+
-+ }
-+ else
-+ {
-+ if (p_Manip->reassmParams.ip.ipv4AutoLearnHashTblAddr)
-+ XX_FreeSmart(
-+ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv4AutoLearnHashTblAddr));
-+ if (p_Manip->reassmParams.ip.ipv6AutoLearnHashTblAddr)
-+ XX_FreeSmart(
-+ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv6AutoLearnHashTblAddr));
-+ if (p_Manip->reassmParams.ip.ipv4AutoLearnSetLockTblAddr)
-+ XX_FreeSmart(
-+ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv4AutoLearnSetLockTblAddr));
-+ if (p_Manip->reassmParams.ip.ipv6AutoLearnSetLockTblAddr)
-+ XX_FreeSmart(
-+ UINT_TO_PTR(p_Manip->reassmParams.ip.ipv6AutoLearnSetLockTblAddr));
-+ if (p_Manip->reassmParams.ip.p_Ipv4ReassTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->reassmParams.ip.p_Ipv4ReassTbl);
-+ if (p_Manip->reassmParams.ip.p_Ipv6ReassTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram,
-+ p_Manip->reassmParams.ip.p_Ipv6ReassTbl);
-+ if (p_Manip->reassmParams.ip.h_Ipv6Ad)
-+ XX_FreeSmart(p_Manip->reassmParams.ip.h_Ipv6Ad);
-+ if (p_Manip->reassmParams.ip.h_Ipv4Ad)
-+ XX_FreeSmart(p_Manip->reassmParams.ip.h_Ipv4Ad);
-+ }
-+ }
-+
-+ if (p_Manip->p_StatsTbl)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->p_StatsTbl);
-+}
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+static t_Error CheckManipParamsAndSetType(t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_ManipParams)
-+{
-+ if (p_ManipParams->u.hdr.rmv)
-+ {
-+ switch (p_ManipParams->u.hdr.rmvParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR):
-+ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.type)
-+ {
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START) :
-+ if (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.include)
-+ {
-+ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.hdrInfo.hdr)
-+ {
-+ case (HEADER_TYPE_CAPWAP_DTLS) :
-+ p_Manip->opcode = HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST;
-+ p_Manip->muramAllocate = TRUE;
-+ if (p_ManipParams->u.hdr.insrt)
-+                                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("CAPWAP_DTLS_HDR removal can not be followed by an insert manipulation"));
-+ if (p_ManipParams->fragOrReasm)
-+ {
-+ if (!p_ManipParams->fragOrReasmParams.frag)
-+ {
-+ switch (p_ManipParams->fragOrReasmParams.hdr)
-+ {
-+ case (HEADER_TYPE_CAPWAP):
-+ p_Manip->opcode = HMAN_OC_CAPWAP_REASSEMBLY;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("unsupported header for Reassembly"));
-+ }
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("for this type of manipulation frag can not be TRUE"));
-+ }
-+ break;
-+ default:
-+                                    RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid network header for the remove location"));
-+ }
-+ }
-+ else
-+ {
-+ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.hdrInfo.hdr)
-+ {
-+ case (HEADER_TYPE_CAPWAP_DTLS) :
-+ case (HEADER_TYPE_CAPWAP) :
-+ if (p_ManipParams->fragOrReasm || p_ManipParams->u.hdr.insrt)
-+                                        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("for remove type e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_TILL_CAPWAP, insrt and fragOrReasm can not be TRUE"));
-+ p_Manip->opcode = HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR;
-+ p_Manip->muramAllocate = TRUE;
-+ p_ManipParams->u.hdr.insrt = TRUE; //internal frame header
-+ break;
-+ default :
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation"));
-+ }
-+ }
-+ break;
-+ default :
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation"));
-+ }
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation"));
-+ }
-+ }
-+ else if (p_ManipParams->u.hdr.insrt)
-+ {
-+ switch (p_ManipParams->u.hdr.insrtParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_INSRT_BY_TEMPLATE) :
-+
-+ p_Manip->opcode = HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER;
-+ p_Manip->muramAllocate = FALSE;
-+ if (p_ManipParams->fragOrReasm)
-+ {
-+ if (p_ManipParams->fragOrReasmParams.frag)
-+ {
-+ switch (p_ManipParams->fragOrReasmParams.hdr)
-+ {
-+ case (HEADER_TYPE_CAPWAP):
-+ p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid header for fragmentation"));
-+ }
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,("can not reach this point"));
-+ }
-+ break;
-+
-+ default:
-+                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("unsupported type for insert-only manipulation"));
-+ }
-+ }
-+ else if (p_ManipParams->fragOrReasm)
-+ {
-+ if (p_ManipParams->fragOrReasmParams.frag)
-+ {
-+ switch (p_ManipParams->fragOrReasmParams.hdr)
-+ {
-+ case (HEADER_TYPE_CAPWAP):
-+ p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION;
-+ p_Manip->muramAllocate = FALSE;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported header for fragmentation"));
-+ }
-+ }
-+ else
-+ {
-+ switch (p_ManipParams->fragOrReasmParams.hdr)
-+ {
-+ case (HEADER_TYPE_CAPWAP):
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Reassembly has to be with additional operation - rmv = TRUE, type of remove - e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_INCLUDE_SPECIFIC_LOCATION,type = e_FM_PCD_MANIP_LOC_BY_HDR, hdr = HEADER_TYPE_CAPWAP_DTLS"));
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported header for reassembly"));
-+ }
-+ }
-+
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("User didn't ask for any manipulation"));
-+
-+ p_Manip->insrt = p_ManipParams->u.hdr.insrt;
-+ p_Manip->rmv = p_ManipParams->u.hdr.rmv;
-+
-+ return E_OK;
-+}
-+
-+#else /* not (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+static t_Error CheckManipParamsAndSetType(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipParams *p_ManipParams)
-+{
-+ switch (p_ManipParams->type)
-+ {
-+ case e_FM_PCD_MANIP_HDR:
-+ /* Check that next-manip is not already used */
-+ if (p_ManipParams->h_NextManip)
-+ {
-+ if (!MANIP_IS_FIRST(p_ManipParams->h_NextManip))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+ ("h_NextManip is already a part of another chain"));
-+ if ((MANIP_GET_TYPE(p_ManipParams->h_NextManip)
-+ != e_FM_PCD_MANIP_HDR) &&
-+ (MANIP_GET_TYPE(p_ManipParams->h_NextManip)
-+ != e_FM_PCD_MANIP_FRAG))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_NOT_SUPPORTED,
-+ ("For a Header Manipulation node - no support of h_NextManip of type other than Header Manipulation or Fragmentation."));
-+ }
-+
-+ if (p_ManipParams->u.hdr.rmv)
-+ {
-+ switch (p_ManipParams->u.hdr.rmvParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR):
-+ switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.type)
-+ {
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2):
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP):
-+ break;
-+ case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START):
-+ {
-+ t_Error err;
-+ uint8_t prsArrayOffset;
-+
-+ err =
-+ GetPrOffsetByHeaderOrField(
-+ &p_ManipParams->u.hdr.rmvParams.u.byHdr.u.hdrInfo,
-+ &prsArrayOffset);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ break;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("invalid type of remove manipulation"));
-+ }
-+ break;
-+ case (e_FM_PCD_MANIP_RMV_GENERIC):
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("invalid type of remove manipulation"));
-+ }
-+ p_Manip->opcode = HMAN_OC;
-+ p_Manip->muramAllocate = TRUE;
-+ p_Manip->rmv = TRUE;
-+ }
-+ else
-+ if (p_ManipParams->u.hdr.insrt)
-+ {
-+ switch (p_ManipParams->u.hdr.insrtParams.type)
-+ {
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR):
-+ {
-+ switch (p_ManipParams->u.hdr.insrtParams.u.byHdr.type)
-+ {
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2):
-+ /* nothing to check */
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_IP):
-+ if (p_ManipParams->u.hdr.insrtParams.u.byHdr.u.ipParams.insrt.size
-+ % 4)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("IP inserted header must be of size which is a multiple of four bytes"));
-+ break;
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP):
-+ if (p_ManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
-+ % 4)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("CAPWAP inserted header must be of size which is a multiple of four bytes"));
-+ break;
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP):
-+ case (e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE):
-+ if (p_ManipParams->u.hdr.insrtParams.u.byHdr.u.insrt.size
-+ != 8)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("Inserted header must be of size 8"));
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("unsupported insert by header type"));
-+ }
-+ }
-+ case (e_FM_PCD_MANIP_INSRT_GENERIC):
-+ break;
-+ default:
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+                                ("unsupported type for insert-only manipulation"));
-+ }
-+ p_Manip->opcode = HMAN_OC;
-+ p_Manip->muramAllocate = TRUE;
-+ p_Manip->insrt = TRUE;
-+ }
-+ else
-+ if (p_ManipParams->u.hdr.fieldUpdate)
-+ {
-+ /* Check parameters */
-+ if (p_ManipParams->u.hdr.fieldUpdateParams.type
-+ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN)
-+ {
-+ if ((p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
-+ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI)
-+ && (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.vpri
-+ > 7))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("vpri should get values of 0-7 "));
-+ if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType
-+ == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN)
-+ {
-+ int i;
-+
-+ if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.vpriDefVal
-+ > 7)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("vpriDefVal should get values of 0-7 "));
-+ for (i = 0; i < FM_PCD_MANIP_DSCP_TO_VLAN_TRANS;
-+ i++)
-+ if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.dscpToVpriTable[i]
-+ & 0xf0)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+                                        ("dscpToVpriTable value out of range (0-15)"));
-+ }
-+
-+ }
-+
-+ p_Manip->opcode = HMAN_OC;
-+ p_Manip->muramAllocate = TRUE;
-+ p_Manip->fieldUpdate = TRUE;
-+ }
-+ else
-+ if (p_ManipParams->u.hdr.custom)
-+ {
-+ if (p_ManipParams->u.hdr.customParams.type == e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE)
-+ {
-+
-+ if ((p_ManipParams->u.hdr.customParams.u.genFieldReplace.size == 0) ||
-+ (p_ManipParams->u.hdr.customParams.u.genFieldReplace.size > 8))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("size should get values of 1-8 "));
-+
-+ if (p_ManipParams->u.hdr.customParams.u.genFieldReplace.srcOffset > 7)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("srcOffset should be <= 7"));
-+
-+ if ((p_ManipParams->u.hdr.customParams.u.genFieldReplace.srcOffset +
-+ p_ManipParams->u.hdr.customParams.u.genFieldReplace.size) > 8)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("(srcOffset + size) should be <= 8"));
-+
-+ if ((p_ManipParams->u.hdr.customParams.u.genFieldReplace.dstOffset +
-+ p_ManipParams->u.hdr.customParams.u.genFieldReplace.size) > 256)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("(dstOffset + size) should be <= 256"));
-+
-+ }
-+
-+ p_Manip->opcode = HMAN_OC;
-+ p_Manip->muramAllocate = TRUE;
-+ p_Manip->custom = TRUE;
-+ }
-+ break;
-+ case e_FM_PCD_MANIP_REASSEM:
-+ if (p_ManipParams->h_NextManip)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("next manip with reassembly"));
-+ switch (p_ManipParams->u.reassem.hdr)
-+ {
-+ case (HEADER_TYPE_IPv4):
-+ p_Manip->reassmParams.hdr = HEADER_TYPE_IPv4;
-+ p_Manip->opcode = HMAN_OC_IP_REASSEMBLY;
-+ break;
-+ case (HEADER_TYPE_IPv6):
-+ p_Manip->reassmParams.hdr = HEADER_TYPE_IPv6;
-+ p_Manip->opcode = HMAN_OC_IP_REASSEMBLY;
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (HEADER_TYPE_CAPWAP):
-+ p_Manip->reassmParams.hdr = HEADER_TYPE_CAPWAP;
-+ p_Manip->opcode = HMAN_OC_CAPWAP_REASSEMBLY;
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("header for reassembly"));
-+ }
-+ break;
-+ case e_FM_PCD_MANIP_FRAG:
-+ if (p_ManipParams->h_NextManip)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("next manip with fragmentation"));
-+ switch (p_ManipParams->u.frag.hdr)
-+ {
-+ case (HEADER_TYPE_IPv4):
-+ case (HEADER_TYPE_IPv6):
-+ p_Manip->opcode = HMAN_OC_IP_FRAGMENTATION;
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (HEADER_TYPE_CAPWAP):
-+ p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION;
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("header for fragmentation"));
-+ }
-+ p_Manip->muramAllocate = TRUE;
-+ break;
-+ case e_FM_PCD_MANIP_SPECIAL_OFFLOAD:
-+ switch (p_ManipParams->u.specialOffload.type)
-+ {
-+ case (e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC):
-+ p_Manip->opcode = HMAN_OC_IPSEC_MANIP;
-+ p_Manip->muramAllocate = TRUE;
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP):
-+ p_Manip->opcode = HMAN_OC_CAPWAP_MANIP;
-+ p_Manip->muramAllocate = TRUE;
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("special offload type"));
-+ }
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("manip type"));
-+ }
-+
-+ return E_OK;
-+}
-+#endif /* not (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+
-+static t_Error UpdateIndxStats(t_Handle h_FmPcd,
-+ t_Handle h_FmPort,
-+ t_FmPcdManip *p_Manip)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint32_t tmpReg32 = 0;
-+ t_AdOfTypeContLookup *p_Ad;
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+ if (p_Manip->h_FmPcd != h_FmPcd)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+                ("the PCD handle differs from the one this manipulation was initialized with"));
-+
-+ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
-+
-+ if (!p_Manip->p_StatsTbl)
-+ {
-+
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN;
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_CC;
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ tmpReg32 = GET_UINT32(p_Ad->ccAdBase);
-+
-+ p_Manip->p_StatsTbl =
-+ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)p_Manip->owner * FM_PCD_MANIP_INDEXED_STATS_ENTRY_SIZE,
-+ 4);
-+ if (!p_Manip->p_StatsTbl)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Manipulation indexed statistics table"));
-+
-+ MemSet8(p_Manip->p_StatsTbl, 0, (uint32_t)(p_Manip->owner * 4));
-+
-+ tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->p_StatsTbl) - p_FmPcd->physicalMuramBase);
-+
-+ if (p_Manip->cnia)
-+ tmpReg32 |= FM_PCD_MANIP_INDEXED_STATS_CNIA;
-+
-+ tmpReg32 |= FM_PCD_MANIP_INDEXED_STATS_DPD;
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+ }
-+ else
-+ {
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN;
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_CC;
-+ err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error RmvHdrTillSpecLocNOrInsrtIntFrmHdr(t_FmPcdManipHdrRmvParams *p_ManipParams, t_FmPcdManip *p_Manip)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ uint32_t tmpReg32 = 0;
-+ uint8_t prsArrayOffset = 0;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip,E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+ if (p_Manip->rmv)
-+ {
-+ err = GetPrOffsetByHeaderOrField(&p_ManipParams->u.byHdr.u.fromStartByHdr.hdrInfo, &prsArrayOffset);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ tmpReg32 |= (uint32_t)prsArrayOffset << 24;
-+ tmpReg32 |= HMAN_RMV_HDR;
-+ }
-+
-+ if (p_Manip->insrt)
-+ tmpReg32 |= HMAN_INSRT_INT_FRM_HDR;
-+
-+ tmpReg32 |= (uint32_t)HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR;
-+
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+
-+ return E_OK;
-+}
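The pcAndOffsets word written above is a packed field: the parse-result offset sits in the top byte and the opcode and flags in the low bits. The sketch below illustrates that packing; the HMAN_* constant values are placeholders for illustration, not the real hardware encodings.

#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings - illustrative only, not the actual register values */
#define HMAN_RMV_HDR_EX            0x00008000u
#define HMAN_INSRT_INT_FRM_HDR_EX  0x00004000u
#define HMAN_OC_RMV_N_OR_INSRT_EX  0x0000002Au

int main(void)
{
    uint8_t  prsArrayOffset = 0x20;   /* would come from GetPrOffsetByHeaderOrField() in the driver */
    uint32_t pcAndOffsets = 0;

    pcAndOffsets |= (uint32_t)prsArrayOffset << 24;   /* parse-result offset in the top byte */
    pcAndOffsets |= HMAN_RMV_HDR_EX;                  /* remove-header flag */
    pcAndOffsets |= HMAN_INSRT_INT_FRM_HDR_EX;        /* insert internal frame header flag */
    pcAndOffsets |= HMAN_OC_RMV_N_OR_INSRT_EX;        /* opcode in the low bits */

    printf("pcAndOffsets = 0x%08x\n", pcAndOffsets);
    return 0;
}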
-+
-+static t_Error MvIntFrameHeaderFromFrameToBufferPrefix(t_FmPcdManip *p_Manip,
-+ bool caamUsed)
-+{
-+ t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+ uint32_t tmpReg32 = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Ad, E_INVALID_HANDLE);
-+
-+ p_Manip->updateParams |= OFFSET_OF_PR | INTERNAL_CONTEXT_OFFSET;
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ *(uint32_t *)&p_Ad->ccAdBase = tmpReg32;
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX;
-+ tmpReg32 |= (uint32_t)0x16 << 16;
-+ *(uint32_t *)&p_Ad->pcAndOffsets = tmpReg32;
-+
-+ if (caamUsed)
-+ *(uint32_t *)&p_Ad->gmask = 0xf0000000;
-+
-+ return E_OK;
-+}
-+
-+static t_Error CapwapRmvDtlsHdr(t_FmPcd *p_FmPcd, t_FmPcdManip *p_Manip)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ uint32_t tmpReg32 = 0;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST;
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+
-+
-+ if (p_Manip->h_Frag)
-+ {
-+ p_Manip->updateParams |= INTERNAL_CONTEXT_OFFSET;
-+ tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->h_Frag) - (p_FmPcd->physicalMuramBase));
-+ }
-+
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+
-+ return err;
-+}
-+
-+static t_Error CapwapReassembly(t_CapwapReassemblyParams *p_ManipParams,
-+ t_FmPcdManip *p_Manip,
-+ t_FmPcd *p_FmPcd,
-+ uint8_t poolId)
-+{
-+ t_Handle p_Table;
-+ uint32_t tmpReg32 = 0;
-+ int i = 0;
-+ uint8_t log2Num;
-+ uint8_t numOfSets;
-+ uint32_t j = 0;
-+ uint32_t bitFor1Micro;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ if (!p_FmPcd->h_Hc)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("hc port has to be initialized in this mode"));
-+ if (!POWER_OF_2(p_ManipParams->timeoutRoutineRequestTime))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("timeoutRoutineRequestTime has to be power of 2"));
-+ if (!POWER_OF_2(p_ManipParams->maxNumFramesInProcess))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("maxNumFramesInProcess has to be power of 2"));
-+ if (!p_ManipParams->timeoutRoutineRequestTime && p_ManipParams->timeoutThresholdForReassmProcess)
-+        DBG(WARNING, ("if timeoutRoutineRequestTime is 0, timeoutThresholdForReassmProcess is useless"));
-+ if (p_ManipParams->numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH)
-+ {
-+ if ((p_ManipParams->maxNumFramesInProcess < 4) ||
-+ (p_ManipParams->maxNumFramesInProcess > 512))
-+            RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("In the case of numOfFramesPerHashEntry = e_FM_PCD_MANIP_FOUR_WAYS_HASH maxNumFramesInProcess has to be in the range 4-512"));
-+ }
-+ else
-+ {
-+ if ((p_ManipParams->maxNumFramesInProcess < 8) ||
-+ (p_ManipParams->maxNumFramesInProcess > 2048))
-+            RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("In the case of numOfFramesPerHashEntry = e_FM_PCD_MANIP_EIGHT_WAYS_HASH maxNumFramesInProcess has to be in the range 8-2048"));
-+ }
-+
-+ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
-+ if (bitFor1Micro == 0)
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
-+
-+ p_Manip->updateParams |= (NUM_OF_TASKS | OFFSET_OF_PR | OFFSET_OF_DATA | HW_PORT_ID);
-+
-+ p_Manip->h_Frag = (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE,
-+ FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN);
-+ if (!p_Manip->h_Frag)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc CAPWAP reassembly parameters table"));
-+
-+ MemSet8(p_Manip->h_Frag, 0, FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE);
-+
-+ p_Table = (t_CapwapReasmPram *)p_Manip->h_Frag;
-+
-+ p_Manip->capwapFragParams.p_AutoLearnHashTbl =
-+ (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ (uint32_t)(p_ManipParams->maxNumFramesInProcess * 2 * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE),
-+ FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN);
-+
-+ if (!p_Manip->capwapFragParams.p_AutoLearnHashTbl)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,("MURAM alloc for CAPWAP automatic learning hash table"));
-+
-+ MemSet8(p_Manip->capwapFragParams.p_AutoLearnHashTbl, 0, (uint32_t)(p_ManipParams->maxNumFramesInProcess * 2 * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE));
-+
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->capwapFragParams.p_AutoLearnHashTbl) - p_FmPcd->physicalMuramBase);
-+
-+ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->autoLearnHashTblPtr, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ if (p_ManipParams->timeOutMode == e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES)
-+ tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_BETWEEN_FRAMES;
-+ if (p_ManipParams->haltOnDuplicationFrag)
-+ tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_HALT_ON_DUPLICATE_FRAG;
-+ if (p_ManipParams->numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH)
-+ {
-+ i = 8;
-+ tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_AUTOMATIC_LEARNIN_HASH_8_WAYS;
-+ }
-+ else
-+ i = 4;
-+
-+ numOfSets = (uint8_t)((p_ManipParams->maxNumFramesInProcess * 2) / i);
-+ LOG2(numOfSets, log2Num);
-+ tmpReg32 |= (uint32_t)(log2Num - 1) << 24;
-+
-+ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->mode, tmpReg32);
-+
-+ for (j=0; j<p_ManipParams->maxNumFramesInProcess*2; j++)
-+ if (((j / i) % 2)== 0)
-+ WRITE_UINT32(*(uint32_t *)PTR_MOVE(p_Manip->capwapFragParams.p_AutoLearnHashTbl, j * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE), 0x80000000);
-+
-+ tmpReg32 = 0x00008000;
-+ tmpReg32 |= (uint32_t)poolId << 16;
-+ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->bufferPoolIdAndRisc1SetIndexes, tmpReg32);
-+ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->risc23SetIndexes, 0x80008000);
-+ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->risc4SetIndexesAndExtendedStatsTblPtr, 0x80000000);
-+
-+ p_Manip->capwapFragParams.maxNumFramesInProcess = p_ManipParams->maxNumFramesInProcess;
-+
-+ p_Manip->capwapFragParams.sgBpid = poolId;
-+
-+ p_Manip->capwapFragParams.fqidForTimeOutFrames = p_ManipParams->fqidForTimeOutFrames;
-+ p_Manip->capwapFragParams.timeoutRoutineRequestTime = p_ManipParams->timeoutRoutineRequestTime;
-+ p_Manip->capwapFragParams.bitFor1Micro = bitFor1Micro;
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= (((uint32_t)1<<p_Manip->capwapFragParams.bitFor1Micro) * p_ManipParams->timeoutThresholdForReassmProcess);
-+ WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->expirationDelay, tmpReg32);
-+
-+ return E_OK;
-+}
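The mode word written by the CAPWAP reassembly setup above encodes the auto-learn hash geometry: the set count is (maxNumFramesInProcess * 2) / ways, and its log2 minus one is shifted into bits 31:24. A standalone sketch of that calculation; the frame count and way count are example inputs only.

#include <stdint.h>
#include <stdio.h>

static uint32_t log2_u32(uint32_t v)     /* floor(log2(v)), v > 0 */
{
    uint32_t r = 0;
    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    uint32_t maxNumFramesInProcess = 64; /* must be a power of two, per the checks above */
    uint32_t ways = 8;                   /* 4 or 8 in the driver */

    uint32_t numOfSets = (maxNumFramesInProcess * 2) / ways;
    uint32_t modeBits  = (log2_u32(numOfSets) - 1) << 24;

    printf("numOfSets=%u mode bits [31:24] = 0x%08x\n", numOfSets, modeBits);
    return 0;
}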
-+
-+static t_Error CapwapFragmentation(t_CapwapFragmentationParams *p_ManipParams,
-+ t_FmPcdManip *p_Manip,
-+ t_FmPcd *p_FmPcd,
-+ uint8_t poolId)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ uint32_t tmpReg32 = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
-+
-+ p_Manip->updateParams |= OFFSET_OF_DATA;
-+
-+ p_Manip->frag = TRUE;
-+
-+ p_Manip->h_Frag = (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_Manip->h_Frag)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP fragmentation table descriptor"));
-+
-+ MemSet8(p_Manip->h_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag;
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_FRAGMENTATION;
-+
-+ if (p_ManipParams->headerOptionsCompr)
-+ tmpReg32 |= FM_PCD_MANIP_CAPWAP_FRAG_COMPR_OPTION_FIELD_EN;
-+ tmpReg32 |= ((uint32_t)poolId << 8);
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+
-+ p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation;
-+ p_Manip->capwapFragParams.sgBpid = poolId;
-+
-+ return E_OK;
-+}
-+
-+static t_Error IndxStats(t_FmPcdStatsParams *p_StatsParams,t_FmPcdManip *p_Manip,t_FmPcd *p_FmPcd)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ uint32_t tmpReg32 = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
-+
-+ UNUSED(p_FmPcd);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_INDEXED_STATS;
-+ if (p_StatsParams->type == e_FM_PCD_STATS_PER_FLOWID)
-+ tmpReg32 |= (uint32_t)0x16 << 16;
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+
-+ return E_OK;
-+}
-+
-+static t_Error InsrtHdrByTempl(t_FmPcdManipHdrInsrtParams *p_ManipParams, t_FmPcdManip *p_Manip, t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdManipHdrInsrtByTemplateParams *p_InsrtByTemplate = &p_ManipParams->u.byTemplate;
-+ uint8_t tmpReg8 = 0xff;
-+ t_AdOfTypeContLookup *p_Ad;
-+ bool ipModify = FALSE;
-+ uint32_t tmpReg32 = 0, tmpRegNia = 0;
-+ uint16_t tmpReg16 = 0;
-+ t_Error err = E_OK;
-+ uint8_t extraAddedBytes = 0, blockSize = 0, extraAddedBytesAlignedToBlockSize = 0, log2Num = 0;
-+ uint8_t *p_Template = NULL;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip,E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd,E_NULL_POINTER);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+ if (p_Manip->insrt)
-+ {
-+ if ((!p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterIp) ||
-+ (!p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterVlan))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : asking for header template modifications with no template for insertion (template size)"));
-+
-+ if (p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterIp && (p_InsrtByTemplate->size <= p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : size of template < ipOuterOffset"));
-+
-+ if (p_InsrtByTemplate->size > 128)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Size of header template for insertion can not be more than 128"));
-+
-+ if (p_InsrtByTemplate->size)
-+ {
-+ p_Manip->p_Template = (uint8_t *)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
-+ p_InsrtByTemplate->size,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if(!p_Manip->p_Template)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation in MURAM FAILED"));
-+
-+ tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->p_Template) - (p_FmPcd->physicalMuramBase));
-+ tmpReg32 |= (uint32_t)p_InsrtByTemplate->size << 24;
-+ *(uint32_t *)&p_Ad->matchTblPtr = tmpReg32;
-+ }
-+
-+ tmpReg32 = 0;
-+
-+ p_Template = (uint8_t *)XX_Malloc(p_InsrtByTemplate->size * sizeof(uint8_t));
-+
-+ if (!p_Template)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("XX_Malloc allocation FAILED"));
-+
-+ memcpy(p_Template, p_InsrtByTemplate->hdrTemplate, p_InsrtByTemplate->size * sizeof(uint8_t));
-+
-+ if (p_InsrtByTemplate->modifyOuterIp)
-+ {
-+ ipModify = TRUE;
-+
-+ tmpReg8 = (uint8_t)p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset];
-+
-+ if((tmpReg8 & 0xf0) == 0x40)
-+ tmpReg8 = 4;
-+ else if((tmpReg8 & 0xf0) == 0x60)
-+ tmpReg8 = 6;
-+ else
-+ tmpReg8 = 0xff;
-+
-+ if (tmpReg8 != 0xff)
-+ {
-+ if(p_InsrtByTemplate->modifyOuterIpParams.dscpEcn & 0xff00)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : IPV4 present in header template, dscpEcn has to be only 1 byte"));
-+ if(p_InsrtByTemplate->modifyOuterIpParams.recalculateLength)
-+ {
-+
-+ if((p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize + p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedNotAlignedToBlockSize) > 255)
-+                    RETURN_ERROR(MAJOR, E_INVALID_STATE, ("extra bytes added can not be more than 255 bytes"));
-+ extraAddedBytes = (uint8_t) (p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize + p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedNotAlignedToBlockSize);
-+ blockSize = p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.blockSize;
-+ extraAddedBytesAlignedToBlockSize = p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize;
-+ /* IP header template - IP totalLength:
-+ (1st byte) extraByteForIp = headerTemplateSize - ipOffset + insertedBytesAfterThisStage;
-+ in the case of SEC, insertedBytesAfterThisStage = SEC trailer (21/31) + header (13).
-+ (2nd byte) extraByteForIp = headerTemplateSize - ipOffset + insertedBytesAfterThisStage */
-+ }
-+ if (blockSize)
-+ {
-+ if (!POWER_OF_2(blockSize))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("inputFrmPaddingUpToBlockSize has to be power of 2"));
-+ }
-+
-+ }
-+ if (tmpReg8 == 4)
-+ {
-+ if ((IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP + p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset) > p_InsrtByTemplate->size)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : IP present in header template, user asked for IP modifications but ipOffset + ipTotalLengthFieldOffset in header template bigger than template size"));
-+
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_DSCECN_FIELD_OFFSET_FROM_IP] = (uint8_t)p_InsrtByTemplate->modifyOuterIpParams.dscpEcn;
-+
-+ if (blockSize)
-+ blockSize -= 1;
-+
-+ if ((p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes) > 255)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes has to be less than 255"));
-+
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP + 1] = blockSize; // IPV6 - in AD instead of SEQ IND
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP] = (uint8_t)(p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes);// for IPv6, decrement an additional 40 bytes (the IPv6 header size)
-+
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_ID_FIELD_OFFSET_FROM_IP] = 0x00;
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_ID_FIELD_OFFSET_FROM_IP + 1] = extraAddedBytesAlignedToBlockSize;
-+
-+ /*IP header template - relevant only for ipv4 CheckSum = 0*/
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP] = 0x00;
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP + 1] = 0x00;
-+
-+ /*UDP checksum has to be 0*/
-+ if (p_InsrtByTemplate->modifyOuterIpParams.udpPresent)
-+ {
-+ if ((p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + UDP_CHECKSUM_FIELD_SIZE) > p_InsrtByTemplate->size)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : UDP present according to user but (UDP offset + UDP header size) < size of header template"));
-+
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP ] = 0x00;
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + 1] = 0x00;
-+
-+ }
-+
-+ if (p_InsrtByTemplate->modifyOuterIpParams.ipIdentGenId > 7)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("ipIdentGenId has to be one out of 8 sequence number generators (0 - 7) for IP identification field"));
-+
-+ tmpRegNia |= (uint32_t)p_InsrtByTemplate->modifyOuterIpParams.ipIdentGenId<<24;
-+ }
-+ else if (tmpReg8 == 6)
-+ {
-+ /*TODO - add check for maximum value of blockSize;*/
-+ if (blockSize)
-+ LOG2(blockSize, log2Num);
-+ tmpRegNia |= (uint32_t)log2Num << 24;
-+
-+ // for IPv6, decrement an additional 40 bytes (the IPv6 header size), because the IPv6 header size is not included in payloadLength
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP] = (uint8_t)(p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes - 40);
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP + 1] = extraAddedBytesAlignedToBlockSize;
-+ if (p_InsrtByTemplate->modifyOuterIpParams.udpPresent)
-+ {
-+ if ((p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + UDP_CHECKSUM_FIELD_SIZE) > p_InsrtByTemplate->size)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : UDP present according to user but (UDP offset + UDP header size) < size of header template"));
-+ if (p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_NEXT_HEADER_OFFSET_FROM_IP] != 0x88)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("OUr suppport is only IPv6/UDPLite"));
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_LENGTH_FIELD_OFFSET_FROM_UDP] = 0x00;
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_LENGTH_FIELD_OFFSET_FROM_UDP + 1] = 0x08;
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP] = 0x00;
-+ p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + 1] = 0x00;
-+ }
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("IP version supported only IPV4"));
-+ }
-+
-+ tmpReg32 = tmpReg16 = tmpReg8 = 0;
-+ /*TODO - check it*/
-+ if (p_InsrtByTemplate->modifyOuterVlan)
-+ {
-+ if (p_InsrtByTemplate->modifyOuterVlanParams.vpri & ~0x07)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,("Inconsistent parameters : user asked for VLAN modifications but VPRI more than 3 bits"));
-+
-+ memcpy(&tmpReg16, &p_Template[VLAN_TAG_FIELD_OFFSET_FROM_ETH], 2*(sizeof(uint8_t)));
-+ if ((tmpReg16 != 0x9100) && (tmpReg16!= 0x9200) && (tmpReg16 != 0x8100))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,("Inconsistent parameters : user asked for VLAN modifications but Tag Protocol identifier is not VLAN "));
-+
-+ memcpy(&tmpReg8, &p_Template[14],1*(sizeof(uint8_t)));
-+ tmpReg8 &= 0x1f;
-+ tmpReg8 |= (uint8_t)(p_InsrtByTemplate->modifyOuterVlanParams.vpri << 5);
-+
-+ p_Template[14] = tmpReg8;
-+ }
-+
-+ MemCpy8(p_Manip->p_Template, p_Template, p_InsrtByTemplate->size);
-+
-+ XX_Free(p_Template);
-+ }
-+
-+ tmpReg32 = 0;
-+ if (p_Manip->h_Frag)
-+ {
-+ tmpRegNia |= (uint32_t)(XX_VirtToPhys(p_Manip->h_Frag) - (p_FmPcd->physicalMuramBase));
-+ tmpReg32 |= (uint32_t)p_Manip->sizeForFragmentation << 16;
-+ }
-+ else
-+ tmpReg32 = 0xffff0000;
-+
-+ if (ipModify)
-+ tmpReg32 |= (uint32_t)p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset << 8;
-+ else
-+ tmpReg32 |= (uint32_t)0x0000ff00;
-+
-+ tmpReg32 |= (uint32_t)HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER;
-+ *(uint32_t *)&p_Ad->pcAndOffsets = tmpReg32;
-+
-+ tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ *(uint32_t *)&p_Ad->ccAdBase = tmpRegNia;
-+
-+ return err;
-+}
-+
-+static t_Error CheckStatsParamsAndSetType(t_FmPcdManip *p_Manip, t_FmPcdStatsParams *p_StatsParams)
-+{
-+
-+ switch (p_StatsParams->type)
-+ {
-+ case (e_FM_PCD_STATS_PER_FLOWID):
-+ p_Manip->opcode = HMAN_OC_CAPWAP_INDEXED_STATS;
-+ p_Manip->muramAllocate = TRUE;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported statistics type"));
-+ }
-+
-+ return E_OK;
-+}
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+
-+static t_Error FillReassmManipParams(t_FmPcdManip *p_Manip, e_NetHeaderType hdr)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
-+ uint32_t tmpReg32;
-+ t_Error err = E_OK;
-+
-+ /* Creates the Reassembly Parameters table. It contains parameters that are specific to either the IPv4 reassembly
-+ function or to the IPv6 reassembly function. If both IPv4 reassembly and IPv6 reassembly are required, then
-+ two separate IP Reassembly Parameter tables are required.*/
-+ if ((err = CreateReassTable(p_Manip, hdr)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ /* Sets the first Ad register (ccAdBase) - Action Descriptor Type and Pointer to the Reassembly Parameters Table offset from MURAM*/
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+
-+ /* Gets the required Action descriptor table pointer */
-+ switch (hdr)
-+ {
-+ case HEADER_TYPE_IPv4:
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->reassmParams.ip.h_Ipv4Ad;
-+ tmpReg32 |= (uint32_t)(XX_VirtToPhys(
-+ p_Manip->reassmParams.ip.p_Ipv4ReassTbl)
-+ - (p_FmPcd->physicalMuramBase));
-+ break;
-+ case HEADER_TYPE_IPv6:
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->reassmParams.ip.h_Ipv6Ad;
-+ tmpReg32 |= (uint32_t)(XX_VirtToPhys(
-+ p_Manip->reassmParams.ip.p_Ipv6ReassTbl)
-+ - (p_FmPcd->physicalMuramBase));
-+ break;
-+ case HEADER_TYPE_CAPWAP:
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->reassmParams.capwap.h_Ad;
-+ tmpReg32 |= (uint32_t)(XX_VirtToPhys(
-+ p_Manip->reassmParams.capwap.p_ReassTbl)
-+ - (p_FmPcd->physicalMuramBase));
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("header type"));
-+ }
-+
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+
-+ /* Sets the second Ad register (matchTblPtr) - Buffer pool ID (BPID for V2) and Scatter/Gather table offset*/
-+ /* mark the Scatter/Gather table offset to be set later on when the port will be known */
-+ p_Manip->updateParams = (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS | DISCARD_MASK);
-+
-+ if ((hdr == HEADER_TYPE_IPv6) || (hdr == HEADER_TYPE_IPv4))
-+ {
-+#if (DPAA_VERSION == 10)
-+ tmpReg32 = (uint32_t)(p_Manip->reassmParams.sgBpid << 8);
-+ WRITE_UINT32(p_Ad->matchTblPtr, tmpReg32);
-+#endif /* (DPAA_VERSION == 10) */
-+#if (DPAA_VERSION >= 11)
-+ if (p_Manip->reassmParams.ip.nonConsistentSpFqid != 0)
-+ {
-+ tmpReg32 = FM_PCD_AD_NCSPFQIDM_MASK
-+ | (uint32_t)(p_Manip->reassmParams.ip.nonConsistentSpFqid);
-+ WRITE_UINT32(p_Ad->gmask, tmpReg32);
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+ /* Sets the third Ad register (pcAndOffsets) - IP Reassembly Operation Code */
-+ tmpReg32 = 0;
-+ tmpReg32 |= (uint32_t)HMAN_OC_IP_REASSEMBLY;
-+ }
-+#if (DPAA_VERSION >= 11)
-+ else
-+ if (hdr == HEADER_TYPE_CAPWAP)
-+ {
-+ tmpReg32 = 0;
-+ tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_REASSEMBLY;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+
-+ p_Manip->reassm = TRUE;
-+
-+ return E_OK;
-+}
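A minimal sketch of the MURAM-offset pattern used above when filling AD words (the helper name is hypothetical and not part of this patch; XX_VirtToPhys() and physicalMuramBase are the driver's own):

    /* Hypothetical helper: AD words carry MURAM-relative offsets, i.e. the
     * physical address of a MURAM-resident object minus the MURAM physical
     * base, exactly as computed inline in FillReassmManipParams(). */
    static uint32_t MuramOffset(t_FmPcd *p_FmPcd, void *p_MuramObj)
    {
        return (uint32_t)(XX_VirtToPhys(p_MuramObj) - p_FmPcd->physicalMuramBase);
    }

    /* usage, e.g.: tmpReg32 |= MuramOffset(p_FmPcd, p_Manip->reassmParams.ip.p_Ipv4ReassTbl); */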
-+
-+static t_Error SetIpv4ReassmManip(t_FmPcdManip *p_Manip)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
-+
-+ /* Allocation of IPv4 Action descriptor */
-+ p_Manip->reassmParams.ip.h_Ipv4Ad = (t_Handle)XX_MallocSmart(
-+ FM_PCD_CC_AD_ENTRY_SIZE, p_Manip->reassmParams.dataMemId,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_Manip->reassmParams.ip.h_Ipv4Ad)
-+ {
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("Allocation of IPv4 table descriptor"));
-+ }
-+
-+ memset(p_Manip->reassmParams.ip.h_Ipv4Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Fill reassembly manipulation parameter in the IP Reassembly Action Descriptor */
-+ return FillReassmManipParams(p_Manip, HEADER_TYPE_IPv4);
-+}
-+
-+static t_Error SetIpv6ReassmManip(t_FmPcdManip *p_Manip)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
-+
-+ /* Allocation of IPv6 Action descriptor */
-+ p_Manip->reassmParams.ip.h_Ipv6Ad = (t_Handle)XX_MallocSmart(
-+ FM_PCD_CC_AD_ENTRY_SIZE, p_Manip->reassmParams.dataMemId,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_Manip->reassmParams.ip.h_Ipv6Ad)
-+ {
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("Allocation of IPv6 table descriptor"));
-+ }
-+
-+ memset(p_Manip->reassmParams.ip.h_Ipv6Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Fill reassembly manipulation parameter in the IP Reassembly Action Descriptor */
-+ return FillReassmManipParams(p_Manip, HEADER_TYPE_IPv6);
-+}
-+
-+static t_Error IpReassembly(t_FmPcdManipReassemParams *p_ManipReassmParams,
-+ t_FmPcdManip *p_Manip)
-+{
-+ uint32_t maxSetNumber = 10000;
-+ t_FmPcdManipReassemIpParams reassmManipParams =
-+ p_ManipReassmParams->u.ipReassem;
-+ t_Error res;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((t_FmPcd *)p_Manip->h_FmPcd)->h_Hc,
-+ E_INVALID_HANDLE);
-+
-+ /* Validate user's parameters. */
-+ if ((reassmManipParams.timeoutThresholdForReassmProcess < 1000)
-+ || (reassmManipParams.timeoutThresholdForReassmProcess > 8000000))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("timeoutThresholdForReassmProcess should be 1msec - 8sec"));
-+ /* It is recommended that the total number of entries in this table (number of sets * number of ways)
-+ will be twice the number of frames that are expected to be reassembled simultaneously.*/
-+ if (reassmManipParams.maxNumFramesInProcess
-+ > (reassmManipParams.maxNumFramesInProcess * maxSetNumber / 2))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("maxNumFramesInProcess has to be less than (maximun set number * number of ways / 2)"));
-+
-+ if ((p_ManipReassmParams->hdr == HEADER_TYPE_IPv6)
-+ && (reassmManipParams.minFragSize[1] < 256))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("minFragSize[1] must be >= 256"));
-+
-+ /* Saves user's reassembly manipulation parameters */
-+ p_Manip->reassmParams.ip.relativeSchemeId[0] =
-+ reassmManipParams.relativeSchemeId[0];
-+ p_Manip->reassmParams.ip.relativeSchemeId[1] =
-+ reassmManipParams.relativeSchemeId[1];
-+ p_Manip->reassmParams.ip.numOfFramesPerHashEntry[0] =
-+ reassmManipParams.numOfFramesPerHashEntry[0];
-+ p_Manip->reassmParams.ip.numOfFramesPerHashEntry[1] =
-+ reassmManipParams.numOfFramesPerHashEntry[1];
-+ p_Manip->reassmParams.ip.minFragSize[0] = reassmManipParams.minFragSize[0];
-+ p_Manip->reassmParams.ip.minFragSize[1] = reassmManipParams.minFragSize[1];
-+ p_Manip->reassmParams.maxNumFramesInProcess =
-+ reassmManipParams.maxNumFramesInProcess;
-+ p_Manip->reassmParams.timeOutMode = reassmManipParams.timeOutMode;
-+ p_Manip->reassmParams.fqidForTimeOutFrames =
-+ reassmManipParams.fqidForTimeOutFrames;
-+ p_Manip->reassmParams.timeoutThresholdForReassmProcess =
-+ reassmManipParams.timeoutThresholdForReassmProcess;
-+ p_Manip->reassmParams.dataMemId = reassmManipParams.dataMemId;
-+ p_Manip->reassmParams.dataLiodnOffset = reassmManipParams.dataLiodnOffset;
-+#if (DPAA_VERSION == 10)
-+ p_Manip->reassmParams.sgBpid = reassmManipParams.sgBpid;
-+#endif /* (DPAA_VERSION == 10) */
-+#if (DPAA_VERSION >= 11)
-+ if (reassmManipParams.nonConsistentSpFqid != 0)
-+ {
-+ p_Manip->reassmParams.ip.nonConsistentSpFqid =
-+ reassmManipParams.nonConsistentSpFqid;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* Creates and initializes the IP Reassembly common parameter table */
-+ CreateReassCommonTable(p_Manip);
-+
-+ /* Creation of IPv4 reassembly manipulation */
-+ if ((p_Manip->reassmParams.hdr == HEADER_TYPE_IPv6)
-+ || (p_Manip->reassmParams.hdr == HEADER_TYPE_IPv4))
-+ {
-+ res = SetIpv4ReassmManip(p_Manip);
-+ if (res != E_OK)
-+ return res;
-+ }
-+
-+ /* Creation of IPv6 reassembly manipulation */
-+ if (p_Manip->reassmParams.hdr == HEADER_TYPE_IPv6)
-+ {
-+ res = SetIpv6ReassmManip(p_Manip);
-+ if (res != E_OK)
-+ return res;
-+ }
-+
-+ return E_OK;
-+}
-+
-+static void setIpReassmSchemeParams(t_FmPcd* p_FmPcd,
-+ t_FmPcdKgSchemeParams *p_Scheme,
-+ t_Handle h_CcTree, bool ipv4,
-+ uint8_t groupId)
-+{
-+ uint32_t j;
-+ uint8_t res;
-+
-+ /* Configures scheme's network environment parameters */
-+ p_Scheme->netEnvParams.numOfDistinctionUnits = 2;
-+ if (ipv4)
-+ res = FmPcdNetEnvGetUnitId(
-+ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
-+ HEADER_TYPE_IPv4, FALSE, 0);
-+ else
-+ res = FmPcdNetEnvGetUnitId(
-+ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
-+ HEADER_TYPE_IPv6, FALSE, 0);
-+ ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+ p_Scheme->netEnvParams.unitIds[0] = res;
-+
-+ res = FmPcdNetEnvGetUnitId(
-+ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
-+ HEADER_TYPE_USER_DEFINED_SHIM2, FALSE, 0);
-+ ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+ p_Scheme->netEnvParams.unitIds[1] = res;
-+
-+ /* Configures scheme's next engine parameters*/
-+ p_Scheme->nextEngine = e_FM_PCD_CC;
-+ p_Scheme->kgNextEngineParams.cc.h_CcTree = h_CcTree;
-+ p_Scheme->kgNextEngineParams.cc.grpId = groupId;
-+ p_Scheme->useHash = TRUE;
-+
-+ /* Configures scheme's key*/
-+ if (ipv4 == TRUE)
-+ {
-+ p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].type =
-+ e_FM_PCD_EXTRACT_BY_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.type =
-+ e_FM_PCD_EXTRACT_FULL_FIELD;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.hdr =
-+ HEADER_TYPE_IPv4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.extractByHdrType.fullField.ipv4 =
-+ NET_HEADER_FIELD_IPv4_DST_IP;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].type =
-+ e_FM_PCD_EXTRACT_BY_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.type =
-+ e_FM_PCD_EXTRACT_FULL_FIELD;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.hdr =
-+ HEADER_TYPE_IPv4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.extractByHdrType.fullField.ipv4 =
-+ NET_HEADER_FIELD_IPv4_SRC_IP;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].type =
-+ e_FM_PCD_EXTRACT_BY_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.type =
-+ e_FM_PCD_EXTRACT_FULL_FIELD;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.hdr =
-+ HEADER_TYPE_IPv4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fullField.ipv4 =
-+ NET_HEADER_FIELD_IPv4_PROTO;
-+ p_Scheme->keyExtractAndHashParams.extractArray[3].type =
-+ e_FM_PCD_EXTRACT_BY_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.hdr =
-+ HEADER_TYPE_IPv4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.type =
-+ e_FM_PCD_EXTRACT_FROM_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.ignoreProtocolValidation =
-+ FALSE;
-+ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.extractByHdrType.fromHdr.size =
-+ 2;
-+ p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.extractByHdrType.fromHdr.offset =
-+ 4;
-+ }
-+ else /* IPv6 */
-+ {
-+ p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 3;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].type =
-+ e_FM_PCD_EXTRACT_BY_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.type =
-+ e_FM_PCD_EXTRACT_FULL_FIELD;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.hdr =
-+ HEADER_TYPE_IPv6;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.extractByHdrType.fullField.ipv6 =
-+ NET_HEADER_FIELD_IPv6_DST_IP;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].type =
-+ e_FM_PCD_EXTRACT_BY_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.type =
-+ e_FM_PCD_EXTRACT_FULL_FIELD;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.hdr =
-+ HEADER_TYPE_IPv6;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.extractByHdrType.fullField.ipv6 =
-+ NET_HEADER_FIELD_IPv6_SRC_IP;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].type =
-+ e_FM_PCD_EXTRACT_BY_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.hdr =
-+ HEADER_TYPE_USER_DEFINED_SHIM2;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.type =
-+ e_FM_PCD_EXTRACT_FROM_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fromHdr.size =
-+ 4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fromHdr.offset =
-+ 4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.ignoreProtocolValidation =
-+ TRUE;
-+ }
-+
-+ p_Scheme->keyExtractAndHashParams.privateDflt0 = 0x01020304;
-+ p_Scheme->keyExtractAndHashParams.privateDflt1 = 0x11121314;
-+ p_Scheme->keyExtractAndHashParams.numOfUsedDflts =
-+ FM_PCD_KG_NUM_OF_DEFAULT_GROUPS;
-+ for (j = 0; j < FM_PCD_KG_NUM_OF_DEFAULT_GROUPS; j++)
-+ {
-+ p_Scheme->keyExtractAndHashParams.dflts[j].type =
-+ (e_FmPcdKgKnownFieldsDfltTypes)j; /* all types */
-+ p_Scheme->keyExtractAndHashParams.dflts[j].dfltSelect =
-+ e_FM_PCD_KG_DFLT_GBL_0;
-+ }
-+}
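For readability, the look-up key composed above can be summarized as follows (an interpretation added here, not text from the patch; the SHIM2 mapping is an assumption):

    /* IPv4 reassembly key: DST addr, SRC addr, PROTO, plus 2 bytes at offset 4
     * of the IPv4 header - the 16-bit Identification field.
     * IPv6 reassembly key: DST addr, SRC addr, plus 4 bytes at offset 4 of
     * HEADER_TYPE_USER_DEFINED_SHIM2 (presumably the Fragment extension
     * header) - its 32-bit Identification field. */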
-+
-+static t_Error IpReassemblyStats(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipReassemIpStats *p_Stats)
-+{
-+ ASSERT_COND(p_Manip);
-+ ASSERT_COND(p_Stats);
-+ ASSERT_COND(p_Manip->reassmParams.p_ReassCommonTbl);
-+
-+ p_Stats->timeout =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalTimeOutCounter);
-+ p_Stats->rfdPoolBusy =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalRfdPoolBusyCounter);
-+ p_Stats->internalBufferBusy =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalInternalBufferBusy);
-+ p_Stats->externalBufferBusy =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalExternalBufferBusy);
-+ p_Stats->sgFragments =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalSgFragmentCounter);
-+ p_Stats->dmaSemaphoreDepletion =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalDmaSemaphoreDepletionCounter);
-+#if (DPAA_VERSION >= 11)
-+ p_Stats->nonConsistentSp =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalNCSPCounter);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (p_Manip->reassmParams.ip.p_Ipv4ReassTbl)
-+ {
-+ p_Stats->specificHdrStatistics[0].successfullyReassembled =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalSuccessfullyReasmFramesCounter);
-+ p_Stats->specificHdrStatistics[0].validFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalValidFragmentCounter);
-+ p_Stats->specificHdrStatistics[0].processedFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalProcessedFragCounter);
-+ p_Stats->specificHdrStatistics[0].malformedFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalMalformdFragCounter);
-+ p_Stats->specificHdrStatistics[0].autoLearnBusy =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalSetBusyCounter);
-+ p_Stats->specificHdrStatistics[0].discardedFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalDiscardedFragsCounter);
-+ p_Stats->specificHdrStatistics[0].moreThan16Fragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv4ReassTbl->totalMoreThan16FramesCounter);
-+ }
-+ if (p_Manip->reassmParams.ip.p_Ipv6ReassTbl)
-+ {
-+ p_Stats->specificHdrStatistics[1].successfullyReassembled =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalSuccessfullyReasmFramesCounter);
-+ p_Stats->specificHdrStatistics[1].validFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalValidFragmentCounter);
-+ p_Stats->specificHdrStatistics[1].processedFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalProcessedFragCounter);
-+ p_Stats->specificHdrStatistics[1].malformedFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalMalformdFragCounter);
-+ p_Stats->specificHdrStatistics[1].autoLearnBusy =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalSetBusyCounter);
-+ p_Stats->specificHdrStatistics[1].discardedFragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalDiscardedFragsCounter);
-+ p_Stats->specificHdrStatistics[1].moreThan16Fragments =
-+ GET_UINT32(p_Manip->reassmParams.ip.p_Ipv6ReassTbl->totalMoreThan16FramesCounter);
-+ }
-+ return E_OK;
-+}
-+
-+static t_Error IpFragmentationStats(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipFragIpStats *p_Stats)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+
-+ ASSERT_COND(p_Manip);
-+ ASSERT_COND(p_Stats);
-+ ASSERT_COND(p_Manip->h_Ad);
-+ ASSERT_COND(p_Manip->fragParams.p_Frag);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+
-+ p_Stats->totalFrames = GET_UINT32(p_Ad->gmask);
-+ p_Stats->fragmentedFrames = GET_UINT32(p_Manip->fragParams.p_Frag->ccAdBase)
-+ & 0x00ffffff;
-+ p_Stats->generatedFragments =
-+ GET_UINT32(p_Manip->fragParams.p_Frag->matchTblPtr);
-+
-+ return E_OK;
-+}
-+
-+static t_Error IpFragmentation(t_FmPcdManipFragIpParams *p_ManipParams,
-+ t_FmPcdManip *p_Manip)
-+{
-+ uint32_t pcAndOffsetsReg = 0, ccAdBaseReg = 0, gmaskReg = 0;
-+ t_FmPcd *p_FmPcd;
-+#if (DPAA_VERSION == 10)
-+ t_Error err = E_OK;
-+#endif /* (DPAA_VERSION == 10) */
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_ManipParams->sizeForFragmentation != 0xFFFF,
-+ E_INVALID_VALUE);
-+
-+ p_FmPcd = p_Manip->h_FmPcd;
-+ /* Allocation of fragmentation Action Descriptor */
-+ p_Manip->fragParams.p_Frag = (t_AdOfTypeContLookup *)FM_MURAM_AllocMem(
-+ p_FmPcd->h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_Manip->fragParams.p_Frag)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM alloc for Fragmentation table descriptor"));
-+ MemSet8(p_Manip->fragParams.p_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Prepare the third Ad register (pcAndOffsets)- OperationCode */
-+ pcAndOffsetsReg = (uint32_t)HMAN_OC_IP_FRAGMENTATION;
-+
-+ /* Prepare the first Ad register (ccAdBase) - Don't frag action and Action descriptor type*/
-+ ccAdBaseReg = FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ ccAdBaseReg |= (p_ManipParams->dontFragAction
-+ << FM_PCD_MANIP_IP_FRAG_DF_SHIFT);
-+
-+
-+ /* Set Scatter/Gather BPid */
-+ if (p_ManipParams->sgBpidEn)
-+ {
-+ ccAdBaseReg |= FM_PCD_MANIP_IP_FRAG_SG_BDID_EN;
-+ pcAndOffsetsReg |= ((p_ManipParams->sgBpid
-+ << FM_PCD_MANIP_IP_FRAG_SG_BDID_SHIFT)
-+ & FM_PCD_MANIP_IP_FRAG_SG_BDID_MASK);
-+ }
-+
-+ /* Prepare the first Ad register (gmask) - scratch buffer pool id and Pointer to fragment ID */
-+ gmaskReg = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr))
-+ - p_FmPcd->physicalMuramBase);
-+#if (DPAA_VERSION == 10)
-+ gmaskReg |= p_ManipParams->scratchBpid << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID;
-+#else
-+ gmaskReg |= (0xFF) << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID;
-+#endif /* (DPAA_VERSION == 10) */
-+
-+ /* Set all Ad registers */
-+ WRITE_UINT32(p_Manip->fragParams.p_Frag->pcAndOffsets, pcAndOffsetsReg);
-+ WRITE_UINT32(p_Manip->fragParams.p_Frag->ccAdBase, ccAdBaseReg);
-+ WRITE_UINT32(p_Manip->fragParams.p_Frag->gmask, gmaskReg);
-+
-+ /* Saves user's fragmentation manipulation parameters */
-+ p_Manip->frag = TRUE;
-+ p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation;
-+
-+#if (DPAA_VERSION == 10)
-+ p_Manip->fragParams.scratchBpid = p_ManipParams->scratchBpid;
-+
-+ /* scratch buffer pool initialization */
-+ if ((err = FmPcdFragHcScratchPoolFill((t_Handle)p_FmPcd, p_ManipParams->scratchBpid)) != E_OK)
-+ {
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_Frag);
-+ p_Manip->fragParams.p_Frag = NULL;
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+#endif /* (DPAA_VERSION == 10) */
-+
-+ return E_OK;
-+}
-+
-+static t_Error IPManip(t_FmPcdManip *p_Manip)
-+{
-+ t_Error err = E_OK;
-+ t_FmPcd *p_FmPcd;
-+ t_AdOfTypeContLookup *p_Ad;
-+ uint32_t tmpReg32 = 0, tmpRegNia = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ p_FmPcd = p_Manip->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+
-+ tmpReg32 = FM_PCD_MANIP_IP_NO_FRAGMENTATION;
-+ if (p_Manip->frag == TRUE)
-+ {
-+ tmpRegNia = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_Frag)
-+ - (p_FmPcd->physicalMuramBase));
-+ tmpReg32 = (uint32_t)p_Manip->sizeForFragmentation
-+ << FM_PCD_MANIP_IP_MTU_SHIFT;
-+ }
-+
-+ tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ tmpReg32 |= HMAN_OC_IP_MANIP;
-+
-+#if (DPAA_VERSION >= 11)
-+ tmpRegNia |= FM_PCD_MANIP_IP_CNIA;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpRegNia);
-+ WRITE_UINT32(p_Ad->gmask, 0);
-+ /* Total frame counter - MUST be initialized to zero.*/
-+
-+ return err;
-+}
-+
-+static t_Error UpdateInitIpFrag(t_Handle h_FmPcd, t_Handle h_PcdParams,
-+ t_Handle h_FmPort, t_FmPcdManip *p_Manip,
-+ t_Handle h_Ad, bool validate)
-+{
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_IP_FRAGMENTATION),
-+ E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+
-+ UNUSED(h_FmPcd);
-+ UNUSED(h_Ad);
-+ UNUSED(h_PcdParams);
-+ UNUSED(validate);
-+ UNUSED(p_Manip);
-+
-+ fmPortGetSetCcParams.setCcParams.type = 0;
-+ fmPortGetSetCcParams.getCcParams.type = MANIP_EXTRA_SPACE;
-+ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (!fmPortGetSetCcParams.getCcParams.internalBufferOffset)
-+ DBG(WARNING, ("manipExtraSpace must be larger than '0'"));
-+
-+ return E_OK;
-+}
-+
-+static t_Error IPSecManip(t_FmPcdManipParams *p_ManipParams,
-+ t_FmPcdManip *p_Manip)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ t_FmPcdManipSpecialOffloadIPSecParams *p_IPSecParams;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg32 = 0;
-+ uint32_t power;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_ManipParams, E_INVALID_HANDLE);
-+
-+ p_IPSecParams = &p_ManipParams->u.specialOffload.u.ipsec;
-+
-+ SANITY_CHECK_RETURN_ERROR(
-+ !p_IPSecParams->variableIpHdrLen || p_IPSecParams->decryption,
-+ E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ !p_IPSecParams->variableIpVersion || !p_IPSecParams->decryption,
-+ E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ !p_IPSecParams->variableIpVersion || p_IPSecParams->outerIPHdrLen,
-+ E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ !p_IPSecParams->arwSize || p_IPSecParams->arwAddr,
-+ E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ !p_IPSecParams->arwSize || p_IPSecParams->decryption,
-+ E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR((p_IPSecParams->arwSize % 16) == 0, E_INVALID_VALUE);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ tmpReg32 |= (p_IPSecParams->decryption) ? FM_PCD_MANIP_IPSEC_DEC : 0;
-+ tmpReg32 |= (p_IPSecParams->ecnCopy) ? FM_PCD_MANIP_IPSEC_ECN_EN : 0;
-+ tmpReg32 |= (p_IPSecParams->dscpCopy) ? FM_PCD_MANIP_IPSEC_DSCP_EN : 0;
-+ tmpReg32 |=
-+ (p_IPSecParams->variableIpHdrLen) ? FM_PCD_MANIP_IPSEC_VIPL_EN : 0;
-+ tmpReg32 |=
-+ (p_IPSecParams->variableIpVersion) ? FM_PCD_MANIP_IPSEC_VIPV_EN : 0;
-+ if (p_IPSecParams->arwSize)
-+ tmpReg32 |= (uint32_t)((XX_VirtToPhys(UINT_TO_PTR(p_IPSecParams->arwAddr))-FM_MM_MURAM)
-+ & (FM_MURAM_SIZE-1));
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+
-+ tmpReg32 = 0;
-+ if (p_IPSecParams->arwSize) {
-+ NEXT_POWER_OF_2((p_IPSecParams->arwSize + 32), power);
-+ LOG2(power, power);
-+ tmpReg32 = (p_IPSecParams->arwSize | (power - 5)) << FM_PCD_MANIP_IPSEC_ARW_SIZE_SHIFT;
-+ }
-+
-+ if (p_ManipParams->h_NextManip)
-+ tmpReg32 |=
-+ (uint32_t)(XX_VirtToPhys(((t_FmPcdManip *)p_ManipParams->h_NextManip)->h_Ad)-
-+ (((t_FmPcd *)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4;
-+ WRITE_UINT32(p_Ad->matchTblPtr, tmpReg32);
-+
-+ tmpReg32 = HMAN_OC_IPSEC_MANIP;
-+ tmpReg32 |= p_IPSecParams->outerIPHdrLen
-+ << FM_PCD_MANIP_IPSEC_IP_HDR_LEN_SHIFT;
-+ if (p_ManipParams->h_NextManip)
-+ tmpReg32 |= FM_PCD_MANIP_IPSEC_NADEN;
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+
-+ return err;
-+}
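The anti-replay-window (ARW) size encoding above is worth a worked example. A minimal standalone sketch, assuming NEXT_POWER_OF_2 rounds up to the next power of two that is >= its argument and LOG2 is an integer log2 (helper names below are hypothetical, not from the patch):

    #include <stdint.h>

    /* Mirrors: NEXT_POWER_OF_2(arwSize + 32, power); LOG2(power, power);
     * field = (arwSize | (power - 5)) << FM_PCD_MANIP_IPSEC_ARW_SIZE_SHIFT; */
    static uint32_t ArwSizeField(uint16_t arwSize, uint32_t shift)
    {
        uint32_t power = 32, lg = 0;

        while (power < (uint32_t)arwSize + 32)  /* next power of two >= arwSize + 32 */
            power <<= 1;
        while ((1u << lg) < power)              /* integer log2 of that power */
            lg++;
        /* arwSize is a multiple of 16 (checked above), so its low bits are free */
        return (uint32_t)(arwSize | (lg - 5)) << shift;
    }

    /* Example: arwSize = 64 -> 64 + 32 = 96 -> power = 128 -> lg = 7 ->
     * field = (64 | 2) << FM_PCD_MANIP_IPSEC_ARW_SIZE_SHIFT. */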
-+
-+static t_Error SetCapwapReassmManip(t_FmPcdManip *p_Manip)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
-+
-+ /* Allocation of CAPWAP Action descriptor */
-+ p_Manip->reassmParams.capwap.h_Ad = (t_Handle)XX_MallocSmart(
-+ FM_PCD_CC_AD_ENTRY_SIZE, p_Manip->reassmParams.dataMemId,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_Manip->reassmParams.capwap.h_Ad)
-+ {
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("Allocation of CAPWAP table descriptor"));
-+ }
-+
-+ memset(p_Manip->reassmParams.capwap.h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Fill reassembly manipulation parameter in the Reassembly Action Descriptor */
-+ return FillReassmManipParams(p_Manip, HEADER_TYPE_CAPWAP);
-+}
-+
-+static void setCapwapReassmSchemeParams(t_FmPcd* p_FmPcd,
-+ t_FmPcdKgSchemeParams *p_Scheme,
-+ t_Handle h_CcTree, uint8_t groupId)
-+{
-+ uint8_t res;
-+
-+ /* Configures scheme's network environment parameters */
-+ p_Scheme->netEnvParams.numOfDistinctionUnits = 1;
-+ res = FmPcdNetEnvGetUnitId(
-+ p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv),
-+ HEADER_TYPE_USER_DEFINED_SHIM2, FALSE, 0);
-+ ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+ p_Scheme->netEnvParams.unitIds[0] = res;
-+
-+ /* Configures scheme's next engine parameters*/
-+ p_Scheme->nextEngine = e_FM_PCD_CC;
-+ p_Scheme->kgNextEngineParams.cc.h_CcTree = h_CcTree;
-+ p_Scheme->kgNextEngineParams.cc.grpId = groupId;
-+ p_Scheme->useHash = TRUE;
-+
-+ /* Configures scheme's key*/
-+ p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 2;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].type =
-+ e_FM_PCD_EXTRACT_NON_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.src =
-+ e_FM_PCD_EXTRACT_FROM_PARSE_RESULT;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.action =
-+ e_FM_PCD_ACTION_NONE;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.offset = 20;
-+ p_Scheme->keyExtractAndHashParams.extractArray[0].extractNonHdr.size = 4;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].type =
-+ e_FM_PCD_EXTRACT_NON_HDR;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.src =
-+ e_FM_PCD_EXTRACT_FROM_DFLT_VALUE;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.action =
-+ e_FM_PCD_ACTION_NONE;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.offset = 0;
-+ p_Scheme->keyExtractAndHashParams.extractArray[1].extractNonHdr.size = 1;
-+
-+ p_Scheme->keyExtractAndHashParams.privateDflt0 = 0x0;
-+ p_Scheme->keyExtractAndHashParams.privateDflt1 = 0x0;
-+ p_Scheme->keyExtractAndHashParams.numOfUsedDflts = 1;
-+ p_Scheme->keyExtractAndHashParams.dflts[0].type = e_FM_PCD_KG_GENERIC_NOT_FROM_DATA;
-+ p_Scheme->keyExtractAndHashParams.dflts[0].dfltSelect = e_FM_PCD_KG_DFLT_PRIVATE_0;
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+static t_Error CapwapReassemblyStats(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipReassemCapwapStats *p_Stats)
-+{
-+ ASSERT_COND(p_Manip);
-+ ASSERT_COND(p_Stats);
-+ ASSERT_COND(p_Manip->reassmParams.p_ReassCommonTbl);
-+
-+ p_Stats->timeout =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalTimeOutCounter);
-+ p_Stats->rfdPoolBusy =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalRfdPoolBusyCounter);
-+ p_Stats->internalBufferBusy =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalInternalBufferBusy);
-+ p_Stats->externalBufferBusy =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalExternalBufferBusy);
-+ p_Stats->sgFragments =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalSgFragmentCounter);
-+ p_Stats->dmaSemaphoreDepletion =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalDmaSemaphoreDepletionCounter);
-+ p_Stats->exceedMaxReassemblyFrameLen =
-+ GET_UINT32(p_Manip->reassmParams.p_ReassCommonTbl->totalNCSPCounter);
-+
-+ p_Stats->successfullyReassembled =
-+ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalSuccessfullyReasmFramesCounter);
-+ p_Stats->validFragments =
-+ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalValidFragmentCounter);
-+ p_Stats->processedFragments =
-+ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalProcessedFragCounter);
-+ p_Stats->malformedFragments =
-+ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalMalformdFragCounter);
-+ p_Stats->autoLearnBusy =
-+ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalSetBusyCounter);
-+ p_Stats->discardedFragments =
-+ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalDiscardedFragsCounter);
-+ p_Stats->moreThan16Fragments =
-+ GET_UINT32(p_Manip->reassmParams.capwap.p_ReassTbl->totalMoreThan16FramesCounter);
-+
-+ return E_OK;
-+}
-+
-+static t_Error CapwapFragmentationStats(t_FmPcdManip *p_Manip,
-+ t_FmPcdManipFragCapwapStats *p_Stats)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+
-+ ASSERT_COND(p_Manip);
-+ ASSERT_COND(p_Stats);
-+ ASSERT_COND(p_Manip->h_Ad);
-+ ASSERT_COND(p_Manip->fragParams.p_Frag);
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+
-+ p_Stats->totalFrames = GET_UINT32(p_Ad->gmask);
-+
-+ return E_OK;
-+}
-+
-+static t_Error CapwapReassembly(t_FmPcdManipReassemParams *p_ManipReassmParams,
-+ t_FmPcdManip *p_Manip)
-+{
-+ uint32_t maxSetNumber = 10000;
-+ t_FmPcdManipReassemCapwapParams reassmManipParams =
-+ p_ManipReassmParams->u.capwapReassem;
-+ t_Error res;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((t_FmPcd *)p_Manip->h_FmPcd)->h_Hc,
-+ E_INVALID_HANDLE);
-+
-+ /* Validate user's parameters. */
-+ if ((reassmManipParams.timeoutThresholdForReassmProcess < 1000)
-+ || (reassmManipParams.timeoutThresholdForReassmProcess > 8000000))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("timeoutThresholdForReassmProcess should be 1msec - 8sec"));
-+ /* It is recommended that the total number of entries in this table (number of sets * number of ways)
-+ will be twice the number of frames that are expected to be reassembled simultaneously.*/
-+ if (reassmManipParams.maxNumFramesInProcess
-+ > (reassmManipParams.maxNumFramesInProcess * maxSetNumber / 2))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("maxNumFramesInProcess has to be less than (maximun set number * number of ways / 2)"));
-+
-+ /* Saves user's reassembly manipulation parameters */
-+ p_Manip->reassmParams.capwap.relativeSchemeId =
-+ reassmManipParams.relativeSchemeId;
-+ p_Manip->reassmParams.capwap.numOfFramesPerHashEntry =
-+ reassmManipParams.numOfFramesPerHashEntry;
-+ p_Manip->reassmParams.capwap.maxRessembledsSize =
-+ reassmManipParams.maxReassembledFrameLength;
-+ p_Manip->reassmParams.maxNumFramesInProcess =
-+ reassmManipParams.maxNumFramesInProcess;
-+ p_Manip->reassmParams.timeOutMode = reassmManipParams.timeOutMode;
-+ p_Manip->reassmParams.fqidForTimeOutFrames =
-+ reassmManipParams.fqidForTimeOutFrames;
-+ p_Manip->reassmParams.timeoutThresholdForReassmProcess =
-+ reassmManipParams.timeoutThresholdForReassmProcess;
-+ p_Manip->reassmParams.dataMemId = reassmManipParams.dataMemId;
-+ p_Manip->reassmParams.dataLiodnOffset = reassmManipParams.dataLiodnOffset;
-+
-+ /* Creates and initializes the Reassembly common parameter table */
-+ CreateReassCommonTable(p_Manip);
-+
-+ res = SetCapwapReassmManip(p_Manip);
-+ if (res != E_OK)
-+ return res;
-+
-+ return E_OK;
-+}
-+
-+static t_Error CapwapFragmentation(t_FmPcdManipFragCapwapParams *p_ManipParams,
-+ t_FmPcdManip *p_Manip)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_AdOfTypeContLookup *p_Ad;
-+ uint32_t pcAndOffsetsReg = 0, ccAdBaseReg = 0, gmaskReg = 0;
-+ uint32_t tmpReg32 = 0, tmpRegNia = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_ManipParams->sizeForFragmentation != 0xFFFF,
-+ E_INVALID_VALUE);
-+ p_FmPcd = p_Manip->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ /* Allocation of fragmentation Action Descriptor */
-+ p_Manip->fragParams.p_Frag = (t_AdOfTypeContLookup *)FM_MURAM_AllocMem(
-+ p_FmPcd->h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_Manip->fragParams.p_Frag)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("MURAM alloc for Fragmentation table descriptor"));
-+ MemSet8(p_Manip->fragParams.p_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Prepare the third Ad register (pcAndOffsets)- OperationCode */
-+ pcAndOffsetsReg = (uint32_t)HMAN_OC_CAPWAP_FRAGMENTATION;
-+
-+ /* Prepare the first Ad register (ccAdBase) - Don't frag action and Action descriptor type*/
-+ ccAdBaseReg = FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ ccAdBaseReg |=
-+ (p_ManipParams->compressModeEn) ? FM_PCD_MANIP_CAPWAP_FRAG_COMPRESS_EN :
-+ 0;
-+
-+ /* Set Scatter/Gather BPid */
-+ if (p_ManipParams->sgBpidEn)
-+ {
-+ ccAdBaseReg |= FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_EN;
-+ pcAndOffsetsReg |= ((p_ManipParams->sgBpid
-+ << FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_SHIFT)
-+ & FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_MASK);
-+ }
-+
-+ /* Prepare the first Ad register (gmask) - scratch buffer pool id and Pointer to fragment ID */
-+ gmaskReg = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_FmPcd->capwapFrameIdAddr))
-+ - p_FmPcd->physicalMuramBase);
-+ gmaskReg |= (0xFF) << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID;
-+
-+ /* Set all Ad registers */
-+ WRITE_UINT32(p_Manip->fragParams.p_Frag->pcAndOffsets, pcAndOffsetsReg);
-+ WRITE_UINT32(p_Manip->fragParams.p_Frag->ccAdBase, ccAdBaseReg);
-+ WRITE_UINT32(p_Manip->fragParams.p_Frag->gmask, gmaskReg);
-+
-+ /* Saves user's fragmentation manipulation parameters */
-+ p_Manip->frag = TRUE;
-+ p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation;
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+
-+ tmpRegNia = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_Frag)
-+ - (p_FmPcd->physicalMuramBase));
-+ tmpReg32 = (uint32_t)p_Manip->sizeForFragmentation
-+ << FM_PCD_MANIP_CAPWAP_FRAG_CHECK_MTU_SHIFT;
-+
-+ tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ tmpReg32 |= HMAN_OC_CAPWAP_FRAG_CHECK;
-+
-+ tmpRegNia |= FM_PCD_MANIP_CAPWAP_FRAG_CHECK_CNIA;
-+
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpRegNia);
-+ WRITE_UINT32(p_Ad->gmask, 0);
-+ /* Total frame counter - MUST be initialized to zero.*/
-+
-+ return E_OK;
-+}
-+
-+static t_Error UpdateInitCapwapFrag(t_Handle h_FmPcd, t_Handle h_PcdParams,
-+ t_Handle h_FmPort, t_FmPcdManip *p_Manip,
-+ t_Handle h_Ad, bool validate)
-+{
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION),
-+ E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+
-+ UNUSED(h_FmPcd);
-+ UNUSED(h_Ad);
-+ UNUSED(h_PcdParams);
-+ UNUSED(validate);
-+ UNUSED(p_Manip);
-+
-+ fmPortGetSetCcParams.setCcParams.type = 0;
-+ fmPortGetSetCcParams.getCcParams.type = MANIP_EXTRA_SPACE;
-+ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (!fmPortGetSetCcParams.getCcParams.internalBufferOffset)
-+ DBG(WARNING, ("manipExtraSpace must be larger than '0'"));
-+
-+ return E_OK;
-+}
-+
-+static t_Error CapwapManip(t_FmPcdManipParams *p_ManipParams,
-+ t_FmPcdManip *p_Manip)
-+{
-+ t_AdOfTypeContLookup *p_Ad;
-+ t_FmPcdManipSpecialOffloadCapwapParams *p_Params;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg32 = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_ManipParams, E_INVALID_HANDLE);
-+
-+ p_Params = &p_ManipParams->u.specialOffload.u.capwap;
-+
-+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
-+ tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE;
-+ tmpReg32 |= (p_Params->dtls) ? FM_PCD_MANIP_CAPWAP_DTLS : 0;
-+ /* TODO - add 'qosSrc' */
-+ WRITE_UINT32(p_Ad->ccAdBase, tmpReg32);
-+
-+ tmpReg32 = HMAN_OC_CAPWAP_MANIP;
-+ if (p_ManipParams->h_NextManip)
-+ {
-+ WRITE_UINT32(
-+ p_Ad->matchTblPtr,
-+ (uint32_t)(XX_VirtToPhys(((t_FmPcdManip *)p_ManipParams->h_NextManip)->h_Ad)- (((t_FmPcd *)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4);
-+
-+ tmpReg32 |= FM_PCD_MANIP_CAPWAP_NADEN;
-+ }
-+
-+ WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32);
-+
-+ return err;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+static t_Handle ManipOrStatsSetNode(t_Handle h_FmPcd, t_Handle *p_Params,
-+ bool stats)
-+{
-+ t_FmPcdManip *p_Manip;
-+ t_Error err;
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+
-+ p_Manip = (t_FmPcdManip*)XX_Malloc(sizeof(t_FmPcdManip));
-+ if (!p_Manip)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
-+ return NULL;
-+ }
-+ memset(p_Manip, 0, sizeof(t_FmPcdManip));
-+
-+ p_Manip->type = ((t_FmPcdManipParams *)p_Params)->type;
-+ memcpy((uint8_t*)&p_Manip->manipParams, p_Params,
-+ sizeof(p_Manip->manipParams));
-+
-+ if (!stats)
-+ err = CheckManipParamsAndSetType(p_Manip,
-+ (t_FmPcdManipParams *)p_Params);
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ else
-+ err = CheckStatsParamsAndSetType(p_Manip, (t_FmPcdStatsParams *)p_Params);
-+#else /* not (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ else
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Statistics node!"));
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Invalid header manipulation type"));
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ if ((p_Manip->opcode != HMAN_OC_IP_REASSEMBLY) && (p_Manip->opcode != HMAN_OC_CAPWAP_REASSEMBLY))
-+ {
-+ /* In case of reassembly manipulation, the reassembly action descriptor will
-+ be defined later on */
-+ if (p_Manip->muramAllocate)
-+ {
-+ p_Manip->h_Ad = (t_Handle)FM_MURAM_AllocMem(
-+ p_FmPcd->h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_Manip->h_Ad)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Manipulation action descriptor"));
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ MemSet8(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+ }
-+ else
-+ {
-+ p_Manip->h_Ad = (t_Handle)XX_Malloc(
-+ FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
-+ if (!p_Manip->h_Ad)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of Manipulation action descriptor"));
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ memset(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
-+ }
-+ }
-+
-+ p_Manip->h_FmPcd = h_FmPcd;
-+
-+ return p_Manip;
-+}
-+
-+static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfManip(
-+ t_FmPcdManip *p_CrntMdfManip, t_List *h_NodesLst)
-+{
-+ t_CcNodeInformation *p_CcNodeInformation;
-+ t_FmPcdCcNode *p_NodePtrOnCurrentMdfManip = NULL;
-+ t_List *p_Pos;
-+ int i = 0;
-+ t_Handle p_AdTablePtOnCrntCurrentMdfNode/*, p_AdTableNewModified*/;
-+ t_CcNodeInformation ccNodeInfo;
-+
-+ LIST_FOR_EACH(p_Pos, &p_CrntMdfManip->nodesLst)
-+ {
-+ p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
-+ p_NodePtrOnCurrentMdfManip =
-+ (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode;
-+
-+ ASSERT_COND(p_NodePtrOnCurrentMdfManip);
-+
-+ /* Search the previous node for the exact index that points to this currently modified node, in order to get its AD */
-+ for (i = 0; i < p_NodePtrOnCurrentMdfManip->numOfKeys + 1; i++)
-+ {
-+ if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].nextEngineParams.nextEngine
-+ == e_FM_PCD_CC)
-+ {
-+ if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].nextEngineParams.h_Manip
-+ == (t_Handle)p_CrntMdfManip)
-+ {
-+ if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].p_StatsObj)
-+ p_AdTablePtOnCrntCurrentMdfNode =
-+ p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].p_StatsObj->h_StatsAd;
-+ else
-+ p_AdTablePtOnCrntCurrentMdfNode =
-+ PTR_MOVE(p_NodePtrOnCurrentMdfManip->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
-+ ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode;
-+ EnqueueNodeInfoToRelevantLst(h_NodesLst, &ccNodeInfo, NULL);
-+ }
-+ }
-+ }
-+
-+ ASSERT_COND(i != p_NodePtrOnCurrentMdfManip->numOfKeys);
-+ }
-+}
-+
-+static void BuildHmtd(uint8_t *p_Dest, uint8_t *p_Src, uint8_t *p_Hmcd,
-+ t_FmPcd *p_FmPcd)
-+{
-+ t_Error err;
-+
-+ /* Copy the HMTD */
-+ MemCpy8(p_Dest, (uint8_t*)p_Src, 16);
-+ /* Replace the HMCT table pointer */
-+ WRITE_UINT32(
-+ ((t_Hmtd *)p_Dest)->hmcdBasePtr,
-+ (uint32_t)(XX_VirtToPhys(p_Hmcd) - ((t_FmPcd*)p_FmPcd)->physicalMuramBase));
-+ /* Call Host Command to replace HMTD by a new HMTD */
-+ err = FmHcPcdCcDoDynamicChange(
-+ p_FmPcd->h_Hc,
-+ (uint32_t)(XX_VirtToPhys(p_Src) - p_FmPcd->physicalMuramBase),
-+ (uint32_t)(XX_VirtToPhys(p_Dest) - p_FmPcd->physicalMuramBase));
-+ if (err)
-+ REPORT_ERROR(MINOR, err, ("Failed in dynamic manip change, continued to the rest of the owners."));
-+}
-+
-+static t_Error FmPcdManipInitUpdate(t_Handle h_FmPcd, t_Handle h_PcdParams,
-+ t_Handle h_FmPort, t_Handle h_Manip,
-+ t_Handle h_Ad, bool validate, int level,
-+ t_Handle h_FmTree)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
-+
-+ UNUSED(level);
-+ UNUSED(h_FmTree);
-+
-+ switch (p_Manip->opcode)
-+ {
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
-+ err = UpdateInitMvIntFrameHeaderFromFrameToBufferPrefix(h_FmPort,
-+ p_Manip,
-+ h_Ad,
-+ validate);
-+ break;
-+ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
-+ if (!p_Manip->h_Frag)
-+ break;
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+ err = UpdateInitCapwapFragmentation(h_FmPort, p_Manip, h_Ad, validate, h_FmTree);
-+ break;
-+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
-+ if (p_Manip->h_Frag)
-+ err = UpdateInitCapwapReasm(h_FmPcd, h_FmPort, p_Manip, h_Ad, validate);
-+ break;
-+ case (HMAN_OC_CAPWAP_INDEXED_STATS):
-+ err = UpdateIndxStats(h_FmPcd, h_FmPort, p_Manip);
-+ break;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ case (HMAN_OC_IP_REASSEMBLY):
-+ err = UpdateInitReasm(h_FmPcd, h_PcdParams, h_FmPort, p_Manip, h_Ad,
-+ validate);
-+ break;
-+ case (HMAN_OC_IP_FRAGMENTATION):
-+ err = UpdateInitIpFrag(h_FmPcd, h_PcdParams, h_FmPort, p_Manip,
-+ h_Ad, validate);
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+ err = UpdateInitCapwapFrag(h_FmPcd, h_PcdParams, h_FmPort, p_Manip,
-+ h_Ad, validate);
-+ break;
-+ case (HMAN_OC_CAPWAP_REASSEMBLY):
-+ err = UpdateInitReasm(h_FmPcd, h_PcdParams, h_FmPort, p_Manip, h_Ad,
-+ validate);
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ return E_OK;
-+ }
-+
-+ return err;
-+}
-+
-+static t_Error FmPcdManipModifyUpdate(t_Handle h_Manip, t_Handle h_Ad,
-+ bool validate, int level,
-+ t_Handle h_FmTree)
-+{
-+
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+ t_Error err = E_OK;
-+
-+ UNUSED(level);
-+
-+ switch (p_Manip->opcode)
-+ {
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("modify node with this type of manipulation is not suppported"));
-+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
-+
-+ if (p_Manip->h_Frag)
-+ {
-+ if (!(p_Manip->shadowUpdateParams & NUM_OF_TASKS)
-+ && !(p_Manip->shadowUpdateParams & OFFSET_OF_DATA)
-+ && !(p_Manip->shadowUpdateParams & OFFSET_OF_PR))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("modify node with this type of manipulation requires manipulation be updated previously in SetPcd function"));
-+ }
-+ break;
-+ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
-+ if (p_Manip->h_Frag)
-+ err = UpdateModifyCapwapFragmenation(p_Manip, h_Ad, validate, h_FmTree);
-+ break;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ default:
-+ return E_OK;
-+ }
-+
-+ return err;
-+}
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+
-+t_Error FmPcdManipUpdate(t_Handle h_FmPcd, t_Handle h_PcdParams,
-+ t_Handle h_FmPort, t_Handle h_Manip, t_Handle h_Ad,
-+ bool validate, int level, t_Handle h_FmTree,
-+ bool modify)
-+{
-+ t_Error err;
-+
-+ if (!modify)
-+ err = FmPcdManipInitUpdate(h_FmPcd, h_PcdParams, h_FmPort, h_Manip,
-+ h_Ad, validate, level, h_FmTree);
-+ else
-+ err = FmPcdManipModifyUpdate(h_Manip, h_Ad, validate, level, h_FmTree);
-+
-+ return err;
-+}
-+
-+void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add)
-+{
-+
-+ uint32_t intFlags;
-+
-+ intFlags = XX_LockIntrSpinlock(((t_FmPcdManip *)h_Manip)->h_Spinlock);
-+ if (add)
-+ ((t_FmPcdManip *)h_Manip)->owner++;
-+ else
-+ {
-+ ASSERT_COND(((t_FmPcdManip *)h_Manip)->owner);
-+ ((t_FmPcdManip *)h_Manip)->owner--;
-+ }
-+ XX_UnlockIntrSpinlock(((t_FmPcdManip *)h_Manip)->h_Spinlock, intFlags);
-+}
-+
-+t_List *FmPcdManipGetNodeLstPointedOnThisManip(t_Handle h_Manip)
-+{
-+ ASSERT_COND(h_Manip);
-+ return &((t_FmPcdManip *)h_Manip)->nodesLst;
-+}
-+
-+t_List *FmPcdManipGetSpinlock(t_Handle h_Manip)
-+{
-+ ASSERT_COND(h_Manip);
-+ return ((t_FmPcdManip *)h_Manip)->h_Spinlock;
-+}
-+
-+t_Error FmPcdManipCheckParamsForCcNextEngine(
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
-+ uint32_t *requiredAction)
-+{
-+ t_FmPcdManip *p_Manip;
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ t_Error err = E_OK;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))*/
-+ bool pointFromCc = TRUE;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams->h_Manip,
-+ E_NULL_POINTER);
-+
-+ p_Manip = (t_FmPcdManip *)(p_FmPcdCcNextEngineParams->h_Manip);
-+ *requiredAction = 0;
-+
-+ while (p_Manip)
-+ {
-+ switch (p_Manip->opcode)
-+ {
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ case (HMAN_OC_CAPWAP_INDEXED_STATS):
-+ if (p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE"));
-+ if (p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
-+ p_Manip->cnia = TRUE;
-+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
-+ *requiredAction = UPDATE_NIA_ENQ_WITHOUT_DMA;
-+ case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR):
-+ p_Manip->ownerTmp++;
-+ break;
-+ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
-+ if ((p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE)
-+ && !p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE with fqidForCtrlFlow FALSE"));
-+ p_Manip->ownerTmp++;
-+ break;
-+ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
-+ if ((p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_CC)
-+ && (FmPcdCcGetParseCode(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)
-+ != CC_PC_GENERIC_IC_HASH_INDEXED))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation next engine has to be CC and action = e_FM_PCD_ACTION_INDEXED_LOOKUP"));
-+ err = UpdateManipIc(p_FmPcdCcNextEngineParams->h_Manip,
-+ FmPcdCcGetOffset(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode));
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ *requiredAction = UPDATE_NIA_ENQ_WITHOUT_DMA;
-+ break;
-+ #endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ case (HMAN_OC_IP_FRAGMENTATION):
-+ case (HMAN_OC_IP_REASSEMBLY):
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_REASSEMBLY):
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+#endif /* (DPAA_VERSION >= 11) */
-+ if (p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE"));
-+ p_Manip->ownerTmp++;
-+ break;
-+ case (HMAN_OC_IPSEC_MANIP):
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_MANIP):
-+#endif /* (DPAA_VERSION >= 11) */
-+ p_Manip->ownerTmp++;
-+ break;
-+ case (HMAN_OC):
-+ if ((p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC)
-+ && MANIP_IS_CASCADED(p_Manip))
-+ RETURN_ERROR(
-+ MINOR,
-+ E_INVALID_STATE,
-+ ("Can't have a cascaded manipulation when and Next Engine is CC"));
-+ if (!MANIP_IS_FIRST(p_Manip) && pointFromCc)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("h_Manip is already used and may not be shared (no sharing of non-head manip nodes)"));
-+ break;
-+ default:
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+ ("invalid type of header manipulation for this state"));
-+ }
-+ p_Manip = p_Manip->h_NextManip;
-+ pointFromCc = FALSE;
-+ }
-+ return E_OK;
-+}
-+
-+
-+t_Error FmPcdManipCheckParamsWithCcNodeParams(t_Handle h_Manip,
-+ t_Handle h_FmPcdCcNode)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcdCcNode, E_INVALID_HANDLE);
-+
-+ switch (p_Manip->opcode)
-+ {
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ case (HMAN_OC_CAPWAP_INDEXED_STATS):
-+ if (p_Manip->ownerTmp != FmPcdCcGetNumOfKeys(h_FmPcdCcNode))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("The manipulation of the type statistics flowId if exist has to be pointed by all numOfKeys"));
-+ break;
-+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
-+ if (p_Manip->h_Frag)
-+ {
-+ if (p_Manip->ownerTmp != FmPcdCcGetNumOfKeys(h_FmPcdCcNode))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("The manipulation of the type remove DTLS if exist has to be pointed by all numOfKeys"));
-+ err = UpdateManipIc(h_Manip, FmPcdCcGetOffset(h_FmPcdCcNode));
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ break;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ default:
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+void FmPcdManipUpdateAdResultForCc(
-+ t_Handle h_Manip, t_FmPcdCcNextEngineParams *p_CcNextEngineParams,
-+ t_Handle p_Ad, t_Handle *p_AdNewPtr)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+
-+ /* This routine creates a Manip AD and can return in "p_AdNewPtr"
-+ * either the new descriptor or NULL if it writes the Manip AD into p_AD (into the match table) */
-+
-+ ASSERT_COND(p_Manip);
-+ ASSERT_COND(p_CcNextEngineParams);
-+ ASSERT_COND(p_Ad);
-+ ASSERT_COND(p_AdNewPtr);
-+
-+ FmPcdManipUpdateOwner(h_Manip, TRUE);
-+
-+ /* According to "type", either build & initialize a new AD (p_AdNew) or initialize
-+ * p_Ad ( the AD in the match table) and set p_AdNew = NULL. */
-+ switch (p_Manip->opcode)
-+ {
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR):
-+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
-+ case (HMAN_OC_CAPWAP_INDEXED_STATS):
-+ *p_AdNewPtr = p_Manip->h_Ad;
-+ break;
-+ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+ WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->fqid,
-+ ((t_AdOfTypeResult *)(p_Manip->h_Ad))->fqid);
-+ WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->plcrProfile,
-+ ((t_AdOfTypeResult *)(p_Manip->h_Ad))->plcrProfile);
-+ WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->nia,
-+ ((t_AdOfTypeResult *)(p_Manip->h_Ad))->nia);
-+ *p_AdNewPtr = NULL;
-+ break;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ case (HMAN_OC_IPSEC_MANIP):
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_MANIP):
-+#endif /* (DPAA_VERSION >= 11) */
-+ *p_AdNewPtr = p_Manip->h_Ad;
-+ break;
-+ case (HMAN_OC_IP_FRAGMENTATION):
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+#endif /* (DPAA_VERSION >= 11) */
-+ if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_DONE)
-+ && (!p_CcNextEngineParams->params.enqueueParams.overrideFqid))
-+ {
-+ memcpy((uint8_t *)p_Ad, (uint8_t *)p_Manip->h_Ad,
-+ sizeof(t_AdOfTypeContLookup));
-+#if (DPAA_VERSION >= 11)
-+ WRITE_UINT32(
-+ ((t_AdOfTypeContLookup *)p_Ad)->ccAdBase,
-+ GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase) & ~FM_PCD_MANIP_IP_CNIA);
-+#endif /* (DPAA_VERSION >= 11) */
-+ *p_AdNewPtr = NULL;
-+ }
-+ else
-+ *p_AdNewPtr = p_Manip->h_Ad;
-+ break;
-+ case (HMAN_OC_IP_REASSEMBLY):
-+ if (FmPcdManipIpReassmIsIpv6Hdr(p_Manip))
-+ {
-+ if (!p_Manip->reassmParams.ip.ipv6Assigned)
-+ {
-+ *p_AdNewPtr = p_Manip->reassmParams.ip.h_Ipv6Ad;
-+ p_Manip->reassmParams.ip.ipv6Assigned = TRUE;
-+ FmPcdManipUpdateOwner(h_Manip, FALSE);
-+ }
-+ else
-+ {
-+ *p_AdNewPtr = p_Manip->reassmParams.ip.h_Ipv4Ad;
-+ p_Manip->reassmParams.ip.ipv6Assigned = FALSE;
-+ }
-+ }
-+ else
-+ *p_AdNewPtr = p_Manip->reassmParams.ip.h_Ipv4Ad;
-+ memcpy((uint8_t *)p_Ad, (uint8_t *)*p_AdNewPtr,
-+ sizeof(t_AdOfTypeContLookup));
-+ *p_AdNewPtr = NULL;
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_REASSEMBLY):
-+ *p_AdNewPtr = p_Manip->reassmParams.capwap.h_Ad;
-+ memcpy((uint8_t *)p_Ad, (uint8_t *)*p_AdNewPtr,
-+ sizeof(t_AdOfTypeContLookup));
-+ *p_AdNewPtr = NULL;
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+ case (HMAN_OC):
-+ /* Allocate and initialize HMTD */
-+ *p_AdNewPtr = p_Manip->h_Ad;
-+ break;
-+ default:
-+ break;
-+ }
-+}
-+
-+void FmPcdManipUpdateAdContLookupForCc(t_Handle h_Manip, t_Handle p_Ad,
-+ t_Handle *p_AdNewPtr,
-+ uint32_t adTableOffset)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+
-+ /* This routine creates a Manip AD and can return in "p_AdNewPtr"
-+ * either the new descriptor or NULL if it writes the Manip AD into p_AD (into the match table) */
-+ ASSERT_COND(p_Manip);
-+
-+ FmPcdManipUpdateOwner(h_Manip, TRUE);
-+
-+ switch (p_Manip->opcode)
-+ {
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
-+ WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase,
-+ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->ccAdBase);
-+ WRITE_UINT32(
-+ ((t_AdOfTypeContLookup *)p_Ad)->matchTblPtr,
-+ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->matchTblPtr);
-+ WRITE_UINT32(
-+ ((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets,
-+ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->pcAndOffsets);
-+ WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->gmask,
-+ ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->gmask);
-+ WRITE_UINT32(
-+ ((t_AdOfTypeContLookup *)p_Ad)->ccAdBase,
-+ (GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase) | adTableOffset));
-+ *p_AdNewPtr = NULL;
-+ break;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ case (HMAN_OC):
-+ /* Initialize HMTD within the match table*/
-+ MemSet8(p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+ /* copy the existing HMTD */
-+ memcpy((uint8_t*)p_Ad, p_Manip->h_Ad, sizeof(t_Hmtd));
-+ /* update NADEN to be "1"*/
-+ WRITE_UINT16(
-+ ((t_Hmtd *)p_Ad)->cfg,
-+ (uint16_t)(GET_UINT16(((t_Hmtd *)p_Ad)->cfg) | HMTD_CFG_NEXT_AD_EN));
-+ /* update next action descriptor */
-+ WRITE_UINT16(((t_Hmtd *)p_Ad)->nextAdIdx,
-+ (uint16_t)(adTableOffset >> 4));
-+ /* mark that Manip's HMTD is not used */
-+ *p_AdNewPtr = NULL;
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
-+t_Error FmPcdManipBuildIpReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv,
-+ t_Handle h_CcTree, t_Handle h_Manip,
-+ bool isIpv4, uint8_t groupId)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+ t_FmPcdKgSchemeParams *p_SchemeParams = NULL;
-+ t_Handle h_Scheme;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(h_NetEnv);
-+ ASSERT_COND(p_Manip);
-+
-+ /* scheme was already built, no need to check for IPv6 */
-+ if (p_Manip->reassmParams.ip.h_Ipv4Scheme)
-+ return E_OK;
-+
-+ if (isIpv4) {
-+ h_Scheme = FmPcdKgGetSchemeHandle(p_FmPcd, p_Manip->reassmParams.ip.relativeSchemeId[0]);
-+ if (h_Scheme) {
-+ /* scheme was found */
-+ p_Manip->reassmParams.ip.h_Ipv4Scheme = h_Scheme;
-+ return E_OK;
-+ }
-+ } else {
-+ h_Scheme = FmPcdKgGetSchemeHandle(p_FmPcd, p_Manip->reassmParams.ip.relativeSchemeId[1]);
-+ if (h_Scheme) {
-+ /* scheme was found */
-+ p_Manip->reassmParams.ip.h_Ipv6Scheme = h_Scheme;
-+ return E_OK;
-+ }
-+ }
-+
-+ p_SchemeParams = XX_Malloc(sizeof(t_FmPcdKgSchemeParams));
-+ if (!p_SchemeParams)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("Memory allocation failed for scheme"));
-+
-+ /* Configures the IPv4 or IPv6 scheme*/
-+ memset(p_SchemeParams, 0, sizeof(t_FmPcdKgSchemeParams));
-+ p_SchemeParams->netEnvParams.h_NetEnv = h_NetEnv;
-+ p_SchemeParams->id.relativeSchemeId = (uint8_t)(
-+ (isIpv4 == TRUE) ? p_Manip->reassmParams.ip.relativeSchemeId[0] :
-+ p_Manip->reassmParams.ip.relativeSchemeId[1]);
-+ p_SchemeParams->schemeCounter.update = TRUE;
-+#if (DPAA_VERSION >= 11)
-+ p_SchemeParams->alwaysDirect = TRUE;
-+ p_SchemeParams->bypassFqidGeneration = TRUE;
-+#else
-+ p_SchemeParams->keyExtractAndHashParams.hashDistributionNumOfFqids = 1;
-+ p_SchemeParams->baseFqid = 0xFFFFFF; /*TODO- baseFqid*/
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ setIpReassmSchemeParams(p_FmPcd, p_SchemeParams, h_CcTree, isIpv4, groupId);
-+
-+ /* Sets the new scheme */
-+ if (isIpv4)
-+ p_Manip->reassmParams.ip.h_Ipv4Scheme = FM_PCD_KgSchemeSet(
-+ p_FmPcd, p_SchemeParams);
-+ else
-+ p_Manip->reassmParams.ip.h_Ipv6Scheme = FM_PCD_KgSchemeSet(
-+ p_FmPcd, p_SchemeParams);
-+
-+ XX_Free(p_SchemeParams);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdManipDeleteIpReassmSchemes(t_Handle h_Manip)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+
-+ ASSERT_COND(p_Manip);
-+
-+ if ((p_Manip->reassmParams.ip.h_Ipv4Scheme) &&
-+ !FmPcdKgIsSchemeHasOwners(p_Manip->reassmParams.ip.h_Ipv4Scheme))
-+ FM_PCD_KgSchemeDelete(p_Manip->reassmParams.ip.h_Ipv4Scheme);
-+
-+ if ((p_Manip->reassmParams.ip.h_Ipv6Scheme) &&
-+ !FmPcdKgIsSchemeHasOwners(p_Manip->reassmParams.ip.h_Ipv6Scheme))
-+ FM_PCD_KgSchemeDelete(p_Manip->reassmParams.ip.h_Ipv6Scheme);
-+
-+ return E_OK;
-+}
-+
-+bool FmPcdManipIpReassmIsIpv6Hdr(t_Handle h_Manip)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+
-+ ASSERT_COND(p_Manip);
-+
-+ return (p_Manip->reassmParams.hdr == HEADER_TYPE_IPv6);
-+}
-+
-+t_Error FmPcdManipBuildCapwapReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv,
-+ t_Handle h_CcTree, t_Handle h_Manip,
-+ uint8_t groupId)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+ t_FmPcdKgSchemeParams *p_SchemeParams = NULL;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(h_NetEnv);
-+ ASSERT_COND(p_Manip);
-+
-+ /* scheme was already built */
-+ if (p_Manip->reassmParams.capwap.h_Scheme)
-+ return E_OK;
-+
-+ p_SchemeParams = XX_Malloc(sizeof(t_FmPcdKgSchemeParams));
-+ if (!p_SchemeParams)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY,
-+ ("Memory allocation failed for scheme"));
-+
-+ memset(p_SchemeParams, 0, sizeof(t_FmPcdKgSchemeParams));
-+ p_SchemeParams->netEnvParams.h_NetEnv = h_NetEnv;
-+ p_SchemeParams->id.relativeSchemeId =
-+ (uint8_t)p_Manip->reassmParams.capwap.relativeSchemeId;
-+ p_SchemeParams->schemeCounter.update = TRUE;
-+ p_SchemeParams->bypassFqidGeneration = TRUE;
-+
-+ setCapwapReassmSchemeParams(p_FmPcd, p_SchemeParams, h_CcTree, groupId);
-+
-+ p_Manip->reassmParams.capwap.h_Scheme = FM_PCD_KgSchemeSet(p_FmPcd,
-+ p_SchemeParams);
-+
-+ XX_Free(p_SchemeParams);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdManipDeleteCapwapReassmSchemes(t_Handle h_Manip)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+
-+ ASSERT_COND(p_Manip);
-+
-+ if (p_Manip->reassmParams.capwap.h_Scheme)
-+ FM_PCD_KgSchemeDelete(p_Manip->reassmParams.capwap.h_Scheme);
-+
-+ return E_OK;
-+}
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+t_Handle FmPcdManipApplSpecificBuild(void)
-+{
-+ t_FmPcdManip *p_Manip;
-+
-+ p_Manip = (t_FmPcdManip*)XX_Malloc(sizeof(t_FmPcdManip));
-+ if (!p_Manip)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
-+ return NULL;
-+ }
-+ memset(p_Manip, 0, sizeof(t_FmPcdManip));
-+
-+ p_Manip->opcode = HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX;
-+ p_Manip->muramAllocate = FALSE;
-+
-+ p_Manip->h_Ad = (t_Handle)XX_Malloc(FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
-+ if (!p_Manip->h_Ad)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of Manipulation action descriptor"));
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ memset(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
-+
-+ /* treatFdStatusFieldsAsErrors = TRUE hardcoded - assumption is that it always comes after CAAM */
-+ /*Application specific = type of flowId index, move internal frame header from data to IC,
-+ SEC errors check*/
-+ if (MvIntFrameHeaderFromFrameToBufferPrefix(p_Manip, TRUE)!= E_OK)
-+ {
-+ XX_Free(p_Manip->h_Ad);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+ return p_Manip;
-+}
-+
-+bool FmPcdManipIsCapwapApplSpecific(t_Handle h_Manip)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
-+ ASSERT_COND(h_Manip);
-+
-+ return (bool)((p_Manip->opcode == HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST) ? TRUE : FALSE);
-+}
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+/*********************** End of inter-module routines ************************/
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+
-+t_Handle FM_PCD_ManipNodeSet(t_Handle h_FmPcd,
-+ t_FmPcdManipParams *p_ManipParams)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdManip *p_Manip;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_ManipParams, E_INVALID_HANDLE, NULL);
-+
-+ p_Manip = ManipOrStatsSetNode(h_FmPcd, (t_Handle)p_ManipParams, FALSE);
-+ if (!p_Manip)
-+ return NULL;
-+
-+ if (((p_Manip->opcode == HMAN_OC_IP_REASSEMBLY)
-+ || (p_Manip->opcode == HMAN_OC_IP_FRAGMENTATION)
-+ || (p_Manip->opcode == HMAN_OC)
-+ || (p_Manip->opcode == HMAN_OC_IPSEC_MANIP)
-+#if (DPAA_VERSION >= 11)
-+ || (p_Manip->opcode == HMAN_OC_CAPWAP_MANIP)
-+ || (p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION)
-+ || (p_Manip->opcode == HMAN_OC_CAPWAP_REASSEMBLY)
-+#endif /* (DPAA_VERSION >= 11) */
-+ ) && (!FmPcdIsAdvancedOffloadSupported(p_FmPcd)))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Advanced-offload must be enabled"));
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+ p_Manip->h_Spinlock = XX_InitSpinlock();
-+ if (!p_Manip->h_Spinlock)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+ INIT_LIST(&p_Manip->nodesLst);
-+
-+ switch (p_Manip->opcode)
-+ {
-+ case (HMAN_OC_IP_REASSEMBLY):
-+ /* IpReassembly */
-+ err = IpReassembly(&p_ManipParams->u.reassem, p_Manip);
-+ break;
-+ case (HMAN_OC_IP_FRAGMENTATION):
-+ /* IpFragmentation */
-+ err = IpFragmentation(&p_ManipParams->u.frag.u.ipFrag, p_Manip);
-+ if (err)
-+ break;
-+ err = IPManip(p_Manip);
-+ break;
-+ case (HMAN_OC_IPSEC_MANIP):
-+ err = IPSecManip(p_ManipParams, p_Manip);
-+ break;
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_REASSEMBLY):
-+ /* CapwapReassembly */
-+ err = CapwapReassembly(&p_ManipParams->u.reassem, p_Manip);
-+ break;
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+ /* CapwapFragmentation */
-+ err = CapwapFragmentation(&p_ManipParams->u.frag.u.capwapFrag,
-+ p_Manip);
-+ break;
-+ case (HMAN_OC_CAPWAP_MANIP):
-+ err = CapwapManip(p_ManipParams, p_Manip);
-+ break;
-+#endif /* (DPAA_VERSION >= 11) */
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR):
-+ /* HmanType1 */
-+ err = RmvHdrTillSpecLocNOrInsrtIntFrmHdr(&p_ManipParams->u.hdr.rmvParams, p_Manip);
-+ break;
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+ err = CapwapFragmentation(&p_ManipParams->fragOrReasmParams.u.capwapFragParams,
-+ p_Manip,
-+ p_FmPcd,
-+ p_ManipParams->fragOrReasmParams.sgBpid);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+ if (p_Manip->insrt)
-+ p_Manip->opcode = HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER;
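-+ /* fall through */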
-+ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
-+ /* HmanType2; even if the user asked only for fragmentation, HmanType2 still needs to be allocated */
-+ err = InsrtHdrByTempl(&p_ManipParams->u.hdr.insrtParams, p_Manip, p_FmPcd);
-+ break;
-+ case (HMAN_OC_CAPWAP_REASSEMBLY):
-+ err = CapwapReassembly(&p_ManipParams->fragOrReasmParams.u.capwapReasmParams,
-+ p_Manip,
-+ p_FmPcd,
-+ p_ManipParams->fragOrReasmParams.sgBpid);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+ if (p_Manip->rmv)
-+ p_Manip->opcode = HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST;
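-+ /* fall through */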
-+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
-+ /* CAPWAP decapsulation; even if the user asked only for reassembly, the decapsulation node still needs to be allocated */
-+ err = CapwapRmvDtlsHdr(p_FmPcd, p_Manip);
-+ break;
-+ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
-+ /*Application Specific type 1*/
-+ err = MvIntFrameHeaderFromFrameToBufferPrefix(p_Manip, TRUE);
-+ break;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ case (HMAN_OC):
-+ /* New Manip */
-+ err = CreateManipActionNew(p_Manip, p_ManipParams);
-+ break;
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE"));
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ if (p_ManipParams->h_NextManip)
-+ {
-+ /* in the check routine we've verified that h_NextManip has no owners
-+ * and that only supported types are allowed. */
-+ p_Manip->h_NextManip = p_ManipParams->h_NextManip;
-+ /* save a "prev" pointer in h_NextManip */
-+ MANIP_SET_PREV(p_Manip->h_NextManip, p_Manip);
-+ FmPcdManipUpdateOwner(p_Manip->h_NextManip, TRUE);
-+ }
-+
-+ return p_Manip;
-+}
-+
-+t_Error FM_PCD_ManipNodeReplace(t_Handle h_Manip,
-+ t_FmPcdManipParams *p_ManipParams)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip, *p_FirstManip;
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)(p_Manip->h_FmPcd);
-+ t_Error err;
-+ uint8_t *p_WholeHmct = NULL, *p_ShadowHmct = NULL, *p_Hmtd = NULL;
-+ t_List lstOfNodeshichPointsOnCrntMdfManip, *p_Pos;
-+ t_CcNodeInformation *p_CcNodeInfo;
-+ SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_ManipParams, E_INVALID_HANDLE);
-+
-+ INIT_LIST(&lstOfNodeshichPointsOnCrntMdfManip);
-+
-+ if ((p_ManipParams->type != e_FM_PCD_MANIP_HDR)
-+ || (p_Manip->type != e_FM_PCD_MANIP_HDR))
-+ RETURN_ERROR(
-+ MINOR,
-+ E_NOT_SUPPORTED,
-+ ("FM_PCD_ManipNodeReplace Functionality supported only for Header Manipulation."));
-+
-+ ASSERT_COND(p_Manip->opcode == HMAN_OC);
-+ ASSERT_COND(p_Manip->manipParams.h_NextManip == p_Manip->h_NextManip);
-+ memcpy((uint8_t*)&p_Manip->manipParams, p_ManipParams,
-+ sizeof(p_Manip->manipParams));
-+ p_Manip->manipParams.h_NextManip = p_Manip->h_NextManip;
-+
-+ /* The replacement of the HdrManip depends on the node type.*/
-+ /*
-+ * (1) If this is an independent node, all its owners should be updated.
-+ *
-+ * (2) If it is the head of a cascaded chain (it does not have a "prev" but
-+ * it has a "next" and it has a "cascaded" indication), the next
-+ * node remains unchanged, and the behavior is as in (1).
-+ *
-+ * (3) If it is not the head, but part of a cascaded chain, it can
-+ * also be replaced as a regular node with just one owner.
-+ *
-+ * (4) If it is a part of a chain implemented as a unified table, the
-+ * whole table is replaced and the owners of the head node must be updated.
-+ *
-+ */
-+ /* lock shadow */
-+ if (!p_FmPcd->p_CcShadow)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
-+
-+ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ return ERROR_CODE(E_BUSY);
-+
-+ /* this routine creates a new manip action in the CC Shadow. */
-+ err = CreateManipActionShadow(p_Manip, p_ManipParams);
-+ if (err)
-+ {
-+ /* release the shadow lock taken above before bailing out */
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ /* If the owners list is empty (these are NOT the "owners" counter, but pointers from CC)
-+ * replace only the HMTD and no lock is required. Otherwise
-+ * lock the whole PCD
-+ * In case 4 MANIP_IS_UNIFIED_NON_FIRST(p_Manip) - Use the head node instead. */
-+ if (!FmPcdLockTryLockAll(p_FmPcd))
-+ {
-+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ p_ShadowHmct = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow, 16);
-+
-+ p_FirstManip = (t_FmPcdManip*)GetManipInfo(p_Manip,
-+ e_MANIP_HANDLER_TABLE_OWNER);
-+ ASSERT_COND(p_FirstManip);
-+
-+ if (!LIST_IsEmpty(&p_FirstManip->nodesLst))
-+ UpdateAdPtrOfNodesWhichPointsOnCrntMdfManip(
-+ p_FirstManip, &lstOfNodeshichPointsOnCrntMdfManip);
-+
-+ p_Hmtd = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMTD);
-+ ASSERT_COND(p_Hmtd);
-+ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_Hmtd, p_ShadowHmct,
-+ ((t_FmPcd*)(p_Manip->h_FmPcd)));
-+
-+ LIST_FOR_EACH(p_Pos, &lstOfNodeshichPointsOnCrntMdfManip)
-+ {
-+ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
-+ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_CcNodeInfo->h_CcNode,
-+ p_ShadowHmct, ((t_FmPcd*)(p_Manip->h_FmPcd)));
-+ }
-+
-+ p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
-+ ASSERT_COND(p_WholeHmct);
-+
-+ /* re-build the HMCT in the original location */
-+ err = CreateManipActionBackToOrig(p_Manip, p_ManipParams);
-+ if (err)
-+ {
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ p_Hmtd = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMTD);
-+ ASSERT_COND(p_Hmtd);
-+ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_Hmtd, p_WholeHmct,
-+ ((t_FmPcd*)p_Manip->h_FmPcd));
-+
-+ /* If LIST > 0, create a list of p_Ad's that point to the HMCT. Join also t_HMTD to this list.
-+ * For each p_Hmct (from list+fixed):
-+ * call Host Command to replace HMTD by a new one */
-+ LIST_FOR_EACH(p_Pos, &lstOfNodeshichPointsOnCrntMdfManip)
-+ {
-+ p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
-+ BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_CcNodeInfo->h_CcNode,
-+ p_WholeHmct, ((t_FmPcd*)(p_Manip->h_FmPcd)));
-+ }
-+
-+
-+ ReleaseLst(&lstOfNodeshichPointsOnCrntMdfManip);
-+
-+ FmPcdLockUnlockAll(p_FmPcd);
-+
-+ /* unlock shadow */
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_ManipNodeDelete(t_Handle h_ManipNode)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_ManipNode;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+
-+ if (p_Manip->owner)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("This manipulation node not be removed because this node is occupied, first - unbind this node "));
-+
-+ if (p_Manip->h_NextManip)
-+ {
-+ MANIP_SET_PREV(p_Manip->h_NextManip, NULL);
-+ FmPcdManipUpdateOwner(p_Manip->h_NextManip, FALSE);
-+ }
-+
-+ if (p_Manip->p_Hmct
-+ && (MANIP_IS_UNIFIED_FIRST(p_Manip) || !MANIP_IS_UNIFIED(p_Manip)))
-+ FM_MURAM_FreeMem(((t_FmPcd *)p_Manip->h_FmPcd)->h_FmMuram,
-+ p_Manip->p_Hmct);
-+
-+ if (p_Manip->h_Spinlock)
-+ {
-+ XX_FreeSpinlock(p_Manip->h_Spinlock);
-+ p_Manip->h_Spinlock = NULL;
-+ }
-+
-+ ReleaseManipHandler(p_Manip, p_Manip->h_FmPcd);
-+
-+ XX_Free(h_ManipNode);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_ManipGetStatistics(t_Handle h_ManipNode,
-+ t_FmPcdManipStats *p_FmPcdManipStats)
-+{
-+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_ManipNode;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdManipStats, E_NULL_POINTER);
-+
-+ switch (p_Manip->opcode)
-+ {
-+ case (HMAN_OC_IP_REASSEMBLY):
-+ return IpReassemblyStats(p_Manip,
-+ &p_FmPcdManipStats->u.reassem.u.ipReassem);
-+ case (HMAN_OC_IP_FRAGMENTATION):
-+ return IpFragmentationStats(p_Manip,
-+ &p_FmPcdManipStats->u.frag.u.ipFrag);
-+#if (DPAA_VERSION >= 11)
-+ case (HMAN_OC_CAPWAP_REASSEMBLY):
-+ return CapwapReassemblyStats(
-+ p_Manip, &p_FmPcdManipStats->u.reassem.u.capwapReassem);
-+ case (HMAN_OC_CAPWAP_FRAGMENTATION):
-+ return CapwapFragmentationStats(
-+ p_Manip, &p_FmPcdManipStats->u.frag.u.capwapFrag);
-+#endif /* (DPAA_VERSION >= 11) */
-+ default:
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("no statistics to this type of manip"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+t_Handle FM_PCD_StatisticsSetNode(t_Handle h_FmPcd, t_FmPcdStatsParams *p_StatsParams)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdManip *p_Manip;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_StatsParams,E_INVALID_HANDLE,NULL);
-+
-+ p_Manip = ManipOrStatsSetNode(h_FmPcd, (t_Handle)p_StatsParams, TRUE);
-+ if (!p_Manip)
-+ return NULL;
-+
-+ switch (p_Manip->opcode)
-+ {
-+ case (HMAN_OC_CAPWAP_INDEXED_STATS):
-+ /* Indexed statistics */
-+ err = IndxStats(p_StatsParams, p_Manip, p_FmPcd);
-+ break;
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED Statistics type"));
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ ReleaseManipHandler(p_Manip, p_FmPcd);
-+ XX_Free(p_Manip);
-+ return NULL;
-+ }
-+
-+ return p_Manip;
-+}
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_manip.h
-@@ -0,0 +1,555 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_manip.h
-+
-+ @Description FM PCD manip...
-+*//***************************************************************************/
-+#ifndef __FM_MANIP_H
-+#define __FM_MANIP_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+
-+#include "fm_cc.h"
-+
-+
-+/***********************************************************************/
-+/* Header manipulations defines */
-+/***********************************************************************/
-+
-+#define NUM_OF_SCRATCH_POOL_BUFFERS 1000 /*TODO - Change it!!*/
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+#define HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR 0x2e
-+#define HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER 0x31
-+#define HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX 0x2f
-+#define HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST 0x30
-+#define HMAN_OC_CAPWAP_REASSEMBLY 0x11 /* dummy */
-+#define HMAN_OC_CAPWAP_INDEXED_STATS 0x32 /* dummy */
-+#define HMAN_OC_CAPWAP_FRAGMENTATION 0x33
-+#else
-+#define HMAN_OC_CAPWAP_MANIP 0x2F
-+#define HMAN_OC_CAPWAP_FRAG_CHECK 0x2E
-+#define HMAN_OC_CAPWAP_FRAGMENTATION 0x33
-+#define HMAN_OC_CAPWAP_REASSEMBLY 0x30
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+#define HMAN_OC_IP_MANIP 0x34
-+#define HMAN_OC_IP_FRAGMENTATION 0x74
-+#define HMAN_OC_IP_REASSEMBLY 0xB4
-+#define HMAN_OC_IPSEC_MANIP 0xF4
-+#define HMAN_OC 0x35
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+#define HMAN_RMV_HDR 0x80000000
-+#define HMAN_INSRT_INT_FRM_HDR 0x40000000
-+
-+#define UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP 6
-+#define UDP_CHECKSUM_FIELD_SIZE 2
-+#define UDP_LENGTH_FIELD_OFFSET_FROM_UDP 4
-+
-+#define IPv4_DSCECN_FIELD_OFFSET_FROM_IP 1
-+#define IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP 2
-+#define IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP 10
-+#define VLAN_TAG_FIELD_OFFSET_FROM_ETH 12
-+#define IPv4_ID_FIELD_OFFSET_FROM_IP 4
-+
-+#define IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP 4
-+#define IPv6_NEXT_HEADER_OFFSET_FROM_IP 6
-+
-+#define FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE 0x80
-+#define FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN 8
-+#define FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE 32
-+#define FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE 4
-+#define FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE 8
-+
-+
-+#define FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_BETWEEN_FRAMES 0x40000000
-+#define FM_PCD_MANIP_CAPWAP_REASM_HALT_ON_DUPLICATE_FRAG 0x10000000
-+#define FM_PCD_MANIP_CAPWAP_REASM_AUTOMATIC_LEARNIN_HASH_8_WAYS 0x08000000
-+#define FM_PCD_MANIP_CAPWAP_REASM_PR_COPY 0x00800000
-+
-+#define FM_PCD_MANIP_CAPWAP_FRAG_COMPR_OPTION_FIELD_EN 0x80000000
-+
-+#define FM_PCD_MANIP_INDEXED_STATS_ENTRY_SIZE 4
-+#define FM_PCD_MANIP_INDEXED_STATS_CNIA 0x20000000
-+#define FM_PCD_MANIP_INDEXED_STATS_DPD 0x10000000
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+
-+#if (DPAA_VERSION >= 11)
-+#define FM_PCD_MANIP_CAPWAP_DTLS 0x00040000
-+#define FM_PCD_MANIP_CAPWAP_NADEN 0x20000000
-+
-+#define FM_PCD_MANIP_CAPWAP_FRAG_CHECK_MTU_SHIFT 16
-+#define FM_PCD_MANIP_CAPWAP_FRAG_CHECK_NO_FRAGMENTATION 0xFFFF0000
-+#define FM_PCD_MANIP_CAPWAP_FRAG_CHECK_CNIA 0x20000000
-+
-+#define FM_PCD_MANIP_CAPWAP_FRAG_COMPRESS_EN 0x04000000
-+#define FM_PCD_MANIP_CAPWAP_FRAG_SCRATCH_BPID 24
-+#define FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_EN 0x08000000
-+#define FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_MASK 0xFF000000
-+#define FM_PCD_MANIP_CAPWAP_FRAG_SG_BDID_SHIFT 24
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#define FM_PCD_MANIP_REASM_TABLE_SIZE 0x40
-+#define FM_PCD_MANIP_REASM_TABLE_ALIGN 8
-+
-+#define FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_SIZE 64
-+#define FM_PCD_MANIP_REASM_COMMON_PARAM_TABLE_ALIGN 8
-+#define FM_PCD_MANIP_REASM_TIME_OUT_BETWEEN_FRAMES 0x80000000
-+#define FM_PCD_MANIP_REASM_COUPLING_ENABLE 0x40000000
-+#define FM_PCD_MANIP_REASM_COUPLING_MASK 0xFF000000
-+#define FM_PCD_MANIP_REASM_COUPLING_SHIFT 24
-+#define FM_PCD_MANIP_REASM_LIODN_MASK 0x0000003F
-+#define FM_PCD_MANIP_REASM_LIODN_SHIFT 56
-+#define FM_PCD_MANIP_REASM_ELIODN_MASK 0x000003c0
-+#define FM_PCD_MANIP_REASM_ELIODN_SHIFT 38
-+#define FM_PCD_MANIP_REASM_COMMON_INT_BUFFER_IDX_MASK 0x000000FF
-+#define FM_PCD_MANIP_REASM_COMMON_INT_BUFFER_IDX_SHIFT 24
-+#define FM_PCD_MANIP_REASM_TIMEOUT_THREAD_THRESH 1024
-+
-+#define FM_PCD_MANIP_IP_MTU_SHIFT 16
-+#define FM_PCD_MANIP_IP_NO_FRAGMENTATION 0xFFFF0000
-+#define FM_PCD_MANIP_IP_CNIA 0x20000000
-+
-+#define FM_PCD_MANIP_IP_FRAG_DF_SHIFT 28
-+#define FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID 24
-+#define FM_PCD_MANIP_IP_FRAG_SG_BDID_EN 0x08000000
-+#define FM_PCD_MANIP_IP_FRAG_SG_BDID_MASK 0xFF000000
-+#define FM_PCD_MANIP_IP_FRAG_SG_BDID_SHIFT 24
-+
-+#define FM_PCD_MANIP_IPSEC_DEC 0x10000000
-+#define FM_PCD_MANIP_IPSEC_VIPV_EN 0x08000000
-+#define FM_PCD_MANIP_IPSEC_ECN_EN 0x04000000
-+#define FM_PCD_MANIP_IPSEC_DSCP_EN 0x02000000
-+#define FM_PCD_MANIP_IPSEC_VIPL_EN 0x01000000
-+#define FM_PCD_MANIP_IPSEC_NADEN 0x20000000
-+
-+#define FM_PCD_MANIP_IPSEC_IP_HDR_LEN_MASK 0x00FF0000
-+#define FM_PCD_MANIP_IPSEC_IP_HDR_LEN_SHIFT 16
-+
-+#define FM_PCD_MANIP_IPSEC_ARW_SIZE_MASK 0xFFFF0000
-+#define FM_PCD_MANIP_IPSEC_ARW_SIZE_SHIFT 16
-+
-+#define e_FM_MANIP_IP_INDX 1
-+
-+#define HMCD_OPCODE_GENERIC_RMV 0x01
-+#define HMCD_OPCODE_GENERIC_INSRT 0x02
-+#define HMCD_OPCODE_GENERIC_REPLACE 0x05
-+#define HMCD_OPCODE_L2_RMV 0x08
-+#define HMCD_OPCODE_L2_INSRT 0x09
-+#define HMCD_OPCODE_VLAN_PRI_UPDATE 0x0B
-+#define HMCD_OPCODE_IPV4_UPDATE 0x0C
-+#define HMCD_OPCODE_IPV6_UPDATE 0x10
-+#define HMCD_OPCODE_TCP_UDP_UPDATE 0x0E
-+#define HMCD_OPCODE_TCP_UDP_CHECKSUM 0x14
-+#define HMCD_OPCODE_REPLACE_IP 0x12
-+#define HMCD_OPCODE_RMV_TILL 0x15
-+#define HMCD_OPCODE_UDP_INSRT 0x16
-+#define HMCD_OPCODE_IP_INSRT 0x17
-+#define HMCD_OPCODE_CAPWAP_RMV 0x18
-+#define HMCD_OPCODE_CAPWAP_INSRT 0x18
-+#define HMCD_OPCODE_GEN_FIELD_REPLACE 0x19
-+
-+#define HMCD_LAST 0x00800000
-+
-+#define HMCD_DSCP_VALUES 64
-+
-+#define HMCD_BASIC_SIZE 4
-+#define HMCD_PTR_SIZE 4
-+#define HMCD_PARAM_SIZE 4
-+#define HMCD_IPV4_ADDR_SIZE 4
-+#define HMCD_IPV6_ADDR_SIZE 0x10
-+#define HMCD_L4_HDR_SIZE 8
-+
-+#define HMCD_CAPWAP_INSRT 0x00010000
-+#define HMCD_INSRT_UDP_LITE 0x00010000
-+#define HMCD_IP_ID_MASK 0x0000FFFF
-+#define HMCD_IP_SIZE_MASK 0x0000FF00
-+#define HMCD_IP_SIZE_SHIFT 8
-+#define HMCD_IP_LAST_PID_MASK 0x000000FF
-+#define HMCD_IP_OR_QOS 0x00010000
-+#define HMCD_IP_L4_CS_CALC 0x00040000
-+#define HMCD_IP_DF_MODE 0x00400000
-+
-+
-+#define HMCD_OC_SHIFT 24
-+
-+#define HMCD_RMV_OFFSET_SHIFT 0
-+#define HMCD_RMV_SIZE_SHIFT 8
-+
-+#define HMCD_INSRT_OFFSET_SHIFT 0
-+#define HMCD_INSRT_SIZE_SHIFT 8
-+
-+#define HMTD_CFG_TYPE 0x4000
-+#define HMTD_CFG_EXT_HMCT 0x0080
-+#define HMTD_CFG_PRS_AFTER_HM 0x0040
-+#define HMTD_CFG_NEXT_AD_EN 0x0020
-+
-+#define HMCD_RMV_L2_ETHERNET 0
-+#define HMCD_RMV_L2_STACKED_QTAGS 1
-+#define HMCD_RMV_L2_ETHERNET_AND_MPLS 2
-+#define HMCD_RMV_L2_MPLS 3
-+#define HMCD_RMV_L2_PPPOE 4
-+
-+#define HMCD_INSRT_L2_MPLS 0
-+#define HMCD_INSRT_N_UPDATE_L2_MPLS 1
-+#define HMCD_INSRT_L2_PPPOE 2
-+#define HMCD_INSRT_L2_SIZE_SHIFT 24
-+
-+#define HMCD_L2_MODE_SHIFT 16
-+
-+#define HMCD_VLAN_PRI_REP_MODE_SHIFT 16
-+#define HMCD_VLAN_PRI_UPDATE 0
-+#define HMCD_VLAN_PRI_UPDATE_DSCP_TO_VPRI 1
-+
-+#define HMCD_IPV4_UPDATE_TTL 0x00000001
-+#define HMCD_IPV4_UPDATE_TOS 0x00000002
-+#define HMCD_IPV4_UPDATE_DST 0x00000020
-+#define HMCD_IPV4_UPDATE_SRC 0x00000040
-+#define HMCD_IPV4_UPDATE_ID 0x00000080
-+#define HMCD_IPV4_UPDATE_TOS_SHIFT 8
-+
-+#define HMCD_IPV6_UPDATE_HL 0x00000001
-+#define HMCD_IPV6_UPDATE_TC 0x00000002
-+#define HMCD_IPV6_UPDATE_DST 0x00000040
-+#define HMCD_IPV6_UPDATE_SRC 0x00000080
-+#define HMCD_IPV6_UPDATE_TC_SHIFT 8
-+
-+#define HMCD_TCP_UDP_UPDATE_DST 0x00004000
-+#define HMCD_TCP_UDP_UPDATE_SRC 0x00008000
-+#define HMCD_TCP_UDP_UPDATE_SRC_SHIFT 16
-+
-+#define HMCD_IP_REPLACE_REPLACE_IPV4 0x00000000
-+#define HMCD_IP_REPLACE_REPLACE_IPV6 0x00010000
-+#define HMCD_IP_REPLACE_TTL_HL 0x00200000
-+#define HMCD_IP_REPLACE_ID 0x00400000
-+
-+#define HMCD_IP_REPLACE_L3HDRSIZE_SHIFT 24
-+
-+#define HMCD_GEN_FIELD_SIZE_SHIFT 16
-+#define HMCD_GEN_FIELD_SRC_OFF_SHIFT 8
-+#define HMCD_GEN_FIELD_DST_OFF_SHIFT 0
-+#define HMCD_GEN_FIELD_MASK_EN 0x00400000
-+
-+#define HMCD_GEN_FIELD_MASK_OFF_SHIFT 16
-+#define HMCD_GEN_FIELD_MASK_SHIFT 24
-+
-+#define DSCP_TO_VLAN_TABLE_SIZE 32
-+
-+#define MANIP_GET_HMCT_SIZE(h_Manip) (((t_FmPcdManip *)h_Manip)->tableSize)
-+#define MANIP_GET_DATA_SIZE(h_Manip) (((t_FmPcdManip *)h_Manip)->dataSize)
-+
-+#define MANIP_GET_HMCT_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->p_Hmct)
-+#define MANIP_GET_DATA_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->p_Data)
-+
-+#define MANIP_SET_HMCT_PTR(h_Manip, h_NewPtr) (((t_FmPcdManip *)h_Manip)->p_Hmct = h_NewPtr)
-+#define MANIP_SET_DATA_PTR(h_Manip, h_NewPtr) (((t_FmPcdManip *)h_Manip)->p_Data = h_NewPtr)
-+
-+#define MANIP_GET_HMTD_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->h_Ad)
-+#define MANIP_DONT_REPARSE(h_Manip) (((t_FmPcdManip *)h_Manip)->dontParseAfterManip)
-+#define MANIP_SET_PREV(h_Manip, h_Prev) (((t_FmPcdManip *)h_Manip)->h_PrevManip = h_Prev)
-+#define MANIP_GET_OWNERS(h_Manip) (((t_FmPcdManip *)h_Manip)->owner)
-+#define MANIP_GET_TYPE(h_Manip) (((t_FmPcdManip *)h_Manip)->type)
-+#define MANIP_SET_UNIFIED_TBL_PTR_INDICATION(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedTablePtr = TRUE)
-+#define MANIP_GET_MURAM(h_Manip) (((t_FmPcd *)((t_FmPcdManip *)h_Manip)->h_FmPcd)->h_FmMuram)
-+#define MANIP_FREE_HMTD(h_Manip) \
-+ {if (((t_FmPcdManip *)h_Manip)->muramAllocate) \
-+ FM_MURAM_FreeMem(((t_FmPcd *)((t_FmPcdManip *)h_Manip)->h_FmPcd)->h_FmMuram, ((t_FmPcdManip *)h_Manip)->h_Ad);\
-+ else \
-+ XX_Free(((t_FmPcdManip *)h_Manip)->h_Ad); \
-+ ((t_FmPcdManip *)h_Manip)->h_Ad = NULL; \
-+ }
-+/* position regarding Manip SW structure */
-+#define MANIP_IS_FIRST(h_Manip) (!(((t_FmPcdManip *)h_Manip)->h_PrevManip))
-+#define MANIP_IS_CASCADED(h_Manip) (((t_FmPcdManip *)h_Manip)->cascaded)
-+#define MANIP_IS_UNIFIED(h_Manip) (!(((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_NONE))
-+#define MANIP_IS_UNIFIED_NON_FIRST(h_Manip) ((((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_MID) || \
-+ (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_LAST))
-+#define MANIP_IS_UNIFIED_NON_LAST(h_Manip) ((((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_FIRST) ||\
-+ (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_MID))
-+#define MANIP_IS_UNIFIED_FIRST(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_FIRST)
-+#define MANIP_IS_UNIFIED_LAST(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_LAST)
-+
-+#define MANIP_UPDATE_UNIFIED_POSITION(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition = \
-+ (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_NONE)? \
-+ e_MANIP_UNIFIED_LAST : e_MANIP_UNIFIED_MID)
-+
-+typedef enum e_ManipUnifiedPosition {
-+ e_MANIP_UNIFIED_NONE = 0,
-+ e_MANIP_UNIFIED_FIRST,
-+ e_MANIP_UNIFIED_MID,
-+ e_MANIP_UNIFIED_LAST
-+} e_ManipUnifiedPosition;
-+
-+typedef enum e_ManipInfo {
-+ e_MANIP_HMTD,
-+ e_MANIP_HMCT,
-+ e_MANIP_HANDLER_TABLE_OWNER
-+}e_ManipInfo;
-+/***********************************************************************/
-+/* Memory map */
-+/***********************************************************************/
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+typedef struct t_CapwapReasmPram {
-+ volatile uint32_t mode;
-+ volatile uint32_t autoLearnHashTblPtr;
-+ volatile uint32_t intStatsTblPtr;
-+ volatile uint32_t reasmFrmDescPoolTblPtr;
-+ volatile uint32_t reasmFrmDescIndexPoolTblPtr;
-+ volatile uint32_t timeOutTblPtr;
-+ volatile uint32_t bufferPoolIdAndRisc1SetIndexes;
-+ volatile uint32_t risc23SetIndexes;
-+ volatile uint32_t risc4SetIndexesAndExtendedStatsTblPtr;
-+ volatile uint32_t extendedStatsTblPtr;
-+ volatile uint32_t expirationDelay;
-+ volatile uint32_t totalProcessedFragCounter;
-+ volatile uint32_t totalUnsuccessfulReasmFramesCounter;
-+ volatile uint32_t totalDuplicatedFragCounter;
-+ volatile uint32_t totalMalformdFragCounter;
-+ volatile uint32_t totalTimeOutCounter;
-+ volatile uint32_t totalSetBusyCounter;
-+ volatile uint32_t totalRfdPoolBusyCounter;
-+ volatile uint32_t totalDiscardedFragsCounter;
-+ volatile uint32_t totalMoreThan16FramesCounter;
-+ volatile uint32_t internalBufferBusy;
-+ volatile uint32_t externalBufferBusy;
-+ volatile uint32_t reserved1[4];
-+} t_CapwapReasmPram;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+
-+typedef _Packed struct t_ReassTbl {
-+ volatile uint16_t waysNumAndSetSize;
-+ volatile uint16_t autoLearnHashKeyMask;
-+ volatile uint32_t reassCommonPrmTblPtr;
-+ volatile uint32_t liodnAlAndAutoLearnHashTblPtrHi;
-+ volatile uint32_t autoLearnHashTblPtrLow;
-+ volatile uint32_t liodnSlAndAutoLearnSetLockTblPtrHi;
-+ volatile uint32_t autoLearnSetLockTblPtrLow;
-+ volatile uint16_t minFragSize; /* Not relevant for CAPWAP*/
-+ volatile uint16_t maxReassemblySize; /* Only relevant for CAPWAP*/
-+ volatile uint32_t totalSuccessfullyReasmFramesCounter;
-+ volatile uint32_t totalValidFragmentCounter;
-+ volatile uint32_t totalProcessedFragCounter;
-+ volatile uint32_t totalMalformdFragCounter;
-+ volatile uint32_t totalSetBusyCounter;
-+ volatile uint32_t totalDiscardedFragsCounter;
-+ volatile uint32_t totalMoreThan16FramesCounter;
-+ volatile uint32_t reserved2[2];
-+} _PackedType t_ReassTbl;
-+
-+typedef struct t_ReassCommonTbl {
-+ volatile uint32_t timeoutModeAndFqid;
-+ volatile uint32_t reassFrmDescIndexPoolTblPtr;
-+ volatile uint32_t liodnAndReassFrmDescPoolPtrHi;
-+ volatile uint32_t reassFrmDescPoolPtrLow;
-+ volatile uint32_t timeOutTblPtr;
-+ volatile uint32_t expirationDelay;
-+ volatile uint32_t internalBufferManagement;
-+ volatile uint32_t reserved2;
-+ volatile uint32_t totalTimeOutCounter;
-+ volatile uint32_t totalRfdPoolBusyCounter;
-+ volatile uint32_t totalInternalBufferBusy;
-+ volatile uint32_t totalExternalBufferBusy;
-+ volatile uint32_t totalSgFragmentCounter;
-+ volatile uint32_t totalDmaSemaphoreDepletionCounter;
-+ volatile uint32_t totalNCSPCounter;
-+ volatile uint32_t discardMask;
-+} t_ReassCommonTbl;
-+
-+typedef _Packed struct t_Hmtd {
-+ volatile uint16_t cfg;
-+ volatile uint8_t eliodnOffset;
-+ volatile uint8_t extHmcdBasePtrHi;
-+ volatile uint32_t hmcdBasePtr;
-+ volatile uint16_t nextAdIdx;
-+ volatile uint8_t res1;
-+ volatile uint8_t opCode;
-+ volatile uint32_t res2;
-+} _PackedType t_Hmtd;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/***********************************************************************/
-+/* Driver's internal structures */
-+/***********************************************************************/
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+typedef struct
-+{
-+ t_Handle p_AutoLearnHashTbl;
-+ t_Handle p_ReassmFrmDescrPoolTbl;
-+ t_Handle p_ReassmFrmDescrIndxPoolTbl;
-+ t_Handle p_TimeOutTbl;
-+ uint16_t maxNumFramesInProcess;
-+ uint8_t numOfTasks;
-+ //uint8_t poolId;
-+ uint8_t prOffset;
-+ uint16_t dataOffset;
-+ uint8_t sgBpid;
-+ uint8_t hwPortId;
-+ uint32_t fqidForTimeOutFrames;
-+ uint32_t timeoutRoutineRequestTime;
-+ uint32_t bitFor1Micro;
-+} t_CapwapFragParams;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+
-+typedef struct
-+{
-+ t_AdOfTypeContLookup *p_Frag;
-+#if (DPAA_VERSION == 10)
-+ uint8_t scratchBpid;
-+#endif /* (DPAA_VERSION == 10) */
-+} t_FragParams;
-+
-+typedef struct t_ReassmParams
-+{
-+ e_NetHeaderType hdr; /* Header selection */
-+ t_ReassCommonTbl *p_ReassCommonTbl;
-+ uintptr_t reassFrmDescrIndxPoolTblAddr;
-+ uintptr_t reassFrmDescrPoolTblAddr;
-+ uintptr_t timeOutTblAddr;
-+ uintptr_t internalBufferPoolManagementIndexAddr;
-+ uintptr_t internalBufferPoolAddr;
-+ uint32_t maxNumFramesInProcess;
-+ uint8_t sgBpid;
-+ uint8_t dataMemId;
-+ uint16_t dataLiodnOffset;
-+ uint32_t fqidForTimeOutFrames;
-+ e_FmPcdManipReassemTimeOutMode timeOutMode;
-+ uint32_t timeoutThresholdForReassmProcess;
-+ union {
-+ struct {
-+ t_Handle h_Ipv4Ad;
-+ t_Handle h_Ipv6Ad;
-+ bool ipv6Assigned;
-+ t_ReassTbl *p_Ipv4ReassTbl;
-+ t_ReassTbl *p_Ipv6ReassTbl;
-+ uintptr_t ipv4AutoLearnHashTblAddr;
-+ uintptr_t ipv6AutoLearnHashTblAddr;
-+ uintptr_t ipv4AutoLearnSetLockTblAddr;
-+ uintptr_t ipv6AutoLearnSetLockTblAddr;
-+ uint16_t minFragSize[2];
-+ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry[2];
-+ uint8_t relativeSchemeId[2];
-+ t_Handle h_Ipv4Scheme;
-+ t_Handle h_Ipv6Scheme;
-+ uint32_t nonConsistentSpFqid;
-+ } ip;
-+ struct {
-+ t_Handle h_Ad;
-+ t_ReassTbl *p_ReassTbl;
-+ uintptr_t autoLearnHashTblAddr;
-+ uintptr_t autoLearnSetLockTblAddr;
-+ uint16_t maxRessembledsSize;
-+ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry;
-+ uint8_t relativeSchemeId;
-+ t_Handle h_Scheme;
-+ } capwap;
-+ };
-+} t_ReassmParams;
-+
-+typedef struct{
-+ e_FmPcdManipType type;
-+ t_FmPcdManipParams manipParams;
-+ bool muramAllocate;
-+ t_Handle h_Ad;
-+ uint32_t opcode;
-+ bool rmv;
-+ bool insrt;
-+ t_Handle h_NextManip;
-+ t_Handle h_PrevManip;
-+ e_FmPcdManipType nextManipType;
-+ /* HdrManip parameters*/
-+ uint8_t *p_Hmct;
-+ uint8_t *p_Data;
-+ bool dontParseAfterManip;
-+ bool fieldUpdate;
-+ bool custom;
-+ uint16_t tableSize;
-+ uint8_t dataSize;
-+ bool cascaded;
-+ e_ManipUnifiedPosition unifiedPosition;
-+ /* end HdrManip */
-+ uint8_t *p_Template;
-+ uint16_t owner;
-+ uint32_t updateParams;
-+ uint32_t shadowUpdateParams;
-+ bool frag;
-+ bool reassm;
-+ uint16_t sizeForFragmentation;
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ t_Handle h_Frag;
-+ t_CapwapFragParams capwapFragParams;
-+#endif /* (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10)) */
-+ union {
-+ t_ReassmParams reassmParams;
-+ t_FragParams fragParams;
-+ };
-+ uint8_t icOffset;
-+ uint16_t ownerTmp;
-+ bool cnia;
-+ t_Handle p_StatsTbl;
-+ t_Handle h_FmPcd;
-+ t_List nodesLst;
-+ t_Handle h_Spinlock;
-+} t_FmPcdManip;
-+
-+typedef struct t_FmPcdCcSavedManipParams
-+{
-+ union
-+ {
-+ struct
-+ {
-+ uint16_t dataOffset;
-+ //uint8_t poolId;
-+ }capwapParams;
-+ struct
-+ {
-+ uint16_t dataOffset;
-+ uint8_t poolId;
-+ }ipParams;
-+ };
-+
-+} t_FmPcdCcSavedManipParams;
-+
-+
-+#endif /* __FM_MANIP_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c
-@@ -0,0 +1,2095 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_pcd.c
-+
-+ @Description FM PCD ...
-+*//***************************************************************************/
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "xx_ext.h"
-+#include "sprint_ext.h"
-+#include "debug_ext.h"
-+#include "net_ext.h"
-+#include "fm_ext.h"
-+#include "fm_pcd_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_pcd.h"
-+#include "fm_pcd_ipc.h"
-+#include "fm_hc.h"
-+#include "fm_muram_ext.h"
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+
-+static t_Error CheckFmPcdParameters(t_FmPcd *p_FmPcd)
-+{
-+ if (!p_FmPcd->h_Fm)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("h_Fm has to be initialized"));
-+
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ {
-+ if (p_FmPcd->p_FmPcdKg && !p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something WRONG"));
-+
-+ if (p_FmPcd->p_FmPcdPlcr && !p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something WRONG"));
-+
-+ if (!p_FmPcd->f_Exception)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdExceptions has to be initialized"));
-+
-+ if ((!p_FmPcd->f_FmPcdIndexedException) && (p_FmPcd->p_FmPcdPlcr || p_FmPcd->p_FmPcdKg))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdIndexedException has to be initialized"));
-+
-+ if (p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit > PRS_MAX_CYCLE_LIMIT)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("prsMaxParseCycleLimit has to be less than 8191"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+static volatile bool blockingFlag = FALSE;
-+static void IpcMsgCompletionCB(t_Handle h_FmPcd,
-+ uint8_t *p_Msg,
-+ uint8_t *p_Reply,
-+ uint32_t replyLength,
-+ t_Error status)
-+{
-+ UNUSED(h_FmPcd);UNUSED(p_Msg);UNUSED(p_Reply);UNUSED(replyLength);UNUSED(status);
-+ blockingFlag = FALSE;
-+}
-+
-+static t_Error IpcMsgHandlerCB(t_Handle h_FmPcd,
-+ uint8_t *p_Msg,
-+ uint32_t msgLength,
-+ uint8_t *p_Reply,
-+ uint32_t *p_ReplyLength)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_Error err = E_OK;
-+ t_FmPcdIpcMsg *p_IpcMsg = (t_FmPcdIpcMsg*)p_Msg;
-+ t_FmPcdIpcReply *p_IpcReply = (t_FmPcdIpcReply*)p_Reply;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((msgLength >= sizeof(uint32_t)), E_INVALID_VALUE);
-+
-+#ifdef DISABLE_SANITY_CHECKS
-+ UNUSED(msgLength);
-+#endif /* DISABLE_SANITY_CHECKS */
-+
-+ ASSERT_COND(p_Msg);
-+
-+ memset(p_IpcReply, 0, (sizeof(uint8_t) * FM_PCD_MAX_REPLY_SIZE));
-+ *p_ReplyLength = 0;
-+
-+ switch (p_IpcMsg->msgId)
-+ {
-+ case (FM_PCD_MASTER_IS_ALIVE):
-+ *(uint8_t*)(p_IpcReply->replyBody) = 1;
-+ p_IpcReply->error = E_OK;
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ break;
-+ case (FM_PCD_MASTER_IS_ENABLED):
-+ /* count partitions registrations */
-+ if (p_FmPcd->enabled)
-+ p_FmPcd->numOfEnabledGuestPartitionsPcds++;
-+ *(uint8_t*)(p_IpcReply->replyBody) = (uint8_t)p_FmPcd->enabled;
-+ p_IpcReply->error = E_OK;
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ break;
-+ case (FM_PCD_GUEST_DISABLE):
-+ if (p_FmPcd->numOfEnabledGuestPartitionsPcds)
-+ {
-+ p_FmPcd->numOfEnabledGuestPartitionsPcds--;
-+ p_IpcReply->error = E_OK;
-+ }
-+ else
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_STATE,("Trying to disable an unregistered partition"));
-+ p_IpcReply->error = E_INVALID_STATE;
-+ }
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+ case (FM_PCD_GET_COUNTER):
-+ {
-+ e_FmPcdCounters inCounter;
-+ uint32_t outCounter;
-+
-+ memcpy((uint8_t*)&inCounter, p_IpcMsg->msgBody, sizeof(uint32_t));
-+ outCounter = FM_PCD_GetCounter(h_FmPcd, inCounter);
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&outCounter, sizeof(uint32_t));
-+ p_IpcReply->error = E_OK;
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_PCD_ALLOC_KG_SCHEMES):
-+ {
-+ t_FmPcdIpcKgSchemesParams ipcSchemesParams;
-+
-+ memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams));
-+ err = FmPcdKgAllocSchemes(h_FmPcd,
-+ ipcSchemesParams.numOfSchemes,
-+ ipcSchemesParams.guestId,
-+ p_IpcReply->replyBody);
-+ p_IpcReply->error = err;
-+ *p_ReplyLength = sizeof(uint32_t) + ipcSchemesParams.numOfSchemes*sizeof(uint8_t);
-+ break;
-+ }
-+ case (FM_PCD_FREE_KG_SCHEMES):
-+ {
-+ t_FmPcdIpcKgSchemesParams ipcSchemesParams;
-+
-+ memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams));
-+ err = FmPcdKgFreeSchemes(h_FmPcd,
-+ ipcSchemesParams.numOfSchemes,
-+ ipcSchemesParams.guestId,
-+ ipcSchemesParams.schemesIds);
-+ p_IpcReply->error = err;
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_PCD_ALLOC_KG_CLSPLAN):
-+ {
-+ t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams;
-+
-+ memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams));
-+ err = KgAllocClsPlanEntries(h_FmPcd,
-+ ipcKgClsPlanParams.numOfClsPlanEntries,
-+ ipcKgClsPlanParams.guestId,
-+ p_IpcReply->replyBody);
-+ p_IpcReply->error = err;
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ break;
-+ }
-+ case (FM_PCD_FREE_KG_CLSPLAN):
-+ {
-+ t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams;
-+
-+ memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams));
-+ KgFreeClsPlanEntries(h_FmPcd,
-+ ipcKgClsPlanParams.numOfClsPlanEntries,
-+ ipcKgClsPlanParams.guestId,
-+ ipcKgClsPlanParams.clsPlanBase);
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_PCD_ALLOC_PROFILES):
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ uint16_t base;
-+ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
-+ base = PlcrAllocProfilesForPartition(h_FmPcd,
-+ ipcAllocParams.base,
-+ ipcAllocParams.num,
-+ ipcAllocParams.guestId);
-+ memcpy(p_IpcReply->replyBody, (uint16_t*)&base, sizeof(uint16_t));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint16_t);
-+ break;
-+ }
-+ case (FM_PCD_FREE_PROFILES):
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
-+ PlcrFreeProfilesForPartition(h_FmPcd,
-+ ipcAllocParams.base,
-+ ipcAllocParams.num,
-+ ipcAllocParams.guestId);
-+ break;
-+ }
-+ case (FM_PCD_SET_PORT_PROFILES):
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
-+ PlcrSetPortProfiles(h_FmPcd,
-+ ipcAllocParams.guestId,
-+ ipcAllocParams.num,
-+ ipcAllocParams.base);
-+ break;
-+ }
-+ case (FM_PCD_CLEAR_PORT_PROFILES):
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
-+ PlcrClearPortProfiles(h_FmPcd,
-+ ipcAllocParams.guestId);
-+ break;
-+ }
-+ case (FM_PCD_GET_SW_PRS_OFFSET):
-+ {
-+ t_FmPcdIpcSwPrsLable ipcSwPrsLable;
-+ uint32_t swPrsOffset;
-+
-+ memcpy((uint8_t*)&ipcSwPrsLable, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcSwPrsLable));
-+ swPrsOffset =
-+ FmPcdGetSwPrsOffset(h_FmPcd,
-+ (e_NetHeaderType)ipcSwPrsLable.enumHdr,
-+ ipcSwPrsLable.indexPerHdr);
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&swPrsOffset, sizeof(uint32_t));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_PCD_PRS_INC_PORT_STATS):
-+ {
-+ t_FmPcdIpcPrsIncludePort ipcPrsIncludePort;
-+
-+ memcpy((uint8_t*)&ipcPrsIncludePort, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcPrsIncludePort));
-+ PrsIncludePortInStatistics(h_FmPcd,
-+ ipcPrsIncludePort.hardwarePortId,
-+ ipcPrsIncludePort.include);
-+ break;
-+ }
-+ default:
-+ *p_ReplyLength = 0;
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!"));
-+ }
-+ return E_OK;
-+}
-+
-+static uint32_t NetEnvLock(t_Handle h_NetEnv)
-+{
-+ ASSERT_COND(h_NetEnv);
-+ return XX_LockIntrSpinlock(((t_FmPcdNetEnv*)h_NetEnv)->h_Spinlock);
-+}
-+
-+static void NetEnvUnlock(t_Handle h_NetEnv, uint32_t intFlags)
-+{
-+ ASSERT_COND(h_NetEnv);
-+ XX_UnlockIntrSpinlock(((t_FmPcdNetEnv*)h_NetEnv)->h_Spinlock, intFlags);
-+}
-+
-+static void EnqueueLockToFreeLst(t_FmPcd *p_FmPcd, t_FmPcdLock *p_Lock)
-+{
-+ uint32_t intFlags;
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
-+ LIST_AddToTail(&p_Lock->node, &p_FmPcd->freeLocksLst);
-+ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
-+}
-+
-+static t_FmPcdLock * DequeueLockFromFreeLst(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdLock *p_Lock = NULL;
-+ uint32_t intFlags;
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
-+ if (!LIST_IsEmpty(&p_FmPcd->freeLocksLst))
-+ {
-+ p_Lock = FM_PCD_LOCK_OBJ(p_FmPcd->freeLocksLst.p_Next);
-+ LIST_DelAndInit(&p_Lock->node);
-+ }
-+ if (p_FmPcd->h_Spinlock)
-+ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
-+
-+ return p_Lock;
-+}
-+
-+static void EnqueueLockToAcquiredLst(t_FmPcd *p_FmPcd, t_FmPcdLock *p_Lock)
-+{
-+ uint32_t intFlags;
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
-+ LIST_AddToTail(&p_Lock->node, &p_FmPcd->acquiredLocksLst);
-+ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
-+}
-+
-+static t_Error FillFreeLocksLst(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdLock *p_Lock;
-+ int i;
-+
-+ for (i=0; i<10; i++)
-+ {
-+ p_Lock = (t_FmPcdLock *)XX_Malloc(sizeof(t_FmPcdLock));
-+ if (!p_Lock)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("FM-PCD lock obj!"));
-+ memset(p_Lock, 0, sizeof(t_FmPcdLock));
-+ INIT_LIST(&p_Lock->node);
-+ p_Lock->h_Spinlock = XX_InitSpinlock();
-+ if (!p_Lock->h_Spinlock)
-+ {
-+ XX_Free(p_Lock);
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("FM-PCD spinlock obj!"));
-+ }
-+ EnqueueLockToFreeLst(p_FmPcd, p_Lock);
-+ }
-+
-+ return E_OK;
-+}
-+
-+static void ReleaseFreeLocksLst(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdLock *p_Lock;
-+
-+ p_Lock = DequeueLockFromFreeLst(p_FmPcd);
-+ while (p_Lock)
-+ {
-+ XX_FreeSpinlock(p_Lock->h_Spinlock);
-+ XX_Free(p_Lock);
-+ p_Lock = DequeueLockFromFreeLst(p_FmPcd);
-+ }
-+}
-+
-+
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+
-+void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId)
-+{
-+ ASSERT_COND(p_FmPcd);
-+ p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = clsPlanGrpId;
-+}
-+
-+t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams)
-+{
-+ uint8_t netEnvId = p_GrpParams->netEnvId;
-+ int i, k, j;
-+
-+ ASSERT_COND(p_FmPcd);
-+ if (p_FmPcd->netEnvs[netEnvId].clsPlanGrpId != ILLEGAL_CLS_PLAN)
-+ {
-+ p_GrpParams->grpExists = TRUE;
-+ p_GrpParams->clsPlanGrpId = p_FmPcd->netEnvs[netEnvId].clsPlanGrpId;
-+ return E_OK;
-+ }
-+
-+ for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++)
-+ {
-+ for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++)
-+ {
-+ /* if an option exists, add it to the opts list */
-+ if (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt)
-+ {
-+ /* check if this option already exists, add if it doesn't */
-+ for (j = 0;j<p_GrpParams->numOfOptions;j++)
-+ {
-+ if (p_GrpParams->options[j] == p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt)
-+ break;
-+ }
-+ p_GrpParams->optVectors[j] |= p_FmPcd->netEnvs[netEnvId].unitsVectors[i];
-+ if (j == p_GrpParams->numOfOptions)
-+ {
-+ p_GrpParams->options[p_GrpParams->numOfOptions] = p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt;
-+ p_GrpParams->numOfOptions++;
-+ }
-+ }
-+ }
-+ }
-+
-+ if (p_GrpParams->numOfOptions == 0)
-+ {
-+ if (p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId != ILLEGAL_CLS_PLAN)
-+ {
-+ p_GrpParams->grpExists = TRUE;
-+ p_GrpParams->clsPlanGrpId = p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId;
-+ }
-+ }
-+
-+ return E_OK;
-+
-+}
-+
-+t_Error PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector)
-+{
-+ uint8_t j,k;
-+
-+ *p_Vector = 0;
-+
-+ ASSERT_COND(p_FmPcd);
-+ for (j=0; ((j < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[0].hdr != HEADER_TYPE_NONE)); j++)
-+ {
-+ for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].hdr != HEADER_TYPE_NONE)); k++)
-+ {
-+ if (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].opt == opt)
-+ *p_Vector |= p_FmPcd->netEnvs[netEnvId].unitsVectors[j];
-+ }
-+ }
-+
-+ if (!*p_Vector)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Requested option was not defined for this Network Environment Characteristics module"));
-+ else
-+ return E_OK;
-+}
-+
-+t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params)
-+{
-+ int i;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(p_Params->netEnvId < FM_MAX_NUM_OF_PORTS);
-+
-+ p_Params->vector = 0;
-+ for (i=0; i<p_Params->numOfDistinctionUnits ;i++)
-+ {
-+ if (p_FmPcd->netEnvs[p_Params->netEnvId].units[p_Params->unitIds[i]].hdrs[0].hdr == HEADER_TYPE_NONE)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Requested unit was not defined for this Network Environment Characteristics module"));
-+ ASSERT_COND(p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]]);
-+ p_Params->vector |= p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]];
-+ }
-+
-+ return E_OK;
-+}
-+
-+bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector)
-+{
-+ int i=0, k;
-+
-+ ASSERT_COND(p_FmPcd);
-+ /* check whether a given unit may be used by non-clsPlan users. */
-+ /* first, recognize the unit by its vector */
-+ while (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)
-+ {
-+ if (p_FmPcd->netEnvs[netEnvId].unitsVectors[i] == unitVector)
-+ {
-+ for (k=0;
-+ ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE));
-+ k++)
-+ /* check that no option exists */
-+ if ((protocolOpt_t)p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt)
-+ return FALSE;
-+ break;
-+ }
-+ i++;
-+ }
-+    /* assert that a unit was found to match the vector */
-+ ASSERT_COND(p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE);
-+
-+ return TRUE;
-+}
-+bool FmPcdNetEnvIsHdrExist(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ int i, k;
-+
-+ ASSERT_COND(p_FmPcd);
-+
-+ for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++)
-+ {
-+ for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++)
-+ if (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr == hdr)
-+ return TRUE;
-+ }
-+ for (i=0; ((i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS) &&
-+ (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE)); i++)
-+ {
-+ if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr)
-+ return TRUE;
-+ }
-+
-+ return FALSE;
-+}
-+
-+uint8_t FmPcdNetEnvGetUnitId(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr, bool interchangeable, protocolOpt_t opt)
-+{
-+ uint8_t i, k;
-+
-+ ASSERT_COND(p_FmPcd);
-+
-+ if (interchangeable)
-+ {
-+ for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
-+ {
-+ for (k=0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
-+ {
-+                if ((p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr == hdr) &&
-+                    (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt == opt))
-+                    return i;
-+ }
-+ }
-+ }
-+ else
-+ {
-+ for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
-+ if ((p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr == hdr) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].opt == opt) &&
-+ (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[1].hdr == HEADER_TYPE_NONE))
-+ return i;
-+
-+ for (i=0; (i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS) &&
-+ (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE); i++)
-+ if ((p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr) &&
-+ (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].opt == opt))
-+ return p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].aliasHdr;
-+ }
-+
-+ return FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS;
-+}
-+
-+t_Error FmPcdUnregisterReassmPort(t_Handle h_FmPcd, t_Handle h_ReasmCommonPramTbl)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdCcReassmTimeoutParams ccReassmTimeoutParams = {0};
-+ uint8_t result;
-+ t_Error err = E_OK;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(h_ReasmCommonPramTbl);
-+
-+ ccReassmTimeoutParams.iprcpt = (uint32_t)(XX_VirtToPhys(h_ReasmCommonPramTbl) - p_FmPcd->physicalMuramBase);
-+ ccReassmTimeoutParams.activate = FALSE; /*Disable Timeout Task*/
-+
-+ if ((err = FmHcPcdCcTimeoutReassm(p_FmPcd->h_Hc, &ccReassmTimeoutParams, &result)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ switch (result)
-+ {
-+ case (0):
-+ return E_OK;
-+ case (1):
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, (""));
-+ case (2):
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, (""));
-+ case (3):
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Disable Timeout Task with invalid IPRCPT"));
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+
-+e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr)
-+{
-+ int i;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(netEnvId < FM_MAX_NUM_OF_PORTS);
-+
-+ for (i=0; (i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS)
-+ && (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE); i++)
-+ {
-+ if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr)
-+ return p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].aliasHdr;
-+ }
-+
-+ return HEADER_TYPE_NONE;
-+}
-+
-+void FmPcdPortRegister(t_Handle h_FmPcd, t_Handle h_FmPort, uint8_t hardwarePortId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint16_t swPortIndex = 0;
-+
-+ ASSERT_COND(h_FmPcd);
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort = h_FmPort;
-+}
-+
-+uint32_t FmPcdGetLcv(t_Handle h_FmPcd, uint32_t netEnvId, uint8_t hdrNum)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(h_FmPcd);
-+ return p_FmPcd->netEnvs[netEnvId].lcvs[hdrNum];
-+}
-+
-+uint32_t FmPcdGetMacsecLcv(t_Handle h_FmPcd, uint32_t netEnvId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(h_FmPcd);
-+ return p_FmPcd->netEnvs[netEnvId].macsecVector;
-+}
-+
-+uint8_t FmPcdGetNetEnvId(t_Handle h_NetEnv)
-+{
-+ return ((t_FmPcdNetEnv*)h_NetEnv)->netEnvId;
-+}
-+
-+void FmPcdIncNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId)
-+{
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(h_FmPcd);
-+
-+ intFlags = NetEnvLock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId]);
-+ ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners++;
-+ NetEnvUnlock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId], intFlags);
-+}
-+
-+void FmPcdDecNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId)
-+{
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(h_FmPcd);
-+ ASSERT_COND(((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners);
-+
-+ intFlags = NetEnvLock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId]);
-+ ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners--;
-+ NetEnvUnlock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId], intFlags);
-+}
-+
-+uint32_t FmPcdLock(t_Handle h_FmPcd)
-+{
-+ ASSERT_COND(h_FmPcd);
-+ return XX_LockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock);
-+}
-+
-+void FmPcdUnlock(t_Handle h_FmPcd, uint32_t intFlags)
-+{
-+ ASSERT_COND(h_FmPcd);
-+ XX_UnlockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock, intFlags);
-+}
-+
-+t_FmPcdLock * FmPcdAcquireLock(t_Handle h_FmPcd)
-+{
-+ t_FmPcdLock *p_Lock;
-+ ASSERT_COND(h_FmPcd);
-+ p_Lock = DequeueLockFromFreeLst((t_FmPcd*)h_FmPcd);
-+ if (!p_Lock)
-+ {
-+ FillFreeLocksLst(h_FmPcd);
-+ p_Lock = DequeueLockFromFreeLst((t_FmPcd*)h_FmPcd);
-+ }
-+
-+ if (p_Lock)
-+ EnqueueLockToAcquiredLst((t_FmPcd*)h_FmPcd, p_Lock);
-+ return p_Lock;
-+}
-+
-+void FmPcdReleaseLock(t_Handle h_FmPcd, t_FmPcdLock *p_Lock)
-+{
-+ uint32_t intFlags;
-+ ASSERT_COND(h_FmPcd);
-+ intFlags = FmPcdLock(h_FmPcd);
-+ LIST_DelAndInit(&p_Lock->node);
-+ FmPcdUnlock(h_FmPcd, intFlags);
-+ EnqueueLockToFreeLst((t_FmPcd*)h_FmPcd, p_Lock);
-+}
-+
-+bool FmPcdLockTryLockAll(t_Handle h_FmPcd)
-+{
-+ uint32_t intFlags;
-+ t_List *p_Pos, *p_SavedPos=NULL;
-+
-+ ASSERT_COND(h_FmPcd);
-+ intFlags = FmPcdLock(h_FmPcd);
-+ LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst)
-+ {
-+ t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos);
-+ if (!FmPcdLockTryLock(p_Lock))
-+ {
-+ p_SavedPos = p_Pos;
-+ break;
-+ }
-+ }
-+ if (p_SavedPos)
-+ {
-+ LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst)
-+ {
-+ t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos);
-+ if (p_Pos == p_SavedPos)
-+ break;
-+ FmPcdLockUnlock(p_Lock);
-+ }
-+ }
-+ FmPcdUnlock(h_FmPcd, intFlags);
-+
-+ CORE_MemoryBarrier();
-+
-+ if (p_SavedPos)
-+ return FALSE;
-+
-+ return TRUE;
-+}
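/* FmPcdLockTryLockAll() above performs all-or-nothing locking: it try-locks every
 * lock on the acquired list and, on the first failure, unlocks everything taken so
 * far so the caller can back off and retry. A standalone sketch of that pattern,
 * using a plain array of pthread mutexes rather than the driver's list and XX_*
 * wrappers (names are illustrative assumptions only): */
#include <pthread.h>
#include <stdbool.h>

static bool try_lock_all(pthread_mutex_t *locks, int n)
{
    for (int i = 0; i < n; i++) {
        if (pthread_mutex_trylock(&locks[i]) != 0) {
            /* Roll back the locks already taken, in reverse order. */
            while (--i >= 0)
                pthread_mutex_unlock(&locks[i]);
            return false;
        }
    }
    return true;    /* the caller now owns all n locks */
}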
-+
-+void FmPcdLockUnlockAll(t_Handle h_FmPcd)
-+{
-+ uint32_t intFlags;
-+ t_List *p_Pos;
-+
-+ ASSERT_COND(h_FmPcd);
-+ intFlags = FmPcdLock(h_FmPcd);
-+ LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst)
-+ {
-+ t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos);
-+ p_Lock->flag = FALSE;
-+ }
-+ FmPcdUnlock(h_FmPcd, intFlags);
-+
-+ CORE_MemoryBarrier();
-+}
-+
-+t_Error FmPcdHcSync(t_Handle h_FmPcd)
-+{
-+ ASSERT_COND(h_FmPcd);
-+ ASSERT_COND(((t_FmPcd*)h_FmPcd)->h_Hc);
-+
-+ return FmHcPcdSync(((t_FmPcd*)h_FmPcd)->h_Hc);
-+}
-+
-+t_Handle FmPcdGetHcHandle(t_Handle h_FmPcd)
-+{
-+ ASSERT_COND(h_FmPcd);
-+ return ((t_FmPcd*)h_FmPcd)->h_Hc;
-+}
-+
-+bool FmPcdIsAdvancedOffloadSupported(t_Handle h_FmPcd)
-+{
-+ ASSERT_COND(h_FmPcd);
-+ return ((t_FmPcd*)h_FmPcd)->advancedOffloadSupport;
-+}
-+/*********************** End of inter-module routines ************************/
-+
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+
-+t_Handle FM_PCD_Config(t_FmPcdParams *p_FmPcdParams)
-+{
-+ t_FmPcd *p_FmPcd = NULL;
-+ t_FmPhysAddr physicalMuramBase;
-+ uint8_t i;
-+
-+    SANITY_CHECK_RETURN_VALUE(p_FmPcdParams, E_INVALID_HANDLE, NULL);
-+
-+ p_FmPcd = (t_FmPcd *) XX_Malloc(sizeof(t_FmPcd));
-+ if (!p_FmPcd)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD"));
-+ return NULL;
-+ }
-+ memset(p_FmPcd, 0, sizeof(t_FmPcd));
-+
-+ p_FmPcd->p_FmPcdDriverParam = (t_FmPcdDriverParam *) XX_Malloc(sizeof(t_FmPcdDriverParam));
-+ if (!p_FmPcd->p_FmPcdDriverParam)
-+ {
-+ XX_Free(p_FmPcd);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Driver Param"));
-+ return NULL;
-+ }
-+ memset(p_FmPcd->p_FmPcdDriverParam, 0, sizeof(t_FmPcdDriverParam));
-+
-+ p_FmPcd->h_Fm = p_FmPcdParams->h_Fm;
-+ p_FmPcd->guestId = FmGetGuestId(p_FmPcd->h_Fm);
-+ p_FmPcd->h_FmMuram = FmGetMuramHandle(p_FmPcd->h_Fm);
-+ if (p_FmPcd->h_FmMuram)
-+ {
-+ FmGetPhysicalMuramBase(p_FmPcdParams->h_Fm, &physicalMuramBase);
-+ p_FmPcd->physicalMuramBase = (uint64_t)((uint64_t)(&physicalMuramBase)->low | ((uint64_t)(&physicalMuramBase)->high << 32));
-+ }
-+
-+ for (i = 0; i<FM_MAX_NUM_OF_PORTS; i++)
-+ p_FmPcd->netEnvs[i].clsPlanGrpId = ILLEGAL_CLS_PLAN;
-+
-+ if (p_FmPcdParams->useHostCommand)
-+ {
-+ t_FmHcParams hcParams;
-+
-+ memset(&hcParams, 0, sizeof(hcParams));
-+ hcParams.h_Fm = p_FmPcd->h_Fm;
-+ hcParams.h_FmPcd = (t_Handle)p_FmPcd;
-+ memcpy((uint8_t*)&hcParams.params, (uint8_t*)&p_FmPcdParams->hc, sizeof(t_FmPcdHcParams));
-+ p_FmPcd->h_Hc = FmHcConfigAndInit(&hcParams);
-+ if (!p_FmPcd->h_Hc)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD HC"));
-+ FM_PCD_Free(p_FmPcd);
-+ return NULL;
-+ }
-+ }
-+ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("No Host Command defined for a guest partition."));
-+
-+ if (p_FmPcdParams->kgSupport)
-+ {
-+ p_FmPcd->p_FmPcdKg = (t_FmPcdKg *)KgConfig(p_FmPcd, p_FmPcdParams);
-+ if (!p_FmPcd->p_FmPcdKg)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Keygen"));
-+ FM_PCD_Free(p_FmPcd);
-+ return NULL;
-+ }
-+ }
-+
-+ if (p_FmPcdParams->plcrSupport)
-+ {
-+ p_FmPcd->p_FmPcdPlcr = (t_FmPcdPlcr *)PlcrConfig(p_FmPcd, p_FmPcdParams);
-+ if (!p_FmPcd->p_FmPcdPlcr)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Policer"));
-+ FM_PCD_Free(p_FmPcd);
-+ return NULL;
-+ }
-+ }
-+
-+ if (p_FmPcdParams->prsSupport)
-+ {
-+ p_FmPcd->p_FmPcdPrs = (t_FmPcdPrs *)PrsConfig(p_FmPcd, p_FmPcdParams);
-+ if (!p_FmPcd->p_FmPcdPrs)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Parser"));
-+ FM_PCD_Free(p_FmPcd);
-+ return NULL;
-+ }
-+ }
-+
-+ p_FmPcd->h_Spinlock = XX_InitSpinlock();
-+ if (!p_FmPcd->h_Spinlock)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD spinlock"));
-+ FM_PCD_Free(p_FmPcd);
-+ return NULL;
-+ }
-+ INIT_LIST(&p_FmPcd->freeLocksLst);
-+ INIT_LIST(&p_FmPcd->acquiredLocksLst);
-+
-+ p_FmPcd->numOfEnabledGuestPartitionsPcds = 0;
-+
-+ p_FmPcd->f_Exception = p_FmPcdParams->f_Exception;
-+ p_FmPcd->f_FmPcdIndexedException = p_FmPcdParams->f_ExceptionId;
-+ p_FmPcd->h_App = p_FmPcdParams->h_App;
-+
-+ p_FmPcd->p_CcShadow = NULL;
-+ p_FmPcd->ccShadowSize = 0;
-+ p_FmPcd->ccShadowAlign = 0;
-+
-+ p_FmPcd->h_ShadowSpinlock = XX_InitSpinlock();
-+ if (!p_FmPcd->h_ShadowSpinlock)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD shadow spinlock"));
-+ FM_PCD_Free(p_FmPcd);
-+ return NULL;
-+ }
-+
-+ return p_FmPcd;
-+}
-+
-+t_Error FM_PCD_Init(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_Error err = E_OK;
-+ t_FmPcdIpcMsg msg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
-+
-+ FM_GetRevision(p_FmPcd->h_Fm, &p_FmPcd->fmRevInfo);
-+
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ {
-+ memset(p_FmPcd->fmPcdIpcHandlerModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
-+ if (Sprint (p_FmPcd->fmPcdIpcHandlerModuleName, "FM_PCD_%d_%d", FmGetId(p_FmPcd->h_Fm), NCSW_MASTER_ID) != 10)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+ memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
-+ if (Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm), p_FmPcd->guestId) != (p_FmPcd->guestId<10 ? 10:11))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+
-+ p_FmPcd->h_IpcSession = XX_IpcInitSession(p_FmPcd->fmPcdIpcHandlerModuleName, p_FmPcd->fmPcdModuleName);
-+ if (p_FmPcd->h_IpcSession)
-+ {
-+ t_FmPcdIpcReply reply;
-+ uint32_t replyLength;
-+ uint8_t isMasterAlive = 0;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_PCD_MASTER_IS_ALIVE;
-+ msg.msgBody[0] = p_FmPcd->guestId;
-+ blockingFlag = TRUE;
-+
-+ do
-+ {
-+ replyLength = sizeof(uint32_t) + sizeof(isMasterAlive);
-+ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(p_FmPcd->guestId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ IpcMsgCompletionCB,
-+ h_FmPcd)) != E_OK)
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ while (blockingFlag) ;
-+ if (replyLength != (sizeof(uint32_t) + sizeof(isMasterAlive)))
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ isMasterAlive = *(uint8_t*)(reply.replyBody);
-+ } while (!isMasterAlive);
-+ }
-+ }
-+
-+ CHECK_INIT_PARAMETERS(p_FmPcd, CheckFmPcdParameters);
-+
-+ if (p_FmPcd->p_FmPcdKg)
-+ {
-+ err = KgInit(p_FmPcd);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (p_FmPcd->p_FmPcdPlcr)
-+ {
-+ err = PlcrInit(p_FmPcd);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (p_FmPcd->p_FmPcdPrs)
-+ {
-+ err = PrsInit(p_FmPcd);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ {
-+ /* register to inter-core messaging mechanism */
-+ memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
-+ if (Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm),NCSW_MASTER_ID) != 10)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+ err = XX_IpcRegisterMsgHandler(p_FmPcd->fmPcdModuleName, IpcMsgHandlerCB, p_FmPcd, FM_PCD_MAX_REPLY_SIZE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /* IPv6 Frame-Id used for fragmentation */
-+ p_FmPcd->ipv6FrameIdAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, 4, 4));
-+ if (!p_FmPcd->ipv6FrameIdAddr)
-+ {
-+ FM_PCD_Free(p_FmPcd);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for IPv6 Frame-Id"));
-+ }
-+ IOMemSet32(UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr), 0, 4);
-+
-+ /* CAPWAP Frame-Id used for fragmentation */
-+ p_FmPcd->capwapFrameIdAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, 2, 4));
-+ if (!p_FmPcd->capwapFrameIdAddr)
-+ {
-+ FM_PCD_Free(p_FmPcd);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CAPWAP Frame-Id"));
-+ }
-+ IOMemSet32(UINT_TO_PTR(p_FmPcd->capwapFrameIdAddr), 0, 2);
-+
-+ XX_Free(p_FmPcd->p_FmPcdDriverParam);
-+ p_FmPcd->p_FmPcdDriverParam = NULL;
-+
-+ FmRegisterPcd(p_FmPcd->h_Fm, p_FmPcd);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_Free(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd =(t_FmPcd *)h_FmPcd;
-+ t_Error err = E_OK;
-+
-+ if (p_FmPcd->ipv6FrameIdAddr)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr));
-+
-+ if (p_FmPcd->capwapFrameIdAddr)
-+ FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_FmPcd->capwapFrameIdAddr));
-+
-+ if (p_FmPcd->enabled)
-+ FM_PCD_Disable(p_FmPcd);
-+
-+ if (p_FmPcd->p_FmPcdDriverParam)
-+ {
-+ XX_Free(p_FmPcd->p_FmPcdDriverParam);
-+ p_FmPcd->p_FmPcdDriverParam = NULL;
-+ }
-+
-+ if (p_FmPcd->p_FmPcdKg)
-+ {
-+ if ((err = KgFree(p_FmPcd)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ XX_Free(p_FmPcd->p_FmPcdKg);
-+ p_FmPcd->p_FmPcdKg = NULL;
-+ }
-+
-+ if (p_FmPcd->p_FmPcdPlcr)
-+ {
-+ PlcrFree(p_FmPcd);
-+ XX_Free(p_FmPcd->p_FmPcdPlcr);
-+ p_FmPcd->p_FmPcdPlcr = NULL;
-+ }
-+
-+ if (p_FmPcd->p_FmPcdPrs)
-+ {
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ PrsFree(p_FmPcd);
-+ XX_Free(p_FmPcd->p_FmPcdPrs);
-+ p_FmPcd->p_FmPcdPrs = NULL;
-+ }
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ FmHcFree(p_FmPcd->h_Hc);
-+ p_FmPcd->h_Hc = NULL;
-+ }
-+
-+ XX_IpcUnregisterMsgHandler(p_FmPcd->fmPcdModuleName);
-+
-+ FmUnregisterPcd(p_FmPcd->h_Fm);
-+
-+ ReleaseFreeLocksLst(p_FmPcd);
-+
-+ if (p_FmPcd->h_Spinlock)
-+ XX_FreeSpinlock(p_FmPcd->h_Spinlock);
-+
-+ if (p_FmPcd->h_ShadowSpinlock)
-+ XX_FreeSpinlock(p_FmPcd->h_ShadowSpinlock);
-+
-+ XX_Free(p_FmPcd);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_ConfigException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigException - guest mode!"));
-+
-+ GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmPcd->exceptions |= bitMask;
-+ else
-+ p_FmPcd->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_ConfigHcFramesDataMemory(t_Handle h_FmPcd, uint8_t memId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
-+
-+ return FmHcSetFramesDataMemory(p_FmPcd->h_Hc, memId);
-+}
-+
-+t_Error FM_PCD_Enable(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+
-+ if (p_FmPcd->enabled)
-+ return E_OK;
-+
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ p_FmPcd->h_IpcSession)
-+ {
-+ uint8_t enabled;
-+ t_FmPcdIpcMsg msg;
-+ t_FmPcdIpcReply reply;
-+ uint32_t replyLength;
-+
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_PCD_MASTER_IS_ENABLED;
-+ replyLength = sizeof(uint32_t) + sizeof(enabled);
-+ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t) + sizeof(enabled))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ p_FmPcd->enabled = (bool)!!(*(uint8_t*)(reply.replyBody));
-+ if (!p_FmPcd->enabled)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-PCD master should be enabled first!"));
-+
-+ return E_OK;
-+ }
-+ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+
-+ if (p_FmPcd->p_FmPcdKg)
-+ KgEnable(p_FmPcd);
-+
-+ if (p_FmPcd->p_FmPcdPlcr)
-+ PlcrEnable(p_FmPcd);
-+
-+ if (p_FmPcd->p_FmPcdPrs)
-+ PrsEnable(p_FmPcd);
-+
-+ p_FmPcd->enabled = TRUE;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_Disable(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+
-+ if (!p_FmPcd->enabled)
-+ return E_OK;
-+
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ p_FmPcd->h_IpcSession)
-+ {
-+ t_FmPcdIpcMsg msg;
-+ t_FmPcdIpcReply reply;
-+ uint32_t replyLength;
-+
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_PCD_GUEST_DISABLE;
-+ replyLength = sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ if (reply.error == E_OK)
-+ p_FmPcd->enabled = FALSE;
-+
-+ return (t_Error)(reply.error);
-+ }
-+ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+
-+ if (p_FmPcd->numOfEnabledGuestPartitionsPcds != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+                     ("Trying to disable a master partition PCD while "
-+                      "guest partitions are still enabled!"));
-+
-+ if (p_FmPcd->p_FmPcdKg)
-+ KgDisable(p_FmPcd);
-+
-+ if (p_FmPcd->p_FmPcdPlcr)
-+ PlcrDisable(p_FmPcd);
-+
-+ if (p_FmPcd->p_FmPcdPrs)
-+ PrsDisable(p_FmPcd);
-+
-+ p_FmPcd->enabled = FALSE;
-+
-+ return E_OK;
-+}
-+
-+t_Handle FM_PCD_NetEnvCharacteristicsSet(t_Handle h_FmPcd, t_FmPcdNetEnvParams *p_NetEnvParams)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t intFlags, specialUnits = 0;
-+ uint8_t bitId = 0;
-+ uint8_t i, j, k;
-+ uint8_t netEnvCurrId;
-+ uint8_t ipsecAhUnit = 0,ipsecEspUnit = 0;
-+ bool ipsecAhExists = FALSE, ipsecEspExists = FALSE, shim1Selected = FALSE;
-+ uint8_t hdrNum;
-+ t_FmPcdNetEnvParams *p_ModifiedNetEnvParams;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_STATE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_NetEnvParams, E_NULL_POINTER, NULL);
-+
-+ intFlags = FmPcdLock(p_FmPcd);
-+
-+ /* find a new netEnv */
-+ for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++)
-+ if (!p_FmPcd->netEnvs[i].used)
-+ break;
-+
-+ if (i== FM_MAX_NUM_OF_PORTS)
-+ {
-+        REPORT_ERROR(MAJOR, E_FULL, ("No more than %d netEnvs allowed.", FM_MAX_NUM_OF_PORTS));
-+ FmPcdUnlock(p_FmPcd, intFlags);
-+ return NULL;
-+ }
-+
-+ p_FmPcd->netEnvs[i].used = TRUE;
-+ FmPcdUnlock(p_FmPcd, intFlags);
-+
-+    /* Since no one holds a handle to this netEnv yet, there is no need
-+       to protect it with spinlocks */
-+
-+ p_ModifiedNetEnvParams = (t_FmPcdNetEnvParams *)XX_Malloc(sizeof(t_FmPcdNetEnvParams));
-+ if (!p_ModifiedNetEnvParams)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FmPcdNetEnvParams"));
-+ return NULL;
-+ }
-+
-+ memcpy(p_ModifiedNetEnvParams, p_NetEnvParams, sizeof(t_FmPcdNetEnvParams));
-+ p_NetEnvParams = p_ModifiedNetEnvParams;
-+
-+ netEnvCurrId = (uint8_t)i;
-+
-+ /* clear from previous use */
-+ memset(&p_FmPcd->netEnvs[netEnvCurrId].units, 0, FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS * sizeof(t_FmPcdIntDistinctionUnit));
-+ memset(&p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs, 0, FM_PCD_MAX_NUM_OF_ALIAS_HDRS * sizeof(t_FmPcdNetEnvAliases));
-+ memcpy(&p_FmPcd->netEnvs[netEnvCurrId].units, p_NetEnvParams->units, p_NetEnvParams->numOfDistinctionUnits*sizeof(t_FmPcdIntDistinctionUnit));
-+
-+ p_FmPcd->netEnvs[netEnvCurrId].netEnvId = netEnvCurrId;
-+ p_FmPcd->netEnvs[netEnvCurrId].h_FmPcd = p_FmPcd;
-+
-+ p_FmPcd->netEnvs[netEnvCurrId].clsPlanGrpId = ILLEGAL_CLS_PLAN;
-+
-+ /* check that header with opt is not interchanged with the same header */
-+ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
-+ {
-+ for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
-+ {
-+ /* if an option exists, check that other headers are not the same header
-+ without option */
-+ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt)
-+ {
-+ for (j = 0; (j < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr != HEADER_TYPE_NONE); j++)
-+ {
-+ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr == p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr) &&
-+ !p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].opt)
-+ {
-+ REPORT_ERROR(MINOR, E_FULL,
-+ ("Illegal unit - header with opt may not be interchangeable with the same header without opt"));
-+ XX_Free(p_ModifiedNetEnvParams);
-+ return NULL;
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+ /* Specific headers checking */
-+ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
-+ {
-+ for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
-+ {
-+            /* Some header pairs may not be defined in different units, as the parser
-+               does not distinguish between them: */
-+            /* IPSEC_AH and IPSEC_ESP cannot be placed in two separate units */
-+ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_AH)
-+ {
-+ if (ipsecEspExists && (ipsecEspUnit != i))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate units"));
-+ XX_Free(p_ModifiedNetEnvParams);
-+ return NULL;
-+ }
-+ else
-+ {
-+ ipsecAhUnit = i;
-+ ipsecAhExists = TRUE;
-+ }
-+ }
-+ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_ESP)
-+ {
-+ if (ipsecAhExists && (ipsecAhUnit != i))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate units"));
-+ XX_Free(p_ModifiedNetEnvParams);
-+ return NULL;
-+ }
-+ else
-+ {
-+ ipsecEspUnit = i;
-+ ipsecEspExists = TRUE;
-+ }
-+ }
-+ /* ENCAP_ESP */
-+ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_UDP_ENCAP_ESP)
-+ {
-+ /* IPSec UDP encapsulation is currently set to use SHIM1 */
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_UDP_ENCAP_ESP;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM1;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM1;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
-+ }
-+#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ /* UDP_LITE */
-+ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_UDP_LITE)
-+ {
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_UDP_LITE;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_UDP;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_UDP;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
-+ }
-+#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+
-+ /* IP FRAG */
-+ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPv4) &&
-+ (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == IPV4_FRAG_1))
-+ {
-+                /* If IPv4+Frag, we need to set 2 units - SHIM2 and IPv4. We first set SHIM2, and then check if
-+                 * IPv4 exists. If it does, we don't need to set an extra unit.
-+                 * We consider as "having IPv4" any IPv4 without interchangeable headers
-+                 * but including any options. */
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_IPv4;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = IPV4_FRAG_1;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
-+
-+ /* check if IPv4 header exists by itself */
-+ if (FmPcdNetEnvGetUnitId(p_FmPcd, netEnvCurrId, HEADER_TYPE_IPv4, FALSE, 0) == FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+ {
-+ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits].hdrs[0].hdr = HEADER_TYPE_IPv4;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits++].hdrs[0].opt = 0;
-+ }
-+ }
-+ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPv6) &&
-+ (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == IPV6_FRAG_1))
-+ {
-+                /* If IPv6+Frag, we need to set 2 units - SHIM2 and IPv6. We first set SHIM2, and then check if
-+                 * IPv6 exists. If it does, we don't need to set an extra unit.
-+                 * We consider as "having IPv6" any IPv6 without interchangeable headers
-+                 * but including any options. */
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_IPv6;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = IPV6_FRAG_1;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
-+
-+ /* check if IPv6 header exists by itself */
-+ if (FmPcdNetEnvGetUnitId(p_FmPcd, netEnvCurrId, HEADER_TYPE_IPv6, FALSE, 0) == FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+ {
-+ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits].hdrs[0].hdr = HEADER_TYPE_IPv6;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits++].hdrs[0].opt = 0;
-+ }
-+ }
-+#if (DPAA_VERSION >= 11)
-+ /* CAPWAP FRAG */
-+ if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_CAPWAP) &&
-+ (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == CAPWAP_FRAG_1))
-+ {
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_CAPWAP;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = CAPWAP_FRAG_1;
-+ p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2;
-+ p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+ }
-+
-+ /* if private header (shim), check that no other headers specified */
-+ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
-+ {
-+ if (IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
-+ if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[1].hdr != HEADER_TYPE_NONE)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header may not be interchanged with other headers"));
-+ XX_Free(p_ModifiedNetEnvParams);
-+ return NULL;
-+ }
-+ }
-+
-+ for (i = 0; i < p_NetEnvParams->numOfDistinctionUnits; i++)
-+ {
-+ if (IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
-+ switch (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)
-+ {
-+ case (HEADER_TYPE_USER_DEFINED_SHIM1):
-+ if (shim1Selected)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header cannot be selected with UDP_IPSEC_ESP"));
-+ XX_Free(p_ModifiedNetEnvParams);
-+ return NULL;
-+ }
-+ shim1Selected = TRUE;
-+ p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000001;
-+ break;
-+ case (HEADER_TYPE_USER_DEFINED_SHIM2):
-+ p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000002;
-+ break;
-+ default:
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Requested SHIM not supported"));
-+ }
-+ else
-+ {
-+ p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = (uint32_t)(0x80000000 >> bitId++);
-+
-+ if (IS_SPECIAL_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
-+ p_FmPcd->netEnvs[netEnvCurrId].macsecVector = p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i];
-+ }
-+ }
-+
-+ /* define a set of hardware parser LCV's according to the defined netenv */
-+
-+ /* set an array of LCV's for each header in the netEnv */
-+ for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++)
-+ {
-+ /* private headers have no LCV in the hard parser */
-+ if (!IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr))
-+ {
-+ for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
-+ && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++)
-+ {
-+ hdrNum = GetPrsHdrNum(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr);
-+ if ((hdrNum == ILLEGAL_HDR_NUM) || (hdrNum == NO_HDR_NUM))
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG);
-+ XX_Free(p_ModifiedNetEnvParams);
-+ return NULL;
-+ }
-+ p_FmPcd->netEnvs[netEnvCurrId].lcvs[hdrNum] |= p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i];
-+ }
-+ }
-+ }
-+ XX_Free(p_ModifiedNetEnvParams);
-+
-+ p_FmPcd->netEnvs[netEnvCurrId].h_Spinlock = XX_InitSpinlock();
-+ if (!p_FmPcd->netEnvs[netEnvCurrId].h_Spinlock)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd NetEnv spinlock"));
-+ return NULL;
-+ }
-+ return &p_FmPcd->netEnvs[netEnvCurrId];
-+}
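/* The vector assignment inside FM_PCD_NetEnvCharacteristicsSet() above gives each
 * ordinary distinction unit a one-hot bit allocated from the MSB downwards, while
 * SHIM1/SHIM2 keep the two fixed low bits; the per-header LCVs are then built by
 * OR-ing the vectors of the units that reference that header. A small sketch of
 * the bit-allocation step only (enum and function names are assumptions for
 * illustration, not the driver's API): */
#include <stdint.h>

enum unit_kind { UNIT_ORDINARY, UNIT_SHIM1, UNIT_SHIM2 };

static void assign_unit_vectors(const enum unit_kind *kind, uint32_t *vec, int n)
{
    int bit = 0;                    /* next MSB-side bit for ordinary units */

    for (int i = 0; i < n; i++) {
        switch (kind[i]) {
        case UNIT_SHIM1:
            vec[i] = 0x00000001;    /* fixed bit shared with UDP-encapsulated ESP */
            break;
        case UNIT_SHIM2:
            vec[i] = 0x00000002;    /* fixed bit used for the IP/CAPWAP frag alias */
            break;
        default:
            vec[i] = 0x80000000u >> bit++;
            break;
        }
    }
}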
-+
-+t_Error FM_PCD_NetEnvCharacteristicsDelete(t_Handle h_NetEnv)
-+{
-+ t_FmPcdNetEnv *p_NetEnv = (t_FmPcdNetEnv*)h_NetEnv;
-+ t_FmPcd *p_FmPcd = p_NetEnv->h_FmPcd;
-+ uint32_t intFlags;
-+ uint8_t netEnvId = p_NetEnv->netEnvId;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
-+
-+ /* check that no port is bound to this netEnv */
-+ if (p_FmPcd->netEnvs[netEnvId].owners)
-+ {
-+ RETURN_ERROR(MINOR, E_INVALID_STATE,
-+                     ("Trying to delete a netEnv that still has ports/schemes/trees/clsPlanGrps bound to it"));
-+ }
-+
-+ intFlags = FmPcdLock(p_FmPcd);
-+
-+ p_FmPcd->netEnvs[netEnvId].used = FALSE;
-+ p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = ILLEGAL_CLS_PLAN;
-+
-+ memset(p_FmPcd->netEnvs[netEnvId].units, 0, sizeof(t_FmPcdIntDistinctionUnit)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+ memset(p_FmPcd->netEnvs[netEnvId].unitsVectors, 0, sizeof(uint32_t)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+ memset(p_FmPcd->netEnvs[netEnvId].lcvs, 0, sizeof(uint32_t)*FM_PCD_PRS_NUM_OF_HDRS);
-+
-+ if (p_FmPcd->netEnvs[netEnvId].h_Spinlock)
-+ XX_FreeSpinlock(p_FmPcd->netEnvs[netEnvId].h_Spinlock);
-+
-+ FmPcdUnlock(p_FmPcd, intFlags);
-+ return E_OK;
-+}
-+
-+void FM_PCD_HcTxConf(t_Handle h_FmPcd, t_DpaaFD *p_Fd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN(h_FmPcd, E_INVALID_STATE);
-+
-+ FmHcTxConf(p_FmPcd->h_Hc, p_Fd);
-+}
-+
-+t_Error FM_PCD_SetAdvancedOffloadSupport(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmCtrlCodeRevisionInfo revInfo;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->enabled, E_INVALID_STATE);
-+
-+ if ((err = FM_GetFmanCtrlCodeRevision(p_FmPcd->h_Fm, &revInfo)) != E_OK)
-+ {
-+ DBG(WARNING, ("FM in guest-mode without IPC, can't validate firmware revision."));
-+ revInfo.packageRev = IP_OFFLOAD_PACKAGE_NUMBER;
-+ }
-+ if (!IS_OFFLOAD_PACKAGE(revInfo.packageRev))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Fman ctrl code package"));
-+
-+ if (!p_FmPcd->h_Hc)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("HC must be initialized in this mode"));
-+
-+ p_FmPcd->advancedOffloadSupport = TRUE;
-+
-+ return E_OK;
-+}
-+
-+uint32_t FM_PCD_GetCounter(t_Handle h_FmPcd, e_FmPcdCounters counter)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t outCounter = 0;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0);
-+
-+ switch (counter)
-+ {
-+ case (e_FM_PCD_KG_COUNTERS_TOTAL):
-+ if (!p_FmPcd->p_FmPcdKg)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("KeyGen is not activated"));
-+ return 0;
-+ }
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ !p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs &&
-+ !p_FmPcd->h_IpcSession)
-+ {
-+            REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+                         ("running in guest-mode with neither IPC nor a mapped register!"));
-+ return 0;
-+ }
-+ break;
-+
-+ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
-+ case (e_FM_PCD_PLCR_COUNTERS_RED):
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
-+ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
-+ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
-+ if (!p_FmPcd->p_FmPcdPlcr)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Policer is not activated"));
-+ return 0;
-+ }
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ !p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs &&
-+ !p_FmPcd->h_IpcSession)
-+ {
-+            REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+                         ("running in guest-mode with neither IPC nor a mapped register!"));
-+ return 0;
-+ }
-+
-+ /* check that counters are enabled */
-+ if (p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs &&
-+ !(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled"));
-+ return 0;
-+ }
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs ||
-+ ((p_FmPcd->guestId != NCSW_MASTER_ID) && p_FmPcd->h_IpcSession));
-+ break;
-+
-+ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
-+ if (!p_FmPcd->p_FmPcdPrs)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Parser is not activated"));
-+ return 0;
-+ }
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ !p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs &&
-+ !p_FmPcd->h_IpcSession)
-+ {
-+            REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+                         ("running in guest-mode with neither IPC nor a mapped register!"));
-+ return 0;
-+ }
-+ break;
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported type of counter"));
-+ return 0;
-+ }
-+
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ p_FmPcd->h_IpcSession)
-+ {
-+ t_FmPcdIpcMsg msg;
-+ t_FmPcdIpcReply reply;
-+ uint32_t replyLength;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_PCD_GET_COUNTER;
-+ memcpy(msg.msgBody, (uint8_t *)&counter, sizeof(uint32_t));
-+ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) +sizeof(uint32_t),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t) + sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+
-+ memcpy((uint8_t*)&outCounter, reply.replyBody, sizeof(uint32_t));
-+ return outCounter;
-+ }
-+
-+ switch (counter)
-+ {
-+ /* Parser statistics */
-+ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pds);
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rrs);
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rrs);
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rrs);
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srrs);
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rres);
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rres);
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rres);
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srres);
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spcs);
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spscs);
-+ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_hxscs);
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrcs);
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrscs);
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwcs);
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwscs);
-+ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_fcscs);
-+ case (e_FM_PCD_KG_COUNTERS_TOTAL):
-+ return GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_tpc);
-+
-+ /* Policer statistics */
-+ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt);
-+ case (e_FM_PCD_PLCR_COUNTERS_RED):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt);
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt);
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt);
-+ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt);
-+ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
-+ return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt);
-+ }
-+ return 0;
-+}
-+
-+t_Error FM_PCD_SetException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t bitMask = 0, tmpReg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetException - guest mode!"));
-+
-+ GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception);
-+
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_FmPcd->exceptions |= bitMask;
-+ else
-+ p_FmPcd->exceptions &= ~bitMask;
-+
-+ switch (exception)
-+ {
-+ case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC):
-+ case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW):
-+ if (!p_FmPcd->p_FmPcdKg)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working"));
-+ break;
-+ case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC):
-+ case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR):
-+ case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE):
-+ case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE):
-+ if (!p_FmPcd->p_FmPcdPlcr)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working"));
-+ break;
-+ case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC):
-+ case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC):
-+ if (!p_FmPcd->p_FmPcdPrs)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - parser is not working"));
-+ break;
-+ }
-+
-+ switch (exception)
-+ {
-+ case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer);
-+ if (enable)
-+ tmpReg |= FM_EX_KG_DOUBLE_ECC;
-+ else
-+ tmpReg &= ~FM_EX_KG_DOUBLE_ECC;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer, tmpReg);
-+ break;
-+ case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer);
-+ if (enable)
-+ tmpReg |= FM_EX_KG_KEYSIZE_OVERFLOW;
-+ else
-+ tmpReg &= ~FM_EX_KG_KEYSIZE_OVERFLOW;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer, tmpReg);
-+ break;
-+ case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_perer);
-+ if (enable)
-+ tmpReg |= FM_PCD_PRS_DOUBLE_ECC;
-+ else
-+ tmpReg &= ~FM_PCD_PRS_DOUBLE_ECC;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_perer, tmpReg);
-+ break;
-+ case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pever);
-+ if (enable)
-+ tmpReg |= FM_PCD_PRS_SINGLE_ECC;
-+ else
-+ tmpReg &= ~FM_PCD_PRS_SINGLE_ECC;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pever, tmpReg);
-+ break;
-+ case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier);
-+ if (enable)
-+ tmpReg |= FM_PCD_PLCR_DOUBLE_ECC;
-+ else
-+ tmpReg &= ~FM_PCD_PLCR_DOUBLE_ECC;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg);
-+ break;
-+ case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier);
-+ if (enable)
-+ tmpReg |= FM_PCD_PLCR_INIT_ENTRY_ERROR;
-+ else
-+ tmpReg &= ~FM_PCD_PLCR_INIT_ENTRY_ERROR;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg);
-+ break;
-+ case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier);
-+ if (enable)
-+ tmpReg |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE;
-+ else
-+ tmpReg &= ~FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg);
-+ break;
-+ case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE):
-+ tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier);
-+ if (enable)
-+ tmpReg |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE;
-+ else
-+ tmpReg &= ~FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE;
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg);
-+ break;
-+ }
-+        /* For ECC exceptions, the driver automatically enables the ECC mechanism if it is
-+           disabled. The driver may likewise disable it automatically, depending on its status. */
-+        if (enable && ((exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) ||
-+                       (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) ||
-+                       (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) ||
-+                       (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC)))
-+            FmEnableRamsEcc(p_FmPcd->h_Fm);
-+        if (!enable && ((exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) ||
-+                        (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) ||
-+                        (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) ||
-+                        (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC)))
-+            FmDisableRamsEcc(p_FmPcd->h_Fm);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_ForceIntr (t_Handle h_FmPcd, e_FmPcdExceptions exception)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ForceIntr - guest mode!"));
-+
-+ switch (exception)
-+ {
-+ case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC):
-+ case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW):
-+ if (!p_FmPcd->p_FmPcdKg)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working"));
-+ break;
-+ case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC):
-+ case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR):
-+ case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE):
-+ case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE):
-+ if (!p_FmPcd->p_FmPcdPlcr)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working"));
-+ break;
-+ case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC):
-+ case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC):
-+ if (!p_FmPcd->p_FmPcdPrs)
-+                RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - parser is not working"));
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid interrupt requested"));
-+ }
-+ switch (exception)
-+ {
-+ case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC:
-+ if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC:
-+ if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC:
-+ if (!(p_FmPcd->exceptions & FM_EX_KG_DOUBLE_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_feer, FM_EX_KG_DOUBLE_ECC);
-+ break;
-+ case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW:
-+ if (!(p_FmPcd->exceptions & FM_EX_KG_KEYSIZE_OVERFLOW))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_feer, FM_EX_KG_KEYSIZE_OVERFLOW);
-+ break;
-+ case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC:
-+ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_DOUBLE_ECC);
-+ break;
-+ case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR:
-+ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_INIT_ENTRY_ERROR);
-+ break;
-+ case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE:
-+ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE);
-+ break;
-+ case e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE:
-+ if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE);
-+ break;
-+ }
-+
-+ return E_OK;
-+}
-+
-+
-+t_Error FM_PCD_ModifyCounter(t_Handle h_FmPcd, e_FmPcdCounters counter, uint32_t value)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ModifyCounter - guest mode!"));
-+
-+ switch (counter)
-+ {
-+ case (e_FM_PCD_KG_COUNTERS_TOTAL):
-+ if (!p_FmPcd->p_FmPcdKg)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - KeyGen is not working"));
-+ break;
-+ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
-+ case (e_FM_PCD_PLCR_COUNTERS_RED):
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
-+ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
-+ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
-+ if (!p_FmPcd->p_FmPcdPlcr)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - Policer is not working"));
-+ if (!(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN))
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled"));
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
-+ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
-+ if (!p_FmPcd->p_FmPcdPrs)
-+                RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - Parser is not working"));
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter"));
-+ }
-+ switch (counter)
-+ {
-+ case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pds, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rrs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rrs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rrs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srrs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rres, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rres, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rres, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srres, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spcs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spscs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_hxscs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrcs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrscs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwcs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwscs, value);
-+ break;
-+ case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_fcscs, value);
-+ break;
-+ case (e_FM_PCD_KG_COUNTERS_TOTAL):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_tpc,value);
-+ break;
-+
-+ /*Policer counters*/
-+ case (e_FM_PCD_PLCR_COUNTERS_YELLOW):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt, value);
-+ break;
-+ case (e_FM_PCD_PLCR_COUNTERS_RED):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt, value);
-+ break;
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt, value);
-+ break;
-+ case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt, value);
-+ break;
-+ case (e_FM_PCD_PLCR_COUNTERS_TOTAL):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt, value);
-+ break;
-+ case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH):
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt, value);
-+ break;
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Handle FM_PCD_GetHcPort(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ return FmHcGetPort(p_FmPcd->h_Hc);
-+}
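
Editorial usage sketch (not part of the original patch): FM_PCD_ModifyCounter() above simply writes the given value into the selected counter register after checking that the owning engine is active, so passing 0 clears a counter. The helper below assumes an already-initialized FM PCD handle on the master partition and that the public prototype is picked up from fm_pcd_ext.h, as for the other FM_PCD_* calls in this file.

/* Hedged sketch: clear the KeyGen total-packets counter. */
#include "fm_pcd_ext.h"   /* assumed to declare FM_PCD_ModifyCounter() and its types */

static t_Error ClearKgTotalCounter(t_Handle h_FmPcd)
{
    /* Writing 0 resets the counter; any other value pre-loads it. */
    return FM_PCD_ModifyCounter(h_FmPcd, e_FM_PCD_KG_COUNTERS_TOTAL, 0);
}
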
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h
-@@ -0,0 +1,543 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_pcd.h
-+
-+ @Description FM PCD ...
-+*//***************************************************************************/
-+#ifndef __FM_PCD_H
-+#define __FM_PCD_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_common.h"
-+#include "fsl_fman_prs.h"
-+#include "fsl_fman_kg.h"
-+
-+#define __ERR_MODULE__ MODULE_FM_PCD
-+
-+
-+/****************************/
-+/* Defaults */
-+/****************************/
-+#define DEFAULT_plcrAutoRefresh FALSE
-+#define DEFAULT_fmPcdKgErrorExceptions (FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW)
-+#define DEFAULT_fmPcdPlcrErrorExceptions (FM_PCD_EX_PLCR_DOUBLE_ECC | FM_PCD_EX_PLCR_INIT_ENTRY_ERROR)
-+#define DEFAULT_fmPcdPlcrExceptions 0
-+#define DEFAULT_fmPcdPrsErrorExceptions (FM_PCD_EX_PRS_DOUBLE_ECC)
-+
-+#define DEFAULT_fmPcdPrsExceptions FM_PCD_EX_PRS_SINGLE_ECC
-+#define DEFAULT_numOfUsedProfilesPerWindow 16
-+#define DEFAULT_numOfSharedPlcrProfiles 4
-+
-+/****************************/
-+/* Network defines */
-+/****************************/
-+#define UDP_HEADER_SIZE 8
-+
-+#define ESP_SPI_OFFSET 0
-+#define ESP_SPI_SIZE 4
-+#define ESP_SEQ_NUM_OFFSET ESP_SPI_SIZE
-+#define ESP_SEQ_NUM_SIZE 4
-+
-+/****************************/
-+/* General defines */
-+/****************************/
-+#define ILLEGAL_CLS_PLAN 0xff
-+#define ILLEGAL_NETENV 0xff
-+
-+#define FM_PCD_MAX_NUM_OF_ALIAS_HDRS 3
-+
-+/****************************/
-+/* Error defines */
-+/****************************/
-+
-+#define FM_PCD_EX_PLCR_DOUBLE_ECC 0x20000000
-+#define FM_PCD_EX_PLCR_INIT_ENTRY_ERROR 0x10000000
-+#define FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE 0x08000000
-+#define FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE 0x04000000
-+
-+#define GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception) \
-+switch (exception){ \
-+ case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC: \
-+ bitMask = FM_EX_KG_DOUBLE_ECC; break; \
-+ case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC: \
-+ bitMask = FM_PCD_EX_PLCR_DOUBLE_ECC; break; \
-+ case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW: \
-+ bitMask = FM_EX_KG_KEYSIZE_OVERFLOW; break; \
-+ case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR: \
-+ bitMask = FM_PCD_EX_PLCR_INIT_ENTRY_ERROR; break; \
-+ case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE: \
-+ bitMask = FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE; break; \
-+ case e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE: \
-+ bitMask = FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE; break; \
-+ case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC: \
-+ bitMask = FM_PCD_EX_PRS_DOUBLE_ECC; break; \
-+ case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC: \
-+ bitMask = FM_PCD_EX_PRS_SINGLE_ECC; break; \
-+ default: bitMask = 0;break;}
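
A minimal fragment (editorial, not from the patch) showing how GET_FM_PCD_EXCEPTION_FLAG is meant to be used: it expands to a switch that maps one exception enum value onto its register bit mask, leaving 0 for anything it does not recognize.

uint32_t bitMask;

GET_FM_PCD_EXCEPTION_FLAG(bitMask, e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC);
/* bitMask now holds FM_PCD_EX_PRS_SINGLE_ECC; an unknown enum value yields 0 */
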
-+
-+/***********************************************************************/
-+/* Policer defines */
-+/***********************************************************************/
-+#define FM_PCD_PLCR_GCR_STEN 0x40000000
-+#define FM_PCD_PLCR_DOUBLE_ECC 0x80000000
-+#define FM_PCD_PLCR_INIT_ENTRY_ERROR 0x40000000
-+#define FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE 0x80000000
-+#define FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE 0x40000000
-+
-+/***********************************************************************/
-+/* Memory map */
-+/***********************************************************************/
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+typedef struct {
-+/* General Configuration and Status Registers */
-+ volatile uint32_t fmpl_gcr; /* 0x000 FMPL_GCR - FM Policer General Configuration */
-+ volatile uint32_t fmpl_gsr; /* 0x004 FMPL_GSR - FM Policer Global Status Register */
-+ volatile uint32_t fmpl_evr; /* 0x008 FMPL_EVR - FM Policer Event Register */
-+ volatile uint32_t fmpl_ier; /* 0x00C FMPL_IER - FM Policer Interrupt Enable Register */
-+ volatile uint32_t fmpl_ifr; /* 0x010 FMPL_IFR - FM Policer Interrupt Force Register */
-+ volatile uint32_t fmpl_eevr; /* 0x014 FMPL_EEVR - FM Policer Error Event Register */
-+ volatile uint32_t fmpl_eier; /* 0x018 FMPL_EIER - FM Policer Error Interrupt Enable Register */
-+ volatile uint32_t fmpl_eifr; /* 0x01C FMPL_EIFR - FM Policer Error Interrupt Force Register */
-+/* Global Statistic Counters */
-+ volatile uint32_t fmpl_rpcnt; /* 0x020 FMPL_RPC - FM Policer RED Packets Counter */
-+ volatile uint32_t fmpl_ypcnt; /* 0x024 FMPL_YPC - FM Policer YELLOW Packets Counter */
-+ volatile uint32_t fmpl_rrpcnt; /* 0x028 FMPL_RRPC - FM Policer Recolored RED Packet Counter */
-+ volatile uint32_t fmpl_rypcnt; /* 0x02C FMPL_RYPC - FM Policer Recolored YELLOW Packet Counter */
-+ volatile uint32_t fmpl_tpcnt; /* 0x030 FMPL_TPC - FM Policer Total Packet Counter */
-+ volatile uint32_t fmpl_flmcnt; /* 0x034 FMPL_FLMC - FM Policer Frame Length Mismatch Counter */
-+ volatile uint32_t fmpl_res0[21]; /* 0x038 - 0x08B Reserved */
-+/* Profile RAM Access Registers */
-+ volatile uint32_t fmpl_par; /* 0x08C FMPL_PAR - FM Policer Profile Action Register*/
-+ t_FmPcdPlcrProfileRegs profileRegs;
-+/* Error Capture Registers */
-+ volatile uint32_t fmpl_serc; /* 0x100 FMPL_SERC - FM Policer Soft Error Capture */
-+ volatile uint32_t fmpl_upcr; /* 0x104 FMPL_UPCR - FM Policer Uninitialized Profile Capture Register */
-+ volatile uint32_t fmpl_res2; /* 0x108 Reserved */
-+/* Debug Registers */
-+ volatile uint32_t fmpl_res3[61]; /* 0x10C-0x200 Reserved Debug*/
-+/* Profile Selection Mapping Registers Per Port-ID (n=1-11, 16) */
-+ volatile uint32_t fmpl_dpmr; /* 0x200 FMPL_DPMR - FM Policer Default Mapping Register */
-+ volatile uint32_t fmpl_pmr[63]; /*+default 0x204-0x2FF FMPL_PMR1 - FMPL_PMR63, - FM Policer Profile Mapping Registers.
-+ (for port-ID 1-11, only for supported Port-ID registers) */
-+} t_FmPcdPlcrRegs;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/***********************************************************************/
-+/* Driver's internal structures */
-+/***********************************************************************/
-+
-+typedef struct {
-+ bool known;
-+ uint8_t id;
-+} t_FmPcdKgSchemesExtractsEntry;
-+
-+typedef struct {
-+ t_FmPcdKgSchemesExtractsEntry extractsArray[FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
-+} t_FmPcdKgSchemesExtracts;
-+
-+typedef struct {
-+ t_Handle h_Manip;
-+ bool keepRes;
-+ e_FmPcdEngine nextEngine;
-+ uint8_t parseCode;
-+} t_FmPcdInfoForManip;
-+
-+/**************************************************************************//**
-+ @Description A structure of parameters to communicate
-+ between the port and PCD regarding the KG scheme.
-+*//***************************************************************************/
-+typedef struct {
-+ uint8_t netEnvId; /* in */
-+ uint8_t numOfDistinctionUnits; /* in */
-+ uint8_t unitIds[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /* in */
-+ uint32_t vector; /* out */
-+} t_NetEnvParams;
-+
-+typedef struct {
-+ bool allocated;
-+ uint8_t ownerId; /* guestId for KG in multi-partition only.
-+ portId for PLCR in any environment */
-+} t_FmPcdAllocMng;
-+
-+typedef struct {
-+ volatile bool lock;
-+ bool used;
-+ uint8_t owners;
-+ uint8_t netEnvId;
-+ uint8_t guestId;
-+ uint8_t baseEntry;
-+ uint16_t sizeOfGrp;
-+ protocolOpt_t optArray[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
-+} t_FmPcdKgClsPlanGrp;
-+
-+typedef struct {
-+ t_Handle h_FmPcd;
-+ uint8_t schemeId;
-+ t_FmPcdLock *p_Lock;
-+ bool valid;
-+ uint8_t netEnvId;
-+ uint8_t owners;
-+ uint32_t matchVector;
-+ uint32_t ccUnits;
-+ bool nextRelativePlcrProfile;
-+ uint16_t relativeProfileId;
-+ uint16_t numOfProfiles;
-+ t_FmPcdKgKeyOrder orderedArray;
-+ e_FmPcdEngine nextEngine;
-+ e_FmPcdDoneAction doneAction;
-+ bool requiredActionFlag;
-+ uint32_t requiredAction;
-+ bool extractedOrs;
-+ uint8_t bitOffsetInPlcrProfile;
-+ bool directPlcr;
-+#if (DPAA_VERSION >= 11)
-+ bool vspe;
-+#endif
-+} t_FmPcdKgScheme;
-+
-+typedef union {
-+ struct fman_kg_scheme_regs schemeRegs;
-+ struct fman_kg_pe_regs portRegs;
-+ struct fman_kg_cp_regs clsPlanRegs;
-+} u_FmPcdKgIndirectAccessRegs;
-+
-+typedef struct {
-+ struct fman_kg_regs *p_FmPcdKgRegs;
-+ uint32_t schemeExceptionsBitMask;
-+ uint8_t numOfSchemes;
-+ t_Handle h_HwSpinlock;
-+ uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES];
-+ t_FmPcdKgScheme schemes[FM_PCD_KG_NUM_OF_SCHEMES];
-+ t_FmPcdKgClsPlanGrp clsPlanGrps[FM_MAX_NUM_OF_PORTS];
-+ uint8_t emptyClsPlanGrpId;
-+ t_FmPcdAllocMng schemesMng[FM_PCD_KG_NUM_OF_SCHEMES]; /* only for MASTER ! */
-+ t_FmPcdAllocMng clsPlanBlocksMng[FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP];
-+ u_FmPcdKgIndirectAccessRegs *p_IndirectAccessRegs;
-+} t_FmPcdKg;
-+
-+typedef struct {
-+ uint16_t profilesBase;
-+ uint16_t numOfProfiles;
-+ t_Handle h_FmPort;
-+} t_FmPcdPlcrMapParam;
-+
-+typedef struct {
-+ uint16_t absoluteProfileId;
-+ t_Handle h_FmPcd;
-+ bool valid;
-+ t_FmPcdLock *p_Lock;
-+ t_FmPcdAllocMng profilesMng;
-+ bool requiredActionFlag;
-+ uint32_t requiredAction;
-+ e_FmPcdEngine nextEngineOnGreen; /**< Green next engine type */
-+ u_FmPcdPlcrNextEngineParams paramsOnGreen; /**< Green next engine params */
-+
-+ e_FmPcdEngine nextEngineOnYellow; /**< Yellow next engine type */
-+ u_FmPcdPlcrNextEngineParams paramsOnYellow; /**< Yellow next engine params */
-+
-+ e_FmPcdEngine nextEngineOnRed; /**< Red next engine type */
-+ u_FmPcdPlcrNextEngineParams paramsOnRed; /**< Red next engine params */
-+} t_FmPcdPlcrProfile;
-+
-+typedef struct {
-+ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
-+ uint16_t partPlcrProfilesBase;
-+ uint16_t partNumOfPlcrProfiles;
-+ t_FmPcdPlcrProfile profiles[FM_PCD_PLCR_NUM_ENTRIES];
-+ uint16_t numOfSharedProfiles;
-+ uint16_t sharedProfilesIds[FM_PCD_PLCR_NUM_ENTRIES];
-+ t_FmPcdPlcrMapParam portsMapping[FM_MAX_NUM_OF_PORTS];
-+ t_Handle h_HwSpinlock;
-+ t_Handle h_SwSpinlock;
-+} t_FmPcdPlcr;
-+
-+typedef struct {
-+ uint32_t *p_SwPrsCode;
-+ uint32_t *p_CurrSwPrs;
-+ uint8_t currLabel;
-+ struct fman_prs_regs *p_FmPcdPrsRegs;
-+ t_FmPcdPrsLabelParams labelsTable[FM_PCD_PRS_NUM_OF_LABELS];
-+ uint32_t fmPcdPrsPortIdStatistics;
-+} t_FmPcdPrs;
-+
-+typedef struct {
-+ struct {
-+ e_NetHeaderType hdr;
-+ protocolOpt_t opt; /* only one option !! */
-+ } hdrs[FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS];
-+} t_FmPcdIntDistinctionUnit;
-+
-+typedef struct {
-+ e_NetHeaderType hdr;
-+ protocolOpt_t opt; /* only one option !! */
-+ e_NetHeaderType aliasHdr;
-+} t_FmPcdNetEnvAliases;
-+
-+typedef struct {
-+ uint8_t netEnvId;
-+ t_Handle h_FmPcd;
-+ t_Handle h_Spinlock;
-+ bool used;
-+ uint8_t owners;
-+ uint8_t clsPlanGrpId;
-+ t_FmPcdIntDistinctionUnit units[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
-+ uint32_t unitsVectors[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
-+ uint32_t lcvs[FM_PCD_PRS_NUM_OF_HDRS];
-+ uint32_t macsecVector;
-+ t_FmPcdNetEnvAliases aliasHdrs[FM_PCD_MAX_NUM_OF_ALIAS_HDRS];
-+} t_FmPcdNetEnv;
-+
-+typedef struct {
-+ struct fman_prs_cfg dfltCfg;
-+ bool plcrAutoRefresh;
-+ uint16_t prsMaxParseCycleLimit;
-+} t_FmPcdDriverParam;
-+
-+typedef struct {
-+ t_Handle h_Fm;
-+ t_Handle h_FmMuram;
-+ t_FmRevisionInfo fmRevInfo;
-+
-+ uint64_t physicalMuramBase;
-+
-+ t_Handle h_Spinlock;
-+ t_List freeLocksLst;
-+ t_List acquiredLocksLst;
-+
-+ t_Handle h_IpcSession; /* relevant for guest only */
-+ bool enabled;
-+ uint8_t guestId; /**< Guest Partition Id */
-+ uint8_t numOfEnabledGuestPartitionsPcds;
-+ char fmPcdModuleName[MODULE_NAME_SIZE];
-+ char fmPcdIpcHandlerModuleName[MODULE_NAME_SIZE]; /* relevant for guest only - this is the master's name */
-+ t_FmPcdNetEnv netEnvs[FM_MAX_NUM_OF_PORTS];
-+ t_FmPcdKg *p_FmPcdKg;
-+ t_FmPcdPlcr *p_FmPcdPlcr;
-+ t_FmPcdPrs *p_FmPcdPrs;
-+
-+ void *p_CcShadow; /**< CC MURAM shadow */
-+ uint32_t ccShadowSize;
-+ uint32_t ccShadowAlign;
-+ volatile bool shadowLock;
-+ t_Handle h_ShadowSpinlock;
-+
-+ t_Handle h_Hc;
-+
-+ uint32_t exceptions;
-+ t_FmPcdExceptionCallback *f_Exception;
-+ t_FmPcdIdExceptionCallback *f_FmPcdIndexedException;
-+ t_Handle h_App;
-+ uintptr_t ipv6FrameIdAddr;
-+ uintptr_t capwapFrameIdAddr;
-+ bool advancedOffloadSupport;
-+
-+ t_FmPcdDriverParam *p_FmPcdDriverParam;
-+} t_FmPcd;
-+
-+#if (DPAA_VERSION >= 11)
-+typedef uint8_t t_FmPcdFrmReplicUpdateType;
-+#define FRM_REPLIC_UPDATE_COUNTER 0x01
-+#define FRM_REPLIC_UPDATE_INFO 0x02
-+#endif /* (DPAA_VERSION >= 11) */
-+/***********************************************************************/
-+/* PCD internal routines */
-+/***********************************************************************/
-+
-+t_Error PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector);
-+t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params);
-+bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector);
-+t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams);
-+void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId);
-+e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr);
-+uint8_t FmPcdNetEnvGetUnitIdForSingleHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr);
-+uint8_t FmPcdNetEnvGetUnitId(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr, bool interchangeable, protocolOpt_t opt);
-+
-+t_Error FmPcdManipBuildIpReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv, t_Handle h_CcTree, t_Handle h_Manip, bool isIpv4, uint8_t groupId);
-+t_Error FmPcdManipDeleteIpReassmSchemes(t_Handle h_Manip);
-+t_Error FmPcdManipBuildCapwapReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv, t_Handle h_CcTree, t_Handle h_Manip, uint8_t groupId);
-+t_Error FmPcdManipDeleteCapwapReassmSchemes(t_Handle h_Manip);
-+bool FmPcdManipIpReassmIsIpv6Hdr(t_Handle h_Manip);
-+
-+t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams);
-+t_Error KgInit(t_FmPcd *p_FmPcd);
-+t_Error KgFree(t_FmPcd *p_FmPcd);
-+void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set);
-+bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId);
-+void KgEnable(t_FmPcd *p_FmPcd);
-+void KgDisable(t_FmPcd *p_FmPcd);
-+t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First);
-+void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base);
-+
-+/* only for MULTI partition */
-+t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds);
-+t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds);
-+/* only for SINGLE partition */
-+t_Error KgBindPortToSchemes(t_Handle h_FmPcd , uint8_t hardwarePortId, uint32_t spReg);
-+
-+t_FmPcdLock *FmPcdAcquireLock(t_Handle h_FmPcd);
-+void FmPcdReleaseLock(t_Handle h_FmPcd, t_FmPcdLock *p_Lock);
-+
-+t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams);
-+t_Error PlcrInit(t_FmPcd *p_FmPcd);
-+t_Error PlcrFree(t_FmPcd *p_FmPcd);
-+void PlcrEnable(t_FmPcd *p_FmPcd);
-+void PlcrDisable(t_FmPcd *p_FmPcd);
-+uint16_t PlcrAllocProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId);
-+void PlcrFreeProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId);
-+t_Error PlcrSetPortProfiles(t_FmPcd *p_FmPcd,
-+ uint8_t hardwarePortId,
-+ uint16_t numOfProfiles,
-+ uint16_t base);
-+t_Error PlcrClearPortProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId);
-+
-+t_Handle PrsConfig(t_FmPcd *p_FmPcd,t_FmPcdParams *p_FmPcdParams);
-+t_Error PrsInit(t_FmPcd *p_FmPcd);
-+void PrsEnable(t_FmPcd *p_FmPcd);
-+void PrsDisable(t_FmPcd *p_FmPcd);
-+void PrsFree(t_FmPcd *p_FmPcd );
-+t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include);
-+
-+t_Error FmPcdCcGetGrpParams(t_Handle treeId, uint8_t grpId, uint32_t *p_GrpBits, uint8_t *p_GrpBase);
-+uint8_t FmPcdCcGetOffset(t_Handle h_CcNode);
-+uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode);
-+uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode);
-+t_Error ValidateNextEngineParams(t_Handle h_FmPcd, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, e_FmPcdCcStatsMode supportedStatsMode);
-+
-+void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add);
-+t_Error FmPcdManipCheckParamsForCcNextEngine(t_FmPcdCcNextEngineParams *p_InfoForManip, uint32_t *requiredAction);
-+void FmPcdManipUpdateAdResultForCc(t_Handle h_Manip,
-+ t_FmPcdCcNextEngineParams *p_CcNextEngineParams,
-+ t_Handle p_Ad,
-+ t_Handle *p_AdNewPtr);
-+void FmPcdManipUpdateAdContLookupForCc(t_Handle h_Manip, t_Handle p_Ad, t_Handle *p_AdNew, uint32_t adTableOffset);
-+void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add);
-+t_Error FmPcdManipCheckParamsWithCcNodeParams(t_Handle h_Manip, t_Handle h_FmPcdCcNode);
-+#ifdef FM_CAPWAP_SUPPORT
-+t_Handle FmPcdManipApplSpecificBuild(void);
-+bool FmPcdManipIsCapwapApplSpecific(t_Handle h_Manip);
-+#endif /* FM_CAPWAP_SUPPORT */
-+#if (DPAA_VERSION >= 11)
-+void * FrmReplicGroupGetSourceTableDescriptor(t_Handle h_ReplicGroup);
-+void FrmReplicGroupUpdateOwner(t_Handle h_ReplicGroup, bool add);
-+void FrmReplicGroupUpdateAd(t_Handle h_ReplicGroup, void *p_Ad, t_Handle *h_AdNew);
-+
-+void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node,
-+ t_Handle h_ReplicGroup,
-+ t_List *p_AdTables,
-+ uint32_t *p_NumOfAdTables);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo, t_Handle h_Spinlock);
-+void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock);
-+t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock);
-+t_List *FmPcdManipGetSpinlock(t_Handle h_Manip);
-+t_List *FmPcdManipGetNodeLstPointedOnThisManip(t_Handle h_Manip);
-+
-+typedef struct
-+{
-+ t_Handle h_StatsAd;
-+ t_Handle h_StatsCounters;
-+#if (DPAA_VERSION >= 11)
-+ t_Handle h_StatsFLRs;
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmPcdCcStatsParams;
-+
-+void NextStepAd(t_Handle h_Ad,
-+ t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
-+ t_FmPcd *p_FmPcd);
-+void ReleaseLst(t_List *p_List);
-+
-+static __inline__ t_Handle FmPcdGetMuramHandle(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ ASSERT_COND(p_FmPcd);
-+ return p_FmPcd->h_FmMuram;
-+}
-+
-+static __inline__ uint64_t FmPcdGetMuramPhysBase(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ ASSERT_COND(p_FmPcd);
-+ return p_FmPcd->physicalMuramBase;
-+}
-+
-+static __inline__ uint32_t FmPcdLockSpinlock(t_FmPcdLock *p_Lock)
-+{
-+ ASSERT_COND(p_Lock);
-+ return XX_LockIntrSpinlock(p_Lock->h_Spinlock);
-+}
-+
-+static __inline__ void FmPcdUnlockSpinlock(t_FmPcdLock *p_Lock, uint32_t flags)
-+{
-+ ASSERT_COND(p_Lock);
-+ XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, flags);
-+}
-+
-+static __inline__ bool FmPcdLockTryLock(t_FmPcdLock *p_Lock)
-+{
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_Lock);
-+ intFlags = XX_LockIntrSpinlock(p_Lock->h_Spinlock);
-+ if (p_Lock->flag)
-+ {
-+ XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, intFlags);
-+ return FALSE;
-+ }
-+ p_Lock->flag = TRUE;
-+ XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, intFlags);
-+ return TRUE;
-+}
-+
-+static __inline__ void FmPcdLockUnlock(t_FmPcdLock *p_Lock)
-+{
-+ ASSERT_COND(p_Lock);
-+ p_Lock->flag = FALSE;
-+}
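
The inline helpers above implement a non-blocking try-lock: the interrupt-safe spinlock only guards the test-and-set of the flag, and callers that lose the race back off instead of spinning. A standalone C11 analogue of that pattern is sketched below; the sketch_* names and the use of atomic_flag are illustrative only, not driver APIs.

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_flag busy;                     /* plays the role of p_Lock->flag */
} sketch_lock_t;

static sketch_lock_t g_lock = { ATOMIC_FLAG_INIT };

/* Mirrors FmPcdLockTryLock(): returns true only if we acquired the lock. */
static bool sketch_try_lock(sketch_lock_t *l)
{
    return !atomic_flag_test_and_set(&l->busy);
}

/* Mirrors FmPcdLockUnlock(). */
static void sketch_unlock(sketch_lock_t *l)
{
    atomic_flag_clear(&l->busy);
}

/* typical use: if (sketch_try_lock(&g_lock)) { ...critical work...; sketch_unlock(&g_lock); } */
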
-+
-+
-+#endif /* __FM_PCD_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h
-@@ -0,0 +1,280 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_pcd_ipc.h
-+
-+ @Description FM PCD Inter-Partition prototypes, structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_PCD_IPC_H
-+#define __FM_PCD_IPC_H
-+
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Description Structure for getting a sw parser address according to a label
-+ Fields commented 'IN' are passed by the port module to be used
-+ by the FM module.
-+ Fields commented 'OUT' will be filled by FM before returning to port.
-+*//***************************************************************************/
-+typedef _Packed struct t_FmPcdIpcSwPrsLable
-+{
-+ uint32_t enumHdr; /**< IN. The existence of this header will invoke
-+ the sw parser code. */
-+    uint8_t             indexPerHdr;        /**< IN. Normally 0; if more than one sw parser
-+                                                 attachment exists for the same header, use this
-+                                                 index to distinguish between them. */
-+} _PackedType t_FmPcdIpcSwPrsLable;
-+
-+/**************************************************************************//**
-+ @Description Structure for port-PCD communication.
-+ Fields commented 'IN' are passed by the port module to be used
-+ by the FM module.
-+ Fields commented 'OUT' will be filled by FM before returning to port.
-+ Some fields are optional (depending on configuration) and
-+                    will be analyzed by the port and FM modules accordingly.
-+*//***************************************************************************/
-+
-+typedef struct t_FmPcdIpcKgSchemesParams
-+{
-+ uint8_t guestId;
-+ uint8_t numOfSchemes;
-+ uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES];
-+} _PackedType t_FmPcdIpcKgSchemesParams;
-+
-+typedef struct t_FmPcdIpcKgClsPlanParams
-+{
-+ uint8_t guestId;
-+ uint16_t numOfClsPlanEntries;
-+ uint8_t clsPlanBase;
-+} _PackedType t_FmPcdIpcKgClsPlanParams;
-+
-+typedef _Packed struct t_FmPcdIpcPrsIncludePort
-+{
-+ uint8_t hardwarePortId;
-+ bool include;
-+} _PackedType t_FmPcdIpcPrsIncludePort;
-+
-+
-+#define FM_PCD_MAX_REPLY_SIZE 16
-+#define FM_PCD_MAX_MSG_SIZE 36
-+#define FM_PCD_MAX_REPLY_BODY_SIZE 36
-+
-+typedef _Packed struct {
-+ uint32_t msgId;
-+ uint8_t msgBody[FM_PCD_MAX_MSG_SIZE];
-+} _PackedType t_FmPcdIpcMsg;
-+
-+typedef _Packed struct t_FmPcdIpcReply {
-+ uint32_t error;
-+ uint8_t replyBody[FM_PCD_MAX_REPLY_BODY_SIZE];
-+} _PackedType t_FmPcdIpcReply;
-+
-+typedef _Packed struct t_FmIpcResourceAllocParams {
-+ uint8_t guestId;
-+ uint16_t base;
-+ uint16_t num;
-+}_PackedType t_FmIpcResourceAllocParams;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
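
For orientation (editorial note, not from the patch): each IPC request is a fixed-size packed record, a 32-bit message id followed by an opaque body into which one of the packed parameter structures above is copied before being passed over the IPC session. Below is a standalone GCC/Clang analogue of that packing; the struct and function names are illustrative, and only the body size and the id value of FM_PCD_ALLOC_PROFILES come from this header.

#include <stdint.h>
#include <string.h>

#define MAX_MSG_SIZE 36                        /* mirrors FM_PCD_MAX_MSG_SIZE */

struct __attribute__((packed)) ipc_msg {
    uint32_t msg_id;                           /* e.g. FM_PCD_ALLOC_PROFILES */
    uint8_t  body[MAX_MSG_SIZE];
};

struct __attribute__((packed)) alloc_params {  /* shaped like t_FmIpcResourceAllocParams */
    uint8_t  guest_id;
    uint16_t base;
    uint16_t num;
};

/* Build a message: the 32-bit id first, then the parameter block copied into the body. */
static size_t build_alloc_msg(struct ipc_msg *msg, const struct alloc_params *p)
{
    msg->msg_id = 5;                           /* FM_PCD_ALLOC_PROFILES in this header */
    memcpy(msg->body, p, sizeof(*p));
    return sizeof(msg->msg_id) + sizeof(*p);   /* number of bytes actually sent */
}
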
-+
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ALLOC_KG_SCHEMES
-+
-+ @Description Used by FM PCD front-end in order to allocate KG resources
-+
-+ @Param[in/out] t_FmPcdIpcKgAllocParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_ALLOC_KG_SCHEMES 3
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FREE_KG_SCHEMES
-+
-+ @Description Used by FM PCD front-end in order to Free KG resources
-+
-+ @Param[in/out] t_FmPcdIpcKgSchemesParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_FREE_KG_SCHEMES 4
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ALLOC_PROFILES
-+
-+ @Description Used by FM PCD front-end in order to allocate Policer profiles
-+
-+ @Param[in/out] t_FmIpcResourceAllocParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_ALLOC_PROFILES 5
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FREE_PROFILES
-+
-+ @Description Used by FM PCD front-end in order to Free Policer profiles
-+
-+ @Param[in/out] t_FmIpcResourceAllocParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_FREE_PROFILES 6
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_SET_PORT_PROFILES
-+
-+ @Description Used by FM PCD front-end in order to allocate Policer profiles
-+ for specific port
-+
-+ @Param[in/out] t_FmIpcResourceAllocParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_SET_PORT_PROFILES 7
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_CLEAR_PORT_PROFILES
-+
-+ @Description   Used by FM PCD front-end in order to clear the Policer profiles
-+                of a specific port
-+
-+ @Param[in/out] t_FmIpcResourceAllocParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_CLEAR_PORT_PROFILES 8
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_GET_PHYS_MURAM_BASE
-+
-+ @Description Used by FM PCD front-end in order to get MURAM base address
-+
-+ @Param[in/out] t_FmPcdIcPhysAddr Pointer
-+*//***************************************************************************/
-+#define FM_PCD_GET_PHYS_MURAM_BASE 9
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_GET_SW_PRS_OFFSET
-+
-+ @Description Used by FM front-end to get the SW parser offset of the start of
-+ code relevant to a given label.
-+
-+ @Param[in/out] t_FmPcdIpcSwPrsLable Pointer
-+*//***************************************************************************/
-+#define FM_PCD_GET_SW_PRS_OFFSET 10
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MASTER_IS_ENABLED
-+
-+ @Description Used by FM front-end in order to verify
-+ PCD enablement.
-+
-+ @Param[in] bool Pointer
-+*//***************************************************************************/
-+#define FM_PCD_MASTER_IS_ENABLED 15
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_GUEST_DISABLE
-+
-+ @Description Used by FM front-end to inform back-end when
-+ front-end PCD is disabled
-+
-+ @Param[in] None
-+*//***************************************************************************/
-+#define FM_PCD_GUEST_DISABLE 16
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FREE_KG_CLSPLAN
-+
-+ @Description Used by FM PCD front-end in order to Free KG classification plan entries
-+
-+ @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_FREE_KG_CLSPLAN 22
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ALLOC_KG_CLSPLAN
-+
-+ @Description Used by FM PCD front-end in order to allocate KG classification plan entries
-+
-+ @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer
-+*//***************************************************************************/
-+#define FM_PCD_ALLOC_KG_CLSPLAN 23
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MASTER_IS_ALIVE
-+
-+ @Description Used by FM front-end to check that back-end exists
-+
-+ @Param[in] None
-+*//***************************************************************************/
-+#define FM_PCD_MASTER_IS_ALIVE 24
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_GET_COUNTER
-+
-+ @Description Used by FM front-end to read PCD counters
-+
-+ @Param[in/out] t_FmPcdIpcGetCounter Pointer
-+*//***************************************************************************/
-+#define FM_PCD_GET_COUNTER 25
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PRS_INC_PORT_STATS
-+
-+ @Description Used by FM front-end to set/clear statistics for port
-+
-+ @Param[in/out] t_FmPcdIpcPrsIncludePort Pointer
-+*//***************************************************************************/
-+#define FM_PCD_PRS_INC_PORT_STATS 26
-+
-+#if (DPAA_VERSION >= 11)
-+/* TODO - doc */
-+#define FM_PCD_ALLOC_SP 27
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+
-+/** @} */ /* end of FM_PCD_IPC_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#endif /* __FM_PCD_IPC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.c
-@@ -0,0 +1,1847 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_plcr.c
-+
-+ @Description FM PCD POLICER...
-+*//***************************************************************************/
-+#include <linux/math64.h>
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+#include "net_ext.h"
-+#include "fm_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_pcd.h"
-+#include "fm_hc.h"
-+#include "fm_pcd_ipc.h"
-+#include "fm_plcr.h"
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+
-+static uint32_t PlcrProfileLock(t_Handle h_Profile)
-+{
-+ ASSERT_COND(h_Profile);
-+ return FmPcdLockSpinlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock);
-+}
-+
-+static void PlcrProfileUnlock(t_Handle h_Profile, uint32_t intFlags)
-+{
-+ ASSERT_COND(h_Profile);
-+ FmPcdUnlockSpinlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock, intFlags);
-+}
-+
-+static bool PlcrProfileFlagTryLock(t_Handle h_Profile)
-+{
-+ ASSERT_COND(h_Profile);
-+ return FmPcdLockTryLock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock);
-+}
-+
-+static void PlcrProfileFlagUnlock(t_Handle h_Profile)
-+{
-+ ASSERT_COND(h_Profile);
-+ FmPcdLockUnlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock);
-+}
-+
-+static uint32_t PlcrHwLock(t_Handle h_FmPcdPlcr)
-+{
-+ ASSERT_COND(h_FmPcdPlcr);
-+ return XX_LockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_HwSpinlock);
-+}
-+
-+static void PlcrHwUnlock(t_Handle h_FmPcdPlcr, uint32_t intFlags)
-+{
-+ ASSERT_COND(h_FmPcdPlcr);
-+ XX_UnlockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_HwSpinlock, intFlags);
-+}
-+
-+static uint32_t PlcrSwLock(t_Handle h_FmPcdPlcr)
-+{
-+ ASSERT_COND(h_FmPcdPlcr);
-+ return XX_LockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_SwSpinlock);
-+}
-+
-+static void PlcrSwUnlock(t_Handle h_FmPcdPlcr, uint32_t intFlags)
-+{
-+ ASSERT_COND(h_FmPcdPlcr);
-+ XX_UnlockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_SwSpinlock, intFlags);
-+}
-+
-+static bool IsProfileShared(t_Handle h_FmPcd, uint16_t absoluteProfileId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint16_t i;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, FALSE);
-+
-+ for (i=0;i<p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles;i++)
-+ if (p_FmPcd->p_FmPcdPlcr->sharedProfilesIds[i] == absoluteProfileId)
-+ return TRUE;
-+ return FALSE;
-+}
-+
-+static t_Error SetProfileNia(t_FmPcd *p_FmPcd, e_FmPcdEngine nextEngine, u_FmPcdPlcrNextEngineParams *p_NextEngineParams, uint32_t *nextAction)
-+{
-+ uint32_t nia;
-+ uint16_t absoluteProfileId;
-+ uint8_t relativeSchemeId, physicalSchemeId;
-+
-+ nia = FM_PCD_PLCR_NIA_VALID;
-+
-+ switch (nextEngine)
-+ {
-+ case e_FM_PCD_DONE :
-+ switch (p_NextEngineParams->action)
-+ {
-+ case e_FM_PCD_DROP_FRAME :
-+ nia |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
-+ break;
-+ case e_FM_PCD_ENQ_FRAME:
-+ nia |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ break;
-+ case e_FM_PCD_KG:
-+ physicalSchemeId = FmPcdKgGetSchemeId(p_NextEngineParams->h_DirectScheme);
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId);
-+ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
-+ if (!FmPcdKgIsSchemeValidSw(p_NextEngineParams->h_DirectScheme))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid direct scheme."));
-+ if (!KgIsSchemeAlwaysDirect(p_FmPcd, relativeSchemeId))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Policer Profile may point only to a scheme that is always direct."));
-+ nia |= NIA_ENG_KG | NIA_KG_DIRECT | physicalSchemeId;
-+ break;
-+ case e_FM_PCD_PLCR:
-+ absoluteProfileId = ((t_FmPcdPlcrProfile *)p_NextEngineParams->h_Profile)->absoluteProfileId;
-+ if (!IsProfileShared(p_FmPcd, absoluteProfileId))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next profile must be a shared profile"));
-+ if (!FmPcdPlcrIsProfileValid(p_FmPcd, absoluteProfileId))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid profile "));
-+ nia |= NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ *nextAction = nia;
-+
-+ return E_OK;
-+}
-+
-+static uint32_t CalcFPP(uint32_t fpp)
-+{
-+ if (fpp > 15)
-+ return 15 - (0x1f - fpp);
-+ else
-+ return 16 + fpp;
-+}
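
CalcFPP() above re-encodes the internally computed shift (0..31) into the 5-bit FPP register field: shifts 0..15 map to 16..31 and shifts 16..31 map to 0..15, exactly as the later comment ("0-15 --> 16-31, 16-31 --> 0-15") states. A standalone check of that mapping (editorial sketch, plain C):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as CalcFPP() above. */
static uint32_t calc_fpp(uint32_t fpp_shift)
{
    return (fpp_shift > 15) ? 15 - (0x1f - fpp_shift) : 16 + fpp_shift;
}

int main(void)
{
    assert(calc_fpp(0)  == 16);   /* 0..15  -> 16..31 */
    assert(calc_fpp(15) == 31);
    assert(calc_fpp(16) == 0);    /* 16..31 -> 0..15  */
    assert(calc_fpp(31) == 15);
    return 0;
}
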
-+
-+static void GetInfoRateReg(e_FmPcdPlcrRateMode rateMode,
-+ uint32_t rate,
-+ uint64_t tsuInTenthNano,
-+ uint32_t fppShift,
-+ uint64_t *p_Integer,
-+ uint64_t *p_Fraction)
-+{
-+ uint64_t tmp, div;
-+
-+ if (rateMode == e_FM_PCD_PLCR_BYTE_MODE)
-+ {
-+ /* now we calculate the initial integer for the bigger rate */
-+ /* from Kbps to Bytes/TSU */
-+ tmp = (uint64_t)rate;
-+ tmp *= 1000; /* kb --> b */
-+ tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */
-+
-+ div = 1000000000; /* nano */
-+ div *= 10; /* 10 nano */
-+ div *= 8; /* bit to byte */
-+ }
-+ else
-+ {
-+ /* now we calculate the initial integer for the bigger rate */
-+ /* from Kbps to Bytes/TSU */
-+ tmp = (uint64_t)rate;
-+ tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */
-+
-+ div = 1000000000; /* nano */
-+ div *= 10; /* 10 nano */
-+ }
-+ *p_Integer = div64_u64(tmp<<fppShift, div);
-+
-+ /* for calculating the fraction, we will recalculate cir and deduct the integer.
-+     * For precision, we will multiply by 2^16. We do not divide back, since we write
-+ * this value as fraction - see spec.
-+ */
-+ *p_Fraction = div64_u64(((tmp<<fppShift)<<16) - ((*p_Integer<<16)*div), div);
-+}
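
GetInfoRateReg() turns a rate given in Kbit/s into bytes per timestamp unit, split into an integer part and a 16-bit binary fraction (the remainder scaled by 2^16). The standalone sketch below repeats the byte-mode arithmetic with plain 64-bit division; div64_u64() from <linux/math64.h> is only needed inside the kernel, and the sample numbers are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Kbit/s -> bytes per TSU (TSU expressed in tenths of a nanosecond),
 * returned as integer plus 16-bit fraction, mirroring the byte-mode path. */
static void info_rate_reg(uint32_t rate_kbps, uint64_t tsu_tenth_ns,
                          uint32_t fpp_shift,
                          uint64_t *integer, uint64_t *fraction)
{
    uint64_t tmp = (uint64_t)rate_kbps * 1000 * tsu_tenth_ns;  /* bits per (10 ns * TSU) scale */
    uint64_t div = 10000000000ULL * 8;                         /* tenths of ns per second, bits -> bytes */

    *integer  = (tmp << fpp_shift) / div;
    /* fraction = remainder scaled by 2^16, as written to the register */
    *fraction = (((tmp << fpp_shift) << 16) - ((*integer << 16) * div)) / div;
}

int main(void)
{
    uint64_t i, f;

    /* 1 Gbit/s with a 3.9 ns timestamp unit (39 tenths of a ns), no shift:
     * expect integer 0 and fraction ~31948 (~0.49 bytes per TSU). */
    info_rate_reg(1000000, 39, 0, &i, &f);
    printf("integer=%llu fraction=%llu\n",
           (unsigned long long)i, (unsigned long long)f);
    return 0;
}
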
-+
-+/* .......... */
-+
-+static void CalcRates(uint32_t bitFor1Micro,
-+ t_FmPcdPlcrNonPassthroughAlgParams *p_NonPassthroughAlgParam,
-+ uint32_t *cir,
-+ uint32_t *cbs,
-+ uint32_t *pir_eir,
-+ uint32_t *pbs_ebs,
-+ uint32_t *fpp)
-+{
-+ uint64_t integer, fraction;
-+ uint32_t temp, tsuInTenthNanos;
-+ uint8_t fppShift=0;
-+
-+    /* we want the tsu to count in 10-nano units for better precision; normally the tsu is 3.9 nano, so we will get 39 */
-+ tsuInTenthNanos = (uint32_t)(1000*10/(1 << bitFor1Micro));
-+
-+ /* we choose the faster rate to calibrate fpp */
-+ /* The meaning of this step:
-+ * when fppShift is 0 it means all TS bits are treated as integer and TSU is the TS LSB count.
-+ * In this configuration we calculate the integer and fraction that represent the higher infoRate
-+ * When this is done, we can tell where we have "spare" unused bits and optimize the division of TS
-+ * into "integer" and "fraction" where the logic is - as many bits as possible for integer at
-+ * high rate, as many bits as possible for fraction at low rate.
-+ */
-+ if (p_NonPassthroughAlgParam->committedInfoRate > p_NonPassthroughAlgParam->peakOrExcessInfoRate)
-+ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->committedInfoRate, tsuInTenthNanos, 0, &integer, &fraction);
-+ else
-+ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrExcessInfoRate, tsuInTenthNanos, 0, &integer, &fraction);
-+
-+ /* we shift integer, as in cir/pir it is represented by the MSB 16 bits, and
-+ * the LSB bits are for the fraction */
-+ temp = (uint32_t)((integer<<16) & 0x00000000FFFFFFFF);
-+    /* temp is affected by the rate. For low rates it may be as low as 0, and then we'll
-+ * take max FP = 31.
-+ * For high rates it will never exceed the 32 bit reg (after the 16 shift), as it is
-+ * limited by the 10G physical port.
-+ */
-+ if (temp != 0)
-+ {
-+ /* In this case, the largest rate integer is non 0, if it does not occupy all (high) 16
-+ * bits of the PIR_EIR we can use this fact and enlarge it to occupy all 16 bits.
-+ * The logic is to have as many bits for integer in the higher rates, but if we have "0"s
-+     * in the integer part of the cir/pir register, then these bits are wasted. So we want
-+ * to use these bits for the fraction. in this way we will have for fraction - the number
-+ * of "0" bits and the rest - for integer.
-+ * In other words: For each bit we shift it in PIR_EIR, we move the FP in the TS
-+ * one bit to the left - preserving the relationship and achieving more bits
-+ * for integer in the TS.
-+ */
-+
-+ /* count zeroes left of the higher used bit (in order to shift the value such that
-+ * unused bits may be used for fraction).
-+ */
-+ while ((temp & 0x80000000) == 0)
-+ {
-+ temp = temp << 1;
-+ fppShift++;
-+ }
-+ if (fppShift > 15)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, ("timeStampPeriod to Information rate ratio is too small"));
-+ return;
-+ }
-+ }
-+ else
-+ {
-+        temp = (uint32_t)fraction; /* fraction will always be smaller than 2^16 */
-+ if (!temp)
-+ /* integer and fraction are 0, we set FP to its max val */
-+ fppShift = 31;
-+ else
-+ {
-+ /* integer was 0 but fraction is not. FP is 16 for the fraction,
-+ * + all left zeroes of the fraction. */
-+ fppShift=16;
-+ /* count zeroes left of the higher used bit (in order to shift the value such that
-+ * unused bits may be used for fraction).
-+ */
-+ while ((temp & 0x8000) == 0)
-+ {
-+ temp = temp << 1;
-+ fppShift++;
-+ }
-+ }
-+ }
-+
-+ /*
-+ * This means that the FM TS register will now be used so that 'fppShift' bits are for
-+ * fraction and the rest for integer */
-+ /* now we re-calculate cir and pir_eir with the calculated FP */
-+ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->committedInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction);
-+ *cir = (uint32_t)(integer << 16 | (fraction & 0xFFFF));
-+ GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrExcessInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction);
-+ *pir_eir = (uint32_t)(integer << 16 | (fraction & 0xFFFF));
-+
-+ *cbs = p_NonPassthroughAlgParam->committedBurstSize;
-+ *pbs_ebs = p_NonPassthroughAlgParam->peakOrExcessBurstSize;
-+
-+ /* convert FP as it should be written to reg.
-+ * 0-15 --> 16-31
-+ * 16-31 --> 0-15
-+ */
-+ *fpp = CalcFPP(fppShift);
-+}
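
To make the calibration concrete: the integer part of the faster rate is placed in the upper 16 bits, and the number of leading zero bits found there becomes fppShift, i.e. how many timestamp bits are handed over to the fraction. An editorial, standalone sketch of that leading-zero search (the function name is illustrative; CalcRates() reports an error when the result exceeds 15):

#include <stdint.h>

/* Count how far (integer << 16) can be shifted left before its MSB is set;
 * CalcRates() uses this count as fppShift when the integer part is non-zero. */
static int calibrate_fpp_shift(uint32_t integer_part)
{
    uint32_t temp = integer_part << 16;
    int shift = 0;

    if (temp == 0)
        return -1;                 /* integer is 0: the fraction-only path is taken */

    while ((temp & 0x80000000u) == 0) {
        temp <<= 1;
        shift++;
    }
    return shift;                  /* only valid as an FPP shift when <= 15 */
}
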
-+
-+static void WritePar(t_FmPcd *p_FmPcd, uint32_t par)
-+{
-+ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+
-+ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
-+ WRITE_UINT32(p_FmPcdPlcrRegs->fmpl_par, par);
-+
-+ while (GET_UINT32(p_FmPcdPlcrRegs->fmpl_par) & FM_PCD_PLCR_PAR_GO) ;
-+}
-+
-+static t_Error BuildProfileRegs(t_FmPcd *p_FmPcd,
-+ t_FmPcdPlcrProfileParams *p_ProfileParams,
-+ t_FmPcdPlcrProfileRegs *p_PlcrRegs)
-+{
-+ t_Error err = E_OK;
-+ uint32_t pemode, gnia, ynia, rnia, bitFor1Micro;
-+
-+ ASSERT_COND(p_FmPcd);
-+
-+ bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
-+ if (bitFor1Micro == 0)
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Timestamp scale"));
-+
-+/* Set G, Y, R Nia */
-+ err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnGreen, &(p_ProfileParams->paramsOnGreen), &gnia);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnYellow, &(p_ProfileParams->paramsOnYellow), &ynia);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnRed, &(p_ProfileParams->paramsOnRed), &rnia);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+/* Mode fmpl_pemode */
-+ pemode = FM_PCD_PLCR_PEMODE_PI;
-+
-+ switch (p_ProfileParams->algSelection)
-+ {
-+ case e_FM_PCD_PLCR_PASS_THROUGH:
-+ p_PlcrRegs->fmpl_pecir = 0;
-+ p_PlcrRegs->fmpl_pecbs = 0;
-+ p_PlcrRegs->fmpl_pepepir_eir = 0;
-+ p_PlcrRegs->fmpl_pepbs_ebs = 0;
-+ p_PlcrRegs->fmpl_pelts = 0;
-+ p_PlcrRegs->fmpl_pects = 0;
-+ p_PlcrRegs->fmpl_pepts_ets = 0;
-+ pemode &= ~FM_PCD_PLCR_PEMODE_ALG_MASK;
-+ switch (p_ProfileParams->colorMode)
-+ {
-+ case e_FM_PCD_PLCR_COLOR_BLIND:
-+ pemode |= FM_PCD_PLCR_PEMODE_CBLND;
-+ switch (p_ProfileParams->color.dfltColor)
-+ {
-+ case e_FM_PCD_PLCR_GREEN:
-+ pemode &= ~FM_PCD_PLCR_PEMODE_DEFC_MASK;
-+ break;
-+ case e_FM_PCD_PLCR_YELLOW:
-+ pemode |= FM_PCD_PLCR_PEMODE_DEFC_Y;
-+ break;
-+ case e_FM_PCD_PLCR_RED:
-+ pemode |= FM_PCD_PLCR_PEMODE_DEFC_R;
-+ break;
-+ case e_FM_PCD_PLCR_OVERRIDE:
-+ pemode |= FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ break;
-+ case e_FM_PCD_PLCR_COLOR_AWARE:
-+ pemode &= ~FM_PCD_PLCR_PEMODE_CBLND;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ break;
-+
-+ case e_FM_PCD_PLCR_RFC_2698:
-+ /* Select algorithm MODE[ALG] = "01" */
-+ pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC2698;
-+ if (p_ProfileParams->nonPassthroughAlgParams.committedInfoRate > p_ProfileParams->nonPassthroughAlgParams.peakOrExcessInfoRate)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("in RFC2698 Peak rate must be equal or larger than committedInfoRate."));
-+ goto cont_rfc;
-+ case e_FM_PCD_PLCR_RFC_4115:
-+ /* Select algorithm MODE[ALG] = "10" */
-+ pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC4115;
-+cont_rfc:
-+ /* Select Color-Blind / Color-Aware operation (MODE[CBLND]) */
-+ switch (p_ProfileParams->colorMode)
-+ {
-+ case e_FM_PCD_PLCR_COLOR_BLIND:
-+ pemode |= FM_PCD_PLCR_PEMODE_CBLND;
-+ break;
-+ case e_FM_PCD_PLCR_COLOR_AWARE:
-+ pemode &= ~FM_PCD_PLCR_PEMODE_CBLND;
-+                /* In color-aware mode, select the override color interpretation (MODE[OVCLR]) */
-+ switch (p_ProfileParams->color.override)
-+ {
-+ case e_FM_PCD_PLCR_GREEN:
-+ pemode &= ~FM_PCD_PLCR_PEMODE_OVCLR_MASK;
-+ break;
-+ case e_FM_PCD_PLCR_YELLOW:
-+ pemode |= FM_PCD_PLCR_PEMODE_OVCLR_Y;
-+ break;
-+ case e_FM_PCD_PLCR_RED:
-+ pemode |= FM_PCD_PLCR_PEMODE_OVCLR_R;
-+ break;
-+ case e_FM_PCD_PLCR_OVERRIDE:
-+ pemode |= FM_PCD_PLCR_PEMODE_OVCLR_G_NC;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ /* Select Measurement Unit Mode to BYTE or PACKET (MODE[PKT]) */
-+ switch (p_ProfileParams->nonPassthroughAlgParams.rateMode)
-+ {
-+ case e_FM_PCD_PLCR_BYTE_MODE :
-+ pemode &= ~FM_PCD_PLCR_PEMODE_PKT;
-+ switch (p_ProfileParams->nonPassthroughAlgParams.byteModeParams.frameLengthSelection)
-+ {
-+ case e_FM_PCD_PLCR_L2_FRM_LEN:
-+ pemode |= FM_PCD_PLCR_PEMODE_FLS_L2;
-+ break;
-+ case e_FM_PCD_PLCR_L3_FRM_LEN:
-+ pemode |= FM_PCD_PLCR_PEMODE_FLS_L3;
-+ break;
-+ case e_FM_PCD_PLCR_L4_FRM_LEN:
-+ pemode |= FM_PCD_PLCR_PEMODE_FLS_L4;
-+ break;
-+ case e_FM_PCD_PLCR_FULL_FRM_LEN:
-+ pemode |= FM_PCD_PLCR_PEMODE_FLS_FULL;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ switch (p_ProfileParams->nonPassthroughAlgParams.byteModeParams.rollBackFrameSelection)
-+ {
-+ case e_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN:
-+ pemode &= ~FM_PCD_PLCR_PEMODE_RBFLS;
-+ break;
-+ case e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN:
-+ pemode |= FM_PCD_PLCR_PEMODE_RBFLS;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ break;
-+ case e_FM_PCD_PLCR_PACKET_MODE :
-+ pemode |= FM_PCD_PLCR_PEMODE_PKT;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ /* Select timeStamp floating point position (MODE[FPP]) to fit the actual traffic rates. For PACKET
-+ mode with low traffic rates move the fixed point to the left to increase fraction accuracy. For BYTE
-+ mode with high traffic rates move the fixed point to the right to increase integer accuracy. */
-+
-+ /* Configure Traffic Parameters*/
-+ {
-+ uint32_t cir=0, cbs=0, pir_eir=0, pbs_ebs=0, fpp=0;
-+
-+ CalcRates(bitFor1Micro, &p_ProfileParams->nonPassthroughAlgParams, &cir, &cbs, &pir_eir, &pbs_ebs, &fpp);
-+
-+ /* Set Committed Information Rate (CIR) */
-+ p_PlcrRegs->fmpl_pecir = cir;
-+ /* Set Committed Burst Size (CBS). */
-+ p_PlcrRegs->fmpl_pecbs = cbs;
-+ /* Set Peak Information Rate (PIR_EIR used as PIR) */
-+ p_PlcrRegs->fmpl_pepepir_eir = pir_eir;
-+ /* Set Peak Burst Size (PBS_EBS used as PBS) */
-+ p_PlcrRegs->fmpl_pepbs_ebs = pbs_ebs;
-+
-+        /* Initialize the Metering Buckets to be full (write them with 0xFFFFFFFF). */
-+ /* Peak Rate Token Bucket Size (PTS_ETS used as PTS) */
-+ p_PlcrRegs->fmpl_pepts_ets = 0xFFFFFFFF;
-+ /* Committed Rate Token Bucket Size (CTS) */
-+ p_PlcrRegs->fmpl_pects = 0xFFFFFFFF;
-+
-+ /* Set the FPP based on calculation */
-+ pemode |= (fpp << FM_PCD_PLCR_PEMODE_FPP_SHIFT);
-+ }
-+ break; /* FM_PCD_PLCR_PEMODE_ALG_RFC2698 , FM_PCD_PLCR_PEMODE_ALG_RFC4115 */
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ p_PlcrRegs->fmpl_pemode = pemode;
-+
-+ p_PlcrRegs->fmpl_pegnia = gnia;
-+ p_PlcrRegs->fmpl_peynia = ynia;
-+ p_PlcrRegs->fmpl_pernia = rnia;
-+
-+ /* Zero Counters */
-+ p_PlcrRegs->fmpl_pegpc = 0;
-+ p_PlcrRegs->fmpl_peypc = 0;
-+ p_PlcrRegs->fmpl_perpc = 0;
-+ p_PlcrRegs->fmpl_perypc = 0;
-+ p_PlcrRegs->fmpl_perrpc = 0;
-+
-+ return E_OK;
-+}
-+
-+static t_Error AllocSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds)
-+{
-+ uint32_t profilesFound;
-+ uint16_t i, k=0;
-+ uint32_t intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ if (!numOfProfiles)
-+ return E_OK;
-+
-+ if (numOfProfiles>FM_PCD_PLCR_NUM_ENTRIES)
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles is too big."));
-+
-+ intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr);
-+ /* Find numOfProfiles free profiles (may be spread) */
-+ profilesFound = 0;
-+ for (i=0;i<FM_PCD_PLCR_NUM_ENTRIES; i++)
-+ if (!p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated)
-+ {
-+ profilesFound++;
-+ profilesIds[k] = i;
-+ k++;
-+ if (profilesFound == numOfProfiles)
-+ break;
-+ }
-+
-+ if (profilesFound != numOfProfiles)
-+ {
-+ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,NO_MSG);
-+ }
-+
-+ for (i = 0;i<k;i++)
-+ {
-+ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = TRUE;
-+ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.ownerId = 0;
-+ }
-+ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+
-+ return E_OK;
-+}
-+
-+static void FreeSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds)
-+{
-+ uint16_t i;
-+
-+ SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE);
-+
-+ ASSERT_COND(numOfProfiles);
-+
-+ for (i=0; i < numOfProfiles; i++)
-+ {
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated);
-+ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = FALSE;
-+ p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.ownerId = p_FmPcd->guestId;
-+ }
-+}
-+
-+static void UpdateRequiredActionFlag(t_Handle h_FmPcd, uint16_t absoluteProfileId, bool set)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ /* this routine is protected by calling routine */
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
-+
-+ if (set)
-+ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredActionFlag = TRUE;
-+ else
-+ {
-+ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction = 0;
-+ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredActionFlag = FALSE;
-+ }
-+}
-+
-+/*********************************************/
-+/*............Policer Exception..............*/
-+/*********************************************/
-+static void EventsCB(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint32_t event, mask, force;
-+
-+ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
-+ event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr);
-+ mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier);
-+
-+ event &= mask;
-+
-+ /* clear the forced events */
-+ force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr);
-+ if (force & event)
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, force & ~event);
-+
-+
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr, event);
-+
-+ if (event & FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE)
-+ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE);
-+ if (event & FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE)
-+ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE);
-+}
-+
-+/* ..... */
-+
-+static void ErrorExceptionsCB(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint32_t event, force, captureReg, mask;
-+
-+ ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
-+ event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr);
-+ mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier);
-+
-+ event &= mask;
-+
-+ /* clear the forced events */
-+ force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr);
-+ if (force & event)
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, force & ~event);
-+
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr, event);
-+
-+ if (event & FM_PCD_PLCR_DOUBLE_ECC)
-+ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC);
-+ if (event & FM_PCD_PLCR_INIT_ENTRY_ERROR)
-+ {
-+ captureReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr);
-+ /*ASSERT_COND(captureReg & PLCR_ERR_UNINIT_CAP);
-+ p_UnInitCapt->profileNum = (uint8_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK);
-+ p_UnInitCapt->portId = (uint8_t)((captureReg & PLCR_ERR_UNINIT_PID_MASK) >>PLCR_ERR_UNINIT_PID_SHIFT) ;
-+ p_UnInitCapt->absolute = (bool)(captureReg & PLCR_ERR_UNINIT_ABSOLUTE_MASK);*/
-+ p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR,(uint16_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK));
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr, PLCR_ERR_UNINIT_CAP);
-+ }
-+}
-+
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+
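-+/* Allocate the Policer driver structure and set its default and partition parameters. */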
-+t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams)
-+{
-+ t_FmPcdPlcr *p_FmPcdPlcr;
-+ uint16_t i=0;
-+
-+ UNUSED(p_FmPcd);
-+ UNUSED(p_FmPcdParams);
-+
-+ p_FmPcdPlcr = (t_FmPcdPlcr *) XX_Malloc(sizeof(t_FmPcdPlcr));
-+ if (!p_FmPcdPlcr)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer structure allocation FAILED"));
-+ return NULL;
-+ }
-+ memset(p_FmPcdPlcr, 0, sizeof(t_FmPcdPlcr));
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ {
-+ p_FmPcdPlcr->p_FmPcdPlcrRegs = (t_FmPcdPlcrRegs *)UINT_TO_PTR(FmGetPcdPlcrBaseAddr(p_FmPcdParams->h_Fm));
-+ p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = DEFAULT_plcrAutoRefresh;
-+ p_FmPcd->exceptions |= (DEFAULT_fmPcdPlcrExceptions | DEFAULT_fmPcdPlcrErrorExceptions);
-+ }
-+
-+ p_FmPcdPlcr->numOfSharedProfiles = DEFAULT_numOfSharedPlcrProfiles;
-+
-+ p_FmPcdPlcr->partPlcrProfilesBase = p_FmPcdParams->partPlcrProfilesBase;
-+ p_FmPcdPlcr->partNumOfPlcrProfiles = p_FmPcdParams->partNumOfPlcrProfiles;
-+ /* for backward compatibility: if no policer profiles were specified, the number is set automatically to the max */
-+ if ((p_FmPcd->guestId == NCSW_MASTER_ID) &&
-+ (p_FmPcdPlcr->partNumOfPlcrProfiles == 0))
-+ p_FmPcdPlcr->partNumOfPlcrProfiles = FM_PCD_PLCR_NUM_ENTRIES;
-+
-+ for (i=0; i<FM_PCD_PLCR_NUM_ENTRIES; i++)
-+ p_FmPcdPlcr->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
-+
-+ return p_FmPcdPlcr;
-+}
-+
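-+/* Initialize the Policer: reserve this partition's profile window and, in the master partition, program the global and exception registers and register the interrupt handlers. */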
-+t_Error PlcrInit(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam;
-+ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
-+ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg32 = 0;
-+ uint16_t base;
-+
-+ if ((p_FmPcdPlcr->partPlcrProfilesBase + p_FmPcdPlcr->partNumOfPlcrProfiles) > FM_PCD_PLCR_NUM_ENTRIES)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("partPlcrProfilesBase+partNumOfPlcrProfiles out of range!!!"));
-+
-+ p_FmPcdPlcr->h_HwSpinlock = XX_InitSpinlock();
-+ if (!p_FmPcdPlcr->h_HwSpinlock)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer HW spinlock"));
-+
-+ p_FmPcdPlcr->h_SwSpinlock = XX_InitSpinlock();
-+ if (!p_FmPcdPlcr->h_SwSpinlock)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer SW spinlock"));
-+
-+ base = PlcrAllocProfilesForPartition(p_FmPcd,
-+ p_FmPcdPlcr->partPlcrProfilesBase,
-+ p_FmPcdPlcr->partNumOfPlcrProfiles,
-+ p_FmPcd->guestId);
-+ if (base == (uint16_t)ILLEGAL_BASE)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+
-+ if (p_FmPcdPlcr->numOfSharedProfiles)
-+ {
-+ err = AllocSharedProfiles(p_FmPcd,
-+ p_FmPcdPlcr->numOfSharedProfiles,
-+ p_FmPcdPlcr->sharedProfilesIds);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err,NO_MSG);
-+ }
-+
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ return E_OK;
-+
-+ /**********************FMPL_GCR******************/
-+ tmpReg32 = 0;
-+ tmpReg32 |= FM_PCD_PLCR_GCR_STEN;
-+ if (p_Param->plcrAutoRefresh)
-+ tmpReg32 |= FM_PCD_PLCR_GCR_DAR;
-+ tmpReg32 |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
-+
-+ WRITE_UINT32(p_Regs->fmpl_gcr, tmpReg32);
-+ /**********************FMPL_GCR******************/
-+
-+ /**********************FMPL_EEVR******************/
-+ WRITE_UINT32(p_Regs->fmpl_eevr, (FM_PCD_PLCR_DOUBLE_ECC | FM_PCD_PLCR_INIT_ENTRY_ERROR));
-+ /**********************FMPL_EEVR******************/
-+ /**********************FMPL_EIER******************/
-+ tmpReg32 = 0;
-+ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC)
-+ {
-+ FmEnableRamsEcc(p_FmPcd->h_Fm);
-+ tmpReg32 |= FM_PCD_PLCR_DOUBLE_ECC;
-+ }
-+ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR)
-+ tmpReg32 |= FM_PCD_PLCR_INIT_ENTRY_ERROR;
-+ WRITE_UINT32(p_Regs->fmpl_eier, tmpReg32);
-+ /**********************FMPL_EIER******************/
-+
-+ /**********************FMPL_EVR******************/
-+ WRITE_UINT32(p_Regs->fmpl_evr, (FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE | FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE));
-+ /**********************FMPL_EVR******************/
-+ /**********************FMPL_IER******************/
-+ tmpReg32 = 0;
-+ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE)
-+ tmpReg32 |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE;
-+ if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE)
-+ tmpReg32 |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE;
-+ WRITE_UINT32(p_Regs->fmpl_ier, tmpReg32);
-+ /**********************FMPL_IER******************/
-+
-+ /* register even if no interrupts enabled, to allow future enablement */
-+ FmRegisterIntr(p_FmPcd->h_Fm,
-+ e_FM_MOD_PLCR,
-+ 0,
-+ e_FM_INTR_TYPE_ERR,
-+ ErrorExceptionsCB,
-+ p_FmPcd);
-+ FmRegisterIntr(p_FmPcd->h_Fm,
-+ e_FM_MOD_PLCR,
-+ 0,
-+ e_FM_INTR_TYPE_NORMAL,
-+ EventsCB,
-+ p_FmPcd);
-+
-+ /* driver initializes one DFLT profile at the last entry*/
-+ /**********************FMPL_DPMR******************/
-+ tmpReg32 = 0;
-+ WRITE_UINT32(p_Regs->fmpl_dpmr, tmpReg32);
-+ p_FmPcd->p_FmPcdPlcr->profiles[0].profilesMng.allocated = TRUE;
-+
-+ return E_OK;
-+}
-+
-+t_Error PlcrFree(t_FmPcd *p_FmPcd)
-+{
-+ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_ERR);
-+ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_NORMAL);
-+
-+ if (p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles)
-+ FreeSharedProfiles(p_FmPcd,
-+ p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles,
-+ p_FmPcd->p_FmPcdPlcr->sharedProfilesIds);
-+
-+ if (p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles)
-+ PlcrFreeProfilesForPartition(p_FmPcd,
-+ p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase,
-+ p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles,
-+ p_FmPcd->guestId);
-+
-+ if (p_FmPcd->p_FmPcdPlcr->h_SwSpinlock)
-+ XX_FreeSpinlock(p_FmPcd->p_FmPcdPlcr->h_SwSpinlock);
-+
-+ if (p_FmPcd->p_FmPcdPlcr->h_HwSpinlock)
-+ XX_FreeSpinlock(p_FmPcd->p_FmPcdPlcr->h_HwSpinlock);
-+
-+ return E_OK;
-+}
-+
-+void PlcrEnable(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+
-+ WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) | FM_PCD_PLCR_GCR_EN);
-+}
-+
-+void PlcrDisable(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+
-+ WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) & ~FM_PCD_PLCR_GCR_EN);
-+}
-+
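-+/* Reserve a contiguous window of policer profiles for a partition; when an IPC session exists the request is forwarded to the master partition. */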
-+uint16_t PlcrAllocProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId)
-+{
-+ uint32_t intFlags;
-+ uint16_t profilesFound = 0;
-+ int i = 0;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr);
-+
-+ if (!numOfProfiles)
-+ return 0;
-+
-+ if ((numOfProfiles > FM_PCD_PLCR_NUM_ENTRIES) ||
-+ (base + numOfProfiles > FM_PCD_PLCR_NUM_ENTRIES))
-+ return (uint16_t)ILLEGAL_BASE;
-+
-+ if (p_FmPcd->h_IpcSession)
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ t_FmPcdIpcMsg msg;
-+ t_FmPcdIpcReply reply;
-+ t_Error err;
-+ uint32_t replyLength;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
-+ ipcAllocParams.guestId = p_FmPcd->guestId;
-+ ipcAllocParams.num = p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles;
-+ ipcAllocParams.base = p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase;
-+ msg.msgId = FM_PCD_ALLOC_PROFILES;
-+ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
-+ replyLength = sizeof(uint32_t) + sizeof(uint16_t);
-+ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if ((err != E_OK) ||
-+ (replyLength != (sizeof(uint32_t) + sizeof(uint16_t))))
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return (uint16_t)ILLEGAL_BASE;
-+ }
-+ else
-+ memcpy((uint8_t*)&p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase, reply.replyBody, sizeof(uint16_t));
-+ if (p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase == (uint16_t)ILLEGAL_BASE)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return (uint16_t)ILLEGAL_BASE;
-+ }
-+ }
-+ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ {
-+ DBG(WARNING, ("FM Guest mode, without IPC - can't validate Policer-profiles range!"));
-+ return (uint16_t)ILLEGAL_BASE;
-+ }
-+
-+ intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock);
-+ for (i=base; i<(base+numOfProfiles); i++)
-+ if (p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == (uint8_t)ILLEGAL_BASE)
-+ profilesFound++;
-+ else
-+ break;
-+
-+ if (profilesFound == numOfProfiles)
-+ for (i=base; i<(base+numOfProfiles); i++)
-+ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = guestId;
-+ else
-+ {
-+ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
-+ return (uint16_t)ILLEGAL_BASE;
-+ }
-+ XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags);
-+
-+ return base;
-+}
-+
-+void PlcrFreeProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId)
-+{
-+ int i = 0;
-+
-+ ASSERT_COND(p_FmPcd);
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr);
-+
-+ if (p_FmPcd->h_IpcSession)
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ t_FmPcdIpcMsg msg;
-+ t_Error err;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
-+ ipcAllocParams.guestId = p_FmPcd->guestId;
-+ ipcAllocParams.num = p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles;
-+ ipcAllocParams.base = p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase;
-+ msg.msgId = FM_PCD_FREE_PROFILES;
-+ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
-+ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return;
-+ }
-+ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ {
-+ DBG(WARNING, ("FM Guest mode, without IPC - can't validate Policer-profiles range!"));
-+ return;
-+ }
-+
-+ for (i=base; i<(base+numOfProfiles); i++)
-+ {
-+ if (p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == guestId)
-+ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
-+ else
-+ DBG(WARNING, ("Request for freeing storage profile window which wasn't allocated to this partition"));
-+ }
-+}
-+
-+t_Error PlcrSetPortProfiles(t_FmPcd *p_FmPcd,
-+ uint8_t hardwarePortId,
-+ uint16_t numOfProfiles,
-+ uint16_t base)
-+{
-+ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+ uint32_t log2Num, tmpReg32;
-+
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ !p_Regs &&
-+ p_FmPcd->h_IpcSession)
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ t_FmPcdIpcMsg msg;
-+ t_Error err;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
-+ ipcAllocParams.guestId = hardwarePortId;
-+ ipcAllocParams.num = numOfProfiles;
-+ ipcAllocParams.base = base;
-+ msg.msgId = FM_PCD_SET_PORT_PROFILES;
-+ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
-+ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+ else if (!p_Regs)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ if (GET_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1]) & FM_PCD_PLCR_PMR_V)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("The requesting port has already an allocated profiles window."));
-+
-+ /**********************FMPL_PMRx******************/
-+ LOG2((uint64_t)numOfProfiles, log2Num);
-+ tmpReg32 = base;
-+ tmpReg32 |= log2Num << 16;
-+ tmpReg32 |= FM_PCD_PLCR_PMR_V;
-+ WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], tmpReg32);
-+
-+ return E_OK;
-+}
-+
-+t_Error PlcrClearPortProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId)
-+{
-+ t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ !p_Regs &&
-+ p_FmPcd->h_IpcSession)
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ t_FmPcdIpcMsg msg;
-+ t_Error err;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
-+ ipcAllocParams.guestId = hardwarePortId;
-+ msg.msgId = FM_PCD_CLEAR_PORT_PROFILES;
-+ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
-+ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+ else if (!p_Regs)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+ WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], 0);
-+
-+ return E_OK;
-+}
-+
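-+/* Allocate a naturally aligned, power-of-2 sized block of free profiles for a port and program the port's profile window. */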
-+t_Error FmPcdPlcrAllocProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_Error err = E_OK;
-+ uint32_t profilesFound;
-+ uint32_t intFlags;
-+ uint16_t i, first, swPortIndex = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ if (!numOfProfiles)
-+ return E_OK;
-+
-+ ASSERT_COND(hardwarePortId);
-+
-+ if (numOfProfiles>FM_PCD_PLCR_NUM_ENTRIES)
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles is too big."));
-+
-+ if (!POWER_OF_2(numOfProfiles))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numProfiles must be a power of 2."));
-+
-+ first = 0;
-+ profilesFound = 0;
-+ intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr);
-+
-+ for (i=0; i<FM_PCD_PLCR_NUM_ENTRIES; )
-+ {
-+ if (!p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated)
-+ {
-+ profilesFound++;
-+ i++;
-+ if (profilesFound == numOfProfiles)
-+ break;
-+ }
-+ else
-+ {
-+ profilesFound = 0;
-+ /* advance i to the next aligned address */
-+ i = first = (uint16_t)(first + numOfProfiles);
-+ }
-+ }
-+
-+ if (profilesFound == numOfProfiles)
-+ {
-+ for (i=first; i<first + numOfProfiles; i++)
-+ {
-+ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated = TRUE;
-+ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = hardwarePortId;
-+ }
-+ }
-+ else
-+ {
-+ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+ RETURN_ERROR(MINOR, E_FULL, ("No profiles."));
-+ }
-+ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+
-+ err = PlcrSetPortProfiles(p_FmPcd, hardwarePortId, numOfProfiles, first);
-+ if (err)
-+ {
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = numOfProfiles;
-+ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = first;
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdPlcrFreeProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_Error err = E_OK;
-+ uint32_t intFlags;
-+ uint16_t i, swPortIndex = 0;
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
-+
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ err = PlcrClearPortProfiles(p_FmPcd, hardwarePortId);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err,NO_MSG);
-+
-+ intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr);
-+ for (i=p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase;
-+ i<(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase +
-+ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles);
-+ i++)
-+ {
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == hardwarePortId);
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated);
-+
-+ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated = FALSE;
-+ p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = p_FmPcd->guestId;
-+ }
-+ PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+
-+ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = 0;
-+ p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = 0;
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdPlcrCcGetSetParams(t_Handle h_FmPcd, uint16_t profileIndx ,uint32_t requiredAction)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
-+ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+ uint32_t tmpReg32, intFlags;
-+ t_Error err;
-+
-+ /* Calling function locked all PCD modules, so no need to lock here */
-+
-+ if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile out of range"));
-+
-+ if (!FmPcdPlcrIsProfileValid(p_FmPcd, profileIndx))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile is not valid"));
-+
-+ /*intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx]);*/
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcPcdPlcrCcGetSetParams(p_FmPcd->h_Hc, profileIndx, requiredAction);
-+
-+ UpdateRequiredActionFlag(p_FmPcd, profileIndx, TRUE);
-+ FmPcdPlcrUpdateRequiredAction(p_FmPcd, profileIndx, requiredAction);
-+
-+ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
-+ return err;
-+ }
-+
-+ /* lock the HW because once we read the registers we don't want them to be changed
-+ * by another access. (We can copy to a tmp location and release the lock!) */
-+
-+ intFlags = PlcrHwLock(p_FmPcdPlcr);
-+ WritePar(p_FmPcd, FmPcdPlcrBuildReadPlcrActionReg(profileIndx));
-+
-+ if (!p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].requiredActionFlag ||
-+ !(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].requiredAction & requiredAction))
-+ {
-+ if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
-+ {
-+ if ((p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnGreen!= e_FM_PCD_DONE) ||
-+ (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnYellow!= e_FM_PCD_DONE) ||
-+ (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnRed!= e_FM_PCD_DONE))
-+ {
-+ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
-+ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
-+ RETURN_ERROR (MAJOR, E_OK, ("In this case the next engine can only be e_FM_PCD_DONE"));
-+ }
-+
-+ if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnGreen.action == e_FM_PCD_ENQ_FRAME)
-+ {
-+ tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia);
-+ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
-+ {
-+ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
-+ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
-+ }
-+ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia, tmpReg32);
-+ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
-+ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA;
-+ WritePar(p_FmPcd, tmpReg32);
-+ }
-+
-+ if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnYellow.action == e_FM_PCD_ENQ_FRAME)
-+ {
-+ tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia);
-+ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
-+ {
-+ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
-+ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
-+ }
-+ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia, tmpReg32);
-+ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
-+ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA;
-+ WritePar(p_FmPcd, tmpReg32);
-+ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
-+ }
-+
-+ if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnRed.action == e_FM_PCD_ENQ_FRAME)
-+ {
-+ tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia);
-+ if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
-+ {
-+ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
-+ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
-+ }
-+ tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia, tmpReg32);
-+ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
-+ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA;
-+ WritePar(p_FmPcd, tmpReg32);
-+
-+ }
-+ }
-+ }
-+ PlcrHwUnlock(p_FmPcdPlcr, intFlags);
-+
-+ UpdateRequiredActionFlag(p_FmPcd, profileIndx, TRUE);
-+ FmPcdPlcrUpdateRequiredAction(p_FmPcd, profileIndx, requiredAction);
-+
-+ /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/
-+
-+ return E_OK;
-+}
-+
-+uint32_t FmPcdPlcrGetRequiredActionFlag(t_Handle h_FmPcd, uint16_t absoluteProfileId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
-+
-+ return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredActionFlag;
-+}
-+
-+uint32_t FmPcdPlcrGetRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
-+
-+ return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction;
-+}
-+
-+bool FmPcdPlcrIsProfileValid(t_Handle h_FmPcd, uint16_t absoluteProfileId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
-+
-+ ASSERT_COND(absoluteProfileId < FM_PCD_PLCR_NUM_ENTRIES);
-+
-+ return p_FmPcdPlcr->profiles[absoluteProfileId].valid;
-+}
-+
-+void FmPcdPlcrValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(!p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
-+
-+ intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId]);
-+ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = TRUE;
-+ PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId], intFlags);
-+}
-+
-+void FmPcdPlcrInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
-+
-+ intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId]);
-+ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = FALSE;
-+ PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId], intFlags);
-+}
-+
-+uint16_t FmPcdPlcrProfileGetAbsoluteId(t_Handle h_Profile)
-+{
-+ return ((t_FmPcdPlcrProfile*)h_Profile)->absoluteProfileId;
-+}
-+
-+t_Error FmPcdPlcrGetAbsoluteIdByProfileParams(t_Handle h_FmPcd,
-+ e_FmPcdProfileTypeSelection profileType,
-+ t_Handle h_FmPort,
-+ uint16_t relativeProfile,
-+ uint16_t *p_AbsoluteId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr;
-+ uint8_t i;
-+
-+ switch (profileType)
-+ {
-+ case e_FM_PCD_PLCR_PORT_PRIVATE:
-+ /* get port PCD id from port handle */
-+ for (i=0;i<FM_MAX_NUM_OF_PORTS;i++)
-+ if (p_FmPcd->p_FmPcdPlcr->portsMapping[i].h_FmPort == h_FmPort)
-+ break;
-+ if (i == FM_MAX_NUM_OF_PORTS)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE , ("Invalid port handle."));
-+
-+ if (!p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Port has no allocated profiles"));
-+ if (relativeProfile >= p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range"));
-+ *p_AbsoluteId = (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[i].profilesBase + relativeProfile);
-+ break;
-+ case e_FM_PCD_PLCR_SHARED:
-+ if (relativeProfile >= p_FmPcdPlcr->numOfSharedProfiles)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range"));
-+ *p_AbsoluteId = (uint16_t)(p_FmPcdPlcr->sharedProfilesIds[relativeProfile]);
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Invalid policer profile type"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+uint16_t FmPcdPlcrGetPortProfilesBase(t_Handle h_FmPcd, uint8_t hardwarePortId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint16_t swPortIndex = 0;
-+
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase;
-+}
-+
-+uint16_t FmPcdPlcrGetPortNumOfProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint16_t swPortIndex = 0;
-+
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles;
-+
-+}
-+uint32_t FmPcdPlcrBuildWritePlcrActionReg(uint16_t absoluteProfileId)
-+{
-+ return (uint32_t)(FM_PCD_PLCR_PAR_GO |
-+ ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT));
-+}
-+
-+uint32_t FmPcdPlcrBuildWritePlcrActionRegs(uint16_t absoluteProfileId)
-+{
-+ return (uint32_t)(FM_PCD_PLCR_PAR_GO |
-+ ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) |
-+ FM_PCD_PLCR_PAR_PWSEL_MASK);
-+}
-+
-+bool FmPcdPlcrHwProfileIsValid(uint32_t profileModeReg)
-+{
-+
-+ if (profileModeReg & FM_PCD_PLCR_PEMODE_PI)
-+ return TRUE;
-+ else
-+ return FALSE;
-+}
-+
-+uint32_t FmPcdPlcrBuildReadPlcrActionReg(uint16_t absoluteProfileId)
-+{
-+ return (uint32_t)(FM_PCD_PLCR_PAR_GO |
-+ FM_PCD_PLCR_PAR_R |
-+ ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) |
-+ FM_PCD_PLCR_PAR_PWSEL_MASK);
-+}
-+
-+uint32_t FmPcdPlcrBuildCounterProfileReg(e_FmPcdPlcrProfileCounters counter)
-+{
-+ switch (counter)
-+ {
-+ case (e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER):
-+ return FM_PCD_PLCR_PAR_PWSEL_PEGPC;
-+ case (e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER):
-+ return FM_PCD_PLCR_PAR_PWSEL_PEYPC;
-+ case (e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER) :
-+ return FM_PCD_PLCR_PAR_PWSEL_PERPC;
-+ case (e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER) :
-+ return FM_PCD_PLCR_PAR_PWSEL_PERYPC;
-+ case (e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER) :
-+ return FM_PCD_PLCR_PAR_PWSEL_PERRPC;
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ return 0;
-+ }
-+}
-+
-+uint32_t FmPcdPlcrBuildNiaProfileReg(bool green, bool yellow, bool red)
-+{
-+
-+ uint32_t tmpReg32 = 0;
-+
-+ if (green)
-+ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA;
-+ if (yellow)
-+ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA;
-+ if (red)
-+ tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA;
-+
-+ return tmpReg32;
-+}
-+
-+void FmPcdPlcrUpdateRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId, uint32_t requiredAction)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ /* this routine is protected by calling routine */
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid);
-+
-+ p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction |= requiredAction;
-+}
-+
-+/*********************** End of inter-module routines ************************/
-+
-+
-+/**************************************************/
-+/*............Policer API.........................*/
-+/**************************************************/
-+
-+t_Error FM_PCD_ConfigPlcrAutoRefreshMode(t_Handle h_FmPcd, bool enable)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE);
-+
-+ if (!FmIsMaster(p_FmPcd->h_Fm))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPlcrAutoRefreshMode - guest mode!"));
-+
-+ p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = enable;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_ConfigPlcrNumOfSharedProfiles(t_Handle h_FmPcd, uint16_t numOfSharedPlcrProfiles)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE);
-+
-+ p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles = numOfSharedPlcrProfiles;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_SetPlcrStatistics(t_Handle h_FmPcd, bool enable)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t tmpReg32;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE);
-+
-+ if (!FmIsMaster(p_FmPcd->h_Fm))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPlcrStatistics - guest mode!"));
-+
-+ tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr);
-+ if (enable)
-+ tmpReg32 |= FM_PCD_PLCR_GCR_STEN;
-+ else
-+ tmpReg32 &= ~FM_PCD_PLCR_GCR_STEN;
-+
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr, tmpReg32);
-+ return E_OK;
-+}
-+
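-+/* Create a new policer profile or modify an existing one, writing the profile registers either through the host-command interface or directly to the policer register file. */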
-+t_Handle FM_PCD_PlcrProfileSet(t_Handle h_FmPcd,
-+ t_FmPcdPlcrProfileParams *p_ProfileParams)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
-+ t_FmPcdPlcrProfileRegs plcrProfileReg;
-+ uint32_t intFlags;
-+ uint16_t absoluteProfileId;
-+ t_Error err = E_OK;
-+ uint32_t tmpReg32;
-+ t_FmPcdPlcrProfile *p_Profile;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
-+
-+ if (p_ProfileParams->modify)
-+ {
-+ p_Profile = (t_FmPcdPlcrProfile *)p_ProfileParams->id.h_Profile;
-+ p_FmPcd = p_Profile->h_FmPcd;
-+ absoluteProfileId = p_Profile->absoluteProfileId;
-+ if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too big"));
-+ return NULL;
-+ }
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, NULL);
-+
-+ /* Try lock profile using flag */
-+ if (!PlcrProfileFlagTryLock(p_Profile))
-+ {
-+ DBG(TRACE, ("Profile Try Lock - BUSY"));
-+ /* Signal to caller BUSY condition */
-+ p_ProfileParams->id.h_Profile = NULL;
-+ return NULL;
-+ }
-+ }
-+ else
-+ {
-+ p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, NULL);
-+
-+ /* SMP: needs to be protected only if another core now changes the windows */
-+ err = FmPcdPlcrGetAbsoluteIdByProfileParams(h_FmPcd,
-+ p_ProfileParams->id.newParams.profileType,
-+ p_ProfileParams->id.newParams.h_FmPort,
-+ p_ProfileParams->id.newParams.relativeProfileId,
-+ &absoluteProfileId);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return NULL;
-+ }
-+
-+ if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too big"));
-+ return NULL;
-+ }
-+
-+ if (FmPcdPlcrIsProfileValid(p_FmPcd, absoluteProfileId))
-+ {
-+ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Policer Profile is already used"));
-+ return NULL;
-+ }
-+
-+ /* initialize profile struct */
-+ p_Profile = &p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId];
-+
-+ p_Profile->h_FmPcd = p_FmPcd;
-+ p_Profile->absoluteProfileId = absoluteProfileId;
-+
-+ p_Profile->p_Lock = FmPcdAcquireLock(p_FmPcd);
-+ if (!p_Profile->p_Lock)
-+ REPORT_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM Policer Profile lock obj!"));
-+ }
-+
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL);
-+
-+ p_Profile->nextEngineOnGreen = p_ProfileParams->nextEngineOnGreen;
-+ memcpy(&p_Profile->paramsOnGreen, &(p_ProfileParams->paramsOnGreen), sizeof(u_FmPcdPlcrNextEngineParams));
-+
-+ p_Profile->nextEngineOnYellow = p_ProfileParams->nextEngineOnYellow;
-+ memcpy(&p_Profile->paramsOnYellow, &(p_ProfileParams->paramsOnYellow), sizeof(u_FmPcdPlcrNextEngineParams));
-+
-+ p_Profile->nextEngineOnRed = p_ProfileParams->nextEngineOnRed;
-+ memcpy(&p_Profile->paramsOnRed, &(p_ProfileParams->paramsOnRed), sizeof(u_FmPcdPlcrNextEngineParams));
-+
-+ memset(&plcrProfileReg, 0, sizeof(t_FmPcdPlcrProfileRegs));
-+
-+ /* build the policer profile registers */
-+ err = BuildProfileRegs(h_FmPcd, p_ProfileParams, &plcrProfileReg);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ if (p_ProfileParams->modify)
-+ /* unlock */
-+ PlcrProfileFlagUnlock(p_Profile);
-+ if (!p_ProfileParams->modify &&
-+ p_Profile->p_Lock)
-+ /* release allocated Profile lock */
-+ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
-+ return NULL;
-+ }
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcPcdPlcrSetProfile(p_FmPcd->h_Hc, (t_Handle)p_Profile, &plcrProfileReg);
-+ if (p_ProfileParams->modify)
-+ PlcrProfileFlagUnlock(p_Profile);
-+ if (err)
-+ {
-+ /* release the allocated scheme lock */
-+ if (!p_ProfileParams->modify &&
-+ p_Profile->p_Lock)
-+ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
-+
-+ return NULL;
-+ }
-+ if (!p_ProfileParams->modify)
-+ FmPcdPlcrValidateProfileSw(p_FmPcd,absoluteProfileId);
-+ return (t_Handle)p_Profile;
-+ }
-+
-+ p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, NULL);
-+
-+ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pemode , plcrProfileReg.fmpl_pemode);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia , plcrProfileReg.fmpl_pegnia);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia , plcrProfileReg.fmpl_peynia);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia , plcrProfileReg.fmpl_pernia);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecir , plcrProfileReg.fmpl_pecir);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecbs , plcrProfileReg.fmpl_pecbs);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepepir_eir,plcrProfileReg.fmpl_pepepir_eir);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepbs_ebs,plcrProfileReg.fmpl_pepbs_ebs);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pelts , plcrProfileReg.fmpl_pelts);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pects , plcrProfileReg.fmpl_pects);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepts_ets,plcrProfileReg.fmpl_pepts_ets);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc , plcrProfileReg.fmpl_pegpc);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc , plcrProfileReg.fmpl_peypc);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc , plcrProfileReg.fmpl_perpc);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc , plcrProfileReg.fmpl_perypc);
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc , plcrProfileReg.fmpl_perrpc);
-+
-+ tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(absoluteProfileId);
-+ WritePar(p_FmPcd, tmpReg32);
-+
-+ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+
-+ if (!p_ProfileParams->modify)
-+ FmPcdPlcrValidateProfileSw(p_FmPcd,absoluteProfileId);
-+ else
-+ PlcrProfileFlagUnlock(p_Profile);
-+
-+ return (t_Handle)p_Profile;
-+}
-+
-+t_Error FM_PCD_PlcrProfileDelete(t_Handle h_Profile)
-+{
-+ t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile;
-+ t_FmPcd *p_FmPcd;
-+ uint16_t profileIndx;
-+ uint32_t tmpReg32, intFlags;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE);
-+ p_FmPcd = p_Profile->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ profileIndx = p_Profile->absoluteProfileId;
-+
-+ UpdateRequiredActionFlag(p_FmPcd, profileIndx, FALSE);
-+
-+ FmPcdPlcrInvalidateProfileSw(p_FmPcd,profileIndx);
-+
-+ if (p_FmPcd->h_Hc)
-+ {
-+ err = FmHcPcdPlcrDeleteProfile(p_FmPcd->h_Hc, h_Profile);
-+ if (p_Profile->p_Lock)
-+ /* release allocated Profile lock */
-+ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
-+
-+ return err;
-+ }
-+
-+ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
-+ WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs.fmpl_pemode, ~FM_PCD_PLCR_PEMODE_PI);
-+
-+ tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx);
-+ WritePar(p_FmPcd, tmpReg32);
-+ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+
-+
-+ if (p_Profile->p_Lock)
-+ /* release allocated Profile lock */
-+ FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock);
-+
-+ /* we do not memset profile as all its fields are being re-initialized at "set",
-+ * plus its allocation information is still valid. */
-+ return E_OK;
-+}
-+
-+/***************************************************/
-+/*............Policer Profile Counter..............*/
-+/***************************************************/
-+uint32_t FM_PCD_PlcrProfileGetCounter(t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter)
-+{
-+ t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile;
-+ t_FmPcd *p_FmPcd;
-+ uint16_t profileIndx;
-+ uint32_t intFlags, counterVal = 0;
-+ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE);
-+ p_FmPcd = p_Profile->h_FmPcd;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+
-+ if (p_FmPcd->h_Hc)
-+ return FmHcPcdPlcrGetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter);
-+
-+ p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, 0);
-+
-+ profileIndx = p_Profile->absoluteProfileId;
-+
-+ if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too big"));
-+ return 0;
-+ }
-+ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
-+ WritePar(p_FmPcd, FmPcdPlcrBuildReadPlcrActionReg(profileIndx));
-+
-+ switch (counter)
-+ {
-+ case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER:
-+ counterVal = (GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc));
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER:
-+ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc);
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER:
-+ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc);
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER:
-+ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc);
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER:
-+ counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc);
-+ break;
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ break;
-+ }
-+ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+
-+ return counterVal;
-+}
-+
-+t_Error FM_PCD_PlcrProfileSetCounter(t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value)
-+{
-+ t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile;
-+ t_FmPcd *p_FmPcd;
-+ uint16_t profileIndx;
-+ uint32_t tmpReg32, intFlags;
-+ t_FmPcdPlcrRegs *p_FmPcdPlcrRegs;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE);
-+
-+ p_FmPcd = p_Profile->h_FmPcd;
-+ profileIndx = p_Profile->absoluteProfileId;
-+
-+ if (p_FmPcd->h_Hc)
-+ return FmHcPcdPlcrSetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter, value);
-+
-+ p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcdPlcrRegs, E_INVALID_HANDLE);
-+
-+ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr);
-+ switch (counter)
-+ {
-+ case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER:
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc, value);
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER:
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc, value);
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER:
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc, value);
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER:
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc ,value);
-+ break;
-+ case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER:
-+ WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc ,value);
-+ break;
-+ default:
-+ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ /* Activate the atomic write action by writing FMPL_PAR with: GO=1, PSI=0, PNUM =
-+ * Profile Number, and PWSEL selecting only the counter word to be written.
-+ */
-+ tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx);
-+ tmpReg32 |= FmPcdPlcrBuildCounterProfileReg(counter);
-+ WritePar(p_FmPcd, tmpReg32);
-+ PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags);
-+
-+ return E_OK;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_plcr.h
-@@ -0,0 +1,165 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_plcr.h
-+
-+ @Description FM Policer private header
-+*//***************************************************************************/
-+#ifndef __FM_PLCR_H
-+#define __FM_PLCR_H
-+
-+#include "std_ext.h"
-+
-+
-+/***********************************************************************/
-+/* Policer defines */
-+/***********************************************************************/
-+
-+#define FM_PCD_PLCR_PAR_GO 0x80000000
-+#define FM_PCD_PLCR_PAR_PWSEL_MASK 0x0000FFFF
-+#define FM_PCD_PLCR_PAR_R 0x40000000
-+
-+/* shifts */
-+#define FM_PCD_PLCR_PAR_PNUM_SHIFT 16
-+
-+/* masks */
-+#define FM_PCD_PLCR_PEMODE_PI 0x80000000
-+#define FM_PCD_PLCR_PEMODE_CBLND 0x40000000
-+#define FM_PCD_PLCR_PEMODE_ALG_MASK 0x30000000
-+#define FM_PCD_PLCR_PEMODE_ALG_RFC2698 0x10000000
-+#define FM_PCD_PLCR_PEMODE_ALG_RFC4115 0x20000000
-+#define FM_PCD_PLCR_PEMODE_DEFC_MASK 0x0C000000
-+#define FM_PCD_PLCR_PEMODE_DEFC_Y 0x04000000
-+#define FM_PCD_PLCR_PEMODE_DEFC_R 0x08000000
-+#define FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE 0x0C000000
-+#define FM_PCD_PLCR_PEMODE_OVCLR_MASK 0x03000000
-+#define FM_PCD_PLCR_PEMODE_OVCLR_Y 0x01000000
-+#define FM_PCD_PLCR_PEMODE_OVCLR_R 0x02000000
-+#define FM_PCD_PLCR_PEMODE_OVCLR_G_NC 0x03000000
-+#define FM_PCD_PLCR_PEMODE_PKT 0x00800000
-+#define FM_PCD_PLCR_PEMODE_FPP_MASK 0x001F0000
-+#define FM_PCD_PLCR_PEMODE_FPP_SHIFT 16
-+#define FM_PCD_PLCR_PEMODE_FLS_MASK 0x0000F000
-+#define FM_PCD_PLCR_PEMODE_FLS_L2 0x00003000
-+#define FM_PCD_PLCR_PEMODE_FLS_L3 0x0000B000
-+#define FM_PCD_PLCR_PEMODE_FLS_L4 0x0000E000
-+#define FM_PCD_PLCR_PEMODE_FLS_FULL 0x0000F000
-+#define FM_PCD_PLCR_PEMODE_RBFLS 0x00000800
-+#define FM_PCD_PLCR_PEMODE_TRA 0x00000004
-+#define FM_PCD_PLCR_PEMODE_TRB 0x00000002
-+#define FM_PCD_PLCR_PEMODE_TRC 0x00000001
-+#define FM_PCD_PLCR_DOUBLE_ECC 0x80000000
-+#define FM_PCD_PLCR_INIT_ENTRY_ERROR 0x40000000
-+#define FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE 0x80000000
-+#define FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE 0x40000000
-+
-+#define FM_PCD_PLCR_NIA_VALID 0x80000000
-+
-+#define FM_PCD_PLCR_GCR_EN 0x80000000
-+#define FM_PCD_PLCR_GCR_STEN 0x40000000
-+#define FM_PCD_PLCR_GCR_DAR 0x20000000
-+#define FM_PCD_PLCR_GCR_DEFNIA 0x00FFFFFF
-+#define FM_PCD_PLCR_NIA_ABS 0x00000100
-+
-+#define FM_PCD_PLCR_GSR_BSY 0x80000000
-+#define FM_PCD_PLCR_GSR_DQS 0x60000000
-+#define FM_PCD_PLCR_GSR_RPB 0x20000000
-+#define FM_PCD_PLCR_GSR_FQS 0x0C000000
-+#define FM_PCD_PLCR_GSR_LPALG 0x0000C000
-+#define FM_PCD_PLCR_GSR_LPCA 0x00003000
-+#define FM_PCD_PLCR_GSR_LPNUM 0x000000FF
-+
-+#define FM_PCD_PLCR_EVR_PSIC 0x80000000
-+#define FM_PCD_PLCR_EVR_AAC 0x40000000
-+
-+#define FM_PCD_PLCR_PAR_PSI 0x20000000
-+#define FM_PCD_PLCR_PAR_PNUM 0x00FF0000
-+/* PWSEL Selective select options */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEMODE 0x00008000 /* 0 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEGNIA 0x00004000 /* 1 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEYNIA 0x00002000 /* 2 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PERNIA 0x00001000 /* 3 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PECIR 0x00000800 /* 4 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PECBS 0x00000400 /* 5 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEPIR_EIR 0x00000200 /* 6 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEPBS_EBS 0x00000100 /* 7 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PELTS 0x00000080 /* 8 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PECTS 0x00000040 /* 9 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEPTS_ETS 0x00000020 /* 10 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEGPC 0x00000010 /* 11 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PEYPC 0x00000008 /* 12 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PERPC 0x00000004 /* 13 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PERYPC 0x00000002 /* 14 */
-+#define FM_PCD_PLCR_PAR_PWSEL_PERRPC 0x00000001 /* 15 */
-+
-+#define FM_PCD_PLCR_PAR_PMR_BRN_1TO1 0x0000 /* - Full bit replacement. {PBNUM[0:N-1]}.
-+ 1-> 2^N specific locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_2TO2 0x1 /* - {PBNUM[0:N-2],PNUM[N-1]}.
-+ 2-> 2^(N-1) base locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_4TO4 0x2 /* - {PBNUM[0:N-3],PNUM[N-2:N-1]}.
-+ 4-> 2^(N-2) base locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_8TO8 0x3 /* - {PBNUM[0:N-4],PNUM[N-3:N-1]}.
-+ 8->2^(N-3) base locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_16TO16 0x4 /* - {PBNUM[0:N-5],PNUM[N-4:N-1]}.
-+ 16-> 2^(N-4) base locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_32TO32 0x5 /* {PBNUM[0:N-6],PNUM[N-5:N-1]}.
-+ 32-> 2^(N-5) base locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_64TO64 0x6 /* {PBNUM[0:N-7],PNUM[N-6:N-1]}.
-+ 64-> 2^(N-6) base locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_128TO128 0x7 /* {PBNUM[0:N-8],PNUM[N-7:N-1]}.
-+ 128-> 2^(N-7) base locations. */
-+#define FM_PCD_PLCR_PAR_PMR_BRN_256TO256 0x8 /* - No bit replacement for N=8. {PNUM[N-8:N-1]}.
-+ When N=8 this option maps all 256 profiles by the DISPATCH bus into one group. */
-+
-+#define FM_PCD_PLCR_PMR_V 0x80000000
-+#define PLCR_ERR_ECC_CAP 0x80000000
-+#define PLCR_ERR_ECC_TYPE_DOUBLE 0x40000000
-+#define PLCR_ERR_ECC_PNUM_MASK 0x00000FF0
-+#define PLCR_ERR_ECC_OFFSET_MASK 0x0000000F
-+
-+#define PLCR_ERR_UNINIT_CAP 0x80000000
-+#define PLCR_ERR_UNINIT_NUM_MASK 0x000000FF
-+#define PLCR_ERR_UNINIT_PID_MASK 0x003f0000
-+#define PLCR_ERR_UNINIT_ABSOLUTE_MASK 0x00008000
-+
-+/* shifts */
-+#define PLCR_ERR_ECC_PNUM_SHIFT 4
-+#define PLCR_ERR_UNINIT_PID_SHIFT 16
-+
-+#define FM_PCD_PLCR_PMR_BRN_SHIFT 16
-+
-+#define PLCR_PORT_WINDOW_SIZE(hardwarePortId)
-+
-+
-+#endif /* __FM_PLCR_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c
-@@ -0,0 +1,423 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_prs.c
-+
-+ @Description FM Parser ...
-+*//***************************************************************************/
-+#include <linux/math64.h>
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+#include "net_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_pcd.h"
-+#include "fm_pcd_ipc.h"
-+#include "fm_prs.h"
-+#include "fsl_fman_prs.h"
-+
-+
-+static void PcdPrsErrorException(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint32_t event, ev_mask;
-+ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+
-+ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
-+ ev_mask = fman_prs_get_err_ev_mask(PrsRegs);
-+
-+ event = fman_prs_get_err_event(PrsRegs, ev_mask);
-+
-+ fman_prs_ack_err_event(PrsRegs, event);
-+
-+ DBG(TRACE, ("parser error - 0x%08x\n",event));
-+
-+ if(event & FM_PCD_PRS_DOUBLE_ECC)
-+ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC);
-+}
-+
-+static void PcdPrsException(t_Handle h_FmPcd)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ uint32_t event, ev_mask;
-+ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+
-+ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
-+ ev_mask = fman_prs_get_expt_ev_mask(PrsRegs);
-+ event = fman_prs_get_expt_event(PrsRegs, ev_mask);
-+
-+ ASSERT_COND(event & FM_PCD_PRS_SINGLE_ECC);
-+
-+ DBG(TRACE, ("parser event - 0x%08x\n",event));
-+
-+ fman_prs_ack_expt_event(PrsRegs, event);
-+
-+ p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC);
-+}
-+
-+t_Handle PrsConfig(t_FmPcd *p_FmPcd,t_FmPcdParams *p_FmPcdParams)
-+{
-+ t_FmPcdPrs *p_FmPcdPrs;
-+ uintptr_t baseAddr;
-+
-+ UNUSED(p_FmPcd);
-+ UNUSED(p_FmPcdParams);
-+
-+ p_FmPcdPrs = (t_FmPcdPrs *) XX_Malloc(sizeof(t_FmPcdPrs));
-+ if (!p_FmPcdPrs)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Parser structure allocation FAILED"));
-+ return NULL;
-+ }
-+ memset(p_FmPcdPrs, 0, sizeof(t_FmPcdPrs));
-+ fman_prs_defconfig(&p_FmPcd->p_FmPcdDriverParam->dfltCfg);
-+
-+ if (p_FmPcd->guestId == NCSW_MASTER_ID)
-+ {
-+ baseAddr = FmGetPcdPrsBaseAddr(p_FmPcdParams->h_Fm);
-+ p_FmPcdPrs->p_SwPrsCode = (uint32_t *)UINT_TO_PTR(baseAddr);
-+ p_FmPcdPrs->p_FmPcdPrsRegs = (struct fman_prs_regs *)UINT_TO_PTR(baseAddr + PRS_REGS_OFFSET);
-+ }
-+
-+ p_FmPcdPrs->fmPcdPrsPortIdStatistics = p_FmPcd->p_FmPcdDriverParam->dfltCfg.port_id_stat;
-+ p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = p_FmPcd->p_FmPcdDriverParam->dfltCfg.max_prs_cyc_lim;
-+ p_FmPcd->exceptions |= p_FmPcd->p_FmPcdDriverParam->dfltCfg.prs_exceptions;
-+
-+ return p_FmPcdPrs;
-+}
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ static uint8_t swPrsPatch[] = SW_PRS_UDP_LITE_PATCH;
-+#else
-+ static uint8_t swPrsPatch[] = SW_PRS_OFFLOAD_PATCH;
-+#endif /* FM_CAPWAP_SUPPORT */
-+
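-+/* Initialize the Parser (master partition only): apply the default configuration, register the interrupt handlers and load the SW-parser patch into the tail of the parser instruction RAM. */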
-+t_Error PrsInit(t_FmPcd *p_FmPcd)
-+{
-+ t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam;
-+ uint32_t *p_TmpCode;
-+ uint32_t *p_LoadTarget = (uint32_t *)PTR_MOVE(p_FmPcd->p_FmPcdPrs->p_SwPrsCode,
-+ FM_PCD_SW_PRS_SIZE-FM_PCD_PRS_SW_PATCHES_SIZE);
-+ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+ uint32_t i;
-+
-+ ASSERT_COND(sizeof(swPrsPatch) <= (FM_PCD_PRS_SW_PATCHES_SIZE-FM_PCD_PRS_SW_TAIL_SIZE));
-+
-+ /* nothing to do in guest-partition */
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ return E_OK;
-+
-+ p_TmpCode = (uint32_t *)XX_MallocSmart(ROUND_UP(sizeof(swPrsPatch),4), 0, sizeof(uint32_t));
-+ if (!p_TmpCode)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Tmp Sw-Parser code allocation FAILED"));
-+ memset((uint8_t *)p_TmpCode, 0, ROUND_UP(sizeof(swPrsPatch),4));
-+ memcpy((uint8_t *)p_TmpCode, (uint8_t *)swPrsPatch, sizeof(swPrsPatch));
-+
-+ fman_prs_init(PrsRegs, &p_Param->dfltCfg);
-+
-+ /* register even if no interrupts enabled, to allow future enablement */
-+ FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR, PcdPrsErrorException, p_FmPcd);
-+
-+ /* register even if no interrupts enabled, to allow future enablement */
-+ FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL, PcdPrsException, p_FmPcd);
-+
-+ if(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC)
-+ FmEnableRamsEcc(p_FmPcd->h_Fm);
-+
-+ if(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC)
-+ FmEnableRamsEcc(p_FmPcd->h_Fm);
-+
-+ /* load sw parser Ip-Frag patch */
-+ for (i=0; i<DIV_CEIL(sizeof(swPrsPatch), 4); i++)
-+ WRITE_UINT32(p_LoadTarget[i], GET_UINT32(p_TmpCode[i]));
-+
-+ XX_FreeSmart(p_TmpCode);
-+
-+ return E_OK;
-+}
-+
-+void PrsFree(t_FmPcd *p_FmPcd)
-+{
-+ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
-+ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR);
-+ /* register even if no interrupts enabled, to allow future enablement */
-+ FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL);
-+}
-+
-+void PrsEnable(t_FmPcd *p_FmPcd)
-+{
-+ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+
-+ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
-+ fman_prs_enable(PrsRegs);
-+}
-+
-+void PrsDisable(t_FmPcd *p_FmPcd)
-+{
-+ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+
-+ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
-+ fman_prs_disable(PrsRegs);
-+}
-+
-+int PrsIsEnabled(t_FmPcd *p_FmPcd)
-+{
-+ struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+
-+ ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
-+ return fman_prs_is_enabled(PrsRegs);
-+}
-+
-+t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include)
-+{
-+ struct fman_prs_regs *PrsRegs;
-+ uint32_t bitMask = 0;
-+ uint8_t prsPortId;
-+
-+ SANITY_CHECK_RETURN_ERROR((hardwarePortId >=1 && hardwarePortId <= 16), E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE);
-+
-+ PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+
-+ GET_FM_PCD_PRS_PORT_ID(prsPortId, hardwarePortId);
-+ GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId);
-+
-+ if (include)
-+ p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics |= bitMask;
-+ else
-+ p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics &= ~bitMask;
-+
-+ fman_prs_set_stst_port_msk(PrsRegs,
-+ p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPcdPrsIncludePortInStatistics(t_Handle h_FmPcd, uint8_t hardwarePortId, bool include)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR((hardwarePortId >=1 && hardwarePortId <= 16), E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE);
-+
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ p_FmPcd->h_IpcSession)
-+ {
-+ t_FmPcdIpcPrsIncludePort prsIncludePortParams;
-+ t_FmPcdIpcMsg msg;
-+
-+ prsIncludePortParams.hardwarePortId = hardwarePortId;
-+ prsIncludePortParams.include = include;
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_PCD_PRS_INC_PORT_STATS;
-+ memcpy(msg.msgBody, &prsIncludePortParams, sizeof(prsIncludePortParams));
-+ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) +sizeof(prsIncludePortParams),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+
-+ return PrsIncludePortInStatistics(p_FmPcd, hardwarePortId, include);
-+}
-+
-+uint32_t FmPcdGetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
-+ t_FmPcdPrsLabelParams *p_Label;
-+ int i;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE, 0);
-+
-+ if ((p_FmPcd->guestId != NCSW_MASTER_ID) &&
-+ p_FmPcd->h_IpcSession)
-+ {
-+ t_Error err = E_OK;
-+ t_FmPcdIpcSwPrsLable labelParams;
-+ t_FmPcdIpcMsg msg;
-+ uint32_t prsOffset = 0;
-+ t_FmPcdIpcReply reply;
-+ uint32_t replyLength;
-+
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&msg, 0, sizeof(msg));
-+ labelParams.enumHdr = (uint32_t)hdr;
-+ labelParams.indexPerHdr = indexPerHdr;
-+ msg.msgId = FM_PCD_GET_SW_PRS_OFFSET;
-+ memcpy(msg.msgBody, &labelParams, sizeof(labelParams));
-+ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ err = XX_IpcSendMessage(p_FmPcd->h_IpcSession,
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) +sizeof(labelParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t) + sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+
-+ memcpy((uint8_t*)&prsOffset, reply.replyBody, sizeof(uint32_t));
-+ return prsOffset;
-+ }
-+ else if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+
-+ ASSERT_COND(p_FmPcd->p_FmPcdPrs->currLabel < FM_PCD_PRS_NUM_OF_LABELS);
-+
-+ for (i=0; i<p_FmPcd->p_FmPcdPrs->currLabel; i++)
-+ {
-+ p_Label = &p_FmPcd->p_FmPcdPrs->labelsTable[i];
-+
-+ if ((hdr == p_Label->hdr) && (indexPerHdr == p_Label->indexPerHdr))
-+ return p_Label->instructionOffset;
-+ }
-+
-+ REPORT_ERROR(MAJOR, E_NOT_FOUND, ("Sw Parser attachment Not found"));
-+ return (uint32_t)ILLEGAL_BASE;
-+}
-+
-+void FM_PCD_SetPrsStatistics(t_Handle h_FmPcd, bool enable)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ struct fman_prs_regs *PrsRegs;
-+
-+ SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE);
-+
-+ PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
-+
-+
-+ if(p_FmPcd->guestId != NCSW_MASTER_ID)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPrsStatistics - guest mode!"));
-+ return;
-+ }
-+
-+ fman_prs_set_stst(PrsRegs, enable);
-+}
-+
-+t_Error FM_PCD_PrsLoadSw(t_Handle h_FmPcd, t_FmPcdPrsSwParams *p_SwPrs)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+ uint32_t *p_LoadTarget;
-+ uint32_t *p_TmpCode;
-+ int i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_SwPrs, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPcd->enabled, E_INVALID_HANDLE);
-+
-+ if (p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM in guest-mode!"));
-+
-+ if (!p_SwPrs->override)
-+ {
-+ if(p_FmPcd->p_FmPcdPrs->p_CurrSwPrs > p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("SW parser base must be larger than current loaded code"));
-+ }
-+ else
-+ p_FmPcd->p_FmPcdPrs->currLabel = 0;
-+
-+ if (p_SwPrs->size > FM_PCD_SW_PRS_SIZE - FM_PCD_PRS_SW_TAIL_SIZE - p_SwPrs->base*2)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_SwPrs->size may not be larger than MAX_SW_PRS_CODE_SIZE"));
-+
-+ if (p_FmPcd->p_FmPcdPrs->currLabel + p_SwPrs->numOfLabels > FM_PCD_PRS_NUM_OF_LABELS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceeded number of labels allowed "));
-+
-+ p_TmpCode = (uint32_t *)XX_MallocSmart(ROUND_UP(p_SwPrs->size,4), 0, sizeof(uint32_t));
-+ if (!p_TmpCode)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Tmp Sw-Parser code allocation FAILED"));
-+ memset((uint8_t *)p_TmpCode, 0, ROUND_UP(p_SwPrs->size,4));
-+ memcpy((uint8_t *)p_TmpCode, p_SwPrs->p_Code, p_SwPrs->size);
-+
-+ /* save sw parser labels */
-+ memcpy(&p_FmPcd->p_FmPcdPrs->labelsTable[p_FmPcd->p_FmPcdPrs->currLabel],
-+ p_SwPrs->labelsTable,
-+ p_SwPrs->numOfLabels*sizeof(t_FmPcdPrsLabelParams));
-+ p_FmPcd->p_FmPcdPrs->currLabel += p_SwPrs->numOfLabels;
-+
-+ /* load sw parser code */
-+ p_LoadTarget = p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4;
-+
-+ for(i=0; i<DIV_CEIL(p_SwPrs->size, 4); i++)
-+ WRITE_UINT32(p_LoadTarget[i], GET_UINT32(p_TmpCode[i]));
-+
-+ p_FmPcd->p_FmPcdPrs->p_CurrSwPrs =
-+ p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4 + ROUND_UP(p_SwPrs->size,4);
-+
-+ /* copy data parameters */
-+ for (i=0;i<FM_PCD_PRS_NUM_OF_HDRS;i++)
-+ WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+PRS_SW_DATA/4+i), p_SwPrs->swPrsDataParams[i]);
-+
-+ /* Clear last 4 bytes */
-+ WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+(PRS_SW_DATA-FM_PCD_PRS_SW_TAIL_SIZE)/4), 0);
-+
-+ XX_FreeSmart(p_TmpCode);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_ConfigPrsMaxCycleLimit(t_Handle h_FmPcd,uint16_t value)
-+{
-+ t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE);
-+
-+ if(p_FmPcd->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPrsMaxCycleLimit - guest mode!"));
-+
-+ p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = value;
-+
-+ return E_OK;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h
-@@ -0,0 +1,316 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_prs.h
-+
-+ @Description FM Parser private header
-+ *//***************************************************************************/
-+#ifndef __FM_PRS_H
-+#define __FM_PRS_H
-+
-+#include "std_ext.h"
-+
-+/***********************************************************************/
-+/* SW parser IP_FRAG patch */
-+/***********************************************************************/
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+#define SW_PRS_UDP_LITE_PATCH \
-+{\
-+ 0x31,0x52,0x00,0xDA,0xFC,0x00,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x50,0x2C,0x40,0x00,0x31,0x92,0x50,0x2C, \
-+ 0x00,0x88,0x18,0x2F,0x00,0x01,0x1B,0xFE,0x18,0x71, \
-+ 0x02,0x1F,0x00,0x08,0x00,0x83,0x02,0x1F,0x00,0x20, \
-+ 0x28,0x1B,0x00,0x05,0x29,0x1F,0x30,0xD0,0x60,0x4F, \
-+ 0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F,0x00,0x52, \
-+ 0x00,0x01,0x07,0x01,0x60,0x3B,0x00,0x00,0x30,0xD0, \
-+ 0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
-+ 0x40,0x4C,0x00,0x00,0x02,0x8F,0x00,0x00,0x30,0xF2, \
-+ 0x00,0x06,0x18,0x5D,0x00,0x00,0x9F,0xFF,0x30,0xF2, \
-+ 0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0,0x00,0x52, \
-+ 0x00,0x08,0x28,0x1A,0x60,0x37,0x00,0x00,0x30,0xF2, \
-+ 0x18,0x5D,0x06,0x00,0x29,0x1E,0x30,0xF2,0x2F,0x0E, \
-+ 0x30,0x72,0x00,0x00,0x9B,0x8F,0x00,0x06,0x2F,0x0E, \
-+ 0x32,0xF1,0x32,0xB0,0x00,0x4F,0x00,0x57,0x00,0x28, \
-+ 0x00,0x00,0x97,0x9E,0x00,0x4E,0x30,0x72,0x00,0x06, \
-+ 0x2F,0x0E,0x32,0xC1,0x32,0xF0,0x00,0x4A,0x00,0x80, \
-+ 0x00,0x02,0x00,0x00,0x97,0x9E,0x40,0x7E,0x00,0x08, \
-+ 0x08,0x16,0x00,0x54,0x00,0x01,0x1B,0xFE,0x00,0x00, \
-+ 0x9F,0x9E,0x40,0xB3,0x00,0x00,0x02,0x1F,0x00,0x08, \
-+ 0x28,0x1B,0x30,0x73,0x29,0x1F,0x30,0xD0,0x60,0x9F, \
-+ 0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F,0x00,0x52, \
-+ 0x00,0x01,0x07,0x01,0x60,0x8B,0x00,0x00,0x30,0xD0, \
-+ 0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
-+ 0x40,0x9C,0x00,0x00,0x02,0x8F,0x00,0x00,0x30,0xF2, \
-+ 0x00,0x06,0x18,0xAD,0x00,0x00,0x9F,0xFF,0x30,0xF2, \
-+ 0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0,0x00,0x52, \
-+ 0x00,0x08,0x28,0x1A,0x60,0x87,0x00,0x00,0x30,0xF2, \
-+ 0x18,0xAD,0x06,0x00,0x29,0x1E,0x30,0xF2,0x50,0xB3, \
-+ 0xFF,0xFF,0x18,0xB8,0x08,0x16,0x00,0x54,0x00,0x01, \
-+ 0x1B,0xFE,0x18,0xC5,0x32,0xF1,0x28,0x5D,0x32,0xF1, \
-+ 0x00,0x55,0x00,0x08,0x28,0x5F,0x00,0x00,0x8F,0x9F, \
-+ 0x29,0x33,0x08,0x16,0x00,0x49,0x00,0x01,0x1B,0xFF, \
-+ 0x00,0x01,0x1B,0xFF \
-+}
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+
-+#if (DPAA_VERSION == 10)
-+/* Version: 106.1.9 */
-+#define SW_PRS_OFFLOAD_PATCH \
-+{ \
-+ 0x31,0x52,0x00,0xDA,0x0A,0x00,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x43,0x0A,0x00,0x00,0x00,0x01,0x1B,0xFE, \
-+ 0x00,0x00,0x99,0x00,0x53,0x13,0x00,0x00,0x00,0x00, \
-+ 0x9F,0x98,0x53,0x13,0x00,0x00,0x1B,0x23,0x33,0xF1, \
-+ 0x00,0xF9,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
-+ 0x28,0x7F,0x00,0x03,0x00,0x02,0x00,0x00,0x00,0x01, \
-+ 0x32,0xC1,0x32,0xF0,0x00,0x4A,0x00,0x80,0x1F,0xFF, \
-+ 0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA,0x06,0x00, \
-+ 0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x2F,0x00,0x00, \
-+ 0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA,0x00,0x40, \
-+ 0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x95,0x00,0x00, \
-+ 0x00,0x00,0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55, \
-+ 0x00,0x28,0x28,0x43,0x30,0x7E,0x43,0x45,0x00,0x00, \
-+ 0x30,0x7E,0x43,0x45,0x00,0x3C,0x1B,0x5D,0x32,0x11, \
-+ 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x83,0x8F, \
-+ 0x2F,0x0F,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \
-+ 0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11,0x00,0x00, \
-+ 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \
-+ 0x28,0x43,0x06,0x00,0x1B,0x3E,0x30,0x7E,0x53,0x79, \
-+ 0x00,0x2B,0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81, \
-+ 0x00,0x00,0x87,0x8F,0x28,0x23,0x06,0x00,0x32,0x11, \
-+ 0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81, \
-+ 0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01, \
-+ 0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00,0x00,0x01, \
-+ 0x1B,0xFE,0x00,0x00,0x9B,0x8E,0x53,0x90,0x00,0x00, \
-+ 0x06,0x29,0x00,0x00,0x83,0x8F,0x28,0x23,0x06,0x00, \
-+ 0x06,0x29,0x32,0xC1,0x00,0x55,0x00,0x28,0x00,0x00, \
-+ 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \
-+ 0x28,0x43,0x06,0x00,0x00,0x01,0x1B,0xFE,0x32,0xC1, \
-+ 0x00,0x55,0x00,0x28,0x28,0x43,0x1B,0xCF,0x00,0x00, \
-+ 0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55,0x00,0x28, \
-+ 0x28,0x43,0x30,0x7E,0x43,0xBF,0x00,0x2C,0x32,0x11, \
-+ 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F, \
-+ 0x28,0x23,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \
-+ 0x00,0x81,0x00,0x00,0x83,0x8F,0x2F,0x0F,0x06,0x00, \
-+ 0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01, \
-+ 0x00,0x81,0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50, \
-+ 0x00,0x01,0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00, \
-+ 0x1B,0x9C,0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00, \
-+ 0x00,0x00,0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02, \
-+ 0x00,0x00,0x00,0x01,0x32,0xC1,0x32,0xF0,0x00,0x4A, \
-+ 0x00,0x80,0x1F,0xFF,0x00,0x01,0x1B,0xFE, \
-+}
-+
-+#else
-+#define SW_PRS_OFFLOAD_PATCH \
-+{ \
-+ 0x31,0x52,0x00,0xDA,0x0E,0x4F,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x51,0x16,0x08,0x4B,0x31,0x53,0x00,0xFB, \
-+ 0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x29,0x2B, \
-+ 0x33,0xF1,0x00,0xFB,0x00,0xDF,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x28,0x7F,0x31,0x52,0x00,0xDA,0x0A,0x00, \
-+ 0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x20,0x00,0x00, \
-+ 0x00,0x01,0x1B,0xFE,0x00,0x00,0x99,0x00,0x51,0x29, \
-+ 0x00,0x00,0x00,0x00,0x9F,0x98,0x51,0x29,0x00,0x00, \
-+ 0x19,0x44,0x09,0x5F,0x00,0x20,0x00,0x00,0x09,0x4F, \
-+ 0x00,0x20,0x00,0x00,0x34,0xB7,0x00,0xF9,0x00,0x00, \
-+ 0x01,0x00,0x00,0x00,0x00,0x00,0x2B,0x97,0x31,0xB3, \
-+ 0x29,0x8F,0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00, \
-+ 0x00,0x00,0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02, \
-+ 0x00,0x00,0x00,0x01,0x1B,0xFE,0x00,0x01,0x1B,0xFE, \
-+ 0x31,0x52,0x00,0xDA,0xFC,0x00,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x51,0x52,0x40,0x00,0x31,0x92,0x51,0x52, \
-+ 0x00,0x88,0x19,0x55,0x08,0x05,0x00,0x00,0x19,0x99, \
-+ 0x02,0x1F,0x00,0x08,0x00,0x83,0x02,0x1F,0x00,0x20, \
-+ 0x28,0x1B,0x00,0x05,0x29,0x1F,0x30,0xD0,0x61,0x75, \
-+ 0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F,0x00,0x52, \
-+ 0x00,0x01,0x07,0x01,0x61,0x61,0x00,0x00,0x30,0xD0, \
-+ 0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \
-+ 0x41,0x72,0x00,0x00,0x02,0x8F,0x00,0x00,0x30,0xF2, \
-+ 0x00,0x06,0x19,0x83,0x00,0x00,0x9F,0xFF,0x30,0xF2, \
-+ 0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0,0x00,0x52, \
-+ 0x00,0x08,0x28,0x1A,0x61,0x5D,0x00,0x00,0x30,0xF2, \
-+ 0x19,0x83,0x06,0x00,0x29,0x1E,0x30,0xF2,0x29,0x0E, \
-+ 0x30,0x72,0x00,0x00,0x9B,0x8F,0x00,0x06,0x29,0x0E, \
-+ 0x32,0xF1,0x32,0xB0,0x00,0x4F,0x00,0x57,0x00,0x28, \
-+ 0x00,0x00,0x97,0x9E,0x00,0x4E,0x30,0x72,0x00,0x06, \
-+ 0x29,0x0E,0x08,0x05,0x00,0x01,0x31,0x52,0x00,0xDA, \
-+ 0x0E,0x4F,0x00,0x00,0x00,0x00,0x00,0x00,0x51,0xAF, \
-+ 0x04,0x4B,0x31,0x53,0x00,0xFB,0xFF,0xF0,0x00,0x00, \
-+ 0x00,0x00,0x00,0x00,0x29,0x2B,0x33,0xF1,0x00,0xFB, \
-+ 0x00,0xDF,0x00,0x00,0x00,0x00,0x00,0x00,0x28,0x7F, \
-+ 0x31,0x52,0x00,0xDA,0x06,0x00,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x41,0xB9,0x00,0x00,0x00,0x01,0x1B,0xFE, \
-+ 0x31,0x52,0x00,0xDA,0x00,0x40,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x42,0x06,0x00,0x00,0x00,0x00,0x9B,0x8F, \
-+ 0x28,0x01,0x32,0xC1,0x00,0x55,0x00,0x28,0x28,0x43, \
-+ 0x30,0x00,0x41,0xEB,0x00,0x2C,0x32,0x11,0x32,0xC0, \
-+ 0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F,0x28,0x23, \
-+ 0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81, \
-+ 0x00,0x00,0x83,0x8F,0x28,0x01,0x06,0x00,0x32,0x11, \
-+ 0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81, \
-+ 0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01, \
-+ 0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00,0x19,0xC8, \
-+ 0x09,0x5F,0x00,0x20,0x00,0x00,0x09,0x4F,0x00,0x20, \
-+ 0x00,0x00,0x34,0xB7,0x00,0xF9,0x00,0x00,0x01,0x00, \
-+ 0x00,0x00,0x00,0x00,0x2B,0x97,0x31,0xB3,0x29,0x8F, \
-+ 0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02,0x00,0x00, \
-+ 0x00,0x01,0x1B,0xFE,0x30,0x50,0x52,0x0B,0x00,0x00, \
-+ 0x00,0x01,0x1B,0xFE,0x32,0xF1,0x32,0xC0,0x00,0x4F, \
-+ 0x00,0x81,0x00,0x02,0x00,0x00,0x97,0x9E,0x42,0x18, \
-+ 0x00,0x08,0x08,0x16,0x00,0x54,0x00,0x01,0x1B,0xFE, \
-+ 0x00,0x00,0x9F,0x9E,0x42,0x4D,0x00,0x00,0x02,0x1F, \
-+ 0x00,0x08,0x28,0x1B,0x30,0x73,0x29,0x1F,0x30,0xD0, \
-+ 0x62,0x39,0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F, \
-+ 0x00,0x52,0x00,0x01,0x07,0x01,0x62,0x25,0x00,0x00, \
-+ 0x30,0xD0,0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x42,0x36,0x00,0x00,0x02,0x8F,0x00,0x00, \
-+ 0x30,0xF2,0x00,0x06,0x1A,0x47,0x00,0x00,0x9F,0xFF, \
-+ 0x30,0xF2,0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0, \
-+ 0x00,0x52,0x00,0x08,0x28,0x1A,0x62,0x21,0x00,0x00, \
-+ 0x30,0xF2,0x1A,0x47,0x06,0x00,0x29,0x1E,0x30,0xF2, \
-+ 0x52,0x4D,0xFF,0xFF,0x1A,0x52,0x08,0x16,0x00,0x54, \
-+ 0x00,0x01,0x1B,0xFE,0x1A,0x5F,0x32,0xF1,0x28,0x5D, \
-+ 0x32,0xF1,0x00,0x55,0x00,0x08,0x28,0x5F,0x00,0x00, \
-+ 0x8F,0x9F,0x29,0x33,0x08,0x16,0x00,0x49,0x00,0x01, \
-+ 0x1B,0xFF,0x00,0x01,0x1B,0xFF,0x31,0x52,0x00,0xDA, \
-+ 0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x6D, \
-+ 0x40,0x00,0x31,0x92,0x52,0x6D,0x00,0x88,0x1A,0x70, \
-+ 0x08,0x05,0x00,0x00,0x1A,0xB4,0x02,0x1F,0x00,0x08, \
-+ 0x00,0x83,0x02,0x1F,0x00,0x20,0x28,0x1B,0x00,0x05, \
-+ 0x29,0x1F,0x30,0xD0,0x62,0x90,0x00,0x07,0x00,0x05, \
-+ 0x00,0x00,0xC3,0x8F,0x00,0x52,0x00,0x01,0x07,0x01, \
-+ 0x62,0x7C,0x00,0x00,0x30,0xD0,0x00,0xDA,0x00,0x01, \
-+ 0x00,0x00,0x00,0x00,0x00,0x00,0x42,0x8D,0x00,0x00, \
-+ 0x02,0x8F,0x00,0x00,0x30,0xF2,0x00,0x06,0x1A,0x9E, \
-+ 0x00,0x00,0x9F,0xFF,0x30,0xF2,0x00,0x06,0x29,0x1E, \
-+ 0x07,0x08,0x30,0xD0,0x00,0x52,0x00,0x08,0x28,0x1A, \
-+ 0x62,0x78,0x00,0x00,0x30,0xF2,0x1A,0x9E,0x06,0x00, \
-+ 0x29,0x1E,0x30,0xF2,0x29,0x0E,0x30,0x72,0x00,0x00, \
-+ 0x9B,0x8F,0x00,0x06,0x29,0x0E,0x32,0xF1,0x32,0xB0, \
-+ 0x00,0x4F,0x00,0x57,0x00,0x28,0x00,0x00,0x97,0x9E, \
-+ 0x00,0x4E,0x30,0x72,0x00,0x06,0x29,0x0E,0x08,0x05, \
-+ 0x00,0x01,0x31,0x52,0x00,0xDA,0x0E,0x4F,0x00,0x00, \
-+ 0x00,0x00,0x00,0x00,0x52,0xCA,0x04,0x4B,0x31,0x53, \
-+ 0x00,0xFB,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00, \
-+ 0x29,0x2B,0x33,0xF1,0x00,0xFB,0x00,0xDF,0x00,0x00, \
-+ 0x00,0x00,0x00,0x00,0x28,0x7F,0x31,0x52,0x00,0xDA, \
-+ 0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x42,0xD4, \
-+ 0x00,0x00,0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA, \
-+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x37, \
-+ 0x00,0x00,0x00,0x00,0x9B,0x8F,0x28,0x01,0x32,0xC1, \
-+ 0x00,0x55,0x00,0x28,0x28,0x43,0x30,0x00,0x42,0xEA, \
-+ 0x00,0x00,0x30,0x00,0x42,0xEA,0x00,0x3C,0x1B,0x02, \
-+ 0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00, \
-+ 0x83,0x8F,0x28,0x01,0x06,0x00,0x32,0x11,0x32,0xC0, \
-+ 0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11, \
-+ 0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04, \
-+ 0x00,0x4D,0x28,0x43,0x06,0x00,0x1A,0xE3,0x30,0x00, \
-+ 0x43,0x20,0x00,0x2B,0x00,0x00,0x9B,0x8E,0x43,0x0E, \
-+ 0x00,0x00,0x32,0xC1,0x00,0x55,0x00,0x28,0x28,0x43, \
-+ 0x1B,0x1F,0x06,0x29,0x00,0x00,0x83,0x8F,0x28,0x23, \
-+ 0x06,0x00,0x06,0x29,0x32,0xC1,0x00,0x55,0x00,0x28, \
-+ 0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04, \
-+ 0x00,0x4D,0x28,0x43,0x06,0x00,0x1B,0x37,0x32,0x11, \
-+ 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F, \
-+ 0x28,0x23,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \
-+ 0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11,0x00,0x00, \
-+ 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \
-+ 0x28,0x43,0x06,0x00,0x30,0x50,0x53,0x3C,0x00,0x00, \
-+ 0x00,0x01,0x1B,0xFE,0x32,0xF1,0x32,0xC0,0x00,0x4F, \
-+ 0x00,0x81,0x00,0x02,0x00,0x00,0x97,0x9E,0x43,0x49, \
-+ 0x00,0x08,0x08,0x16,0x00,0x54,0x00,0x01,0x1B,0xFE, \
-+ 0x00,0x00,0x9F,0x9E,0x43,0x7E,0x00,0x00,0x02,0x1F, \
-+ 0x00,0x08,0x28,0x1B,0x30,0x73,0x29,0x1F,0x30,0xD0, \
-+ 0x63,0x6A,0x00,0x07,0x00,0x05,0x00,0x00,0xC3,0x8F, \
-+ 0x00,0x52,0x00,0x01,0x07,0x01,0x63,0x56,0x00,0x00, \
-+ 0x30,0xD0,0x00,0xDA,0x00,0x01,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x43,0x67,0x00,0x00,0x02,0x8F,0x00,0x00, \
-+ 0x30,0xF2,0x00,0x06,0x1B,0x78,0x00,0x00,0x9F,0xFF, \
-+ 0x30,0xF2,0x00,0x06,0x29,0x1E,0x07,0x08,0x30,0xD0, \
-+ 0x00,0x52,0x00,0x08,0x28,0x1A,0x63,0x52,0x00,0x00, \
-+ 0x30,0xF2,0x1B,0x78,0x06,0x00,0x29,0x1E,0x30,0xF2, \
-+ 0x53,0x7E,0xFF,0xFF,0x1B,0x83,0x08,0x16,0x00,0x54, \
-+ 0x00,0x01,0x1B,0xFE,0x1B,0x90,0x32,0xF1,0x28,0x5D, \
-+ 0x32,0xF1,0x00,0x55,0x00,0x08,0x28,0x5F,0x00,0x00, \
-+ 0x8F,0x9F,0x29,0x33,0x08,0x16,0x00,0x49,0x00,0x01, \
-+ 0x1B,0xFF,0x00,0x01,0x1B,0xFF,0x08,0x07,0x00,0x02, \
-+ 0x00,0x00,0x8D,0x80,0x53,0x9C,0x00,0x01,0x30,0x71, \
-+ 0x00,0x55,0x00,0x01,0x28,0x0F,0x00,0x00,0x8D,0x00, \
-+ 0x53,0xA4,0x00,0x01,0x30,0x71,0x00,0x55,0x00,0x01, \
-+ 0x28,0x0F,0x00,0x00,0x83,0x8E,0x53,0xB9,0x00,0x00, \
-+ 0x00,0x00,0x86,0x08,0x30,0x71,0x00,0x7B,0x03,0xB9, \
-+ 0x33,0xB4,0x00,0xDA,0xFF,0xFF,0x00,0x0F,0x00,0x00, \
-+ 0x00,0x00,0x00,0x00,0x86,0x09,0x01,0x03,0x00,0x7D, \
-+ 0x03,0xB9,0x1B,0xC8,0x33,0xD1,0x00,0xF9,0x00,0x10, \
-+ 0x00,0x00,0x00,0x00,0x00,0x00,0x28,0x7B,0x09,0x5F, \
-+ 0x00,0x1A,0x00,0x00,0x09,0x4F,0x00,0x1A,0x00,0x00, \
-+ 0x00,0x01,0x1B,0xFF,0x00,0x00,0x8C,0x00,0x53,0xF0, \
-+ 0x00,0x01,0x34,0xF5,0x00,0xFB,0xFF,0xFF,0x00,0x7F, \
-+ 0x00,0x00,0x00,0x00,0x2A,0x9F,0x00,0x00,0x93,0x8F, \
-+ 0x28,0x49,0x00,0x00,0x97,0x8F,0x28,0x4B,0x34,0x61, \
-+ 0x28,0x4D,0x34,0x71,0x28,0x4F,0x34,0xB7,0x00,0xF9, \
-+ 0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x2B,0x97, \
-+ 0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00,0x00,0x00, \
-+ 0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02,0x00,0x00, \
-+ 0x00,0x01,0x1B,0xFF,0x00,0x01,0x1B,0xFF, \
-+}
-+#endif /* (DPAA_VERSION == 10) */
-+
-+/****************************/
-+/* Parser defines */
-+/****************************/
-+#define FM_PCD_PRS_SW_TAIL_SIZE 4 /**< Number of bytes that must be cleared at
-+ the end of the SW parser area */
-+
-+/* masks */
-+#define PRS_ERR_CAP 0x80000000
-+#define PRS_ERR_TYPE_DOUBLE 0x40000000
-+#define PRS_ERR_SINGLE_ECC_CNT_MASK 0x00FF0000
-+#define PRS_ERR_ADDR_MASK 0x000001FF
-+
-+/* others */
-+#define PRS_MAX_CYCLE_LIMIT 8191
-+#define PRS_SW_DATA 0x00000800
-+#define PRS_REGS_OFFSET 0x00000840
-+
-+#define GET_FM_PCD_PRS_PORT_ID(prsPortId,hardwarePortId) \
-+ prsPortId = (uint8_t)(hardwarePortId & 0x0f)
-+
-+#define GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId) \
-+ bitMask = 0x80000000>>prsPortId
-+
-+#endif /* __FM_PRS_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.c
-@@ -0,0 +1,984 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_replic.c
-+
-+ @Description FM frame replicator
-+*//***************************************************************************/
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_muram_ext.h"
-+#include "fm_common.h"
-+#include "fm_hc.h"
-+#include "fm_replic.h"
-+#include "fm_cc.h"
-+#include "list_ext.h"
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+static uint8_t GetMemberPosition(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ uint32_t memberIndex,
-+ bool isAddOperation)
-+{
-+ uint8_t memberPosition;
-+ uint32_t lastMemberIndex;
-+
-+ ASSERT_COND(p_ReplicGroup);
-+
-+    /* The last member index differs between add and remove operations:
-+       for a remove, it is exactly the index of the current last member;
-+       for an add, it is the last member index + 1. For example, with
-+       4 members the actual last member has index 3 (indices start from 0),
-+       so to append a new member as the last one we must use
-+       memberIndex = 4, not 3.
-+     */
-+ if (isAddOperation)
-+ lastMemberIndex = p_ReplicGroup->numOfEntries;
-+ else
-+ lastMemberIndex = p_ReplicGroup->numOfEntries-1;
-+
-+ /* last */
-+ if (memberIndex == lastMemberIndex)
-+ memberPosition = FRM_REPLIC_LAST_MEMBER_INDEX;
-+ else
-+ {
-+ /* first */
-+ if (memberIndex == 0)
-+ memberPosition = FRM_REPLIC_FIRST_MEMBER_INDEX;
-+ else
-+ {
-+ /* middle */
-+ ASSERT_COND(memberIndex < lastMemberIndex);
-+ memberPosition = FRM_REPLIC_MIDDLE_MEMBER_INDEX;
-+ }
-+ }
-+ return memberPosition;
-+}
-+
-+static t_Error MemberCheckParams(t_Handle h_FmPcd,
-+ t_FmPcdCcNextEngineParams *p_MemberParams)
-+{
-+ t_Error err;
-+
-+
-+ if ((p_MemberParams->nextEngine != e_FM_PCD_DONE) &&
-+ (p_MemberParams->nextEngine != e_FM_PCD_KG) &&
-+ (p_MemberParams->nextEngine != e_FM_PCD_PLCR))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Next engine of a member should be MatchTable(cc) or Done or Policer"));
-+
-+ /* check the regular parameters of the next engine */
-+ err = ValidateNextEngineParams(h_FmPcd, p_MemberParams, e_FM_PCD_CC_STATS_MODE_NONE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("member next engine parameters"));
-+
-+ return E_OK;
-+}
-+
-+static t_Error CheckParams(t_Handle h_FmPcd,
-+ t_FmPcdFrmReplicGroupParams *p_ReplicGroupParam)
-+{
-+ int i;
-+ t_Error err;
-+
-+ /* check that max num of entries is at least 2 */
-+ if (!IN_RANGE(2, p_ReplicGroupParam->maxNumOfEntries, FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES))
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("maxNumOfEntries in the frame replicator parameters should be 2-%d",FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES));
-+
-+ /* check that number of entries is greater than zero */
-+ if (!p_ReplicGroupParam->numOfEntries)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOFEntries in the frame replicator group should be greater than zero"));
-+
-+ /* check that max num of entries is equal or greater than number of entries */
-+ if (p_ReplicGroupParam->maxNumOfEntries < p_ReplicGroupParam->numOfEntries)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxNumOfEntries should be equal or greater than numOfEntries"));
-+
-+ for (i=0; i<p_ReplicGroupParam->numOfEntries; i++)
-+ {
-+ err = MemberCheckParams(h_FmPcd, &p_ReplicGroupParam->nextEngineParams[i]);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("member check parameters"));
-+ }
-+ return E_OK;
-+}
-+
-+static t_FmPcdFrmReplicMember *GetAvailableMember(t_FmPcdFrmReplicGroup *p_ReplicGroup)
-+{
-+ t_FmPcdFrmReplicMember *p_ReplicMember = NULL;
-+ t_List *p_Next;
-+
-+ if (!LIST_IsEmpty(&p_ReplicGroup->availableMembersList))
-+ {
-+ p_Next = LIST_FIRST(&p_ReplicGroup->availableMembersList);
-+ p_ReplicMember = LIST_OBJECT(p_Next, t_FmPcdFrmReplicMember, node);
-+ ASSERT_COND(p_ReplicMember);
-+ LIST_DelAndInit(p_Next);
-+ }
-+ return p_ReplicMember;
-+}
-+
-+static void PutAvailableMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_FmPcdFrmReplicMember *p_ReplicMember)
-+{
-+ LIST_AddToTail(&p_ReplicMember->node, &p_ReplicGroup->availableMembersList);
-+}
-+
-+static void AddMemberToList(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_FmPcdFrmReplicMember *p_CurrentMember,
-+ t_List *p_ListHead)
-+{
-+ LIST_Add(&p_CurrentMember->node, p_ListHead);
-+
-+ p_ReplicGroup->numOfEntries++;
-+}
-+
-+static void RemoveMemberFromList(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_FmPcdFrmReplicMember *p_CurrentMember)
-+{
-+ ASSERT_COND(p_ReplicGroup->numOfEntries);
-+ LIST_DelAndInit(&p_CurrentMember->node);
-+ p_ReplicGroup->numOfEntries--;
-+}
-+
-+static void LinkSourceToMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_AdOfTypeContLookup *p_SourceTd,
-+ t_FmPcdFrmReplicMember *p_ReplicMember)
-+{
-+ t_FmPcd *p_FmPcd;
-+
-+ ASSERT_COND(p_SourceTd);
-+ ASSERT_COND(p_ReplicMember);
-+ ASSERT_COND(p_ReplicGroup);
-+ ASSERT_COND(p_ReplicGroup->h_FmPcd);
-+
-+ /* Link the first member in the group to the source TD */
-+ p_FmPcd = p_ReplicGroup->h_FmPcd;
-+
-+ WRITE_UINT32(p_SourceTd->matchTblPtr,
-+ (uint32_t)(XX_VirtToPhys(p_ReplicMember->p_MemberAd) -
-+ p_FmPcd->physicalMuramBase));
-+}
-+
-+static void LinkMemberToMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_FmPcdFrmReplicMember *p_CurrentMember,
-+ t_FmPcdFrmReplicMember *p_NextMember)
-+{
-+ t_AdOfTypeResult *p_CurrReplicAd = (t_AdOfTypeResult*)p_CurrentMember->p_MemberAd;
-+ t_AdOfTypeResult *p_NextReplicAd = NULL;
-+ t_FmPcd *p_FmPcd;
-+ uint32_t offset = 0;
-+
-+    /* Check whether the next member exists; NULL means this is the last member */
-+ if (p_NextMember)
-+ {
-+ p_NextReplicAd = (t_AdOfTypeResult*)p_NextMember->p_MemberAd;
-+ p_FmPcd = p_ReplicGroup->h_FmPcd;
-+ offset = (XX_VirtToPhys(p_NextReplicAd) - (p_FmPcd->physicalMuramBase));
-+ offset = ((offset>>NEXT_FRM_REPLIC_ADDR_SHIFT)<< NEXT_FRM_REPLIC_MEMBER_INDEX_SHIFT);
-+ }
-+
-+ /* link the current AD to point to the AD of the next member */
-+ WRITE_UINT32(p_CurrReplicAd->res, offset);
-+}
-+
-+static t_Error ModifyDescriptor(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ void *p_OldDescriptor,
-+ void *p_NewDescriptor)
-+{
-+ t_Handle h_Hc;
-+ t_Error err;
-+ t_FmPcd *p_FmPcd;
-+
-+ ASSERT_COND(p_ReplicGroup);
-+ ASSERT_COND(p_ReplicGroup->h_FmPcd);
-+ ASSERT_COND(p_OldDescriptor);
-+ ASSERT_COND(p_NewDescriptor);
-+
-+ p_FmPcd = p_ReplicGroup->h_FmPcd;
-+ h_Hc = FmPcdGetHcHandle(p_FmPcd);
-+ if (!h_Hc)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Host command"));
-+
-+ err = FmHcPcdCcDoDynamicChange(h_Hc,
-+ (uint32_t)(XX_VirtToPhys(p_OldDescriptor) - p_FmPcd->physicalMuramBase),
-+ (uint32_t)(XX_VirtToPhys(p_NewDescriptor) - p_FmPcd->physicalMuramBase));
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("Dynamic change host command"));
-+
-+ return E_OK;
-+}
-+
-+static void FillReplicAdOfTypeResult(void *p_ReplicAd, bool last)
-+{
-+ t_AdOfTypeResult *p_CurrReplicAd = (t_AdOfTypeResult*)p_ReplicAd;
-+ uint32_t tmp;
-+
-+ tmp = GET_UINT32(p_CurrReplicAd->plcrProfile);
-+ if (last)
-+ /* clear the NL bit in case it's the last member in the group*/
-+ WRITE_UINT32(p_CurrReplicAd->plcrProfile,(tmp & ~FRM_REPLIC_NL_BIT));
-+ else
-+ /* set the NL bit in case it's not the last member in the group */
-+ WRITE_UINT32(p_CurrReplicAd->plcrProfile, (tmp |FRM_REPLIC_NL_BIT));
-+
-+ /* set FR bit in the action descriptor */
-+ tmp = GET_UINT32(p_CurrReplicAd->nia);
-+ WRITE_UINT32(p_CurrReplicAd->nia,
-+ (tmp | FRM_REPLIC_FR_BIT | FM_PCD_AD_RESULT_EXTENDED_MODE ));
-+}
-+
-+static void BuildSourceTd(void *p_Ad)
-+{
-+ t_AdOfTypeContLookup *p_SourceTd;
-+
-+ ASSERT_COND(p_Ad);
-+
-+ p_SourceTd = (t_AdOfTypeContLookup *)p_Ad;
-+
-+ IOMemSet32((uint8_t*)p_SourceTd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* initialize the source table descriptor */
-+ WRITE_UINT32(p_SourceTd->ccAdBase, FM_PCD_AD_CONT_LOOKUP_TYPE);
-+ WRITE_UINT32(p_SourceTd->pcAndOffsets, FRM_REPLIC_SOURCE_TD_OPCODE);
-+}
-+
-+static t_Error BuildShadowAndModifyDescriptor(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_FmPcdFrmReplicMember *p_NextMember,
-+ t_FmPcdFrmReplicMember *p_CurrentMember,
-+ bool sourceDescriptor,
-+ bool last)
-+{
-+ t_FmPcd *p_FmPcd;
-+ t_FmPcdFrmReplicMember shadowMember;
-+ t_Error err;
-+
-+ ASSERT_COND(p_ReplicGroup);
-+ ASSERT_COND(p_ReplicGroup->h_FmPcd);
-+
-+ p_FmPcd = p_ReplicGroup->h_FmPcd;
-+ ASSERT_COND(p_FmPcd->p_CcShadow);
-+
-+ if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
-+ return ERROR_CODE(E_BUSY);
-+
-+ if (sourceDescriptor)
-+ {
-+ BuildSourceTd(p_FmPcd->p_CcShadow);
-+ LinkSourceToMember(p_ReplicGroup, p_FmPcd->p_CcShadow, p_NextMember);
-+
-+ /* Modify the source table descriptor according to the prepared shadow descriptor */
-+ err = ModifyDescriptor(p_ReplicGroup,
-+ p_ReplicGroup->p_SourceTd,
-+ p_FmPcd->p_CcShadow/* new prepared source td */);
-+
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("Modify source Descriptor in BuildShadowAndModifyDescriptor"));
-+
-+ }
-+ else
-+ {
-+ IO2IOCpy32(p_FmPcd->p_CcShadow,
-+ p_CurrentMember->p_MemberAd,
-+ FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* update the last bit in the shadow ad */
-+ FillReplicAdOfTypeResult(p_FmPcd->p_CcShadow, last);
-+
-+ shadowMember.p_MemberAd = p_FmPcd->p_CcShadow;
-+
-+ /* update the next FR member index */
-+ LinkMemberToMember(p_ReplicGroup, &shadowMember, p_NextMember);
-+
-+ /* Modify the next member according to the prepared shadow descriptor */
-+ err = ModifyDescriptor(p_ReplicGroup,
-+ p_CurrentMember->p_MemberAd,
-+ p_FmPcd->p_CcShadow);
-+
-+ RELEASE_LOCK(p_FmPcd->shadowLock);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, ("Modify Descriptor in BuildShadowAndModifyDescriptor"));
-+ }
-+
-+
-+ return E_OK;
-+}
-+
-+static t_FmPcdFrmReplicMember* GetMemberByIndex(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ uint16_t memberIndex)
-+{
-+ int i=0;
-+ t_List *p_Pos;
-+ t_FmPcdFrmReplicMember *p_Member = NULL;
-+
-+ LIST_FOR_EACH(p_Pos, &p_ReplicGroup->membersList)
-+ {
-+ if (i == memberIndex)
-+ {
-+ p_Member = LIST_OBJECT(p_Pos, t_FmPcdFrmReplicMember, node);
-+ return p_Member;
-+ }
-+ i++;
-+ }
-+ return p_Member;
-+}
-+
-+static t_Error AllocMember(t_FmPcdFrmReplicGroup *p_ReplicGroup)
-+{
-+ t_FmPcdFrmReplicMember *p_CurrentMember;
-+ t_Handle h_Muram;
-+
-+ ASSERT_COND(p_ReplicGroup);
-+
-+ h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
-+ ASSERT_COND(h_Muram);
-+
-+ /* Initialize an internal structure of a member to add to the available members list */
-+ p_CurrentMember = (t_FmPcdFrmReplicMember *)XX_Malloc(sizeof(t_FmPcdFrmReplicMember));
-+ if (!p_CurrentMember)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Frame replicator member"));
-+
-+ memset(p_CurrentMember, 0 ,sizeof(t_FmPcdFrmReplicMember));
-+
-+ /* Allocate the member AD */
-+ p_CurrentMember->p_MemberAd =
-+ (t_AdOfTypeResult*)FM_MURAM_AllocMem(h_Muram,
-+ FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_CurrentMember->p_MemberAd)
-+ {
-+ XX_Free(p_CurrentMember);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("member AD table"));
-+ }
-+ IOMemSet32((uint8_t*)p_CurrentMember->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ /* Add the new member to the available members list */
-+ LIST_AddToTail(&p_CurrentMember->node, &(p_ReplicGroup->availableMembersList));
-+
-+ return E_OK;
-+}
-+
-+static t_FmPcdFrmReplicMember* InitMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_FmPcdCcNextEngineParams *p_MemberParams,
-+ bool last)
-+{
-+ t_FmPcdFrmReplicMember *p_CurrentMember = NULL;
-+
-+ ASSERT_COND(p_ReplicGroup);
-+
-+ /* Get an available member from the internal members list */
-+ p_CurrentMember = GetAvailableMember(p_ReplicGroup);
-+ if (!p_CurrentMember)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_FOUND, ("Available member"));
-+ return NULL;
-+ }
-+ p_CurrentMember->h_Manip = NULL;
-+
-+ /* clear the Ad of the new member */
-+ IOMemSet32((uint8_t*)p_CurrentMember->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+ INIT_LIST(&p_CurrentMember->node);
-+
-+ /* Initialize the Ad of the member */
-+ NextStepAd(p_CurrentMember->p_MemberAd,
-+ NULL,
-+ p_MemberParams,
-+ p_ReplicGroup->h_FmPcd);
-+
-+ /* save Manip handle (for free needs) */
-+ if (p_MemberParams->h_Manip)
-+ p_CurrentMember->h_Manip = p_MemberParams->h_Manip;
-+
-+ /* Initialize the relevant frame replicator fields in the AD */
-+ FillReplicAdOfTypeResult(p_CurrentMember->p_MemberAd, last);
-+
-+ return p_CurrentMember;
-+}
-+
-+static void FreeMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ t_FmPcdFrmReplicMember *p_Member)
-+{
-+    /* Note: the member AD cannot actually be freed here; the member is only
-+       returned to the available members list - therefore just memset the AD */
-+
-+ /* zero the AD */
-+ IOMemSet32(p_Member->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);
-+
-+
-+ /* return the member to the available members list */
-+ PutAvailableMember(p_ReplicGroup, p_Member);
-+}
-+
-+static t_Error RemoveMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
-+ uint16_t memberIndex)
-+{
-+ t_FmPcd *p_FmPcd = NULL;
-+ t_FmPcdFrmReplicMember *p_CurrentMember = NULL, *p_PreviousMember = NULL, *p_NextMember = NULL;
-+ t_Error err;
-+ uint8_t memberPosition;
-+
-+ p_FmPcd = p_ReplicGroup->h_FmPcd;
-+ ASSERT_COND(p_FmPcd);
-+ UNUSED(p_FmPcd);
-+
-+ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
-+ ASSERT_COND(p_CurrentMember);
-+
-+ /* determine the member position in the group */
-+ memberPosition = GetMemberPosition(p_ReplicGroup,
-+ memberIndex,
-+ FALSE/*remove operation*/);
-+
-+ switch (memberPosition)
-+ {
-+ case FRM_REPLIC_FIRST_MEMBER_INDEX:
-+ p_NextMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex+1));
-+ ASSERT_COND(p_NextMember);
-+
-+ /* update the source td itself by using a host command */
-+ err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
-+ p_NextMember,
-+ NULL,
-+ TRUE/*sourceDescriptor*/,
-+ FALSE/*last*/);
-+ break;
-+
-+ case FRM_REPLIC_MIDDLE_MEMBER_INDEX:
-+ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
-+ ASSERT_COND(p_PreviousMember);
-+
-+ p_NextMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex+1));
-+ ASSERT_COND(p_NextMember);
-+
-+ err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
-+ p_NextMember,
-+ p_PreviousMember,
-+ FALSE/*sourceDescriptor*/,
-+ FALSE/*last*/);
-+
-+ break;
-+
-+ case FRM_REPLIC_LAST_MEMBER_INDEX:
-+ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
-+ ASSERT_COND(p_PreviousMember);
-+
-+ err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
-+ NULL,
-+ p_PreviousMember,
-+ FALSE/*sourceDescriptor*/,
-+ TRUE/*last*/);
-+ break;
-+
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member position in remove member"));
-+ }
-+
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (p_CurrentMember->h_Manip)
-+ {
-+ FmPcdManipUpdateOwner(p_CurrentMember->h_Manip, FALSE);
-+ p_CurrentMember->h_Manip = NULL;
-+ }
-+
-+ /* remove the member from the driver internal members list */
-+ RemoveMemberFromList(p_ReplicGroup, p_CurrentMember);
-+
-+ /* return the member to the available members list */
-+ FreeMember(p_ReplicGroup, p_CurrentMember);
-+
-+ return E_OK;
-+}
-+
-+static void DeleteGroup(t_FmPcdFrmReplicGroup *p_ReplicGroup)
-+{
-+ int i, j;
-+ t_Handle h_Muram;
-+ t_FmPcdFrmReplicMember *p_Member, *p_CurrentMember;
-+
-+ if (p_ReplicGroup)
-+ {
-+ ASSERT_COND(p_ReplicGroup->h_FmPcd);
-+ h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
-+ ASSERT_COND(h_Muram);
-+
-+ /* free the source table descriptor */
-+ if (p_ReplicGroup->p_SourceTd)
-+ {
-+ FM_MURAM_FreeMem(h_Muram, p_ReplicGroup->p_SourceTd);
-+ p_ReplicGroup->p_SourceTd = NULL;
-+ }
-+
-+ /* Remove all members from the members linked list (hw and sw) and
-+ return the members to the available members list */
-+ if (p_ReplicGroup->numOfEntries)
-+ {
-+ j = p_ReplicGroup->numOfEntries-1;
-+
-+            /* manual removal of the members, since there are no owners of
-+               this group */
-+ for (i=j; i>=0; i--)
-+ {
-+ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)i/*memberIndex*/);
-+ ASSERT_COND(p_CurrentMember);
-+
-+ if (p_CurrentMember->h_Manip)
-+ {
-+ FmPcdManipUpdateOwner(p_CurrentMember->h_Manip, FALSE);
-+ p_CurrentMember->h_Manip = NULL;
-+ }
-+
-+ /* remove the member from the internal driver members list */
-+ RemoveMemberFromList(p_ReplicGroup, p_CurrentMember);
-+
-+ /* return the member to the available members list */
-+ FreeMember(p_ReplicGroup, p_CurrentMember);
-+ }
-+ }
-+
-+ /* Free members AD */
-+ for (i=0; i<p_ReplicGroup->maxNumOfEntries; i++)
-+ {
-+ p_Member = GetAvailableMember(p_ReplicGroup);
-+ ASSERT_COND(p_Member);
-+ if (p_Member->p_MemberAd)
-+ {
-+ FM_MURAM_FreeMem(h_Muram, p_Member->p_MemberAd);
-+ p_Member->p_MemberAd = NULL;
-+ }
-+ XX_Free(p_Member);
-+ }
-+
-+ /* release the group lock */
-+ if (p_ReplicGroup->p_Lock)
-+ FmPcdReleaseLock(p_ReplicGroup->h_FmPcd, p_ReplicGroup->p_Lock);
-+
-+ /* free the replicator group */
-+ XX_Free(p_ReplicGroup);
-+ }
-+}
-+
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+
-+/* NOTE: the inter-module routines are locked by cc in case of using them */
-+void * FrmReplicGroupGetSourceTableDescriptor(t_Handle h_ReplicGroup)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
-+ ASSERT_COND(p_ReplicGroup);
-+
-+ return (p_ReplicGroup->p_SourceTd);
-+}
-+
-+void FrmReplicGroupUpdateAd(t_Handle h_ReplicGroup,
-+ void *p_Ad,
-+ t_Handle *h_AdNew)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
-+ t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult*)p_Ad;
-+ t_FmPcd *p_FmPcd;
-+
-+ ASSERT_COND(p_ReplicGroup);
-+ p_FmPcd = p_ReplicGroup->h_FmPcd;
-+
-+ /* build a bypass ad */
-+ WRITE_UINT32(p_AdResult->fqid, FM_PCD_AD_BYPASS_TYPE |
-+ (uint32_t)((XX_VirtToPhys(p_ReplicGroup->p_SourceTd)) - p_FmPcd->physicalMuramBase));
-+
-+ *h_AdNew = NULL;
-+}
-+
-+void FrmReplicGroupUpdateOwner(t_Handle h_ReplicGroup,
-+ bool add)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
-+ ASSERT_COND(p_ReplicGroup);
-+
-+ /* update the group owner counter */
-+ if (add)
-+ p_ReplicGroup->owners++;
-+ else
-+ {
-+ ASSERT_COND(p_ReplicGroup->owners);
-+ p_ReplicGroup->owners--;
-+ }
-+}
-+
-+t_Error FrmReplicGroupTryLock(t_Handle h_ReplicGroup)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
-+
-+ ASSERT_COND(h_ReplicGroup);
-+
-+ if (FmPcdLockTryLock(p_ReplicGroup->p_Lock))
-+ return E_OK;
-+
-+ return ERROR_CODE(E_BUSY);
-+}
-+
-+void FrmReplicGroupUnlock(t_Handle h_ReplicGroup)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
-+
-+ ASSERT_COND(h_ReplicGroup);
-+
-+ FmPcdLockUnlock(p_ReplicGroup->p_Lock);
-+}
-+/*********************** End of inter-module routines ************************/
-+
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+t_Handle FM_PCD_FrmReplicSetGroup(t_Handle h_FmPcd,
-+ t_FmPcdFrmReplicGroupParams *p_ReplicGroupParam)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup;
-+ t_FmPcdFrmReplicMember *p_CurrentMember, *p_NextMember = NULL;
-+ int i;
-+ t_Error err;
-+ bool last = FALSE;
-+ t_Handle h_Muram;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_ReplicGroupParam, E_INVALID_HANDLE, NULL);
-+
-+ if (!FmPcdIsAdvancedOffloadSupported(h_FmPcd))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Advanced-offload must be enabled"));
-+ return NULL;
-+ }
-+
-+ err = CheckParams(h_FmPcd, p_ReplicGroupParam);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, (NO_MSG));
-+ return NULL;
-+ }
-+
-+ p_ReplicGroup = (t_FmPcdFrmReplicGroup*)XX_Malloc(sizeof(t_FmPcdFrmReplicGroup));
-+ if (!p_ReplicGroup)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
-+ return NULL;
-+ }
-+ memset(p_ReplicGroup, 0, sizeof(t_FmPcdFrmReplicGroup));
-+
-+ /* initialize lists for internal driver use */
-+ INIT_LIST(&p_ReplicGroup->availableMembersList);
-+ INIT_LIST(&p_ReplicGroup->membersList);
-+
-+ p_ReplicGroup->h_FmPcd = h_FmPcd;
-+
-+ h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
-+ ASSERT_COND(h_Muram);
-+
-+ /* initialize the group lock */
-+ p_ReplicGroup->p_Lock = FmPcdAcquireLock(p_ReplicGroup->h_FmPcd);
-+ if (!p_ReplicGroup->p_Lock)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Replic group lock"));
-+ DeleteGroup(p_ReplicGroup);
-+ return NULL;
-+ }
-+
-+ /* Allocate the frame replicator source table descriptor */
-+ p_ReplicGroup->p_SourceTd =
-+ (t_Handle)FM_MURAM_AllocMem(h_Muram,
-+ FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (!p_ReplicGroup->p_SourceTd)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("frame replicator source table descriptor"));
-+ DeleteGroup(p_ReplicGroup);
-+ return NULL;
-+ }
-+
-+ /* update the shadow size - required for the host commands */
-+ err = FmPcdUpdateCcShadow(p_ReplicGroup->h_FmPcd,
-+ FM_PCD_CC_AD_ENTRY_SIZE,
-+ FM_PCD_CC_AD_TABLE_ALIGN);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, ("Update CC shadow"));
-+ DeleteGroup(p_ReplicGroup);
-+ return NULL;
-+ }
-+
-+ p_ReplicGroup->maxNumOfEntries = p_ReplicGroupParam->maxNumOfEntries;
-+
-+    /* Allocate the maximal number of member ADs and the statistics AD for the group.
-+       This avoids MURAM allocation at run-time */
-+ for (i=0; i<p_ReplicGroup->maxNumOfEntries; i++)
-+ {
-+ err = AllocMember(p_ReplicGroup);
-+ if (err)
-+ {
-+ REPORT_ERROR(MAJOR, err, ("allocate a new member"));
-+ DeleteGroup(p_ReplicGroup);
-+ return NULL;
-+ }
-+ }
-+
-+ /* Initialize the members linked lists:
-+ (hw - the one that is used by the FMan controller and
-+ sw - the one that is managed by the driver internally) */
-+ for (i=(p_ReplicGroupParam->numOfEntries-1); i>=0; i--)
-+ {
-+ /* check if this is the last member in the group */
-+ if (i == (p_ReplicGroupParam->numOfEntries-1))
-+ last = TRUE;
-+ else
-+ last = FALSE;
-+
-+ /* Initialize a new member */
-+ p_CurrentMember = InitMember(p_ReplicGroup,
-+ &(p_ReplicGroupParam->nextEngineParams[i]),
-+ last);
-+ if (!p_CurrentMember)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("No available member"));
-+ DeleteGroup(p_ReplicGroup);
-+ return NULL;
-+ }
-+
-+ /* Build the members group - link two consecutive members in the hw linked list */
-+ LinkMemberToMember(p_ReplicGroup, p_CurrentMember, p_NextMember);
-+
-+ /* update the driver internal members list to be compatible to the hw members linked list */
-+ AddMemberToList(p_ReplicGroup, p_CurrentMember, &p_ReplicGroup->membersList);
-+
-+ p_NextMember = p_CurrentMember;
-+ }
-+
-+ /* initialize the source table descriptor */
-+ BuildSourceTd(p_ReplicGroup->p_SourceTd);
-+
-+ /* link the source table descriptor to point to the first member in the group */
-+ LinkSourceToMember(p_ReplicGroup, p_ReplicGroup->p_SourceTd, p_NextMember);
-+
-+ return p_ReplicGroup;
-+}
-+
-+t_Error FM_PCD_FrmReplicDeleteGroup(t_Handle h_ReplicGroup)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);
-+
-+ if (p_ReplicGroup->owners)
-+ RETURN_ERROR(MAJOR,
-+ E_INVALID_STATE,
-+ ("the group has owners and can't be deleted"));
-+
-+ DeleteGroup(p_ReplicGroup);
-+
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* API Run-time Frame replicator Control unit functions */
-+/*****************************************************************************/
-+t_Error FM_PCD_FrmReplicAddMember(t_Handle h_ReplicGroup,
-+ uint16_t memberIndex,
-+ t_FmPcdCcNextEngineParams *p_MemberParams)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup*) h_ReplicGroup;
-+ t_FmPcdFrmReplicMember *p_NewMember, *p_CurrentMember = NULL, *p_PreviousMember = NULL;
-+ t_Error err;
-+ uint8_t memberPosition;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_MemberParams, E_INVALID_HANDLE);
-+
-+ /* group lock */
-+ err = FrmReplicGroupTryLock(p_ReplicGroup);
-+ if (GET_ERROR_TYPE(err) == E_BUSY)
-+ return ERROR_CODE(E_BUSY);
-+
-+ if (memberIndex > p_ReplicGroup->numOfEntries)
-+ {
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
-+ ("memberIndex is greater than the members in the list"));
-+ }
-+
-+ if (memberIndex >= p_ReplicGroup->maxNumOfEntries)
-+ {
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("memberIndex is greater than the allowed number of members in the group"));
-+ }
-+
-+ if ((p_ReplicGroup->numOfEntries + 1) > FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES)
-+ {
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("numOfEntries with new entry can not be larger than %d\n",
-+ FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES));
-+ }
-+
-+ err = MemberCheckParams(p_ReplicGroup->h_FmPcd, p_MemberParams);
-+ if (err)
-+ {
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+ RETURN_ERROR(MAJOR, err, ("member check parameters in add operation"));
-+ }
-+ /* determine the member position in the group */
-+ memberPosition = GetMemberPosition(p_ReplicGroup,
-+ memberIndex,
-+ TRUE/* add operation */);
-+
-+ /* Initialize a new member */
-+ p_NewMember = InitMember(p_ReplicGroup,
-+ p_MemberParams,
-+ (memberPosition == FRM_REPLIC_LAST_MEMBER_INDEX ? TRUE : FALSE));
-+ if (!p_NewMember)
-+ {
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("No available member"));
-+ }
-+
-+ switch (memberPosition)
-+ {
-+ case FRM_REPLIC_FIRST_MEMBER_INDEX:
-+ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
-+ ASSERT_COND(p_CurrentMember);
-+
-+ LinkMemberToMember(p_ReplicGroup, p_NewMember, p_CurrentMember);
-+
-+ /* update the internal group source TD */
-+ LinkSourceToMember(p_ReplicGroup,
-+ p_ReplicGroup->p_SourceTd,
-+ p_NewMember);
-+
-+ /* add member to the internal sw member list */
-+ AddMemberToList(p_ReplicGroup,
-+ p_NewMember,
-+ &p_ReplicGroup->membersList);
-+ break;
-+
-+ case FRM_REPLIC_MIDDLE_MEMBER_INDEX:
-+ p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
-+ ASSERT_COND(p_CurrentMember);
-+
-+ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
-+ ASSERT_COND(p_PreviousMember);
-+
-+ LinkMemberToMember(p_ReplicGroup, p_NewMember, p_CurrentMember);
-+ LinkMemberToMember(p_ReplicGroup, p_PreviousMember, p_NewMember);
-+
-+ AddMemberToList(p_ReplicGroup, p_NewMember, &p_PreviousMember->node);
-+ break;
-+
-+ case FRM_REPLIC_LAST_MEMBER_INDEX:
-+ p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
-+ ASSERT_COND(p_PreviousMember);
-+
-+ LinkMemberToMember(p_ReplicGroup, p_PreviousMember, p_NewMember);
-+ FillReplicAdOfTypeResult(p_PreviousMember->p_MemberAd, FALSE/*last*/);
-+
-+ /* add the new member to the internal sw member list */
-+ AddMemberToList(p_ReplicGroup, p_NewMember, &p_PreviousMember->node);
-+ break;
-+
-+ default:
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member position in add member"));
-+
-+ }
-+
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PCD_FrmReplicRemoveMember(t_Handle h_ReplicGroup,
-+ uint16_t memberIndex)
-+{
-+ t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup*) h_ReplicGroup;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);
-+
-+ /* lock */
-+ err = FrmReplicGroupTryLock(p_ReplicGroup);
-+ if (GET_ERROR_TYPE(err) == E_BUSY)
-+ return ERROR_CODE(E_BUSY);
-+
-+ if (memberIndex >= p_ReplicGroup->numOfEntries)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member index to remove"));
-+
-+    /* Design decision: a group must contain at least one member;
-+       the last member cannot be removed from the group */
-+ if (p_ReplicGroup->numOfEntries == 1)
-+ RETURN_ERROR(MAJOR, E_CONFLICT, ("Can't remove the last member. At least one member should be related to a group."));
-+
-+ err = RemoveMember(p_ReplicGroup, memberIndex);
-+
-+ /* unlock */
-+ FrmReplicGroupUnlock(p_ReplicGroup);
-+
-+ switch (GET_ERROR_TYPE(err))
-+ {
-+ case E_OK:
-+ return E_OK;
-+
-+ case E_BUSY:
-+ DBG(TRACE, ("E_BUSY error"));
-+ return ERROR_CODE(E_BUSY);
-+
-+ default:
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+}
-+
-+/*********************** End of API routines ************************/
-+
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fm_replic.h
-@@ -0,0 +1,101 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_replic.h
-+
-+ @Description FM frame replicator
-+*//***************************************************************************/
-+#ifndef __FM_REPLIC_H
-+#define __FM_REPLIC_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+
-+
-+#define FRM_REPLIC_SOURCE_TD_OPCODE 0x75
-+#define NEXT_FRM_REPLIC_ADDR_SHIFT 4
-+#define NEXT_FRM_REPLIC_MEMBER_INDEX_SHIFT 16
-+#define FRM_REPLIC_FR_BIT 0x08000000
-+#define FRM_REPLIC_NL_BIT 0x10000000
-+#define FRM_REPLIC_INVALID_MEMBER_INDEX 0xffff
-+#define FRM_REPLIC_FIRST_MEMBER_INDEX 0
-+
-+#define FRM_REPLIC_MIDDLE_MEMBER_INDEX 1
-+#define FRM_REPLIC_LAST_MEMBER_INDEX 2
-+
-+#define SOURCE_TD_ITSELF_OPTION 0x01
-+#define SOURCE_TD_COPY_OPTION 0x02
-+#define SOURCE_TD_ITSELF_AND_COPY_OPTION SOURCE_TD_ITSELF_OPTION | SOURCE_TD_COPY_OPTION
-+#define SOURCE_TD_NONE 0x04
-+
-+/*typedef enum e_SourceTdOption
-+{
-+ e_SOURCE_TD_NONE = 0,
-+ e_SOURCE_TD_ITSELF_OPTION = 1,
-+ e_SOURCE_TD_COPY_OPTION = 2,
-+ e_SOURCE_TD_ITSELF_AND_COPY_OPTION = e_SOURCE_TD_ITSELF_OPTION | e_SOURCE_TD_COPY_OPTION
-+} e_SourceTdOption;
-+*/
-+
-+typedef struct
-+{
-+ volatile uint32_t type;
-+ volatile uint32_t frGroupPointer;
-+ volatile uint32_t operationCode;
-+ volatile uint32_t reserved;
-+} t_FrmReplicGroupSourceAd;
-+
-+typedef struct t_FmPcdFrmReplicMember
-+{
-+ void *p_MemberAd; /**< pointer to the member AD */
-+ void *p_StatisticsAd;/**< pointer to the statistics AD of the member */
-+ t_Handle h_Manip; /**< manip handle - need for free routines */
-+ t_List node;
-+} t_FmPcdFrmReplicMember;
-+
-+typedef struct t_FmPcdFrmReplicGroup
-+{
-+ t_Handle h_FmPcd;
-+
-+ uint8_t maxNumOfEntries;/**< maximal number of members in the group */
-+ uint8_t numOfEntries; /**< actual number of members in the group */
-+ uint16_t owners; /**< how many keys share this frame replicator group */
-+ void *p_SourceTd; /**< pointer to the frame replicator source table descriptor */
-+ t_List membersList; /**< the members list - should reflect the order of the members as in the hw linked list*/
-+ t_List availableMembersList;/**< list of all the available members in the group */
-+ t_FmPcdLock *p_Lock;
-+} t_FmPcdFrmReplicGroup;
-+
-+
-+#endif /* __FM_REPLIC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_kg.c
-@@ -0,0 +1,890 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "fsl_fman_kg.h"
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+
-+
-+static uint32_t build_ar_bind_scheme(uint8_t hwport_id, bool write)
-+{
-+ uint32_t rw;
-+
-+ rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ;
-+
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ rw |
-+ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
-+ hwport_id |
-+ FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
-+}
-+
-+static void clear_pe_all_scheme(struct fman_kg_regs *regs, uint8_t hwport_id)
-+{
-+ uint32_t ar;
-+
-+ fman_kg_write_sp(regs, 0xffffffff, 0);
-+
-+ ar = build_ar_bind_scheme(hwport_id, TRUE);
-+ fman_kg_write_ar_wait(regs, ar);
-+}
-+
-+static uint32_t build_ar_bind_cls_plan(uint8_t hwport_id, bool write)
-+{
-+ uint32_t rw;
-+
-+ rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ;
-+
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ rw |
-+ FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
-+ hwport_id |
-+ FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP);
-+}
-+
-+static void clear_pe_all_cls_plan(struct fman_kg_regs *regs, uint8_t hwport_id)
-+{
-+ uint32_t ar;
-+
-+ fman_kg_write_cpp(regs, 0);
-+
-+ ar = build_ar_bind_cls_plan(hwport_id, TRUE);
-+ fman_kg_write_ar_wait(regs, ar);
-+}
-+
-+static uint8_t get_gen_ht_code(enum fman_kg_gen_extract_src src,
-+ bool no_validation,
-+ uint8_t *offset)
-+{
-+ int code;
-+
-+ switch (src) {
-+ case E_FMAN_KG_GEN_EXTRACT_ETH:
-+ code = no_validation ? 0x73 : 0x3;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_ETYPE:
-+ code = no_validation ? 0x77 : 0x7;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_SNAP:
-+ code = no_validation ? 0x74 : 0x4;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_1:
-+ code = no_validation ? 0x75 : 0x5;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_N:
-+ code = no_validation ? 0x76 : 0x6;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_PPPoE:
-+ code = no_validation ? 0x78 : 0x8;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_MPLS_1:
-+ code = no_validation ? 0x79 : 0x9;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_MPLS_2:
-+ code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x19;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_MPLS_3:
-+ code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x29;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_MPLS_N:
-+ code = no_validation ? 0x7a : 0xa;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_IPv4_1:
-+ code = no_validation ? 0x7b : 0xb;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_IPv6_1:
-+ code = no_validation ? 0x7b : 0x1b;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_IPv4_2:
-+ code = no_validation ? 0x7c : 0xc;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_IPv6_2:
-+ code = no_validation ? 0x7c : 0x1c;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_MINENCAP:
-+ code = no_validation ? 0x7c : 0x2c;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_IP_PID:
-+ code = no_validation ? 0x72 : 0x2;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_GRE:
-+ code = no_validation ? 0x7d : 0xd;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_TCP:
-+ code = no_validation ? 0x7e : 0xe;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_UDP:
-+ code = no_validation ? 0x7e : 0x1e;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_SCTP:
-+ code = no_validation ? 0x7e : 0x3e;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_DCCP:
-+ code = no_validation ? 0x7e : 0x4e;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_IPSEC_AH:
-+ code = no_validation ? 0x7e : 0x2e;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_IPSEC_ESP:
-+ code = no_validation ? 0x7e : 0x6e;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_SHIM_1:
-+ code = 0x70;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_SHIM_2:
-+ code = 0x71;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_FROM_DFLT:
-+ code = 0x10;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_FROM_FRAME_START:
-+ code = 0x40;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_FROM_PARSE_RESULT:
-+ code = 0x20;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_FROM_END_OF_PARSE:
-+ code = 0x7f;
-+ break;
-+
-+ case E_FMAN_KG_GEN_EXTRACT_FROM_FQID:
-+ code = 0x20;
-+ *offset += 0x20;
-+ break;
-+
-+ default:
-+ code = FM_KG_SCH_GEN_HT_INVALID;
-+ }
-+
-+ return (uint8_t)code;
-+}
-+
-+static uint32_t build_ar_scheme(uint8_t scheme,
-+ uint8_t hwport_id,
-+ bool update_counter,
-+ bool write)
-+{
-+ uint32_t rw;
-+
-+ rw = (uint32_t)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
-+
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ rw |
-+ FM_KG_KGAR_SEL_SCHEME_ENTRY |
-+ hwport_id |
-+ ((uint32_t)scheme << FM_KG_KGAR_NUM_SHIFT) |
-+ (update_counter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT : 0));
-+}
-+
-+static uint32_t build_ar_cls_plan(uint8_t grp,
-+ uint8_t entries_mask,
-+ uint8_t hwport_id,
-+ bool write)
-+{
-+ uint32_t rw;
-+
-+ rw = (uint32_t)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
-+
-+ return (uint32_t)(FM_KG_KGAR_GO |
-+ rw |
-+ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
-+ hwport_id |
-+ ((uint32_t)grp << FM_KG_KGAR_NUM_SHIFT) |
-+ ((uint32_t)entries_mask << FM_KG_KGAR_WSEL_SHIFT));
-+}
-+
-+int fman_kg_write_ar_wait(struct fman_kg_regs *regs, uint32_t fmkg_ar)
-+{
-+ iowrite32be(fmkg_ar, &regs->fmkg_ar);
-+ /* Wait for GO to be idle and read error */
-+ while ((fmkg_ar = ioread32be(&regs->fmkg_ar)) & FM_KG_KGAR_GO) ;
-+ if (fmkg_ar & FM_PCD_KG_KGAR_ERR)
-+ return -EINVAL;
-+ return 0;
-+}
-+
-+void fman_kg_write_sp(struct fman_kg_regs *regs, uint32_t sp, bool add)
-+{
-+
-+ struct fman_kg_pe_regs *kgpe_regs;
-+ uint32_t tmp;
-+
-+ kgpe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
-+ tmp = ioread32be(&kgpe_regs->fmkg_pe_sp);
-+
-+ if (add)
-+ tmp |= sp;
-+ else /* clear */
-+ tmp &= ~sp;
-+
-+ iowrite32be(tmp, &kgpe_regs->fmkg_pe_sp);
-+
-+}
-+
-+void fman_kg_write_cpp(struct fman_kg_regs *regs, uint32_t cpp)
-+{
-+ struct fman_kg_pe_regs *kgpe_regs;
-+
-+ kgpe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
-+
-+ iowrite32be(cpp, &kgpe_regs->fmkg_pe_cpp);
-+}
-+
-+void fman_kg_get_event(struct fman_kg_regs *regs,
-+ uint32_t *event,
-+ uint32_t *scheme_idx)
-+{
-+ uint32_t mask, force;
-+
-+ *event = ioread32be(&regs->fmkg_eer);
-+ mask = ioread32be(&regs->fmkg_eeer);
-+ *scheme_idx = ioread32be(&regs->fmkg_seer);
-+ *scheme_idx &= ioread32be(&regs->fmkg_seeer);
-+
-+ *event &= mask;
-+
-+ /* clear the forced events */
-+ force = ioread32be(&regs->fmkg_feer);
-+ if (force & *event)
-+		iowrite32be(force & ~*event, &regs->fmkg_feer);
-+
-+ iowrite32be(*event, &regs->fmkg_eer);
-+ iowrite32be(*scheme_idx, &regs->fmkg_seer);
-+}
-+
-+
-+void fman_kg_init(struct fman_kg_regs *regs,
-+ uint32_t exceptions,
-+ uint32_t dflt_nia)
-+{
-+ uint32_t tmp;
-+ int i;
-+
-+ iowrite32be(FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW,
-+ &regs->fmkg_eer);
-+
-+ tmp = 0;
-+ if (exceptions & FM_EX_KG_DOUBLE_ECC)
-+ tmp |= FM_EX_KG_DOUBLE_ECC;
-+
-+ if (exceptions & FM_EX_KG_KEYSIZE_OVERFLOW)
-+ tmp |= FM_EX_KG_KEYSIZE_OVERFLOW;
-+
-+ iowrite32be(tmp, &regs->fmkg_eeer);
-+ iowrite32be(0, &regs->fmkg_fdor);
-+ iowrite32be(0, &regs->fmkg_gdv0r);
-+ iowrite32be(0, &regs->fmkg_gdv1r);
-+ iowrite32be(dflt_nia, &regs->fmkg_gcr);
-+
-+	/* Clear the binding of ports to schemes and classification plans
-+	 * so that no port is bound to any scheme/classification plan */
-+ for (i = 0; i < FMAN_MAX_NUM_OF_HW_PORTS; i++) {
-+ clear_pe_all_scheme(regs, (uint8_t)i);
-+ clear_pe_all_cls_plan(regs, (uint8_t)i);
-+ }
-+}
-+
-+void fman_kg_enable_scheme_interrupts(struct fman_kg_regs *regs)
-+{
-+	/* clear and enable all scheme interrupts */
-+ iowrite32be(0xFFFFFFFF, &regs->fmkg_seer);
-+ iowrite32be(0xFFFFFFFF, &regs->fmkg_seeer);
-+}
-+
-+void fman_kg_enable(struct fman_kg_regs *regs)
-+{
-+ iowrite32be(ioread32be(&regs->fmkg_gcr) | FM_KG_KGGCR_EN,
-+ &regs->fmkg_gcr);
-+}
-+
-+void fman_kg_disable(struct fman_kg_regs *regs)
-+{
-+ iowrite32be(ioread32be(&regs->fmkg_gcr) & ~FM_KG_KGGCR_EN,
-+ &regs->fmkg_gcr);
-+}
-+
-+void fman_kg_set_data_after_prs(struct fman_kg_regs *regs, uint8_t offset)
-+{
-+ iowrite32be(offset, &regs->fmkg_fdor);
-+}
-+
-+void fman_kg_set_dflt_val(struct fman_kg_regs *regs,
-+ uint8_t def_id,
-+ uint32_t val)
-+{
-+	if (def_id == 0)
-+ iowrite32be(val, &regs->fmkg_gdv0r);
-+ else
-+ iowrite32be(val, &regs->fmkg_gdv1r);
-+}
-+
-+
-+void fman_kg_set_exception(struct fman_kg_regs *regs,
-+ uint32_t exception,
-+ bool enable)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->fmkg_eeer);
-+
-+ if (enable) {
-+ tmp |= exception;
-+ } else {
-+ tmp &= ~exception;
-+ }
-+
-+ iowrite32be(tmp, &regs->fmkg_eeer);
-+}
-+
-+void fman_kg_get_exception(struct fman_kg_regs *regs,
-+ uint32_t *events,
-+ uint32_t *scheme_ids,
-+ bool clear)
-+{
-+ uint32_t mask;
-+
-+ *events = ioread32be(&regs->fmkg_eer);
-+ mask = ioread32be(&regs->fmkg_eeer);
-+ *events &= mask;
-+
-+ *scheme_ids = 0;
-+
-+ if (*events & FM_EX_KG_KEYSIZE_OVERFLOW) {
-+ *scheme_ids = ioread32be(&regs->fmkg_seer);
-+ mask = ioread32be(&regs->fmkg_seeer);
-+ *scheme_ids &= mask;
-+ }
-+
-+ if (clear) {
-+ iowrite32be(*scheme_ids, &regs->fmkg_seer);
-+ iowrite32be(*events, &regs->fmkg_eer);
-+ }
-+}
-+
-+void fman_kg_get_capture(struct fman_kg_regs *regs,
-+ struct fman_kg_ex_ecc_attr *ecc_attr,
-+ bool clear)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->fmkg_serc);
-+
-+ if (tmp & KG_FMKG_SERC_CAP) {
-+ /* Captured data is valid */
-+ ecc_attr->valid = TRUE;
-+ ecc_attr->double_ecc =
-+ (bool)((tmp & KG_FMKG_SERC_CET) ? TRUE : FALSE);
-+ ecc_attr->single_ecc_count =
-+ (uint8_t)((tmp & KG_FMKG_SERC_CNT_MSK) >>
-+ KG_FMKG_SERC_CNT_SHIFT);
-+ ecc_attr->addr = (uint16_t)(tmp & KG_FMKG_SERC_ADDR_MSK);
-+
-+ if (clear)
-+ iowrite32be(KG_FMKG_SERC_CAP, &regs->fmkg_serc);
-+ } else {
-+ /* No ECC error is captured */
-+ ecc_attr->valid = FALSE;
-+ }
-+}
-+
-+int fman_kg_build_scheme(struct fman_kg_scheme_params *params,
-+ struct fman_kg_scheme_regs *scheme_regs)
-+{
-+ struct fman_kg_extract_params *extract_params;
-+ struct fman_kg_gen_extract_params *gen_params;
-+ uint32_t tmp_reg, i, select, mask, fqb;
-+ uint8_t offset, shift, ht;
-+
-+ /* Zero out all registers so no need to care about unused ones */
-+ memset(scheme_regs, 0, sizeof(struct fman_kg_scheme_regs));
-+
-+ /* Mode register */
-+ tmp_reg = fm_kg_build_nia(params->next_engine,
-+ params->next_engine_action);
-+ if (tmp_reg == KG_NIA_INVALID) {
-+ return -EINVAL;
-+ }
-+
-+ if (params->next_engine == E_FMAN_PCD_PLCR) {
-+ tmp_reg |= FMAN_KG_SCH_MODE_NIA_PLCR;
-+ }
-+ else if (params->next_engine == E_FMAN_PCD_CC) {
-+ tmp_reg |= (uint32_t)params->cc_params.base_offset <<
-+ FMAN_KG_SCH_MODE_CCOBASE_SHIFT;
-+ }
-+
-+ tmp_reg |= FMAN_KG_SCH_MODE_EN;
-+ scheme_regs->kgse_mode = tmp_reg;
-+
-+ /* Match vector */
-+ scheme_regs->kgse_mv = params->match_vector;
-+
-+ extract_params = &params->extract_params;
-+
-+ /* Scheme default values registers */
-+ scheme_regs->kgse_dv0 = extract_params->def_scheme_0;
-+ scheme_regs->kgse_dv1 = extract_params->def_scheme_1;
-+
-+ /* Extract Known Fields Command register */
-+ scheme_regs->kgse_ekfc = extract_params->known_fields;
-+
-+ /* Entry Extract Known Default Value register */
-+ tmp_reg = 0;
-+ tmp_reg |= extract_params->known_fields_def.mac_addr <<
-+ FMAN_KG_SCH_DEF_MAC_ADDR_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.vlan_tci <<
-+ FMAN_KG_SCH_DEF_VLAN_TCI_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.etype <<
-+ FMAN_KG_SCH_DEF_ETYPE_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.ppp_sid <<
-+ FMAN_KG_SCH_DEF_PPP_SID_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.ppp_pid <<
-+ FMAN_KG_SCH_DEF_PPP_PID_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.mpls <<
-+ FMAN_KG_SCH_DEF_MPLS_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.ip_addr <<
-+ FMAN_KG_SCH_DEF_IP_ADDR_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.ptype <<
-+ FMAN_KG_SCH_DEF_PTYPE_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.ip_tos_tc <<
-+ FMAN_KG_SCH_DEF_IP_TOS_TC_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.ipv6_fl <<
-+ FMAN_KG_SCH_DEF_IPv6_FL_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.ipsec_spi <<
-+ FMAN_KG_SCH_DEF_IPSEC_SPI_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.l4_port <<
-+ FMAN_KG_SCH_DEF_L4_PORT_SHIFT;
-+ tmp_reg |= extract_params->known_fields_def.tcp_flg <<
-+ FMAN_KG_SCH_DEF_TCP_FLG_SHIFT;
-+
-+ scheme_regs->kgse_ekdv = tmp_reg;
-+
-+ /* Generic extract registers */
-+ if (extract_params->gen_extract_num > FM_KG_NUM_OF_GENERIC_REGS) {
-+ return -EINVAL;
-+ }
-+
-+ for (i = 0; i < extract_params->gen_extract_num; i++) {
-+ gen_params = extract_params->gen_extract + i;
-+
-+ tmp_reg = FMAN_KG_SCH_GEN_VALID;
-+ tmp_reg |= (uint32_t)gen_params->def_val <<
-+ FMAN_KG_SCH_GEN_DEF_SHIFT;
-+
-+ if (gen_params->type == E_FMAN_KG_HASH_EXTRACT) {
-+ if ((gen_params->extract > FMAN_KG_SCH_GEN_SIZE_MAX) ||
-+ (gen_params->extract == 0)) {
-+ return -EINVAL;
-+ }
-+ } else {
-+ tmp_reg |= FMAN_KG_SCH_GEN_OR;
-+ }
-+
-+ tmp_reg |= (uint32_t)gen_params->extract <<
-+ FMAN_KG_SCH_GEN_SIZE_SHIFT;
-+ tmp_reg |= (uint32_t)gen_params->mask <<
-+ FMAN_KG_SCH_GEN_MASK_SHIFT;
-+
-+ offset = gen_params->offset;
-+ ht = get_gen_ht_code(gen_params->src,
-+ gen_params->no_validation,
-+ &offset);
-+ tmp_reg |= (uint32_t)ht << FMAN_KG_SCH_GEN_HT_SHIFT;
-+ tmp_reg |= offset;
-+
-+ scheme_regs->kgse_gec[i] = tmp_reg;
-+ }
-+
-+ /* Masks registers */
-+ if (extract_params->masks_num > FM_KG_EXTRACT_MASKS_NUM) {
-+ return -EINVAL;
-+ }
-+
-+ select = 0;
-+ mask = 0;
-+ fqb = 0;
-+ for (i = 0; i < extract_params->masks_num; i++) {
-+ /* MCSx fields */
-+ KG_GET_MASK_SEL_SHIFT(shift, i);
-+ if (extract_params->masks[i].is_known) {
-+ /* Mask known field */
-+ select |= extract_params->masks[i].field_or_gen_idx <<
-+ shift;
-+ } else {
-+ /* Mask generic extract */
-+ select |= (extract_params->masks[i].field_or_gen_idx +
-+ FM_KG_MASK_SEL_GEN_BASE) << shift;
-+ }
-+
-+ /* MOx fields - spread between se_bmch and se_fqb registers */
-+ KG_GET_MASK_OFFSET_SHIFT(shift, i);
-+ if (i < 2) {
-+ select |= (uint32_t)extract_params->masks[i].offset <<
-+ shift;
-+ } else {
-+ fqb |= (uint32_t)extract_params->masks[i].offset <<
-+ shift;
-+ }
-+
-+ /* BMx fields */
-+ KG_GET_MASK_SHIFT(shift, i);
-+ mask |= (uint32_t)extract_params->masks[i].mask << shift;
-+ }
-+
-+	/* Finish with the rest of the BMx fields -
-+ * don't mask bits for unused masks by setting
-+ * corresponding BMx field = 0xFF */
-+ for (i = extract_params->masks_num; i < FM_KG_EXTRACT_MASKS_NUM; i++) {
-+ KG_GET_MASK_SHIFT(shift, i);
-+ mask |= 0xFF << shift;
-+ }
-+
-+ scheme_regs->kgse_bmch = select;
-+ scheme_regs->kgse_bmcl = mask;
-+
-+ /* Finish with FQB register initialization.
-+ * Check fqid is 24-bit value. */
-+ if (params->base_fqid & ~0x00FFFFFF) {
-+ return -EINVAL;
-+ }
-+
-+ fqb |= params->base_fqid;
-+ scheme_regs->kgse_fqb = fqb;
-+
-+ /* Hash Configuration register */
-+ tmp_reg = 0;
-+ if (params->hash_params.use_hash) {
-+ /* Check hash mask is 24-bit value */
-+ if (params->hash_params.mask & ~0x00FFFFFF) {
-+ return -EINVAL;
-+ }
-+
-+		/* The hash function produces a 64-bit value; 24 bits of it
-+		 * are used to generate the FQID and the policer profile.
-+		 * Thus the maximal shift is 40 bits, which keeps 24 bits out of 64.
-+		 */
-+ if (params->hash_params.shift_r > FMAN_KG_SCH_HASH_HSHIFT_MAX) {
-+ return -EINVAL;
-+ }
-+
-+ tmp_reg |= params->hash_params.mask;
-+ tmp_reg |= (uint32_t)params->hash_params.shift_r <<
-+ FMAN_KG_SCH_HASH_HSHIFT_SHIFT;
-+
-+ if (params->hash_params.sym) {
-+ tmp_reg |= FMAN_KG_SCH_HASH_SYM;
-+ }
-+
-+ }
-+
-+ if (params->bypass_fqid_gen) {
-+ tmp_reg |= FMAN_KG_SCH_HASH_NO_FQID_GEN;
-+ }
-+
-+ scheme_regs->kgse_hc = tmp_reg;
-+
-+ /* Policer Profile register */
-+ if (params->policer_params.bypass_pp_gen) {
-+ tmp_reg = 0;
-+ } else {
-+		/* The lower 8 bits of the 24 bits extracted from the hash result
-+		 * are used for policer profile generation.
-+		 * That leaves a maximum shift value of 23. */
-+ if (params->policer_params.shift > FMAN_KG_SCH_PP_SHIFT_MAX) {
-+ return -EINVAL;
-+ }
-+
-+ tmp_reg = params->policer_params.base;
-+ tmp_reg |= ((uint32_t)params->policer_params.shift <<
-+ FMAN_KG_SCH_PP_SH_SHIFT) &
-+ FMAN_KG_SCH_PP_SH_MASK;
-+ tmp_reg |= ((uint32_t)params->policer_params.shift <<
-+ FMAN_KG_SCH_PP_SL_SHIFT) &
-+ FMAN_KG_SCH_PP_SL_MASK;
-+ tmp_reg |= (uint32_t)params->policer_params.mask <<
-+ FMAN_KG_SCH_PP_MASK_SHIFT;
-+ }
-+
-+ scheme_regs->kgse_ppc = tmp_reg;
-+
-+ /* Coarse Classification Bit Select register */
-+ if (params->next_engine == E_FMAN_PCD_CC) {
-+ scheme_regs->kgse_ccbs = params->cc_params.qlcv_bits_sel;
-+ }
-+
-+ /* Packets Counter register */
-+ if (params->update_counter) {
-+ scheme_regs->kgse_spc = params->counter_value;
-+ }
-+
-+ return 0;
-+}
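
The range checks above reduce to a few simple limits: the base FQID and the hash mask must fit in 24 bits, the hash shift may not exceed 40 (a 64-bit hash with 24 usable bits), and the policer-profile shift may not exceed 23. A minimal stand-alone sketch of those same limits; the helper name and parameters are hypothetical and only illustrate the checks above:

	#include <stdint.h>

	/* Hypothetical helper mirroring the limits enforced by fman_kg_build_scheme();
	 * not part of the driver, only an illustration of the checks above. */
	static int kg_scheme_limits_ok(uint32_t base_fqid, uint32_t hash_mask,
				       uint8_t hash_shift, uint8_t pp_shift)
	{
		if (base_fqid & ~0x00FFFFFF)	/* FQID must be a 24-bit value */
			return 0;
		if (hash_mask & ~0x00FFFFFF)	/* hash mask must be a 24-bit value */
			return 0;
		if (hash_shift > 40)		/* 64-bit hash, 24 bits are consumed */
			return 0;
		if (pp_shift > 23)		/* low 8 of the 24 bits feed the profile */
			return 0;
		return 1;
	}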
-+
-+int fman_kg_write_scheme(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id,
-+ struct fman_kg_scheme_regs *scheme_regs,
-+ bool update_counter)
-+{
-+ struct fman_kg_scheme_regs *kgse_regs;
-+ uint32_t tmp_reg;
-+ int err, i;
-+
-+ /* Write indirect scheme registers */
-+ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
-+
-+ iowrite32be(scheme_regs->kgse_mode, &kgse_regs->kgse_mode);
-+ iowrite32be(scheme_regs->kgse_ekfc, &kgse_regs->kgse_ekfc);
-+ iowrite32be(scheme_regs->kgse_ekdv, &kgse_regs->kgse_ekdv);
-+ iowrite32be(scheme_regs->kgse_bmch, &kgse_regs->kgse_bmch);
-+ iowrite32be(scheme_regs->kgse_bmcl, &kgse_regs->kgse_bmcl);
-+ iowrite32be(scheme_regs->kgse_fqb, &kgse_regs->kgse_fqb);
-+ iowrite32be(scheme_regs->kgse_hc, &kgse_regs->kgse_hc);
-+ iowrite32be(scheme_regs->kgse_ppc, &kgse_regs->kgse_ppc);
-+ iowrite32be(scheme_regs->kgse_spc, &kgse_regs->kgse_spc);
-+ iowrite32be(scheme_regs->kgse_dv0, &kgse_regs->kgse_dv0);
-+ iowrite32be(scheme_regs->kgse_dv1, &kgse_regs->kgse_dv1);
-+ iowrite32be(scheme_regs->kgse_ccbs, &kgse_regs->kgse_ccbs);
-+ iowrite32be(scheme_regs->kgse_mv, &kgse_regs->kgse_mv);
-+
-+ for (i = 0 ; i < FM_KG_NUM_OF_GENERIC_REGS ; i++)
-+ iowrite32be(scheme_regs->kgse_gec[i], &kgse_regs->kgse_gec[i]);
-+
-+ /* Write AR (Action register) */
-+ tmp_reg = build_ar_scheme(scheme_id, hwport_id, update_counter, TRUE);
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+ return err;
-+}
-+
-+int fman_kg_delete_scheme(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id)
-+{
-+ struct fman_kg_scheme_regs *kgse_regs;
-+ uint32_t tmp_reg;
-+ int err, i;
-+
-+ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
-+
-+ /* Clear all registers including enable bit in mode register */
-+ for (i = 0; i < (sizeof(struct fman_kg_scheme_regs)) / 4; ++i) {
-+ iowrite32be(0, ((uint32_t *)kgse_regs + i));
-+ }
-+
-+ /* Write AR (Action register) */
-+ tmp_reg = build_ar_scheme(scheme_id, hwport_id, FALSE, TRUE);
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+ return err;
-+}
-+
-+int fman_kg_get_scheme_counter(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id,
-+ uint32_t *counter)
-+{
-+ struct fman_kg_scheme_regs *kgse_regs;
-+ uint32_t tmp_reg;
-+ int err;
-+
-+ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
-+
-+ tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE);
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+
-+ if (err != 0)
-+ return err;
-+
-+ *counter = ioread32be(&kgse_regs->kgse_spc);
-+
-+ return 0;
-+}
-+
-+int fman_kg_set_scheme_counter(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id,
-+ uint32_t counter)
-+{
-+ struct fman_kg_scheme_regs *kgse_regs;
-+ uint32_t tmp_reg;
-+ int err;
-+
-+ kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
-+
-+ tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE);
-+
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+ if (err != 0)
-+ return err;
-+
-+	/* The KeyGen indirect access memory now contains all of the scheme's
-+	 * registers; change only the counter value. */
-+ iowrite32be(counter, &kgse_regs->kgse_spc);
-+
-+ /* Write back scheme registers */
-+ tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, TRUE);
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+
-+ return err;
-+}
-+
-+uint32_t fman_kg_get_schemes_total_counter(struct fman_kg_regs *regs)
-+{
-+ return ioread32be(&regs->fmkg_tpc);
-+}
-+
-+int fman_kg_build_cls_plan(struct fman_kg_cls_plan_params *params,
-+ struct fman_kg_cp_regs *cls_plan_regs)
-+{
-+ uint8_t entries_set, entry_bit;
-+ int i;
-+
-+	/* Zero out all of the group's registers */
-+ memset(cls_plan_regs, 0, sizeof(struct fman_kg_cp_regs));
-+
-+ /* Go over all classification entries in params->entries_mask and
-+ * configure the corresponding cpe register */
-+ entries_set = params->entries_mask;
-+ for (i = 0; entries_set; i++) {
-+ entry_bit = (uint8_t)(0x80 >> i);
-+ if ((entry_bit & entries_set) == 0)
-+ continue;
-+ entries_set ^= entry_bit;
-+ cls_plan_regs->kgcpe[i] = params->mask_vector[i];
-+ }
-+
-+ return 0;
-+}
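
In entries_mask, bit 0x80 selects classification-plan entry 0, 0x40 entry 1, and so on; only the selected entries receive a mask vector, the rest keep the zero value set by the memset() above. A minimal usage sketch, with illustrative mask and vector values:

	struct fman_kg_cls_plan_params cp;
	struct fman_kg_cp_regs cp_regs;

	memset(&cp, 0, sizeof(cp));
	cp.entries_mask = 0xA0;           /* selects entries 0 and 2 */
	cp.mask_vector[0] = 0x0000000F;   /* illustrative vectors */
	cp.mask_vector[2] = 0x000000F0;

	fman_kg_build_cls_plan(&cp, &cp_regs);
	/* cp_regs.kgcpe[0] == 0x0000000F, cp_regs.kgcpe[2] == 0x000000F0,
	 * all other kgcpe[] entries remain zero. */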
-+
-+int fman_kg_write_cls_plan(struct fman_kg_regs *regs,
-+ uint8_t grp_id,
-+ uint8_t entries_mask,
-+ uint8_t hwport_id,
-+ struct fman_kg_cp_regs *cls_plan_regs)
-+{
-+ struct fman_kg_cp_regs *kgcpe_regs;
-+ uint32_t tmp_reg;
-+ int i, err;
-+
-+	/* Check that the group index is valid */
-+ if (grp_id >= FM_KG_CLS_PLAN_GRPS_NUM)
-+ return -EINVAL;
-+
-+ /* Write indirect classification plan registers */
-+ kgcpe_regs = (struct fman_kg_cp_regs *)&(regs->fmkg_indirect[0]);
-+
-+ for (i = 0; i < FM_KG_NUM_CLS_PLAN_ENTR; i++) {
-+ iowrite32be(cls_plan_regs->kgcpe[i], &kgcpe_regs->kgcpe[i]);
-+ }
-+
-+ tmp_reg = build_ar_cls_plan(grp_id, entries_mask, hwport_id, TRUE);
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+ return err;
-+}
-+
-+int fman_kg_write_bind_schemes(struct fman_kg_regs *regs,
-+ uint8_t hwport_id,
-+ uint32_t schemes)
-+{
-+ struct fman_kg_pe_regs *kg_pe_regs;
-+ uint32_t tmp_reg;
-+ int err;
-+
-+ kg_pe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
-+
-+ iowrite32be(schemes, &kg_pe_regs->fmkg_pe_sp);
-+
-+ tmp_reg = build_ar_bind_scheme(hwport_id, TRUE);
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+ return err;
-+}
-+
-+int fman_kg_build_bind_cls_plans(uint8_t grp_base,
-+ uint8_t grp_mask,
-+ uint32_t *bind_cls_plans)
-+{
-+	/* Check that grp_base and grp_mask are 5-bit values */
-+ if ((grp_base & ~0x0000001F) || (grp_mask & ~0x0000001F))
-+ return -EINVAL;
-+
-+ *bind_cls_plans = (uint32_t) ((grp_mask << FMAN_KG_PE_CPP_MASK_SHIFT) | grp_base);
-+ return 0;
-+}
-+
-+
-+int fman_kg_write_bind_cls_plans(struct fman_kg_regs *regs,
-+ uint8_t hwport_id,
-+ uint32_t bind_cls_plans)
-+{
-+ struct fman_kg_pe_regs *kg_pe_regs;
-+ uint32_t tmp_reg;
-+ int err;
-+
-+ kg_pe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
-+
-+ iowrite32be(bind_cls_plans, &kg_pe_regs->fmkg_pe_cpp);
-+
-+ tmp_reg = build_ar_bind_cls_plan(hwport_id, TRUE);
-+ err = fman_kg_write_ar_wait(regs, tmp_reg);
-+ return err;
-+}
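
For reference, a minimal sketch of how a caller might bind a port to KeyGen schemes and a classification-plan group using the helpers from this file; the function name, port ID, scheme bitmap and group/mask values are placeholders, not taken from the patch:

	static int kg_bind_port_example(struct fman_kg_regs *regs, uint8_t hwport_id)
	{
		uint32_t cpp;
		int err;

		/* scheme bitmap written to the port's scheme-partition register;
		 * the value here is only a placeholder */
		err = fman_kg_write_bind_schemes(regs, hwport_id, 0xC0000000);
		if (err)
			return err;

		/* classification-plan group base 0 with the full 5-bit mask */
		err = fman_kg_build_bind_cls_plans(0, 0x1F, &cpp);
		if (err)
			return err;

		return fman_kg_write_bind_cls_plans(regs, hwport_id, cpp);
	}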
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Pcd/fman_prs.c
-@@ -0,0 +1,129 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "fsl_fman_prs.h"
-+
-+uint32_t fman_prs_get_err_event(struct fman_prs_regs *regs, uint32_t ev_mask)
-+{
-+ return ioread32be(&regs->fmpr_perr) & ev_mask;
-+}
-+
-+uint32_t fman_prs_get_err_ev_mask(struct fman_prs_regs *regs)
-+{
-+ return ioread32be(&regs->fmpr_perer);
-+}
-+
-+void fman_prs_ack_err_event(struct fman_prs_regs *regs, uint32_t event)
-+{
-+ iowrite32be(event, &regs->fmpr_perr);
-+}
-+
-+uint32_t fman_prs_get_expt_event(struct fman_prs_regs *regs, uint32_t ev_mask)
-+{
-+ return ioread32be(&regs->fmpr_pevr) & ev_mask;
-+}
-+
-+uint32_t fman_prs_get_expt_ev_mask(struct fman_prs_regs *regs)
-+{
-+ return ioread32be(&regs->fmpr_pever);
-+}
-+
-+void fman_prs_ack_expt_event(struct fman_prs_regs *regs, uint32_t event)
-+{
-+ iowrite32be(event, &regs->fmpr_pevr);
-+}
-+
-+void fman_prs_defconfig(struct fman_prs_cfg *cfg)
-+{
-+ cfg->port_id_stat = 0;
-+ cfg->max_prs_cyc_lim = DEFAULT_MAX_PRS_CYC_LIM;
-+ cfg->prs_exceptions = 0x03000000;
-+}
-+
-+int fman_prs_init(struct fman_prs_regs *regs, struct fman_prs_cfg *cfg)
-+{
-+ uint32_t tmp;
-+
-+ iowrite32be(cfg->max_prs_cyc_lim, &regs->fmpr_rpclim);
-+ iowrite32be((FM_PCD_PRS_SINGLE_ECC | FM_PCD_PRS_PORT_IDLE_STS),
-+ &regs->fmpr_pevr);
-+
-+ if (cfg->prs_exceptions & FM_PCD_EX_PRS_SINGLE_ECC)
-+ iowrite32be(FM_PCD_PRS_SINGLE_ECC, &regs->fmpr_pever);
-+ else
-+ iowrite32be(0, &regs->fmpr_pever);
-+
-+ iowrite32be(FM_PCD_PRS_DOUBLE_ECC, &regs->fmpr_perr);
-+
-+ tmp = 0;
-+ if (cfg->prs_exceptions & FM_PCD_EX_PRS_DOUBLE_ECC)
-+ tmp |= FM_PCD_PRS_DOUBLE_ECC;
-+ iowrite32be(tmp, &regs->fmpr_perer);
-+
-+ iowrite32be(cfg->port_id_stat, &regs->fmpr_ppsc);
-+
-+ return 0;
-+}
-+
-+void fman_prs_enable(struct fman_prs_regs *regs)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->fmpr_rpimac) | FM_PCD_PRS_RPIMAC_EN;
-+ iowrite32be(tmp, &regs->fmpr_rpimac);
-+}
-+
-+void fman_prs_disable(struct fman_prs_regs *regs)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&regs->fmpr_rpimac) & ~FM_PCD_PRS_RPIMAC_EN;
-+ iowrite32be(tmp, &regs->fmpr_rpimac);
-+}
-+
-+int fman_prs_is_enabled(struct fman_prs_regs *regs)
-+{
-+ return ioread32be(&regs->fmpr_rpimac) & FM_PCD_PRS_RPIMAC_EN;
-+}
-+
-+void fman_prs_set_stst_port_msk(struct fman_prs_regs *regs, uint32_t pid_msk)
-+{
-+ iowrite32be(pid_msk, &regs->fmpr_ppsc);
-+}
-+
-+void fman_prs_set_stst(struct fman_prs_regs *regs, bool enable)
-+{
-+ if (enable)
-+ iowrite32be(FM_PCD_PRS_PPSC_ALL_PORTS, &regs->fmpr_ppsc);
-+ else
-+ iowrite32be(0, &regs->fmpr_ppsc);
-+}
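
A minimal bring-up sketch for the parser helpers above: load the defaults, apply them, then enable parsing. The function name is hypothetical and regs is assumed to map the parser register block:

	static int prs_bring_up(struct fman_prs_regs *regs)
	{
		struct fman_prs_cfg cfg;
		int err;

		fman_prs_defconfig(&cfg);        /* default cycle limit and exceptions */
		err = fman_prs_init(regs, &cfg);
		if (err)
			return err;

		fman_prs_enable(regs);
		return fman_prs_is_enabled(regs) ? 0 : -EIO;
	}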
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/Makefile
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+obj-y += fsl-ncsw-Pcd.o
-+
-+fsl-ncsw-Pcd-objs := fm_port.o fm_port_im.o fman_port.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.c
-@@ -0,0 +1,6437 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_port.c
-+
-+ @Description   FM Port driver routines implementation.
-+ *//***************************************************************************/
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+#include "debug_ext.h"
-+#include "fm_muram_ext.h"
-+
-+#include "fman_common.h"
-+#include "fm_port.h"
-+#include "fm_port_dsar.h"
-+#include "common/general.h"
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+static t_Error FmPortConfigAutoResForDeepSleepSupport1(t_FmPort *p_FmPort);
-+
-+static t_Error CheckInitParameters(t_FmPort *p_FmPort)
-+{
-+ t_FmPortDriverParam *p_Params = p_FmPort->p_FmPortDriverParam;
-+ struct fman_port_cfg *p_DfltConfig = &p_Params->dfltCfg;
-+ t_Error ans = E_OK;
-+ uint32_t unusedMask;
-+
-+ if (p_FmPort->imEn)
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ if (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
-+ > 2)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("fifoDeqPipelineDepth for IM 10G can't be larger than 2"));
-+
-+ if ((ans = FmPortImCheckInitParameters(p_FmPort)) != E_OK)
-+ return ERROR_CODE(ans);
-+ }
-+ else
-+ {
-+ /****************************************/
-+ /* Rx only */
-+ /****************************************/
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ /* external buffer pools */
-+ if (!p_Params->extBufPools.numOfPoolsUsed)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("extBufPools.numOfPoolsUsed=0. At least one buffer pool must be defined"));
-+
-+ if (FmSpCheckBufPoolsParams(&p_Params->extBufPools,
-+ p_Params->p_BackupBmPools,
-+ &p_Params->bufPoolDepletion) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+
-+            /* Check that the part of the IC that needs copying is small enough to fit into the start margins */
-+ if (p_Params->intContext.size
-+ && (p_Params->intContext.size
-+ + p_Params->intContext.extBufOffset
-+ > p_Params->bufMargins.startMargins))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("intContext.size is larger than start margins"));
-+
-+ if ((p_Params->liodnOffset != (uint16_t)DPAA_LIODN_DONT_OVERRIDE)
-+ && (p_Params->liodnOffset & ~FM_LIODN_OFFSET_MASK))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1));
-+
-+#ifdef FM_NO_BACKUP_POOLS
-+ if ((p_FmPort->fmRevInfo.majorRev != 4) && (p_FmPort->fmRevInfo.majorRev < 6))
-+ if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("BackupBmPools"));
-+#endif /* FM_NO_BACKUP_POOLS */
-+ }
-+
-+ /****************************************/
-+ /* Non Rx ports */
-+ /****************************************/
-+ else
-+ {
-+ if (p_Params->deqSubPortal >= FM_MAX_NUM_OF_SUB_PORTALS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ (" deqSubPortal has to be in the range of 0 - %d", FM_MAX_NUM_OF_SUB_PORTALS));
-+
-+ /* to protect HW internal-context from overwrite */
-+ if ((p_Params->intContext.size)
-+ && (p_Params->intContext.intContextOffset
-+ < MIN_TX_INT_OFFSET))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("non-Rx intContext.intContextOffset can't be smaller than %d", MIN_TX_INT_OFFSET));
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
-+                /* for O/H ports, DEFAULT_notSupported indicates that the parameter is not supported and should not be checked */
-+ || (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
-+ != DEFAULT_notSupported))
-+ {
-+ /* Check that not larger than 8 */
-+ if ((!p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth)
-+ || (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
-+ > MAX_FIFO_PIPELINE_DEPTH))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("fifoDeqPipelineDepth can't be larger than %d", MAX_FIFO_PIPELINE_DEPTH));
-+ }
-+ }
-+
-+ /****************************************/
-+ /* Rx Or Offline Parsing */
-+ /****************************************/
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ {
-+ if (!p_Params->dfltFqid)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("dfltFqid must be between 1 and 2^24-1"));
-+#if defined(FM_CAPWAP_SUPPORT) && defined(FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004)
-+ if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace % 16)
-+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufferPrefixContent.manipExtraSpace has to be divisible by 16"));
-+#endif /* defined(FM_CAPWAP_SUPPORT) && ... */
-+ }
-+
-+ /****************************************/
-+ /* All ports */
-+ /****************************************/
-+ /* common BMI registers values */
-+        /* Check that the queue ID fits in 24 bits and is not 0 */
-+ if ((p_Params->errFqid & ~0x00FFFFFF) || !p_Params->errFqid)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("errFqid must be between 1 and 2^24-1"));
-+ if (p_Params->dfltFqid & ~0x00FFFFFF)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("dfltFqid must be between 1 and 2^24-1"));
-+ }
-+
-+ /****************************************/
-+ /* Rx only */
-+ /****************************************/
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ if (p_DfltConfig->rx_pri_elevation % BMI_FIFO_UNITS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("rxFifoPriElevationLevel has to be divisible by %d", BMI_FIFO_UNITS));
-+ if ((p_DfltConfig->rx_pri_elevation < BMI_FIFO_UNITS)
-+ || (p_DfltConfig->rx_pri_elevation > MAX_PORT_FIFO_SIZE))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("rxFifoPriElevationLevel has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
-+ if (p_DfltConfig->rx_fifo_thr % BMI_FIFO_UNITS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("rxFifoThreshold has to be divisible by %d", BMI_FIFO_UNITS));
-+ if ((p_DfltConfig->rx_fifo_thr < BMI_FIFO_UNITS)
-+ || (p_DfltConfig->rx_fifo_thr > MAX_PORT_FIFO_SIZE))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("rxFifoThreshold has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
-+
-+ /* Check that not larger than 16 */
-+ if (p_DfltConfig->rx_cut_end_bytes > FRAME_END_DATA_SIZE)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE));
-+
-+ if (FmSpCheckBufMargins(&p_Params->bufMargins) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+
-+ /* extra FIFO size (allowed only to Rx ports) */
-+ if (p_Params->setSizeOfFifo
-+ && (p_FmPort->fifoBufs.extra % BMI_FIFO_UNITS))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("fifoBufs.extra has to be divisible by %d", BMI_FIFO_UNITS));
-+
-+ if (p_Params->bufPoolDepletion.poolsGrpModeEnable
-+ && !p_Params->bufPoolDepletion.numOfPools)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("bufPoolDepletion.numOfPools can not be 0 when poolsGrpModeEnable=TRUE"));
-+#ifdef FM_CSI_CFED_LIMIT
-+ if (p_FmPort->fmRevInfo.majorRev == 4)
-+ {
-+ /* Check that not larger than 16 */
-+ if (p_DfltConfig->rx_cut_end_bytes + p_DfltConfig->checksum_bytes_ignore > FRAME_END_DATA_SIZE)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("cheksumLastBytesIgnore + cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE));
-+ }
-+#endif /* FM_CSI_CFED_LIMIT */
-+ }
-+
-+ /****************************************/
-+ /* Non Rx ports */
-+ /****************************************/
-+ /* extra FIFO size (allowed only to Rx ports) */
-+ else
-+ if (p_FmPort->fifoBufs.extra)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ (" No fifoBufs.extra for non Rx ports"));
-+
-+ /****************************************/
-+ /* Tx only */
-+ /****************************************/
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G))
-+ {
-+ if (p_DfltConfig->tx_fifo_min_level % BMI_FIFO_UNITS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("txFifoMinFillLevel has to be divisible by %d", BMI_FIFO_UNITS));
-+ if (p_DfltConfig->tx_fifo_min_level > (MAX_PORT_FIFO_SIZE - 256))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("txFifoMinFillLevel has to be in the range of 0 - %d", (MAX_PORT_FIFO_SIZE - 256)));
-+ if (p_DfltConfig->tx_fifo_low_comf_level % BMI_FIFO_UNITS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("txFifoLowComfLevel has to be divisible by %d", BMI_FIFO_UNITS));
-+ if ((p_DfltConfig->tx_fifo_low_comf_level < BMI_FIFO_UNITS)
-+ || (p_DfltConfig->tx_fifo_low_comf_level > MAX_PORT_FIFO_SIZE))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("txFifoLowComfLevel has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
-+
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_TX)
-+ if (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
-+ > 2)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("fifoDeqPipelineDepth for 1G can't be larger than 2"));
-+ }
-+
-+ /****************************************/
-+ /* Non Tx Ports */
-+ /****************************************/
-+    /* If discard override was selected, no frames may be discarded. */
-+ else
-+ if (p_DfltConfig->discard_override && p_Params->errorsToDiscard)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_CONFLICT,
-+ ("errorsToDiscard is not empty, but frmDiscardOverride selected (all discarded frames to be enqueued to error queue)."));
-+
-+ /****************************************/
-+ /* Rx and Offline parsing */
-+ /****************************************/
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ unusedMask = BMI_STATUS_OP_MASK_UNUSED;
-+ else
-+ unusedMask = BMI_STATUS_RX_MASK_UNUSED;
-+
-+ /* Check that no common bits with BMI_STATUS_MASK_UNUSED */
-+ if (p_Params->errorsToDiscard & unusedMask)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
-+ ("errorsToDiscard contains undefined bits"));
-+ }
-+
-+ /****************************************/
-+ /* Offline Ports */
-+ /****************************************/
-+#ifdef FM_OP_OPEN_DMA_MIN_LIMIT
-+ if ((p_FmPort->fmRevInfo.majorRev >= 6)
-+ && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ && p_Params->setNumOfOpenDmas
-+ && (p_FmPort->openDmas.num < MIN_NUM_OF_OP_DMAS))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For Offline port, openDmas.num can't be smaller than %d", MIN_NUM_OF_OP_DMAS));
-+#endif /* FM_OP_OPEN_DMA_MIN_LIMIT */
-+
-+ /****************************************/
-+ /* Offline & HC Ports */
-+ /****************************************/
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
-+ {
-+#ifndef FM_FRAME_END_PARAMS_FOR_OP
-+ if ((p_FmPort->fmRevInfo.majorRev < 6) &&
-+ (p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore != DEFAULT_notSupported))
-+            /* this indicates that the user called a config routine for a mode that is not supported in this integration */
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("cheksumLastBytesIgnore is available for Rx & Tx ports only"));
-+#endif /* !FM_FRAME_END_PARAMS_FOR_OP */
-+
-+#ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP
-+ if ((!((p_FmPort->fmRevInfo.majorRev == 4) ||
-+ (p_FmPort->fmRevInfo.majorRev >= 6))) &&
-+ (p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth != DEFAULT_notSupported))
-+            /* this indicates that the user called a config routine for a mode that is not supported in this integration */
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("fifoDeqPipelineDepth is available for Tx ports only"));
-+#endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */
-+ }
-+
-+ /****************************************/
-+ /* All ports */
-+ /****************************************/
-+ /* Check that not larger than 16 */
-+ if ((p_Params->cheksumLastBytesIgnore > FRAME_END_DATA_SIZE)
-+ && ((p_Params->cheksumLastBytesIgnore != DEFAULT_notSupported)))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("cheksumLastBytesIgnore can't be larger than %d", FRAME_END_DATA_SIZE));
-+
-+ if (FmSpCheckIntContextParams(&p_Params->intContext) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+
-+ /* common BMI registers values */
-+ if (p_Params->setNumOfTasks
-+ && ((!p_FmPort->tasks.num)
-+ || (p_FmPort->tasks.num > MAX_NUM_OF_TASKS)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("tasks.num can't be larger than %d", MAX_NUM_OF_TASKS));
-+ if (p_Params->setNumOfTasks
-+ && (p_FmPort->tasks.extra > MAX_NUM_OF_EXTRA_TASKS))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("tasks.extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS));
-+ if (p_Params->setNumOfOpenDmas
-+ && ((!p_FmPort->openDmas.num)
-+ || (p_FmPort->openDmas.num > MAX_NUM_OF_DMAS)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("openDmas.num can't be larger than %d", MAX_NUM_OF_DMAS));
-+ if (p_Params->setNumOfOpenDmas
-+ && (p_FmPort->openDmas.extra > MAX_NUM_OF_EXTRA_DMAS))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("openDmas.extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS));
-+ if (p_Params->setSizeOfFifo
-+ && (!p_FmPort->fifoBufs.num
-+ || (p_FmPort->fifoBufs.num > MAX_PORT_FIFO_SIZE)))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("fifoBufs.num has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
-+ if (p_Params->setSizeOfFifo && (p_FmPort->fifoBufs.num % BMI_FIFO_UNITS))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("fifoBufs.num has to be divisible by %d", BMI_FIFO_UNITS));
-+
-+#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+ if (p_FmPort->fmRevInfo.majorRev == 4)
-+ if (p_FmPort->p_FmPortDriverParam->deqPrefetchOption != DEFAULT_notSupported)
-+            /* this indicates that the user called a config routine for a mode that is not supported in this integration */
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("deqPrefetchOption"));
-+#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
-+
-+ return E_OK;
-+}
-+
-+static t_Error VerifySizeOfFifo(t_FmPort *p_FmPort)
-+{
-+ uint32_t minFifoSizeRequired = 0, optFifoSizeForB2B = 0;
-+
-+ /*************************/
-+ /* TX PORTS */
-+ /*************************/
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G))
-+ {
-+ minFifoSizeRequired =
-+ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
-+ + (3 * BMI_FIFO_UNITS));
-+ if (!p_FmPort->imEn)
-+ minFifoSizeRequired +=
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
-+ * BMI_FIFO_UNITS;
-+
-+ optFifoSizeForB2B = minFifoSizeRequired;
-+
-+        /* Add some margin for back-to-back capability to improve performance;
-+         this allows the hardware to pipeline a new frame DMA while the previous
-+         frame has not yet been transmitted. */
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
-+ optFifoSizeForB2B += 3 * BMI_FIFO_UNITS;
-+ else
-+ optFifoSizeForB2B += 2 * BMI_FIFO_UNITS;
-+ }
-+
-+ /*************************/
-+ /* RX IM PORTS */
-+ /*************************/
-+ else
-+ if (((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ && p_FmPort->imEn)
-+ {
-+ optFifoSizeForB2B =
-+ minFifoSizeRequired =
-+ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
-+ + (4 * BMI_FIFO_UNITS));
-+ }
-+
-+ /*************************/
-+ /* RX non-IM PORTS */
-+ /*************************/
-+ else
-+ if (((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ && !p_FmPort->imEn)
-+ {
-+ if (p_FmPort->fmRevInfo.majorRev == 4)
-+ {
-+ if (p_FmPort->rxPoolsParams.numOfPools == 1)
-+ minFifoSizeRequired = 8 * BMI_FIFO_UNITS;
-+ else
-+ minFifoSizeRequired =
-+ (uint32_t)(ROUND_UP(p_FmPort->rxPoolsParams.secondLargestBufSize, BMI_FIFO_UNITS)
-+ + (7 * BMI_FIFO_UNITS));
-+ }
-+ else
-+ {
-+#if (DPAA_VERSION >= 11)
-+ minFifoSizeRequired =
-+ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
-+ + (5 * BMI_FIFO_UNITS));
-+ /* 4 according to spec + 1 for FOF>0 */
-+#else
-+ minFifoSizeRequired = (uint32_t)
-+ (ROUND_UP(MIN(p_FmPort->maxFrameLength, p_FmPort->rxPoolsParams.largestBufSize), BMI_FIFO_UNITS)
-+ + (7*BMI_FIFO_UNITS));
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ optFifoSizeForB2B = minFifoSizeRequired;
-+
-+            /* Add some margin for back-to-back capability to improve performance;
-+             this allows the hardware to pipeline a new frame DMA while the previous
-+             frame has not yet been transmitted. */
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ optFifoSizeForB2B += 8 * BMI_FIFO_UNITS;
-+ else
-+ optFifoSizeForB2B += 3 * BMI_FIFO_UNITS;
-+ }
-+
-+    /* For O/H ports, compute the required FIFO size (verification only) */
-+ else
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
-+ {
-+#if (DPAA_VERSION >= 11)
-+ optFifoSizeForB2B =
-+ minFifoSizeRequired =
-+ (uint32_t)(ROUND_UP(p_FmPort->maxFrameLength, BMI_FIFO_UNITS)
-+ + ((p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth
-+ + 5) * BMI_FIFO_UNITS));
-+ /* 4 according to spec + 1 for FOF>0 */
-+#else
-+ optFifoSizeForB2B = minFifoSizeRequired = (uint32_t)((p_FmPort->tasks.num + 2) * BMI_FIFO_UNITS);
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ ASSERT_COND(minFifoSizeRequired > 0);
-+ ASSERT_COND(optFifoSizeForB2B >= minFifoSizeRequired);
-+
-+ /* Verify the size */
-+ if (p_FmPort->fifoBufs.num < minFifoSizeRequired)
-+ DBG(INFO,
-+ ("FIFO size is %d and should be enlarged to %d bytes",p_FmPort->fifoBufs.num, minFifoSizeRequired));
-+ else if (p_FmPort->fifoBufs.num < optFifoSizeForB2B)
-+ DBG(INFO,
-+            ("For back-to-back frame processing, FIFO size is %d and needs to be enlarged to %d bytes", p_FmPort->fifoBufs.num, optFifoSizeForB2B));
-+
-+ return E_OK;
-+}
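
To make the sizing rules above concrete, assuming BMI_FIFO_UNITS is 256 bytes: a 1G Tx port (not in independent mode) with maxFrameLength = 1522 and tx_fifo_deq_pipeline_depth = 2 needs

    minFifoSizeRequired = ROUND_UP(1522, 256) + 3*256 + 2*256
                        = 1536 + 768 + 512 = 2816 bytes

and the back-to-back margin adds another 2*256, giving optFifoSizeForB2B = 3328 bytes. A configured fifoBufs.num below these values only triggers the DBG(INFO, ...) messages above; it is not rejected.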
-+
-+static void FmPortDriverParamFree(t_FmPort *p_FmPort)
-+{
-+ if (p_FmPort->p_FmPortDriverParam)
-+ {
-+ XX_Free(p_FmPort->p_FmPortDriverParam);
-+ p_FmPort->p_FmPortDriverParam = NULL;
-+ }
-+}
-+
-+static t_Error SetExtBufferPools(t_FmPort *p_FmPort)
-+{
-+ t_FmExtPools *p_ExtBufPools = &p_FmPort->p_FmPortDriverParam->extBufPools;
-+ t_FmBufPoolDepletion *p_BufPoolDepletion =
-+ &p_FmPort->p_FmPortDriverParam->bufPoolDepletion;
-+ uint8_t orderedArray[FM_PORT_MAX_NUM_OF_EXT_POOLS];
-+ uint16_t sizesArray[BM_MAX_NUM_OF_POOLS];
-+ int i = 0, j = 0, err;
-+ struct fman_port_bpools bpools;
-+
-+ memset(&orderedArray, 0, sizeof(uint8_t) * FM_PORT_MAX_NUM_OF_EXT_POOLS);
-+ memset(&sizesArray, 0, sizeof(uint16_t) * BM_MAX_NUM_OF_POOLS);
-+ memcpy(&p_FmPort->extBufPools, p_ExtBufPools, sizeof(t_FmExtPools));
-+
-+ FmSpSetBufPoolsInAscOrderOfBufSizes(p_ExtBufPools, orderedArray,
-+ sizesArray);
-+
-+ /* Prepare flibs bpools structure */
-+ memset(&bpools, 0, sizeof(struct fman_port_bpools));
-+ bpools.count = p_ExtBufPools->numOfPoolsUsed;
-+ bpools.counters_enable = TRUE;
-+ for (i = 0; i < p_ExtBufPools->numOfPoolsUsed; i++)
-+ {
-+ bpools.bpool[i].bpid = orderedArray[i];
-+ bpools.bpool[i].size = sizesArray[orderedArray[i]];
-+ /* functionality available only for some derivatives (limited by config) */
-+ if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
-+ for (j = 0;
-+ j
-+ < p_FmPort->p_FmPortDriverParam->p_BackupBmPools->numOfBackupPools;
-+ j++)
-+ if (orderedArray[i]
-+ == p_FmPort->p_FmPortDriverParam->p_BackupBmPools->poolIds[j])
-+ {
-+ bpools.bpool[i].is_backup = TRUE;
-+ break;
-+ }
-+ }
-+
-+ /* save pools parameters for later use */
-+ p_FmPort->rxPoolsParams.numOfPools = p_ExtBufPools->numOfPoolsUsed;
-+ p_FmPort->rxPoolsParams.largestBufSize =
-+ sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 1]];
-+ if (p_ExtBufPools->numOfPoolsUsed > 1)
-+ p_FmPort->rxPoolsParams.secondLargestBufSize =
-+ sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 2]];
-+
-+ /* FMBM_RMPD reg. - pool depletion */
-+ if (p_BufPoolDepletion->poolsGrpModeEnable)
-+ {
-+ bpools.grp_bp_depleted_num = p_BufPoolDepletion->numOfPools;
-+ for (i = 0; i < BM_MAX_NUM_OF_POOLS; i++)
-+ {
-+ if (p_BufPoolDepletion->poolsToConsider[i])
-+ {
-+ for (j = 0; j < p_ExtBufPools->numOfPoolsUsed; j++)
-+ {
-+ if (i == orderedArray[j])
-+ {
-+ bpools.bpool[j].grp_bp_depleted = TRUE;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+ if (p_BufPoolDepletion->singlePoolModeEnable)
-+ {
-+ for (i = 0; i < BM_MAX_NUM_OF_POOLS; i++)
-+ {
-+ if (p_BufPoolDepletion->poolsToConsiderForSingleMode[i])
-+ {
-+ for (j = 0; j < p_ExtBufPools->numOfPoolsUsed; j++)
-+ {
-+ if (i == orderedArray[j])
-+ {
-+ bpools.bpool[j].single_bp_depleted = TRUE;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ /* fill QbbPEV */
-+ if (p_BufPoolDepletion->poolsGrpModeEnable
-+ || p_BufPoolDepletion->singlePoolModeEnable)
-+ {
-+ for (i = 0; i < FM_MAX_NUM_OF_PFC_PRIORITIES; i++)
-+ {
-+ if (p_BufPoolDepletion->pfcPrioritiesEn[i] == TRUE)
-+ {
-+ bpools.bpool[i].pfc_priorities_en = TRUE;
-+ }
-+ }
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* Issue flibs function */
-+ err = fman_port_set_bpools(&p_FmPort->port, &bpools);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_bpools"));
-+
-+ if (p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
-+ XX_Free(p_FmPort->p_FmPortDriverParam->p_BackupBmPools);
-+
-+ return E_OK;
-+}
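
A minimal sketch of filling struct fman_port_bpools by hand, mirroring what SetExtBufferPools() derives from the user parameters; the function name, BPIDs and buffer sizes are illustrative only:

	static int set_two_pools_example(struct fman_port *port)
	{
		struct fman_port_bpools bp;

		memset(&bp, 0, sizeof(bp));
		bp.count = 2;
		bp.counters_enable = TRUE;
		bp.bpool[0].bpid = 3;      /* pools listed in ascending buffer size */
		bp.bpool[0].size = 512;
		bp.bpool[1].bpid = 7;
		bp.bpool[1].size = 2048;

		return fman_port_set_bpools(port, &bp);
	}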
-+
-+static t_Error ClearPerfCnts(t_FmPort *p_FmPort)
-+{
-+ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL, 0);
-+ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL, 0);
-+ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL, 0);
-+ FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL, 0);
-+ return E_OK;
-+}
-+
-+static t_Error InitLowLevelDriver(t_FmPort *p_FmPort)
-+{
-+ t_FmPortDriverParam *p_DriverParams = p_FmPort->p_FmPortDriverParam;
-+ struct fman_port_params portParams;
-+ uint32_t tmpVal;
-+ t_Error err;
-+
-+ /* Set up flibs parameters and issue init function */
-+
-+ memset(&portParams, 0, sizeof(struct fman_port_params));
-+ portParams.discard_mask = p_DriverParams->errorsToDiscard;
-+ portParams.dflt_fqid = p_DriverParams->dfltFqid;
-+ portParams.err_fqid = p_DriverParams->errFqid;
-+ portParams.deq_sp = p_DriverParams->deqSubPortal;
-+ portParams.dont_release_buf = p_DriverParams->dontReleaseBuf;
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ portParams.err_mask = (RX_ERRS_TO_ENQ & ~portParams.discard_mask);
-+ if (!p_FmPort->imEn)
-+ {
-+ if (p_DriverParams->forwardReuseIntContext)
-+ p_DriverParams->dfltCfg.rx_fd_bits =
-+ (uint8_t)(BMI_PORT_RFNE_FRWD_RPD >> 24);
-+ }
-+ break;
-+
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ portParams.err_mask = (OP_ERRS_TO_ENQ & ~portParams.discard_mask);
-+            break;
-+
-+ default:
-+ break;
-+ }
-+
-+    tmpVal = (uint32_t)((p_FmPort->internalBufferOffset % OFFSET_UNITS) ?
-+                        (p_FmPort->internalBufferOffset / OFFSET_UNITS + 1) :
-+                        (p_FmPort->internalBufferOffset / OFFSET_UNITS));
-+ p_FmPort->internalBufferOffset = (uint8_t)(tmpVal * OFFSET_UNITS);
-+ p_DriverParams->dfltCfg.int_buf_start_margin =
-+ p_FmPort->internalBufferOffset;
-+
-+ p_DriverParams->dfltCfg.ext_buf_start_margin =
-+ p_DriverParams->bufMargins.startMargins;
-+ p_DriverParams->dfltCfg.ext_buf_end_margin =
-+ p_DriverParams->bufMargins.endMargins;
-+
-+ p_DriverParams->dfltCfg.ic_ext_offset =
-+ p_DriverParams->intContext.extBufOffset;
-+ p_DriverParams->dfltCfg.ic_int_offset =
-+ p_DriverParams->intContext.intContextOffset;
-+ p_DriverParams->dfltCfg.ic_size = p_DriverParams->intContext.size;
-+
-+ p_DriverParams->dfltCfg.stats_counters_enable = TRUE;
-+ p_DriverParams->dfltCfg.perf_counters_enable = TRUE;
-+ p_DriverParams->dfltCfg.queue_counters_enable = TRUE;
-+
-+ p_DriverParams->dfltCfg.perf_cnt_params.task_val =
-+ (uint8_t)p_FmPort->tasks.num;
-+    if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING ||
-+        p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
-+        p_DriverParams->dfltCfg.perf_cnt_params.queue_val = 0;
-+ else
-+ p_DriverParams->dfltCfg.perf_cnt_params.queue_val = 1;
-+ p_DriverParams->dfltCfg.perf_cnt_params.dma_val =
-+ (uint8_t)p_FmPort->openDmas.num;
-+ p_DriverParams->dfltCfg.perf_cnt_params.fifo_val = p_FmPort->fifoBufs.num;
-+
-+ if (0
-+ != fman_port_init(&p_FmPort->port, &p_DriverParams->dfltCfg,
-+ &portParams))
-+ RETURN_ERROR(MAJOR, E_NO_DEVICE, ("fman_port_init"));
-+
-+ if (p_FmPort->imEn && ((err = FmPortImInit(p_FmPort)) != E_OK))
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ else
-+ {
-+ // from QMIInit
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ {
-+ if (p_DriverParams->deqPrefetchOption == e_FM_PORT_DEQ_NO_PREFETCH)
-+ FmSetPortPreFetchConfiguration(p_FmPort->h_Fm, p_FmPort->portId,
-+ FALSE);
-+ else
-+ FmSetPortPreFetchConfiguration(p_FmPort->h_Fm, p_FmPort->portId,
-+ TRUE);
-+ }
-+ }
-+    /* The code below is a trick so the FM will neither release the buffer
-+       to BM nor try to enqueue the frame to QM */
-+ if (((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX)) && (!p_FmPort->imEn))
-+ {
-+ if (!p_DriverParams->dfltFqid && p_DriverParams->dontReleaseBuf)
-+ {
-+ /* override fmbm_tcfqid 0 with a false non-0 value. This will force FM to
-+ * act according to tfene. Otherwise, if fmbm_tcfqid is 0 the FM will release
-+ * buffers to BM regardless of fmbm_tfene
-+ */
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF);
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->tx.fmbm_tfene,
-+ NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE);
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+static bool CheckRxBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter)
-+{
-+ UNUSED(p_FmPort);
-+
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_CYCLE):
-+ case (e_FM_PORT_COUNTERS_TASK_UTIL):
-+ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
-+ case (e_FM_PORT_COUNTERS_DMA_UTIL):
-+ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
-+ case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION):
-+ case (e_FM_PORT_COUNTERS_FRAME):
-+ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_BAD_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
-+ case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD):
-+ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
-+ case (e_FM_PORT_COUNTERS_PREPARE_TO_ENQUEUE_COUNTER):
-+ return TRUE;
-+ default:
-+ return FALSE;
-+ }
-+}
-+
-+static bool CheckTxBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter)
-+{
-+ UNUSED(p_FmPort);
-+
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_CYCLE):
-+ case (e_FM_PORT_COUNTERS_TASK_UTIL):
-+ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
-+ case (e_FM_PORT_COUNTERS_DMA_UTIL):
-+ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
-+ case (e_FM_PORT_COUNTERS_FRAME):
-+ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
-+ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
-+ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
-+ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
-+ return TRUE;
-+ default:
-+ return FALSE;
-+ }
-+}
-+
-+static bool CheckOhBmiCounter(t_FmPort *p_FmPort, e_FmPortCounters counter)
-+{
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_CYCLE):
-+ case (e_FM_PORT_COUNTERS_TASK_UTIL):
-+ case (e_FM_PORT_COUNTERS_DMA_UTIL):
-+ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
-+ case (e_FM_PORT_COUNTERS_FRAME):
-+ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
-+ case (e_FM_PORT_COUNTERS_WRED_DISCARD):
-+ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
-+ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
-+ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
-+ return TRUE;
-+ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
-+ return FALSE;
-+ else
-+ return TRUE;
-+ default:
-+ return FALSE;
-+ }
-+}
-+
-+static t_Error BmiPortCheckAndGetCounterType(
-+ t_FmPort *p_FmPort, e_FmPortCounters counter,
-+ enum fman_port_stats_counters *p_StatsType,
-+ enum fman_port_perf_counters *p_PerfType, bool *p_IsStats)
-+{
-+ volatile uint32_t *p_Reg;
-+ bool isValid;
-+
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_Reg = &p_FmPort->port.bmi_regs->rx.fmbm_rstc;
-+ isValid = CheckRxBmiCounter(p_FmPort, counter);
-+ break;
-+ case (e_FM_PORT_TYPE_TX_10G):
-+ case (e_FM_PORT_TYPE_TX):
-+ p_Reg = &p_FmPort->port.bmi_regs->tx.fmbm_tstc;
-+ isValid = CheckTxBmiCounter(p_FmPort, counter);
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
-+ p_Reg = &p_FmPort->port.bmi_regs->oh.fmbm_ostc;
-+ isValid = CheckOhBmiCounter(p_FmPort, counter);
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported port type"));
-+ }
-+
-+ if (!isValid)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE,
-+ ("Requested counter is not available for this port type"));
-+
-+ /* check that counters are enabled */
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_CYCLE):
-+ case (e_FM_PORT_COUNTERS_TASK_UTIL):
-+ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
-+ case (e_FM_PORT_COUNTERS_DMA_UTIL):
-+ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
-+ case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION):
-+ /* performance counters - may be read when disabled */
-+ *p_IsStats = FALSE;
-+ break;
-+ case (e_FM_PORT_COUNTERS_FRAME):
-+ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
-+ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
-+ case (e_FM_PORT_COUNTERS_RX_BAD_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
-+ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
-+ case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD):
-+ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
-+ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
-+ case (e_FM_PORT_COUNTERS_WRED_DISCARD):
-+ *p_IsStats = TRUE;
-+ if (!(GET_UINT32(*p_Reg) & BMI_COUNTERS_EN))
-+ RETURN_ERROR(MINOR, E_INVALID_STATE,
-+ ("Requested counter was not enabled"));
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ /* Set counter */
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_CYCLE):
-+ *p_PerfType = E_FMAN_PORT_PERF_CNT_CYCLE;
-+ break;
-+ case (e_FM_PORT_COUNTERS_TASK_UTIL):
-+ *p_PerfType = E_FMAN_PORT_PERF_CNT_TASK_UTIL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_QUEUE_UTIL):
-+ *p_PerfType = E_FMAN_PORT_PERF_CNT_QUEUE_UTIL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DMA_UTIL):
-+ *p_PerfType = E_FMAN_PORT_PERF_CNT_DMA_UTIL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_FIFO_UTIL):
-+ *p_PerfType = E_FMAN_PORT_PERF_CNT_FIFO_UTIL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION):
-+ *p_PerfType = E_FMAN_PORT_PERF_CNT_RX_PAUSE;
-+ break;
-+ case (e_FM_PORT_COUNTERS_FRAME):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_FRAME;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DISCARD_FRAME):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_DISCARD;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DEALLOC_BUF):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_DEALLOC_BUF;
-+ break;
-+ case (e_FM_PORT_COUNTERS_RX_BAD_FRAME):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME;
-+ break;
-+ case (e_FM_PORT_COUNTERS_RX_LARGE_FRAME):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME;
-+ break;
-+ case (e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF;
-+ break;
-+ case (e_FM_PORT_COUNTERS_RX_FILTER_FRAME):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_FILTERED_FRAME;
-+ break;
-+ case (e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_DMA_ERR;
-+ break;
-+ case (e_FM_PORT_COUNTERS_WRED_DISCARD):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_WRED_DISCARD;
-+ break;
-+ case (e_FM_PORT_COUNTERS_LENGTH_ERR):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_LEN_ERR;
-+ break;
-+ case (e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT):
-+ *p_StatsType = E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error AdditionalPrsParams(t_FmPort *p_FmPort,
-+ t_FmPcdPrsAdditionalHdrParams *p_HdrParams,
-+ uint32_t *p_SoftSeqAttachReg)
-+{
-+ uint8_t hdrNum, Ipv4HdrNum;
-+ u_FmPcdHdrPrsOpts *p_prsOpts;
-+ uint32_t tmpReg = *p_SoftSeqAttachReg, tmpPrsOffset;
-+
-+ if (IS_PRIVATE_HEADER(p_HdrParams->hdr)
-+ || IS_SPECIAL_HEADER(p_HdrParams->hdr))
-+ RETURN_ERROR(
-+ MAJOR, E_NOT_SUPPORTED,
-+ ("No additional parameters for private or special headers."));
-+
-+ if (p_HdrParams->errDisable)
-+ tmpReg |= PRS_HDR_ERROR_DIS;
-+
-+ /* Set parser options */
-+ if (p_HdrParams->usePrsOpts)
-+ {
-+ p_prsOpts = &p_HdrParams->prsOpts;
-+ switch (p_HdrParams->hdr)
-+ {
-+ case (HEADER_TYPE_MPLS):
-+ if (p_prsOpts->mplsPrsOptions.labelInterpretationEnable)
-+ tmpReg |= PRS_HDR_MPLS_LBL_INTER_EN;
-+ hdrNum = GetPrsHdrNum(p_prsOpts->mplsPrsOptions.nextParse);
-+ if (hdrNum == ILLEGAL_HDR_NUM)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+ Ipv4HdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
-+ if (hdrNum < Ipv4HdrNum)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+                                 ("Header must be equal to or higher than IPv4"));
-+ tmpReg |= ((uint32_t)hdrNum * PRS_HDR_ENTRY_SIZE)
-+ << PRS_HDR_MPLS_NEXT_HDR_SHIFT;
-+ break;
-+ case (HEADER_TYPE_PPPoE):
-+ if (p_prsOpts->pppoePrsOptions.enableMTUCheck)
-+ tmpReg |= PRS_HDR_PPPOE_MTU_CHECK_EN;
-+ break;
-+ case (HEADER_TYPE_IPv6):
-+ if (p_prsOpts->ipv6PrsOptions.routingHdrEnable)
-+ tmpReg |= PRS_HDR_IPV6_ROUTE_HDR_EN;
-+ break;
-+ case (HEADER_TYPE_TCP):
-+ if (p_prsOpts->tcpPrsOptions.padIgnoreChecksum)
-+ tmpReg |= PRS_HDR_TCP_PAD_REMOVAL;
-+ else
-+ tmpReg &= ~PRS_HDR_TCP_PAD_REMOVAL;
-+ break;
-+ case (HEADER_TYPE_UDP):
-+ if (p_prsOpts->udpPrsOptions.padIgnoreChecksum)
-+ tmpReg |= PRS_HDR_UDP_PAD_REMOVAL;
-+ else
-+ tmpReg &= ~PRS_HDR_UDP_PAD_REMOVAL;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid header"));
-+ }
-+ }
-+
-+    /* set software parsing (address is divided by 2 since the parser uses 2-byte access) */
-+ if (p_HdrParams->swPrsEnable)
-+ {
-+ tmpPrsOffset = FmPcdGetSwPrsOffset(p_FmPort->h_FmPcd, p_HdrParams->hdr,
-+ p_HdrParams->indexPerHdr);
-+ if (tmpPrsOffset == ILLEGAL_BASE)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+ tmpReg |= (PRS_HDR_SW_PRS_EN | tmpPrsOffset);
-+ }
-+ *p_SoftSeqAttachReg = tmpReg;
-+
-+ return E_OK;
-+}
-+
-+static uint32_t GetPortSchemeBindParams(
-+ t_Handle h_FmPort, t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ uint32_t walking1Mask = 0x80000000, tmp;
-+ uint8_t idx = 0;
-+
-+ p_SchemeBind->netEnvId = p_FmPort->netEnvId;
-+ p_SchemeBind->hardwarePortId = p_FmPort->hardwarePortId;
-+ p_SchemeBind->useClsPlan = p_FmPort->useClsPlan;
-+ p_SchemeBind->numOfSchemes = 0;
-+ tmp = p_FmPort->schemesPerPortVector;
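-+    /* Scan the scheme vector MSB-first: each set bit marks a bound scheme,
-+     * and its bit position (counted from the MSB) is the physical scheme id. */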
-+ if (tmp)
-+ {
-+ while (tmp)
-+ {
-+ if (tmp & walking1Mask)
-+ {
-+ p_SchemeBind->schemesIds[p_SchemeBind->numOfSchemes] = idx;
-+ p_SchemeBind->numOfSchemes++;
-+ tmp &= ~walking1Mask;
-+ }
-+ walking1Mask >>= 1;
-+ idx++;
-+ }
-+ }
-+
-+ return tmp;
-+}
-+
-+static void FmPortCheckNApplyMacsec(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ volatile uint32_t *p_BmiCfgReg = NULL;
-+ uint32_t macsecEn = BMI_PORT_CFG_EN_MACSEC;
-+ uint32_t lcv, walking1Mask = 0x80000000;
-+ uint8_t cnt = 0;
-+
-+ ASSERT_COND(p_FmPort);
-+ ASSERT_COND(p_FmPort->h_FmPcd);
-+ ASSERT_COND(!p_FmPort->p_FmPortDriverParam);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ return;
-+
-+ p_BmiCfgReg = &p_FmPort->port.bmi_regs->rx.fmbm_rcfg;
-+ /* get LCV for MACSEC */
-+ if ((lcv = FmPcdGetMacsecLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId))
-+ != 0)
-+ {
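-+        /* Count how many bits the first set LCV bit lies below the MSB;
-+         * that count is written into the MS_SEL field of the Rx port config. */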
-+ while (!(lcv & walking1Mask))
-+ {
-+ cnt++;
-+ walking1Mask >>= 1;
-+ }
-+
-+ macsecEn |= (uint32_t)cnt << BMI_PORT_CFG_MS_SEL_SHIFT;
-+ WRITE_UINT32(*p_BmiCfgReg, GET_UINT32(*p_BmiCfgReg) | macsecEn);
-+ }
-+}
-+
-+static t_Error SetPcd(t_FmPort *p_FmPort, t_FmPortPcdParams *p_PcdParams)
-+{
-+ t_Error err = E_OK;
-+ uint32_t tmpReg;
-+ volatile uint32_t *p_BmiNia = NULL;
-+ volatile uint32_t *p_BmiPrsNia = NULL;
-+ volatile uint32_t *p_BmiPrsStartOffset = NULL;
-+ volatile uint32_t *p_BmiInitPrsResult = NULL;
-+ volatile uint32_t *p_BmiCcBase = NULL;
-+ uint16_t hdrNum, L3HdrNum, greHdrNum;
-+ int i;
-+ bool isEmptyClsPlanGrp;
-+ uint32_t tmpHxs[FM_PCD_PRS_NUM_OF_HDRS];
-+ uint16_t absoluteProfileId;
-+ uint8_t physicalSchemeId;
-+ uint32_t ccTreePhysOffset;
-+ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
-+ uint32_t initialSwPrs = 0;
-+
-+ ASSERT_COND(p_FmPort);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+                     ("available for non-independent mode ports only"));
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+
-+ p_FmPort->netEnvId = FmPcdGetNetEnvId(p_PcdParams->h_NetEnv);
-+
-+ p_FmPort->pcdEngines = 0;
-+
-+ /* initialize p_FmPort->pcdEngines field in port's structure */
-+ switch (p_PcdParams->pcdSupport)
-+ {
-+ case (e_FM_PORT_PCD_SUPPORT_NONE):
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("No PCD configuration required if e_FM_PORT_PCD_SUPPORT_NONE selected"));
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_ONLY):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PLCR_ONLY):
-+ p_FmPort->pcdEngines |= FM_PCD_PLCR;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ p_FmPort->pcdEngines |= FM_PCD_PLCR;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ p_FmPort->pcdEngines |= FM_PCD_KG;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+ p_FmPort->pcdEngines |= FM_PCD_KG;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ p_FmPort->pcdEngines |= FM_PCD_KG;
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+ p_FmPort->pcdEngines |= FM_PCD_PLCR;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+ p_FmPort->pcdEngines |= FM_PCD_PLCR;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR):
-+ p_FmPort->pcdEngines |= FM_PCD_PRS;
-+ p_FmPort->pcdEngines |= FM_PCD_KG;
-+ p_FmPort->pcdEngines |= FM_PCD_PLCR;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_CC_ONLY):
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+ break;
-+#ifdef FM_CAPWAP_SUPPORT
-+ case (e_FM_PORT_PCD_SUPPORT_CC_AND_KG):
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+ p_FmPort->pcdEngines |= FM_PCD_KG;
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR):
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+ p_FmPort->pcdEngines |= FM_PCD_KG;
-+ p_FmPort->pcdEngines |= FM_PCD_PLCR;
-+ break;
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid pcdSupport"));
-+ }
-+
-+ if ((p_FmPort->pcdEngines & FM_PCD_PRS)
-+ && (p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams
-+ > FM_PCD_PRS_NUM_OF_HDRS))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("Port parser numOfHdrsWithAdditionalParams may not exceed %d", FM_PCD_PRS_NUM_OF_HDRS));
-+
-+ /* check that parameters exist for each and only each defined engine */
-+ if ((!!(p_FmPort->pcdEngines & FM_PCD_PRS) != !!p_PcdParams->p_PrsParams)
-+ || (!!(p_FmPort->pcdEngines & FM_PCD_KG)
-+ != !!p_PcdParams->p_KgParams)
-+ || (!!(p_FmPort->pcdEngines & FM_PCD_CC)
-+ != !!p_PcdParams->p_CcParams))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("PCD initialization structure is not consistent with pcdSupport"));
-+
-+ /* get PCD registers pointers */
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
-+ p_BmiPrsNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne;
-+ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso;
-+ p_BmiInitPrsResult = &p_FmPort->port.bmi_regs->rx.fmbm_rprai[0];
-+ p_BmiCcBase = &p_FmPort->port.bmi_regs->rx.fmbm_rccb;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
-+ p_BmiPrsNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne;
-+ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso;
-+ p_BmiInitPrsResult = &p_FmPort->port.bmi_regs->oh.fmbm_oprai[0];
-+ p_BmiCcBase = &p_FmPort->port.bmi_regs->oh.fmbm_occb;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ }
-+
-+ /* set PCD port parameter */
-+ if (p_FmPort->pcdEngines & FM_PCD_CC)
-+ {
-+ err = FmPcdCcBindTree(p_FmPort->h_FmPcd, p_PcdParams,
-+ p_PcdParams->p_CcParams->h_CcTree,
-+ &ccTreePhysOffset, p_FmPort);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset);
-+ p_FmPort->ccTreeId = p_PcdParams->p_CcParams->h_CcTree;
-+ }
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_KG)
-+ {
-+ if (p_PcdParams->p_KgParams->numOfSchemes == 0)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("For ports using Keygen, at least one scheme must be bound. "));
-+
-+ err = FmPcdKgSetOrBindToClsPlanGrp(p_FmPort->h_FmPcd,
-+ p_FmPort->hardwarePortId,
-+ p_FmPort->netEnvId,
-+ p_FmPort->optArray,
-+ &p_FmPort->clsPlanGrpId,
-+ &isEmptyClsPlanGrp);
-+ if (err)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("FmPcdKgSetOrBindToClsPlanGrp failed. "));
-+
-+ p_FmPort->useClsPlan = !isEmptyClsPlanGrp;
-+
-+ schemeBind.netEnvId = p_FmPort->netEnvId;
-+ schemeBind.hardwarePortId = p_FmPort->hardwarePortId;
-+ schemeBind.numOfSchemes = p_PcdParams->p_KgParams->numOfSchemes;
-+ schemeBind.useClsPlan = p_FmPort->useClsPlan;
-+
-+ /* for each scheme */
-+ for (i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++)
-+ {
-+ ASSERT_COND(p_PcdParams->p_KgParams->h_Schemes[i]);
-+ physicalSchemeId = FmPcdKgGetSchemeId(
-+ p_PcdParams->p_KgParams->h_Schemes[i]);
-+ schemeBind.schemesIds[i] = physicalSchemeId;
-+ /* build vector */
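-+            /* (scheme id N occupies bit (31 - N), matching the MSB-first
-+             * scan in GetPortSchemeBindParams) */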
-+ p_FmPort->schemesPerPortVector |= 1
-+ << (31 - (uint32_t)physicalSchemeId);
-+#if (DPAA_VERSION >= 11)
-+            /* Because VSPE is defined per port, the whole PCD path must follow it:
-+             * if VSPE is not set on the port, it cannot be set on any scheme bound to this port. */
-+ if (!p_FmPort->vspe
-+ && FmPcdKgGetVspe((p_PcdParams->p_KgParams->h_Schemes[i])))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("VSPE is not at port level"));
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /***************************/
-+ /* configure NIA after BMI */
-+ /***************************/
-+ /* rfne may contain FDCS bits, so first we read them. */
-+ p_FmPort->savedBmiNia = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK;
-+
-+ /* If policer is used directly after BMI or PRS */
-+ if ((p_FmPort->pcdEngines & FM_PCD_PLCR)
-+ && ((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PLCR_ONLY)
-+ || (p_PcdParams->pcdSupport
-+ == e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR)))
-+ {
-+ if (!p_PcdParams->p_PlcrParams->h_Profile)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Profile should be initialized"));
-+
-+ absoluteProfileId = (uint16_t)FmPcdPlcrProfileGetAbsoluteId(
-+ p_PcdParams->p_PlcrParams->h_Profile);
-+
-+ if (!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Private port profile not valid."));
-+
-+ tmpReg = (uint32_t)(absoluteProfileId | NIA_PLCR_ABSOLUTE);
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */
-+ /* update BMI HPNIA */
-+ WRITE_UINT32(*p_BmiPrsNia, (uint32_t)(NIA_ENG_PLCR | tmpReg));
-+ else
-+ /* e_FM_PCD_SUPPORT_PLCR_ONLY */
-+ /* update BMI NIA */
-+ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PLCR);
-+ }
-+
-+ /* if CC is used directly after BMI */
-+ if ((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_ONLY)
-+#ifdef FM_CAPWAP_SUPPORT
-+ || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG)
-+ || (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR)
-+#endif /* FM_CAPWAP_SUPPORT */
-+ )
-+ {
-+ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_OPERATION,
-+ ("e_FM_PORT_PCD_SUPPORT_CC_xx available for offline parsing ports only"));
-+ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC);
-+ /* check that prs start offset == RIM[FOF] */
-+ }
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_PRS)
-+ {
-+ ASSERT_COND(p_PcdParams->p_PrsParams);
-+#if (DPAA_VERSION >= 11)
-+ if (p_PcdParams->p_PrsParams->firstPrsHdr == HEADER_TYPE_CAPWAP)
-+ hdrNum = OFFLOAD_SW_PATCH_CAPWAP_LABEL;
-+ else
-+ {
-+#endif /* (DPAA_VERSION >= 11) */
-+ /* if PRS is used it is always first */
-+ hdrNum = GetPrsHdrNum(p_PcdParams->p_PrsParams->firstPrsHdr);
-+ if (hdrNum == ILLEGAL_HDR_NUM)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unsupported header."));
-+#if (DPAA_VERSION >= 11)
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+ p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PRS | (uint32_t)(hdrNum));
-+ /* set after parser NIA */
-+ tmpReg = 0;
-+ switch (p_PcdParams->pcdSupport)
-+ {
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_ONLY):
-+ WRITE_UINT32(*p_BmiPrsNia,
-+ GET_NIA_BMI_AC_ENQ_FRAME(p_FmPort->h_FmPcd));
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC):
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR):
-+ tmpReg = NIA_KG_CC_EN;
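-+                /* no break: fall through to the KG cases below to set the
-+                 * direct scheme (if any) and write the post-parser NIA */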
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG):
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR):
-+ if (p_PcdParams->p_KgParams->directScheme)
-+ {
-+ physicalSchemeId = FmPcdKgGetSchemeId(
-+ p_PcdParams->p_KgParams->h_DirectScheme);
-+ /* check that this scheme was bound to this port */
-+ for (i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++)
-+ if (p_PcdParams->p_KgParams->h_DirectScheme
-+ == p_PcdParams->p_KgParams->h_Schemes[i])
-+ break;
-+ if (i == p_PcdParams->p_KgParams->numOfSchemes)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("Direct scheme is not one of the port selected schemes."));
-+ tmpReg |= (uint32_t)(NIA_KG_DIRECT | physicalSchemeId);
-+ }
-+ WRITE_UINT32(*p_BmiPrsNia, NIA_ENG_KG | tmpReg);
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC):
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR):
-+ WRITE_UINT32(*p_BmiPrsNia,
-+ (uint32_t)(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
-+ break;
-+ case (e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR):
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid PCD support"));
-+ }
-+
-+ /* set start parsing offset */
-+ WRITE_UINT32(*p_BmiPrsStartOffset,
-+ p_PcdParams->p_PrsParams->parsingOffset);
-+
-+ /************************************/
-+ /* Parser port parameters */
-+ /************************************/
-+ /* stop before configuring */
-+ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP);
-+ /* wait for parser to be in idle state */
-+ while (GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE)
-+ ;
-+
-+ /* set soft seq attachment register */
-+ memset(tmpHxs, 0, FM_PCD_PRS_NUM_OF_HDRS * sizeof(uint32_t));
-+
-+ /* set protocol options */
-+ for (i = 0; p_FmPort->optArray[i]; i++)
-+ switch (p_FmPort->optArray[i])
-+ {
-+ case (ETH_BROADCAST):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_ETH);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_ETH_BC_SHIFT;
-+ break;
-+ case (ETH_MULTICAST):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_ETH);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_ETH_MC_SHIFT;
-+ break;
-+ case (VLAN_STACKED):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_VLAN);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_VLAN_STACKED_SHIFT;
-+ break;
-+ case (MPLS_STACKED):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_MPLS);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_MPLS_STACKED_SHIFT;
-+ break;
-+ case (IPV4_BROADCAST_1):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_1_BC_SHIFT;
-+ break;
-+ case (IPV4_MULTICAST_1):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_1_MC_SHIFT;
-+ break;
-+ case (IPV4_UNICAST_2):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_2_UC_SHIFT;
-+ break;
-+ case (IPV4_MULTICAST_BROADCAST_2):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV4_2_MC_BC_SHIFT;
-+ break;
-+ case (IPV6_MULTICAST_1):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_1_MC_SHIFT;
-+ break;
-+ case (IPV6_UNICAST_2):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_2_UC_SHIFT;
-+ break;
-+ case (IPV6_MULTICAST_2):
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ tmpHxs[hdrNum] |= (i + 1) << PRS_HDR_IPV6_2_MC_SHIFT;
-+ break;
-+ }
-+
-+ if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId,
-+ HEADER_TYPE_UDP_ENCAP_ESP))
-+ {
-+ if (p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams == FM_PCD_PRS_NUM_OF_HDRS)
-+ RETURN_ERROR(
-+ MINOR, E_INVALID_VALUE,
-+ ("If HEADER_TYPE_UDP_ENCAP_ESP is used, numOfHdrsWithAdditionalParams may be up to FM_PCD_PRS_NUM_OF_HDRS - 1"));
-+
-+ p_PcdParams->p_PrsParams->additionalParams[p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].hdr =
-+ HEADER_TYPE_UDP;
-+ p_PcdParams->p_PrsParams->additionalParams[p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].swPrsEnable =
-+ TRUE;
-+ p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams++;
-+ }
-+
-+ /* set MPLS default next header - HW reset workaround */
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_MPLS);
-+ tmpHxs[hdrNum] |= PRS_HDR_MPLS_LBL_INTER_EN;
-+ L3HdrNum = GetPrsHdrNum(HEADER_TYPE_USER_DEFINED_L3);
-+ tmpHxs[hdrNum] |= (uint32_t)L3HdrNum << PRS_HDR_MPLS_NEXT_HDR_SHIFT;
-+
-+ /* for GRE, disable errors */
-+ greHdrNum = GetPrsHdrNum(HEADER_TYPE_GRE);
-+ tmpHxs[greHdrNum] |= PRS_HDR_ERROR_DIS;
-+
-+ /* For UDP remove PAD from L4 checksum calculation */
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_UDP);
-+ tmpHxs[hdrNum] |= PRS_HDR_UDP_PAD_REMOVAL;
-+ /* For TCP remove PAD from L4 checksum calculation */
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_TCP);
-+ tmpHxs[hdrNum] |= PRS_HDR_TCP_PAD_REMOVAL;
-+
-+ /* config additional params for specific headers */
-+ for (i = 0; i < p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams;
-+ i++)
-+ {
-+ /* case for using sw parser as the initial NIA address, before
-+ * HW parsing
-+ */
-+ if ((p_PcdParams->p_PrsParams->additionalParams[i].hdr == HEADER_TYPE_NONE) &&
-+ p_PcdParams->p_PrsParams->additionalParams[i].swPrsEnable)
-+ {
-+ initialSwPrs = FmPcdGetSwPrsOffset(p_FmPort->h_FmPcd, HEADER_TYPE_NONE,
-+ p_PcdParams->p_PrsParams->additionalParams[i].indexPerHdr);
-+ if (initialSwPrs == ILLEGAL_BASE)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+
-+ /* clear parser first HXS */
-+ p_FmPort->savedBmiNia &= ~BMI_RFNE_HXS_MASK; /* 0x000000FF */
-+ /* rewrite with soft parser start */
-+ p_FmPort->savedBmiNia |= initialSwPrs;
-+ continue;
-+ }
-+
-+ hdrNum =
-+ GetPrsHdrNum(p_PcdParams->p_PrsParams->additionalParams[i].hdr);
-+ if (hdrNum == ILLEGAL_HDR_NUM)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+ if (hdrNum == NO_HDR_NUM)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("Private headers may not use additional parameters"));
-+
-+ err = AdditionalPrsParams(
-+ p_FmPort, &p_PcdParams->p_PrsParams->additionalParams[i],
-+ &tmpHxs[hdrNum]);
-+ if (err)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+ }
-+
-+ /* Check if ip-reassembly port - need to link sw-parser code */
-+ if (p_FmPort->h_IpReassemblyManip)
-+ {
-+ /* link to sw parser code for IP Frag - only if no other code is applied. */
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);
-+ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
-+ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv4_IPR_LABEL);
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
-+ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPR_LABEL);
-+ } else {
-+ if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId, HEADER_TYPE_UDP_LITE))
-+ {
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
-+ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL);
-+ } else if ((FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)
-+ && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)))
-+ {
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
-+ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL);
-+ }
-+ }
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId,
-+ HEADER_TYPE_UDP_LITE))
-+ {
-+ /* link to sw parser code for udp lite - only if no other code is applied. */
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ if (!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
-+ tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | UDP_LITE_SW_PATCH_LABEL);
-+ }
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+ for (i = 0; i < FM_PCD_PRS_NUM_OF_HDRS; i++)
-+ {
-+ /* For all header set LCV as taken from netEnv*/
-+ WRITE_UINT32(
-+ p_FmPort->p_FmPortPrsRegs->hdrs[i].lcv,
-+ FmPcdGetLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId, (uint8_t)i));
-+ /* set HXS register according to default+Additional params+protocol options */
-+ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[i].softSeqAttach,
-+ tmpHxs[i]);
-+ }
-+
-+ /* set tpid. */
-+ tmpReg = PRS_TPID_DFLT;
-+ if (p_PcdParams->p_PrsParams->setVlanTpid1)
-+ {
-+ tmpReg &= PRS_TPID2_MASK;
-+ tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid1
-+ << PRS_PCTPID_SHIFT;
-+ }
-+ if (p_PcdParams->p_PrsParams->setVlanTpid2)
-+ {
-+ tmpReg &= PRS_TPID1_MASK;
-+ tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid2;
-+    }
-+    WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pctpid, tmpReg);
-+
-+ /* enable parser */
-+ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, 0);
-+
-+ if (p_PcdParams->p_PrsParams->prsResultPrivateInfo)
-+ p_FmPort->privateInfo =
-+ p_PcdParams->p_PrsParams->prsResultPrivateInfo;
-+
-+ } /* end parser */
-+ else {
-+ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)
-+ && (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ {
-+ hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv6);
-+ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[hdrNum].softSeqAttach,
-+ (PRS_HDR_SW_PRS_EN | OFFLOAD_SW_PATCH_IPv6_IPF_LABEL));
-+ }
-+
-+ WRITE_UINT32(*p_BmiPrsStartOffset, 0);
-+
-+ p_FmPort->privateInfo = 0;
-+ }
-+
-+ FmPortCheckNApplyMacsec(p_FmPort);
-+
-+ WRITE_UINT32(
-+ *p_BmiPrsStartOffset,
-+ GET_UINT32(*p_BmiPrsStartOffset) + p_FmPort->internalBufferOffset);
-+
-+ /* set initial parser result - used for all engines */
-+ for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; i++)
-+ {
-+ if (!i)
-+ WRITE_UINT32(
-+ *(p_BmiInitPrsResult),
-+ (uint32_t)(((uint32_t)p_FmPort->privateInfo << BMI_PR_PORTID_SHIFT) | BMI_PRS_RESULT_HIGH));
-+ else
-+ {
-+ if (i < FM_PORT_PRS_RESULT_NUM_OF_WORDS / 2)
-+ WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_HIGH);
-+ else
-+ WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_LOW);
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error DeletePcd(t_FmPort *p_FmPort)
-+{
-+ t_Error err = E_OK;
-+ volatile uint32_t *p_BmiNia = NULL;
-+ volatile uint32_t *p_BmiPrsStartOffset = NULL;
-+
-+ ASSERT_COND(p_FmPort);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+                     ("available for non-independent mode ports only"));
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+
-+ if (!p_FmPort->pcdEngines)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("called for non PCD port"));
-+
-+ /* get PCD registers pointers */
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
-+ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
-+ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ }
-+
-+ if ((GET_UINT32(*p_BmiNia) & GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME())
-+ != GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME())
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+                     ("port has to be detached previously"));
-+
-+ WRITE_UINT32(*p_BmiPrsStartOffset, 0);
-+
-+ /* "cut" PCD out of the port's flow - go to BMI */
-+ /* WRITE_UINT32(*p_BmiNia, (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)); */
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_PRS)
-+ {
-+ /* stop parser */
-+ WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP);
-+ /* wait for parser to be in idle state */
-+ while (GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE)
-+ ;
-+ }
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_KG)
-+ {
-+ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
-+
-+ /* unbind all schemes */
-+ p_FmPort->schemesPerPortVector = GetPortSchemeBindParams(p_FmPort,
-+ &schemeBind);
-+
-+ err = FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ err = FmPcdKgDeleteOrUnbindPortToClsPlanGrp(p_FmPort->h_FmPcd,
-+ p_FmPort->hardwarePortId,
-+ p_FmPort->clsPlanGrpId);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ p_FmPort->useClsPlan = FALSE;
-+ }
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_CC)
-+ {
-+ /* unbind - we need to get the treeId too */
-+ err = FmPcdCcUnbindTree(p_FmPort->h_FmPcd, p_FmPort->ccTreeId);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ p_FmPort->pcdEngines = 0;
-+
-+ return E_OK;
-+}
-+
-+static t_Error AttachPCD(t_FmPort *p_FmPort)
-+{
-+ volatile uint32_t *p_BmiNia = NULL;
-+
-+ ASSERT_COND(p_FmPort);
-+
-+ /* get PCD registers pointers */
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
-+ else
-+ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
-+
-+ /* check that current NIA is BMI to BMI */
-+ if ((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK)
-+ != GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME())
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("may be called only for ports in BMI-to-BMI state."));
-+
-+ if (p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)
-+ if (FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 1,
-+ p_FmPort->orFmanCtrl) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (p_FmPort->requiredAction & UPDATE_NIA_CMNE)
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ocmne,
-+ p_FmPort->savedBmiCmne);
-+ else
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcmne,
-+ p_FmPort->savedBmiCmne);
-+ }
-+
-+ if (p_FmPort->requiredAction & UPDATE_NIA_PNEN)
-+ WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen,
-+ p_FmPort->savedQmiPnen);
-+
-+ if (p_FmPort->requiredAction & UPDATE_NIA_FENE)
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofene,
-+ p_FmPort->savedBmiFene);
-+ else
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfene,
-+ p_FmPort->savedBmiFene);
-+ }
-+
-+ if (p_FmPort->requiredAction & UPDATE_NIA_FPNE)
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofpne,
-+ p_FmPort->savedBmiFpne);
-+ else
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfpne,
-+ p_FmPort->savedBmiFpne);
-+ }
-+
-+ if (p_FmPort->requiredAction & UPDATE_OFP_DPTE)
-+ {
-+ ASSERT_COND(p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING);
-+
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofp,
-+ p_FmPort->savedBmiOfp);
-+ }
-+
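-+    /* finally, switch the port NIA from the BMI-to-BMI enqueue to the
-+     * PCD NIA saved by SetPcd() */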
-+ WRITE_UINT32(*p_BmiNia, p_FmPort->savedBmiNia);
-+
-+ if (p_FmPort->requiredAction & UPDATE_NIA_PNDN)
-+ {
-+ p_FmPort->origNonRxQmiRegsPndn =
-+ GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn);
-+ WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn,
-+ p_FmPort->savedNonRxQmiRegsPndn);
-+ }
-+
-+ return E_OK;
-+}
-+
-+static t_Error DetachPCD(t_FmPort *p_FmPort)
-+{
-+ volatile uint32_t *p_BmiNia = NULL;
-+
-+ ASSERT_COND(p_FmPort);
-+
-+ /* get PCD registers pointers */
-+ if (p_FmPort->requiredAction & UPDATE_NIA_PNDN)
-+ WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pndn,
-+ p_FmPort->origNonRxQmiRegsPndn);
-+
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
-+ else
-+ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
-+
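-+    /* restore the plain BMI-to-BMI enqueue NIA, preserving the FDCS bits */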
-+ WRITE_UINT32(
-+ *p_BmiNia,
-+ (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME());
-+
-+ if (FmPcdGetHcHandle(p_FmPort->h_FmPcd))
-+ FmPcdHcSync(p_FmPort->h_FmPcd);
-+
-+ if (p_FmPort->requiredAction & UPDATE_NIA_FENE)
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofene,
-+ NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR);
-+ else
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfene,
-+ NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR);
-+ }
-+
-+ if (p_FmPort->requiredAction & UPDATE_NIA_PNEN)
-+ WRITE_UINT32(p_FmPort->port.qmi_regs->fmqm_pnen,
-+ NIA_ENG_BMI | NIA_BMI_AC_RELEASE);
-+
-+ if (p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)
-+ if (FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 2,
-+ p_FmPort->orFmanCtrl) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ p_FmPort->requiredAction = 0;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+void FmPortSetMacsecCmd(t_Handle h_FmPort, uint8_t dfltSci)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ volatile uint32_t *p_BmiCfgReg = NULL;
-+ uint32_t tmpReg;
-+
-+ SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_OPERATION, ("The routine is relevant for Tx ports only"));
-+ return;
-+ }
-+
-+ p_BmiCfgReg = &p_FmPort->port.bmi_regs->tx.fmbm_tfca;
-+ tmpReg = GET_UINT32(*p_BmiCfgReg) & ~BMI_CMD_ATTR_MACCMD_MASK;
-+ tmpReg |= BMI_CMD_ATTR_MACCMD_SECURED;
-+ tmpReg |= (((uint32_t)dfltSci << BMI_CMD_ATTR_MACCMD_SC_SHIFT)
-+ & BMI_CMD_ATTR_MACCMD_SC_MASK);
-+
-+ WRITE_UINT32(*p_BmiCfgReg, tmpReg);
-+}
-+
-+uint8_t FmPortGetNetEnvId(t_Handle h_FmPort)
-+{
-+ return ((t_FmPort*)h_FmPort)->netEnvId;
-+}
-+
-+uint8_t FmPortGetHardwarePortId(t_Handle h_FmPort)
-+{
-+ return ((t_FmPort*)h_FmPort)->hardwarePortId;
-+}
-+
-+uint32_t FmPortGetPcdEngines(t_Handle h_FmPort)
-+{
-+ return ((t_FmPort*)h_FmPort)->pcdEngines;
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FmPortSetGprFunc(t_Handle h_FmPort, e_FmPortGprFuncType gprFunc,
-+ void **p_Value)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ uint32_t muramPageOffset;
-+
-+ ASSERT_COND(p_FmPort);
-+ ASSERT_COND(p_Value);
-+
-+ if (p_FmPort->gprFunc != e_FM_PORT_GPR_EMPTY)
-+ {
-+ if (p_FmPort->gprFunc != gprFunc)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+                     ("GPR was already assigned to a different function"));
-+ }
-+ else
-+ {
-+ switch (gprFunc)
-+ {
-+ case (e_FM_PORT_GPR_MURAM_PAGE):
-+ p_FmPort->p_ParamsPage = FM_MURAM_AllocMem(p_FmPort->h_FmMuram,
-+ 256, 8);
-+ if (!p_FmPort->p_ParamsPage)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for page"));
-+
-+ IOMemSet32(p_FmPort->p_ParamsPage, 0, 256);
-+ muramPageOffset =
-+ (uint32_t)(XX_VirtToPhys(p_FmPort->p_ParamsPage)
-+ - p_FmPort->fmMuramPhysBaseAddr);
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ WRITE_UINT32(
-+ p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr,
-+ muramPageOffset);
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ WRITE_UINT32(
-+ p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ogpr,
-+ muramPageOffset);
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Invalid port type"));
-+ }
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+ p_FmPort->gprFunc = gprFunc;
-+ }
-+
-+ switch (p_FmPort->gprFunc)
-+ {
-+ case (e_FM_PORT_GPR_MURAM_PAGE):
-+ *p_Value = p_FmPort->p_ParamsPage;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+t_Error FmPortGetSetCcParams(t_Handle h_FmPort,
-+ t_FmPortGetSetCcParams *p_CcParams)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int tmpInt;
-+ volatile uint32_t *p_BmiPrsStartOffset = NULL;
-+
-+    /* This function is called from CC to pass and receive port parameters between CC and the PORT module */
-+
-+ if ((p_CcParams->getCcParams.type & OFFSET_OF_PR)
-+ && (p_FmPort->bufferOffsets.prsResultOffset != ILLEGAL_BASE))
-+ {
-+ p_CcParams->getCcParams.prOffset =
-+ (uint8_t)p_FmPort->bufferOffsets.prsResultOffset;
-+ p_CcParams->getCcParams.type &= ~OFFSET_OF_PR;
-+ }
-+ if (p_CcParams->getCcParams.type & HW_PORT_ID)
-+ {
-+ p_CcParams->getCcParams.hardwarePortId =
-+ (uint8_t)p_FmPort->hardwarePortId;
-+ p_CcParams->getCcParams.type &= ~HW_PORT_ID;
-+ }
-+ if ((p_CcParams->getCcParams.type & OFFSET_OF_DATA)
-+ && (p_FmPort->bufferOffsets.dataOffset != ILLEGAL_BASE))
-+ {
-+ p_CcParams->getCcParams.dataOffset =
-+ (uint16_t)p_FmPort->bufferOffsets.dataOffset;
-+ p_CcParams->getCcParams.type &= ~OFFSET_OF_DATA;
-+ }
-+ if (p_CcParams->getCcParams.type & NUM_OF_TASKS)
-+ {
-+ p_CcParams->getCcParams.numOfTasks = (uint8_t)p_FmPort->tasks.num;
-+ p_CcParams->getCcParams.type &= ~NUM_OF_TASKS;
-+ }
-+ if (p_CcParams->getCcParams.type & NUM_OF_EXTRA_TASKS)
-+ {
-+ p_CcParams->getCcParams.numOfExtraTasks =
-+ (uint8_t)p_FmPort->tasks.extra;
-+ p_CcParams->getCcParams.type &= ~NUM_OF_EXTRA_TASKS;
-+ }
-+ if (p_CcParams->getCcParams.type & FM_REV)
-+ {
-+ p_CcParams->getCcParams.revInfo.majorRev = p_FmPort->fmRevInfo.majorRev;
-+ p_CcParams->getCcParams.revInfo.minorRev = p_FmPort->fmRevInfo.minorRev;
-+ p_CcParams->getCcParams.type &= ~FM_REV;
-+ }
-+ if (p_CcParams->getCcParams.type & DISCARD_MASK)
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ p_CcParams->getCcParams.discardMask =
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm);
-+ else
-+ p_CcParams->getCcParams.discardMask =
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm);
-+ p_CcParams->getCcParams.type &= ~DISCARD_MASK;
-+ }
-+ if (p_CcParams->getCcParams.type & MANIP_EXTRA_SPACE)
-+ {
-+ p_CcParams->getCcParams.internalBufferOffset =
-+ p_FmPort->internalBufferOffset;
-+ p_CcParams->getCcParams.type &= ~MANIP_EXTRA_SPACE;
-+ }
-+ if (p_CcParams->getCcParams.type & GET_NIA_FPNE)
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ p_CcParams->getCcParams.nia =
-+ GET_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofpne);
-+ else
-+ p_CcParams->getCcParams.nia =
-+ GET_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rfpne);
-+ p_CcParams->getCcParams.type &= ~GET_NIA_FPNE;
-+ }
-+ if (p_CcParams->getCcParams.type & GET_NIA_PNDN)
-+ {
-+ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ p_CcParams->getCcParams.nia =
-+ GET_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn);
-+ p_CcParams->getCcParams.type &= ~GET_NIA_PNDN;
-+ }
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)
-+ && !(p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY))
-+ {
-+ p_FmPort->requiredAction |= UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY;
-+ p_FmPort->orFmanCtrl = p_CcParams->setCcParams.orFmanCtrl;
-+ }
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_NIA_PNEN)
-+ && !(p_FmPort->requiredAction & UPDATE_NIA_PNEN))
-+ {
-+ p_FmPort->savedQmiPnen = p_CcParams->setCcParams.nia;
-+ p_FmPort->requiredAction |= UPDATE_NIA_PNEN;
-+ }
-+ else
-+ if (p_CcParams->setCcParams.type & UPDATE_NIA_PNEN)
-+ {
-+ if (p_FmPort->savedQmiPnen != p_CcParams->setCcParams.nia)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+                             ("PNEN was previously defined with a different value"));
-+ }
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_NIA_PNDN)
-+ && !(p_FmPort->requiredAction & UPDATE_NIA_PNDN))
-+ {
-+ p_FmPort->savedNonRxQmiRegsPndn = p_CcParams->setCcParams.nia;
-+ p_FmPort->requiredAction |= UPDATE_NIA_PNDN;
-+ }
-+ else
-+ if (p_CcParams->setCcParams.type & UPDATE_NIA_PNDN)
-+ {
-+ if (p_FmPort->savedNonRxQmiRegsPndn != p_CcParams->setCcParams.nia)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+                                 ("PNDN was previously defined with a different value"));
-+ }
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_NIA_FENE)
-+ && (p_CcParams->setCcParams.overwrite
-+ || !(p_FmPort->requiredAction & UPDATE_NIA_FENE)))
-+ {
-+ p_FmPort->savedBmiFene = p_CcParams->setCcParams.nia;
-+ p_FmPort->requiredAction |= UPDATE_NIA_FENE;
-+ }
-+ else
-+ if (p_CcParams->setCcParams.type & UPDATE_NIA_FENE)
-+ {
-+ if (p_FmPort->savedBmiFene != p_CcParams->setCcParams.nia)
-+ RETURN_ERROR( MAJOR, E_INVALID_STATE,
-+                            ("xFENE was previously defined with a different value"));
-+ }
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_NIA_FPNE)
-+ && !(p_FmPort->requiredAction & UPDATE_NIA_FPNE))
-+ {
-+ p_FmPort->savedBmiFpne = p_CcParams->setCcParams.nia;
-+ p_FmPort->requiredAction |= UPDATE_NIA_FPNE;
-+ }
-+ else
-+ if (p_CcParams->setCcParams.type & UPDATE_NIA_FPNE)
-+ {
-+ if (p_FmPort->savedBmiFpne != p_CcParams->setCcParams.nia)
-+ RETURN_ERROR( MAJOR, E_INVALID_STATE,
-+ ("xFPNE was defined previously different"));
-+                             ("xFPNE was previously defined with a different value"));
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_NIA_CMNE)
-+ && !(p_FmPort->requiredAction & UPDATE_NIA_CMNE))
-+ {
-+ p_FmPort->savedBmiCmne = p_CcParams->setCcParams.nia;
-+ p_FmPort->requiredAction |= UPDATE_NIA_CMNE;
-+ }
-+ else
-+ if (p_CcParams->setCcParams.type & UPDATE_NIA_CMNE)
-+ {
-+ if (p_FmPort->savedBmiCmne != p_CcParams->setCcParams.nia)
-+ RETURN_ERROR( MAJOR, E_INVALID_STATE,
-+                                 ("xCMNE was previously defined with a different value"));
-+ }
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_PSO)
-+ && !(p_FmPort->requiredAction & UPDATE_PSO))
-+ {
-+ /* get PCD registers pointers */
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->rx.fmbm_rpso;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_BmiPrsStartOffset = &p_FmPort->port.bmi_regs->oh.fmbm_opso;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ }
-+
-+ /* set start parsing offset */
-+ tmpInt = (int)GET_UINT32(*p_BmiPrsStartOffset)
-+ + p_CcParams->setCcParams.psoSize;
-+ if (tmpInt > 0)
-+ WRITE_UINT32(*p_BmiPrsStartOffset, (uint32_t)tmpInt);
-+
-+ p_FmPort->requiredAction |= UPDATE_PSO;
-+ p_FmPort->savedPrsStartOffset = p_CcParams->setCcParams.psoSize;
-+ }
-+ else
-+ if (p_CcParams->setCcParams.type & UPDATE_PSO)
-+ {
-+ if (p_FmPort->savedPrsStartOffset
-+ != p_CcParams->setCcParams.psoSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+                        ("parser start offset was previously defined with a different value"));
-+ }
-+
-+ if ((p_CcParams->setCcParams.type & UPDATE_OFP_DPTE)
-+ && !(p_FmPort->requiredAction & UPDATE_OFP_DPTE))
-+ {
-+ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ p_FmPort->savedBmiOfp = GET_UINT32(p_FmPort->port.bmi_regs->oh.fmbm_ofp);
-+ p_FmPort->savedBmiOfp &= ~BMI_FIFO_PIPELINE_DEPTH_MASK;
-+ p_FmPort->savedBmiOfp |= p_CcParams->setCcParams.ofpDpde
-+ << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
-+ p_FmPort->requiredAction |= UPDATE_OFP_DPTE;
-+ }
-+
-+ return E_OK;
-+}
-+/*********************** End of inter-module routines ************************/
-+
-+/****************************************/
-+/* API Init unit functions */
-+/****************************************/
-+
-+t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams)
-+{
-+ t_FmPort *p_FmPort;
-+ uintptr_t baseAddr = p_FmPortParams->baseAddr;
-+ uint32_t tmpReg;
-+
-+ /* Allocate FM structure */
-+ p_FmPort = (t_FmPort *)XX_Malloc(sizeof(t_FmPort));
-+ if (!p_FmPort)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver structure"));
-+ return NULL;
-+ }
-+ memset(p_FmPort, 0, sizeof(t_FmPort));
-+
-+ /* Allocate the FM driver's parameters structure */
-+ p_FmPort->p_FmPortDriverParam = (t_FmPortDriverParam *)XX_Malloc(
-+ sizeof(t_FmPortDriverParam));
-+ if (!p_FmPort->p_FmPortDriverParam)
-+ {
-+ XX_Free(p_FmPort);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver parameters"));
-+ return NULL;
-+ }
-+ memset(p_FmPort->p_FmPortDriverParam, 0, sizeof(t_FmPortDriverParam));
-+
-+ /* Initialize FM port parameters which will be kept by the driver */
-+ p_FmPort->portType = p_FmPortParams->portType;
-+ p_FmPort->portId = p_FmPortParams->portId;
-+ p_FmPort->pcdEngines = FM_PCD_NONE;
-+ p_FmPort->f_Exception = p_FmPortParams->f_Exception;
-+ p_FmPort->h_App = p_FmPortParams->h_App;
-+ p_FmPort->h_Fm = p_FmPortParams->h_Fm;
-+
-+ /* get FM revision */
-+ FM_GetRevision(p_FmPort->h_Fm, &p_FmPort->fmRevInfo);
-+
-+ /* calculate global portId number */
-+ p_FmPort->hardwarePortId = SwPortIdToHwPortId(p_FmPort->portType,
-+ p_FmPortParams->portId,
-+ p_FmPort->fmRevInfo.majorRev,
-+ p_FmPort->fmRevInfo.minorRev);
-+
-+ if (p_FmPort->fmRevInfo.majorRev >= 6)
-+ {
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
-+ && (p_FmPortParams->portId != FM_OH_PORT_ID))
-+ DBG(WARNING,
-+ ("Port ID %d is recommended for HC port. Overwriting HW defaults to be suitable for HC.",
-+ FM_OH_PORT_ID));
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ && (p_FmPortParams->portId == FM_OH_PORT_ID))
-+ DBG(WARNING, ("Use non-zero portId for OP port due to insufficient resources on portId 0."));
-+ }
-+
-+ /* Set up FM port parameters for initialization phase only */
-+
-+ /* First, fill in flibs struct */
-+ fman_port_defconfig(&p_FmPort->p_FmPortDriverParam->dfltCfg,
-+ (enum fman_port_type)p_FmPort->portType);
-+ /* Overwrite some integration specific parameters */
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_pri_elevation =
-+ DEFAULT_PORT_rxFifoPriElevationLevel;
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_fifo_thr =
-+ DEFAULT_PORT_rxFifoThreshold;
-+
-+#if defined(FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675) || defined(FM_ERROR_VSP_NO_MATCH_SW006)
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006675 = TRUE;
-+#else
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006675 = FALSE;
-+#endif
-+ if ((p_FmPort->fmRevInfo.majorRev == 6)
-+ && (p_FmPort->fmRevInfo.minorRev == 0))
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006320 = TRUE;
-+ else
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.errata_A006320 = FALSE;
-+
-+ /* Excessive Threshold register - exists for pre-FMv3 chips only */
-+ if (p_FmPort->fmRevInfo.majorRev < 6)
-+ {
-+#ifdef FM_NO_RESTRICT_ON_ACCESS_RSRC
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.excessive_threshold_register =
-+ TRUE;
-+#endif
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_rebm_has_sgd = FALSE;
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_tfne_has_features = FALSE;
-+ }
-+ else
-+ {
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.excessive_threshold_register =
-+ FALSE;
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_rebm_has_sgd = TRUE;
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.fmbm_tfne_has_features = TRUE;
-+ }
-+ if (p_FmPort->fmRevInfo.majorRev == 4)
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.qmi_deq_options_support = FALSE;
-+ else
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.qmi_deq_options_support = TRUE;
-+
-+ /* Continue with other parameters */
-+ p_FmPort->p_FmPortDriverParam->baseAddr = baseAddr;
-+ /* set memory map pointers */
-+ p_FmPort->p_FmPortQmiRegs =
-+ (t_FmPortQmiRegs *)UINT_TO_PTR(baseAddr + QMI_PORT_REGS_OFFSET);
-+ p_FmPort->p_FmPortBmiRegs =
-+ (u_FmPortBmiRegs *)UINT_TO_PTR(baseAddr + BMI_PORT_REGS_OFFSET);
-+ p_FmPort->p_FmPortPrsRegs =
-+ (t_FmPortPrsRegs *)UINT_TO_PTR(baseAddr + PRS_PORT_REGS_OFFSET);
-+
-+ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.privDataSize =
-+ DEFAULT_PORT_bufferPrefixContent_privDataSize;
-+ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult =
-+ DEFAULT_PORT_bufferPrefixContent_passPrsResult;
-+ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp =
-+ DEFAULT_PORT_bufferPrefixContent_passTimeStamp;
-+ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passAllOtherPCDInfo =
-+ DEFAULT_PORT_bufferPrefixContent_passTimeStamp;
-+ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign =
-+ DEFAULT_PORT_bufferPrefixContent_dataAlign;
-+ /* p_FmPort->p_FmPortDriverParam->dmaSwapData = (e_FmDmaSwapOption)DEFAULT_PORT_dmaSwapData;
-+ p_FmPort->p_FmPortDriverParam->dmaIntContextCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaIntContextCacheAttr;
-+ p_FmPort->p_FmPortDriverParam->dmaHeaderCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaHeaderCacheAttr;
-+ p_FmPort->p_FmPortDriverParam->dmaScatterGatherCacheAttr = (e_FmDmaCacheOption)DEFAULT_PORT_dmaScatterGatherCacheAttr;
-+ p_FmPort->p_FmPortDriverParam->dmaWriteOptimize = DEFAULT_PORT_dmaWriteOptimize;
-+ */
-+ p_FmPort->p_FmPortDriverParam->liodnBase = p_FmPortParams->liodnBase;
-+ p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore =
-+ DEFAULT_PORT_cheksumLastBytesIgnore;
-+
-+ p_FmPort->maxFrameLength = DEFAULT_PORT_maxFrameLength;
-+ /* resource distribution. */
-+ p_FmPort->fifoBufs.num = DEFAULT_PORT_numOfFifoBufs(p_FmPort->portType)
-+ * BMI_FIFO_UNITS;
-+ p_FmPort->fifoBufs.extra = DEFAULT_PORT_extraNumOfFifoBufs
-+ * BMI_FIFO_UNITS;
-+ p_FmPort->openDmas.num = DEFAULT_PORT_numOfOpenDmas(p_FmPort->portType);
-+ p_FmPort->openDmas.extra =
-+ DEFAULT_PORT_extraNumOfOpenDmas(p_FmPort->portType);
-+ p_FmPort->tasks.num = DEFAULT_PORT_numOfTasks(p_FmPort->portType);
-+ p_FmPort->tasks.extra = DEFAULT_PORT_extraNumOfTasks(p_FmPort->portType);
-+
-+
-+#ifdef FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
-+ if ((p_FmPort->fmRevInfo.majorRev == 6)
-+ && (p_FmPort->fmRevInfo.minorRev == 0)
-+ && ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX)))
-+ {
-+ p_FmPort->openDmas.num = 16;
-+ p_FmPort->openDmas.extra = 0;
-+ }
-+#endif /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 */
-+
-+ /* Port type specific initialization: */
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX):
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ /* Initialize FM port parameters for initialization phase only */
-+ p_FmPort->p_FmPortDriverParam->cutBytesFromEnd =
-+ DEFAULT_PORT_cutBytesFromEnd;
-+ p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = FALSE;
-+ p_FmPort->p_FmPortDriverParam->frmDiscardOverride =
-+ DEFAULT_PORT_frmDiscardOverride;
-+
-+ tmpReg =
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfp);
-+ p_FmPort->p_FmPortDriverParam->rxFifoPriElevationLevel =
-+ (((tmpReg & BMI_RX_FIFO_PRI_ELEVATION_MASK)
-+ >> BMI_RX_FIFO_PRI_ELEVATION_SHIFT) + 1)
-+ * BMI_FIFO_UNITS;
-+ p_FmPort->p_FmPortDriverParam->rxFifoThreshold = (((tmpReg
-+ & BMI_RX_FIFO_THRESHOLD_MASK)
-+ >> BMI_RX_FIFO_THRESHOLD_SHIFT) + 1) * BMI_FIFO_UNITS;
-+
-+ p_FmPort->p_FmPortDriverParam->bufMargins.endMargins =
-+ DEFAULT_PORT_BufMargins_endMargins;
-+ p_FmPort->p_FmPortDriverParam->errorsToDiscard =
-+ DEFAULT_PORT_errorsToDiscard;
-+ p_FmPort->p_FmPortDriverParam->forwardReuseIntContext =
-+ DEFAULT_PORT_forwardIntContextReuse;
-+#if (DPAA_VERSION >= 11)
-+ p_FmPort->p_FmPortDriverParam->noScatherGather =
-+ DEFAULT_PORT_noScatherGather;
-+#endif /* (DPAA_VERSION >= 11) */
-+ break;
-+
-+ case (e_FM_PORT_TYPE_TX):
-+ p_FmPort->p_FmPortDriverParam->dontReleaseBuf = FALSE;
-+#ifdef FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127
-+ tmpReg = 0x00001013;
-+ WRITE_UINT32( p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfp,
-+ tmpReg);
-+#endif /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 */
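-+            /* no break: 1G Tx ports fall through to the common Tx setup below */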
-+ case (e_FM_PORT_TYPE_TX_10G):
-+ tmpReg =
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfp);
-+ p_FmPort->p_FmPortDriverParam->txFifoMinFillLevel = ((tmpReg
-+ & BMI_TX_FIFO_MIN_FILL_MASK)
-+ >> BMI_TX_FIFO_MIN_FILL_SHIFT) * BMI_FIFO_UNITS;
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
-+ (uint8_t)(((tmpReg & BMI_FIFO_PIPELINE_DEPTH_MASK)
-+ >> BMI_FIFO_PIPELINE_DEPTH_SHIFT) + 1);
-+ p_FmPort->p_FmPortDriverParam->txFifoLowComfLevel = (((tmpReg
-+ & BMI_TX_LOW_COMF_MASK) >> BMI_TX_LOW_COMF_SHIFT) + 1)
-+ * BMI_FIFO_UNITS;
-+
-+ p_FmPort->p_FmPortDriverParam->deqType = DEFAULT_PORT_deqType;
-+ p_FmPort->p_FmPortDriverParam->deqPrefetchOption =
-+ DEFAULT_PORT_deqPrefetchOption;
-+ p_FmPort->p_FmPortDriverParam->deqHighPriority =
-+ (bool)((p_FmPort->portType == e_FM_PORT_TYPE_TX) ? DEFAULT_PORT_deqHighPriority_1G :
-+ DEFAULT_PORT_deqHighPriority_10G);
-+ p_FmPort->p_FmPortDriverParam->deqByteCnt =
-+ (uint16_t)(
-+ (p_FmPort->portType == e_FM_PORT_TYPE_TX) ? DEFAULT_PORT_deqByteCnt_1G :
-+ DEFAULT_PORT_deqByteCnt_10G);
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_FmPort->p_FmPortDriverParam->errorsToDiscard =
-+ DEFAULT_PORT_errorsToDiscard;
-+#if (DPAA_VERSION >= 11)
-+ p_FmPort->p_FmPortDriverParam->noScatherGather =
-+ DEFAULT_PORT_noScatherGather;
-+#endif /* (DPAA_VERSION >= 11) */
-+ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
-+ p_FmPort->p_FmPortDriverParam->deqPrefetchOption =
-+ DEFAULT_PORT_deqPrefetchOption_HC;
-+ p_FmPort->p_FmPortDriverParam->deqHighPriority =
-+ DEFAULT_PORT_deqHighPriority_1G;
-+ p_FmPort->p_FmPortDriverParam->deqType = DEFAULT_PORT_deqType;
-+ p_FmPort->p_FmPortDriverParam->deqByteCnt =
-+ DEFAULT_PORT_deqByteCnt_1G;
-+
-+ tmpReg =
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofp);
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
-+ (uint8_t)(((tmpReg & BMI_FIFO_PIPELINE_DEPTH_MASK)
-+ >> BMI_FIFO_PIPELINE_DEPTH_SHIFT) + 1);
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
-+ && (p_FmPortParams->portId != FM_OH_PORT_ID))
-+ {
-+ /* Overwrite HC defaults */
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
-+ DEFAULT_PORT_fifoDeqPipelineDepth_OH;
-+ }
-+
-+#ifndef FM_FRAME_END_PARAMS_FOR_OP
-+ if (p_FmPort->fmRevInfo.majorRev < 6)
-+ p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore = DEFAULT_notSupported;
-+#endif /* !FM_FRAME_END_PARAMS_FOR_OP */
-+
-+#ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP
-+ if (!((p_FmPort->fmRevInfo.majorRev == 4) ||
-+ (p_FmPort->fmRevInfo.majorRev >= 6)))
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth = DEFAULT_notSupported;
-+#endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */
-+ break;
-+
-+ default:
-+ XX_Free(p_FmPort->p_FmPortDriverParam);
-+ XX_Free(p_FmPort);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ return NULL;
-+ }
-+#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+ if (p_FmPort->fmRevInfo.majorRev == 4)
-+ p_FmPort->p_FmPortDriverParam->deqPrefetchOption = (e_FmPortDeqPrefetchOption)DEFAULT_notSupported;
-+#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
-+
-+ p_FmPort->imEn = p_FmPortParams->independentModeEnable;
-+
-+ if (p_FmPort->imEn)
-+ {
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G))
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
-+ DEFAULT_PORT_fifoDeqPipelineDepth_IM;
-+ FmPortConfigIM(p_FmPort, p_FmPortParams);
-+ }
-+ else
-+ {
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX):
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ /* Initialize FM port parameters for initialization phase only */
-+ memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools,
-+ &p_FmPortParams->specificParams.rxParams.extBufPools,
-+ sizeof(t_FmExtPools));
-+ p_FmPort->p_FmPortDriverParam->errFqid =
-+ p_FmPortParams->specificParams.rxParams.errFqid;
-+ p_FmPort->p_FmPortDriverParam->dfltFqid =
-+ p_FmPortParams->specificParams.rxParams.dfltFqid;
-+ p_FmPort->p_FmPortDriverParam->liodnOffset =
-+ p_FmPortParams->specificParams.rxParams.liodnOffset;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ case (e_FM_PORT_TYPE_TX):
-+ case (e_FM_PORT_TYPE_TX_10G):
-+ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
-+ p_FmPort->p_FmPortDriverParam->errFqid =
-+ p_FmPortParams->specificParams.nonRxParams.errFqid;
-+ p_FmPort->p_FmPortDriverParam->deqSubPortal =
-+ (uint8_t)(p_FmPortParams->specificParams.nonRxParams.qmChannel
-+ & QMI_DEQ_CFG_SUBPORTAL_MASK);
-+ p_FmPort->p_FmPortDriverParam->dfltFqid =
-+ p_FmPortParams->specificParams.nonRxParams.dfltFqid;
-+ break;
-+ default:
-+ XX_Free(p_FmPort->p_FmPortDriverParam);
-+ XX_Free(p_FmPort);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ return NULL;
-+ }
-+ }
-+
-+ memset(p_FmPort->name, 0, (sizeof(char)) * MODULE_NAME_SIZE);
-+ if (Sprint(
-+ p_FmPort->name,
-+ "FM-%d-port-%s-%d",
-+ FmGetId(p_FmPort->h_Fm),
-+ ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) ? "OH" :
-+ (p_FmPort->portType == e_FM_PORT_TYPE_RX ? "1g-RX" :
-+ (p_FmPort->portType == e_FM_PORT_TYPE_TX ? "1g-TX" :
-+ (p_FmPort->portType
-+ == e_FM_PORT_TYPE_RX_10G ? "10g-RX" :
-+ "10g-TX")))),
-+ p_FmPort->portId) == 0)
-+ {
-+ XX_Free(p_FmPort->p_FmPortDriverParam);
-+ XX_Free(p_FmPort);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+ return NULL;
-+ }
-+
-+ p_FmPort->h_Spinlock = XX_InitSpinlock();
-+ if (!p_FmPort->h_Spinlock)
-+ {
-+ XX_Free(p_FmPort->p_FmPortDriverParam);
-+ XX_Free(p_FmPort);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+ return NULL;
-+ }
-+
-+ return p_FmPort;
-+}
-+
-+t_FmPort *rx_port = 0;
-+t_FmPort *tx_port = 0;
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Init
-+
-+ @Description Initializes the FM Port module
-+
-+ @Param[in] h_FmPort - FM Port module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+ *//***************************************************************************/
-+t_Error FM_PORT_Init(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_FmPortDriverParam *p_DriverParams;
-+ t_Error errCode;
-+ t_FmInterModulePortInitParams fmParams;
-+ t_FmRevisionInfo revInfo;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ errCode = FmSpBuildBufferStructure(
-+ &p_FmPort->p_FmPortDriverParam->intContext,
-+ &p_FmPort->p_FmPortDriverParam->bufferPrefixContent,
-+ &p_FmPort->p_FmPortDriverParam->bufMargins,
-+ &p_FmPort->bufferOffsets, &p_FmPort->internalBufferOffset);
-+ if (errCode != E_OK)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
-+ if ((p_FmPort->p_FmPortDriverParam->bcbWorkaround) &&
-+ (p_FmPort->portType == e_FM_PORT_TYPE_RX))
-+ {
-+ p_FmPort->p_FmPortDriverParam->errorsToDiscard |= FM_PORT_FRM_ERR_PHYSICAL;
-+ if (!p_FmPort->fifoBufs.num)
-+ p_FmPort->fifoBufs.num = DEFAULT_PORT_numOfFifoBufs(p_FmPort->portType)*BMI_FIFO_UNITS;
-+ p_FmPort->fifoBufs.num += 4*KILOBYTE;
-+ }
-+#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
-+
-+ CHECK_INIT_PARAMETERS(p_FmPort, CheckInitParameters);
-+
-+ p_DriverParams = p_FmPort->p_FmPortDriverParam;
-+
-+ /* Set up flibs port structure */
-+ memset(&p_FmPort->port, 0, sizeof(struct fman_port));
-+ p_FmPort->port.type = (enum fman_port_type)p_FmPort->portType;
-+ FM_GetRevision(p_FmPort->h_Fm, &revInfo);
-+ p_FmPort->port.fm_rev_maj = revInfo.majorRev;
-+ p_FmPort->port.fm_rev_min = revInfo.minorRev;
-+ p_FmPort->port.bmi_regs =
-+ (union fman_port_bmi_regs *)UINT_TO_PTR(p_DriverParams->baseAddr + BMI_PORT_REGS_OFFSET);
-+ p_FmPort->port.qmi_regs =
-+ (struct fman_port_qmi_regs *)UINT_TO_PTR(p_DriverParams->baseAddr + QMI_PORT_REGS_OFFSET);
-+ p_FmPort->port.ext_pools_num = (uint8_t)((revInfo.majorRev == 4) ? 4 : 8);
-+ p_FmPort->port.im_en = p_FmPort->imEn;
-+ p_FmPort->p_FmPortPrsRegs =
-+ (t_FmPortPrsRegs *)UINT_TO_PTR(p_DriverParams->baseAddr + PRS_PORT_REGS_OFFSET);
-+
-+ if (((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) && !p_FmPort->imEn)
-+ {
-+ /* Call the external Buffer routine which also checks fifo
-+ size and updates it if necessary */
-+ /* define external buffer pools and pool depletion*/
-+ errCode = SetExtBufferPools(p_FmPort);
-+ if (errCode)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+ /* check if the largest external buffer pool is large enough */
-+ if (p_DriverParams->bufMargins.startMargins + MIN_EXT_BUF_SIZE
-+ + p_DriverParams->bufMargins.endMargins
-+ > p_FmPort->rxPoolsParams.largestBufSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("bufMargins.startMargins (%d) + minimum buf size (64) + bufMargins.endMargins (%d) is larger than maximum external buffer size (%d)", p_DriverParams->bufMargins.startMargins, p_DriverParams->bufMargins.endMargins, p_FmPort->rxPoolsParams.largestBufSize));
-+ }
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ {
-+ {
-+#ifdef FM_NO_OP_OBSERVED_POOLS
-+ t_FmRevisionInfo revInfo;
-+
-+ FM_GetRevision(p_FmPort->h_Fm, &revInfo);
-+ if ((revInfo.majorRev == 4) && (p_DriverParams->enBufPoolDepletion))
-+#endif /* FM_NO_OP_OBSERVED_POOLS */
-+ {
-+ /* define external buffer pools */
-+ errCode = SetExtBufferPools(p_FmPort);
-+ if (errCode)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+ }
-+ }
-+ }
-+
-+ /************************************************************/
-+ /* Call FM module routine for communicating parameters */
-+ /************************************************************/
-+ memset(&fmParams, 0, sizeof(fmParams));
-+ fmParams.hardwarePortId = p_FmPort->hardwarePortId;
-+ fmParams.portType = (e_FmPortType)p_FmPort->portType;
-+ fmParams.numOfTasks = (uint8_t)p_FmPort->tasks.num;
-+ fmParams.numOfExtraTasks = (uint8_t)p_FmPort->tasks.extra;
-+ fmParams.numOfOpenDmas = (uint8_t)p_FmPort->openDmas.num;
-+ fmParams.numOfExtraOpenDmas = (uint8_t)p_FmPort->openDmas.extra;
-+
-+ if (p_FmPort->fifoBufs.num)
-+ {
-+ errCode = VerifySizeOfFifo(p_FmPort);
-+ if (errCode != E_OK)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+ }
-+ fmParams.sizeOfFifo = p_FmPort->fifoBufs.num;
-+ fmParams.extraSizeOfFifo = p_FmPort->fifoBufs.extra;
-+ fmParams.independentMode = p_FmPort->imEn;
-+ fmParams.liodnOffset = p_DriverParams->liodnOffset;
-+ fmParams.liodnBase = p_DriverParams->liodnBase;
-+ fmParams.deqPipelineDepth =
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth;
-+ fmParams.maxFrameLength = p_FmPort->maxFrameLength;
-+#ifndef FM_DEQ_PIPELINE_PARAMS_FOR_OP
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) ||
-+ (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
-+ {
-+ if (!((p_FmPort->fmRevInfo.majorRev == 4) ||
-+ (p_FmPort->fmRevInfo.majorRev >= 6)))
-+ /* HC ports do not have fifoDeqPipelineDepth, but it is needed only
-+ * for deq threshold calculation.
-+ */
-+ fmParams.deqPipelineDepth = 2;
-+ }
-+#endif /* !FM_DEQ_PIPELINE_PARAMS_FOR_OP */
-+
-+ errCode = FmGetSetPortParams(p_FmPort->h_Fm, &fmParams);
-+ if (errCode)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+
-+ /* get params for use in init */
-+ p_FmPort->fmMuramPhysBaseAddr =
-+ (uint64_t)((uint64_t)(fmParams.fmMuramPhysBaseAddr.low)
-+ | ((uint64_t)(fmParams.fmMuramPhysBaseAddr.high) << 32));
-+ p_FmPort->h_FmMuram = FmGetMuramHandle(p_FmPort->h_Fm);
-+
-+ errCode = InitLowLevelDriver(p_FmPort);
-+ if (errCode != E_OK)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+
-+ FmPortDriverParamFree(p_FmPort);
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ {
-+ t_FmPcdCtrlParamsPage *p_ParamsPage;
-+
-+ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
-+ (void**)&p_ParamsPage);
-+ ASSERT_COND(p_ParamsPage);
-+
-+ WRITE_UINT32(p_ParamsPage->misc, FM_CTL_PARAMS_PAGE_ALWAYS_ON);
-+#ifdef FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ {
-+ WRITE_UINT32(
-+ p_ParamsPage->misc,
-+ (GET_UINT32(p_ParamsPage->misc) | FM_CTL_PARAMS_PAGE_OP_FIX_EN));
-+ WRITE_UINT32(
-+ p_ParamsPage->discardMask,
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm));
-+ }
-+#endif /* FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675 */
-+#ifdef FM_ERROR_VSP_NO_MATCH_SW006
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ WRITE_UINT32(
-+ p_ParamsPage->errorsDiscardMask,
-+ (GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm) | GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsem)));
-+ else
-+ WRITE_UINT32(
-+ p_ParamsPage->errorsDiscardMask,
-+ (GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm) | GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsem)));
-+#endif /* FM_ERROR_VSP_NO_MATCH_SW006 */
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (p_FmPort->deepSleepVars.autoResMaxSizes)
-+ FmPortConfigAutoResForDeepSleepSupport1(p_FmPort);
-+ return E_OK;
-+}
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Free
-+
-+ @Description Frees all resources that were assigned to the FM Port module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmPort - FM Port module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+ *//***************************************************************************/
-+t_Error FM_PORT_Free(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_FmInterModulePortFreeParams fmParams;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+
-+ if (p_FmPort->pcdEngines)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("Trying to free a port with PCD. FM_PORT_DeletePCD must be called first."));
-+
-+ if (p_FmPort->enabled)
-+ {
-+ if (FM_PORT_Disable(p_FmPort) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM_PORT_Disable FAILED"));
-+ }
-+
-+ if (p_FmPort->imEn)
-+ FmPortImFree(p_FmPort);
-+
-+ FmPortDriverParamFree(p_FmPort);
-+
-+ memset(&fmParams, 0, sizeof(fmParams));
-+ fmParams.hardwarePortId = p_FmPort->hardwarePortId;
-+ fmParams.portType = (e_FmPortType)p_FmPort->portType;
-+ fmParams.deqPipelineDepth =
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth;
-+
-+ FmFreePortParams(p_FmPort->h_Fm, &fmParams);
-+
-+#if (DPAA_VERSION >= 11)
-+ if (FmVSPFreeForPort(p_FmPort->h_Fm, p_FmPort->portType, p_FmPort->portId)
-+ != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("VSP free of port FAILED"));
-+
-+ if (p_FmPort->p_ParamsPage)
-+ FM_MURAM_FreeMem(p_FmPort->h_FmMuram, p_FmPort->p_ParamsPage);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (p_FmPort->h_Spinlock)
-+ XX_FreeSpinlock(p_FmPort->h_Spinlock);
-+
-+ XX_Free(p_FmPort);
-+
-+ return E_OK;
-+}
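-+
-+/* Illustrative sketch (editor's addition, not part of the original driver):
-+ * a minimal port lifecycle built from the API in this file, assuming the
-+ * usual FMD headers and an FM instance handle obtained elsewhere. The handle
-+ * names, port type/id, FQIDs and qmChannel below are hypothetical
-+ * placeholders; other required t_FmPortParams fields (e.g. base address) and
-+ * most error handling are omitted for brevity. */
-+#if 0
-+static void example_tx_port_lifecycle(t_Handle h_Fm)
-+{
-+ t_FmPortParams portParams;
-+ t_Handle h_FmPort;
-+
-+ memset(&portParams, 0, sizeof(portParams));
-+ portParams.h_Fm = h_Fm; /* from FM_Init()/FM_Config() */
-+ portParams.portType = e_FM_PORT_TYPE_TX; /* 1G Tx port */
-+ portParams.portId = 0;
-+ portParams.independentModeEnable = FALSE;
-+ portParams.specificParams.nonRxParams.errFqid = 0x100; /* placeholder FQIDs */
-+ portParams.specificParams.nonRxParams.dfltFqid = 0x101;
-+ portParams.specificParams.nonRxParams.qmChannel = 0x40; /* placeholder channel */
-+
-+ h_FmPort = FM_PORT_Config(&portParams); /* allocate handle, load defaults */
-+ if (h_FmPort
-+ && (FM_PORT_Init(h_FmPort) == E_OK) /* commit configuration to hardware */
-+ && (FM_PORT_Enable(h_FmPort) == E_OK))
-+ {
-+ /* ... traffic runs ... */
-+ FM_PORT_Disable(h_FmPort);
-+ FM_PORT_Free(h_FmPort); /* releases FM/MURAM resources */
-+ }
-+}
-+#endif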
-+
-+/*************************************************/
-+/* API Advanced Init unit functions */
-+/*************************************************/
-+
-+t_Error FM_PORT_ConfigNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_OpenDmas)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->setNumOfOpenDmas = TRUE;
-+ memcpy(&p_FmPort->openDmas, p_OpenDmas, sizeof(t_FmPortRsrc));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ memcpy(&p_FmPort->tasks, p_NumOfTasks, sizeof(t_FmPortRsrc));
-+ p_FmPort->p_FmPortDriverParam->setNumOfTasks = TRUE;
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->setSizeOfFifo = TRUE;
-+ memcpy(&p_FmPort->fifoBufs, p_SizeOfFifo, sizeof(t_FmPortRsrc));
-+
-+ return E_OK;
-+}
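-+
-+/* Illustrative sketch (editor's addition, not part of the original driver):
-+ * how the three resource overrides above would typically be supplied between
-+ * FM_PORT_Config() and FM_PORT_Init(). The numeric values are hypothetical
-+ * placeholders, not recommended settings. */
-+#if 0
-+static t_Error example_override_port_resources(t_Handle h_FmPort)
-+{
-+ t_FmPortRsrc fifo = { .num = 4 * 1024, .extra = 0 }; /* bytes, multiple of BMI_FIFO_UNITS */
-+ t_FmPortRsrc tasks = { .num = 4, .extra = 1 };
-+ t_FmPortRsrc dmas = { .num = 2, .extra = 0 };
-+ t_Error err;
-+
-+ err = FM_PORT_ConfigSizeOfFifo(h_FmPort, &fifo);
-+ if (err == E_OK)
-+ err = FM_PORT_ConfigNumOfTasks(h_FmPort, &tasks);
-+ if (err == E_OK)
-+ err = FM_PORT_ConfigNumOfOpenDmas(h_FmPort, &dmas);
-+ return err;
-+}
-+#endif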
-+
-+t_Error FM_PORT_ConfigDeqHighPriority(t_Handle h_FmPort, bool highPri)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_high_pri = highPri;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDeqType(t_Handle h_FmPort, e_FmPortDeqType deqType)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("not available for Rx ports"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_type =
-+ (enum fman_port_deq_type)deqType;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDeqPrefetchOption(
-+ t_Handle h_FmPort, e_FmPortDeqPrefetchOption deqPrefetchOption)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("not available for Rx ports"));
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_prefetch_opt =
-+ (enum fman_port_deq_prefetch)deqPrefetchOption;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigBackupPools(t_Handle h_FmPort,
-+ t_FmBackupBmPools *p_BackupBmPools)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->p_BackupBmPools =
-+ (t_FmBackupBmPools *)XX_Malloc(sizeof(t_FmBackupBmPools));
-+ if (!p_FmPort->p_FmPortDriverParam->p_BackupBmPools)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BackupBmPools allocation failed"));
-+ memcpy(p_FmPort->p_FmPortDriverParam->p_BackupBmPools, p_BackupBmPools,
-+ sizeof(t_FmBackupBmPools));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDeqByteCnt(t_Handle h_FmPort, uint16_t deqByteCnt)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("not available for Rx ports"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.deq_byte_cnt = deqByteCnt;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigBufferPrefixContent(
-+ t_Handle h_FmPort, t_FmBufferPrefixContent *p_FmBufferPrefixContent)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ memcpy(&p_FmPort->p_FmPortDriverParam->bufferPrefixContent,
-+ p_FmBufferPrefixContent, sizeof(t_FmBufferPrefixContent));
-+ /* if dataAlign was not initialized by the user, fall back to the driver's default */
-+ if (!p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign)
-+ p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign =
-+ DEFAULT_PORT_bufferPrefixContent_dataAlign;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigCheksumLastBytesIgnore(t_Handle h_FmPort,
-+ uint8_t checksumLastBytesIgnore)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.checksum_bytes_ignore =
-+ checksumLastBytesIgnore;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigCutBytesFromEnd(t_Handle h_FmPort,
-+ uint8_t cutBytesFromEnd)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_cut_end_bytes = cutBytesFromEnd;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigPoolDepletion(t_Handle h_FmPort,
-+ t_FmBufPoolDepletion *p_BufPoolDepletion)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE;
-+ memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion, p_BufPoolDepletion,
-+ sizeof(t_FmBufPoolDepletion));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigObservedPoolDepletion(
-+ t_Handle h_FmPort,
-+ t_FmPortObservedBufPoolDepletion *p_FmPortObservedBufPoolDepletion)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for OP ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE;
-+ memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion,
-+ &p_FmPortObservedBufPoolDepletion->poolDepletionParams,
-+ sizeof(t_FmBufPoolDepletion));
-+ memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools,
-+ &p_FmPortObservedBufPoolDepletion->poolsParams,
-+ sizeof(t_FmExtPools));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigExtBufPools(t_Handle h_FmPort, t_FmExtPools *p_FmExtPools)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for OP ports only"));
-+
-+ memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, p_FmExtPools,
-+ sizeof(t_FmExtPools));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDontReleaseTxBufToBM(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Tx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->dontReleaseBuf = TRUE;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDfltColor(t_Handle h_FmPort, e_FmPortColor color)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.color = (enum fman_port_color)color;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigSyncReq(t_Handle h_FmPort, bool syncReq)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("Not available for Tx ports"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.sync_req = syncReq;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigFrmDiscardOverride(t_Handle h_FmPort, bool override)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("Not available for Tx ports"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.discard_override = override;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigErrorsToDiscard(t_Handle h_FmPort,
-+ fmPortFrameErrSelect_t errs)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->errorsToDiscard = errs;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDmaSwapData(t_Handle h_FmPort, e_FmDmaSwapOption swapData)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_swap_data =
-+ (enum fman_port_dma_swap)swapData;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDmaIcCacheAttr(t_Handle h_FmPort,
-+ e_FmDmaCacheOption intContextCacheAttr)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_ic_stash_on =
-+ (bool)(intContextCacheAttr == e_FM_DMA_STASH);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDmaHdrAttr(t_Handle h_FmPort,
-+ e_FmDmaCacheOption headerCacheAttr)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_header_stash_on =
-+ (bool)(headerCacheAttr == e_FM_DMA_STASH);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDmaScatterGatherAttr(
-+ t_Handle h_FmPort, e_FmDmaCacheOption scatterGatherCacheAttr)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_sg_stash_on =
-+ (bool)(scatterGatherCacheAttr == e_FM_DMA_STASH);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigDmaWriteOptimize(t_Handle h_FmPort, bool optimize)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("Not available for Tx ports"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.dma_write_optimize = optimize;
-+
-+ return E_OK;
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FM_PORT_ConfigNoScatherGather(t_Handle h_FmPort, bool noScatherGather)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ UNUSED(noScatherGather);
-+ UNUSED(p_FmPort);
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->noScatherGather = noScatherGather;
-+
-+ return E_OK;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+t_Error FM_PORT_ConfigForwardReuseIntContext(t_Handle h_FmPort,
-+ bool forwardReuse)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->forwardReuseIntContext = forwardReuse;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigMaxFrameLength(t_Handle h_FmPort, uint16_t length)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->maxFrameLength = length;
-+
-+ return E_OK;
-+}
-+
-+#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
-+t_Error FM_PORT_ConfigBCBWorkaround(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->p_FmPortDriverParam->bcbWorkaround = TRUE;
-+
-+ return E_OK;
-+}
-+#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
-+
-+/****************************************************/
-+/* Hidden-DEBUG Only API */
-+/****************************************************/
-+
-+t_Error FM_PORT_ConfigTxFifoMinFillLevel(t_Handle h_FmPort,
-+ uint32_t minFillLevel)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Tx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_min_level = minFillLevel;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigFifoDeqPipelineDepth(t_Handle h_FmPort,
-+ uint8_t deqPipelineDepth)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("Not available for Rx ports"));
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("Not available for IM ports!"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_deq_pipeline_depth =
-+ deqPipelineDepth;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigTxFifoLowComfLevel(t_Handle h_FmPort,
-+ uint32_t fifoLowComfLevel)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_TX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Tx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.tx_fifo_low_comf_level =
-+ fifoLowComfLevel;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigRxFifoThreshold(t_Handle h_FmPort, uint32_t fifoThreshold)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_fifo_thr = fifoThreshold;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigRxFifoPriElevationLevel(t_Handle h_FmPort,
-+ uint32_t priElevationLevel)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ p_FmPort->p_FmPortDriverParam->dfltCfg.rx_pri_elevation = priElevationLevel;
-+
-+ return E_OK;
-+}
-+/****************************************************/
-+/* API Run-time Control unit functions */
-+/****************************************************/
-+
-+t_Error FM_PORT_SetNumOfOpenDmas(t_Handle h_FmPort,
-+ t_FmPortRsrc *p_NumOfOpenDmas)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if ((!p_NumOfOpenDmas->num) || (p_NumOfOpenDmas->num > MAX_NUM_OF_DMAS))
-+ RETURN_ERROR( MAJOR, E_INVALID_VALUE,
-+ ("openDmas-num can't be larger than %d", MAX_NUM_OF_DMAS));
-+ if (p_NumOfOpenDmas->extra > MAX_NUM_OF_EXTRA_DMAS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("openDmas-extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS));
-+ err = FmSetNumOfOpenDmas(p_FmPort->h_Fm, p_FmPort->hardwarePortId,
-+ (uint8_t*)&p_NumOfOpenDmas->num,
-+ (uint8_t*)&p_NumOfOpenDmas->extra, FALSE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ memcpy(&p_FmPort->openDmas, p_NumOfOpenDmas, sizeof(t_FmPortRsrc));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ /* only driver uses host command port, so ASSERT rather than RETURN_ERROR */
-+ ASSERT_COND(p_FmPort->portType != e_FM_PORT_TYPE_OH_HOST_COMMAND);
-+
-+ if ((!p_NumOfTasks->num) || (p_NumOfTasks->num > MAX_NUM_OF_TASKS))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("NumOfTasks-num can't be larger than %d", MAX_NUM_OF_TASKS));
-+ if (p_NumOfTasks->extra > MAX_NUM_OF_EXTRA_TASKS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("NumOfTasks-extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS));
-+
-+ err = FmSetNumOfTasks(p_FmPort->h_Fm, p_FmPort->hardwarePortId,
-+ (uint8_t*)&p_NumOfTasks->num,
-+ (uint8_t*)&p_NumOfTasks->extra, FALSE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ /* update driver's struct */
-+ memcpy(&p_FmPort->tasks, p_NumOfTasks, sizeof(t_FmPortRsrc));
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if (!p_SizeOfFifo->num || (p_SizeOfFifo->num > MAX_PORT_FIFO_SIZE))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("SizeOfFifo-num has to be in the range of 256 - %d", MAX_PORT_FIFO_SIZE));
-+ if (p_SizeOfFifo->num % BMI_FIFO_UNITS)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("SizeOfFifo-num has to be divisible by %d", BMI_FIFO_UNITS));
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ /* extra FIFO size (allowed only to Rx ports) */
-+ if (p_SizeOfFifo->extra % BMI_FIFO_UNITS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("SizeOfFifo-extra has to be divisible by %d", BMI_FIFO_UNITS));
-+ }
-+ else
-+ if (p_SizeOfFifo->extra)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ (" No SizeOfFifo-extra for non Rx ports"));
-+
-+ memcpy(&p_FmPort->fifoBufs, p_SizeOfFifo, sizeof(t_FmPortRsrc));
-+
-+ /* we do not change user's parameter */
-+ err = VerifySizeOfFifo(p_FmPort);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ err = FmSetSizeOfFifo(p_FmPort->h_Fm, p_FmPort->hardwarePortId,
-+ &p_SizeOfFifo->num, &p_SizeOfFifo->extra, FALSE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+uint32_t FM_PORT_GetBufferDataOffset(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
-+ 0);
-+
-+ return p_FmPort->bufferOffsets.dataOffset;
-+}
-+
-+uint8_t * FM_PORT_GetBufferICInfo(t_Handle h_FmPort, char *p_Data)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
-+ NULL);
-+
-+ if (p_FmPort->bufferOffsets.pcdInfoOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.pcdInfoOffset);
-+}
-+
-+t_FmPrsResult * FM_PORT_GetBufferPrsResult(t_Handle h_FmPort, char *p_Data)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
-+ NULL);
-+
-+ if (p_FmPort->bufferOffsets.prsResultOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (t_FmPrsResult *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.prsResultOffset);
-+}
-+
-+uint64_t * FM_PORT_GetBufferTimeStamp(t_Handle h_FmPort, char *p_Data)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
-+ NULL);
-+
-+ if (p_FmPort->bufferOffsets.timeStampOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (uint64_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.timeStampOffset);
-+}
-+
-+uint8_t * FM_PORT_GetBufferHashResult(t_Handle h_FmPort, char *p_Data)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
-+ NULL);
-+
-+ if (p_FmPort->bufferOffsets.hashResultOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.hashResultOffset);
-+}
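-+
-+/* Illustrative sketch (editor's addition, not part of the original driver):
-+ * reading the fields the port prepends to a received frame buffer. It is
-+ * assumed here that p_BufferStart points to the start of the external buffer
-+ * handed back with the frame; each getter above returns NULL when the
-+ * corresponding field was not enabled in the buffer-prefix configuration. */
-+#if 0
-+static void example_parse_rx_buffer(t_Handle h_FmPort, char *p_BufferStart)
-+{
-+ uint8_t *p_Frame = (uint8_t *)PTR_MOVE(p_BufferStart,
-+ FM_PORT_GetBufferDataOffset(h_FmPort));
-+ t_FmPrsResult *p_PrsResult = FM_PORT_GetBufferPrsResult(h_FmPort, p_BufferStart);
-+ uint64_t *p_TimeStamp = FM_PORT_GetBufferTimeStamp(h_FmPort, p_BufferStart);
-+
-+ if (p_PrsResult)
-+ {
-+ /* parse results were requested in the prefix and are valid here */
-+ }
-+ if (p_TimeStamp)
-+ {
-+ /* hardware timestamp of the frame, in FMan timestamp units */
-+ }
-+ UNUSED(p_Frame);
-+}
-+#endif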
-+
-+t_Error FM_PORT_Disable(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPort->imEn)
-+ FmPortImDisable(p_FmPort);
-+
-+ err = fman_port_disable(&p_FmPort->port);
-+ if (err == -EBUSY)
-+ {
-+ DBG(WARNING, ("%s: BMI or QMI is Busy. Port forced down",
-+ p_FmPort->name));
-+ }
-+ else
-+ if (err != 0)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_disable"));
-+ }
-+
-+ p_FmPort->enabled = FALSE;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_Enable(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ /* Used by the FM_PORT_Free routine to decide whether the port
-+ must be disabled. Set it to TRUE before enabling, so that if
-+ part of the enable sequence fails there is still something to
-+ disable during Free. For example, if BMI enable succeeded but
-+ QMI enable failed, BMI still needs to be disabled by Free. */
-+ p_FmPort->enabled = TRUE;
-+
-+ if (p_FmPort->imEn)
-+ FmPortImEnable(p_FmPort);
-+
-+ err = fman_port_enable(&p_FmPort->port);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_enable"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetRateLimit(t_Handle h_FmPort, t_FmPortRateLimit *p_RateLimit)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ uint8_t factor, countUnitBit;
-+ uint16_t baseGran;
-+ struct fman_port_rate_limiter params;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_TX_10G):
-+ case (e_FM_PORT_TYPE_TX):
-+ baseGran = BMI_RATE_LIMIT_GRAN_TX;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ baseGran = BMI_RATE_LIMIT_GRAN_OP;
-+ break;
-+ default:
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Tx and Offline parsing ports only"));
-+ }
-+
-+ countUnitBit = (uint8_t)FmGetTimeStampScale(p_FmPort->h_Fm); /* timestamp scale, in nanosecond units */
-+ /* normally, we use 1 usec as the reference count */
-+ factor = 1;
-+ /* if ratelimit is too small for a 1usec factor, multiply the factor */
-+ while (p_RateLimit->rateLimit < baseGran / factor)
-+ {
-+ if (countUnitBit == 31)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too small"));
-+
-+ countUnitBit++;
-+ factor <<= 1;
-+ }
-+ /* if ratelimit is too large for a 1usec factor, it is also larger than max rate*/
-+ if (p_RateLimit->rateLimit
-+ > ((uint32_t)baseGran * (1 << 10) * (uint32_t)factor))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too large"));
-+
-+ if (!p_RateLimit->maxBurstSize
-+ || (p_RateLimit->maxBurstSize > BMI_RATE_LIMIT_MAX_BURST_SIZE))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("maxBurstSize must be between 1K and %dk", BMI_RATE_LIMIT_MAX_BURST_SIZE));
-+
-+ params.count_1micro_bit = (uint8_t)FmGetTimeStampScale(p_FmPort->h_Fm);
-+ params.high_burst_size_gran = FALSE;
-+ params.burst_size = p_RateLimit->maxBurstSize;
-+ params.rate = p_RateLimit->rateLimit;
-+ params.rate_factor = E_FMAN_PORT_RATE_DOWN_NONE;
-+
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ {
-+#ifndef FM_NO_ADVANCED_RATE_LIMITER
-+
-+ if ((p_FmPort->fmRevInfo.majorRev == 4)
-+ || (p_FmPort->fmRevInfo.majorRev >= 6))
-+ {
-+ params.high_burst_size_gran = TRUE;
-+ }
-+ else
-+#endif /* ! FM_NO_ADVANCED_RATE_LIMITER */
-+ {
-+ if (p_RateLimit->rateLimitDivider
-+ != e_FM_PORT_DUAL_RATE_LIMITER_NONE)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("FM_PORT_ConfigDualRateLimitScaleDown"));
-+
-+ if (p_RateLimit->maxBurstSize % 1000)
-+ {
-+ p_RateLimit->maxBurstSize =
-+ (uint16_t)((p_RateLimit->maxBurstSize / 1000) + 1);
-+ DBG(WARNING, ("rateLimit.maxBurstSize rounded up to %d", (p_RateLimit->maxBurstSize/1000+1)*1000));
-+ }
-+ else
-+ p_RateLimit->maxBurstSize = (uint16_t)(p_RateLimit->maxBurstSize
-+ / 1000);
-+ }
-+ params.rate_factor =
-+ (enum fman_port_rate_limiter_scale_down)p_RateLimit->rateLimitDivider;
-+ params.burst_size = p_RateLimit->maxBurstSize;
-+ }
-+
-+ err = fman_port_set_rate_limiter(&p_FmPort->port, &params);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_rate_limiter"));
-+
-+ return E_OK;
-+}
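-+
-+/* Editor's note (illustrative arithmetic, not from the original source): the
-+ * loop above widens the time base until the requested rate fits the limiter's
-+ * granularity: while rateLimit < baseGran / factor, "factor" is doubled and
-+ * countUnitBit is moved up one bit (doubling the reference period). For a
-+ * hypothetical baseGran of 8000 and a requested rate of 1500, factor grows
-+ * 1 -> 2 -> 4 -> 8 (countUnitBit += 3), since 8000/8 = 1000 <= 1500. Anything
-+ * above baseGran * 1024 * factor is then rejected as too large. */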
-+
-+t_Error FM_PORT_DeleteRateLimit(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Tx and Offline parsing ports only"));
-+
-+ err = fman_port_delete_rate_limiter(&p_FmPort->port);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_rate_limiter"));
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetPfcPrioritiesMappingToQmanWQ(t_Handle h_FmPort, uint8_t prio,
-+ uint8_t wq)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ uint32_t tmpReg;
-+ uint32_t wqTmpReg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_TX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_TX_10G))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("PFC mapping is available for Tx ports only"));
-+
-+ if (prio > 7)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
-+ ("PFC priority (%d) is out of range (0-7)", prio));
-+ if (wq > 7)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
-+ ("WQ (%d) is out of range (0-7)", wq));
-+
-+ tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpfcm[0]);
-+ tmpReg &= ~(0xf << ((7 - prio) * 4));
-+ wqTmpReg = ((uint32_t)wq << ((7 - prio) * 4));
-+ tmpReg |= wqTmpReg;
-+
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpfcm[0],
-+ tmpReg);
-+
-+ return E_OK;
-+}
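-+
-+/* Editor's note (illustrative arithmetic, not from the original source):
-+ * fmbm_tpfcm[0] holds one 4-bit WQ field per PFC priority, with priority 0 in
-+ * the most significant nibble. Mapping priority 2 to WQ 5, for example, clears
-+ * bits (7 - 2) * 4 = 20..23 (mask 0x00F00000) and writes 5 there, leaving the
-+ * other priorities' nibbles untouched. */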
-+
-+t_Error FM_PORT_SetFrameQueueCounters(t_Handle h_FmPort, bool enable)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ fman_port_set_queue_cnt_mode(&p_FmPort->port, enable);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetPerformanceCounters(t_Handle h_FmPort, bool enable)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ err = fman_port_set_perf_cnt_mode(&p_FmPort->port, enable);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_perf_cnt_mode"));
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetPerformanceCountersParams(
-+ t_Handle h_FmPort, t_FmPortPerformanceCnt *p_FmPortPerformanceCnt)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ struct fman_port_perf_cnt_params params;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+
-+ /* check parameters */
-+ if (!p_FmPortPerformanceCnt->taskCompVal
-+ || (p_FmPortPerformanceCnt->taskCompVal > p_FmPort->tasks.num))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("taskCompVal (%d) has to be in the range of 1 - %d (current value)!", p_FmPortPerformanceCnt->taskCompVal, p_FmPort->tasks.num));
-+ if (!p_FmPortPerformanceCnt->dmaCompVal
-+ || (p_FmPortPerformanceCnt->dmaCompVal > p_FmPort->openDmas.num))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("dmaCompVal (%d) has to be in the range of 1 - %d (current value)!", p_FmPortPerformanceCnt->dmaCompVal, p_FmPort->openDmas.num));
-+ if (!p_FmPortPerformanceCnt->fifoCompVal
-+ || (p_FmPortPerformanceCnt->fifoCompVal > p_FmPort->fifoBufs.num))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("fifoCompVal (%d) has to be in the range of 256 - %d (current value)!", p_FmPortPerformanceCnt->fifoCompVal, p_FmPort->fifoBufs.num));
-+ if (p_FmPortPerformanceCnt->fifoCompVal % BMI_FIFO_UNITS)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("fifoCompVal (%d) has to be divisible by %d", p_FmPortPerformanceCnt->fifoCompVal, BMI_FIFO_UNITS));
-+
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ if (!p_FmPortPerformanceCnt->queueCompVal
-+ || (p_FmPortPerformanceCnt->queueCompVal
-+ > MAX_PERFORMANCE_RX_QUEUE_COMP))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("performanceCnt.queueCompVal for Rx has to be in the range of 1 - %d", MAX_PERFORMANCE_RX_QUEUE_COMP));
-+ break;
-+ case (e_FM_PORT_TYPE_TX_10G):
-+ case (e_FM_PORT_TYPE_TX):
-+ if (!p_FmPortPerformanceCnt->queueCompVal
-+ || (p_FmPortPerformanceCnt->queueCompVal
-+ > MAX_PERFORMANCE_TX_QUEUE_COMP))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("performanceCnt.queueCompVal for Tx has to be in the range of 1 - %d", MAX_PERFORMANCE_TX_QUEUE_COMP));
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
-+ if (p_FmPortPerformanceCnt->queueCompVal)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("performanceCnt.queueCompVal is not relevant for H/O ports."));
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ }
-+
-+ params.task_val = p_FmPortPerformanceCnt->taskCompVal;
-+ params.queue_val = p_FmPortPerformanceCnt->queueCompVal;
-+ params.dma_val = p_FmPortPerformanceCnt->dmaCompVal;
-+ params.fifo_val = p_FmPortPerformanceCnt->fifoCompVal;
-+
-+ err = fman_port_set_perf_cnt_params(&p_FmPort->port, &params);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_perf_cnt_params"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_AnalyzePerformanceParams(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_FmPortPerformanceCnt currParams, savedParams;
-+ t_Error err;
-+ bool underTest, failed = FALSE;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+
-+ XX_Print("Analyzing Performance parameters for port (type %d, id%d)\n",
-+ p_FmPort->portType, p_FmPort->portId);
-+
-+ currParams.taskCompVal = (uint8_t)p_FmPort->tasks.num;
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND))
-+ currParams.queueCompVal = 0;
-+ else
-+ currParams.queueCompVal = 1;
-+ currParams.dmaCompVal = (uint8_t)p_FmPort->openDmas.num;
-+ currParams.fifoCompVal = p_FmPort->fifoBufs.num;
-+
-+ FM_PORT_SetPerformanceCounters(p_FmPort, FALSE);
-+ ClearPerfCnts(p_FmPort);
-+ if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams))
-+ != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ FM_PORT_SetPerformanceCounters(p_FmPort, TRUE);
-+ XX_UDelay(1000000);
-+ FM_PORT_SetPerformanceCounters(p_FmPort, FALSE);
-+ if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL))
-+ {
-+ XX_Print(
-+ "Max num of defined port tasks (%d) utilized - Please enlarge\n",
-+ p_FmPort->tasks.num);
-+ failed = TRUE;
-+ }
-+ if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL))
-+ {
-+ XX_Print(
-+ "Max num of defined port openDmas (%d) utilized - Please enlarge\n",
-+ p_FmPort->openDmas.num);
-+ failed = TRUE;
-+ }
-+ if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL))
-+ {
-+ XX_Print(
-+ "Max size of defined port fifo (%d) utilized - Please enlarge\n",
-+ p_FmPort->fifoBufs.num);
-+ failed = TRUE;
-+ }
-+ if (failed)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ memset(&savedParams, 0, sizeof(savedParams));
-+ while (TRUE)
-+ {
-+ underTest = FALSE;
-+ if ((currParams.taskCompVal != 1) && !savedParams.taskCompVal)
-+ {
-+ currParams.taskCompVal--;
-+ underTest = TRUE;
-+ }
-+ if ((currParams.dmaCompVal != 1) && !savedParams.dmaCompVal)
-+ {
-+ currParams.dmaCompVal--;
-+ underTest = TRUE;
-+ }
-+ if ((currParams.fifoCompVal != BMI_FIFO_UNITS)
-+ && !savedParams.fifoCompVal)
-+ {
-+ currParams.fifoCompVal -= BMI_FIFO_UNITS;
-+ underTest = TRUE;
-+ }
-+ if (!underTest)
-+ break;
-+
-+ ClearPerfCnts(p_FmPort);
-+ if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams))
-+ != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ FM_PORT_SetPerformanceCounters(p_FmPort, TRUE);
-+ XX_UDelay(1000000);
-+ FM_PORT_SetPerformanceCounters(p_FmPort, FALSE);
-+
-+ if (!savedParams.taskCompVal
-+ && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL))
-+ savedParams.taskCompVal = (uint8_t)(currParams.taskCompVal + 2);
-+ if (!savedParams.dmaCompVal
-+ && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL))
-+ savedParams.dmaCompVal = (uint8_t)(currParams.dmaCompVal + 2);
-+ if (!savedParams.fifoCompVal
-+ && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL))
-+ savedParams.fifoCompVal = currParams.fifoCompVal
-+ + (2 * BMI_FIFO_UNITS);
-+ }
-+
-+ XX_Print("best vals: tasks %d, dmas %d, fifos %d\n",
-+ savedParams.taskCompVal, savedParams.dmaCompVal,
-+ savedParams.fifoCompVal);
-+ return E_OK;
-+}
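-+
-+/* Editor's note (summary, not from the original source): the routine above
-+ * first checks that the currently assigned tasks/open DMAs/FIFO are not fully
-+ * utilized over a one-second window, then repeatedly lowers each comparison
-+ * threshold (tasks and DMAs by 1, FIFO by one BMI_FIFO_UNITS step) until its
-+ * utilization counter fires, and records that level plus a small margin
-+ * (+2, or +2 * BMI_FIFO_UNITS for the FIFO) as the suggested value. */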
-+
-+t_Error FM_PORT_SetStatisticsCounters(t_Handle h_FmPort, bool enable)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ err = fman_port_set_stats_cnt_mode(&p_FmPort->port, enable);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_stats_cnt_mode"));
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetErrorsRoute(t_Handle h_FmPort, fmPortFrameErrSelect_t errs)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ volatile uint32_t *p_ErrDiscard = NULL;
-+ int err;
-+
-+ UNUSED(p_ErrDiscard);
-+ err = fman_port_set_err_mask(&p_FmPort->port, (uint32_t)errs);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_err_mask"));
-+
-+#ifdef FM_ERROR_VSP_NO_MATCH_SW006
-+ if (p_FmPort->fmRevInfo.majorRev >= 6)
-+ {
-+ t_FmPcdCtrlParamsPage *p_ParamsPage;
-+
-+ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
-+ (void**)&p_ParamsPage);
-+ ASSERT_COND(p_ParamsPage);
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_ErrDiscard =
-+ &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_ErrDiscard =
-+ &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm;
-+ break;
-+ default:
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+ }
-+ WRITE_UINT32(p_ParamsPage->errorsDiscardMask,
-+ GET_UINT32(*p_ErrDiscard) | errs);
-+ }
-+#endif /* FM_ERROR_VSP_NO_MATCH_SW006 */
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId,
-+ bool enable)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(poolId<BM_MAX_NUM_OF_POOLS, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ err = fman_port_set_bpool_cnt_mode(&p_FmPort->port, poolId, enable);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_set_bpool_cnt_mode"));
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_GetBmiCounters(t_Handle h_FmPort, t_FmPortBmiStats *p_BmiStats)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)){
-+ p_BmiStats->cntCycle =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE);
-+ /* fmbm_rccn */
-+ p_BmiStats->cntTaskUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL);
-+ /* fmbm_rtuc */
-+ p_BmiStats->cntQueueUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL);
-+ /* fmbm_rrquc */
-+ p_BmiStats->cntDmaUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL);
-+ /* fmbm_rduc */
-+ p_BmiStats->cntFifoUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL);
-+ /* fmbm_rfuc */
-+ p_BmiStats->cntRxPauseActivation =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION);
-+ /* fmbm_rpac */
-+ p_BmiStats->cntFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);
-+ /* fmbm_rfrc */
-+ p_BmiStats->cntDiscardFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME);
-+ /* fmbm_rfdc */
-+ p_BmiStats->cntDeallocBuf =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF);
-+ /* fmbm_rbdc */
-+ p_BmiStats->cntRxBadFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_BAD_FRAME);
-+ /* fmbm_rfbc */
-+ p_BmiStats->cntRxLargeFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LARGE_FRAME);
-+ /* fmbm_rlfc */
-+ p_BmiStats->cntRxFilterFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_FILTER_FRAME);
-+ /* fmbm_rffc */
-+ p_BmiStats->cntRxListDmaErr =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR);
-+ /* fmbm_rfldec */
-+ p_BmiStats->cntRxOutOfBuffersDiscard =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD);
-+ /* fmbm_rodc */
-+ p_BmiStats->cntWredDiscard = 0;
-+ p_BmiStats->cntLengthErr = 0;
-+ p_BmiStats->cntUnsupportedFormat = 0;
-+ }
-+ else if ((p_FmPort->portType == e_FM_PORT_TYPE_TX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)){
-+ p_BmiStats->cntCycle =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE);
-+ /* fmbm_tccn */
-+ p_BmiStats->cntTaskUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL);
-+ /* fmbm_ttuc */
-+ p_BmiStats->cntQueueUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL);
-+ /* fmbm_ttcquc */
-+ p_BmiStats->cntDmaUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL);
-+ /* fmbm_tduc */
-+ p_BmiStats->cntFifoUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL);
-+ /* fmbm_tfuc */
-+ p_BmiStats->cntRxPauseActivation = 0;
-+ p_BmiStats->cntFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);
-+ /* fmbm_tfrc */
-+ p_BmiStats->cntDiscardFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME);
-+ /* fmbm_tfdc */
-+ p_BmiStats->cntDeallocBuf =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF);
-+ /* fmbm_tbdc */
-+ p_BmiStats->cntRxBadFrame = 0;
-+ p_BmiStats->cntRxLargeFrame = 0;
-+ p_BmiStats->cntRxFilterFrame = 0;
-+ p_BmiStats->cntRxListDmaErr = 0;
-+ p_BmiStats->cntRxOutOfBuffersDiscard = 0;
-+ p_BmiStats->cntWredDiscard = 0;
-+ p_BmiStats->cntLengthErr =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_LENGTH_ERR);
-+ /* fmbm_tfledc */
-+ p_BmiStats->cntUnsupportedFormat =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT);
-+ /* fmbm_tfufdc */
-+ }
-+ else if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) {
-+ p_BmiStats->cntCycle =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_CYCLE);
-+ /* fmbm_occn */
-+ p_BmiStats->cntTaskUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL);
-+ /* fmbm_otuc */
-+ p_BmiStats->cntQueueUtil = 0;
-+ p_BmiStats->cntDmaUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL);
-+ /* fmbm_oduc */
-+ p_BmiStats->cntFifoUtil =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL);
-+ /* fmbm_ofuc*/
-+ p_BmiStats->cntRxPauseActivation = 0;
-+ p_BmiStats->cntFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);
-+ /* fmbm_ofrc */
-+ p_BmiStats->cntDiscardFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DISCARD_FRAME);
-+ /* fmbm_ofdc */
-+ p_BmiStats->cntDeallocBuf =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_DEALLOC_BUF);
-+ /* fmbm_obdc*/
-+ p_BmiStats->cntRxBadFrame = 0;
-+ p_BmiStats->cntRxLargeFrame = 0;
-+ p_BmiStats->cntRxFilterFrame =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_FILTER_FRAME);
-+ /* fmbm_offc */
-+ p_BmiStats->cntRxListDmaErr =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR);
-+ /* fmbm_ofldec */
-+ p_BmiStats->cntRxOutOfBuffersDiscard =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD);
-+ /* fmbm_rodc */
-+ p_BmiStats->cntWredDiscard =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_WRED_DISCARD);
-+ /* fmbm_ofwdc */
-+ p_BmiStats->cntLengthErr =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_LENGTH_ERR);
-+ /* fmbm_ofledc */
-+ p_BmiStats->cntUnsupportedFormat =
-+ FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT);
-+ /* fmbm_ofufdc */
-+ }
-+ return E_OK;
-+}
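-+
-+/* Editor's note: the helper below is an illustrative sketch added for this
-+ * write-up and is not part of the original driver code. It shows how a
-+ * caller might read the BMI statistics block filled in above; h_RxPort is
-+ * assumed to be a handle obtained from FM_PORT_Init() for an Rx port. */
-+static void example_dump_rx_bmi_stats(t_Handle h_RxPort)
-+{
-+ t_FmPortBmiStats stats;
-+
-+ memset(&stats, 0, sizeof(stats));
-+ if (FM_PORT_GetBmiCounters(h_RxPort, &stats) == E_OK)
-+ XX_Print("rx: frames=%u discards=%u no-buffer-discards=%u\n",
-+ stats.cntFrame, stats.cntDiscardFrame,
-+ stats.cntRxOutOfBuffersDiscard);
-+}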
-+
-+uint32_t FM_PORT_GetCounter(t_Handle h_FmPort, e_FmPortCounters counter)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ bool bmiCounter = FALSE;
-+ enum fman_port_stats_counters statsType;
-+ enum fman_port_perf_counters perfType;
-+ enum fman_port_qmi_counters queueType;
-+ bool isStats;
-+ t_Error errCode;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0);
-+
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
-+ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
-+ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
-+ /* check that counter is available for the port type */
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_STATE,
-+ ("Requested counter is not available for Rx ports"));
-+ return 0;
-+ }
-+ bmiCounter = FALSE;
-+ break;
-+ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
-+ bmiCounter = FALSE;
-+ break;
-+ default: /* BMI counters (or error - will be checked in BMI routine )*/
-+ bmiCounter = TRUE;
-+ break;
-+ }
-+
-+ if (bmiCounter)
-+ {
-+ errCode = BmiPortCheckAndGetCounterType(p_FmPort, counter, &statsType,
-+ &perfType, &isStats);
-+ if (errCode != E_OK)
-+ {
-+ REPORT_ERROR(MINOR, errCode, NO_MSG);
-+ return 0;
-+ }
-+ if (isStats)
-+ return fman_port_get_stats_counter(&p_FmPort->port, statsType);
-+ else
-+ return fman_port_get_perf_counter(&p_FmPort->port, perfType);
-+ }
-+ else /* QMI counter */
-+ {
-+ /* check that counters are enabled */
-+ if (!(GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pnc)
-+ & QMI_PORT_CFG_EN_COUNTERS))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled"));
-+ return 0;
-+ }
-+
-+ /* Set counter */
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
-+ queueType = E_FMAN_PORT_ENQ_TOTAL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
-+ queueType = E_FMAN_PORT_DEQ_TOTAL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
-+ queueType = E_FMAN_PORT_DEQ_FROM_DFLT;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
-+ queueType = E_FMAN_PORT_DEQ_CONFIRM;
-+ break;
-+ default:
-+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available"));
-+ return 0;
-+ }
-+
-+ return fman_port_get_qmi_counter(&p_FmPort->port, queueType);
-+ }
-+
-+ return 0;
-+}
-+
-+t_Error FM_PORT_ModifyCounter(t_Handle h_FmPort, e_FmPortCounters counter,
-+ uint32_t value)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ bool bmiCounter = FALSE;
-+ enum fman_port_stats_counters statsType;
-+ enum fman_port_perf_counters perfType;
-+ enum fman_port_qmi_counters queueType;
-+ bool isStats;
-+ t_Error errCode;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
-+ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
-+ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
-+ /* check that counter is available for the port type */
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX)
-+ || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ RETURN_ERROR(
-+ MINOR, E_INVALID_STATE,
-+ ("Requested counter is not available for Rx ports"));
-+ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
-+ bmiCounter = FALSE;
-+ break;
-+ default: /* BMI counters (or error - will be checked in BMI routine )*/
-+ bmiCounter = TRUE;
-+ break;
-+ }
-+
-+ if (bmiCounter)
-+ {
-+ errCode = BmiPortCheckAndGetCounterType(p_FmPort, counter, &statsType,
-+ &perfType, &isStats);
-+ if (errCode != E_OK)
-+ {
-+ RETURN_ERROR(MINOR, errCode, NO_MSG);
-+ }
-+ if (isStats)
-+ fman_port_set_stats_counter(&p_FmPort->port, statsType, value);
-+ else
-+ fman_port_set_perf_counter(&p_FmPort->port, perfType, value);
-+ }
-+ else /* QMI counter */
-+ {
-+ /* check that counters are enabled */
-+ if (!(GET_UINT32(p_FmPort->port.qmi_regs->fmqm_pnc)
-+ & QMI_PORT_CFG_EN_COUNTERS))
-+ {
-+ RETURN_ERROR(MINOR, E_INVALID_STATE,
-+ ("Requested counter was not enabled"));
-+ }
-+
-+ /* Set counter */
-+ switch (counter)
-+ {
-+ case (e_FM_PORT_COUNTERS_ENQ_TOTAL):
-+ queueType = E_FMAN_PORT_ENQ_TOTAL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DEQ_TOTAL):
-+ queueType = E_FMAN_PORT_DEQ_TOTAL;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
-+ queueType = E_FMAN_PORT_DEQ_FROM_DFLT;
-+ break;
-+ case (e_FM_PORT_COUNTERS_DEQ_CONFIRM):
-+ queueType = E_FMAN_PORT_DEQ_CONFIRM;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Requested counter is not available"));
-+ }
-+
-+ fman_port_set_qmi_counter(&p_FmPort->port, queueType, value);
-+ }
-+
-+ return E_OK;
-+}
-+
-+uint32_t FM_PORT_GetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for non-Rx ports"));
-+ return 0;
-+ }
-+ return fman_port_get_bpool_counter(&p_FmPort->port, poolId);
-+}
-+
-+t_Error FM_PORT_ModifyAllocBufCounter(t_Handle h_FmPort, uint8_t poolId,
-+ uint32_t value)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G))
-+ RETURN_ERROR( MINOR, E_INVALID_STATE,
-+ ("Requested counter is not available for non-Rx ports"));
-+
-+ fman_port_set_bpool_counter(&p_FmPort->port, poolId, value);
-+ return E_OK;
-+}
-+
-+bool FM_PORT_IsStalled(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err;
-+ bool isStalled;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, FALSE);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE,
-+ FALSE);
-+
-+ err = FmIsPortStalled(p_FmPort->h_Fm, p_FmPort->hardwarePortId, &isStalled);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return TRUE;
-+ }
-+ return isStalled;
-+}
-+
-+t_Error FM_PORT_ReleaseStalled(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ return FmResumeStalledPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId);
-+}
-+
-+t_Error FM_PORT_SetRxL4ChecksumVerify(t_Handle h_FmPort, bool l4Checksum)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx ports only"));
-+
-+ if (l4Checksum)
-+ err = fman_port_modify_rx_fd_bits(
-+ &p_FmPort->port, (uint8_t)(BMI_PORT_RFNE_FRWD_DCL4C >> 24),
-+ TRUE);
-+ else
-+ err = fman_port_modify_rx_fd_bits(
-+ &p_FmPort->port, (uint8_t)(BMI_PORT_RFNE_FRWD_DCL4C >> 24),
-+ FALSE);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_modify_rx_fd_bits"));
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+/* API Run-time PCD Control unit functions */
-+/*****************************************************************************/
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FM_PORT_VSPAlloc(t_Handle h_FmPort, t_FmPortVSPAllocParams *p_VSPParams)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+ volatile uint32_t *p_BmiStorageProfileId = NULL, *p_BmiVspe = NULL;
-+ uint32_t tmpReg = 0, tmp = 0;
-+ uint16_t hwStoragePrflId;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->h_Fm, E_INVALID_HANDLE);
-+ /*for numOfProfiles = 0 don't call this function*/
-+ SANITY_CHECK_RETURN_ERROR(p_VSPParams->numOfProfiles, E_INVALID_VALUE);
-+ /*dfltRelativeId should be in the range of numOfProfiles*/
-+ SANITY_CHECK_RETURN_ERROR(
-+ p_VSPParams->dfltRelativeId < p_VSPParams->numOfProfiles,
-+ E_INVALID_VALUE);
-+ /*p_FmPort should be from Rx type or OP*/
-+ SANITY_CHECK_RETURN_ERROR(
-+ ((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)),
-+ E_INVALID_VALUE);
-+ /*port should be disabled*/
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->enabled, E_INVALID_STATE);
-+ /* if it is called for an Rx port, the relevant Tx port must be passed (initialized) too, and that port must be disabled */
-+ SANITY_CHECK_RETURN_ERROR(
-+ ((p_VSPParams->h_FmTxPort && !((t_FmPort *)(p_VSPParams->h_FmTxPort))->enabled) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)),
-+ E_INVALID_VALUE);
-+ /*should be called before SetPCD - this port should be without PCD*/
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->pcdEngines, E_INVALID_STATE);
-+
-+ /*alloc window of VSPs for this port*/
-+ err = FmVSPAllocForPort(p_FmPort->h_Fm, p_FmPort->portType,
-+ p_FmPort->portId, p_VSPParams->numOfProfiles);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ /*get absolute VSP ID for dfltRelative*/
-+ err = FmVSPGetAbsoluteProfileId(p_FmPort->h_Fm, p_FmPort->portType,
-+ p_FmPort->portId,
-+ p_VSPParams->dfltRelativeId,
-+ &hwStoragePrflId);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ /*fill relevant registers for p_FmPort and relative TxPort in the case p_FmPort from Rx type*/
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiStorageProfileId =
-+ &(((t_FmPort *)(p_VSPParams->h_FmTxPort))->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfqid);
-+ p_BmiVspe =
-+ &(((t_FmPort *)(p_VSPParams->h_FmTxPort))->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfne);
-+
-+ tmpReg = GET_UINT32(*p_BmiStorageProfileId) & ~BMI_SP_ID_MASK;
-+ tmpReg |= (uint32_t)hwStoragePrflId << BMI_SP_ID_SHIFT;
-+ WRITE_UINT32(*p_BmiStorageProfileId, tmpReg);
-+
-+ tmpReg = GET_UINT32(*p_BmiVspe);
-+ WRITE_UINT32(*p_BmiVspe, tmpReg | BMI_SP_EN);
-+
-+ p_BmiStorageProfileId =
-+ &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfqid;
-+ p_BmiVspe = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpp;
-+ hwStoragePrflId = p_VSPParams->dfltRelativeId;
-+ break;
-+
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ tmpReg = NIA_ENG_BMI | NIA_BMI_AC_FETCH_ALL_FRAME;
-+ WRITE_UINT32( p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn,
-+ tmpReg);
-+
-+ p_BmiStorageProfileId =
-+ &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofqid;
-+ p_BmiVspe = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opp;
-+ tmp |= BMI_EBD_EN;
-+ break;
-+
-+ default:
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+ }
-+
-+ p_FmPort->vspe = TRUE;
-+ p_FmPort->dfltRelativeId = p_VSPParams->dfltRelativeId;
-+
-+ tmpReg = GET_UINT32(*p_BmiStorageProfileId) & ~BMI_SP_ID_MASK;
-+ tmpReg |= (uint32_t)hwStoragePrflId << BMI_SP_ID_SHIFT;
-+ WRITE_UINT32(*p_BmiStorageProfileId, tmpReg);
-+
-+ tmpReg = GET_UINT32(*p_BmiVspe);
-+ WRITE_UINT32(*p_BmiVspe, tmpReg | BMI_SP_EN | tmp);
-+ return E_OK;
-+}
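-+
-+/* Editor's note: the helper below is an illustrative sketch added for this
-+ * write-up, not part of the original driver code. It allocates a window of
-+ * two virtual storage profiles on a disabled Rx port before FM_PORT_SetPCD();
-+ * h_RxPort and h_TxPort are assumed to be handles from FM_PORT_Init(). */
-+static t_Error example_alloc_vsp_window(t_Handle h_RxPort, t_Handle h_TxPort)
-+{
-+ t_FmPortVSPAllocParams vspParams;
-+
-+ memset(&vspParams, 0, sizeof(vspParams));
-+ vspParams.numOfProfiles = 2; /* size of the VSP window for this port */
-+ vspParams.dfltRelativeId = 0; /* default profile, must be < numOfProfiles */
-+ vspParams.h_FmTxPort = h_TxPort; /* required for Rx ports, unused for OP */
-+ return FM_PORT_VSPAlloc(h_RxPort, &vspParams);
-+}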
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+t_Error FM_PORT_PcdPlcrAllocProfiles(t_Handle h_FmPort, uint16_t numOfProfiles)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+
-+ p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm);
-+ ASSERT_COND(p_FmPort->h_FmPcd);
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ if (numOfProfiles)
-+ {
-+ err = FmPcdPlcrAllocProfiles(p_FmPort->h_FmPcd,
-+ p_FmPort->hardwarePortId, numOfProfiles);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ /* set the port handle within the PCD policer, even if no profiles defined */
-+ FmPcdPortRegister(p_FmPort->h_FmPcd, h_FmPort, p_FmPort->hardwarePortId);
-+
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_PcdPlcrFreeProfiles(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdPlcrFreeProfiles(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId);
-+
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_PcdKgModifyInitialScheme(t_Handle h_FmPort,
-+ t_FmPcdKgSchemeSelect *p_FmPcdKgScheme)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ volatile uint32_t *p_BmiHpnia = NULL;
-+ uint32_t tmpReg;
-+ uint8_t relativeSchemeId;
-+ uint8_t physicalSchemeId;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG,
-+ E_INVALID_STATE);
-+
-+ tmpReg = (uint32_t)((p_FmPort->pcdEngines & FM_PCD_CC) ? NIA_KG_CC_EN : 0);
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiHpnia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_BmiHpnia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne;
-+ break;
-+ default:
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+ }
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ /* if we want to change to direct scheme, we need to check that this scheme is valid */
-+ if (p_FmPcdKgScheme->direct)
-+ {
-+ physicalSchemeId = FmPcdKgGetSchemeId(p_FmPcdKgScheme->h_DirectScheme);
-+ /* check that this scheme is bound to this port */
-+ if (!(p_FmPort->schemesPerPortVector
-+ & (uint32_t)(1 << (31 - (uint32_t)physicalSchemeId))))
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_STATE,
-+ ("called with a scheme that is not bound to this port"));
-+ }
-+
-+ relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPort->h_FmPcd,
-+ physicalSchemeId);
-+ if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
-+ ("called with invalid Scheme "));
-+ }
-+
-+ if (!FmPcdKgIsSchemeValidSw(p_FmPcdKgScheme->h_DirectScheme))
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("called with uninitialized Scheme "));
-+ }
-+
-+ WRITE_UINT32(
-+ *p_BmiHpnia,
-+ NIA_ENG_KG | tmpReg | NIA_KG_DIRECT | (uint32_t)physicalSchemeId);
-+ }
-+ else
-+ /* change to indirect scheme */
-+ WRITE_UINT32(*p_BmiHpnia, NIA_ENG_KG | tmpReg);
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_PcdPlcrModifyInitialProfile(t_Handle h_FmPort,
-+ t_Handle h_Profile)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ volatile uint32_t *p_BmiNia;
-+ volatile uint32_t *p_BmiHpnia;
-+ uint32_t tmpReg;
-+ uint16_t absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_PLCR,
-+ E_INVALID_STATE);
-+
-+ /* check relevance of this routine - only when policer is used
-+ directly after BMI or Parser */
-+ if ((p_FmPort->pcdEngines & FM_PCD_KG)
-+ || (p_FmPort->pcdEngines & FM_PCD_CC))
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("relevant only when PCD support mode is e_FM_PCD_SUPPORT_PLCR_ONLY or e_FM_PCD_SUPPORT_PRS_AND_PLCR"));
-+
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
-+ p_BmiHpnia = &p_FmPort->port.bmi_regs->rx.fmbm_rfpne;
-+ tmpReg = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
-+ p_BmiHpnia = &p_FmPort->port.bmi_regs->oh.fmbm_ofpne;
-+ tmpReg = 0;
-+ break;
-+ default:
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+ }
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ if (!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId))
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Invalid profile"));
-+ }
-+
-+ tmpReg |= (uint32_t)(NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId);
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */
-+ {
-+ /* update BMI HPNIA */
-+ WRITE_UINT32(*p_BmiHpnia, tmpReg);
-+ }
-+ else /* e_FM_PCD_SUPPORT_PLCR_ONLY */
-+ {
-+ /* rfne may contain FDCS bits, so first we read them. */
-+ tmpReg |= (GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK);
-+ /* update BMI NIA */
-+ WRITE_UINT32(*p_BmiNia, tmpReg);
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_PcdCcModifyTree(t_Handle h_FmPort, t_Handle h_CcTree)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+ volatile uint32_t *p_BmiCcBase = NULL;
-+ volatile uint32_t *p_BmiNia = NULL;
-+ uint32_t ccTreePhysOffset;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(h_CcTree, E_INVALID_HANDLE);
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for non-independent mode ports only"));
-+
-+ /* get PCD registers pointers */
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->rx.fmbm_rfne;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_BmiNia = &p_FmPort->port.bmi_regs->oh.fmbm_ofne;
-+ break;
-+ default:
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+ }
-+
-+ /* check that current NIA is BMI to BMI */
-+ if ((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK)
-+ != GET_NIA_BMI_AC_ENQ_FRAME(p_FmPort->h_FmPcd))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("may be called only for ports in BMI-to-BMI state."));
-+
-+ if (p_FmPort->pcdEngines & FM_PCD_CC)
-+ {
-+ if (p_FmPort->h_IpReassemblyManip)
-+ {
-+ err = FmPcdCcTreeAddIPR(p_FmPort->h_FmPcd, h_CcTree, NULL,
-+ p_FmPort->h_IpReassemblyManip, FALSE);
-+ if (err != E_OK)
-+ {
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+ else
-+ if (p_FmPort->h_CapwapReassemblyManip)
-+ {
-+ err = FmPcdCcTreeAddCPR(p_FmPort->h_FmPcd, h_CcTree, NULL,
-+ p_FmPort->h_CapwapReassemblyManip,
-+ FALSE);
-+ if (err != E_OK)
-+ {
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+ switch (p_FmPort->portType)
-+ {
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ case (e_FM_PORT_TYPE_RX):
-+ p_BmiCcBase = &p_FmPort->port.bmi_regs->rx.fmbm_rccb;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ p_BmiCcBase = &p_FmPort->port.bmi_regs->oh.fmbm_occb;
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
-+ }
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+ err = FmPcdCcBindTree(p_FmPort->h_FmPcd, NULL, h_CcTree,
-+ &ccTreePhysOffset, h_FmPort);
-+ if (err)
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset);
-+
-+ p_FmPort->ccTreeId = h_CcTree;
-+ RELEASE_LOCK(p_FmPort->lock);
-+ }
-+ else
-+ RETURN_ERROR( MAJOR, E_INVALID_STATE,
-+ ("Coarse Classification not defined for this port."));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_AttachPCD(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for non-independent mode ports only"));
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ if (p_FmPort->h_ReassemblyTree)
-+ p_FmPort->pcdEngines |= FM_PCD_CC;
-+
-+ err = AttachPCD(h_FmPort);
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return err;
-+}
-+
-+t_Error FM_PORT_DetachPCD(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for non-independent mode ports only"));
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = DetachPCD(h_FmPort);
-+ if (err != E_OK)
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (p_FmPort->h_ReassemblyTree)
-+ p_FmPort->pcdEngines &= ~FM_PCD_CC;
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetPCD(t_Handle h_FmPort, t_FmPortPcdParams *p_PcdParam)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+ t_FmPortPcdParams modifiedPcdParams, *p_PcdParams;
-+ t_FmPcdCcTreeParams *p_FmPcdCcTreeParams;
-+ t_FmPortPcdCcParams fmPortPcdCcParams;
-+ t_FmPortGetSetCcParams fmPortGetSetCcParams;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_PcdParam, E_NULL_POINTER);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for non-independent mode ports only"));
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm);
-+ ASSERT_COND(p_FmPort->h_FmPcd);
-+
-+ if (p_PcdParam->p_CcParams && !p_PcdParam->p_CcParams->h_CcTree)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
-+ ("Tree handle must be given if CC is required"));
-+
-+ memcpy(&modifiedPcdParams, p_PcdParam, sizeof(t_FmPortPcdParams));
-+ p_PcdParams = &modifiedPcdParams;
-+ if ((p_PcdParams->h_IpReassemblyManip)
-+#if (DPAA_VERSION >= 11)
-+ || (p_PcdParams->h_CapwapReassemblyManip)
-+#endif /* (DPAA_VERSION >= 11) */
-+ )
-+ {
-+ if ((p_PcdParams->pcdSupport != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG)
-+ && (p_PcdParams->pcdSupport
-+ != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC)
-+ && (p_PcdParams->pcdSupport
-+ != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR)
-+ && (p_PcdParams->pcdSupport
-+ != e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR))
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR( MAJOR, E_INVALID_STATE,
-+ ("pcdSupport must have KG for supporting Reassembly"));
-+ }
-+ p_FmPort->h_IpReassemblyManip = p_PcdParams->h_IpReassemblyManip;
-+#if (DPAA_VERSION >= 11)
-+ if ((p_PcdParams->h_IpReassemblyManip)
-+ && (p_PcdParams->h_CapwapReassemblyManip))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Either IP-R or CAPWAP-R is allowed"));
-+ if ((p_PcdParams->h_CapwapReassemblyManip)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("CAPWAP-R is allowed only on offline-port"));
-+ if (p_PcdParams->h_CapwapReassemblyManip)
-+ p_FmPort->h_CapwapReassemblyManip =
-+ p_PcdParams->h_CapwapReassemblyManip;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (!p_PcdParams->p_CcParams)
-+ {
-+ if (!((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG)
-+ || (p_PcdParams->pcdSupport
-+ == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR)))
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_STATE,
-+ ("PCD initialization structure is not consistent with pcdSupport"));
-+ }
-+
-+ /* No user-tree, need to build internal tree */
-+ p_FmPcdCcTreeParams = (t_FmPcdCcTreeParams*)XX_Malloc(
-+ sizeof(t_FmPcdCcTreeParams));
-+ if (!p_FmPcdCcTreeParams)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcTreeParams"));
-+ memset(p_FmPcdCcTreeParams, 0, sizeof(t_FmPcdCcTreeParams));
-+ p_FmPcdCcTreeParams->h_NetEnv = p_PcdParams->h_NetEnv;
-+ p_FmPort->h_ReassemblyTree = FM_PCD_CcRootBuild(
-+ p_FmPort->h_FmPcd, p_FmPcdCcTreeParams);
-+
-+ if (!p_FmPort->h_ReassemblyTree)
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ XX_Free(p_FmPcdCcTreeParams);
-+ RETURN_ERROR( MAJOR, E_INVALID_HANDLE,
-+ ("FM_PCD_CcBuildTree for Reassembly failed"));
-+ }
-+ if (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_KG)
-+ p_PcdParams->pcdSupport =
-+ e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC;
-+ else
-+ p_PcdParams->pcdSupport =
-+ e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR;
-+
-+ memset(&fmPortPcdCcParams, 0, sizeof(t_FmPortPcdCcParams));
-+ fmPortPcdCcParams.h_CcTree = p_FmPort->h_ReassemblyTree;
-+ p_PcdParams->p_CcParams = &fmPortPcdCcParams;
-+ XX_Free(p_FmPcdCcTreeParams);
-+ }
-+
-+ if (p_FmPort->h_IpReassemblyManip)
-+ err = FmPcdCcTreeAddIPR(p_FmPort->h_FmPcd,
-+ p_PcdParams->p_CcParams->h_CcTree,
-+ p_PcdParams->h_NetEnv,
-+ p_FmPort->h_IpReassemblyManip, TRUE);
-+#if (DPAA_VERSION >= 11)
-+ else
-+ if (p_FmPort->h_CapwapReassemblyManip)
-+ err = FmPcdCcTreeAddCPR(p_FmPort->h_FmPcd,
-+ p_PcdParams->p_CcParams->h_CcTree,
-+ p_PcdParams->h_NetEnv,
-+ p_FmPort->h_CapwapReassemblyManip,
-+ TRUE);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (err != E_OK)
-+ {
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+
-+ if (!FmPcdLockTryLockAll(p_FmPort->h_FmPcd))
-+ {
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+ DBG(TRACE, ("Try LockAll - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = SetPcd(h_FmPort, p_PcdParams);
-+ if (err)
-+ {
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if ((p_FmPort->pcdEngines & FM_PCD_PRS)
-+ && (p_PcdParams->p_PrsParams->includeInPrsStatistics))
-+ {
-+ err = FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd,
-+ p_FmPort->hardwarePortId, TRUE);
-+ if (err)
-+ {
-+ DeletePcd(p_FmPort);
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ p_FmPort->includeInPrsStatistics = TRUE;
-+ }
-+
-+ FmPcdIncNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId);
-+
-+ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd))
-+ {
-+ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
-+
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ {
-+#ifdef FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004
-+ if ((p_FmPort->fmRevInfo.majorRev < 6) &&
-+ (p_FmPort->pcdEngines & FM_PCD_KG))
-+ {
-+ int i;
-+ for (i = 0; i<p_PcdParams->p_KgParams->numOfSchemes; i++)
-+ /* The following function must be locked */
-+ FmPcdKgCcGetSetParams(p_FmPort->h_FmPcd,
-+ p_PcdParams->p_KgParams->h_Schemes[i],
-+ UPDATE_KG_NIA_CC_WA,
-+ 0);
-+ }
-+#endif /* FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 */
-+
-+#if (DPAA_VERSION >= 11)
-+ {
-+ t_FmPcdCtrlParamsPage *p_ParamsPage;
-+
-+ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
-+ (void**)&p_ParamsPage);
-+ ASSERT_COND(p_ParamsPage);
-+ WRITE_UINT32(p_ParamsPage->postBmiFetchNia,
-+ p_FmPort->savedBmiNia);
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* Set post-bmi-fetch nia */
-+ p_FmPort->savedBmiNia &= BMI_RFNE_FDCS_MASK;
-+ p_FmPort->savedBmiNia |= (NIA_FM_CTL_AC_POST_BMI_FETCH
-+ | NIA_ENG_FM_CTL);
-+
-+ /* Set pre-bmi-fetch nia */
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN;
-+#if (DPAA_VERSION >= 11)
-+ fmPortGetSetCcParams.setCcParams.nia =
-+ (NIA_FM_CTL_AC_PRE_BMI_FETCH_FULL_FRAME | NIA_ENG_FM_CTL);
-+#else
-+ fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_PRE_BMI_FETCH_HEADER | NIA_ENG_FM_CTL);
-+#endif /* (DPAA_VERSION >= 11) */
-+ if ((err = FmPortGetSetCcParams(p_FmPort, &fmPortGetSetCcParams))
-+ != E_OK)
-+ {
-+ DeletePcd(p_FmPort);
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+
-+ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
-+
-+ /* Set pop-to-next-step nia */
-+#if (DPAA_VERSION == 10)
-+ if (p_FmPort->fmRevInfo.majorRev < 6)
-+ {
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN;
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
-+ }
-+ else
-+ {
-+#endif /* (DPAA_VERSION == 10) */
-+ fmPortGetSetCcParams.getCcParams.type = GET_NIA_FPNE;
-+#if (DPAA_VERSION == 10)
-+ }
-+#endif /* (DPAA_VERSION == 10) */
-+ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
-+ != E_OK)
-+ {
-+ DeletePcd(p_FmPort);
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /* Set post-bmi-prepare-to-enq nia */
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FENE;
-+ fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_POST_BMI_ENQ
-+ | NIA_ENG_FM_CTL);
-+ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
-+ != E_OK)
-+ {
-+ DeletePcd(p_FmPort);
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if ((p_FmPort->h_IpReassemblyManip)
-+ || (p_FmPort->h_CapwapReassemblyManip))
-+ {
-+#if (DPAA_VERSION == 10)
-+ if (p_FmPort->fmRevInfo.majorRev < 6)
-+ {
-+ /* Overwrite post-bmi-prepare-to-enq nia */
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FENE;
-+ fmPortGetSetCcParams.setCcParams.nia = (NIA_FM_CTL_AC_POST_BMI_ENQ_ORR | NIA_ENG_FM_CTL | NIA_ORDER_RESTOR);
-+ fmPortGetSetCcParams.setCcParams.overwrite = TRUE;
-+ }
-+ else
-+ {
-+#endif /* (DPAA_VERSION == 10) */
-+ /* Set the ORR bit (for order-restoration) */
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_FPNE;
-+ fmPortGetSetCcParams.setCcParams.nia =
-+ fmPortGetSetCcParams.getCcParams.nia | NIA_ORDER_RESTOR;
-+#if (DPAA_VERSION == 10)
-+ }
-+#endif /* (DPAA_VERSION == 10) */
-+ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
-+ != E_OK)
-+ {
-+ DeletePcd(p_FmPort);
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ }
-+ }
-+ else
-+ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
-+
-+#if (DPAA_VERSION >= 11)
-+ {
-+ t_FmPcdCtrlParamsPage *p_ParamsPage;
-+
-+ memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams));
-+
-+ fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_CMNE;
-+ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd))
-+ fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_POP_TO_N_STEP
-+ | NIA_ENG_FM_CTL;
-+ else
-+ fmPortGetSetCcParams.setCcParams.nia =
-+ NIA_FM_CTL_AC_NO_IPACC_POP_TO_N_STEP | NIA_ENG_FM_CTL;
-+ if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams))
-+ != E_OK)
-+ {
-+ DeletePcd(p_FmPort);
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ FmPortSetGprFunc(p_FmPort, e_FM_PORT_GPR_MURAM_PAGE,
-+ (void**)&p_ParamsPage);
-+ ASSERT_COND(p_ParamsPage);
-+
-+ if (FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd))
-+ WRITE_UINT32(
-+ p_ParamsPage->misc,
-+ GET_UINT32(p_ParamsPage->misc) | FM_CTL_PARAMS_PAGE_OFFLOAD_SUPPORT_EN);
-+
-+ if ((p_FmPort->h_IpReassemblyManip)
-+ || (p_FmPort->h_CapwapReassemblyManip))
-+ {
-+ if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ WRITE_UINT32(
-+ p_ParamsPage->discardMask,
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm));
-+ else
-+ WRITE_UINT32(
-+ p_ParamsPage->discardMask,
-+ GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm));
-+ }
-+#ifdef FM_ERROR_VSP_NO_MATCH_SW006
-+ if (p_FmPort->vspe)
-+ WRITE_UINT32(
-+ p_ParamsPage->misc,
-+ GET_UINT32(p_ParamsPage->misc) | (p_FmPort->dfltRelativeId & FM_CTL_PARAMS_PAGE_ERROR_VSP_MASK));
-+#endif /* FM_ERROR_VSP_NO_MATCH_SW006 */
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ err = AttachPCD(h_FmPort);
-+ if (err)
-+ {
-+ DeletePcd(p_FmPort);
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return err;
-+}
-+
-+t_Error FM_PORT_DeletePCD(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmPort->imEn)
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION,
-+ ("available for non-independent mode ports only"));
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR( MAJOR, E_INVALID_OPERATION,
-+ ("available for Rx and offline parsing ports only"));
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = DetachPCD(h_FmPort);
-+ if (err)
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ FmPcdDecNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId);
-+
-+ /* we do it anyway, instead of checking if included */
-+ if ((p_FmPort->pcdEngines & FM_PCD_PRS) && p_FmPort->includeInPrsStatistics)
-+ {
-+ FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd,
-+ p_FmPort->hardwarePortId, FALSE);
-+ p_FmPort->includeInPrsStatistics = FALSE;
-+ }
-+
-+ if (!FmPcdLockTryLockAll(p_FmPort->h_FmPcd))
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ DBG(TRACE, ("Try LockAll - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = DeletePcd(h_FmPort);
-+ FmPcdLockUnlockAll(p_FmPort->h_FmPcd);
-+ if (err)
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ if (p_FmPort->h_ReassemblyTree)
-+ {
-+ err = FM_PCD_CcRootDelete(p_FmPort->h_ReassemblyTree);
-+ if (err)
-+ {
-+ RELEASE_LOCK(p_FmPort->lock);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ p_FmPort->h_ReassemblyTree = NULL;
-+ }
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return err;
-+}
-+
-+t_Error FM_PORT_PcdKgBindSchemes(t_Handle h_FmPort,
-+ t_FmPcdPortSchemesParams *p_PortScheme)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
-+ t_Error err = E_OK;
-+ uint32_t tmpScmVec = 0;
-+ int i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG,
-+ E_INVALID_STATE);
-+
-+ schemeBind.netEnvId = p_FmPort->netEnvId;
-+ schemeBind.hardwarePortId = p_FmPort->hardwarePortId;
-+ schemeBind.numOfSchemes = p_PortScheme->numOfSchemes;
-+ schemeBind.useClsPlan = p_FmPort->useClsPlan;
-+ for (i = 0; i < schemeBind.numOfSchemes; i++)
-+ {
-+ schemeBind.schemesIds[i] = FmPcdKgGetSchemeId(
-+ p_PortScheme->h_Schemes[i]);
-+ /* build vector */
-+ tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]);
-+ }
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
-+ if (err == E_OK)
-+ p_FmPort->schemesPerPortVector |= tmpScmVec;
-+
-+#ifdef FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004
-+ if ((FmPcdIsAdvancedOffloadSupported(p_FmPort->h_FmPcd)) &&
-+ (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
-+ (p_FmPort->fmRevInfo.majorRev < 6))
-+ {
-+ for (i=0; i<p_PortScheme->numOfSchemes; i++)
-+ FmPcdKgCcGetSetParams(p_FmPort->h_FmPcd, p_PortScheme->h_Schemes[i], UPDATE_KG_NIA_CC_WA, 0);
-+ }
-+#endif /* FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004 */
-+
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return err;
-+}
-+
-+t_Error FM_PORT_PcdKgUnbindSchemes(t_Handle h_FmPort,
-+ t_FmPcdPortSchemesParams *p_PortScheme)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_FmPcdKgInterModuleBindPortToSchemes schemeBind;
-+ t_Error err = E_OK;
-+ uint32_t tmpScmVec = 0;
-+ int i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG,
-+ E_INVALID_STATE);
-+
-+ schemeBind.netEnvId = p_FmPort->netEnvId;
-+ schemeBind.hardwarePortId = p_FmPort->hardwarePortId;
-+ schemeBind.numOfSchemes = p_PortScheme->numOfSchemes;
-+ for (i = 0; i < schemeBind.numOfSchemes; i++)
-+ {
-+ schemeBind.schemesIds[i] = FmPcdKgGetSchemeId(
-+ p_PortScheme->h_Schemes[i]);
-+ /* build vector */
-+ tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]);
-+ }
-+
-+ if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock))
-+ {
-+ DBG(TRACE, ("FM Port Try Lock - BUSY"));
-+ return ERROR_CODE(E_BUSY);
-+ }
-+
-+ err = FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind);
-+ if (err == E_OK)
-+ p_FmPort->schemesPerPortVector &= ~tmpScmVec;
-+ RELEASE_LOCK(p_FmPort->lock);
-+
-+ return err;
-+}
-+
-+t_Error FM_PORT_AddCongestionGrps(t_Handle h_FmPort,
-+ t_FmPortCongestionGrps *p_CongestionGrps)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ uint8_t priorityTmpArray[FM_PORT_NUM_OF_CONGESTION_GRPS];
-+ uint8_t mod, index;
-+ uint32_t i, grpsMap[FMAN_PORT_CG_MAP_NUM];
-+ int err;
-+#if (DPAA_VERSION >= 11)
-+ int j;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+
-+ /* Index validation is currently unnecessary; it will probably be needed in
-+ the future, once more CGs become available:
-+ for (i=0; i<p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
-+ if (p_CongestionGrps->congestionGrpsToConsider[i] >= FM_PORT_NUM_OF_CONGESTION_GRPS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("CG id!"));
-+ */
-+
-+#ifdef FM_NO_OP_OBSERVED_CGS
-+ if ((p_FmPort->fmRevInfo.majorRev != 4) &&
-+ (p_FmPort->fmRevInfo.majorRev < 6))
-+ {
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) &&
-+ (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only"));
-+ }
-+ else
-+#endif /* FM_NO_OP_OBSERVED_CGS */
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("Available for Rx & OP ports only"));
-+
-+ /* Prepare groups map array */
-+ memset(grpsMap, 0, FMAN_PORT_CG_MAP_NUM * sizeof(uint32_t));
-+ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
-+ {
-+ index = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] / 32);
-+ mod = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] % 32);
-+ if (p_FmPort->fmRevInfo.majorRev != 4)
-+ grpsMap[7 - index] |= (uint32_t)(1 << mod);
-+ else
-+ grpsMap[0] |= (uint32_t)(1 << mod);
-+ }
-+
-+ memset(&priorityTmpArray, 0,
-+ FM_PORT_NUM_OF_CONGESTION_GRPS * sizeof(uint8_t));
-+
-+ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
-+ {
-+#if (DPAA_VERSION >= 11)
-+ for (j = 0; j < FM_MAX_NUM_OF_PFC_PRIORITIES; j++)
-+ if (p_CongestionGrps->pfcPrioritiesEn[i][j])
-+ priorityTmpArray[p_CongestionGrps->congestionGrpsToConsider[i]] |=
-+ (0x01 << (FM_MAX_NUM_OF_PFC_PRIORITIES - j - 1));
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ for (i = 0; i < FM_PORT_NUM_OF_CONGESTION_GRPS; i++)
-+ {
-+ err = FmSetCongestionGroupPFCpriority(p_FmPort->h_Fm, i,
-+ priorityTmpArray[i]);
-+ if (err)
-+ return err;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ err = fman_port_add_congestion_grps(&p_FmPort->port, grpsMap);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fman_port_add_congestion_grps"));
-+
-+ return E_OK;
-+}
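-+
-+/* Editor's note (illustrative, not from the original sources): on FMan
-+ * revisions other than 4 the congestion groups are packed into the eight
-+ * 32-bit words of grpsMap with group 0 in the least significant bit of the
-+ * last word. For example, CG id 35 gives index = 35 / 32 = 1 and
-+ * mod = 35 % 32 = 3, so bit 3 of grpsMap[7 - 1] = grpsMap[6] is set;
-+ * revision 4 devices use grpsMap[0] only. */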
-+
-+t_Error FM_PORT_RemoveCongestionGrps(t_Handle h_FmPort,
-+ t_FmPortCongestionGrps *p_CongestionGrps)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ uint8_t mod, index;
-+ uint32_t i, grpsMap[FMAN_PORT_CG_MAP_NUM];
-+ int err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+
-+ {
-+#ifdef FM_NO_OP_OBSERVED_CGS
-+ t_FmRevisionInfo revInfo;
-+
-+ FM_GetRevision(p_FmPort->h_Fm, &revInfo);
-+ if (revInfo.majorRev != 4)
-+ {
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) &&
-+ (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only"));
-+ }
-+ else
-+#endif /* FM_NO_OP_OBSERVED_CGS */
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_RX)
-+ && (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+ ("Available for Rx & OP ports only"));
-+ }
-+
-+ /* Prepare groups map array */
-+ memset(grpsMap, 0, FMAN_PORT_CG_MAP_NUM * sizeof(uint32_t));
-+ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
-+ {
-+ index = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] / 32);
-+ mod = (uint8_t)(p_CongestionGrps->congestionGrpsToConsider[i] % 32);
-+ if (p_FmPort->fmRevInfo.majorRev != 4)
-+ grpsMap[7 - index] |= (uint32_t)(1 << mod);
-+ else
-+ grpsMap[0] |= (uint32_t)(1 << mod);
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ for (i = 0; i < p_CongestionGrps->numOfCongestionGrpsToConsider; i++)
-+ {
-+ t_Error err = FmSetCongestionGroupPFCpriority(
-+ p_FmPort->h_Fm, p_CongestionGrps->congestionGrpsToConsider[i],
-+ 0);
-+ if (err)
-+ return err;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ err = fman_port_remove_congestion_grps(&p_FmPort->port, grpsMap);
-+ if (err != 0)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("fman_port_remove_congestion_grps"));
-+ return E_OK;
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FM_PORT_GetIPv4OptionsCount(t_Handle h_FmPort,
-+ uint32_t *p_Ipv4OptionsCount)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(
-+ (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING),
-+ E_INVALID_VALUE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_ParamsPage, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_Ipv4OptionsCount, E_NULL_POINTER);
-+
-+ *p_Ipv4OptionsCount = GET_UINT32(p_FmPort->p_ParamsPage->ipfOptionsCounter);
-+
-+ return E_OK;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+t_Error FM_PORT_ConfigDsarSupport(t_Handle h_FmPortRx,
-+ t_FmPortDsarTablesSizes *params)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
-+ p_FmPort->deepSleepVars.autoResMaxSizes = XX_Malloc(
-+ sizeof(struct t_FmPortDsarTablesSizes));
-+ if (!p_FmPort->deepSleepVars.autoResMaxSizes)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Deep Sleep Auto Response table sizes"));
-+ memcpy(p_FmPort->deepSleepVars.autoResMaxSizes, params,
-+ sizeof(struct t_FmPortDsarTablesSizes));
-+ return E_OK;
-+}
-+
-+static t_Error FmPortConfigAutoResForDeepSleepSupport1(t_FmPort *p_FmPort)
-+{
-+ uint32_t *param_page;
-+ t_FmPortDsarTablesSizes *params = p_FmPort->deepSleepVars.autoResMaxSizes;
-+ t_ArCommonDesc *ArCommonDescPtr;
-+ uint32_t size = sizeof(t_ArCommonDesc);
-+ // ARP
-+ // should this be guarded by params->maxNumOfArpEntries being non-zero?
-+ size = ROUND_UP(size,4);
-+ size += sizeof(t_DsarArpDescriptor);
-+ size += sizeof(t_DsarArpBindingEntry) * params->maxNumOfArpEntries;
-+ size += sizeof(t_DsarArpStatistics);
-+ //ICMPV4
-+ size = ROUND_UP(size,4);
-+ size += sizeof(t_DsarIcmpV4Descriptor);
-+ size += sizeof(t_DsarIcmpV4BindingEntry) * params->maxNumOfEchoIpv4Entries;
-+ size += sizeof(t_DsarIcmpV4Statistics);
-+ //ICMPV6
-+ size = ROUND_UP(size,4);
-+ size += sizeof(t_DsarIcmpV6Descriptor);
-+ size += sizeof(t_DsarIcmpV6BindingEntry) * params->maxNumOfEchoIpv6Entries;
-+ size += sizeof(t_DsarIcmpV6Statistics);
-+ //ND
-+ size = ROUND_UP(size,4);
-+ size += sizeof(t_DsarNdDescriptor);
-+ size += sizeof(t_DsarIcmpV6BindingEntry) * params->maxNumOfNdpEntries;
-+ size += sizeof(t_DsarIcmpV6Statistics);
-+ //SNMP
-+ size = ROUND_UP(size,4);
-+ size += sizeof(t_DsarSnmpDescriptor);
-+ size += sizeof(t_DsarSnmpIpv4AddrTblEntry)
-+ * params->maxNumOfSnmpIPV4Entries;
-+ size += sizeof(t_DsarSnmpIpv6AddrTblEntry)
-+ * params->maxNumOfSnmpIPV6Entries;
-+ size += sizeof(t_OidsTblEntry) * params->maxNumOfSnmpOidEntries;
-+ size += params->maxNumOfSnmpOidChar;
-+ size += sizeof(t_DsarIcmpV6Statistics);
-+ //filters
-+ size = ROUND_UP(size,4);
-+ size += params->maxNumOfIpProtFiltering;
-+ size = ROUND_UP(size,4);
-+ size += params->maxNumOfUdpPortFiltering * sizeof(t_PortTblEntry);
-+ size = ROUND_UP(size,4);
-+ size += params->maxNumOfTcpPortFiltering * sizeof(t_PortTblEntry);
-+
-+ // add here for more protocols
-+
-+ // statistics
-+ size = ROUND_UP(size,4);
-+ size += sizeof(t_ArStatistics);
-+
-+ ArCommonDescPtr = FM_MURAM_AllocMem(p_FmPort->h_FmMuram, size, 0x10);
-+
-+ param_page =
-+ XX_PhysToVirt(
-+ p_FmPort->fmMuramPhysBaseAddr
-+ + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr));
-+ WRITE_UINT32(
-+ *param_page,
-+ (uint32_t)(XX_VirtToPhys(ArCommonDescPtr) - p_FmPort->fmMuramPhysBaseAddr));
-+ return E_OK;
-+}
-+
-+t_FmPortDsarTablesSizes* FM_PORT_GetDsarTablesMaxSizes(t_Handle h_FmPortRx)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
-+ return p_FmPort->deepSleepVars.autoResMaxSizes;
-+}
-+
-+struct arOffsets
-+{
-+ uint32_t arp;
-+ uint32_t nd;
-+ uint32_t icmpv4;
-+ uint32_t icmpv6;
-+ uint32_t snmp;
-+ uint32_t stats;
-+ uint32_t filtIp;
-+ uint32_t filtUdp;
-+ uint32_t filtTcp;
-+};
-+
-+static uint32_t AR_ComputeOffsets(struct arOffsets* of,
-+ struct t_FmPortDsarParams *params,
-+ t_FmPort *p_FmPort)
-+{
-+ uint32_t size = sizeof(t_ArCommonDesc);
-+ // ARP
-+ if (params->p_AutoResArpInfo)
-+ {
-+ size = ROUND_UP(size,4);
-+ of->arp = size;
-+ size += sizeof(t_DsarArpDescriptor);
-+ size += sizeof(t_DsarArpBindingEntry)
-+ * params->p_AutoResArpInfo->tableSize;
-+ size += sizeof(t_DsarArpStatistics);
-+ }
-+ // ICMPV4
-+ if (params->p_AutoResEchoIpv4Info)
-+ {
-+ size = ROUND_UP(size,4);
-+ of->icmpv4 = size;
-+ size += sizeof(t_DsarIcmpV4Descriptor);
-+ size += sizeof(t_DsarIcmpV4BindingEntry)
-+ * params->p_AutoResEchoIpv4Info->tableSize;
-+ size += sizeof(t_DsarIcmpV4Statistics);
-+ }
-+ // ICMPV6
-+ if (params->p_AutoResEchoIpv6Info)
-+ {
-+ size = ROUND_UP(size,4);
-+ of->icmpv6 = size;
-+ size += sizeof(t_DsarIcmpV6Descriptor);
-+ size += sizeof(t_DsarIcmpV6BindingEntry)
-+ * params->p_AutoResEchoIpv6Info->tableSize;
-+ size += sizeof(t_DsarIcmpV6Statistics);
-+ }
-+ // ND
-+ if (params->p_AutoResNdpInfo)
-+ {
-+ size = ROUND_UP(size,4);
-+ of->nd = size;
-+ size += sizeof(t_DsarNdDescriptor);
-+ size += sizeof(t_DsarIcmpV6BindingEntry)
-+ * (params->p_AutoResNdpInfo->tableSizeAssigned
-+ + params->p_AutoResNdpInfo->tableSizeTmp);
-+ size += sizeof(t_DsarIcmpV6Statistics);
-+ }
-+ // SNMP
-+ if (params->p_AutoResSnmpInfo)
-+ {
-+ size = ROUND_UP(size,4);
-+ of->snmp = size;
-+ size += sizeof(t_DsarSnmpDescriptor);
-+ size += sizeof(t_DsarSnmpIpv4AddrTblEntry)
-+ * params->p_AutoResSnmpInfo->numOfIpv4Addresses;
-+ size += sizeof(t_DsarSnmpIpv6AddrTblEntry)
-+ * params->p_AutoResSnmpInfo->numOfIpv6Addresses;
-+ size += sizeof(t_OidsTblEntry) * params->p_AutoResSnmpInfo->oidsTblSize;
-+ size += p_FmPort->deepSleepVars.autoResMaxSizes->maxNumOfSnmpOidChar;
-+ size += sizeof(t_DsarIcmpV6Statistics);
-+ }
-+ //filters
-+ size = ROUND_UP(size,4);
-+ if (params->p_AutoResFilteringInfo)
-+ {
-+ of->filtIp = size;
-+ size += params->p_AutoResFilteringInfo->ipProtTableSize;
-+ size = ROUND_UP(size,4);
-+ of->filtUdp = size;
-+ size += params->p_AutoResFilteringInfo->udpPortsTableSize
-+ * sizeof(t_PortTblEntry);
-+ size = ROUND_UP(size,4);
-+ of->filtTcp = size;
-+ size += params->p_AutoResFilteringInfo->tcpPortsTableSize
-+ * sizeof(t_PortTblEntry);
-+ }
-+ // add here for more protocols
-+ // statistics
-+ size = ROUND_UP(size,4);
-+ of->stats = size;
-+ size += sizeof(t_ArStatistics);
-+ return size;
-+}
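-+
-+/* Editor's note (illustrative, not from the original sources): the tables
-+ * computed above are laid out back to back in a single MURAM allocation,
-+ * 4-byte aligned, in the order: common descriptor, ARP, ICMPv4 echo,
-+ * ICMPv6 echo, ND, SNMP, filtering tables (IP protocol, UDP, TCP) and
-+ * finally statistics. Each offset in struct arOffsets is relative to the
-+ * start of the allocation, so of->stats + sizeof(t_ArStatistics) equals
-+ * the total size returned by AR_ComputeOffsets(). */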
-+
-+uint32_t* ARDesc;
-+void PrsEnable(t_Handle p_FmPcd);
-+void PrsDisable(t_Handle p_FmPcd);
-+int PrsIsEnabled(t_Handle p_FmPcd);
-+t_Handle FM_PCD_GetHcPort(t_Handle h_FmPcd);
-+
-+static t_Error DsarCheckParams(t_FmPortDsarParams *params,
-+ t_FmPortDsarTablesSizes *sizes)
-+{
-+ bool macInit = FALSE;
-+ uint8_t mac[6];
-+ int i = 0;
-+
-+ // check table sizes
-+ if (params->p_AutoResArpInfo
-+ && sizes->maxNumOfArpEntries < params->p_AutoResArpInfo->tableSize)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("DSAR: Arp table size exceeds the configured maximum size."));
-+ if (params->p_AutoResEchoIpv4Info
-+ && sizes->maxNumOfEchoIpv4Entries
-+ < params->p_AutoResEchoIpv4Info->tableSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: EchoIpv4 table size exceeds the configured maximum size."));
-+ if (params->p_AutoResNdpInfo
-+ && sizes->maxNumOfNdpEntries
-+ < params->p_AutoResNdpInfo->tableSizeAssigned
-+ + params->p_AutoResNdpInfo->tableSizeTmp)
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("DSAR: NDP table size exceeds the configured maximum size."));
-+ if (params->p_AutoResEchoIpv6Info
-+ && sizes->maxNumOfEchoIpv6Entries
-+ < params->p_AutoResEchoIpv6Info->tableSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: EchoIpv6 table size exceeds the configured maximum size."));
-+ if (params->p_AutoResSnmpInfo
-+ && sizes->maxNumOfSnmpOidEntries
-+ < params->p_AutoResSnmpInfo->oidsTblSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: Snmp Oid table size exceeds the configured maximum size."));
-+ if (params->p_AutoResSnmpInfo
-+ && sizes->maxNumOfSnmpIPV4Entries
-+ < params->p_AutoResSnmpInfo->numOfIpv4Addresses)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: Snmp ipv4 table size exceeds the configured maximum size."));
-+ if (params->p_AutoResSnmpInfo
-+ && sizes->maxNumOfSnmpIPV6Entries
-+ < params->p_AutoResSnmpInfo->numOfIpv6Addresses)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: Snmp ipv6 table size exceeds the configured maximum size."));
-+ if (params->p_AutoResFilteringInfo)
-+ {
-+ if (sizes->maxNumOfIpProtFiltering
-+ < params->p_AutoResFilteringInfo->ipProtTableSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: ip filter table size exceeds the configured maximum size."));
-+ if (sizes->maxNumOfTcpPortFiltering
-+ < params->p_AutoResFilteringInfo->udpPortsTableSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: udp filter table size exceeds the configured maximum size."));
-+ if (sizes->maxNumOfUdpPortFiltering
-+ < params->p_AutoResFilteringInfo->tcpPortsTableSize)
-+ RETURN_ERROR(
-+ MAJOR,
-+ E_INVALID_VALUE,
-+ ("DSAR: tcp filter table size exceeds the configured maximum size."));
-+ }
-+ /* check only 1 MAC address is configured (this is what ucode currently supports) */
-+ if (params->p_AutoResArpInfo && params->p_AutoResArpInfo->tableSize)
-+ {
-+ memcpy(mac, params->p_AutoResArpInfo->p_AutoResTable[0].mac, 6);
-+ i = 1;
-+ macInit = TRUE;
-+
-+ for (; i < params->p_AutoResArpInfo->tableSize; i++)
-+ if (memcmp(mac, params->p_AutoResArpInfo->p_AutoResTable[i].mac, 6))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("DSAR: Only 1 mac address is currently supported."));
-+ }
-+ if (params->p_AutoResEchoIpv4Info
-+ && params->p_AutoResEchoIpv4Info->tableSize)
-+ {
-+ i = 0;
-+ if (!macInit)
-+ {
-+ memcpy(mac, params->p_AutoResEchoIpv4Info->p_AutoResTable[0].mac,
-+ 6);
-+ i = 1;
-+ macInit = TRUE;
-+ }
-+ for (; i < params->p_AutoResEchoIpv4Info->tableSize; i++)
-+ if (memcmp(mac,
-+ params->p_AutoResEchoIpv4Info->p_AutoResTable[i].mac, 6))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("DSAR: Only 1 mac address is currently supported."));
-+ }
-+ if (params->p_AutoResEchoIpv6Info
-+ && params->p_AutoResEchoIpv6Info->tableSize)
-+ {
-+ i = 0;
-+ if (!macInit)
-+ {
-+ memcpy(mac, params->p_AutoResEchoIpv6Info->p_AutoResTable[0].mac,
-+ 6);
-+ i = 1;
-+ macInit = TRUE;
-+ }
-+ for (; i < params->p_AutoResEchoIpv6Info->tableSize; i++)
-+ if (memcmp(mac,
-+ params->p_AutoResEchoIpv6Info->p_AutoResTable[i].mac, 6))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("DSAR: Only 1 mac address is currently supported."));
-+ }
-+ if (params->p_AutoResNdpInfo && params->p_AutoResNdpInfo->tableSizeAssigned)
-+ {
-+ i = 0;
-+ if (!macInit)
-+ {
-+ memcpy(mac, params->p_AutoResNdpInfo->p_AutoResTableAssigned[0].mac,
-+ 6);
-+ i = 1;
-+ macInit = TRUE;
-+ }
-+ for (; i < params->p_AutoResNdpInfo->tableSizeAssigned; i++)
-+ if (memcmp(mac,
-+ params->p_AutoResNdpInfo->p_AutoResTableAssigned[i].mac,
-+ 6))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("DSAR: Only 1 mac address is currently supported."));
-+ }
-+ if (params->p_AutoResNdpInfo && params->p_AutoResNdpInfo->tableSizeTmp)
-+ {
-+ i = 0;
-+ if (!macInit)
-+ {
-+ memcpy(mac, params->p_AutoResNdpInfo->p_AutoResTableTmp[0].mac, 6);
-+ i = 1;
-+ }
-+ for (; i < params->p_AutoResNdpInfo->tableSizeTmp; i++)
-+ if (memcmp(mac, params->p_AutoResNdpInfo->p_AutoResTableTmp[i].mac,
-+ 6))
-+ RETURN_ERROR(
-+ MAJOR, E_INVALID_VALUE,
-+ ("DSAR: Only 1 mac address is currently supported."));
-+ }
-+ return E_OK;
-+}
-+
-+static int GetBERLen(uint8_t* buf)
-+{
-+ if (*buf & 0x80)
-+ {
-+ if ((*buf & 0x7F) == 1)
-+ return buf[1];
-+ else
-+ return *(uint16_t*)&buf[1]; // assuming max len is 2
-+ }
-+ else
-+ return buf[0];
-+}
-+#define TOTAL_BER_LEN(len) (((len) < 128) ? (len) + 2 : (len) + 3)
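GetBERLen() above decodes an ASN.1 BER length field (short form: a single octet below 0x80; long form: 0x80 ORed with the number of following length octets, assumed here to be at most two), and TOTAL_BER_LEN() then adds the tag octet plus the length octet(s) to get the full TLV size. As a reference, a minimal standalone sketch of the same decoding, independent of the driver headers and reading the long form byte by byte so it does not rely on host endianness:

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch only: decode a BER length field starting at buf[0].
     * Mirrors the logic of GetBERLen() above, but avoids the
     * uint16_t cast; assumes at most 2 length octets, as the
     * driver does. hdr_len receives the size of the length field. */
    static size_t ber_content_len(const uint8_t *buf, size_t *hdr_len)
    {
        if (buf[0] & 0x80) {                        /* long form */
            size_t n = buf[0] & 0x7F;               /* number of length octets */
            *hdr_len = 1 + n;
            if (n == 1)
                return buf[1];
            return ((size_t)buf[1] << 8) | buf[2];  /* n == 2, big-endian */
        }
        *hdr_len = 1;                               /* short form */
        return buf[0];
    }
    /* Total TLV size = 1 (tag) + *hdr_len + content length,
     * which is what TOTAL_BER_LEN() computes for content < 256. */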
-+
-+#define SCFG_FMCLKDPSLPCR_ADDR 0xFFE0FC00C
-+#define SCFG_FMCLKDPSLPCR_DS_VAL 0x08402000
-+#define SCFG_FMCLKDPSLPCR_NORMAL_VAL 0x00402000
-+static int fm_soc_suspend(void)
-+{
-+ uint32_t *fmclk, tmp32;
-+ fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
-+ tmp32 = GET_UINT32(*fmclk);
-+ WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL);
-+ tmp32 = GET_UINT32(*fmclk);
-+ iounmap(fmclk);
-+ return 0;
-+}
-+
-+void fm_clk_down(void)
-+{
-+ uint32_t *fmclk, tmp32;
-+ fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
-+ tmp32 = GET_UINT32(*fmclk);
-+ WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL | 0x40000000);
-+ tmp32 = GET_UINT32(*fmclk);
-+ iounmap(fmclk);
-+}
-+
-+t_Error FM_PORT_EnterDsar(t_Handle h_FmPortRx, t_FmPortDsarParams *params)
-+{
-+ int i, j;
-+ t_Error err;
-+ uint32_t nia;
-+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
-+ t_FmPort *p_FmPortTx = (t_FmPort *)params->h_FmPortTx;
-+ t_DsarArpDescriptor *ArpDescriptor;
-+ t_DsarIcmpV4Descriptor* ICMPV4Descriptor;
-+ t_DsarIcmpV6Descriptor* ICMPV6Descriptor;
-+ t_DsarNdDescriptor* NDDescriptor;
-+
-+ uint64_t fmMuramVirtBaseAddr = (uint64_t)PTR_TO_UINT(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr));
-+ uint32_t *param_page = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr));
-+ t_ArCommonDesc *ArCommonDescPtr = (t_ArCommonDesc*)(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(*param_page)));
-+ struct arOffsets* of;
-+ uint8_t tmp = 0;
-+ t_FmGetSetParams fmGetSetParams;
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
-+ fmGetSetParams.setParams.sleep = 1;
-+
-+ err = DsarCheckParams(params, p_FmPort->deepSleepVars.autoResMaxSizes);
-+ if (err != E_OK)
-+ return err;
-+
-+ p_FmPort->deepSleepVars.autoResOffsets = XX_Malloc(sizeof(struct arOffsets));
-+ of = (struct arOffsets *)p_FmPort->deepSleepVars.autoResOffsets;
-+ IOMemSet32(ArCommonDescPtr, 0, AR_ComputeOffsets(of, params, p_FmPort));
-+
-+ // common
-+ WRITE_UINT8(ArCommonDescPtr->arTxPort, p_FmPortTx->hardwarePortId);
-+ nia = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne); // bmi nia
-+ if ((nia & 0x007C0000) == 0x00440000) // bmi nia is parser
-+ WRITE_UINT32(ArCommonDescPtr->activeHPNIA, GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne));
-+ else
-+ WRITE_UINT32(ArCommonDescPtr->activeHPNIA, nia);
-+ WRITE_UINT16(ArCommonDescPtr->snmpPort, 161);
-+
-+ // ARP
-+ if (params->p_AutoResArpInfo)
-+ {
-+ t_DsarArpBindingEntry* arp_bindings;
-+ ArpDescriptor = (t_DsarArpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->arp);
-+ WRITE_UINT32(ArCommonDescPtr->p_ArpDescriptor, PTR_TO_UINT(ArpDescriptor) - fmMuramVirtBaseAddr);
-+ arp_bindings = (t_DsarArpBindingEntry*)(PTR_TO_UINT(ArpDescriptor) + sizeof(t_DsarArpDescriptor));
-+ if (params->p_AutoResArpInfo->enableConflictDetection)
-+ WRITE_UINT16(ArpDescriptor->control, 1);
-+ else
-+ WRITE_UINT16(ArpDescriptor->control, 0);
-+ if (params->p_AutoResArpInfo->tableSize)
-+ {
-+ t_FmPortDsarArpEntry* arp_entry = params->p_AutoResArpInfo->p_AutoResTable;
-+ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&arp_entry[0].mac[0]);
-+ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&arp_entry[0].mac[2]);
-+ WRITE_UINT16(ArpDescriptor->numOfBindings, params->p_AutoResArpInfo->tableSize);
-+
-+ for (i = 0; i < params->p_AutoResArpInfo->tableSize; i++)
-+ {
-+ WRITE_UINT32(arp_bindings[i].ipv4Addr, arp_entry[i].ipAddress);
-+ if (arp_entry[i].isVlan)
-+ WRITE_UINT16(arp_bindings[i].vlanId, arp_entry[i].vid & 0xFFF);
-+ }
-+ WRITE_UINT32(ArpDescriptor->p_Bindings, PTR_TO_UINT(arp_bindings) - fmMuramVirtBaseAddr);
-+ }
-+ WRITE_UINT32(ArpDescriptor->p_Statistics, PTR_TO_UINT(arp_bindings) +
-+ sizeof(t_DsarArpBindingEntry) * params->p_AutoResArpInfo->tableSize - fmMuramVirtBaseAddr);
-+ }
-+
-+ // ICMPV4
-+ if (params->p_AutoResEchoIpv4Info)
-+ {
-+ t_DsarIcmpV4BindingEntry* icmpv4_bindings;
-+ ICMPV4Descriptor = (t_DsarIcmpV4Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv4);
-+ WRITE_UINT32(ArCommonDescPtr->p_IcmpV4Descriptor, PTR_TO_UINT(ICMPV4Descriptor) - fmMuramVirtBaseAddr);
-+ icmpv4_bindings = (t_DsarIcmpV4BindingEntry*)(PTR_TO_UINT(ICMPV4Descriptor) + sizeof(t_DsarIcmpV4Descriptor));
-+ WRITE_UINT16(ICMPV4Descriptor->control, 0);
-+ if (params->p_AutoResEchoIpv4Info->tableSize)
-+ {
-+ t_FmPortDsarArpEntry* arp_entry = params->p_AutoResEchoIpv4Info->p_AutoResTable;
-+ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&arp_entry[0].mac[0]);
-+ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&arp_entry[0].mac[2]);
-+ WRITE_UINT16(ICMPV4Descriptor->numOfBindings, params->p_AutoResEchoIpv4Info->tableSize);
-+
-+ for (i = 0; i < params->p_AutoResEchoIpv4Info->tableSize; i++)
-+ {
-+ WRITE_UINT32(icmpv4_bindings[i].ipv4Addr, arp_entry[i].ipAddress);
-+ if (arp_entry[i].isVlan)
-+ WRITE_UINT16(icmpv4_bindings[i].vlanId, arp_entry[i].vid & 0xFFF);
-+ }
-+ WRITE_UINT32(ICMPV4Descriptor->p_Bindings, PTR_TO_UINT(icmpv4_bindings) - fmMuramVirtBaseAddr);
-+ }
-+ WRITE_UINT32(ICMPV4Descriptor->p_Statistics, PTR_TO_UINT(icmpv4_bindings) +
-+ sizeof(t_DsarIcmpV4BindingEntry) * params->p_AutoResEchoIpv4Info->tableSize - fmMuramVirtBaseAddr);
-+ }
-+
-+ // ICMPV6
-+ if (params->p_AutoResEchoIpv6Info)
-+ {
-+ t_DsarIcmpV6BindingEntry* icmpv6_bindings;
-+ ICMPV6Descriptor = (t_DsarIcmpV6Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv6);
-+ WRITE_UINT32(ArCommonDescPtr->p_IcmpV6Descriptor, PTR_TO_UINT(ICMPV6Descriptor) - fmMuramVirtBaseAddr);
-+ icmpv6_bindings = (t_DsarIcmpV6BindingEntry*)(PTR_TO_UINT(ICMPV6Descriptor) + sizeof(t_DsarIcmpV6Descriptor));
-+ WRITE_UINT16(ICMPV6Descriptor->control, 0);
-+ if (params->p_AutoResEchoIpv6Info->tableSize)
-+ {
-+ t_FmPortDsarNdpEntry* ndp_entry = params->p_AutoResEchoIpv6Info->p_AutoResTable;
-+ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&ndp_entry[0].mac[0]);
-+ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&ndp_entry[0].mac[2]);
-+ WRITE_UINT16(ICMPV6Descriptor->numOfBindings, params->p_AutoResEchoIpv6Info->tableSize);
-+
-+ for (i = 0; i < params->p_AutoResEchoIpv6Info->tableSize; i++)
-+ {
-+ for (j = 0; j < 4; j++)
-+ WRITE_UINT32(icmpv6_bindings[i].ipv6Addr[j], ndp_entry[i].ipAddress[j]);
-+ if (ndp_entry[i].isVlan)
-+ WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan
-+ }
-+ WRITE_UINT32(ICMPV6Descriptor->p_Bindings, PTR_TO_UINT(icmpv6_bindings) - fmMuramVirtBaseAddr);
-+ }
-+ WRITE_UINT32(ICMPV6Descriptor->p_Statistics, PTR_TO_UINT(icmpv6_bindings) +
-+ sizeof(t_DsarIcmpV6BindingEntry) * params->p_AutoResEchoIpv6Info->tableSize - fmMuramVirtBaseAddr);
-+ }
-+
-+ // ND
-+ if (params->p_AutoResNdpInfo)
-+ {
-+ t_DsarIcmpV6BindingEntry* icmpv6_bindings;
-+ NDDescriptor = (t_DsarNdDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->nd);
-+ WRITE_UINT32(ArCommonDescPtr->p_NdDescriptor, PTR_TO_UINT(NDDescriptor) - fmMuramVirtBaseAddr);
-+ icmpv6_bindings = (t_DsarIcmpV6BindingEntry*)(PTR_TO_UINT(NDDescriptor) + sizeof(t_DsarNdDescriptor));
-+ if (params->p_AutoResNdpInfo->enableConflictDetection)
-+ WRITE_UINT16(NDDescriptor->control, 1);
-+ else
-+ WRITE_UINT16(NDDescriptor->control, 0);
-+ if (params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp)
-+ {
-+ t_FmPortDsarNdpEntry* ndp_entry = params->p_AutoResNdpInfo->p_AutoResTableAssigned;
-+ WRITE_UINT16(*(uint16_t*)&ArCommonDescPtr->macStationAddr[0], *(uint16_t*)&ndp_entry[0].mac[0]);
-+ WRITE_UINT32(*(uint32_t*)&ArCommonDescPtr->macStationAddr[2], *(uint32_t*)&ndp_entry[0].mac[2]);
-+ WRITE_UINT16(NDDescriptor->numOfBindings, params->p_AutoResNdpInfo->tableSizeAssigned
-+ + params->p_AutoResNdpInfo->tableSizeTmp);
-+
-+ for (i = 0; i < params->p_AutoResNdpInfo->tableSizeAssigned; i++)
-+ {
-+ for (j = 0; j < 4; j++)
-+ WRITE_UINT32(icmpv6_bindings[i].ipv6Addr[j], ndp_entry[i].ipAddress[j]);
-+ if (ndp_entry[i].isVlan)
-+ WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan
-+ }
-+ ndp_entry = params->p_AutoResNdpInfo->p_AutoResTableTmp;
-+ for (i = 0; i < params->p_AutoResNdpInfo->tableSizeTmp; i++)
-+ {
-+ for (j = 0; j < 4; j++)
-+ WRITE_UINT32(icmpv6_bindings[i + params->p_AutoResNdpInfo->tableSizeAssigned].ipv6Addr[j], ndp_entry[i].ipAddress[j]);
-+ if (ndp_entry[i].isVlan)
-+ WRITE_UINT16(*(uint16_t*)&icmpv6_bindings[i + params->p_AutoResNdpInfo->tableSizeAssigned].ipv6Addr[4], ndp_entry[i].vid & 0xFFF); // writing vlan
-+ }
-+ WRITE_UINT32(NDDescriptor->p_Bindings, PTR_TO_UINT(icmpv6_bindings) - fmMuramVirtBaseAddr);
-+ }
-+ WRITE_UINT32(NDDescriptor->p_Statistics, PTR_TO_UINT(icmpv6_bindings) + sizeof(t_DsarIcmpV6BindingEntry)
-+ * (params->p_AutoResNdpInfo->tableSizeAssigned + params->p_AutoResNdpInfo->tableSizeTmp)
-+ - fmMuramVirtBaseAddr);
-+ WRITE_UINT32(NDDescriptor->solicitedAddr, 0xFFFFFFFF);
-+ }
-+
-+ // SNMP
-+ if (params->p_AutoResSnmpInfo)
-+ {
-+ t_FmPortDsarSnmpInfo *snmpSrc = params->p_AutoResSnmpInfo;
-+ t_DsarSnmpIpv4AddrTblEntry* snmpIpv4Addr;
-+ t_DsarSnmpIpv6AddrTblEntry* snmpIpv6Addr;
-+ t_OidsTblEntry* snmpOid;
-+ uint8_t *charPointer;
-+ int len;
-+ t_DsarSnmpDescriptor* SnmpDescriptor = (t_DsarSnmpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->snmp);
-+ WRITE_UINT32(ArCommonDescPtr->p_SnmpDescriptor, PTR_TO_UINT(SnmpDescriptor) - fmMuramVirtBaseAddr);
-+ WRITE_UINT16(SnmpDescriptor->control, snmpSrc->control);
-+ WRITE_UINT16(SnmpDescriptor->maxSnmpMsgLength, snmpSrc->maxSnmpMsgLength);
-+ snmpIpv4Addr = (t_DsarSnmpIpv4AddrTblEntry*)(PTR_TO_UINT(SnmpDescriptor) + sizeof(t_DsarSnmpDescriptor));
-+ if (snmpSrc->numOfIpv4Addresses)
-+ {
-+ t_FmPortDsarSnmpIpv4AddrTblEntry* snmpIpv4AddrSrc = snmpSrc->p_Ipv4AddrTbl;
-+ WRITE_UINT16(SnmpDescriptor->numOfIpv4Addresses, snmpSrc->numOfIpv4Addresses);
-+ for (i = 0; i < snmpSrc->numOfIpv4Addresses; i++)
-+ {
-+ WRITE_UINT32(snmpIpv4Addr[i].ipv4Addr, snmpIpv4AddrSrc[i].ipv4Addr);
-+ if (snmpIpv4AddrSrc[i].isVlan)
-+ WRITE_UINT16(snmpIpv4Addr[i].vlanId, snmpIpv4AddrSrc[i].vid & 0xFFF);
-+ }
-+ WRITE_UINT32(SnmpDescriptor->p_Ipv4AddrTbl, PTR_TO_UINT(snmpIpv4Addr) - fmMuramVirtBaseAddr);
-+ }
-+ snmpIpv6Addr = (t_DsarSnmpIpv6AddrTblEntry*)(PTR_TO_UINT(snmpIpv4Addr)
-+ + sizeof(t_DsarSnmpIpv4AddrTblEntry) * snmpSrc->numOfIpv4Addresses);
-+ if (snmpSrc->numOfIpv6Addresses)
-+ {
-+ t_FmPortDsarSnmpIpv6AddrTblEntry* snmpIpv6AddrSrc = snmpSrc->p_Ipv6AddrTbl;
-+ WRITE_UINT16(SnmpDescriptor->numOfIpv6Addresses, snmpSrc->numOfIpv6Addresses);
-+ for (i = 0; i < snmpSrc->numOfIpv6Addresses; i++)
-+ {
-+ for (j = 0; j < 4; j++)
-+ WRITE_UINT32(snmpIpv6Addr[i].ipv6Addr[j], snmpIpv6AddrSrc[i].ipv6Addr[j]);
-+ if (snmpIpv6AddrSrc[i].isVlan)
-+ WRITE_UINT16(snmpIpv6Addr[i].vlanId, snmpIpv6AddrSrc[i].vid & 0xFFF);
-+ }
-+ WRITE_UINT32(SnmpDescriptor->p_Ipv6AddrTbl, PTR_TO_UINT(snmpIpv6Addr) - fmMuramVirtBaseAddr);
-+ }
-+ snmpOid = (t_OidsTblEntry*)(PTR_TO_UINT(snmpIpv6Addr)
-+ + sizeof(t_DsarSnmpIpv6AddrTblEntry) * snmpSrc->numOfIpv6Addresses);
-+ charPointer = (uint8_t*)(PTR_TO_UINT(snmpOid)
-+ + sizeof(t_OidsTblEntry) * snmpSrc->oidsTblSize);
-+ len = TOTAL_BER_LEN(GetBERLen(&snmpSrc->p_RdOnlyCommunityStr[1]));
-+ Mem2IOCpy32(charPointer, snmpSrc->p_RdOnlyCommunityStr, len);
-+ WRITE_UINT32(SnmpDescriptor->p_RdOnlyCommunityStr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
-+ charPointer += len;
-+ len = TOTAL_BER_LEN(GetBERLen(&snmpSrc->p_RdWrCommunityStr[1]));
-+ Mem2IOCpy32(charPointer, snmpSrc->p_RdWrCommunityStr, len);
-+ WRITE_UINT32(SnmpDescriptor->p_RdWrCommunityStr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
-+ charPointer += len;
-+ WRITE_UINT32(SnmpDescriptor->oidsTblSize, snmpSrc->oidsTblSize);
-+ WRITE_UINT32(SnmpDescriptor->p_OidsTbl, PTR_TO_UINT(snmpOid) - fmMuramVirtBaseAddr);
-+ for (i = 0; i < snmpSrc->oidsTblSize; i++)
-+ {
-+ WRITE_UINT16(snmpOid->oidSize, snmpSrc->p_OidsTbl[i].oidSize);
-+ WRITE_UINT16(snmpOid->resSize, snmpSrc->p_OidsTbl[i].resSize);
-+ Mem2IOCpy32(charPointer, snmpSrc->p_OidsTbl[i].oidVal, snmpSrc->p_OidsTbl[i].oidSize);
-+ WRITE_UINT32(snmpOid->p_Oid, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
-+ charPointer += snmpSrc->p_OidsTbl[i].oidSize;
-+ if (snmpSrc->p_OidsTbl[i].resSize <= 4)
-+ WRITE_UINT32(snmpOid->resValOrPtr, *snmpSrc->p_OidsTbl[i].resVal);
-+ else
-+ {
-+ Mem2IOCpy32(charPointer, snmpSrc->p_OidsTbl[i].resVal, snmpSrc->p_OidsTbl[i].resSize);
-+ WRITE_UINT32(snmpOid->resValOrPtr, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
-+ charPointer += snmpSrc->p_OidsTbl[i].resSize;
-+ }
-+ snmpOid++;
-+ }
-+ charPointer = UINT_TO_PTR(ROUND_UP(PTR_TO_UINT(charPointer),4));
-+ WRITE_UINT32(SnmpDescriptor->p_Statistics, PTR_TO_UINT(charPointer) - fmMuramVirtBaseAddr);
-+ }
-+
-+ // filtering
-+ if (params->p_AutoResFilteringInfo)
-+ {
-+ if (params->p_AutoResFilteringInfo->ipProtPassOnHit)
-+ tmp |= IP_PROT_TBL_PASS_MASK;
-+ if (params->p_AutoResFilteringInfo->udpPortPassOnHit)
-+ tmp |= UDP_PORT_TBL_PASS_MASK;
-+ if (params->p_AutoResFilteringInfo->tcpPortPassOnHit)
-+ tmp |= TCP_PORT_TBL_PASS_MASK;
-+ WRITE_UINT8(ArCommonDescPtr->filterControl, tmp);
-+ WRITE_UINT16(ArCommonDescPtr->tcpControlPass, params->p_AutoResFilteringInfo->tcpFlagsMask);
-+
-+ // ip filtering
-+ if (params->p_AutoResFilteringInfo->ipProtTableSize)
-+ {
-+ uint8_t* ip_tbl = (uint8_t*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtIp);
-+ WRITE_UINT8(ArCommonDescPtr->ipProtocolTblSize, params->p_AutoResFilteringInfo->ipProtTableSize);
-+ for (i = 0; i < params->p_AutoResFilteringInfo->ipProtTableSize; i++)
-+ WRITE_UINT8(ip_tbl[i], params->p_AutoResFilteringInfo->p_IpProtTablePtr[i]);
-+ WRITE_UINT32(ArCommonDescPtr->p_IpProtocolFiltTbl, PTR_TO_UINT(ip_tbl) - fmMuramVirtBaseAddr);
-+ }
-+
-+ // udp filtering
-+ if (params->p_AutoResFilteringInfo->udpPortsTableSize)
-+ {
-+ t_PortTblEntry* udp_tbl = (t_PortTblEntry*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtUdp);
-+ WRITE_UINT8(ArCommonDescPtr->udpPortTblSize, params->p_AutoResFilteringInfo->udpPortsTableSize);
-+ for (i = 0; i < params->p_AutoResFilteringInfo->udpPortsTableSize; i++)
-+ {
-+ WRITE_UINT32(udp_tbl[i].Ports,
-+ (params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].srcPort << 16) +
-+ params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].dstPort);
-+ WRITE_UINT32(udp_tbl[i].PortsMask,
-+ (params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].srcPortMask << 16) +
-+ params->p_AutoResFilteringInfo->p_UdpPortsTablePtr[i].dstPortMask);
-+ }
-+ WRITE_UINT32(ArCommonDescPtr->p_UdpPortFiltTbl, PTR_TO_UINT(udp_tbl) - fmMuramVirtBaseAddr);
-+ }
-+
-+ // tcp filtering
-+ if (params->p_AutoResFilteringInfo->tcpPortsTableSize)
-+ {
-+ t_PortTblEntry* tcp_tbl = (t_PortTblEntry*)(PTR_TO_UINT(ArCommonDescPtr) + of->filtTcp);
-+ WRITE_UINT8(ArCommonDescPtr->tcpPortTblSize, params->p_AutoResFilteringInfo->tcpPortsTableSize);
-+ for (i = 0; i < params->p_AutoResFilteringInfo->tcpPortsTableSize; i++)
-+ {
-+ WRITE_UINT32(tcp_tbl[i].Ports,
-+ (params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].srcPort << 16) +
-+ params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].dstPort);
-+ WRITE_UINT32(tcp_tbl[i].PortsMask,
-+ (params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].srcPortMask << 16) +
-+ params->p_AutoResFilteringInfo->p_TcpPortsTablePtr[i].dstPortMask);
-+ }
-+ WRITE_UINT32(ArCommonDescPtr->p_TcpPortFiltTbl, PTR_TO_UINT(tcp_tbl) - fmMuramVirtBaseAddr);
-+ }
-+ }
-+ // common stats
-+ WRITE_UINT32(ArCommonDescPtr->p_ArStats, PTR_TO_UINT(ArCommonDescPtr) + of->stats - fmMuramVirtBaseAddr);
-+
-+ // get into Deep Sleep sequence:
-+
-+ // Ensures that the FMan does not enter the idle state. This is done by programming
-+ // FMDPSLPCR[FM_STOP] to one.
-+ fm_soc_suspend();
-+
-+ ARDesc = UINT_TO_PTR(XX_VirtToPhys(ArCommonDescPtr));
-+ return E_OK;
-+
-+}
-+
-+void FM_ChangeClock(t_Handle h_Fm, int hardwarePortId);
-+t_Error FM_PORT_EnterDsarFinal(t_Handle h_DsarRxPort, t_Handle h_DsarTxPort)
-+{
-+ t_FmGetSetParams fmGetSetParams;
-+ t_FmPort *p_FmPort = (t_FmPort *)h_DsarRxPort;
-+ t_FmPort *p_FmPortTx = (t_FmPort *)h_DsarTxPort;
-+ t_Handle *h_FmPcd = FmGetPcd(p_FmPort->h_Fm);
-+ t_FmPort *p_FmPortHc = FM_PCD_GetHcPort(h_FmPcd);
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.setParams.type = UPDATE_FM_CLD;
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+
-+ /* Issue graceful stop to HC port */
-+ FM_PORT_Disable(p_FmPortHc);
-+
-+ // config tx port
-+ p_FmPort->deepSleepVars.fmbm_tcfg = GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg);
-+ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg, GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg) | BMI_PORT_CFG_IM | BMI_PORT_CFG_EN);
-+ // ????
-+ p_FmPort->deepSleepVars.fmbm_tcmne = GET_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne);
-+ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne, 0xE);
-+ // Stage 7:echo
-+ p_FmPort->deepSleepVars.fmbm_rfpne = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne);
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne, 0x2E);
-+ if (!PrsIsEnabled(h_FmPcd))
-+ {
-+ p_FmPort->deepSleepVars.dsarEnabledParser = TRUE;
-+ PrsEnable(h_FmPcd);
-+ }
-+ else
-+ p_FmPort->deepSleepVars.dsarEnabledParser = FALSE;
-+
-+ p_FmPort->deepSleepVars.fmbm_rfne = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne);
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne, 0x440000);
-+
-+ // save rcfg for restoring: accumulate mode is changed by ucode
-+ p_FmPort->deepSleepVars.fmbm_rcfg = GET_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcfg);
-+ WRITE_UINT32(p_FmPort->port.bmi_regs->rx.fmbm_rcfg, p_FmPort->deepSleepVars.fmbm_rcfg | BMI_PORT_CFG_AM);
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
-+ fmGetSetParams.setParams.sleep = 1;
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+
-+// ***** issue external request sync command
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.setParams.type = UPDATE_FPM_EXTC;
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+ // get
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.getParams.type = GET_FMFP_EXTC;
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+ if (fmGetSetParams.getParams.fmfp_extc != 0)
-+ {
-+ // clear
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.setParams.type = UPDATE_FPM_EXTC_CLEAR;
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+}
-+
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.getParams.type = GET_FMFP_EXTC | GET_FM_NPI;
-+ do
-+ {
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+ } while (fmGetSetParams.getParams.fmfp_extc != 0 && fmGetSetParams.getParams.fm_npi == 0);
-+ if (fmGetSetParams.getParams.fm_npi != 0)
-+ XX_Print("FM: Sync did not finish\n");
-+
-+ // check that all activity has stopped
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.getParams.type = GET_FMQM_GS | GET_FM_NPI;
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+ while (fmGetSetParams.getParams.fmqm_gs & 0xF0000000)
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+ if (fmGetSetParams.getParams.fmqm_gs == 0 && fmGetSetParams.getParams.fm_npi == 0)
-+ XX_Print("FM: Sleeping\n");
-+// FM_ChangeClock(p_FmPort->h_Fm, p_FmPort->hardwarePortId);
-+
-+ return E_OK;
-+}
-+
-+EXPORT_SYMBOL(FM_PORT_EnterDsarFinal);
-+
-+void FM_PORT_Dsar_DumpRegs()
-+{
-+ uint32_t* hh = XX_PhysToVirt(PTR_TO_UINT(ARDesc));
-+ DUMP_MEMORY(hh, 0x220);
-+}
-+
-+void FM_PORT_ExitDsar(t_Handle h_FmPortRx, t_Handle h_FmPortTx)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
-+ t_FmPort *p_FmPortTx = (t_FmPort *)h_FmPortTx;
-+ t_Handle *h_FmPcd = FmGetPcd(p_FmPort->h_Fm);
-+ t_FmPort *p_FmPortHc = FM_PCD_GetHcPort(h_FmPcd);
-+ t_FmGetSetParams fmGetSetParams;
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
-+ fmGetSetParams.setParams.sleep = 0;
-+ if (p_FmPort->deepSleepVars.autoResOffsets)
-+ {
-+ XX_Free(p_FmPort->deepSleepVars.autoResOffsets);
-+ p_FmPort->deepSleepVars.autoResOffsets = 0;
-+ }
-+
-+ if (p_FmPort->deepSleepVars.dsarEnabledParser)
-+ PrsDisable(FmGetPcd(p_FmPort->h_Fm));
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne, p_FmPort->deepSleepVars.fmbm_rfpne);
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne, p_FmPort->deepSleepVars.fmbm_rfne);
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rcfg, p_FmPort->deepSleepVars.fmbm_rcfg);
-+ FmGetSetParams(p_FmPort->h_Fm, &fmGetSetParams);
-+ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcmne, p_FmPort->deepSleepVars.fmbm_tcmne);
-+ WRITE_UINT32(p_FmPortTx->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg, p_FmPort->deepSleepVars.fmbm_tcfg);
-+ FM_PORT_Enable(p_FmPortHc);
-+}
-+
-+bool FM_PORT_IsInDsar(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPort;
-+ return PTR_TO_UINT(p_FmPort->deepSleepVars.autoResOffsets);
-+}
-+
-+t_Error FM_PORT_GetDsarStats(t_Handle h_FmPortRx, t_FmPortDsarStats *stats)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPortRx;
-+ struct arOffsets *of = (struct arOffsets*)p_FmPort->deepSleepVars.autoResOffsets;
-+ uint8_t* fmMuramVirtBaseAddr = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr);
-+ uint32_t *param_page = XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr));
-+ t_ArCommonDesc *ArCommonDescPtr = (t_ArCommonDesc*)(XX_PhysToVirt(p_FmPort->fmMuramPhysBaseAddr + GET_UINT32(*param_page)));
-+ t_DsarArpDescriptor *ArpDescriptor = (t_DsarArpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->arp);
-+ t_DsarArpStatistics* arp_stats = (t_DsarArpStatistics*)(PTR_TO_UINT(ArpDescriptor->p_Statistics) + fmMuramVirtBaseAddr);
-+ t_DsarIcmpV4Descriptor* ICMPV4Descriptor = (t_DsarIcmpV4Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv4);
-+ t_DsarIcmpV4Statistics* icmpv4_stats = (t_DsarIcmpV4Statistics*)(PTR_TO_UINT(ICMPV4Descriptor->p_Statistics) + fmMuramVirtBaseAddr);
-+ t_DsarNdDescriptor* NDDescriptor = (t_DsarNdDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->nd);
-+ t_NdStatistics* nd_stats = (t_NdStatistics*)(PTR_TO_UINT(NDDescriptor->p_Statistics) + fmMuramVirtBaseAddr);
-+ t_DsarIcmpV6Descriptor* ICMPV6Descriptor = (t_DsarIcmpV6Descriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->icmpv6);
-+ t_DsarIcmpV6Statistics* icmpv6_stats = (t_DsarIcmpV6Statistics*)(PTR_TO_UINT(ICMPV6Descriptor->p_Statistics) + fmMuramVirtBaseAddr);
-+ t_DsarSnmpDescriptor* SnmpDescriptor = (t_DsarSnmpDescriptor*)(PTR_TO_UINT(ArCommonDescPtr) + of->snmp);
-+ t_DsarSnmpStatistics* snmp_stats = (t_DsarSnmpStatistics*)(PTR_TO_UINT(SnmpDescriptor->p_Statistics) + fmMuramVirtBaseAddr);
-+ stats->arpArCnt = arp_stats->arCnt;
-+ stats->echoIcmpv4ArCnt = icmpv4_stats->arCnt;
-+ stats->ndpArCnt = nd_stats->arCnt;
-+ stats->echoIcmpv6ArCnt = icmpv6_stats->arCnt;
-+ stats->snmpGetCnt = snmp_stats->snmpGetReqCnt;
-+ stats->snmpGetNextCnt = snmp_stats->snmpGetNextReqCnt;
-+ return E_OK;
-+}
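The hunk above removes the whole Deep Sleep Auto Response (DSAR) entry/exit path. For orientation only, a hedged sketch of how a caller would be expected to sequence the exported entry points; the call order is inferred from the code above, and the handle and parameter names are placeholders, not code from this patch:

    /* Hypothetical caller-side sequence, based on the functions removed above. */
    static t_Error example_deep_sleep_cycle(t_Handle rx_port, t_Handle tx_port,
                                            t_FmPortDsarParams *params)
    {
        t_Error err;

        /* Build the auto-response tables in MURAM behind the Rx port. */
        err = FM_PORT_EnterDsar(rx_port, params);
        if (err != E_OK)
            return err;

        /* Reprogram the BMI/FPM registers and hand the port to the ucode. */
        err = FM_PORT_EnterDsarFinal(rx_port, tx_port);
        if (err != E_OK)
            return err;

        /* ... platform suspends here; FMan answers ARP/ICMP/NDP/SNMP on its own ... */

        if (FM_PORT_IsInDsar(rx_port)) {
            t_FmPortDsarStats stats;

            FM_PORT_GetDsarStats(rx_port, &stats);  /* e.g. stats.arpArCnt */
            FM_PORT_ExitDsar(rx_port, tx_port);     /* restore the saved registers */
        }
        return E_OK;
    }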
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.h
-@@ -0,0 +1,999 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_port.h
-+
-+ @Description FM Port internal structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_PORT_H
-+#define __FM_PORT_H
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_port_ext.h"
-+
-+#include "fm_common.h"
-+#include "fm_sp_common.h"
-+#include "fsl_fman_sp.h"
-+#include "fm_port_ext.h"
-+#include "fsl_fman_port.h"
-+
-+#define __ERR_MODULE__ MODULE_FM_PORT
-+
-+
-+#define MIN_EXT_BUF_SIZE 64
-+#define DATA_ALIGNMENT 64
-+#define MAX_LIODN_OFFSET 64
-+#define MAX_PORT_FIFO_SIZE MIN(BMI_MAX_FIFO_SIZE, 1024*BMI_FIFO_UNITS)
-+
-+/**************************************************************************//**
-+ @Description Memory Map defines
-+*//***************************************************************************/
-+#define BMI_PORT_REGS_OFFSET 0
-+#define QMI_PORT_REGS_OFFSET 0x400
-+#define PRS_PORT_REGS_OFFSET 0x800
-+
-+/**************************************************************************//**
-+ @Description defaults
-+*//***************************************************************************/
-+#define DEFAULT_PORT_deqHighPriority_1G FALSE
-+#define DEFAULT_PORT_deqHighPriority_10G TRUE
-+#define DEFAULT_PORT_deqType e_FM_PORT_DEQ_TYPE1
-+#define DEFAULT_PORT_deqPrefetchOption e_FM_PORT_DEQ_FULL_PREFETCH
-+#define DEFAULT_PORT_deqPrefetchOption_HC e_FM_PORT_DEQ_NO_PREFETCH
-+#define DEFAULT_PORT_deqByteCnt_10G 0x1400
-+#define DEFAULT_PORT_deqByteCnt_1G 0x400
-+#define DEFAULT_PORT_bufferPrefixContent_privDataSize DEFAULT_FM_SP_bufferPrefixContent_privDataSize
-+#define DEFAULT_PORT_bufferPrefixContent_passPrsResult DEFAULT_FM_SP_bufferPrefixContent_passPrsResult
-+#define DEFAULT_PORT_bufferPrefixContent_passTimeStamp DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp
-+#define DEFAULT_PORT_bufferPrefixContent_allOtherPCDInfo DEFAULT_FM_SP_bufferPrefixContent_allOtherPCDInfo
-+#define DEFAULT_PORT_bufferPrefixContent_dataAlign DEFAULT_FM_SP_bufferPrefixContent_dataAlign
-+#define DEFAULT_PORT_cheksumLastBytesIgnore 0
-+#define DEFAULT_PORT_cutBytesFromEnd 4
-+#define DEFAULT_PORT_fifoDeqPipelineDepth_IM 2
-+
-+#define DEFAULT_PORT_frmDiscardOverride FALSE
-+
-+#define DEFAULT_PORT_dmaSwapData (e_FmDmaSwapOption)DEFAULT_FMAN_SP_DMA_SWAP_DATA
-+#define DEFAULT_PORT_dmaIntContextCacheAttr (e_FmDmaCacheOption)DEFAULT_FMAN_SP_DMA_INT_CONTEXT_CACHE_ATTR
-+#define DEFAULT_PORT_dmaHeaderCacheAttr (e_FmDmaCacheOption)DEFAULT_FMAN_SP_DMA_HEADER_CACHE_ATTR
-+#define DEFAULT_PORT_dmaScatterGatherCacheAttr (e_FmDmaCacheOption)DEFAULT_FMAN_SP_DMA_SCATTER_GATHER_CACHE_ATTR
-+#define DEFAULT_PORT_dmaWriteOptimize DEFAULT_FMAN_SP_DMA_WRITE_OPTIMIZE
-+
-+#define DEFAULT_PORT_noScatherGather DEFAULT_FMAN_SP_NO_SCATTER_GATHER
-+#define DEFAULT_PORT_forwardIntContextReuse FALSE
-+#define DEFAULT_PORT_BufMargins_startMargins 32
-+#define DEFAULT_PORT_BufMargins_endMargins 0
-+#define DEFAULT_PORT_syncReq TRUE
-+#define DEFAULT_PORT_syncReqForHc FALSE
-+#define DEFAULT_PORT_color e_FM_PORT_COLOR_GREEN
-+#define DEFAULT_PORT_errorsToDiscard FM_PORT_FRM_ERR_CLS_DISCARD
-+/* #define DEFAULT_PORT_dualRateLimitScaleDown e_FM_PORT_DUAL_RATE_LIMITER_NONE */
-+/* #define DEFAULT_PORT_rateLimitBurstSizeHighGranularity FALSE */
-+#define DEFAULT_PORT_exception IM_EV_BSY
-+#define DEFAULT_PORT_maxFrameLength 9600
-+
-+#define DEFAULT_notSupported 0xff
-+
-+#if (DPAA_VERSION < 11)
-+#define DEFAULT_PORT_rxFifoPriElevationLevel MAX_PORT_FIFO_SIZE
-+#define DEFAULT_PORT_rxFifoThreshold (MAX_PORT_FIFO_SIZE*3/4)
-+
-+#define DEFAULT_PORT_txFifoMinFillLevel 0
-+#define DEFAULT_PORT_txFifoLowComfLevel (5*KILOBYTE)
-+#define DEFAULT_PORT_fifoDeqPipelineDepth_1G 1
-+#define DEFAULT_PORT_fifoDeqPipelineDepth_10G 4
-+
-+#define DEFAULT_PORT_fifoDeqPipelineDepth_OH 2
-+
-+/* Host command port MUST NOT be changed to more than 1 !!! */
-+#define DEFAULT_PORT_numOfTasks(type) \
-+ (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \
-+ ((type) == e_FM_PORT_TYPE_TX_10G)) ? 16 : \
-+ ((((type) == e_FM_PORT_TYPE_RX) || \
-+ ((type) == e_FM_PORT_TYPE_TX) || \
-+ ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) ? 3 : 1))
-+
-+#define DEFAULT_PORT_extraNumOfTasks(type) \
-+ (uint32_t)(((type) == e_FM_PORT_TYPE_RX_10G) ? 8 : \
-+ (((type) == e_FM_PORT_TYPE_RX) ? 2 : 0))
-+
-+#define DEFAULT_PORT_numOfOpenDmas(type) \
-+ (uint32_t)((((type) == e_FM_PORT_TYPE_TX_10G) || \
-+ ((type) == e_FM_PORT_TYPE_RX_10G)) ? 8 : 1 )
-+
-+#define DEFAULT_PORT_extraNumOfOpenDmas(type) \
-+ (uint32_t)(((type) == e_FM_PORT_TYPE_RX_10G) ? 8 : \
-+ (((type) == e_FM_PORT_TYPE_RX) ? 1 : 0))
-+
-+#define DEFAULT_PORT_numOfFifoBufs(type) \
-+ (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \
-+ ((type) == e_FM_PORT_TYPE_TX_10G)) ? 48 : \
-+ ((type) == e_FM_PORT_TYPE_RX) ? 45 : \
-+ ((type) == e_FM_PORT_TYPE_TX) ? 44 : 8)
-+
-+#define DEFAULT_PORT_extraNumOfFifoBufs 0
-+
-+#else /* (DPAA_VERSION < 11) */
-+/* Defaults are registers' reset values */
-+#define DEFAULT_PORT_rxFifoPriElevationLevel MAX_PORT_FIFO_SIZE
-+#define DEFAULT_PORT_rxFifoThreshold MAX_PORT_FIFO_SIZE
-+
-+#define DEFAULT_PORT_txFifoMinFillLevel 0
-+#define DEFAULT_PORT_txFifoLowComfLevel (5 * KILOBYTE)
-+#define DEFAULT_PORT_fifoDeqPipelineDepth_1G 2
-+#define DEFAULT_PORT_fifoDeqPipelineDepth_10G 4
-+
-+#define DEFAULT_PORT_fifoDeqPipelineDepth_OH 2
-+
-+#define DEFAULT_PORT_numOfTasks(type) \
-+ (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \
-+ ((type) == e_FM_PORT_TYPE_TX_10G)) ? 14 : \
-+ (((type) == e_FM_PORT_TYPE_RX) || \
-+ ((type) == e_FM_PORT_TYPE_TX)) ? 4 : \
-+ ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) ? 6 : 1)
-+
-+#define DEFAULT_PORT_extraNumOfTasks(type) 0
-+
-+#define DEFAULT_PORT_numOfOpenDmas(type) \
-+ (uint32_t)(((type) == e_FM_PORT_TYPE_RX_10G) ? 8 : \
-+ ((type) == e_FM_PORT_TYPE_TX_10G) ? 12 : \
-+ ((type) == e_FM_PORT_TYPE_RX) ? 2 : \
-+ ((type) == e_FM_PORT_TYPE_TX) ? 3 : \
-+ ((type) == e_FM_PORT_TYPE_OH_HOST_COMMAND) ? 2 : 4)
-+
-+#define DEFAULT_PORT_extraNumOfOpenDmas(type) 0
-+
-+#define DEFAULT_PORT_numOfFifoBufs(type) \
-+ (uint32_t) (((type) == e_FM_PORT_TYPE_RX_10G) ? 96 : \
-+ ((type) == e_FM_PORT_TYPE_TX_10G) ? 64 : \
-+ ((type) == e_FM_PORT_TYPE_OH_HOST_COMMAND) ? 10 : 50)
-+
-+#define DEFAULT_PORT_extraNumOfFifoBufs 0
-+
-+#endif /* (DPAA_VERSION < 11) */
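As a rough reference, a sketch of what the DPAA_VERSION >= 11 default macros above evaluate to for two common port types; it assumes the driver headers for the e_FM_PORT_TYPE_* enum values, and the variable names are illustrative only:

    /* Sketch only (not from the patch): DPAA_VERSION >= 11 defaults. */
    static const uint32_t rx10g_tasks = DEFAULT_PORT_numOfTasks(e_FM_PORT_TYPE_RX_10G);    /* 14 */
    static const uint32_t rx10g_dmas  = DEFAULT_PORT_numOfOpenDmas(e_FM_PORT_TYPE_RX_10G); /*  8 */
    static const uint32_t rx10g_bufs  = DEFAULT_PORT_numOfFifoBufs(e_FM_PORT_TYPE_RX_10G); /* 96 */
    static const uint32_t tx1g_tasks  = DEFAULT_PORT_numOfTasks(e_FM_PORT_TYPE_TX);        /*  4 */
    static const uint32_t tx1g_dmas   = DEFAULT_PORT_numOfOpenDmas(e_FM_PORT_TYPE_TX);     /*  3 */
    static const uint32_t tx1g_bufs   = DEFAULT_PORT_numOfFifoBufs(e_FM_PORT_TYPE_TX);     /* 50 */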
-+
-+#define DEFAULT_PORT_txBdRingLength 16
-+#define DEFAULT_PORT_rxBdRingLength 128
-+#define DEFAULT_PORT_ImfwExtStructsMemId 0
-+#define DEFAULT_PORT_ImfwExtStructsMemAttr MEMORY_ATTR_CACHEABLE
-+
-+#define FM_PORT_CG_REG_NUM(_cgId) (((FM_PORT_NUM_OF_CONGESTION_GRPS/32)-1)-_cgId/32)
-+
-+/**************************************************************************//**
-+ @Collection PCD Engines
-+*//***************************************************************************/
-+typedef uint32_t fmPcdEngines_t; /**< options as defined below: */
-+
-+#define FM_PCD_NONE 0 /**< No PCD Engine indicated */
-+#define FM_PCD_PRS 0x80000000 /**< Parser indicated */
-+#define FM_PCD_KG 0x40000000 /**< Keygen indicated */
-+#define FM_PCD_CC 0x20000000 /**< Coarse classification indicated */
-+#define FM_PCD_PLCR 0x10000000 /**< Policer indicated */
-+#define FM_PCD_MANIP 0x08000000 /**< Manipulation indicated */
-+/* @} */
-+
-+#define FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS 8
-+#define FM_PORT_MAX_NUM_OF_CONGESTION_GRPS_ALL_INTEGRATIONS 256
-+#define FM_PORT_CG_REG_NUM(_cgId) (((FM_PORT_NUM_OF_CONGESTION_GRPS/32)-1)-_cgId/32)
-+
-+#define FM_OH_PORT_ID 0
-+
-+/***********************************************************************/
-+/* SW parser OFFLOAD labels (offsets) */
-+/***********************************************************************/
-+#if (DPAA_VERSION == 10)
-+#define OFFLOAD_SW_PATCH_IPv4_IPR_LABEL 0x300
-+#define OFFLOAD_SW_PATCH_IPv6_IPR_LABEL 0x325
-+#define OFFLOAD_SW_PATCH_IPv6_IPF_LABEL 0x325
-+#else
-+#define OFFLOAD_SW_PATCH_IPv4_IPR_LABEL 0x100
-+/* Will be used for:
-+ * 1. identify fragments
-+ * 2. udp-lite
-+ */
-+#define OFFLOAD_SW_PATCH_IPv6_IPR_LABEL 0x146
-+/* Will be used for:
-+ * 1. identify the fragmentable area
-+ * 2. udp-lite
-+ */
-+#define OFFLOAD_SW_PATCH_IPv6_IPF_LABEL 0x261
-+#define OFFLOAD_SW_PATCH_CAPWAP_LABEL 0x38d
-+#endif /* (DPAA_VERSION == 10) */
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+#define UDP_LITE_SW_PATCH_LABEL 0x2E0
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+
-+
-+/**************************************************************************//**
-+ @Description Memory Mapped Registers
-+*//***************************************************************************/
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+typedef struct
-+{
-+ volatile uint32_t fmbm_rcfg; /**< Rx Configuration */
-+ volatile uint32_t fmbm_rst; /**< Rx Status */
-+ volatile uint32_t fmbm_rda; /**< Rx DMA attributes*/
-+ volatile uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
-+ volatile uint32_t fmbm_rfed; /**< Rx Frame End Data*/
-+ volatile uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
-+ volatile uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
-+ volatile uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
-+ volatile uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
-+ volatile uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
-+ volatile uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
-+ volatile uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
-+ volatile uint32_t fmbm_rpp; /**< Rx Policer Profile */
-+ volatile uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
-+ volatile uint32_t fmbm_reth; /**< Rx Excessive Threshold */
-+ volatile uint32_t reserved1[0x01];/**< (0x03C) */
-+ volatile uint32_t fmbm_rprai[FM_PORT_PRS_RESULT_NUM_OF_WORDS];
-+ /**< Rx Parse Results Array Initialization*/
-+ volatile uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
-+ volatile uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
-+ volatile uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
-+ volatile uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
-+ volatile uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
-+ volatile uint32_t reserved2[0x02];/**< (0x074-0x078) */
-+ volatile uint32_t fmbm_rcmne; /**< Rx Frame Continuous Mode Next Engine */
-+ volatile uint32_t reserved3[0x20];/**< (0x080 0x0FF) */
-+ volatile uint32_t fmbm_ebmpi[FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS];
-+ /**< Buffer Manager pool Information-*/
-+ volatile uint32_t fmbm_acnt[FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS];
-+ /**< Allocate Counter-*/
-+ volatile uint32_t reserved4[0x08];
-+ /**< 0x130/0x140 - 0x15F reserved -*/
-+ volatile uint32_t fmbm_rcgm[FM_PORT_MAX_NUM_OF_CONGESTION_GRPS_ALL_INTEGRATIONS/32];
-+ /**< Congestion Group Map*/
-+ volatile uint32_t fmbm_rmpd; /**< BM Pool Depletion */
-+ volatile uint32_t reserved5[0x1F];/**< (0x184 0x1FF) */
-+ volatile uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
-+ volatile uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
-+ volatile uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
-+ volatile uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
-+ volatile uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
-+ volatile uint32_t fmbm_rfcd; /**< Rx Frame Discard Counter*/
-+ volatile uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
-+ volatile uint32_t fmbm_rodc; /**< Rx Out of Buffers Discard Counter-*/
-+ volatile uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter-*/
-+ volatile uint32_t fmbm_rpec; /**< Rx Prepare to enqueue Counter-*/
-+ volatile uint32_t reserved6[0x16];/**< (0x228 0x27F) */
-+ volatile uint32_t fmbm_rpc; /**< Rx Performance Counters*/
-+ volatile uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
-+ volatile uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
-+ volatile uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
-+ volatile uint32_t fmbm_rrquc; /**< Rx Receive Queue Utilization Counter*/
-+ volatile uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
-+ volatile uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
-+ volatile uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
-+ volatile uint32_t reserved7[0x18];/**< (0x2A0-0x2FF) */
-+ volatile uint32_t fmbm_rdcfg[0x3];/**< Rx Debug-*/
-+ volatile uint32_t fmbm_rgpr; /**< Rx General Purpose Register. */
-+ volatile uint32_t reserved8[0x3a];/**< (0x310-0x3FF) */
-+} t_FmPortRxBmiRegs;
-+
-+typedef struct
-+{
-+ volatile uint32_t fmbm_tcfg; /**< Tx Configuration */
-+ volatile uint32_t fmbm_tst; /**< Tx Status */
-+ volatile uint32_t fmbm_tda; /**< Tx DMA attributes */
-+ volatile uint32_t fmbm_tfp; /**< Tx FIFO Parameters */
-+ volatile uint32_t fmbm_tfed; /**< Tx Frame End Data */
-+ volatile uint32_t fmbm_ticp; /**< Tx Internal Context Parameters */
-+ volatile uint32_t fmbm_tfdne; /**< Tx Frame Dequeue Next Engine. */
-+ volatile uint32_t fmbm_tfca; /**< Tx Frame Command attribute. */
-+ volatile uint32_t fmbm_tcfqid; /**< Tx Confirmation Frame Queue ID. */
-+ volatile uint32_t fmbm_tfeqid; /**< Tx Frame Error Queue ID */
-+ volatile uint32_t fmbm_tfene; /**< Tx Frame Enqueue Next Engine */
-+ volatile uint32_t fmbm_trlmts; /**< Tx Rate Limiter Scale */
-+ volatile uint32_t fmbm_trlmt; /**< Tx Rate Limiter */
-+ volatile uint32_t fmbm_tccb; /**< Tx Coarse Classification Base */
-+ volatile uint32_t reserved0[0x0e];/**< (0x038-0x070) */
-+ volatile uint32_t fmbm_tfne; /**< Tx Frame Next Engine */
-+ volatile uint32_t fmbm_tpfcm[0x02];/**< Tx Priority based Flow Control (PFC) Mapping */
-+ volatile uint32_t fmbm_tcmne; /**< Tx Frame Continuous Mode Next Engine */
-+ volatile uint32_t reserved2[0x60];/**< (0x080-0x200) */
-+ volatile uint32_t fmbm_tstc; /**< Tx Statistics Counters */
-+ volatile uint32_t fmbm_tfrc; /**< Tx Frame Counter */
-+ volatile uint32_t fmbm_tfdc; /**< Tx Frames Discard Counter */
-+ volatile uint32_t fmbm_tfledc; /**< Tx Frame Length error discard counter */
-+ volatile uint32_t fmbm_tfufdc; /**< Tx Frame unsupported format discard Counter */
-+ volatile uint32_t fmbm_tbdc; /**< Tx Buffers Deallocate Counter */
-+ volatile uint32_t reserved3[0x1A];/**< (0x218-0x280) */
-+ volatile uint32_t fmbm_tpc; /**< Tx Performance Counters*/
-+ volatile uint32_t fmbm_tpcp; /**< Tx Performance Count Parameters*/
-+ volatile uint32_t fmbm_tccn; /**< Tx Cycle Counter*/
-+ volatile uint32_t fmbm_ttuc; /**< Tx Tasks Utilization Counter*/
-+ volatile uint32_t fmbm_ttcquc; /**< Tx Transmit Confirm Queue Utilization Counter*/
-+ volatile uint32_t fmbm_tduc; /**< Tx DMA Utilization Counter*/
-+ volatile uint32_t fmbm_tfuc; /**< Tx FIFO Utilization Counter*/
-+ volatile uint32_t reserved4[16]; /**< (0x29C-0x2FF) */
-+ volatile uint32_t fmbm_tdcfg[0x3];/**< Tx Debug-*/
-+ volatile uint32_t fmbm_tgpr; /**< O/H General Purpose Register */
-+ volatile uint32_t reserved5[0x3a];/**< (0x310-0x3FF) */
-+} t_FmPortTxBmiRegs;
-+
-+typedef struct
-+{
-+ volatile uint32_t fmbm_ocfg; /**< O/H Configuration */
-+ volatile uint32_t fmbm_ost; /**< O/H Status */
-+ volatile uint32_t fmbm_oda; /**< O/H DMA attributes */
-+ volatile uint32_t fmbm_oicp; /**< O/H Internal Context Parameters */
-+ volatile uint32_t fmbm_ofdne; /**< O/H Frame Dequeue Next Engine */
-+ volatile uint32_t fmbm_ofne; /**< O/H Frame Next Engine */
-+ volatile uint32_t fmbm_ofca; /**< O/H Frame Command Attributes. */
-+ volatile uint32_t fmbm_ofpne; /**< O/H Frame Parser Next Engine */
-+ volatile uint32_t fmbm_opso; /**< O/H Parse Start Offset */
-+ volatile uint32_t fmbm_opp; /**< O/H Policer Profile */
-+ volatile uint32_t fmbm_occb; /**< O/H Coarse Classification base */
-+ volatile uint32_t fmbm_oim; /**< O/H Internal margins*/
-+ volatile uint32_t fmbm_ofp; /**< O/H Fifo Parameters*/
-+ volatile uint32_t fmbm_ofed; /**< O/H Frame End Data*/
-+ volatile uint32_t reserved0[2]; /**< (0x038 - 0x03F) */
-+ volatile uint32_t fmbm_oprai[FM_PORT_PRS_RESULT_NUM_OF_WORDS];
-+ /**< O/H Parse Results Array Initialization */
-+ volatile uint32_t fmbm_ofqid; /**< O/H Frame Queue ID */
-+ volatile uint32_t fmbm_oefqid; /**< O/H Error Frame Queue ID */
-+ volatile uint32_t fmbm_ofsdm; /**< O/H Frame Status Discard Mask */
-+ volatile uint32_t fmbm_ofsem; /**< O/H Frame Status Error Mask */
-+ volatile uint32_t fmbm_ofene; /**< O/H Frame Enqueue Next Engine */
-+ volatile uint32_t fmbm_orlmts; /**< O/H Rate Limiter Scale */
-+ volatile uint32_t fmbm_orlmt; /**< O/H Rate Limiter */
-+ volatile uint32_t fmbm_ocmne; /**< O/H Continuous Mode Next Engine */
-+ volatile uint32_t reserved1[0x20];/**< (0x080 - 0x0FF) */
-+ volatile uint32_t fmbm_oebmpi[2]; /**< Buffer Manager Observed Pool Information */
-+ volatile uint32_t reserved2[0x16];/**< (0x108 - 0x15F) */
-+ volatile uint32_t fmbm_ocgm; /**< Observed Congestion Group Map */
-+ volatile uint32_t reserved3[0x7]; /**< (0x164 - 0x17F) */
-+ volatile uint32_t fmbm_ompd; /**< Observed BMan Pool Depletion */
-+ volatile uint32_t reserved4[0x1F];/**< (0x184 - 0x1FF) */
-+ volatile uint32_t fmbm_ostc; /**< O/H Statistics Counters */
-+ volatile uint32_t fmbm_ofrc; /**< O/H Frame Counter */
-+ volatile uint32_t fmbm_ofdc; /**< O/H Frames Discard Counter */
-+ volatile uint32_t fmbm_ofledc; /**< O/H Frames Length Error Discard Counter */
-+ volatile uint32_t fmbm_ofufdc; /**< O/H Frames Unsupported Format Discard Counter */
-+ volatile uint32_t fmbm_offc; /**< O/H Filter Frames Counter */
-+ volatile uint32_t fmbm_ofwdc; /**< - Rx Frames WRED Discard Counter */
-+ volatile uint32_t fmbm_ofldec; /**< O/H Frames List DMA Error Counter */
-+ volatile uint32_t fmbm_obdc; /**< O/H Buffers Deallocate Counter */
-+ volatile uint32_t fmbm_oodc; /**< O/H Out of Buffers Discard Counter */
-+ volatile uint32_t fmbm_opec; /**< O/H Prepare to enqueue Counter */
-+ volatile uint32_t reserved5[0x15];/**< ( - 0x27F) */
-+ volatile uint32_t fmbm_opc; /**< O/H Performance Counters */
-+ volatile uint32_t fmbm_opcp; /**< O/H Performance Count Parameters */
-+ volatile uint32_t fmbm_occn; /**< O/H Cycle Counter */
-+ volatile uint32_t fmbm_otuc; /**< O/H Tasks Utilization Counter */
-+ volatile uint32_t fmbm_oduc; /**< O/H DMA Utilization Counter */
-+ volatile uint32_t fmbm_ofuc; /**< O/H FIFO Utilization Counter */
-+ volatile uint32_t reserved6[26]; /**< (0x298-0x2FF) */
-+ volatile uint32_t fmbm_odcfg[0x3];/**< O/H Debug (only 1 in P1023) */
-+ volatile uint32_t fmbm_ogpr; /**< O/H General Purpose Register. */
-+ volatile uint32_t reserved7[0x3a];/**< (0x310 0x3FF) */
-+} t_FmPortOhBmiRegs;
-+
-+typedef union
-+{
-+ t_FmPortRxBmiRegs rxPortBmiRegs;
-+ t_FmPortTxBmiRegs txPortBmiRegs;
-+ t_FmPortOhBmiRegs ohPortBmiRegs;
-+} u_FmPortBmiRegs;
-+
-+typedef struct
-+{
-+ volatile uint32_t reserved1[2]; /**< 0xn024 - 0x02B */
-+ volatile uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
-+ volatile uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
-+ volatile uint32_t fmqm_pndtfc; /**< PortID n Dequeue Total Frame Counter */
-+ volatile uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID from Default Counter */
-+ volatile uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
-+} t_FmPortNonRxQmiRegs;
-+
-+typedef struct
-+{
-+ volatile uint32_t fmqm_pnc; /**< PortID n Configuration Register */
-+ volatile uint32_t fmqm_pns; /**< PortID n Status Register */
-+ volatile uint32_t fmqm_pnts; /**< PortID n Task Status Register */
-+ volatile uint32_t reserved0[4]; /**< 0xn00C - 0xn01B */
-+ volatile uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
-+ volatile uint32_t fmqm_pnetfc; /**< PortID n Enqueue Total Frame Counter */
-+ t_FmPortNonRxQmiRegs nonRxQmiRegs; /**< Registers for Tx Hc & Op ports */
-+} t_FmPortQmiRegs;
-+
-+typedef struct
-+{
-+ struct
-+ {
-+ volatile uint32_t softSeqAttach; /**< Soft Sequence Attachment */
-+ volatile uint32_t lcv; /**< Line-up Enable Confirmation Mask */
-+ } hdrs[FM_PCD_PRS_NUM_OF_HDRS];
-+ volatile uint32_t reserved0[0xde];
-+ volatile uint32_t pcac; /**< Parse Internal Memory Configuration Access Control Register */
-+ volatile uint32_t pctpid; /**< Parse Internal Memory Configured TPID Register */
-+} t_FmPortPrsRegs;
-+
-+/**************************************************************************//*
-+ @Description Basic buffer descriptor (BD) structure
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ volatile uint16_t status;
-+ volatile uint16_t length;
-+ volatile uint8_t reserved0[0x6];
-+ volatile uint8_t reserved1[0x1];
-+ volatile t_FmPhysAddr buff;
-+} _PackedType t_FmImBd;
-+
-+typedef _Packed struct
-+{
-+ volatile uint16_t gen; /**< tbd */
-+ volatile uint8_t reserved0[0x1];
-+ volatile t_FmPhysAddr bdRingBase; /**< tbd */
-+ volatile uint16_t bdRingSize; /**< tbd */
-+ volatile uint16_t offsetIn; /**< tbd */
-+ volatile uint16_t offsetOut; /**< tbd */
-+ volatile uint8_t reserved1[0x12]; /**< 0x0e - 0x1f */
-+} _PackedType t_FmPortImQd;
-+
-+typedef _Packed struct
-+{
-+ volatile uint32_t mode; /**< Mode register */
-+ volatile uint32_t rxQdPtr; /**< tbd */
-+ volatile uint32_t txQdPtr; /**< tbd */
-+ volatile uint16_t mrblr; /**< tbd */
-+ volatile uint16_t rxQdBsyCnt; /**< tbd */
-+ volatile uint8_t reserved0[0x10]; /**< 0x10 - 0x1f */
-+ t_FmPortImQd rxQd;
-+ t_FmPortImQd txQd;
-+ volatile uint8_t reserved1[0xa0]; /**< 0x60 - 0xff */
-+} _PackedType t_FmPortImPram;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/**************************************************************************//**
-+ @Description Registers bit fields
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description BMI defines
-+*//***************************************************************************/
-+#if (DPAA_VERSION >= 11)
-+#define BMI_SP_ID_MASK 0xff000000
-+#define BMI_SP_ID_SHIFT 24
-+#define BMI_SP_EN 0x01000000
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#define BMI_PORT_CFG_EN 0x80000000
-+#define BMI_PORT_CFG_EN_MACSEC 0x00800000
-+#define BMI_PORT_CFG_FDOVR 0x02000000
-+#define BMI_PORT_CFG_IM 0x01000000
-+#define BMI_PORT_CFG_AM 0x00000040
-+#define BMI_PORT_STATUS_BSY 0x80000000
-+#define BMI_COUNTERS_EN 0x80000000
-+
-+#define BMI_PORT_RFNE_FRWD_DCL4C 0x10000000
-+#define BMI_PORT_RFNE_FRWD_RPD 0x40000000
-+#define BMI_RFNE_FDCS_MASK 0xFF000000
-+#define BMI_RFNE_HXS_MASK 0x000000FF
-+
-+#define BMI_CMD_MR_LEAC 0x00200000
-+#define BMI_CMD_MR_SLEAC 0x00100000
-+#define BMI_CMD_MR_MA 0x00080000
-+#define BMI_CMD_MR_DEAS 0x00040000
-+#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
-+ BMI_CMD_MR_SLEAC | \
-+ BMI_CMD_MR_MA | \
-+ BMI_CMD_MR_DEAS)
-+#define BMI_CMD_ATTR_ORDER 0x80000000
-+#define BMI_CMD_ATTR_SYNC 0x02000000
-+#define BMI_CMD_ATTR_MODE_MISS_ALLIGN_ADDR_EN 0x00080000
-+#define BMI_CMD_ATTR_MACCMD_MASK 0x0000ff00
-+#define BMI_CMD_ATTR_MACCMD_OVERRIDE 0x00008000
-+#define BMI_CMD_ATTR_MACCMD_SECURED 0x00001000
-+#define BMI_CMD_ATTR_MACCMD_SC_MASK 0x00000f00
-+
-+#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
-+#define BMI_STATUS_RX_MASK_UNUSED (uint32_t)(~(FM_PORT_FRM_ERR_DMA | \
-+ FM_PORT_FRM_ERR_PHYSICAL | \
-+ FM_PORT_FRM_ERR_SIZE | \
-+ FM_PORT_FRM_ERR_CLS_DISCARD | \
-+ FM_PORT_FRM_ERR_EXTRACTION | \
-+ FM_PORT_FRM_ERR_NO_SCHEME | \
-+ FM_PORT_FRM_ERR_COLOR_RED | \
-+ FM_PORT_FRM_ERR_COLOR_YELLOW | \
-+ FM_PORT_FRM_ERR_ILL_PLCR | \
-+ FM_PORT_FRM_ERR_PLCR_FRAME_LEN | \
-+ FM_PORT_FRM_ERR_PRS_TIMEOUT | \
-+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
-+ FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
-+ FM_PORT_FRM_ERR_PRS_HDR_ERR | \
-+ FM_PORT_FRM_ERR_IPRE | \
-+ FM_PORT_FRM_ERR_IPR_NCSP | \
-+ FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW))
-+
-+#define BMI_STATUS_OP_MASK_UNUSED (uint32_t)(BMI_STATUS_RX_MASK_UNUSED & \
-+ ~(FM_PORT_FRM_ERR_LENGTH | \
-+ FM_PORT_FRM_ERR_NON_FM | \
-+ FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT))
-+
-+#define BMI_RATE_LIMIT_EN 0x80000000
-+#define BMI_RATE_LIMIT_BURST_SIZE_GRAN 0x80000000
-+#define BMI_RATE_LIMIT_SCALE_BY_2 0x00000001
-+#define BMI_RATE_LIMIT_SCALE_BY_4 0x00000002
-+#define BMI_RATE_LIMIT_SCALE_BY_8 0x00000003
-+
-+#define BMI_RX_FIFO_THRESHOLD_BC 0x80000000
-+
-+#define BMI_PRS_RESULT_HIGH 0x00000000
-+#define BMI_PRS_RESULT_LOW 0xFFFFFFFF
-+
-+
-+#define RX_ERRS_TO_ENQ (FM_PORT_FRM_ERR_DMA | \
-+ FM_PORT_FRM_ERR_PHYSICAL | \
-+ FM_PORT_FRM_ERR_SIZE | \
-+ FM_PORT_FRM_ERR_EXTRACTION | \
-+ FM_PORT_FRM_ERR_NO_SCHEME | \
-+ FM_PORT_FRM_ERR_ILL_PLCR | \
-+ FM_PORT_FRM_ERR_PLCR_FRAME_LEN | \
-+ FM_PORT_FRM_ERR_PRS_TIMEOUT | \
-+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
-+ FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
-+ FM_PORT_FRM_ERR_PRS_HDR_ERR | \
-+ FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \
-+ FM_PORT_FRM_ERR_IPRE)
-+
-+#define OP_ERRS_TO_ENQ (RX_ERRS_TO_ENQ | \
-+ FM_PORT_FRM_ERR_LENGTH | \
-+ FM_PORT_FRM_ERR_NON_FM | \
-+ FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT)
-+
-+
-+#define BMI_RX_FIFO_PRI_ELEVATION_MASK 0x03FF0000
-+#define BMI_RX_FIFO_THRESHOLD_MASK 0x000003FF
-+#define BMI_TX_FIFO_MIN_FILL_MASK 0x03FF0000
-+#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000F000
-+#define BMI_TX_LOW_COMF_MASK 0x000003FF
-+
-+/* shifts */
-+#define BMI_PORT_CFG_MS_SEL_SHIFT 16
-+#define BMI_DMA_ATTR_IC_CACHE_SHIFT FMAN_SP_DMA_ATTR_IC_CACHE_SHIFT
-+#define BMI_DMA_ATTR_HDR_CACHE_SHIFT FMAN_SP_DMA_ATTR_HDR_CACHE_SHIFT
-+#define BMI_DMA_ATTR_SG_CACHE_SHIFT FMAN_SP_DMA_ATTR_SG_CACHE_SHIFT
-+
-+#define BMI_IM_FOF_SHIFT 28
-+#define BMI_PR_PORTID_SHIFT 24
-+
-+#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
-+#define BMI_RX_FIFO_THRESHOLD_SHIFT 0
-+
-+#define BMI_RX_FRAME_END_CS_IGNORE_SHIFT 24
-+#define BMI_RX_FRAME_END_CUT_SHIFT 16
-+
-+#define BMI_IC_SIZE_SHIFT FMAN_SP_IC_SIZE_SHIFT
-+
-+#define BMI_INT_BUF_MARG_SHIFT 28
-+
-+#define BMI_EXT_BUF_MARG_END_SHIFT FMAN_SP_EXT_BUF_MARG_END_SHIFT
-+
-+#define BMI_CMD_ATTR_COLOR_SHIFT 26
-+#define BMI_CMD_ATTR_COM_MODE_SHIFT 16
-+#define BMI_CMD_ATTR_MACCMD_SHIFT 8
-+#define BMI_CMD_ATTR_MACCMD_OVERRIDE_SHIFT 15
-+#define BMI_CMD_ATTR_MACCMD_SECURED_SHIFT 12
-+#define BMI_CMD_ATTR_MACCMD_SC_SHIFT 8
-+
-+#define BMI_POOL_DEP_NUM_OF_POOLS_VECTOR_SHIFT 24
-+
-+#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
-+#define BMI_TX_LOW_COMF_SHIFT 0
-+
-+#define BMI_PERFORMANCE_TASK_COMP_SHIFT 24
-+#define BMI_PERFORMANCE_PORT_COMP_SHIFT 16
-+#define BMI_PERFORMANCE_DMA_COMP_SHIFT 12
-+#define BMI_PERFORMANCE_FIFO_COMP_SHIFT 0
-+
-+#define BMI_MAX_BURST_SHIFT 16
-+#define BMI_COUNT_RATE_UNIT_SHIFT 16
-+
-+/* sizes */
-+#define FRAME_END_DATA_SIZE 16
-+#define FRAME_OFFSET_UNITS 16
-+#define MIN_TX_INT_OFFSET 16
-+#define MAX_FRAME_OFFSET 64
-+#define MAX_FIFO_PIPELINE_DEPTH 8
-+#define MAX_PERFORMANCE_TASK_COMP 64
-+#define MAX_PERFORMANCE_TX_QUEUE_COMP 8
-+#define MAX_PERFORMANCE_RX_QUEUE_COMP 64
-+#define MAX_PERFORMANCE_DMA_COMP 16
-+#define MAX_NUM_OF_TASKS 64
-+#define MAX_NUM_OF_EXTRA_TASKS 8
-+#define MAX_NUM_OF_DMAS 16
-+#define MAX_NUM_OF_EXTRA_DMAS 8
-+#define MAX_BURST_SIZE 1024
-+#define MIN_NUM_OF_OP_DMAS 2
-+
-+
-+/**************************************************************************//**
-+ @Description QMI defines
-+*//***************************************************************************/
-+/* masks */
-+#define QMI_PORT_CFG_EN 0x80000000
-+#define QMI_PORT_CFG_EN_COUNTERS 0x10000000
-+#define QMI_PORT_STATUS_DEQ_TNUM_BSY 0x80000000
-+#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
-+
-+#define QMI_DEQ_CFG_PREFETCH_NO_TNUM 0x02000000
-+#define QMI_DEQ_CFG_PREFETCH_WAITING_TNUM 0
-+#define QMI_DEQ_CFG_PREFETCH_1_FRAME 0
-+#define QMI_DEQ_CFG_PREFETCH_3_FRAMES 0x01000000
-+
-+#define QMI_DEQ_CFG_PRI 0x80000000
-+#define QMI_DEQ_CFG_TYPE1 0x10000000
-+#define QMI_DEQ_CFG_TYPE2 0x20000000
-+#define QMI_DEQ_CFG_TYPE3 0x30000000
-+
-+#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f
-+#define QMI_DEQ_CFG_SUBPORTAL_SHIFT 20
-+
-+/**************************************************************************//**
-+ @Description PARSER defines
-+*//***************************************************************************/
-+/* masks */
-+#define PRS_HDR_ERROR_DIS 0x00000800
-+#define PRS_HDR_SW_PRS_EN 0x00000400
-+#define PRS_CP_OFFSET_MASK 0x0000000F
-+#define PRS_TPID1_MASK 0xFFFF0000
-+#define PRS_TPID2_MASK 0x0000FFFF
-+#define PRS_TPID_DFLT 0x91009100
-+
-+#define PRS_HDR_MPLS_LBL_INTER_EN 0x00200000
-+#define PRS_HDR_IPV6_ROUTE_HDR_EN 0x00008000
-+#define PRS_HDR_PPPOE_MTU_CHECK_EN 0x80000000
-+#define PRS_HDR_UDP_PAD_REMOVAL 0x80000000
-+#define PRS_HDR_TCP_PAD_REMOVAL 0x80000000
-+#define PRS_CAC_STOP 0x00000001
-+#define PRS_CAC_ACTIVE 0x00000100
-+
-+/* shifts */
-+#define PRS_PCTPID_SHIFT 16
-+#define PRS_HDR_MPLS_NEXT_HDR_SHIFT 22
-+#define PRS_HDR_ETH_BC_SHIFT 28
-+#define PRS_HDR_ETH_MC_SHIFT 24
-+#define PRS_HDR_VLAN_STACKED_SHIFT 16
-+#define PRS_HDR_MPLS_STACKED_SHIFT 16
-+#define PRS_HDR_IPV4_1_BC_SHIFT 28
-+#define PRS_HDR_IPV4_1_MC_SHIFT 24
-+#define PRS_HDR_IPV4_2_UC_SHIFT 20
-+#define PRS_HDR_IPV4_2_MC_BC_SHIFT 16
-+#define PRS_HDR_IPV6_1_MC_SHIFT 24
-+#define PRS_HDR_IPV6_2_UC_SHIFT 20
-+#define PRS_HDR_IPV6_2_MC_SHIFT 16
-+
-+#define PRS_HDR_ETH_BC_MASK 0x0fffffff
-+#define PRS_HDR_ETH_MC_MASK 0xf0ffffff
-+#define PRS_HDR_VLAN_STACKED_MASK 0xfff0ffff
-+#define PRS_HDR_MPLS_STACKED_MASK 0xfff0ffff
-+#define PRS_HDR_IPV4_1_BC_MASK 0x0fffffff
-+#define PRS_HDR_IPV4_1_MC_MASK 0xf0ffffff
-+#define PRS_HDR_IPV4_2_UC_MASK 0xff0fffff
-+#define PRS_HDR_IPV4_2_MC_BC_MASK 0xfff0ffff
-+#define PRS_HDR_IPV6_1_MC_MASK 0xf0ffffff
-+#define PRS_HDR_IPV6_2_UC_MASK 0xff0fffff
-+#define PRS_HDR_IPV6_2_MC_MASK 0xfff0ffff
-+
-+/* others */
-+#define PRS_HDR_ENTRY_SIZE 8
-+#define DEFAULT_CLS_PLAN_VECTOR 0xFFFFFFFF
-+
-+#define IPSEC_SW_PATCH_START 0x20
-+#define SCTP_SW_PATCH_START 0x4D
-+#define DCCP_SW_PATCH_START 0x41
-+
-+/**************************************************************************//**
-+ @Description IM defines
-+*//***************************************************************************/
-+#define BD_R_E 0x80000000
-+#define BD_L 0x08000000
-+
-+#define BD_RX_CRE 0x00080000
-+#define BD_RX_FTL 0x00040000
-+#define BD_RX_FTS 0x00020000
-+#define BD_RX_OV 0x00010000
-+
-+#define BD_RX_ERRORS (BD_RX_CRE | BD_RX_FTL | BD_RX_FTS | BD_RX_OV)
-+
-+#define FM_IM_SIZEOF_BD sizeof(t_FmImBd)
-+
-+#define BD_STATUS_MASK 0xffff0000
-+#define BD_LENGTH_MASK 0x0000ffff
-+
-+#define BD_STATUS_AND_LENGTH_SET(bd, val) WRITE_UINT32(*(volatile uint32_t*)(bd), (val))
-+
-+#define BD_STATUS_AND_LENGTH(bd) GET_UINT32(*(volatile uint32_t*)(bd))
-+
-+#define BD_GET(id) &p_FmPort->im.p_BdRing[id]
-+
-+#define IM_ILEGAL_BD_ID 0xffff
-+
-+/* others */
-+#define IM_PRAM_ALIGN 0x100
-+
-+/* masks */
-+#define IM_MODE_GBL 0x20000000
-+#define IM_MODE_BO_MASK 0x18000000
-+#define IM_MODE_BO_SHIFT 3
-+#define IM_MODE_GRC_STP 0x00800000
-+
-+#define IM_MODE_SET_BO(val) (uint32_t)((val << (31-IM_MODE_BO_SHIFT)) & IM_MODE_BO_MASK)
-+
-+#define IM_RXQD_BSYINTM 0x0008
-+#define IM_RXQD_RXFINTM 0x0010
-+#define IM_RXQD_FPMEVT_SEL_MASK 0x0003
-+
-+#define IM_EV_BSY 0x40000000
-+#define IM_EV_RX 0x80000000
-+
-+
-+/**************************************************************************//**
-+ @Description Additional defines
-+*//***************************************************************************/
-+
-+typedef struct {
-+ t_Handle h_FmMuram;
-+ t_FmPortImPram *p_FmPortImPram;
-+ uint8_t fwExtStructsMemId;
-+ uint32_t fwExtStructsMemAttr;
-+ uint16_t bdRingSize;
-+ t_FmImBd *p_BdRing;
-+ t_Handle *p_BdShadow;
-+ uint16_t currBdId;
-+ uint16_t firstBdOfFrameId;
-+
-+ /* Rx port parameters */
-+ uint8_t dataMemId; /**< Memory partition ID for data buffers */
-+ uint32_t dataMemAttributes; /**< Memory attributes for data buffers */
-+ t_BufferPoolInfo rxPool;
-+ uint16_t mrblr;
-+ uint16_t rxFrameAccumLength;
-+ t_FmPortImRxStoreCallback *f_RxStore;
-+
-+ /* Tx port parameters */
-+ uint32_t txFirstBdStatus;
-+ t_FmPortImTxConfCallback *f_TxConf;
-+} t_FmMacIm;
-+
-+
-+typedef struct {
-+ struct fman_port_cfg dfltCfg;
-+ uint32_t dfltFqid;
-+ uint32_t confFqid;
-+ uint32_t errFqid;
-+ uintptr_t baseAddr;
-+ uint8_t deqSubPortal;
-+ bool deqHighPriority;
-+ e_FmPortDeqType deqType;
-+ e_FmPortDeqPrefetchOption deqPrefetchOption;
-+ uint16_t deqByteCnt;
-+ uint8_t cheksumLastBytesIgnore;
-+ uint8_t cutBytesFromEnd;
-+ t_FmBufPoolDepletion bufPoolDepletion;
-+ uint8_t pipelineDepth;
-+ uint16_t fifoLowComfLevel;
-+ bool frmDiscardOverride;
-+ bool enRateLimit;
-+ t_FmPortRateLimit rateLimit;
-+ e_FmPortDualRateLimiterScaleDown rateLimitDivider;
-+ bool enBufPoolDepletion;
-+ uint16_t liodnOffset;
-+ uint16_t liodnBase;
-+ t_FmExtPools extBufPools;
-+ e_FmDmaSwapOption dmaSwapData;
-+ e_FmDmaCacheOption dmaIntContextCacheAttr;
-+ e_FmDmaCacheOption dmaHeaderCacheAttr;
-+ e_FmDmaCacheOption dmaScatterGatherCacheAttr;
-+ bool dmaReadOptimize;
-+ bool dmaWriteOptimize;
-+ uint32_t txFifoMinFillLevel;
-+ uint32_t txFifoLowComfLevel;
-+ uint32_t rxFifoPriElevationLevel;
-+ uint32_t rxFifoThreshold;
-+ t_FmSpBufMargins bufMargins;
-+ t_FmSpIntContextDataCopy intContext;
-+ bool syncReq;
-+ e_FmPortColor color;
-+ fmPortFrameErrSelect_t errorsToDiscard;
-+ fmPortFrameErrSelect_t errorsToEnq;
-+ bool forwardReuseIntContext;
-+ t_FmBufferPrefixContent bufferPrefixContent;
-+ t_FmBackupBmPools *p_BackupBmPools;
-+ bool dontReleaseBuf;
-+ bool setNumOfTasks;
-+ bool setNumOfOpenDmas;
-+ bool setSizeOfFifo;
-+#if (DPAA_VERSION >= 11)
-+ bool noScatherGather;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
-+ bool bcbWorkaround;
-+#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
-+} t_FmPortDriverParam;
-+
-+
-+typedef struct t_FmPortRxPoolsParams
-+{
-+ uint8_t numOfPools;
-+ uint16_t secondLargestBufSize;
-+ uint16_t largestBufSize;
-+} t_FmPortRxPoolsParams;
-+
-+typedef struct t_FmPortDsarVars {
-+ t_Handle *autoResOffsets;
-+ t_FmPortDsarTablesSizes *autoResMaxSizes;
-+ uint32_t fmbm_tcfg;
-+ uint32_t fmbm_tcmne;
-+ uint32_t fmbm_rfne;
-+ uint32_t fmbm_rfpne;
-+ uint32_t fmbm_rcfg;
-+ bool dsarEnabledParser;
-+} t_FmPortDsarVars;
-+typedef struct {
-+ struct fman_port port;
-+ t_Handle h_Fm;
-+ t_Handle h_FmPcd;
-+ t_Handle h_FmMuram;
-+ t_FmRevisionInfo fmRevInfo;
-+ uint8_t portId;
-+ e_FmPortType portType;
-+ int enabled;
-+ char name[MODULE_NAME_SIZE];
-+ uint8_t hardwarePortId;
-+ uint16_t fmClkFreq;
-+ t_FmPortQmiRegs *p_FmPortQmiRegs;
-+ u_FmPortBmiRegs *p_FmPortBmiRegs;
-+ t_FmPortPrsRegs *p_FmPortPrsRegs;
-+ fmPcdEngines_t pcdEngines;
-+ uint32_t savedBmiNia;
-+ uint8_t netEnvId;
-+ uint32_t optArray[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
-+ uint32_t lcvs[FM_PCD_PRS_NUM_OF_HDRS];
-+ uint8_t privateInfo;
-+ uint32_t schemesPerPortVector;
-+ bool useClsPlan;
-+ uint8_t clsPlanGrpId;
-+ t_Handle ccTreeId;
-+ t_Handle completeArg;
-+ void (*f_Complete)(t_Handle arg);
-+ t_FmSpBufferOffsets bufferOffsets;
-+ /* Independent-Mode parameters support */
-+ bool imEn;
-+ t_FmMacIm im;
-+ volatile bool lock;
-+ t_Handle h_Spinlock;
-+ t_FmPortExceptionCallback *f_Exception;
-+ t_Handle h_App;
-+ uint8_t internalBufferOffset;
-+ uint8_t fmanCtrlEventId;
-+ uint32_t exceptions;
-+ bool polling;
-+ t_FmExtPools extBufPools;
-+ uint32_t requiredAction;
-+ uint32_t savedQmiPnen;
-+ uint32_t savedBmiFene;
-+ uint32_t savedBmiFpne;
-+ uint32_t savedBmiCmne;
-+ uint32_t savedBmiOfp;
-+ uint32_t savedNonRxQmiRegsPndn;
-+ uint32_t origNonRxQmiRegsPndn;
-+ int savedPrsStartOffset;
-+ bool includeInPrsStatistics;
-+ uint16_t maxFrameLength;
-+ t_FmFmanCtrl orFmanCtrl;
-+ t_FmPortRsrc openDmas;
-+ t_FmPortRsrc tasks;
-+ t_FmPortRsrc fifoBufs;
-+ t_FmPortRxPoolsParams rxPoolsParams;
-+// bool explicitUserSizeOfFifo;
-+ t_Handle h_IpReassemblyManip;
-+ t_Handle h_CapwapReassemblyManip;
-+ t_Handle h_ReassemblyTree;
-+ uint64_t fmMuramPhysBaseAddr;
-+#if (DPAA_VERSION >= 11)
-+ bool vspe;
-+ uint8_t dfltRelativeId;
-+ e_FmPortGprFuncType gprFunc;
-+ t_FmPcdCtrlParamsPage *p_ParamsPage;
-+#endif /* (DPAA_VERSION >= 11) */
-+ t_FmPortDsarVars deepSleepVars;
-+ t_FmPortDriverParam *p_FmPortDriverParam;
-+} t_FmPort;
-+
-+
-+void FmPortConfigIM (t_FmPort *p_FmPort, t_FmPortParams *p_FmPortParams);
-+t_Error FmPortImCheckInitParameters(t_FmPort *p_FmPort);
-+
-+t_Error FmPortImInit(t_FmPort *p_FmPort);
-+void FmPortImFree(t_FmPort *p_FmPort);
-+
-+t_Error FmPortImEnable (t_FmPort *p_FmPort);
-+t_Error FmPortImDisable (t_FmPort *p_FmPort);
-+t_Error FmPortImRx (t_FmPort *p_FmPort);
-+
-+void FmPortSetMacsecLcv(t_Handle h_FmPort);
-+void FmPortSetMacsecCmd(t_Handle h_FmPort, uint8_t dfltSci);
-+
-+
-+t_Error FM_PORT_SetNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfOpenDmas);
-+t_Error FM_PORT_SetNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks);
-+t_Error FM_PORT_SetSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo);
-+
-+static __inline__ uint8_t * BdBufferGet (t_PhysToVirt *f_PhysToVirt, t_FmImBd *p_Bd)
-+{
-+ uint64_t physAddr = (uint64_t)((uint64_t)GET_UINT8(p_Bd->buff.high) << 32);
-+ physAddr |= GET_UINT32(p_Bd->buff.low);
-+
-+ return (uint8_t *)f_PhysToVirt((physAddress_t)(physAddr));
-+}
-+
-+static __inline__ void SET_ADDR(volatile t_FmPhysAddr *fmPhysAddr, uint64_t value)
-+{
-+ WRITE_UINT8(fmPhysAddr->high,(uint8_t)((value & 0x000000ff00000000LL) >> 32));
-+ WRITE_UINT32(fmPhysAddr->low,(uint32_t)value);
-+}
-+
-+static __inline__ void BdBufferSet(t_VirtToPhys *f_VirtToPhys, t_FmImBd *p_Bd, uint8_t *p_Buffer)
-+{
-+ uint64_t physAddr = (uint64_t)(f_VirtToPhys(p_Buffer));
-+ SET_ADDR(&p_Bd->buff, physAddr);
-+}
-+
-+static __inline__ uint16_t GetNextBdId(t_FmPort *p_FmPort, uint16_t id)
-+{
-+ if (id < p_FmPort->im.bdRingSize-1)
-+ return (uint16_t)(id+1);
-+ else
-+ return 0;
-+}
-+
-+void FM_PORT_Dsar_DumpRegs(void);
-+
-+
-+#endif /* __FM_PORT_H */
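
The BD helpers above split a 40-bit buffer address into a separate high byte and a 32-bit low word, and advance the ring index with a wrap back to zero. The standalone C sketch below restates that arithmetic with plain memory accesses instead of the GET_/WRITE_ accessor macros; the struct and function names are illustrative stand-ins, not the driver's own types.

#include <stdint.h>

/* Simplified stand-in for t_FmPhysAddr: an 8-bit high part plus a 32-bit low part. */
struct phys_addr {
    uint8_t  high;   /* bits 39..32 of the physical address */
    uint32_t low;    /* bits 31..0 of the physical address */
};

/* Split a 40-bit physical address into the high/low fields (cf. SET_ADDR above). */
static void phys_addr_set(struct phys_addr *pa, uint64_t value)
{
    pa->high = (uint8_t)((value >> 32) & 0xff);
    pa->low  = (uint32_t)value;
}

/* Recombine the two fields into a 64-bit address (cf. BdBufferGet above). */
static uint64_t phys_addr_get(const struct phys_addr *pa)
{
    return ((uint64_t)pa->high << 32) | pa->low;
}

/* Advance a BD ring index with wrap-around (cf. GetNextBdId above). */
static uint16_t next_bd_id(uint16_t id, uint16_t ring_size)
{
    return (uint16_t)((id + 1 < ring_size) ? id + 1 : 0);
}
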
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_dsar.h
-@@ -0,0 +1,494 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File fm_port_dsar.h
-+
-+ @Description Deep Sleep Auto Response project - common module header file.
-+
-+ Author - Eyal Harari
-+
-+ @Cautions See the FMan Controller spec and design document for more information.
-+*//***************************************************************************/
-+
-+#ifndef __FM_PORT_DSAR_H_
-+#define __FM_PORT_DSAR_H_
-+
-+#define DSAR_GETSER_MASK 0xFF0000FF
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response VLAN-IPv4 Binding Table (for ARP/ICMPv4)
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
-+ uint16_t vlanId; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+ uint16_t reserved;
-+} _PackedType t_DsarArpBindingEntry;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response Address Resolution Protocol Statistics Descriptor
-+ Refer to the FMan Controller spec for more details.
-+ 0x00 INVAL_CNT Invalid ARP IPv4-Ethernet counter
-+ 0x04 ECHO_CNT Echo counter
-+ 0x08 CD_CNT Conflict Detection counter
-+ 0x0C AR_CNT Auto-Response counter
-+ 0x10 RATM_CNT Replies Addressed To Me counter
-+ 0x14 UKOP_CNT Unknown Operation counter
-+ 0x18 NMTP_CNT Not my TPA counter
-+ 0x1C NMVLAN_CNT Not My VLAN counter
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint32_t invalCnt; /**< Invalid ARP IPv4-Ethernet counter. */
-+ uint32_t echoCnt; /**< Echo counter. */
-+ uint32_t cdCnt; /**< Conflict Detection counter. */
-+ uint32_t arCnt; /**< Auto-Response counter. */
-+ uint32_t ratmCnt; /**< Replies Addressed To Me counter. */
-+ uint32_t ukopCnt; /**< Unknown Operation counter. */
-+ uint32_t nmtpCnt; /**< Not my TPA counter. */
-+ uint32_t nmVlanCnt; /**< Not My VLAN counter */
-+} _PackedType t_DsarArpStatistics;
-+
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response Address Resolution Protocol Descriptor
-+ 0x0 0-15 Control bits [0-15]. Bit 15 = CDEN.
-+ 0x2 0-15 NumOfBindings Number of entries in the binding list.
-+ 0x4 0-15 BindingsPointer Bindings Pointer. This points to an IPv4-MAC Addresses Bindings list.
-+ 0x6 0-15
-+ 0x8 0-15 StatisticsPointer Statistics Pointer. This field points to the ARP Descriptors statistics data structure.
-+ 0xA 0-15
-+ 0xC 0-15 Reserved Reserved. Must be cleared.
-+ 0xE 0-15
-+
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint16_t control; /** Control bits [0-15]. Bit 15 = CDEN */
-+ uint16_t numOfBindings; /**< Number of VLAN-IPv4 */
-+ uint32_t p_Bindings; /**< VLAN-IPv4 Bindings table pointer. */
-+ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
-+ uint32_t reserved1; /**< Reserved. */
-+} _PackedType t_DsarArpDescriptor;
-+
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response VLAN-IPv4 Binding Table (for ARP/ICMPv4)
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
-+ uint16_t vlanId; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+ uint16_t reserved;
-+} _PackedType t_DsarIcmpV4BindingEntry;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response ICMPv4 Statistics Descriptor
-+ Refer to the FMan Controller spec for more details.
-+ 0x00 INVAL_CNT Invalid ICMPv4 header counter
-+ 0x04 NMVLAN_CNT Not My VLAN counter
-+ 0x08 NMIP_CNT Not My IP counter
-+ 0x0C AR_CNT Auto-Response counter
-+ 0x10 CSERR_CNT Checksum Error counter
-+ 0x14 Reserved Reserved
-+ 0x18 Reserved Reserved
-+ 0x1C Reserved Reserved
-+
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint32_t invalCnt; /**< Invalid ICMPv4 Echo counter. */
-+ uint32_t nmVlanCnt; /**< Not My VLAN counter */
-+ uint32_t nmIpCnt; /**< Not My IP counter */
-+ uint32_t arCnt; /**< Auto-Response counter */
-+ uint32_t cserrCnt; /**< Checksum Error counter */
-+ uint32_t reserved0; /**< Reserved */
-+ uint32_t reserved1; /**< Reserved */
-+ uint32_t reserved2; /**< Reserved */
-+} _PackedType t_DsarIcmpV4Statistics;
-+
-+
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response ICMPv4 Descriptor
-+ 0x0 0-15 Control bits [0-15]
-+ 0x2 0-15 NumOfBindings Number of entries in the binding list.
-+ 0x4 0-15 BindingsPointer Bindings Pointer. This points to a VLAN-IPv4 Addresses Bindings list.
-+ 0x6 0-15
-+ 0x8 0-15 StatisticsPointer Statistics Pointer. This field points to the ICMPv4 statistics data structure.
-+ 0xA 0-15
-+ 0xC 0-15 Reserved Reserved. Must be cleared.
-+ 0xE 0-15
-+
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint16_t control; /** Control bits [0-15]. */
-+ uint16_t numOfBindings; /**< Number of VLAN-IPv4 */
-+ uint32_t p_Bindings; /**< VLAN-IPv4 Bindings table pointer. */
-+ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
-+ uint32_t reserved1; /**< Reserved. */
-+} _PackedType t_DsarIcmpV4Descriptor;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response VLAN-IPv6 Binding Table (for ICMPv6/ND)
-+ The 4 left-most bits (15:12) of the VlanId parameter are control flags.
-+ Flags[3:1] (VlanId[15:13]): Reserved, should be cleared.
-+ Flags[0] (VlanId[12]): Temporary address.
-+ • 0 - Assigned IP address.
-+ • 1- Temporary (tentative) IP address.
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint32_t ipv6Addr[4]; /*!< 4 * 32 bit IPv6 Address. */
-+ uint16_t resFlags:4; /*!< reserved flags. should be cleared */
-+ uint16_t vlanId:12; /*!< 12 bits VLAN ID. */
-+ /*!< This field should be 0x000 for an entry with no VLAN tag or a null VLAN ID. */
-+ uint16_t reserved;
-+} _PackedType t_DsarIcmpV6BindingEntry;
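
Because C bitfield layout is implementation-defined, the flags/VLAN-ID packing described above (flags in VlanId[15:12], VLAN ID in VlanId[11:0], flag 0 marking a tentative address) can also be written with explicit shifts and masks. This is a minimal sketch under that assumption; the macro and helper names are hypothetical.

#include <stdbool.h>
#include <stdint.h>

#define DSAR_V6_BINDING_VLAN_MASK  0x0fffu  /* VlanId[11:0] */
#define DSAR_V6_BINDING_TENTATIVE  0x1000u  /* Flags[0] = VlanId[12]: tentative IP address */

/* Pack the 16-bit flags/VLAN-ID word: flags occupy bits 15:12, the VLAN ID bits 11:0. */
static uint16_t dsar_v6_binding_word(uint16_t vlan_id, bool tentative)
{
    uint16_t w = (uint16_t)(vlan_id & DSAR_V6_BINDING_VLAN_MASK);

    if (tentative)
        w |= DSAR_V6_BINDING_TENTATIVE;
    return w;
}
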
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response ICMPv6 Statistics Descriptor
-+ Refer to the FMan Controller spec for more details.
-+ 0x00 INVAL_CNT Invalid ICMPv6 header counter
-+ 0x04 NMVLAN_CNT Not My VLAN counter
-+ 0x08 NMIP_CNT Not My IP counter
-+ 0x0C AR_CNT Auto-Response counter
-+ 0x10 CSERR_CNT Checksum Error counter
-+ 0x14 MCAST_CNT Multicast counter
-+ 0x18 Reserved Reserved
-+ 0x1C Reserved Reserved
-+
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint32_t invalCnt; /**< Invalid ICMPv6 header counter. */
-+ uint32_t nmVlanCnt; /**< Not My VLAN counter */
-+ uint32_t nmIpCnt; /**< Not My IP counter */
-+ uint32_t arCnt; /**< Auto-Response counter */
-+ uint32_t reserved1; /**< Reserved */
-+ uint32_t reserved2; /**< Reserved */
-+ uint32_t reserved3; /**< Reserved */
-+ uint32_t reserved4; /**< Reserved */
-+} _PackedType t_DsarIcmpV6Statistics;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response Neighbor Discovery Statistics Descriptor
-+ 0x00 INVAL_CNT Invalid Neighbor Discovery message counter
-+ 0x04 NMVLAN_CNT Not My VLAN counter
-+ 0x08 NMIP_CNT Not My IP counter
-+ 0x0C AR_CNT Auto-Response counter
-+ 0x10 CSERR_CNT Checksum Error counter
-+ 0x14 USADVERT_CNT Unsolicited Neighbor Advertisements counter
-+ 0x18 NMMCAST_CNT Not My Multicast group counter
-+ 0x1C NSLLA_CNT No Source Link-Layer Address counter. Indicates that there was a match on a Target
-+ Address of a packet whose source IP address is a unicast address, but the ICMPv6
-+ Source Link-layer Address option is omitted
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint32_t invalCnt; /**< Invalid Neighbor Discovery message counter. */
-+ uint32_t nmVlanCnt; /**< Not My VLAN counter */
-+ uint32_t nmIpCnt; /**< Not My IP counter */
-+ uint32_t arCnt; /**< Auto-Response counter */
-+ uint32_t reserved1; /**< Reserved */
-+ uint32_t usadvertCnt; /**< Unsolicited Neighbor Advertisements counter */
-+ uint32_t nmmcastCnt; /**< Not My Multicast group counter */
-+ uint32_t nsllaCnt; /**< No Source Link-Layer Address counter */
-+} _PackedType t_NdStatistics;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response ICMPv6 Descriptor
-+ 0x0 0-15 Control bits [0-15]
-+ 0x2 0-15 NumOfBindings Number of entries in the binding list.
-+ 0x4 0-15 BindingsPointer Bindings Pointer. This points to a VLAN-IPv6 Addresses Bindings list.
-+ 0x6 0-15
-+ 0x8 0-15 StatisticsPointer Statistics Pointer. This field points to the ICMPv6 statistics data structure.
-+ 0xA 0-15
-+ 0xC 0-15 Reserved Reserved. Must be cleared.
-+ 0xE 0-15
-+
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint16_t control; /** Control bits [0-15]. */
-+ uint16_t numOfBindings; /**< Number of VLAN-IPv6 */
-+ uint32_t p_Bindings; /**< VLAN-IPv6 Bindings table pointer. */
-+ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
-+ uint32_t reserved1; /**< Reserved. */
-+} _PackedType t_DsarIcmpV6Descriptor;
-+
-+
-+/**************************************************************************//**
-+ @Description Internet Control Message Protocol (ICMPv6) Echo message header
-+ The fields names are taken from RFC 4443.
-+*//***************************************************************************/
-+/* 0 1 2 3 */
-+/* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 */
-+/* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
-+/* | Type | Code | Checksum | */
-+/* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
-+/* | Identifier | Sequence Number | */
-+/* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
-+/* | Data ... */
-+/* +-+-+-+-+- */
-+typedef _Packed struct
-+{
-+ uint8_t type;
-+ uint8_t code;
-+ uint16_t checksum;
-+ uint16_t identifier;
-+ uint16_t sequenceNumber;
-+} _PackedType t_IcmpV6EchoHdr;
-+
-+/**************************************************************************//**
-+ @Description Internet Control Message Protocol (ICMPv6)
-+ Neighbor Solicitation/Advertisement header
-+ The fields names are taken from RFC 4861.
-+ The R/S/O fields are valid for Neighbor Advertisement only
-+*//***************************************************************************/
-+/* 0 1 2 3
-+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ * | Type | Code | Checksum |
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ * |R|S|O| Reserved |
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ * | |
-+ * + +
-+ * | |
-+ * + Target Address +
-+ * | |
-+ * + +
-+ * | |
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ * | Options ...
-+ * +-+-+-+-+-+-+-+-+-+-+-+-
-+ *
-+ * Options Format:
-+ * 0 1 2 3
-+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ * | Type | Length | Link-Layer Address ... |
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+ * | Link-Layer Address |
-+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-+*/
-+typedef _Packed struct
-+{
-+ uint8_t type;
-+ uint8_t code;
-+ uint16_t checksum;
-+ uint32_t router:1;
-+ uint32_t solicited:1;
-+ uint32_t override:1;
-+ uint32_t reserved:29;
-+ uint32_t targetAddr[4];
-+ uint8_t optionType;
-+ uint8_t optionLength;
-+ uint8_t linkLayerAddr[6];
-+} _PackedType t_IcmpV6NdHdr;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response Neighbor Discovery (ND) Descriptor
-+ 0x0 0-15 Control bits [0-15]
-+ 0x2 0-15 NumOfBindings Number of entries in the binding list.
-+ 0x4 0-15 BindingsPointer Bindings Pointer. This points to a VLAN-IPv6 Addresses Bindings list.
-+ 0x6 0-15
-+ 0x8 0-15 StatisticsPointer Statistics Pointer. This field points to the Neighbor Discovery statistics data structure.
-+ 0xA 0-15
-+ 0xC 0-15 Reserved Reserved. Must be cleared.
-+ 0xE 0-15
-+
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint16_t control; /** Control bits [0-15]. */
-+ uint16_t numOfBindings; /**< Number of VLAN-IPv6 */
-+ uint32_t p_Bindings; /**< VLAN-IPv6 Bindings table pointer. */
-+ uint32_t p_Statistics; /**< Statistics Data Structure pointer. */
-+ uint32_t solicitedAddr; /**< Solicited Node Multicast Group Address */
-+} _PackedType t_DsarNdDescriptor;
-+
-+/**************************************************************************//**
-+@Description Deep Sleep Auto Response SNMP OIDs table entry
-+
-+*//***************************************************************************/
-+typedef struct {
-+ uint16_t oidSize; /**< Size in octets of the OID. */
-+ uint16_t resSize; /**< Size in octets of the value that is attached to the OID. */
-+ uint32_t p_Oid; /**< Pointer to the OID. OID is encoded in BER but type and length are excluded. */
-+ uint32_t resValOrPtr; /**< Value (for up to 4 octets) or pointer to the Value. Encoded in BER. */
-+ uint32_t reserved;
-+} t_OidsTblEntry;
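
As a hedged illustration of the convention noted above (the OID is stored as BER content octets, with the type and length octets excluded), an entry for sysName.0 (1.3.6.1.2.1.1.5.0) would reference the eight octets below. The array name is hypothetical; in a real table, oidSize would be sizeof(sysname_oid) and p_Oid would hold the controller-visible address of such a buffer.

#include <stdint.h>

/* BER content octets of OID 1.3.6.1.2.1.1.5.0 (sysName.0).  The leading 0x2B
 * encodes the first two arcs as 40*1 + 3; type and length octets are omitted,
 * matching the t_OidsTblEntry description. */
static const uint8_t sysname_oid[] = {
    0x2B, 0x06, 0x01, 0x02, 0x01, 0x01, 0x05, 0x00
};
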
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP IPv4 Addresses Table Entry
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+typedef struct
-+{
-+ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
-+ uint16_t vlanId; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+ uint16_t reserved;
-+} t_DsarSnmpIpv4AddrTblEntry;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP IPv6 Addresses Table Entry
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+#pragma pack(push,1)
-+typedef struct
-+{
-+ uint32_t ipv6Addr[4]; /*!< 4 * 32 bit IPv6 Address. */
-+ uint16_t vlanId; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+ uint16_t reserved;
-+} t_DsarSnmpIpv6AddrTblEntry;
-+#pragma pack(pop)
-+
-+/**************************************************************************//**
-+@Description Deep Sleep Auto Response SNMP statistics table
-+
-+*//***************************************************************************/
-+typedef struct {
-+ uint32_t snmpErrCnt; /**< Counts SNMP errors (wrong version, BER encoding, format). */
-+ uint32_t snmpCommunityErrCnt; /**< Counts messages that were dropped due to insufficient permission. */
-+ uint32_t snmpTotalDiscardCnt; /**< Counts any message that was dropped. */
-+ uint32_t snmpGetReqCnt; /**< Counts the number of get-request messages */
-+ uint32_t snmpGetNextReqCnt; /**< Counts the number of get-next-request messages */
-+} t_DsarSnmpStatistics;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP Descriptor
-+
-+*//***************************************************************************/
-+typedef struct
-+{
-+ uint16_t control; /**< Control bits [0-15]. */
-+ uint16_t maxSnmpMsgLength; /**< Maximal allowed SNMP message length. */
-+ uint16_t numOfIpv4Addresses; /**< Number of entries in IPv4 addresses table. */
-+ uint16_t numOfIpv6Addresses; /**< Number of entries in IPv6 addresses table. */
-+ uint32_t p_Ipv4AddrTbl; /**< Pointer to IPv4 addresses table. */
-+ uint32_t p_Ipv6AddrTbl; /**< Pointer to IPv6 addresses table. */
-+ uint32_t p_RdOnlyCommunityStr; /**< Pointer to the Read Only Community String. */
-+ uint32_t p_RdWrCommunityStr; /**< Pointer to the Read Write Community String. */
-+ uint32_t p_OidsTbl; /**< Pointer to OIDs table. */
-+ uint32_t oidsTblSize; /**< Number of entries in OIDs table. */
-+ uint32_t p_Statistics; /**< Pointer to SNMP statistics table. */
-+} t_DsarSnmpDescriptor;
-+
-+/**************************************************************************//**
-+@Description Deep Sleep Auto Response (Common) Statistics
-+
-+*//***************************************************************************/
-+typedef _Packed struct {
-+ uint32_t dsarDiscarded;
-+ uint32_t dsarErrDiscarded;
-+ uint32_t dsarFragDiscarded;
-+ uint32_t dsarTunnelDiscarded;
-+ uint32_t dsarArpDiscarded;
-+ uint32_t dsarIpDiscarded;
-+ uint32_t dsarTcpDiscarded;
-+ uint32_t dsarUdpDiscarded;
-+ uint32_t dsarIcmpV6ChecksumErr; /* ICMPv6 Checksum Error counter */
-+ uint32_t dsarIcmpV6OtherType; /* ICMPv6 'Other' type (not Echo or Neighbor Solicitation/Advertisement) counter */
-+ uint32_t dsarIcmpV4OtherType; /* ICMPv4 'Other' type (not Echo) counter */
-+} _PackedType t_ArStatistics;
-+
-+
-+/**************************************************************************//**
-+@Description Deep Sleep Auto Response TCP/UDP port filter table entry
-+
-+*//***************************************************************************/
-+typedef _Packed struct {
-+ uint32_t Ports;
-+ uint32_t PortsMask;
-+} _PackedType t_PortTblEntry;
-+
-+
-+
-+/**************************************************************************//**
-+@Description Deep Sleep Auto Response Common Parameters Descriptor
-+
-+*//***************************************************************************/
-+typedef _Packed struct {
-+ uint8_t arTxPort; /* 0x00 0-7 Auto Response Transmit Port number */
-+ uint8_t controlBits; /* 0x00 8-15 Auto Response control bits */
-+ uint16_t res1; /* 0x00 16-31 Reserved */
-+ uint32_t activeHPNIA; /* 0x04 0-31 Active mode Hardware Parser NIA */
-+ uint16_t snmpPort; /* 0x08 0-15 SNMP Port. */
-+ uint8_t macStationAddr[6]; /* 0x08 16-31 and 0x0C 0-31 MAC Station Address */
-+ uint8_t res2; /* 0x10 0-7 Reserved */
-+ uint8_t filterControl; /* 0x10 8-15 Filtering Control Bits. */
-+ uint16_t tcpControlPass; /* 0x10 16-31 TCP control pass flags */
-+ uint8_t ipProtocolTblSize; /* 0x14 0-7 IP Protocol Table Size. */
-+ uint8_t udpPortTblSize; /* 0x14 8-15 UDP Port Table Size. */
-+ uint8_t tcpPortTblSize; /* 0x14 16-23 TCP Port Table Size. */
-+ uint8_t res3; /* 0x14 24-31 Reserved */
-+ uint32_t p_IpProtocolFiltTbl; /* 0x18 0-31 Pointer to IP Protocol Filter Table */
-+ uint32_t p_UdpPortFiltTbl; /* 0x1C 0-31 Pointer to UDP Port Filter Table */
-+ uint32_t p_TcpPortFiltTbl; /* 0x20 0-31 Pointer to TCP Port Filter Table */
-+ uint32_t res4; /* 0x24 Reserved */
-+ uint32_t p_ArpDescriptor; /* 0x28 0-31 ARP Descriptor Pointer. */
-+ uint32_t p_NdDescriptor; /* 0x2C 0-31 Neighbor Discovery Descriptor. */
-+ uint32_t p_IcmpV4Descriptor; /* 0x30 0-31 ICMPv4 Descriptor pointer. */
-+ uint32_t p_IcmpV6Descriptor; /* 0x34 0-31 ICMPv6 Descriptor pointer. */
-+ uint32_t p_SnmpDescriptor; /* 0x38 0-31 SNMP Descriptor pointer. */
-+ uint32_t p_ArStats; /* 0x3C 0-31 Pointer to Auto Response Statistics */
-+} _PackedType t_ArCommonDesc;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/* t_ArCommonDesc.filterControl bits */
-+#define IP_PROT_TBL_PASS_MASK 0x08
-+#define UDP_PORT_TBL_PASS_MASK 0x04
-+#define TCP_PORT_TBL_PASS_MASK 0x02
-+
-+/* Offset of TCF flags within TCP packet */
-+#define TCP_FLAGS_OFFSET 12
-+
-+
-+#endif /* __FM_PORT_DSAR_H_ */
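
TCP_FLAGS_OFFSET above points at the 16-bit word of the TCP header that carries the data offset, reserved bits and flag bits. The sketch below only illustrates that layout by extracting the flag bits from a raw header; how the firmware compares the result against tcpControlPass is not specified here, so the helper name and usage are assumptions.

#include <stdint.h>

#define TCP_FLAGS_OFFSET 12   /* mirrors the definition above */

/* Read the big-endian 16-bit word at byte offset 12 of a TCP header and keep
 * the low 9 flag bits (NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN). */
static uint16_t tcp_flag_bits(const uint8_t *tcp_hdr)
{
    uint16_t word = (uint16_t)((tcp_hdr[TCP_FLAGS_OFFSET] << 8) |
                               tcp_hdr[TCP_FLAGS_OFFSET + 1]);

    return (uint16_t)(word & 0x01ff);
}
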
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port_im.c
-@@ -0,0 +1,753 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_port_im.c
-+
-+ @Description FM Port Independent-Mode ...
-+*//***************************************************************************/
-+#include "std_ext.h"
-+#include "string_ext.h"
-+#include "error_ext.h"
-+#include "memcpy_ext.h"
-+#include "fm_muram_ext.h"
-+
-+#include "fm_port.h"
-+
-+
-+#define TX_CONF_STATUS_UNSENT 0x1
-+
-+
-+typedef enum e_TxConfType
-+{
-+ e_TX_CONF_TYPE_CHECK = 0 /**< check if all the buffers were touched by the muxator, no confirmation callback */
-+ ,e_TX_CONF_TYPE_CALLBACK = 1 /**< confirm to user all the available sent buffers */
-+ ,e_TX_CONF_TYPE_FLUSH = 3 /**< confirm all buffers plus the unsent one with an appropriate status */
-+} e_TxConfType;
-+
-+
-+static void ImException(t_Handle h_FmPort, uint32_t event)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ ASSERT_COND(((event & (IM_EV_RX | IM_EV_BSY)) && FmIsMaster(p_FmPort->h_Fm)) ||
-+ !FmIsMaster(p_FmPort->h_Fm));
-+
-+ if (event & IM_EV_RX)
-+ FmPortImRx(p_FmPort);
-+ if ((event & IM_EV_BSY) && p_FmPort->f_Exception)
-+ p_FmPort->f_Exception(p_FmPort->h_App, e_FM_PORT_EXCEPTION_IM_BUSY);
-+}
-+
-+
-+static t_Error TxConf(t_FmPort *p_FmPort, e_TxConfType confType)
-+{
-+ t_Error retVal = E_BUSY;
-+ uint32_t bdStatus;
-+ uint16_t savedStartBdId, confBdId;
-+
-+ ASSERT_COND(p_FmPort);
-+
-+ /*
-+ if (confType==e_TX_CONF_TYPE_CHECK)
-+ return (WfqEntryIsQueueEmpty(p_FmPort->im.h_WfqEntry) ? E_OK : E_BUSY);
-+ */
-+
-+ confBdId = savedStartBdId = p_FmPort->im.currBdId;
-+ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(confBdId));
-+
-+ /* If the R bit is set we don't enter the loop (or we break out of it);
-+ we run until we reach a BD with R set, or complete a full pass over the ring */
-+ while ((!(bdStatus & BD_R_E) || (confType == e_TX_CONF_TYPE_FLUSH)) && (retVal != E_OK))
-+ {
-+ if (confType & e_TX_CONF_TYPE_CALLBACK) /* if it is confirmation with user callbacks */
-+ BD_STATUS_AND_LENGTH_SET(BD_GET(confBdId), 0);
-+
-+ /* case 1: R bit is 0 and Length is set -> confirm! */
-+ if ((confType & e_TX_CONF_TYPE_CALLBACK) && (bdStatus & BD_LENGTH_MASK))
-+ {
-+ if (p_FmPort->im.f_TxConf)
-+ {
-+ if ((confType == e_TX_CONF_TYPE_FLUSH) && (bdStatus & BD_R_E))
-+ p_FmPort->im.f_TxConf(p_FmPort->h_App,
-+ BdBufferGet(XX_PhysToVirt, BD_GET(confBdId)),
-+ TX_CONF_STATUS_UNSENT,
-+ p_FmPort->im.p_BdShadow[confBdId]);
-+ else
-+ p_FmPort->im.f_TxConf(p_FmPort->h_App,
-+ BdBufferGet(XX_PhysToVirt, BD_GET(confBdId)),
-+ 0,
-+ p_FmPort->im.p_BdShadow[confBdId]);
-+ }
-+ }
-+ /* case 2: R bit is 0 and Length is 0 -> not used yet, nop! */
-+
-+ confBdId = GetNextBdId(p_FmPort, confBdId);
-+ if (confBdId == savedStartBdId)
-+ retVal = E_OK;
-+ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(confBdId));
-+ }
-+
-+ return retVal;
-+}
-+
-+t_Error FmPortImEnable(t_FmPort *p_FmPort)
-+{
-+ uint32_t tmpReg = GET_UINT32(p_FmPort->im.p_FmPortImPram->mode);
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, (uint32_t)(tmpReg & ~IM_MODE_GRC_STP));
-+ return E_OK;
-+}
-+
-+t_Error FmPortImDisable(t_FmPort *p_FmPort)
-+{
-+ uint32_t tmpReg = GET_UINT32(p_FmPort->im.p_FmPortImPram->mode);
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, (uint32_t)(tmpReg | IM_MODE_GRC_STP));
-+ return E_OK;
-+}
-+
-+t_Error FmPortImRx(t_FmPort *p_FmPort)
-+{
-+ t_Handle h_CurrUserPriv, h_NewUserPriv;
-+ uint32_t bdStatus;
-+ volatile uint8_t buffPos;
-+ uint16_t length;
-+ uint16_t errors;
-+ uint8_t *p_CurData, *p_Data;
-+ uint32_t flags;
-+
-+ ASSERT_COND(p_FmPort);
-+
-+ flags = XX_LockIntrSpinlock(p_FmPort->h_Spinlock);
-+ if (p_FmPort->lock)
-+ {
-+ XX_UnlockIntrSpinlock(p_FmPort->h_Spinlock, flags);
-+ return E_OK;
-+ }
-+ p_FmPort->lock = TRUE;
-+ XX_UnlockIntrSpinlock(p_FmPort->h_Spinlock, flags);
-+
-+ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
-+
-+ while (!(bdStatus & BD_R_E)) /* while there is data in the Rx BD */
-+ {
-+ if ((p_Data = p_FmPort->im.rxPool.f_GetBuf(p_FmPort->im.rxPool.h_BufferPool, &h_NewUserPriv)) == NULL)
-+ {
-+ p_FmPort->lock = FALSE;
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Data buffer"));
-+ }
-+
-+ if (p_FmPort->im.firstBdOfFrameId == IM_ILEGAL_BD_ID)
-+ p_FmPort->im.firstBdOfFrameId = p_FmPort->im.currBdId;
-+
-+ p_CurData = BdBufferGet(p_FmPort->im.rxPool.f_PhysToVirt, BD_GET(p_FmPort->im.currBdId));
-+ h_CurrUserPriv = p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId];
-+ length = (uint16_t)((bdStatus & BD_L) ?
-+ ((bdStatus & BD_LENGTH_MASK) - p_FmPort->im.rxFrameAccumLength):
-+ (bdStatus & BD_LENGTH_MASK));
-+ p_FmPort->im.rxFrameAccumLength += length;
-+
-+ /* determine whether buffer is first, last, first and last (single */
-+ /* buffer frame) or middle (not first and not last) */
-+ buffPos = (uint8_t)((p_FmPort->im.currBdId == p_FmPort->im.firstBdOfFrameId) ?
-+ ((bdStatus & BD_L) ? SINGLE_BUF : FIRST_BUF) :
-+ ((bdStatus & BD_L) ? LAST_BUF : MIDDLE_BUF));
-+
-+ if (bdStatus & BD_L)
-+ {
-+ p_FmPort->im.rxFrameAccumLength = 0;
-+ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
-+ }
-+
-+ BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, BD_GET(p_FmPort->im.currBdId), p_Data);
-+
-+ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), BD_R_E);
-+
-+ errors = (uint16_t)((bdStatus & BD_RX_ERRORS) >> 16);
-+ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId] = h_NewUserPriv;
-+
-+ p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.offsetOut, (uint16_t)(p_FmPort->im.currBdId<<4));
-+ /* Pass the buffer if one of the conditions is true:
-+ - There are no errors
-+ - This is part of a larger frame (the application has already received some buffers) */
-+ if ((buffPos != SINGLE_BUF) || !errors)
-+ {
-+ if (p_FmPort->im.f_RxStore(p_FmPort->h_App,
-+ p_CurData,
-+ length,
-+ errors,
-+ buffPos,
-+ h_CurrUserPriv) == e_RX_STORE_RESPONSE_PAUSE)
-+ break;
-+ }
-+ else if (p_FmPort->im.rxPool.f_PutBuf(p_FmPort->im.rxPool.h_BufferPool,
-+ p_CurData,
-+ h_CurrUserPriv))
-+ {
-+ p_FmPort->lock = FALSE;
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Failed freeing data buffer"));
-+ }
-+
-+ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
-+ }
-+ p_FmPort->lock = FALSE;
-+ return E_OK;
-+}
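
The buffer-position computation inside the Rx loop above reduces to a small truth table over "is this the first BD of the frame" and "does the BD carry the L (last) bit". The sketch below restates it with local enum values that stand in for the driver's SINGLE_BUF/FIRST_BUF/LAST_BUF/MIDDLE_BUF constants, whose definitions live outside this file; it is an illustration, not driver code.

#include <stdbool.h>

enum buf_pos { POS_SINGLE, POS_FIRST, POS_LAST, POS_MIDDLE };

/* is_first: this BD is the first BD of the frame (currBdId == firstBdOfFrameId).
 * is_last:  the BD status word has the L bit set. */
static enum buf_pos classify_buf(bool is_first, bool is_last)
{
    if (is_first)
        return is_last ? POS_SINGLE : POS_FIRST;
    return is_last ? POS_LAST : POS_MIDDLE;
}
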
-+
-+void FmPortConfigIM (t_FmPort *p_FmPort, t_FmPortParams *p_FmPortParams)
-+{
-+ ASSERT_COND(p_FmPort);
-+
-+ SANITY_CHECK_RETURN(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->im.h_FmMuram = p_FmPortParams->specificParams.imRxTxParams.h_FmMuram;
-+ p_FmPort->p_FmPortDriverParam->liodnOffset = p_FmPortParams->specificParams.imRxTxParams.liodnOffset;
-+ p_FmPort->im.dataMemId = p_FmPortParams->specificParams.imRxTxParams.dataMemId;
-+ p_FmPort->im.dataMemAttributes = p_FmPortParams->specificParams.imRxTxParams.dataMemAttributes;
-+
-+ p_FmPort->im.fwExtStructsMemId = DEFAULT_PORT_ImfwExtStructsMemId;
-+ p_FmPort->im.fwExtStructsMemAttr = DEFAULT_PORT_ImfwExtStructsMemAttr;
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
-+ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ p_FmPort->im.rxPool.h_BufferPool = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.h_BufferPool;
-+ p_FmPort->im.rxPool.f_GetBuf = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_GetBuf;
-+ p_FmPort->im.rxPool.f_PutBuf = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_PutBuf;
-+ p_FmPort->im.rxPool.bufferSize = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.bufferSize;
-+ p_FmPort->im.rxPool.f_PhysToVirt = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_PhysToVirt;
-+ if (!p_FmPort->im.rxPool.f_PhysToVirt)
-+ p_FmPort->im.rxPool.f_PhysToVirt = XX_PhysToVirt;
-+ p_FmPort->im.rxPool.f_VirtToPhys = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_VirtToPhys;
-+ if (!p_FmPort->im.rxPool.f_VirtToPhys)
-+ p_FmPort->im.rxPool.f_VirtToPhys = XX_VirtToPhys;
-+ p_FmPort->im.f_RxStore = p_FmPortParams->specificParams.imRxTxParams.f_RxStore;
-+
-+ p_FmPort->im.mrblr = 0x8000;
-+ while (p_FmPort->im.mrblr)
-+ {
-+ if (p_FmPort->im.rxPool.bufferSize & p_FmPort->im.mrblr)
-+ break;
-+ p_FmPort->im.mrblr >>= 1;
-+ }
-+ if (p_FmPort->im.mrblr != p_FmPort->im.rxPool.bufferSize)
-+ DBG(WARNING, ("Max-Rx-Buffer-Length set to %d", p_FmPort->im.mrblr));
-+ p_FmPort->im.bdRingSize = DEFAULT_PORT_rxBdRingLength;
-+ p_FmPort->exceptions = DEFAULT_PORT_exception;
-+ if (FmIsMaster(p_FmPort->h_Fm))
-+ p_FmPort->polling = FALSE;
-+ else
-+ p_FmPort->polling = TRUE;
-+ p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ;
-+ }
-+ else
-+ {
-+ p_FmPort->im.f_TxConf = p_FmPortParams->specificParams.imRxTxParams.f_TxConf;
-+
-+ p_FmPort->im.bdRingSize = DEFAULT_PORT_txBdRingLength;
-+ }
-+}
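
The mrblr loop above rounds the Rx pool buffer size down to the nearest power of two and warns when the size was not already one (e.g. 2048 stays 2048, while 1600 becomes 1024 and the DBG warning is logged). A standalone restatement of the same rounding, with a hypothetical helper name:

#include <stdint.h>

/* Round a non-zero 16-bit buffer size down to the nearest power of two, as the
 * mrblr loop does: start from the highest candidate bit and shift right until
 * it overlaps the requested size. */
static uint16_t round_down_pow2(uint16_t buffer_size)
{
    uint16_t mrblr = 0x8000;

    while (mrblr && !(buffer_size & mrblr))
        mrblr >>= 1;
    return mrblr;
}
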
-+
-+t_Error FmPortImCheckInitParameters(t_FmPort *p_FmPort)
-+{
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX) &&
-+ (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) &&
-+ (p_FmPort->portType != e_FM_PORT_TYPE_TX) &&
-+ (p_FmPort->portType != e_FM_PORT_TYPE_TX_10G))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
-+ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ if (!POWER_OF_2(p_FmPort->im.mrblr))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("max Rx buffer length must be power of 2!!!"));
-+ if (p_FmPort->im.mrblr < 256)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("max Rx buffer length must be at least 256!!!"));
-+ if (p_FmPort->p_FmPortDriverParam->liodnOffset & ~FM_LIODN_OFFSET_MASK)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1));
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FmPortImInit(t_FmPort *p_FmPort)
-+{
-+ t_FmImBd *p_Bd=NULL;
-+ t_Handle h_BufContext;
-+ uint64_t tmpPhysBase;
-+ uint16_t log2Num;
-+ uint8_t *p_Data/*, *p_Tmp*/;
-+ int i;
-+ t_Error err;
-+ uint16_t tmpReg16;
-+ uint32_t tmpReg32;
-+
-+ ASSERT_COND(p_FmPort);
-+
-+ p_FmPort->im.p_FmPortImPram =
-+ (t_FmPortImPram *)FM_MURAM_AllocMem(p_FmPort->im.h_FmMuram, sizeof(t_FmPortImPram), IM_PRAM_ALIGN);
-+ if (!p_FmPort->im.p_FmPortImPram)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Parameter-RAM!!!"));
-+ WRITE_BLOCK(p_FmPort->im.p_FmPortImPram, 0, sizeof(t_FmPortImPram));
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
-+ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ p_FmPort->im.p_BdRing =
-+ (t_FmImBd *)XX_MallocSmart((uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize),
-+ p_FmPort->im.fwExtStructsMemId,
-+ 4);
-+ if (!p_FmPort->im.p_BdRing)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Rx BD ring!!!"));
-+ IOMemSet32(p_FmPort->im.p_BdRing, 0, (uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
-+
-+ p_FmPort->im.p_BdShadow = (t_Handle *)XX_Malloc((uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
-+ if (!p_FmPort->im.p_BdShadow)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Rx BD shadow!!!"));
-+ memset(p_FmPort->im.p_BdShadow, 0, (uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
-+
-+ /* Initialize the Rx-BD ring */
-+ for (i=0; i<p_FmPort->im.bdRingSize; i++)
-+ {
-+ p_Bd = BD_GET(i);
-+ BD_STATUS_AND_LENGTH_SET (p_Bd, BD_R_E);
-+
-+ if ((p_Data = p_FmPort->im.rxPool.f_GetBuf(p_FmPort->im.rxPool.h_BufferPool, &h_BufContext)) == NULL)
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Data buffer"));
-+ BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, p_Bd, p_Data);
-+ p_FmPort->im.p_BdShadow[i] = h_BufContext;
-+ }
-+
-+ if ((p_FmPort->im.dataMemAttributes & MEMORY_ATTR_CACHEABLE) ||
-+ (p_FmPort->im.fwExtStructsMemAttr & MEMORY_ATTR_CACHEABLE))
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_GBL | IM_MODE_SET_BO(2));
-+ else
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_SET_BO(2));
-+
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->rxQdPtr,
-+ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
-+ p_FmPort->fmMuramPhysBaseAddr + 0x20));
-+
-+ LOG2((uint64_t)p_FmPort->im.mrblr, log2Num);
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->mrblr, log2Num);
-+
-+ /* Initialize Rx QD */
-+ tmpPhysBase = (uint64_t)(XX_VirtToPhys(p_FmPort->im.p_BdRing));
-+ SET_ADDR(&p_FmPort->im.p_FmPortImPram->rxQd.bdRingBase, tmpPhysBase);
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.bdRingSize, (uint16_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
-+
-+ /* Update the IM PRAM address in the BMI */
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfqid,
-+ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
-+ p_FmPort->fmMuramPhysBaseAddr));
-+ if (!p_FmPort->polling || p_FmPort->exceptions)
-+ {
-+ /* Allocate, configure and register interrupts */
-+ err = FmAllocFmanCtrlEventReg(p_FmPort->h_Fm, &p_FmPort->fmanCtrlEventId);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ ASSERT_COND(!(p_FmPort->fmanCtrlEventId & ~IM_RXQD_FPMEVT_SEL_MASK));
-+ tmpReg16 = (uint16_t)(p_FmPort->fmanCtrlEventId & IM_RXQD_FPMEVT_SEL_MASK);
-+ tmpReg32 = 0;
-+
-+ if (p_FmPort->exceptions & IM_EV_BSY)
-+ {
-+ tmpReg16 |= IM_RXQD_BSYINTM;
-+ tmpReg32 |= IM_EV_BSY;
-+ }
-+ if (!p_FmPort->polling)
-+ {
-+ tmpReg16 |= IM_RXQD_RXFINTM;
-+ tmpReg32 |= IM_EV_RX;
-+ }
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16);
-+
-+ FmRegisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, ImException , (t_Handle)p_FmPort);
-+
-+ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32);
-+ }
-+ else
-+ p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ;
-+ }
-+ else
-+ {
-+ p_FmPort->im.p_BdRing = (t_FmImBd *)XX_MallocSmart((uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize), p_FmPort->im.fwExtStructsMemId, 4);
-+ if (!p_FmPort->im.p_BdRing)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Tx BD ring!!!"));
-+ IOMemSet32(p_FmPort->im.p_BdRing, 0, (uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
-+
-+ p_FmPort->im.p_BdShadow = (t_Handle *)XX_Malloc((uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
-+ if (!p_FmPort->im.p_BdShadow)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Tx BD shadow!!!"));
-+ memset(p_FmPort->im.p_BdShadow, 0, (uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize));
-+ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
-+
-+ if ((p_FmPort->im.dataMemAttributes & MEMORY_ATTR_CACHEABLE) ||
-+ (p_FmPort->im.fwExtStructsMemAttr & MEMORY_ATTR_CACHEABLE))
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_GBL | IM_MODE_SET_BO(2));
-+ else
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_SET_BO(2));
-+
-+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->txQdPtr,
-+ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
-+ p_FmPort->fmMuramPhysBaseAddr + 0x40));
-+
-+ /* Initialize Tx QD */
-+ tmpPhysBase = (uint64_t)(XX_VirtToPhys(p_FmPort->im.p_BdRing));
-+ SET_ADDR(&p_FmPort->im.p_FmPortImPram->txQd.bdRingBase, tmpPhysBase);
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->txQd.bdRingSize, (uint16_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize));
-+
-+ /* Update the IM PRAM address in the BMI */
-+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfqid,
-+ (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) -
-+ p_FmPort->fmMuramPhysBaseAddr));
-+ }
-+
-+
-+ return E_OK;
-+}
-+
-+void FmPortImFree(t_FmPort *p_FmPort)
-+{
-+ uint32_t bdStatus;
-+ uint8_t *p_CurData;
-+
-+ ASSERT_COND(p_FmPort);
-+ ASSERT_COND(p_FmPort->im.p_FmPortImPram);
-+
-+ if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) ||
-+ (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ if (!p_FmPort->polling || p_FmPort->exceptions)
-+ {
-+ /* Deallocate and unregister interrupts */
-+ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, 0);
-+
-+ FmFreeFmanCtrlEventReg(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
-+
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, 0);
-+
-+ FmUnregisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
-+ }
-+ /* First, try to clean up whatever has already been received */
-+ FmPortImRx(p_FmPort);
-+
-+ /* Now, get rid of the remaining empty buffers! */
-+ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
-+
-+ while (bdStatus & BD_R_E) /* while the BD still holds an unused (empty) buffer */
-+ {
-+ p_CurData = BdBufferGet(p_FmPort->im.rxPool.f_PhysToVirt, BD_GET(p_FmPort->im.currBdId));
-+
-+ BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, BD_GET(p_FmPort->im.currBdId), NULL);
-+ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), 0);
-+
-+ p_FmPort->im.rxPool.f_PutBuf(p_FmPort->im.rxPool.h_BufferPool,
-+ p_CurData,
-+ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId]);
-+
-+ p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
-+ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
-+ }
-+ }
-+ else
-+ TxConf(p_FmPort, e_TX_CONF_TYPE_FLUSH);
-+
-+ FM_MURAM_FreeMem(p_FmPort->im.h_FmMuram, p_FmPort->im.p_FmPortImPram);
-+
-+ if (p_FmPort->im.p_BdShadow)
-+ XX_Free(p_FmPort->im.p_BdShadow);
-+
-+ if (p_FmPort->im.p_BdRing)
-+ XX_FreeSmart(p_FmPort->im.p_BdRing);
-+}
-+
-+
-+t_Error FM_PORT_ConfigIMMaxRxBufLength(t_Handle h_FmPort, uint16_t newVal)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->im.mrblr = newVal;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigIMRxBdRingLength(t_Handle h_FmPort, uint16_t newVal)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->im.bdRingSize = newVal;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigIMTxBdRingLength(t_Handle h_FmPort, uint16_t newVal)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->im.bdRingSize = newVal;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigIMFmanCtrlExternalStructsMemory(t_Handle h_FmPort,
-+ uint8_t memId,
-+ uint32_t memAttributes)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ p_FmPort->im.fwExtStructsMemId = memId;
-+ p_FmPort->im.fwExtStructsMemAttr = memAttributes;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ConfigIMPolling(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Available for Rx ports only"));
-+
-+ if (!FmIsMaster(p_FmPort->h_Fm))
-+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Available on master-partition only;"
-+ "in guest-partitions, IM is always in polling!"));
-+
-+ p_FmPort->polling = TRUE;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_SetIMExceptions(t_Handle h_FmPort, e_FmPortExceptions exception, bool enable)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ t_Error err;
-+ uint16_t tmpReg16;
-+ uint32_t tmpReg32;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ if (exception == e_FM_PORT_EXCEPTION_IM_BUSY)
-+ {
-+ if (enable)
-+ {
-+ p_FmPort->exceptions |= IM_EV_BSY;
-+ if (p_FmPort->fmanCtrlEventId == (uint8_t)NO_IRQ)
-+ {
-+ /* Allocate, configure and register interrupts */
-+ err = FmAllocFmanCtrlEventReg(p_FmPort->h_Fm, &p_FmPort->fmanCtrlEventId);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ ASSERT_COND(!(p_FmPort->fmanCtrlEventId & ~IM_RXQD_FPMEVT_SEL_MASK));
-+
-+ FmRegisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, ImException, (t_Handle)p_FmPort);
-+ tmpReg16 = (uint16_t)((p_FmPort->fmanCtrlEventId & IM_RXQD_FPMEVT_SEL_MASK) | IM_RXQD_BSYINTM);
-+ tmpReg32 = IM_EV_BSY;
-+ }
-+ else
-+ {
-+ tmpReg16 = (uint16_t)(GET_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen) | IM_RXQD_BSYINTM);
-+ tmpReg32 = FmGetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId) | IM_EV_BSY;
-+ }
-+
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16);
-+ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32);
-+ }
-+ else
-+ {
-+ p_FmPort->exceptions &= ~IM_EV_BSY;
-+ if (!p_FmPort->exceptions && p_FmPort->polling)
-+ {
-+ FmFreeFmanCtrlEventReg(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
-+ FmUnregisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId);
-+ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, 0);
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, 0);
-+ p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ;
-+ }
-+ else
-+ {
-+ tmpReg16 = (uint16_t)(GET_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen) & ~IM_RXQD_BSYINTM);
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16);
-+ tmpReg32 = FmGetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId) & ~IM_EV_BSY;
-+ FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32);
-+ }
-+ }
-+ }
-+ else
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Invalid exception."));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_PORT_ImTx( t_Handle h_FmPort,
-+ uint8_t *p_Data,
-+ uint16_t length,
-+ bool lastBuffer,
-+ t_Handle h_BufContext)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+ uint16_t nextBdId;
-+ uint32_t bdStatus, nextBdStatus;
-+ bool firstBuffer;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId));
-+ nextBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
-+ nextBdStatus = BD_STATUS_AND_LENGTH(BD_GET(nextBdId));
-+
-+ if (!(bdStatus & BD_R_E) && !(nextBdStatus & BD_R_E))
-+ {
-+ /* Confirm the current BD - BD is available */
-+ if ((bdStatus & BD_LENGTH_MASK) && (p_FmPort->im.f_TxConf))
-+ p_FmPort->im.f_TxConf (p_FmPort->h_App,
-+ BdBufferGet(XX_PhysToVirt, BD_GET(p_FmPort->im.currBdId)),
-+ 0,
-+ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId]);
-+
-+ bdStatus = length;
-+
-+ /* if this is the first BD of a frame */
-+ if (p_FmPort->im.firstBdOfFrameId == IM_ILEGAL_BD_ID)
-+ {
-+ firstBuffer = TRUE;
-+ p_FmPort->im.txFirstBdStatus = (bdStatus | BD_R_E);
-+
-+ if (!lastBuffer)
-+ p_FmPort->im.firstBdOfFrameId = p_FmPort->im.currBdId;
-+ }
-+ else
-+ firstBuffer = FALSE;
-+
-+ BdBufferSet(XX_VirtToPhys, BD_GET(p_FmPort->im.currBdId), p_Data);
-+ p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId] = h_BufContext;
-+
-+ /* deal with last */
-+ if (lastBuffer)
-+ {
-+ /* if single buffer frame */
-+ if (firstBuffer)
-+ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), p_FmPort->im.txFirstBdStatus | BD_L);
-+ else
-+ {
-+ /* Set the last BD of the frame */
-+ BD_STATUS_AND_LENGTH_SET (BD_GET(p_FmPort->im.currBdId), (bdStatus | BD_R_E | BD_L));
-+ /* Set the first BD of the frame */
-+ BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.firstBdOfFrameId), p_FmPort->im.txFirstBdStatus);
-+ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
-+ }
-+ WRITE_UINT16(p_FmPort->im.p_FmPortImPram->txQd.offsetIn, (uint16_t)(GetNextBdId(p_FmPort, p_FmPort->im.currBdId)<<4));
-+ }
-+ else if (!firstBuffer) /* mid frame buffer */
-+ BD_STATUS_AND_LENGTH_SET (BD_GET(p_FmPort->im.currBdId), bdStatus | BD_R_E);
-+
-+ p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId);
-+ }
-+ else
-+ {
-+ /* Discard current frame. Return error. */
-+ if (p_FmPort->im.firstBdOfFrameId != IM_ILEGAL_BD_ID)
-+ {
-+ /* Error: No free BD */
-+ /* Response: Discard current frame. Return error. */
-+ uint16_t cleanBdId = p_FmPort->im.firstBdOfFrameId;
-+
-+ ASSERT_COND(p_FmPort->im.firstBdOfFrameId != p_FmPort->im.currBdId);
-+
-+ /* Since firstBdOfFrameId is valid, at least one buffer has already been
-+ inserted into the BD ring. Using do-while covers the case of a
-+ frame spanning the whole Tx BD ring (cleanBdId is advanced
-+ prior to testing whether or not it equals currBdId). */
-+ do
-+ {
-+ BD_STATUS_AND_LENGTH_SET(BD_GET(cleanBdId), 0);
-+ /* Advance BD pointer */
-+ cleanBdId = GetNextBdId(p_FmPort, cleanBdId);
-+ } while (cleanBdId != p_FmPort->im.currBdId);
-+
-+ p_FmPort->im.currBdId = cleanBdId;
-+ p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID;
-+ }
-+
-+ return ERROR_CODE(E_FULL);
-+ }
-+
-+ return E_OK;
-+}
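
For orientation, a caller submits a multi-buffer frame by passing lastBuffer = FALSE for every fragment except the final one, and later drives Tx confirmations with FM_PORT_ImTxConf(). The outline below is a hypothetical caller-side sketch, not part of the driver: it assumes the SDK's t_Error/t_Handle types and the public FM Port declarations, reuses the buffer pointers as h_BufContext placeholders, and keeps error handling minimal.

/* Hypothetical caller-side outline: submit a frame split over two buffers,
 * then poll Tx confirmations. */
static t_Error send_two_buffer_frame(t_Handle h_FmPort,
                                     uint8_t *p_Hdr, uint16_t hdrLen,
                                     uint8_t *p_Payload, uint16_t payloadLen)
{
    t_Error err;

    err = FM_PORT_ImTx(h_FmPort, p_Hdr, hdrLen, FALSE, p_Hdr);
    if (err != E_OK)
        return err;                      /* out of BDs; any partial frame was dropped */

    err = FM_PORT_ImTx(h_FmPort, p_Payload, payloadLen, TRUE, p_Payload);
    if (err != E_OK)
        return err;

    FM_PORT_ImTxConf(h_FmPort);          /* run f_TxConf for completed BDs */
    return E_OK;
}
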
-+
-+void FM_PORT_ImTxConf(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ TxConf(p_FmPort, e_TX_CONF_TYPE_CALLBACK);
-+}
-+
-+t_Error FM_PORT_ImRx(t_Handle h_FmPort)
-+{
-+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE);
-+
-+ return FmPortImRx(p_FmPort);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fman_port.c
-@@ -0,0 +1,1570 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "common/general.h"
-+
-+#include "fman_common.h"
-+#include "fsl_fman_port.h"
-+
-+
-+/* problem Eyal: the following should not be here */
-+#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
-+
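-+/* With errata A006675 frames are first routed through the FMan controller
-+ * before the BMI enqueue; otherwise they go straight to the BMI enqueue action. */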
-+static uint32_t get_no_pcd_nia_bmi_ac_enc_frame(struct fman_port_cfg *cfg)
-+{
-+ if (cfg->errata_A006675)
-+ return NIA_ENG_FM_CTL |
-+ NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
-+ else
-+ return NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
-+}
-+
-+static int init_bmi_rx(struct fman_port *port,
-+ struct fman_port_cfg *cfg,
-+ struct fman_port_params *params)
-+{
-+ struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;
-+ uint32_t tmp;
-+
-+ /* Rx Configuration register */
-+ tmp = 0;
-+ if (port->im_en)
-+ tmp |= BMI_PORT_CFG_IM;
-+ else if (cfg->discard_override)
-+ tmp |= BMI_PORT_CFG_FDOVR;
-+ iowrite32be(tmp, &regs->fmbm_rcfg);
-+
-+ /* DMA attributes */
-+ tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
-+ if (cfg->dma_ic_stash_on)
-+ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
-+ if (cfg->dma_header_stash_on)
-+ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
-+ if (cfg->dma_sg_stash_on)
-+ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
-+ if (cfg->dma_write_optimize)
-+ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
-+ iowrite32be(tmp, &regs->fmbm_rda);
-+
-+ /* Rx FIFO parameters */
-+ tmp = (cfg->rx_pri_elevation / FMAN_PORT_BMI_FIFO_UNITS - 1) <<
-+ BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
-+ tmp |= cfg->rx_fifo_thr / FMAN_PORT_BMI_FIFO_UNITS - 1;
-+ iowrite32be(tmp, &regs->fmbm_rfp);
-+
-+ if (cfg->excessive_threshold_register)
-+ /* always allow access to the extra resources */
-+ iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);
-+
-+ /* Frame end data */
-+ tmp = (uint32_t)cfg->checksum_bytes_ignore <<
-+ BMI_RX_FRAME_END_CS_IGNORE_SHIFT;
-+ tmp |= (uint32_t)cfg->rx_cut_end_bytes <<
-+ BMI_RX_FRAME_END_CUT_SHIFT;
-+ if (cfg->errata_A006320)
-+ tmp &= 0xffe0ffff;
-+ iowrite32be(tmp, &regs->fmbm_rfed);
-+
-+ /* Internal context parameters */
-+ tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
-+ BMI_IC_TO_EXT_SHIFT;
-+ tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
-+ BMI_IC_FROM_INT_SHIFT;
-+ tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
-+ iowrite32be(tmp, &regs->fmbm_ricp);
-+
-+ /* Internal buffer offset */
-+ tmp = ((uint32_t)cfg->int_buf_start_margin / FMAN_PORT_IC_OFFSET_UNITS)
-+ << BMI_INT_BUF_MARG_SHIFT;
-+ iowrite32be(tmp, &regs->fmbm_rim);
-+
-+ /* External buffer margins */
-+ if (!port->im_en)
-+ {
-+ tmp = (uint32_t)cfg->ext_buf_start_margin <<
-+ BMI_EXT_BUF_MARG_START_SHIFT;
-+ tmp |= (uint32_t)cfg->ext_buf_end_margin;
-+ if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
-+ tmp |= BMI_SG_DISABLE;
-+ iowrite32be(tmp, &regs->fmbm_rebm);
-+ }
-+
-+ /* Frame attributes */
-+ tmp = BMI_CMD_RX_MR_DEF;
-+ if (!port->im_en)
-+ {
-+ tmp |= BMI_CMD_ATTR_ORDER;
-+ tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
-+ if (cfg->sync_req)
-+ tmp |= BMI_CMD_ATTR_SYNC;
-+ }
-+ iowrite32be(tmp, &regs->fmbm_rfca);
-+
-+ /* NIA */
-+ if (port->im_en)
-+ tmp = NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_RX;
-+ else
-+ {
-+ tmp = (uint32_t)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
-+ tmp |= get_no_pcd_nia_bmi_ac_enc_frame(cfg);
-+ }
-+ iowrite32be(tmp, &regs->fmbm_rfne);
-+
-+ /* Enqueue NIA */
-+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
-+
-+ /* Default/error queues */
-+ if (!port->im_en)
-+ {
-+ iowrite32be((params->dflt_fqid & 0x00FFFFFF), &regs->fmbm_rfqid);
-+ iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_refqid);
-+ }
-+
-+ /* Discard/error masks */
-+ iowrite32be(params->discard_mask, &regs->fmbm_rfsdm);
-+ iowrite32be(params->err_mask, &regs->fmbm_rfsem);
-+
-+ /* Statistics counters */
-+ tmp = 0;
-+ if (cfg->stats_counters_enable)
-+ tmp = BMI_COUNTERS_EN;
-+ iowrite32be(tmp, &regs->fmbm_rstc);
-+
-+ /* Performance counters */
-+ fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
-+ tmp = 0;
-+ if (cfg->perf_counters_enable)
-+ tmp = BMI_COUNTERS_EN;
-+ iowrite32be(tmp, &regs->fmbm_rpc);
-+
-+ return 0;
-+}
-+
-+static int init_bmi_tx(struct fman_port *port,
-+ struct fman_port_cfg *cfg,
-+ struct fman_port_params *params)
-+{
-+ struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;
-+ uint32_t tmp;
-+
-+ /* Tx Configuration register */
-+ tmp = 0;
-+ if (port->im_en)
-+ tmp |= BMI_PORT_CFG_IM;
-+ iowrite32be(tmp, &regs->fmbm_tcfg);
-+
-+ /* DMA attributes */
-+ tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
-+ if (cfg->dma_ic_stash_on)
-+ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
-+ if (cfg->dma_header_stash_on)
-+ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
-+ if (cfg->dma_sg_stash_on)
-+ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
-+ iowrite32be(tmp, &regs->fmbm_tda);
-+
-+ /* Tx FIFO parameters */
-+ tmp = (cfg->tx_fifo_min_level / FMAN_PORT_BMI_FIFO_UNITS) <<
-+ BMI_TX_FIFO_MIN_FILL_SHIFT;
-+ tmp |= ((uint32_t)cfg->tx_fifo_deq_pipeline_depth - 1) <<
-+ BMI_FIFO_PIPELINE_DEPTH_SHIFT;
-+ tmp |= (uint32_t)(cfg->tx_fifo_low_comf_level /
-+ FMAN_PORT_BMI_FIFO_UNITS - 1);
-+ iowrite32be(tmp, &regs->fmbm_tfp);
-+
-+ /* Frame end data */
-+ tmp = (uint32_t)cfg->checksum_bytes_ignore <<
-+ BMI_FRAME_END_CS_IGNORE_SHIFT;
-+ iowrite32be(tmp, &regs->fmbm_tfed);
-+
-+ /* Internal context parameters */
-+ if (!port->im_en)
-+ {
-+ tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
-+ BMI_IC_TO_EXT_SHIFT;
-+ tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
-+ BMI_IC_FROM_INT_SHIFT;
-+ tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
-+ iowrite32be(tmp, &regs->fmbm_ticp);
-+ }
-+ /* Frame attributes */
-+ tmp = BMI_CMD_TX_MR_DEF;
-+ if (port->im_en)
-+ tmp |= BMI_CMD_MR_DEAS;
-+ else
-+ {
-+ tmp |= BMI_CMD_ATTR_ORDER;
-+ tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
-+ }
-+ iowrite32be(tmp, &regs->fmbm_tfca);
-+
-+ /* Dequeue NIA + enqueue NIA */
-+ if (port->im_en)
-+ {
-+ iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX, &regs->fmbm_tfdne);
-+ iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX, &regs->fmbm_tfene);
-+ }
-+ else
-+ {
-+ iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
-+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
-+ if (cfg->fmbm_tfne_has_features)
-+ iowrite32be(!params->dflt_fqid ?
-+ BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
-+ NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
-+ if (!params->dflt_fqid && params->dont_release_buf)
-+ {
-+ iowrite32be(0x00FFFFFF, &regs->fmbm_tcfqid);
-+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE, &regs->fmbm_tfene);
-+ if (cfg->fmbm_tfne_has_features)
-+ iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN, &regs->fmbm_tfne);
-+ }
-+ }
-+
-+ /* Confirmation/error queues */
-+ if (!port->im_en)
-+ {
-+ if (params->dflt_fqid || !params->dont_release_buf)
-+ iowrite32be(params->dflt_fqid & 0x00FFFFFF, &regs->fmbm_tcfqid);
-+ iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_tefqid);
-+ }
-+ /* Statistics counters */
-+ tmp = 0;
-+ if (cfg->stats_counters_enable)
-+ tmp = BMI_COUNTERS_EN;
-+ iowrite32be(tmp, &regs->fmbm_tstc);
-+
-+ /* Performance counters */
-+ fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
-+ tmp = 0;
-+ if (cfg->perf_counters_enable)
-+ tmp = BMI_COUNTERS_EN;
-+ iowrite32be(tmp, &regs->fmbm_tpc);
-+
-+ return 0;
-+}
-+
-+static int init_bmi_oh(struct fman_port *port,
-+ struct fman_port_cfg *cfg,
-+ struct fman_port_params *params)
-+{
-+ struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;
-+ uint32_t tmp;
-+
-+ /* OP Configuration register */
-+ tmp = 0;
-+ if (cfg->discard_override)
-+ tmp |= BMI_PORT_CFG_FDOVR;
-+ iowrite32be(tmp, &regs->fmbm_ocfg);
-+
-+ /* DMA attributes */
-+ tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
-+ if (cfg->dma_ic_stash_on)
-+ tmp |= BMI_DMA_ATTR_IC_STASH_ON;
-+ if (cfg->dma_header_stash_on)
-+ tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
-+ if (cfg->dma_sg_stash_on)
-+ tmp |= BMI_DMA_ATTR_SG_STASH_ON;
-+ if (cfg->dma_write_optimize)
-+ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
-+ iowrite32be(tmp, &regs->fmbm_oda);
-+
-+ /* Tx FIFO parameters */
-+ tmp = ((uint32_t)cfg->tx_fifo_deq_pipeline_depth - 1) <<
-+ BMI_FIFO_PIPELINE_DEPTH_SHIFT;
-+ iowrite32be(tmp, &regs->fmbm_ofp);
-+
-+ /* Internal context parameters */
-+ tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
-+ BMI_IC_TO_EXT_SHIFT;
-+ tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
-+ BMI_IC_FROM_INT_SHIFT;
-+ tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
-+ iowrite32be(tmp, &regs->fmbm_oicp);
-+
-+ /* Frame attributes */
-+ tmp = BMI_CMD_OP_MR_DEF;
-+ tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
-+ if (cfg->sync_req)
-+ tmp |= BMI_CMD_ATTR_SYNC;
-+ if (port->type == E_FMAN_PORT_TYPE_OP)
-+ tmp |= BMI_CMD_ATTR_ORDER;
-+ iowrite32be(tmp, &regs->fmbm_ofca);
-+
-+ /* Internal buffer offset */
-+ tmp = ((uint32_t)cfg->int_buf_start_margin / FMAN_PORT_IC_OFFSET_UNITS)
-+ << BMI_INT_BUF_MARG_SHIFT;
-+ iowrite32be(tmp, &regs->fmbm_oim);
-+
-+ /* Dequeue NIA */
-+ iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_ofdne);
-+
-+ /* NIA and Enqueue NIA */
-+ if (port->type == E_FMAN_PORT_TYPE_HC) {
-+ iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_HC,
-+ &regs->fmbm_ofne);
-+ iowrite32be(NIA_ENG_QMI_ENQ, &regs->fmbm_ofene);
-+ } else {
-+ iowrite32be(get_no_pcd_nia_bmi_ac_enc_frame(cfg),
-+ &regs->fmbm_ofne);
-+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR,
-+ &regs->fmbm_ofene);
-+ }
-+
-+ /* Default/error queues */
-+ iowrite32be((params->dflt_fqid & 0x00FFFFFF), &regs->fmbm_ofqid);
-+ iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_oefqid);
-+
-+ /* Discard/error masks */
-+ if (port->type == E_FMAN_PORT_TYPE_OP) {
-+ iowrite32be(params->discard_mask, &regs->fmbm_ofsdm);
-+ iowrite32be(params->err_mask, &regs->fmbm_ofsem);
-+ }
-+
-+ /* Statistics counters */
-+ tmp = 0;
-+ if (cfg->stats_counters_enable)
-+ tmp = BMI_COUNTERS_EN;
-+ iowrite32be(tmp, &regs->fmbm_ostc);
-+
-+ /* Performance counters */
-+ fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
-+ tmp = 0;
-+ if (cfg->perf_counters_enable)
-+ tmp = BMI_COUNTERS_EN;
-+ iowrite32be(tmp, &regs->fmbm_opc);
-+
-+ return 0;
-+}
-+
-+static int init_qmi(struct fman_port *port,
-+ struct fman_port_cfg *cfg,
-+ struct fman_port_params *params)
-+{
-+ struct fman_port_qmi_regs *regs = port->qmi_regs;
-+ uint32_t tmp;
-+
-+ tmp = 0;
-+ if (cfg->queue_counters_enable)
-+ tmp |= QMI_PORT_CFG_EN_COUNTERS;
-+ iowrite32be(tmp, &regs->fmqm_pnc);
-+
-+ /* Rx port configuration */
-+ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
-+ (port->type == E_FMAN_PORT_TYPE_RX_10G)) {
-+ /* Enqueue NIA */
-+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
-+ return 0;
-+ }
-+
-+ /* Continue with Tx and O/H port configuration */
-+ if ((port->type == E_FMAN_PORT_TYPE_TX) ||
-+ (port->type == E_FMAN_PORT_TYPE_TX_10G)) {
-+ /* Enqueue NIA */
-+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
-+ &regs->fmqm_pnen);
-+ /* Dequeue NIA */
-+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
-+ } else {
-+ /* Enqueue NIA */
-+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
-+ /* Dequeue NIA */
-+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_FETCH, &regs->fmqm_pndn);
-+ }
-+
-+ /* Dequeue Configuration register */
-+ tmp = 0;
-+ if (cfg->deq_high_pri)
-+ tmp |= QMI_DEQ_CFG_PRI;
-+
-+ switch (cfg->deq_type) {
-+ case E_FMAN_PORT_DEQ_BY_PRI:
-+ tmp |= QMI_DEQ_CFG_TYPE1;
-+ break;
-+ case E_FMAN_PORT_DEQ_ACTIVE_FQ:
-+ tmp |= QMI_DEQ_CFG_TYPE2;
-+ break;
-+ case E_FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
-+ tmp |= QMI_DEQ_CFG_TYPE3;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ if (cfg->qmi_deq_options_support) {
-+ if ((port->type == E_FMAN_PORT_TYPE_HC) &&
-+ (cfg->deq_prefetch_opt != E_FMAN_PORT_DEQ_NO_PREFETCH))
-+ return -EINVAL;
-+
-+ switch (cfg->deq_prefetch_opt) {
-+ case E_FMAN_PORT_DEQ_NO_PREFETCH:
-+ break;
-+ case E_FMAN_PORT_DEQ_PART_PREFETCH:
-+ tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
-+ break;
-+ case E_FMAN_PORT_DEQ_FULL_PREFETCH:
-+ tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ }
-+ tmp |= (uint32_t)(params->deq_sp & QMI_DEQ_CFG_SP_MASK) <<
-+ QMI_DEQ_CFG_SP_SHIFT;
-+ tmp |= cfg->deq_byte_cnt;
-+ iowrite32be(tmp, &regs->fmqm_pndc);
-+
-+ return 0;
-+}
-+
-+static void get_rx_stats_reg(struct fman_port *port,
-+ enum fman_port_stats_counters counter,
-+ uint32_t **stats_reg)
-+{
-+ struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;
-+
-+ switch (counter) {
-+ case E_FMAN_PORT_STATS_CNT_FRAME:
-+ *stats_reg = &regs->fmbm_rfrc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DISCARD:
-+ *stats_reg = &regs->fmbm_rfdc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
-+ *stats_reg = &regs->fmbm_rbdc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME:
-+ *stats_reg = &regs->fmbm_rfbc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME:
-+ *stats_reg = &regs->fmbm_rlfc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF:
-+ *stats_reg = &regs->fmbm_rodc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_FILTERED_FRAME:
-+ *stats_reg = &regs->fmbm_rffc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DMA_ERR:
-+ *stats_reg = &regs->fmbm_rfldec;
-+ break;
-+ default:
-+ *stats_reg = NULL;
-+ }
-+}
-+
-+static void get_tx_stats_reg(struct fman_port *port,
-+ enum fman_port_stats_counters counter,
-+ uint32_t **stats_reg)
-+{
-+ struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;
-+
-+ switch (counter) {
-+ case E_FMAN_PORT_STATS_CNT_FRAME:
-+ *stats_reg = &regs->fmbm_tfrc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DISCARD:
-+ *stats_reg = &regs->fmbm_tfdc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
-+ *stats_reg = &regs->fmbm_tbdc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_LEN_ERR:
-+ *stats_reg = &regs->fmbm_tfledc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT:
-+ *stats_reg = &regs->fmbm_tfufdc;
-+ break;
-+ default:
-+ *stats_reg = NULL;
-+ }
-+}
-+
-+static void get_oh_stats_reg(struct fman_port *port,
-+ enum fman_port_stats_counters counter,
-+ uint32_t **stats_reg)
-+{
-+ struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;
-+
-+ switch (counter) {
-+ case E_FMAN_PORT_STATS_CNT_FRAME:
-+ *stats_reg = &regs->fmbm_ofrc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DISCARD:
-+ *stats_reg = &regs->fmbm_ofdc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
-+ *stats_reg = &regs->fmbm_obdc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_FILTERED_FRAME:
-+ *stats_reg = &regs->fmbm_offc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_DMA_ERR:
-+ *stats_reg = &regs->fmbm_ofldec;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_LEN_ERR:
-+ *stats_reg = &regs->fmbm_ofledc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT:
-+ *stats_reg = &regs->fmbm_ofufdc;
-+ break;
-+ case E_FMAN_PORT_STATS_CNT_WRED_DISCARD:
-+ *stats_reg = &regs->fmbm_ofwdc;
-+ break;
-+ default:
-+ *stats_reg = NULL;
-+ }
-+}
-+
-+static void get_rx_perf_reg(struct fman_port *port,
-+ enum fman_port_perf_counters counter,
-+ uint32_t **perf_reg)
-+{
-+ struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;
-+
-+ switch (counter) {
-+ case E_FMAN_PORT_PERF_CNT_CYCLE:
-+ *perf_reg = &regs->fmbm_rccn;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
-+ *perf_reg = &regs->fmbm_rtuc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_QUEUE_UTIL:
-+ *perf_reg = &regs->fmbm_rrquc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
-+ *perf_reg = &regs->fmbm_rduc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
-+ *perf_reg = &regs->fmbm_rfuc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_RX_PAUSE:
-+ *perf_reg = &regs->fmbm_rpac;
-+ break;
-+ default:
-+ *perf_reg = NULL;
-+ }
-+}
-+
-+static void get_tx_perf_reg(struct fman_port *port,
-+ enum fman_port_perf_counters counter,
-+ uint32_t **perf_reg)
-+{
-+ struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;
-+
-+ switch (counter) {
-+ case E_FMAN_PORT_PERF_CNT_CYCLE:
-+ *perf_reg = &regs->fmbm_tccn;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
-+ *perf_reg = &regs->fmbm_ttuc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_QUEUE_UTIL:
-+ *perf_reg = &regs->fmbm_ttcquc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
-+ *perf_reg = &regs->fmbm_tduc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
-+ *perf_reg = &regs->fmbm_tfuc;
-+ break;
-+ default:
-+ *perf_reg = NULL;
-+ }
-+}
-+
-+static void get_oh_perf_reg(struct fman_port *port,
-+ enum fman_port_perf_counters counter,
-+ uint32_t **perf_reg)
-+{
-+ struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;
-+
-+ switch (counter) {
-+ case E_FMAN_PORT_PERF_CNT_CYCLE:
-+ *perf_reg = &regs->fmbm_occn;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
-+ *perf_reg = &regs->fmbm_otuc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
-+ *perf_reg = &regs->fmbm_oduc;
-+ break;
-+ case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
-+ *perf_reg = &regs->fmbm_ofuc;
-+ break;
-+ default:
-+ *perf_reg = NULL;
-+ }
-+}
-+
-+static void get_qmi_counter_reg(struct fman_port *port,
-+ enum fman_port_qmi_counters counter,
-+ uint32_t **queue_reg)
-+{
-+ struct fman_port_qmi_regs *regs = port->qmi_regs;
-+
-+ switch (counter) {
-+ case E_FMAN_PORT_ENQ_TOTAL:
-+ *queue_reg = &regs->fmqm_pnetfc;
-+ break;
-+ case E_FMAN_PORT_DEQ_TOTAL:
-+ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
-+ (port->type == E_FMAN_PORT_TYPE_RX_10G))
-+ /* Counter not available for Rx ports */
-+ *queue_reg = NULL;
-+ else
-+ *queue_reg = &regs->fmqm_pndtfc;
-+ break;
-+ case E_FMAN_PORT_DEQ_FROM_DFLT:
-+ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
-+ (port->type == E_FMAN_PORT_TYPE_RX_10G))
-+ /* Counter not available for Rx ports */
-+ *queue_reg = NULL;
-+ else
-+ *queue_reg = &regs->fmqm_pndfdc;
-+ break;
-+ case E_FMAN_PORT_DEQ_CONFIRM:
-+ if ((port->type == E_FMAN_PORT_TYPE_RX) ||
-+ (port->type == E_FMAN_PORT_TYPE_RX_10G))
-+ /* Counter not available for Rx ports */
-+ *queue_reg = NULL;
-+ else
-+ *queue_reg = &regs->fmqm_pndcc;
-+ break;
-+ default:
-+ *queue_reg = NULL;
-+ }
-+}
-+
-+void fman_port_defconfig(struct fman_port_cfg *cfg, enum fman_port_type type)
-+{
-+ cfg->dma_swap_data = E_FMAN_PORT_DMA_NO_SWAP;
-+ cfg->dma_ic_stash_on = FALSE;
-+ cfg->dma_header_stash_on = FALSE;
-+ cfg->dma_sg_stash_on = FALSE;
-+ cfg->dma_write_optimize = TRUE;
-+ cfg->color = E_FMAN_PORT_COLOR_GREEN;
-+ cfg->discard_override = FALSE;
-+ cfg->checksum_bytes_ignore = 0;
-+ cfg->rx_cut_end_bytes = 4;
-+ cfg->rx_pri_elevation = ((0x3FF + 1) * FMAN_PORT_BMI_FIFO_UNITS);
-+ cfg->rx_fifo_thr = ((0x3FF + 1) * FMAN_PORT_BMI_FIFO_UNITS);
-+ cfg->rx_fd_bits = 0;
-+ cfg->ic_ext_offset = 0;
-+ cfg->ic_int_offset = 0;
-+ cfg->ic_size = 0;
-+ cfg->int_buf_start_margin = 0;
-+ cfg->ext_buf_start_margin = 0;
-+ cfg->ext_buf_end_margin = 0;
-+ cfg->tx_fifo_min_level = 0;
-+ cfg->tx_fifo_low_comf_level = (5 * KILOBYTE);
-+ cfg->stats_counters_enable = TRUE;
-+ cfg->perf_counters_enable = TRUE;
-+ cfg->deq_type = E_FMAN_PORT_DEQ_BY_PRI;
-+
-+ if (type == E_FMAN_PORT_TYPE_HC) {
-+ cfg->sync_req = FALSE;
-+ cfg->deq_prefetch_opt = E_FMAN_PORT_DEQ_NO_PREFETCH;
-+ } else {
-+ cfg->sync_req = TRUE;
-+ cfg->deq_prefetch_opt = E_FMAN_PORT_DEQ_FULL_PREFETCH;
-+ }
-+
-+ if (type == E_FMAN_PORT_TYPE_TX_10G) {
-+ cfg->tx_fifo_deq_pipeline_depth = 4;
-+ cfg->deq_high_pri = TRUE;
-+ cfg->deq_byte_cnt = 0x1400;
-+ } else {
-+ if ((type == E_FMAN_PORT_TYPE_HC) ||
-+ (type == E_FMAN_PORT_TYPE_OP))
-+ cfg->tx_fifo_deq_pipeline_depth = 2;
-+ else
-+ cfg->tx_fifo_deq_pipeline_depth = 1;
-+
-+ cfg->deq_high_pri = FALSE;
-+ cfg->deq_byte_cnt = 0x400;
-+ }
-+ cfg->no_scatter_gather = DEFAULT_FMAN_SP_NO_SCATTER_GATHER;
-+}
-+
-+static uint8_t fman_port_find_bpool(struct fman_port *port, uint8_t bpid)
-+{
-+ uint32_t *bp_reg, tmp;
-+ uint8_t i, id;
-+
-+ /* Find the pool */
-+ bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
-+ for (i = 0;
-+ (i < port->ext_pools_num && (i < FMAN_PORT_MAX_EXT_POOLS_NUM));
-+ i++) {
-+ tmp = ioread32be(&bp_reg[i]);
-+ id = (uint8_t)((tmp & BMI_EXT_BUF_POOL_ID_MASK) >>
-+ BMI_EXT_BUF_POOL_ID_SHIFT);
-+
-+ if (id == bpid)
-+ break;
-+ }
-+
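-+	/* If bpid was not found, i equals the number of pools scanned;
-+	 * callers treat that value as "not found" */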
-+ return i;
-+}
-+
-+int fman_port_init(struct fman_port *port,
-+ struct fman_port_cfg *cfg,
-+ struct fman_port_params *params)
-+{
-+ int err;
-+
-+ /* Init BMI registers */
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ err = init_bmi_rx(port, cfg, params);
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ err = init_bmi_tx(port, cfg, params);
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ err = init_bmi_oh(port, cfg, params);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ if (err)
-+ return err;
-+
-+ /* Init QMI registers */
-+ if (!port->im_en)
-+ {
-+ err = init_qmi(port, cfg, params);
-+ return err;
-+ }
-+ return 0;
-+}
-+
-+int fman_port_enable(struct fman_port *port)
-+{
-+ uint32_t *bmi_cfg_reg, tmp;
-+ bool rx_port;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
-+ rx_port = TRUE;
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
-+ rx_port = FALSE;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ bmi_cfg_reg = &port->bmi_regs->oh.fmbm_ocfg;
-+ rx_port = FALSE;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ /* Enable QMI */
-+ if (!rx_port) {
-+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
-+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
-+ }
-+
-+ /* Enable BMI */
-+ tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
-+ iowrite32be(tmp, bmi_cfg_reg);
-+
-+ return 0;
-+}
-+
-+int fman_port_disable(const struct fman_port *port)
-+{
-+ uint32_t *bmi_cfg_reg, *bmi_status_reg, tmp;
-+ bool rx_port, failure = FALSE;
-+ int count;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
-+ bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
-+ rx_port = TRUE;
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
-+ bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
-+ rx_port = FALSE;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ bmi_cfg_reg = &port->bmi_regs->oh.fmbm_ocfg;
-+ bmi_status_reg = &port->bmi_regs->oh.fmbm_ost;
-+ rx_port = FALSE;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ /* Disable QMI */
-+ if (!rx_port) {
-+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
-+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
-+
-+ /* Wait for QMI to finish FD handling */
-+ count = 100;
-+ do {
-+ udelay(10);
-+ tmp = ioread32be(&port->qmi_regs->fmqm_pns);
-+ } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
-+
-+ if (count == 0)
-+ {
-+ /* Timeout */
-+ failure = TRUE;
-+ }
-+ }
-+
-+ /* Disable BMI */
-+ tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
-+ iowrite32be(tmp, bmi_cfg_reg);
-+
-+ /* Wait for graceful stop end */
-+ count = 500;
-+ do {
-+ udelay(10);
-+ tmp = ioread32be(bmi_status_reg);
-+ } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
-+
-+ if (count == 0)
-+ {
-+ /* Timeout */
-+ failure = TRUE;
-+ }
-+
-+ if (failure)
-+ return -EBUSY;
-+
-+ return 0;
-+}
-+
-+int fman_port_set_bpools(const struct fman_port *port,
-+ const struct fman_port_bpools *bp)
-+{
-+ uint32_t tmp, *bp_reg, *bp_depl_reg;
-+ uint8_t i, max_bp_num;
-+ bool grp_depl_used = FALSE, rx_port;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ max_bp_num = port->ext_pools_num;
-+ rx_port = TRUE;
-+ bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
-+ bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ if (port->fm_rev_maj != 4)
-+ return -EINVAL;
-+ max_bp_num = FMAN_PORT_OBS_EXT_POOLS_NUM;
-+ rx_port = FALSE;
-+ bp_reg = port->bmi_regs->oh.fmbm_oebmpi;
-+ bp_depl_reg = &port->bmi_regs->oh.fmbm_ompd;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ if (rx_port) {
-+		/* Check that the buffer pools are provided in ascending size order */
-+ for (i = 0;
-+ (i < (bp->count-1) && (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1));
-+ i++) {
-+ if (bp->bpool[i].size > bp->bpool[i+1].size)
-+ return -EINVAL;
-+ }
-+ }
-+
-+ /* Set up external buffers pools */
-+ for (i = 0; i < bp->count; i++) {
-+ tmp = BMI_EXT_BUF_POOL_VALID;
-+ tmp |= ((uint32_t)bp->bpool[i].bpid <<
-+ BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
-+
-+ if (rx_port) {
-+ if (bp->counters_enable)
-+ tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
-+
-+ if (bp->bpool[i].is_backup)
-+ tmp |= BMI_EXT_BUF_POOL_BACKUP;
-+
-+ tmp |= (uint32_t)bp->bpool[i].size;
-+ }
-+
-+ iowrite32be(tmp, &bp_reg[i]);
-+ }
-+
-+ /* Clear unused pools */
-+ for (i = bp->count; i < max_bp_num; i++)
-+ iowrite32be(0, &bp_reg[i]);
-+
-+ /* Pools depletion */
-+ tmp = 0;
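-+	/* Register layout as programmed below: group depletion flags from bit 31
-+	 * downwards (0x80000000 >> i), single-pool depletion flags in the low byte
-+	 * (0x80 >> i), PFC priority enables from bit 8 upwards (0x0100 << i) */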
-+ for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
-+ if (bp->bpool[i].grp_bp_depleted) {
-+ grp_depl_used = TRUE;
-+ tmp |= 0x80000000 >> i;
-+ }
-+
-+ if (bp->bpool[i].single_bp_depleted)
-+ tmp |= 0x80 >> i;
-+
-+ if (bp->bpool[i].pfc_priorities_en)
-+ tmp |= 0x0100 << i;
-+ }
-+
-+ if (grp_depl_used)
-+ tmp |= ((uint32_t)bp->grp_bp_depleted_num - 1) <<
-+ BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
-+
-+ iowrite32be(tmp, bp_depl_reg);
-+ return 0;
-+}
-+
-+int fman_port_set_rate_limiter(struct fman_port *port,
-+ struct fman_port_rate_limiter *rate_limiter)
-+{
-+ uint32_t *rate_limit_reg, *rate_limit_scale_reg;
-+ uint32_t granularity, tmp;
-+ uint8_t usec_bit, factor;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ rate_limit_reg = &port->bmi_regs->tx.fmbm_trlmt;
-+ rate_limit_scale_reg = &port->bmi_regs->tx.fmbm_trlmts;
-+ granularity = BMI_RATE_LIMIT_GRAN_TX;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ rate_limit_reg = &port->bmi_regs->oh.fmbm_orlmt;
-+ rate_limit_scale_reg = &port->bmi_regs->oh.fmbm_orlmts;
-+ granularity = BMI_RATE_LIMIT_GRAN_OP;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ /* Factor is per 1 usec count */
-+ factor = 1;
-+ usec_bit = rate_limiter->count_1micro_bit;
-+
-+	/* If the rate limit is too small for a 1 usec factor, adjust the timestamp
-+	 * scale and multiply the factor */
-+ while (rate_limiter->rate < (granularity / factor)) {
-+ if (usec_bit == 31)
-+ /* Can't configure rate limiter - rate is too small */
-+ return -EINVAL;
-+
-+ usec_bit++;
-+ factor <<= 1;
-+ }
-+
-+	/* Figure out register value. The "while" above guarantees that
-+	 * (rate_limiter->rate * factor / granularity) >= 1 */
-+ tmp = (uint32_t)(rate_limiter->rate * factor / granularity - 1);
-+
-+ /* Check rate limit isn't too large */
-+ if (tmp >= BMI_RATE_LIMIT_MAX_RATE_IN_GRAN_UNITS)
-+ return -EINVAL;
-+
-+ /* Check burst size is in allowed range */
-+ if ((rate_limiter->burst_size == 0) ||
-+ (rate_limiter->burst_size >
-+ BMI_RATE_LIMIT_MAX_BURST_SIZE))
-+ return -EINVAL;
-+
-+ tmp |= (uint32_t)(rate_limiter->burst_size - 1) <<
-+ BMI_RATE_LIMIT_MAX_BURST_SHIFT;
-+
-+ if ((port->type == E_FMAN_PORT_TYPE_OP) &&
-+ (port->fm_rev_maj == 4)) {
-+ if (rate_limiter->high_burst_size_gran)
-+ tmp |= BMI_RATE_LIMIT_HIGH_BURST_SIZE_GRAN;
-+ }
-+
-+ iowrite32be(tmp, rate_limit_reg);
-+
-+ /* Set up rate limiter scale register */
-+ tmp = BMI_RATE_LIMIT_SCALE_EN;
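-+	/* The timestamp-bit select field is derived from usec_bit, which was
-+	 * advanced above each time the factor was doubled */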
-+ tmp |= (31 - (uint32_t)usec_bit) << BMI_RATE_LIMIT_SCALE_TSBS_SHIFT;
-+
-+ if ((port->type == E_FMAN_PORT_TYPE_OP) &&
-+ (port->fm_rev_maj == 4))
-+ tmp |= rate_limiter->rate_factor;
-+
-+ iowrite32be(tmp, rate_limit_scale_reg);
-+
-+ return 0;
-+}
-+
-+int fman_port_delete_rate_limiter(struct fman_port *port)
-+{
-+ uint32_t *rate_limit_scale_reg;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ rate_limit_scale_reg = &port->bmi_regs->tx.fmbm_trlmts;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ rate_limit_scale_reg = &port->bmi_regs->oh.fmbm_orlmts;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ iowrite32be(0, rate_limit_scale_reg);
-+ return 0;
-+}
-+
-+int fman_port_set_err_mask(struct fman_port *port, uint32_t err_mask)
-+{
-+ uint32_t *err_mask_reg;
-+
-+ /* Obtain register address */
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ err_mask_reg = &port->bmi_regs->rx.fmbm_rfsem;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ err_mask_reg = &port->bmi_regs->oh.fmbm_ofsem;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ iowrite32be(err_mask, err_mask_reg);
-+ return 0;
-+}
-+
-+int fman_port_set_discard_mask(struct fman_port *port, uint32_t discard_mask)
-+{
-+ uint32_t *discard_mask_reg;
-+
-+ /* Obtain register address */
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ discard_mask_reg = &port->bmi_regs->rx.fmbm_rfsdm;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ discard_mask_reg = &port->bmi_regs->oh.fmbm_ofsdm;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ iowrite32be(discard_mask, discard_mask_reg);
-+ return 0;
-+}
-+
-+int fman_port_modify_rx_fd_bits(struct fman_port *port,
-+ uint8_t rx_fd_bits,
-+ bool add)
-+{
-+ uint32_t tmp;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ tmp = ioread32be(&port->bmi_regs->rx.fmbm_rfne);
-+
-+ if (add)
-+ tmp |= (uint32_t)rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
-+ else
-+ tmp &= ~((uint32_t)rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT);
-+
-+ iowrite32be(tmp, &port->bmi_regs->rx.fmbm_rfne);
-+ return 0;
-+}
-+
-+int fman_port_set_perf_cnt_params(struct fman_port *port,
-+ struct fman_port_perf_cnt_params *params)
-+{
-+ uint32_t *pcp_reg, tmp;
-+
-+ /* Obtain register address and check parameters are in range */
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ pcp_reg = &port->bmi_regs->rx.fmbm_rpcp;
-+ if ((params->queue_val == 0) ||
-+ (params->queue_val > MAX_PERFORMANCE_RX_QUEUE_COMP))
-+ return -EINVAL;
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ pcp_reg = &port->bmi_regs->tx.fmbm_tpcp;
-+ if ((params->queue_val == 0) ||
-+ (params->queue_val > MAX_PERFORMANCE_TX_QUEUE_COMP))
-+ return -EINVAL;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ pcp_reg = &port->bmi_regs->oh.fmbm_opcp;
-+ if (params->queue_val != 0)
-+ return -EINVAL;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ if ((params->task_val == 0) ||
-+ (params->task_val > MAX_PERFORMANCE_TASK_COMP))
-+ return -EINVAL;
-+ if ((params->dma_val == 0) ||
-+ (params->dma_val > MAX_PERFORMANCE_DMA_COMP))
-+ return -EINVAL;
-+ if ((params->fifo_val == 0) ||
-+ ((params->fifo_val / FMAN_PORT_BMI_FIFO_UNITS) >
-+ MAX_PERFORMANCE_FIFO_COMP))
-+ return -EINVAL;
-+ tmp = (uint32_t)(params->task_val - 1) <<
-+ BMI_PERFORMANCE_TASK_COMP_SHIFT;
-+ tmp |= (uint32_t)(params->dma_val - 1) <<
-+ BMI_PERFORMANCE_DMA_COMP_SHIFT;
-+ tmp |= (uint32_t)(params->fifo_val / FMAN_PORT_BMI_FIFO_UNITS - 1);
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ tmp |= (uint32_t)(params->queue_val - 1) <<
-+ BMI_PERFORMANCE_QUEUE_COMP_SHIFT;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+
-+ iowrite32be(tmp, pcp_reg);
-+ return 0;
-+}
-+
-+int fman_port_set_stats_cnt_mode(struct fman_port *port, bool enable)
-+{
-+ uint32_t *stats_reg, tmp;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ stats_reg = &port->bmi_regs->rx.fmbm_rstc;
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ stats_reg = &port->bmi_regs->tx.fmbm_tstc;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ stats_reg = &port->bmi_regs->oh.fmbm_ostc;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ tmp = ioread32be(stats_reg);
-+
-+ if (enable)
-+ tmp |= BMI_COUNTERS_EN;
-+ else
-+ tmp &= ~BMI_COUNTERS_EN;
-+
-+ iowrite32be(tmp, stats_reg);
-+ return 0;
-+}
-+
-+int fman_port_set_perf_cnt_mode(struct fman_port *port, bool enable)
-+{
-+ uint32_t *stats_reg, tmp;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ stats_reg = &port->bmi_regs->rx.fmbm_rpc;
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ stats_reg = &port->bmi_regs->tx.fmbm_tpc;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ stats_reg = &port->bmi_regs->oh.fmbm_opc;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ tmp = ioread32be(stats_reg);
-+
-+ if (enable)
-+ tmp |= BMI_COUNTERS_EN;
-+ else
-+ tmp &= ~BMI_COUNTERS_EN;
-+
-+ iowrite32be(tmp, stats_reg);
-+ return 0;
-+}
-+
-+int fman_port_set_queue_cnt_mode(struct fman_port *port, bool enable)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc);
-+
-+ if (enable)
-+ tmp |= QMI_PORT_CFG_EN_COUNTERS;
-+ else
-+ tmp &= ~QMI_PORT_CFG_EN_COUNTERS;
-+
-+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
-+ return 0;
-+}
-+
-+int fman_port_set_bpool_cnt_mode(struct fman_port *port,
-+ uint8_t bpid,
-+ bool enable)
-+{
-+ uint8_t index;
-+ uint32_t tmp;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ /* Find the pool */
-+ index = fman_port_find_bpool(port, bpid);
-+ if (index == port->ext_pools_num || index == FMAN_PORT_MAX_EXT_POOLS_NUM)
-+ /* Not found */
-+ return -EINVAL;
-+
-+ tmp = ioread32be(&port->bmi_regs->rx.fmbm_ebmpi[index]);
-+
-+ if (enable)
-+ tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
-+ else
-+ tmp &= ~BMI_EXT_BUF_POOL_EN_COUNTER;
-+
-+ iowrite32be(tmp, &port->bmi_regs->rx.fmbm_ebmpi[index]);
-+ return 0;
-+}
-+
-+uint32_t fman_port_get_stats_counter(struct fman_port *port,
-+ enum fman_port_stats_counters counter)
-+{
-+ uint32_t *stats_reg, ret_val;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ get_rx_stats_reg(port, counter, &stats_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ get_tx_stats_reg(port, counter, &stats_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ get_oh_stats_reg(port, counter, &stats_reg);
-+ break;
-+ default:
-+ stats_reg = NULL;
-+ }
-+
-+ if (stats_reg == NULL)
-+ return 0;
-+
-+ ret_val = ioread32be(stats_reg);
-+ return ret_val;
-+}
-+
-+void fman_port_set_stats_counter(struct fman_port *port,
-+ enum fman_port_stats_counters counter,
-+ uint32_t value)
-+{
-+ uint32_t *stats_reg;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ get_rx_stats_reg(port, counter, &stats_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ get_tx_stats_reg(port, counter, &stats_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ get_oh_stats_reg(port, counter, &stats_reg);
-+ break;
-+ default:
-+ stats_reg = NULL;
-+ }
-+
-+ if (stats_reg == NULL)
-+ return;
-+
-+ iowrite32be(value, stats_reg);
-+}
-+
-+uint32_t fman_port_get_perf_counter(struct fman_port *port,
-+ enum fman_port_perf_counters counter)
-+{
-+ uint32_t *perf_reg, ret_val;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ get_rx_perf_reg(port, counter, &perf_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ get_tx_perf_reg(port, counter, &perf_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ get_oh_perf_reg(port, counter, &perf_reg);
-+ break;
-+ default:
-+ perf_reg = NULL;
-+ }
-+
-+ if (perf_reg == NULL)
-+ return 0;
-+
-+ ret_val = ioread32be(perf_reg);
-+ return ret_val;
-+}
-+
-+void fman_port_set_perf_counter(struct fman_port *port,
-+ enum fman_port_perf_counters counter,
-+ uint32_t value)
-+{
-+ uint32_t *perf_reg;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ get_rx_perf_reg(port, counter, &perf_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_TX:
-+ case E_FMAN_PORT_TYPE_TX_10G:
-+ get_tx_perf_reg(port, counter, &perf_reg);
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ case E_FMAN_PORT_TYPE_HC:
-+ get_oh_perf_reg(port, counter, &perf_reg);
-+ break;
-+ default:
-+ perf_reg = NULL;
-+ }
-+
-+ if (perf_reg == NULL)
-+ return;
-+
-+ iowrite32be(value, perf_reg);
-+}
-+
-+uint32_t fman_port_get_qmi_counter(struct fman_port *port,
-+ enum fman_port_qmi_counters counter)
-+{
-+ uint32_t *queue_reg, ret_val;
-+
-+ get_qmi_counter_reg(port, counter, &queue_reg);
-+
-+ if (queue_reg == NULL)
-+ return 0;
-+
-+ ret_val = ioread32be(queue_reg);
-+ return ret_val;
-+}
-+
-+void fman_port_set_qmi_counter(struct fman_port *port,
-+ enum fman_port_qmi_counters counter,
-+ uint32_t value)
-+{
-+ uint32_t *queue_reg;
-+
-+ get_qmi_counter_reg(port, counter, &queue_reg);
-+
-+ if (queue_reg == NULL)
-+ return;
-+
-+ iowrite32be(value, queue_reg);
-+}
-+
-+uint32_t fman_port_get_bpool_counter(struct fman_port *port, uint8_t bpid)
-+{
-+ uint8_t index;
-+ uint32_t ret_val;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ break;
-+ default:
-+ return 0;
-+ }
-+
-+ /* Find the pool */
-+ index = fman_port_find_bpool(port, bpid);
-+ if (index == port->ext_pools_num || index == FMAN_PORT_MAX_EXT_POOLS_NUM)
-+ /* Not found */
-+ return 0;
-+
-+ ret_val = ioread32be(&port->bmi_regs->rx.fmbm_acnt[index]);
-+ return ret_val;
-+}
-+
-+void fman_port_set_bpool_counter(struct fman_port *port,
-+ uint8_t bpid,
-+ uint32_t value)
-+{
-+ uint8_t index;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ break;
-+ default:
-+ return;
-+ }
-+
-+ /* Find the pool */
-+ index = fman_port_find_bpool(port, bpid);
-+ if (index == port->ext_pools_num || index == FMAN_PORT_MAX_EXT_POOLS_NUM)
-+ /* Not found */
-+ return;
-+
-+ iowrite32be(value, &port->bmi_regs->rx.fmbm_acnt[index]);
-+}
-+
-+int fman_port_add_congestion_grps(struct fman_port *port,
-+ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM])
-+{
-+ int i;
-+ uint32_t tmp, *grp_map_reg;
-+ uint8_t max_grp_map_num;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ if (port->fm_rev_maj == 4)
-+ max_grp_map_num = 1;
-+ else
-+ max_grp_map_num = FMAN_PORT_CG_MAP_NUM;
-+ grp_map_reg = port->bmi_regs->rx.fmbm_rcgm;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ max_grp_map_num = 1;
-+ if (port->fm_rev_maj != 4)
-+ return -EINVAL;
-+ grp_map_reg = port->bmi_regs->oh.fmbm_ocgm;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ for (i = (max_grp_map_num - 1); i >= 0; i--) {
-+ if (grps_map[i] == 0)
-+ continue;
-+ tmp = ioread32be(&grp_map_reg[i]);
-+ tmp |= grps_map[i];
-+ iowrite32be(tmp, &grp_map_reg[i]);
-+ }
-+
-+ return 0;
-+}
-+
-+int fman_port_remove_congestion_grps(struct fman_port *port,
-+ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM])
-+{
-+ int i;
-+ uint32_t tmp, *grp_map_reg;
-+ uint8_t max_grp_map_num;
-+
-+ switch (port->type) {
-+ case E_FMAN_PORT_TYPE_RX:
-+ case E_FMAN_PORT_TYPE_RX_10G:
-+ if (port->fm_rev_maj == 4)
-+ max_grp_map_num = 1;
-+ else
-+ max_grp_map_num = FMAN_PORT_CG_MAP_NUM;
-+ grp_map_reg = port->bmi_regs->rx.fmbm_rcgm;
-+ break;
-+ case E_FMAN_PORT_TYPE_OP:
-+ max_grp_map_num = 1;
-+ if (port->fm_rev_maj != 4)
-+ return -EINVAL;
-+ grp_map_reg = port->bmi_regs->oh.fmbm_ocgm;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ for (i = (max_grp_map_num - 1); i >= 0; i--) {
-+ if (grps_map[i] == 0)
-+ continue;
-+ tmp = ioread32be(&grp_map_reg[i]);
-+ tmp &= ~grps_map[i];
-+ iowrite32be(tmp, &grp_map_reg[i]);
-+ }
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/Makefile
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+obj-y += fsl-ncsw-RTC.o
-+
-+fsl-ncsw-RTC-objs := fm_rtc.o fman_rtc.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c
-@@ -0,0 +1,692 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_rtc.c
-+
-+ @Description FM RTC driver implementation.
-+
-+ @Cautions None
-+*//***************************************************************************/
-+#include <linux/math64.h>
-+#include "error_ext.h"
-+#include "debug_ext.h"
-+#include "string_ext.h"
-+#include "part_ext.h"
-+#include "xx_ext.h"
-+#include "ncsw_ext.h"
-+
-+#include "fm_rtc.h"
-+#include "fm_common.h"
-+
-+
-+
-+/*****************************************************************************/
-+static t_Error CheckInitParameters(t_FmRtc *p_Rtc)
-+{
-+ struct rtc_cfg *p_RtcDriverParam = p_Rtc->p_RtcDriverParam;
-+ int i;
-+
-+ if ((p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_EXTERNAL) &&
-+ (p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_SYSTEM) &&
-+ (p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR))
-+ RETURN_ERROR(MAJOR, E_INVALID_CLOCK, ("Source clock undefined"));
-+
-+ if (p_Rtc->outputClockDivisor == 0)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("Divisor for output clock (should be positive)"));
-+ }
-+
-+ for (i=0; i < FM_RTC_NUM_OF_ALARMS; i++)
-+ {
-+ if ((p_RtcDriverParam->alarm_polarity[i] != E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW) &&
-+ (p_RtcDriverParam->alarm_polarity[i] != E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH))
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm %d signal polarity", i));
-+ }
-+ }
-+ for (i=0; i < FM_RTC_NUM_OF_EXT_TRIGGERS; i++)
-+ {
-+ if ((p_RtcDriverParam->trigger_polarity[i] != E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE) &&
-+ (p_RtcDriverParam->trigger_polarity[i] != E_FMAN_RTC_TRIGGER_ON_RISING_EDGE))
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Trigger %d signal polarity", i));
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+static void RtcExceptions(t_Handle h_FmRtc)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+ struct rtc_regs *p_MemMap;
-+ register uint32_t events;
-+
-+ ASSERT_COND(p_Rtc);
-+ p_MemMap = p_Rtc->p_MemMap;
-+
-+ events = fman_rtc_check_and_clear_event(p_MemMap);
-+ if (events & FMAN_RTC_TMR_TEVENT_ALM1)
-+ {
-+ if (p_Rtc->alarmParams[0].clearOnExpiration)
-+ {
-+ fman_rtc_set_timer_alarm_l(p_MemMap, 0, 0);
-+ fman_rtc_disable_interupt(p_MemMap, FMAN_RTC_TMR_TEVENT_ALM1);
-+ }
-+ ASSERT_COND(p_Rtc->alarmParams[0].f_AlarmCallback);
-+ p_Rtc->alarmParams[0].f_AlarmCallback(p_Rtc->h_App, 0);
-+ }
-+ if (events & FMAN_RTC_TMR_TEVENT_ALM2)
-+ {
-+ if (p_Rtc->alarmParams[1].clearOnExpiration)
-+ {
-+ fman_rtc_set_timer_alarm_l(p_MemMap, 1, 0);
-+ fman_rtc_disable_interupt(p_MemMap, FMAN_RTC_TMR_TEVENT_ALM2);
-+ }
-+ ASSERT_COND(p_Rtc->alarmParams[1].f_AlarmCallback);
-+ p_Rtc->alarmParams[1].f_AlarmCallback(p_Rtc->h_App, 1);
-+ }
-+ if (events & FMAN_RTC_TMR_TEVENT_PP1)
-+ {
-+ ASSERT_COND(p_Rtc->periodicPulseParams[0].f_PeriodicPulseCallback);
-+ p_Rtc->periodicPulseParams[0].f_PeriodicPulseCallback(p_Rtc->h_App, 0);
-+ }
-+ if (events & FMAN_RTC_TMR_TEVENT_PP2)
-+ {
-+ ASSERT_COND(p_Rtc->periodicPulseParams[1].f_PeriodicPulseCallback);
-+ p_Rtc->periodicPulseParams[1].f_PeriodicPulseCallback(p_Rtc->h_App, 1);
-+ }
-+ if (events & FMAN_RTC_TMR_TEVENT_ETS1)
-+ {
-+ ASSERT_COND(p_Rtc->externalTriggerParams[0].f_ExternalTriggerCallback);
-+ p_Rtc->externalTriggerParams[0].f_ExternalTriggerCallback(p_Rtc->h_App, 0);
-+ }
-+ if (events & FMAN_RTC_TMR_TEVENT_ETS2)
-+ {
-+ ASSERT_COND(p_Rtc->externalTriggerParams[1].f_ExternalTriggerCallback);
-+ p_Rtc->externalTriggerParams[1].f_ExternalTriggerCallback(p_Rtc->h_App, 1);
-+ }
-+}
-+
-+
-+/*****************************************************************************/
-+t_Handle FM_RTC_Config(t_FmRtcParams *p_FmRtcParam)
-+{
-+ t_FmRtc *p_Rtc;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmRtcParam, E_NULL_POINTER, NULL);
-+
-+ /* Allocate memory for the FM RTC driver parameters */
-+ p_Rtc = (t_FmRtc *)XX_Malloc(sizeof(t_FmRtc));
-+ if (!p_Rtc)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM RTC driver structure"));
-+ return NULL;
-+ }
-+
-+ memset(p_Rtc, 0, sizeof(t_FmRtc));
-+
-+ /* Allocate memory for the FM RTC driver parameters */
-+ p_Rtc->p_RtcDriverParam = (struct rtc_cfg *)XX_Malloc(sizeof(struct rtc_cfg));
-+ if (!p_Rtc->p_RtcDriverParam)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM RTC driver parameters"));
-+ XX_Free(p_Rtc);
-+ return NULL;
-+ }
-+
-+ memset(p_Rtc->p_RtcDriverParam, 0, sizeof(struct rtc_cfg));
-+
-+ /* Store RTC configuration parameters */
-+ p_Rtc->h_Fm = p_FmRtcParam->h_Fm;
-+
-+ /* Set default RTC configuration parameters */
-+ fman_rtc_defconfig(p_Rtc->p_RtcDriverParam);
-+
-+ p_Rtc->outputClockDivisor = DEFAULT_OUTPUT_CLOCK_DIVISOR;
-+ p_Rtc->p_RtcDriverParam->bypass = DEFAULT_BYPASS;
-+ p_Rtc->clockPeriodNanoSec = DEFAULT_CLOCK_PERIOD; /* 1 usec */
-+
-+
-+ /* Store RTC parameters in the RTC control structure */
-+ p_Rtc->p_MemMap = (struct rtc_regs *)UINT_TO_PTR(p_FmRtcParam->baseAddress);
-+ p_Rtc->h_App = p_FmRtcParam->h_App;
-+
-+ return p_Rtc;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_Init(t_Handle h_FmRtc)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+ struct rtc_cfg *p_RtcDriverParam;
-+ struct rtc_regs *p_MemMap;
-+ uint32_t freqCompensation = 0;
-+ uint64_t tmpDouble;
-+ bool init_freq_comp = FALSE;
-+
-+ p_RtcDriverParam = p_Rtc->p_RtcDriverParam;
-+ p_MemMap = p_Rtc->p_MemMap;
-+
-+ if (CheckInitParameters(p_Rtc)!=E_OK)
-+ RETURN_ERROR(MAJOR, E_CONFLICT,
-+ ("Init Parameters are not Valid"));
-+
-+    /* TODO: check that no timestamping MACs are active at this stage. */
-+
-+ /* find source clock frequency in Mhz */
-+ if (p_Rtc->p_RtcDriverParam->src_clk != E_FMAN_RTC_SOURCE_CLOCK_SYSTEM)
-+ p_Rtc->srcClkFreqMhz = p_Rtc->p_RtcDriverParam->ext_src_clk_freq;
-+ else
-+ p_Rtc->srcClkFreqMhz = (uint32_t)(FmGetMacClockFreq(p_Rtc->h_Fm));
-+
-+ /* if timer in Master mode Initialize TMR_CTRL */
-+ /* We want the counter (TMR_CNT) to count in nano-seconds */
-+ if (!p_RtcDriverParam->timer_slave_mode && p_Rtc->p_RtcDriverParam->bypass)
-+ p_Rtc->clockPeriodNanoSec = (1000 / p_Rtc->srcClkFreqMhz);
-+ else
-+ {
-+ /* Initialize TMR_ADD with the initial frequency compensation value:
-+ freqCompensation = (2^32 / frequency ratio) */
-+        /* frequency ratio = source clock / rtc clock =
-+         * (p_Rtc->srcClkFreqMhz * 1000000) / (1000000000 / p_Rtc->clockPeriodNanoSec) */
-+ init_freq_comp = TRUE;
-+ freqCompensation = (uint32_t)DIV_CEIL(ACCUMULATOR_OVERFLOW * 1000,
-+ p_Rtc->clockPeriodNanoSec * p_Rtc->srcClkFreqMhz);
-+ }
-+
-+ /* check the legality of the relation between source and destination clocks */
-+ /* should be larger than 1.0001 */
-+ tmpDouble = 10000 * (uint64_t)p_Rtc->clockPeriodNanoSec * (uint64_t)p_Rtc->srcClkFreqMhz;
-+ if ((tmpDouble) <= 10001)
-+ RETURN_ERROR(MAJOR, E_CONFLICT,
-+ ("Invalid relation between source and destination clocks. Should be larger than 1.0001"));
-+
-+ fman_rtc_init(p_RtcDriverParam,
-+ p_MemMap,
-+ FM_RTC_NUM_OF_ALARMS,
-+ FM_RTC_NUM_OF_PERIODIC_PULSES,
-+ FM_RTC_NUM_OF_EXT_TRIGGERS,
-+ init_freq_comp,
-+ freqCompensation,
-+ p_Rtc->outputClockDivisor);
-+
-+ /* Register the FM RTC interrupt */
-+ FmRegisterIntr(p_Rtc->h_Fm, e_FM_MOD_TMR, 0, e_FM_INTR_TYPE_NORMAL, RtcExceptions , p_Rtc);
-+
-+ /* Free parameters structures */
-+ XX_Free(p_Rtc->p_RtcDriverParam);
-+ p_Rtc->p_RtcDriverParam = NULL;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_Free(t_Handle h_FmRtc)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+
-+ if (p_Rtc->p_RtcDriverParam)
-+ {
-+ XX_Free(p_Rtc->p_RtcDriverParam);
-+ }
-+ else
-+ {
-+ FM_RTC_Disable(h_FmRtc);
-+ }
-+
-+ /* Unregister FM RTC interrupt */
-+ FmUnregisterIntr(p_Rtc->h_Fm, e_FM_MOD_TMR, 0, e_FM_INTR_TYPE_NORMAL);
-+ XX_Free(p_Rtc);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigSourceClock(t_Handle h_FmRtc,
-+ e_FmSrcClk srcClk,
-+ uint32_t freqInMhz)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ p_Rtc->p_RtcDriverParam->src_clk = (enum fman_src_clock)srcClk;
-+ if (srcClk != e_FM_RTC_SOURCE_CLOCK_SYSTEM)
-+ p_Rtc->p_RtcDriverParam->ext_src_clk_freq = freqInMhz;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigPeriod(t_Handle h_FmRtc, uint32_t period)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ p_Rtc->clockPeriodNanoSec = period;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigFrequencyBypass(t_Handle h_FmRtc, bool enabled)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ p_Rtc->p_RtcDriverParam->bypass = enabled;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigInvertedInputClockPhase(t_Handle h_FmRtc, bool inverted)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ p_Rtc->p_RtcDriverParam->invert_input_clk_phase = inverted;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigInvertedOutputClockPhase(t_Handle h_FmRtc, bool inverted)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ p_Rtc->p_RtcDriverParam->invert_output_clk_phase = inverted;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigOutputClockDivisor(t_Handle h_FmRtc, uint16_t divisor)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ p_Rtc->outputClockDivisor = divisor;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigPulseRealignment(t_Handle h_FmRtc, bool enable)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ p_Rtc->p_RtcDriverParam->pulse_realign = enable;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigAlarmPolarity(t_Handle h_FmRtc,
-+ uint8_t alarmId,
-+ e_FmRtcAlarmPolarity alarmPolarity)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (alarmId >= FM_RTC_NUM_OF_ALARMS)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm ID"));
-+
-+ p_Rtc->p_RtcDriverParam->alarm_polarity[alarmId] =
-+ (enum fman_rtc_alarm_polarity)alarmPolarity;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ConfigExternalTriggerPolarity(t_Handle h_FmRtc,
-+ uint8_t triggerId,
-+ e_FmRtcTriggerPolarity triggerPolarity)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (triggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External trigger ID"));
-+ }
-+
-+ p_Rtc->p_RtcDriverParam->trigger_polarity[triggerId] =
-+ (enum fman_rtc_trigger_polarity)triggerPolarity;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_Enable(t_Handle h_FmRtc, bool resetClock)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ fman_rtc_enable(p_Rtc->p_MemMap, resetClock);
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_Disable(t_Handle h_FmRtc)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ /* TODO: a check should be added here that no timestamping MACs
-+ * are still active at this stage. */
-+ fman_rtc_disable(p_Rtc->p_MemMap);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_SetClockOffset(t_Handle h_FmRtc, int64_t offset)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ fman_rtc_set_timer_offset(p_Rtc->p_MemMap, offset);
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_SetAlarm(t_Handle h_FmRtc, t_FmRtcAlarmParams *p_FmRtcAlarmParams)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+ uint64_t tmpAlarm;
-+ bool enable = FALSE;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmRtcAlarmParams->alarmId >= FM_RTC_NUM_OF_ALARMS)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm ID"));
-+ }
-+
-+ if (p_FmRtcAlarmParams->alarmTime < p_Rtc->clockPeriodNanoSec)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
-+ ("Alarm time must be equal or larger than RTC period - %d nanoseconds",
-+ p_Rtc->clockPeriodNanoSec));
-+ tmpAlarm = p_FmRtcAlarmParams->alarmTime;
-+ if (do_div(tmpAlarm, p_Rtc->clockPeriodNanoSec))
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
-+ ("Alarm time must be a multiple of RTC period - %d nanoseconds",
-+ p_Rtc->clockPeriodNanoSec));
-+
-+ if (p_FmRtcAlarmParams->f_AlarmCallback)
-+ {
-+ p_Rtc->alarmParams[p_FmRtcAlarmParams->alarmId].f_AlarmCallback = p_FmRtcAlarmParams->f_AlarmCallback;
-+ p_Rtc->alarmParams[p_FmRtcAlarmParams->alarmId].clearOnExpiration = p_FmRtcAlarmParams->clearOnExpiration;
-+ enable = TRUE;
-+ }
-+
-+ fman_rtc_set_alarm(p_Rtc->p_MemMap, p_FmRtcAlarmParams->alarmId, (unsigned long)tmpAlarm, enable);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_SetPeriodicPulse(t_Handle h_FmRtc, t_FmRtcPeriodicPulseParams *p_FmRtcPeriodicPulseParams)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+ bool enable = FALSE;
-+ uint64_t tmpFiper;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmRtcPeriodicPulseParams->periodicPulseId >= FM_RTC_NUM_OF_PERIODIC_PULSES)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse ID"));
-+ }
-+ if (fman_rtc_is_enabled(p_Rtc->p_MemMap))
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Can't set Periodic pulse when RTC is enabled."));
-+ if (p_FmRtcPeriodicPulseParams->periodicPulsePeriod < p_Rtc->clockPeriodNanoSec)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
-+ ("Periodic pulse must be equal or larger than RTC period - %d nanoseconds",
-+ p_Rtc->clockPeriodNanoSec));
-+ tmpFiper = p_FmRtcPeriodicPulseParams->periodicPulsePeriod;
-+ if (do_div(tmpFiper, p_Rtc->clockPeriodNanoSec))
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
-+ ("Periodic pulse must be a multiple of RTC period - %d nanoseconds",
-+ p_Rtc->clockPeriodNanoSec));
-+ if (tmpFiper & 0xffffffff00000000LL)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
-+ ("Periodic pulse/RTC Period must be smaller than 4294967296",
-+ p_Rtc->clockPeriodNanoSec));
-+
-+ if (p_FmRtcPeriodicPulseParams->f_PeriodicPulseCallback)
-+ {
-+ p_Rtc->periodicPulseParams[p_FmRtcPeriodicPulseParams->periodicPulseId].f_PeriodicPulseCallback =
-+ p_FmRtcPeriodicPulseParams->f_PeriodicPulseCallback;
-+ enable = TRUE;
-+ }
-+ fman_rtc_set_periodic_pulse(p_Rtc->p_MemMap, p_FmRtcPeriodicPulseParams->periodicPulseId, (uint32_t)tmpFiper, enable);
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ClearPeriodicPulse(t_Handle h_FmRtc, uint8_t periodicPulseId)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (periodicPulseId >= FM_RTC_NUM_OF_PERIODIC_PULSES)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse ID"));
-+ }
-+
-+ p_Rtc->periodicPulseParams[periodicPulseId].f_PeriodicPulseCallback = NULL;
-+ fman_rtc_clear_periodic_pulse(p_Rtc->p_MemMap, periodicPulseId);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_SetExternalTrigger(t_Handle h_FmRtc, t_FmRtcExternalTriggerParams *p_FmRtcExternalTriggerParams)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+ bool enable = FALSE;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (p_FmRtcExternalTriggerParams->externalTriggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External Trigger ID"));
-+ }
-+
-+ if (p_FmRtcExternalTriggerParams->f_ExternalTriggerCallback)
-+ {
-+ p_Rtc->externalTriggerParams[p_FmRtcExternalTriggerParams->externalTriggerId].f_ExternalTriggerCallback = p_FmRtcExternalTriggerParams->f_ExternalTriggerCallback;
-+ enable = TRUE;
-+ }
-+
-+ fman_rtc_set_ext_trigger(p_Rtc->p_MemMap, p_FmRtcExternalTriggerParams->externalTriggerId, enable, p_FmRtcExternalTriggerParams->usePulseAsInput);
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_ClearExternalTrigger(t_Handle h_FmRtc, uint8_t externalTriggerId)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (externalTriggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External Trigger ID"));
-+
-+ p_Rtc->externalTriggerParams[externalTriggerId].f_ExternalTriggerCallback = NULL;
-+
-+ fman_rtc_clear_external_trigger(p_Rtc->p_MemMap, externalTriggerId);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_GetExternalTriggerTimeStamp(t_Handle h_FmRtc,
-+ uint8_t triggerId,
-+ uint64_t *p_TimeStamp)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ if (triggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS)
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External trigger ID"));
-+
-+ *p_TimeStamp = fman_rtc_get_trigger_stamp(p_Rtc->p_MemMap, triggerId)*p_Rtc->clockPeriodNanoSec;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_GetCurrentTime(t_Handle h_FmRtc, uint64_t *p_Ts)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ *p_Ts = fman_rtc_get_timer(p_Rtc->p_MemMap)*p_Rtc->clockPeriodNanoSec;
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_SetCurrentTime(t_Handle h_FmRtc, uint64_t ts)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ do_div(ts, p_Rtc->clockPeriodNanoSec);
-+ fman_rtc_set_timer(p_Rtc->p_MemMap, (int64_t)ts);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_GetFreqCompensation(t_Handle h_FmRtc, uint32_t *p_Compensation)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ *p_Compensation = fman_rtc_get_frequency_compensation(p_Rtc->p_MemMap);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_SetFreqCompensation(t_Handle h_FmRtc, uint32_t freqCompensation)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ /* set the new freqCompensation */
-+ fman_rtc_set_frequency_compensation(p_Rtc->p_MemMap, freqCompensation);
-+
-+ return E_OK;
-+}
-+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+/*****************************************************************************/
-+t_Error FM_RTC_EnableInterrupt(t_Handle h_FmRtc, uint32_t events)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ /* enable interrupt */
-+ fman_rtc_enable_interupt(p_Rtc->p_MemMap, events);
-+
-+ return E_OK;
-+}
-+
-+/*****************************************************************************/
-+t_Error FM_RTC_DisableInterrupt(t_Handle h_FmRtc, uint32_t events)
-+{
-+ t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE);
-+
-+ /* disable interrupt */
-+ fman_rtc_disable_interupt(p_Rtc->p_MemMap, events);
-+
-+ return E_OK;
-+}
-+#endif
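For reference, the nanosecond-to-tick conversion performed by FM_RTC_SetAlarm() and FM_RTC_SetPeriodicPulse() above reduces to an exact integer division by the configured clock period; a minimal standalone sketch of that logic (the helper name and error convention are illustrative, not part of the patch):

#include <linux/types.h>
#include <asm/div64.h>

/* Sketch only: mirrors the validation done by the FM_RTC_Set* wrappers.
 * The interval must be at least one RTC clock period and an exact
 * multiple of it; the quotient is what gets programmed into the
 * TMR_ALARM / TMR_FIPER registers.
 */
static int rtc_ns_to_ticks(u64 interval_ns, u32 clock_period_ns, u64 *ticks)
{
	u64 tmp = interval_ns;

	if (interval_ns < clock_period_ns)
		return -1;			/* shorter than one tick */
	if (do_div(tmp, clock_period_ns))
		return -1;			/* not a whole number of ticks */
	*ticks = tmp;
	return 0;
}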
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h
-@@ -0,0 +1,96 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_rtc.h
-+
-+ @Description Memory map and internal definitions for FM RTC IEEE1588 Timer driver.
-+
-+ @Cautions None
-+*//***************************************************************************/
-+
-+#ifndef __FM_RTC_H__
-+#define __FM_RTC_H__
-+
-+#include "std_ext.h"
-+#include "fm_rtc_ext.h"
-+
-+
-+#define __ERR_MODULE__ MODULE_FM_RTC
-+
-+/* General definitions */
-+
-+#define ACCUMULATOR_OVERFLOW ((uint64_t)(1LL << 32))
-+#define DEFAULT_OUTPUT_CLOCK_DIVISOR 0x00000002
-+#define DEFAULT_BYPASS FALSE
-+#define DEFAULT_CLOCK_PERIOD 1000
-+
-+
-+
-+typedef struct t_FmRtcAlarm
-+{
-+ t_FmRtcExceptionsCallback *f_AlarmCallback;
-+ bool clearOnExpiration;
-+} t_FmRtcAlarm;
-+
-+typedef struct t_FmRtcPeriodicPulse
-+{
-+ t_FmRtcExceptionsCallback *f_PeriodicPulseCallback;
-+} t_FmRtcPeriodicPulse;
-+
-+typedef struct t_FmRtcExternalTrigger
-+{
-+ t_FmRtcExceptionsCallback *f_ExternalTriggerCallback;
-+} t_FmRtcExternalTrigger;
-+
-+
-+/**************************************************************************//**
-+ @Description RTC FM driver control structure.
-+*//***************************************************************************/
-+typedef struct t_FmRtc
-+{
-+ t_Part *p_Part; /**< Pointer to the integration device */
-+ t_Handle h_Fm;
-+ t_Handle h_App; /**< Application handle */
-+ struct rtc_regs *p_MemMap;
-+ uint32_t clockPeriodNanoSec; /**< RTC clock period in nano-seconds (for FS mode) */
-+ uint32_t srcClkFreqMhz;
-+ uint16_t outputClockDivisor; /**< Output clock divisor (for FS mode) */
-+ t_FmRtcAlarm alarmParams[FM_RTC_NUM_OF_ALARMS];
-+ t_FmRtcPeriodicPulse periodicPulseParams[FM_RTC_NUM_OF_PERIODIC_PULSES];
-+ t_FmRtcExternalTrigger externalTriggerParams[FM_RTC_NUM_OF_EXT_TRIGGERS];
-+ struct rtc_cfg *p_RtcDriverParam; /**< RTC Driver parameters (for Init phase) */
-+} t_FmRtc;
-+
-+
-+#endif /* __FM_RTC_H__ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Rtc/fman_rtc.c
-@@ -0,0 +1,334 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "fsl_fman_rtc.h"
-+
-+void fman_rtc_defconfig(struct rtc_cfg *cfg)
-+{
-+ int i;
-+ cfg->src_clk = DEFAULT_SRC_CLOCK;
-+ cfg->invert_input_clk_phase = DEFAULT_INVERT_INPUT_CLK_PHASE;
-+ cfg->invert_output_clk_phase = DEFAULT_INVERT_OUTPUT_CLK_PHASE;
-+ cfg->pulse_realign = DEFAULT_PULSE_REALIGN;
-+ for (i = 0; i < FMAN_RTC_MAX_NUM_OF_ALARMS; i++)
-+ cfg->alarm_polarity[i] = DEFAULT_ALARM_POLARITY;
-+ for (i = 0; i < FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS; i++)
-+ cfg->trigger_polarity[i] = DEFAULT_TRIGGER_POLARITY;
-+}
-+
-+uint32_t fman_rtc_get_events(struct rtc_regs *regs)
-+{
-+ return ioread32be(&regs->tmr_tevent);
-+}
-+
-+uint32_t fman_rtc_get_event(struct rtc_regs *regs, uint32_t ev_mask)
-+{
-+ return ioread32be(&regs->tmr_tevent) & ev_mask;
-+}
-+
-+uint32_t fman_rtc_get_interrupt_mask(struct rtc_regs *regs)
-+{
-+ return ioread32be(&regs->tmr_temask);
-+}
-+
-+void fman_rtc_set_interrupt_mask(struct rtc_regs *regs, uint32_t mask)
-+{
-+ iowrite32be(mask, &regs->tmr_temask);
-+}
-+
-+void fman_rtc_ack_event(struct rtc_regs *regs, uint32_t events)
-+{
-+ iowrite32be(events, &regs->tmr_tevent);
-+}
-+
-+uint32_t fman_rtc_check_and_clear_event(struct rtc_regs *regs)
-+{
-+ uint32_t event;
-+
-+ event = ioread32be(&regs->tmr_tevent);
-+ event &= ioread32be(&regs->tmr_temask);
-+
-+ if (event)
-+ iowrite32be(event, &regs->tmr_tevent);
-+ return event;
-+}
-+
-+uint32_t fman_rtc_get_frequency_compensation(struct rtc_regs *regs)
-+{
-+ return ioread32be(&regs->tmr_add);
-+}
-+
-+void fman_rtc_set_frequency_compensation(struct rtc_regs *regs, uint32_t val)
-+{
-+ iowrite32be(val, &regs->tmr_add);
-+}
-+
-+void fman_rtc_enable_interupt(struct rtc_regs *regs, uint32_t events)
-+{
-+ fman_rtc_set_interrupt_mask(regs, fman_rtc_get_interrupt_mask(regs) | events);
-+}
-+
-+void fman_rtc_disable_interupt(struct rtc_regs *regs, uint32_t events)
-+{
-+ fman_rtc_set_interrupt_mask(regs, fman_rtc_get_interrupt_mask(regs) & ~events);
-+}
-+
-+void fman_rtc_set_timer_alarm_l(struct rtc_regs *regs, int index, uint32_t val)
-+{
-+ iowrite32be(val, &regs->tmr_alarm[index].tmr_alarm_l);
-+}
-+
-+void fman_rtc_set_timer_fiper(struct rtc_regs *regs, int index, uint32_t val)
-+{
-+ iowrite32be(val, &regs->tmr_fiper[index]);
-+}
-+
-+void fman_rtc_set_timer_alarm(struct rtc_regs *regs, int index, int64_t val)
-+{
-+ iowrite32be((uint32_t)val, &regs->tmr_alarm[index].tmr_alarm_l);
-+ iowrite32be((uint32_t)(val >> 32), &regs->tmr_alarm[index].tmr_alarm_h);
-+}
-+
-+void fman_rtc_set_timer_offset(struct rtc_regs *regs, int64_t val)
-+{
-+ iowrite32be((uint32_t)val, &regs->tmr_off_l);
-+ iowrite32be((uint32_t)(val >> 32), &regs->tmr_off_h);
-+}
-+
-+uint64_t fman_rtc_get_trigger_stamp(struct rtc_regs *regs, int id)
-+{
-+ uint64_t time;
-+ /* The low word must be read first so the high word is latched coherently */
-+ time = (uint64_t)ioread32be(&regs->tmr_etts[id].tmr_etts_l);
-+ time |= ((uint64_t)ioread32be(&regs->tmr_etts[id].tmr_etts_h)
-+ << 32);
-+
-+ return time;
-+}
-+
-+uint32_t fman_rtc_get_timer_ctrl(struct rtc_regs *regs)
-+{
-+ return ioread32be(&regs->tmr_ctrl);
-+}
-+
-+void fman_rtc_set_timer_ctrl(struct rtc_regs *regs, uint32_t val)
-+{
-+ iowrite32be(val, &regs->tmr_ctrl);
-+}
-+
-+void fman_rtc_timers_soft_reset(struct rtc_regs *regs)
-+{
-+ fman_rtc_set_timer_ctrl(regs, FMAN_RTC_TMR_CTRL_TMSR);
-+ udelay(10);
-+ fman_rtc_set_timer_ctrl(regs, 0);
-+}
-+
-+void fman_rtc_init(struct rtc_cfg *cfg, struct rtc_regs *regs, int num_alarms,
-+ int num_fipers, int num_ext_triggers, bool init_freq_comp,
-+ uint32_t freq_compensation, uint32_t output_clock_divisor)
-+{
-+ uint32_t tmr_ctrl;
-+ int i;
-+
-+ fman_rtc_timers_soft_reset(regs);
-+
-+ /* Set the source clock */
-+ switch (cfg->src_clk) {
-+ case E_FMAN_RTC_SOURCE_CLOCK_SYSTEM:
-+ tmr_ctrl = FMAN_RTC_TMR_CTRL_CKSEL_MAC_CLK;
-+ break;
-+ case E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR:
-+ tmr_ctrl = FMAN_RTC_TMR_CTRL_CKSEL_OSC_CLK;
-+ break;
-+ default:
-+ /* Use the external TMR reference clock. */
-+ tmr_ctrl = FMAN_RTC_TMR_CTRL_CKSEL_EXT_CLK;
-+ break;
-+ }
-+
-+ /* Whatever period the user picked, the timestamp advances by 1
-+ * each time that period elapses. */
-+ tmr_ctrl |= ((1 << FMAN_RTC_TMR_CTRL_TCLK_PERIOD_SHIFT) &
-+ FMAN_RTC_TMR_CTRL_TCLK_PERIOD_MASK);
-+
-+ if (cfg->invert_input_clk_phase)
-+ tmr_ctrl |= FMAN_RTC_TMR_CTRL_CIPH;
-+ if (cfg->invert_output_clk_phase)
-+ tmr_ctrl |= FMAN_RTC_TMR_CTRL_COPH;
-+
-+ for (i = 0; i < num_alarms; i++) {
-+ if (cfg->alarm_polarity[i] ==
-+ E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW)
-+ tmr_ctrl |= (FMAN_RTC_TMR_CTRL_ALMP1 >> i);
-+ }
-+
-+ for (i = 0; i < num_ext_triggers; i++)
-+ if (cfg->trigger_polarity[i] ==
-+ E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE)
-+ tmr_ctrl |= (FMAN_RTC_TMR_CTRL_ETEP1 << i);
-+
-+ if (!cfg->timer_slave_mode && cfg->bypass)
-+ tmr_ctrl |= FMAN_RTC_TMR_CTRL_BYP;
-+
-+ fman_rtc_set_timer_ctrl(regs, tmr_ctrl);
-+ if (init_freq_comp)
-+ fman_rtc_set_frequency_compensation(regs, freq_compensation);
-+
-+ /* Clear TMR_ALARM registers */
-+ for (i = 0; i < num_alarms; i++)
-+ fman_rtc_set_timer_alarm(regs, i, 0xFFFFFFFFFFFFFFFFLL);
-+
-+ /* Clear TMR_TEVENT */
-+ fman_rtc_ack_event(regs, FMAN_RTC_TMR_TEVENT_ALL);
-+
-+ /* Initialize TMR_TEMASK */
-+ fman_rtc_set_interrupt_mask(regs, 0);
-+
-+ /* Clear TMR_FIPER registers */
-+ for (i = 0; i < num_fipers; i++)
-+ fman_rtc_set_timer_fiper(regs, i, 0xFFFFFFFF);
-+
-+ /* Initialize TMR_PRSC */
-+ iowrite32be(output_clock_divisor, &regs->tmr_prsc);
-+
-+ /* Clear TMR_OFF */
-+ fman_rtc_set_timer_offset(regs, 0);
-+}
-+
-+bool fman_rtc_is_enabled(struct rtc_regs *regs)
-+{
-+ return (bool)(fman_rtc_get_timer_ctrl(regs) & FMAN_RTC_TMR_CTRL_TE);
-+}
-+
-+void fman_rtc_enable(struct rtc_regs *regs, bool reset_clock)
-+{
-+ uint32_t tmr_ctrl = fman_rtc_get_timer_ctrl(regs);
-+
-+ /* TODO check that no timestamping MACs are working in this stage. */
-+ if (reset_clock) {
-+ fman_rtc_set_timer_ctrl(regs, (tmr_ctrl | FMAN_RTC_TMR_CTRL_TMSR));
-+
-+ udelay(10);
-+ /* Clear TMR_OFF */
-+ fman_rtc_set_timer_offset(regs, 0);
-+ }
-+
-+ fman_rtc_set_timer_ctrl(regs, (tmr_ctrl | FMAN_RTC_TMR_CTRL_TE));
-+}
-+
-+void fman_rtc_disable(struct rtc_regs *regs)
-+{
-+ fman_rtc_set_timer_ctrl(regs, (fman_rtc_get_timer_ctrl(regs)
-+ & ~(FMAN_RTC_TMR_CTRL_TE)));
-+}
-+
-+void fman_rtc_clear_periodic_pulse(struct rtc_regs *regs, int id)
-+{
-+ uint32_t tmp_reg;
-+ if (id == 0)
-+ tmp_reg = FMAN_RTC_TMR_TEVENT_PP1;
-+ else
-+ tmp_reg = FMAN_RTC_TMR_TEVENT_PP2;
-+ fman_rtc_disable_interupt(regs, tmp_reg);
-+
-+ tmp_reg = fman_rtc_get_timer_ctrl(regs);
-+ if (tmp_reg & FMAN_RTC_TMR_CTRL_FS)
-+ fman_rtc_set_timer_ctrl(regs, tmp_reg & ~FMAN_RTC_TMR_CTRL_FS);
-+
-+ fman_rtc_set_timer_fiper(regs, id, 0xFFFFFFFF);
-+}
-+
-+void fman_rtc_clear_external_trigger(struct rtc_regs *regs, int id)
-+{
-+ uint32_t tmpReg, tmp_ctrl;
-+
-+ if (id == 0)
-+ tmpReg = FMAN_RTC_TMR_TEVENT_ETS1;
-+ else
-+ tmpReg = FMAN_RTC_TMR_TEVENT_ETS2;
-+ fman_rtc_disable_interupt(regs, tmpReg);
-+
-+ if (id == 0)
-+ tmpReg = FMAN_RTC_TMR_CTRL_PP1L;
-+ else
-+ tmpReg = FMAN_RTC_TMR_CTRL_PP2L;
-+ tmp_ctrl = fman_rtc_get_timer_ctrl(regs);
-+ if (tmp_ctrl & tmpReg)
-+ fman_rtc_set_timer_ctrl(regs, tmp_ctrl & ~tmpReg);
-+}
-+
-+void fman_rtc_set_alarm(struct rtc_regs *regs, int id, uint32_t val, bool enable)
-+{
-+ uint32_t tmpReg;
-+ fman_rtc_set_timer_alarm(regs, id, val);
-+ if (enable) {
-+ if (id == 0)
-+ tmpReg = FMAN_RTC_TMR_TEVENT_ALM1;
-+ else
-+ tmpReg = FMAN_RTC_TMR_TEVENT_ALM2;
-+ fman_rtc_enable_interupt(regs, tmpReg);
-+ }
-+}
-+
-+void fman_rtc_set_periodic_pulse(struct rtc_regs *regs, int id, uint32_t val,
-+ bool enable)
-+{
-+ uint32_t tmpReg;
-+ fman_rtc_set_timer_fiper(regs, id, val);
-+ if (enable) {
-+ if (id == 0)
-+ tmpReg = FMAN_RTC_TMR_TEVENT_PP1;
-+ else
-+ tmpReg = FMAN_RTC_TMR_TEVENT_PP2;
-+ fman_rtc_enable_interupt(regs, tmpReg);
-+ }
-+}
-+
-+void fman_rtc_set_ext_trigger(struct rtc_regs *regs, int id, bool enable,
-+ bool use_pulse_as_input)
-+{
-+ uint32_t tmpReg;
-+ if (enable) {
-+ if (id == 0)
-+ tmpReg = FMAN_RTC_TMR_TEVENT_ETS1;
-+ else
-+ tmpReg = FMAN_RTC_TMR_TEVENT_ETS2;
-+ fman_rtc_enable_interupt(regs, tmpReg);
-+ }
-+ if (use_pulse_as_input) {
-+ if (id == 0)
-+ tmpReg = FMAN_RTC_TMR_CTRL_PP1L;
-+ else
-+ tmpReg = FMAN_RTC_TMR_CTRL_PP2L;
-+ fman_rtc_set_timer_ctrl(regs, fman_rtc_get_timer_ctrl(regs) | tmpReg);
-+ }
-+}
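The "read the low word first" convention used by fman_rtc_get_trigger_stamp() above (and by the timer read path) matters because the hardware latches the high half when the low half is read; a generic sketch of that access pattern, with illustrative names only:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch of the split 64-bit big-endian register read used above.
 * Reading the low word first snapshots the high word, so the two
 * halves belong to the same instant.
 */
static u64 read_split_counter_be(void __iomem *lo, void __iomem *hi)
{
	u64 val;

	val  = (u64)ioread32be(lo);		/* must be read first */
	val |= (u64)ioread32be(hi) << 32;
	return val;
}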
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/Makefile
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+
-+obj-y += fsl-ncsw-sp.o
-+
-+fsl-ncsw-sp-objs := fm_sp.o fman_sp.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c
-@@ -0,0 +1,757 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_sp.c
-+
-+ @Description FM PCD Storage profile ...
-+*//***************************************************************************/
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+#include "net_ext.h"
-+
-+#include "fm_vsp_ext.h"
-+#include "fm_sp.h"
-+#include "fm_common.h"
-+#include "fsl_fman_sp.h"
-+
-+
-+#if (DPAA_VERSION >= 11)
-+static t_Error CheckParamsGeneratedInternally(t_FmVspEntry *p_FmVspEntry)
-+{
-+ t_Error err = E_OK;
-+
-+ if ((err = FmSpCheckIntContextParams(&p_FmVspEntry->intContext))!= E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if ((err = FmSpCheckBufMargins(&p_FmVspEntry->bufMargins)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ return err;
-+
-+}
-+
-+static t_Error CheckParams(t_FmVspEntry *p_FmVspEntry)
-+{
-+ t_Error err = E_OK;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->h_Fm, E_INVALID_HANDLE);
-+
-+ if ((err = FmSpCheckBufPoolsParams(&p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools,
-+ p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools,
-+ p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion)) != E_OK)
-+
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset & ~FM_LIODN_OFFSET_MASK)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1));
-+
-+ err = FmVSPCheckRelativeProfile(p_FmVspEntry->h_Fm,
-+ p_FmVspEntry->portType,
-+ p_FmVspEntry->portId,
-+ p_FmVspEntry->relativeProfileId);
-+
-+ return err;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+
-+/*****************************************************************************/
-+/* Inter-module API routines */
-+/*****************************************************************************/
-+void FmSpSetBufPoolsInAscOrderOfBufSizes(t_FmExtPools *p_FmExtPools,
-+ uint8_t *orderedArray,
-+ uint16_t *sizesArray)
-+{
-+ uint16_t bufSize = 0;
-+ int i=0, j=0, k=0;
-+
-+ /* First, copy the external buffer pools' information into an ordered local array */
-+ for (i=0;i<p_FmExtPools->numOfPoolsUsed;i++)
-+ {
-+ /* get pool size */
-+ bufSize = p_FmExtPools->extBufPool[i].size;
-+
-+ /* keep sizes in an array according to poolId for direct access */
-+ sizesArray[p_FmExtPools->extBufPool[i].id] = bufSize;
-+
-+ /* save poolId in an ordered array according to size */
-+ for (j=0;j<=i;j++)
-+ {
-+ /* this is the next free place in the array */
-+ if (j==i)
-+ orderedArray[i] = p_FmExtPools->extBufPool[i].id;
-+ else
-+ {
-+ /* find the right place for this poolId */
-+ if (bufSize < sizesArray[orderedArray[j]])
-+ {
-+ /* move the poolIds one place ahead to make room for this poolId */
-+ for (k=i;k>j;k--)
-+ orderedArray[k] = orderedArray[k-1];
-+
-+ /* now k==j, this is the place for the new size */
-+ orderedArray[k] = p_FmExtPools->extBufPool[i].id;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+}
-+
-+t_Error FmSpCheckBufPoolsParams(t_FmExtPools *p_FmExtPools,
-+ t_FmBackupBmPools *p_FmBackupBmPools,
-+ t_FmBufPoolDepletion *p_FmBufPoolDepletion)
-+{
-+
-+ int i = 0, j = 0;
-+ bool found;
-+ uint8_t count = 0;
-+
-+ if (p_FmExtPools)
-+ {
-+ if (p_FmExtPools->numOfPoolsUsed > FM_PORT_MAX_NUM_OF_EXT_POOLS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfPoolsUsed can't be larger than %d", FM_PORT_MAX_NUM_OF_EXT_POOLS));
-+
-+ for (i=0;i<p_FmExtPools->numOfPoolsUsed;i++)
-+ {
-+ if (p_FmExtPools->extBufPool[i].id >= BM_MAX_NUM_OF_POOLS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extBufPools.extBufPool[%d].id can't be larger than %d", i, BM_MAX_NUM_OF_POOLS));
-+ if (!p_FmExtPools->extBufPool[i].size)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extBufPools.extBufPool[%d].size is 0", i));
-+ }
-+ }
-+ if (!p_FmExtPools && (p_FmBackupBmPools || p_FmBufPoolDepletion))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("backupBmPools ot bufPoolDepletion can not be defined without external pools"));
-+
-+ /* backup BM pools indication is valid only for some chip derivatives
-+ (limited by the config routine) */
-+ if (p_FmBackupBmPools)
-+ {
-+ if (p_FmBackupBmPools->numOfBackupPools >= p_FmExtPools->numOfPoolsUsed)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_BackupBmPools must be smaller than extBufPools.numOfPoolsUsed"));
-+ found = FALSE;
-+ for (i = 0;i<p_FmBackupBmPools->numOfBackupPools;i++)
-+ {
-+
-+ for (j=0;j<p_FmExtPools->numOfPoolsUsed;j++)
-+ {
-+ if (p_FmBackupBmPools->poolIds[i] == p_FmExtPools->extBufPool[j].id)
-+ {
-+ found = TRUE;
-+ break;
-+ }
-+ }
-+ if (!found)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("All p_BackupBmPools.poolIds must be included in extBufPools.extBufPool[n].id"));
-+ else
-+ found = FALSE;
-+ }
-+ }
-+
-+ /* up to extBufPools.numOfPoolsUsed pools may be defined */
-+ if (p_FmBufPoolDepletion && p_FmBufPoolDepletion->poolsGrpModeEnable)
-+ {
-+ if ((p_FmBufPoolDepletion->numOfPools > p_FmExtPools->numOfPoolsUsed))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPools can't be larger than %d and can't be larger than numOfPoolsUsed", FM_PORT_MAX_NUM_OF_EXT_POOLS));
-+
-+ if (!p_FmBufPoolDepletion->numOfPools)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPoolsToConsider can not be 0 when poolsGrpModeEnable=TRUE"));
-+
-+ found = FALSE;
-+ count = 0;
-+ /* for each pool that is in poolsToConsider, check if it is defined
-+ in extBufPool */
-+ for (i=0;i<BM_MAX_NUM_OF_POOLS;i++)
-+ {
-+ if (p_FmBufPoolDepletion->poolsToConsider[i])
-+ {
-+ for (j=0;j<p_FmExtPools->numOfPoolsUsed;j++)
-+ {
-+ if (i == p_FmExtPools->extBufPool[j].id)
-+ {
-+ found = TRUE;
-+ count++;
-+ break;
-+ }
-+ }
-+ if (!found)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Pools selected for depletion are not used."));
-+ else
-+ found = FALSE;
-+ }
-+ }
-+ /* check that the number of pools that we have checked is equal to the number announced by the user */
-+ if (count != p_FmBufPoolDepletion->numOfPools)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPools is larger than the number of pools defined."));
-+ }
-+
-+ if (p_FmBufPoolDepletion && p_FmBufPoolDepletion->singlePoolModeEnable)
-+ {
-+ /* calculate vector for number of pools depletion */
-+ found = FALSE;
-+ count = 0;
-+ for (i=0;i<BM_MAX_NUM_OF_POOLS;i++)
-+ {
-+ if (p_FmBufPoolDepletion->poolsToConsiderForSingleMode[i])
-+ {
-+ for (j=0;j<p_FmExtPools->numOfPoolsUsed;j++)
-+ {
-+ if (i == p_FmExtPools->extBufPool[j].id)
-+ {
-+ found = TRUE;
-+ count++;
-+ break;
-+ }
-+ }
-+ if (!found)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Pools selected for depletion are not used."));
-+ else
-+ found = FALSE;
-+ }
-+ }
-+ if (!count)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("No pools defined for single buffer mode pool depletion."));
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSpCheckIntContextParams(t_FmSpIntContextDataCopy *p_FmSpIntContextDataCopy)
-+{
-+ /* Check that divisible by 16 and not larger than 240 */
-+ if (p_FmSpIntContextDataCopy->intContextOffset >MAX_INT_OFFSET)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.intContextOffset can't be larger than %d", MAX_INT_OFFSET));
-+ if (p_FmSpIntContextDataCopy->intContextOffset % OFFSET_UNITS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.intContextOffset has to be divisible by %d", OFFSET_UNITS));
-+
-+ /* check that ic size+ic internal offset, does not exceed ic block size */
-+ if (p_FmSpIntContextDataCopy->size + p_FmSpIntContextDataCopy->intContextOffset > MAX_IC_SIZE)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size + intContext.intContextOffset has to be smaller than %d", MAX_IC_SIZE));
-+ /* Check that divisible by 16 and not larger than 256 */
-+ if (p_FmSpIntContextDataCopy->size % OFFSET_UNITS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size has to be divisible by %d", OFFSET_UNITS));
-+
-+ /* Check that divisible by 16 and not larger than 4K */
-+ if (p_FmSpIntContextDataCopy->extBufOffset > MAX_EXT_OFFSET)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.extBufOffset can't be larger than %d", MAX_EXT_OFFSET));
-+ if (p_FmSpIntContextDataCopy->extBufOffset % OFFSET_UNITS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.extBufOffset has to be divisible by %d", OFFSET_UNITS));
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSpCheckBufMargins(t_FmSpBufMargins *p_FmSpBufMargins)
-+{
-+ /* Check the margin definition */
-+ if (p_FmSpBufMargins->startMargins > MAX_EXT_BUFFER_OFFSET)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufMargins.startMargins can't be larger than %d", MAX_EXT_BUFFER_OFFSET));
-+ if (p_FmSpBufMargins->endMargins > MAX_EXT_BUFFER_OFFSET)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufMargins.endMargins can't be larger than %d", MAX_EXT_BUFFER_OFFSET));
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSpBuildBufferStructure(t_FmSpIntContextDataCopy *p_FmSpIntContextDataCopy,
-+ t_FmBufferPrefixContent *p_BufferPrefixContent,
-+ t_FmSpBufMargins *p_FmSpBufMargins,
-+ t_FmSpBufferOffsets *p_FmSpBufferOffsets,
-+ uint8_t *internalBufferOffset)
-+{
-+ uint32_t tmp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmSpIntContextDataCopy, E_INVALID_VALUE);
-+ ASSERT_COND(p_FmSpIntContextDataCopy);
-+ ASSERT_COND(p_BufferPrefixContent);
-+ ASSERT_COND(p_FmSpBufMargins);
-+ ASSERT_COND(p_FmSpBufferOffsets);
-+
-+ /* Align start of internal context data to 16 byte */
-+ p_FmSpIntContextDataCopy->extBufOffset =
-+ (uint16_t)((p_BufferPrefixContent->privDataSize & (OFFSET_UNITS-1)) ?
-+ ((p_BufferPrefixContent->privDataSize + OFFSET_UNITS) & ~(uint16_t)(OFFSET_UNITS-1)) :
-+ p_BufferPrefixContent->privDataSize);
-+
-+ /* Translate margin and intContext params to FM parameters */
-+ /* Initialize with illegal value. Later we'll set legal values. */
-+ p_FmSpBufferOffsets->prsResultOffset = (uint32_t)ILLEGAL_BASE;
-+ p_FmSpBufferOffsets->timeStampOffset = (uint32_t)ILLEGAL_BASE;
-+ p_FmSpBufferOffsets->hashResultOffset= (uint32_t)ILLEGAL_BASE;
-+ p_FmSpBufferOffsets->pcdInfoOffset = (uint32_t)ILLEGAL_BASE;
-+
-+ /* Internally the driver supports 4 options:
-+ 1. prsResult/timestamp/hashResult selection (in fact 8 combinations, but for
-+ simplicity we treat them as a single option).
-+ 2. All IC context (from the AD), not including debug. */
-+
-+ /* This 'if' covers option 2. We copy from beginning of context. */
-+ if (p_BufferPrefixContent->passAllOtherPCDInfo)
-+ {
-+ p_FmSpIntContextDataCopy->size = 128; /* must be aligned to 16 */
-+ /* Start copying data after 16 bytes (FD) from the beginning of the internal context */
-+ p_FmSpIntContextDataCopy->intContextOffset = 16;
-+
-+ if (p_BufferPrefixContent->passAllOtherPCDInfo)
-+ p_FmSpBufferOffsets->pcdInfoOffset = p_FmSpIntContextDataCopy->extBufOffset;
-+ if (p_BufferPrefixContent->passPrsResult)
-+ p_FmSpBufferOffsets->prsResultOffset =
-+ (uint32_t)(p_FmSpIntContextDataCopy->extBufOffset + 16);
-+ if (p_BufferPrefixContent->passTimeStamp)
-+ p_FmSpBufferOffsets->timeStampOffset =
-+ (uint32_t)(p_FmSpIntContextDataCopy->extBufOffset + 48);
-+ if (p_BufferPrefixContent->passHashResult)
-+ p_FmSpBufferOffsets->hashResultOffset =
-+ (uint32_t)(p_FmSpIntContextDataCopy->extBufOffset + 56);
-+ }
-+ else
-+ {
-+ /* This case covers the options under 1 */
-+ /* Copy size must be in 16-byte granularity. */
-+ p_FmSpIntContextDataCopy->size =
-+ (uint16_t)((p_BufferPrefixContent->passPrsResult ? 32 : 0) +
-+ ((p_BufferPrefixContent->passTimeStamp ||
-+ p_BufferPrefixContent->passHashResult) ? 16 : 0));
-+
-+ /* Align start of internal context data to 16 byte */
-+ p_FmSpIntContextDataCopy->intContextOffset =
-+ (uint8_t)(p_BufferPrefixContent->passPrsResult ? 32 :
-+ ((p_BufferPrefixContent->passTimeStamp ||
-+ p_BufferPrefixContent->passHashResult) ? 64 : 0));
-+
-+ if (p_BufferPrefixContent->passPrsResult)
-+ p_FmSpBufferOffsets->prsResultOffset = p_FmSpIntContextDataCopy->extBufOffset;
-+ if (p_BufferPrefixContent->passTimeStamp)
-+ p_FmSpBufferOffsets->timeStampOffset = p_BufferPrefixContent->passPrsResult ?
-+ (p_FmSpIntContextDataCopy->extBufOffset + sizeof(t_FmPrsResult)) :
-+ p_FmSpIntContextDataCopy->extBufOffset;
-+ if (p_BufferPrefixContent->passHashResult)
-+ /* If PR is not requested, whether TS is requested or not, IC will be copied from TS */
-+ p_FmSpBufferOffsets->hashResultOffset = p_BufferPrefixContent->passPrsResult ?
-+ (p_FmSpIntContextDataCopy->extBufOffset + sizeof(t_FmPrsResult) + 8) :
-+ p_FmSpIntContextDataCopy->extBufOffset + 8;
-+ }
-+
-+ if (p_FmSpIntContextDataCopy->size)
-+ p_FmSpBufMargins->startMargins =
-+ (uint16_t)(p_FmSpIntContextDataCopy->extBufOffset +
-+ p_FmSpIntContextDataCopy->size);
-+ else
-+ /* No internal context passing; startMargins comes immediately after privateInfo */
-+ p_FmSpBufMargins->startMargins = p_BufferPrefixContent->privDataSize;
-+
-+ /* save extra space for manip in both external and internal buffers */
-+ if (p_BufferPrefixContent->manipExtraSpace)
-+ {
-+ uint8_t extraSpace;
-+#ifdef FM_CAPWAP_SUPPORT
-+ if ((p_BufferPrefixContent->manipExtraSpace + CAPWAP_FRAG_EXTRA_SPACE) >= 256)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("p_BufferPrefixContent->manipExtraSpace should be less than %d",
-+ 256-CAPWAP_FRAG_EXTRA_SPACE));
-+ extraSpace = (uint8_t)(p_BufferPrefixContent->manipExtraSpace + CAPWAP_FRAG_EXTRA_SPACE);
-+#else
-+ extraSpace = p_BufferPrefixContent->manipExtraSpace;
-+#endif /* FM_CAPWAP_SUPPORT */
-+ p_FmSpBufferOffsets->manipOffset = p_FmSpBufMargins->startMargins;
-+ p_FmSpBufMargins->startMargins += extraSpace;
-+ *internalBufferOffset = extraSpace;
-+ }
-+
-+ /* align data start */
-+ tmp = (uint32_t)(p_FmSpBufMargins->startMargins % p_BufferPrefixContent->dataAlign);
-+ if (tmp)
-+ p_FmSpBufMargins->startMargins += (p_BufferPrefixContent->dataAlign-tmp);
-+ p_FmSpBufferOffsets->dataOffset = p_FmSpBufMargins->startMargins;
-+
-+ return E_OK;
-+}
-+/*********************** End of inter-module routines ************************/
-+
-+
-+#if (DPAA_VERSION >= 11)
-+/*****************************************************************************/
-+/* API routines */
-+/*****************************************************************************/
-+t_Handle FM_VSP_Config(t_FmVspParams *p_FmVspParams)
-+{
-+ t_FmVspEntry *p_FmVspEntry = NULL;
-+ struct fm_storage_profile_params fm_vsp_params;
-+
-+ p_FmVspEntry = (t_FmVspEntry *)XX_Malloc(sizeof(t_FmVspEntry));
-+ if (!p_FmVspEntry)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_StorageProfile allocation failed"));
-+ return NULL;
-+ }
-+ memset(p_FmVspEntry, 0, sizeof(t_FmVspEntry));
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams = (t_FmVspEntryDriverParams *)XX_Malloc(sizeof(t_FmVspEntryDriverParams));
-+ if (!p_FmVspEntry->p_FmVspEntryDriverParams)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_FmVspEntryDriverParams allocation failed"));
-+ XX_Free(p_FmVspEntry);
-+ return NULL;
-+ }
-+ memset(p_FmVspEntry->p_FmVspEntryDriverParams, 0, sizeof(t_FmVspEntryDriverParams));
-+ fman_vsp_defconfig(&fm_vsp_params);
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr = fm_vsp_params.header_cache_attr;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr = fm_vsp_params.int_context_cache_attr;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr = fm_vsp_params.scatter_gather_cache_attr;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData = fm_vsp_params.dma_swap_data;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize = fm_vsp_params.dma_write_optimize;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather = fm_vsp_params.no_scather_gather;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.privDataSize = DEFAULT_FM_SP_bufferPrefixContent_privDataSize;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passPrsResult= DEFAULT_FM_SP_bufferPrefixContent_passPrsResult;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passTimeStamp= DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passAllOtherPCDInfo
-+ = DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.dataAlign = DEFAULT_FM_SP_bufferPrefixContent_dataAlign;
-+ p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset = p_FmVspParams->liodnOffset;
-+
-+ memcpy(&p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools, &p_FmVspParams->extBufPools, sizeof(t_FmExtPools));
-+ p_FmVspEntry->h_Fm = p_FmVspParams->h_Fm;
-+ p_FmVspEntry->portType = p_FmVspParams->portParams.portType;
-+ p_FmVspEntry->portId = p_FmVspParams->portParams.portId;
-+
-+ p_FmVspEntry->relativeProfileId = p_FmVspParams->relativeProfileId;
-+
-+ return p_FmVspEntry;
-+}
-+
-+t_Error FM_VSP_Init(t_Handle h_FmVsp)
-+{
-+
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry *)h_FmVsp;
-+ struct fm_storage_profile_params fm_vsp_params;
-+ uint8_t orderedArray[FM_PORT_MAX_NUM_OF_EXT_POOLS];
-+ uint16_t sizesArray[BM_MAX_NUM_OF_POOLS];
-+ t_Error err;
-+ uint16_t absoluteProfileId = 0;
-+ int i = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams,E_INVALID_HANDLE);
-+
-+ CHECK_INIT_PARAMETERS(p_FmVspEntry, CheckParams);
-+
-+ memset(&orderedArray, 0, sizeof(uint8_t) * FM_PORT_MAX_NUM_OF_EXT_POOLS);
-+ memset(&sizesArray, 0, sizeof(uint16_t) * BM_MAX_NUM_OF_POOLS);
-+
-+ err = FmSpBuildBufferStructure(&p_FmVspEntry->intContext,
-+ &p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent,
-+ &p_FmVspEntry->bufMargins,
-+ &p_FmVspEntry->bufferOffsets,
-+ &p_FmVspEntry->internalBufferOffset);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+
-+ err = CheckParamsGeneratedInternally(p_FmVspEntry);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+
-+ p_FmVspEntry->p_FmSpRegsBase =
-+ (struct fm_pcd_storage_profile_regs *)FmGetVSPBaseAddr(p_FmVspEntry->h_Fm);
-+ if (!p_FmVspEntry->p_FmSpRegsBase)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("impossible to initialize SpRegsBase"));
-+
-+ /* order external buffer pools in ascending order of buffer pool sizes */
-+ FmSpSetBufPoolsInAscOrderOfBufSizes(&(p_FmVspEntry->p_FmVspEntryDriverParams)->extBufPools,
-+ orderedArray,
-+ sizesArray);
-+
-+ p_FmVspEntry->extBufPools.numOfPoolsUsed =
-+ p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools.numOfPoolsUsed;
-+ for (i = 0; i < p_FmVspEntry->extBufPools.numOfPoolsUsed; i++)
-+ {
-+ p_FmVspEntry->extBufPools.extBufPool[i].id = orderedArray[i];
-+ p_FmVspEntry->extBufPools.extBufPool[i].size = sizesArray[orderedArray[i]];
-+ }
-+
-+ /* It is the user's responsibility to fill this in according to requirements */
-+ memset(&fm_vsp_params, 0, sizeof(struct fm_storage_profile_params));
-+ fm_vsp_params.dma_swap_data = p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData;
-+ fm_vsp_params.int_context_cache_attr = p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr;
-+ fm_vsp_params.header_cache_attr = p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr;
-+ fm_vsp_params.scatter_gather_cache_attr = p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr;
-+ fm_vsp_params.dma_write_optimize = p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize;
-+ fm_vsp_params.liodn_offset = p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset;
-+ fm_vsp_params.no_scather_gather = p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather;
-+
-+ if (p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion)
-+ {
-+ fm_vsp_params.buf_pool_depletion.buf_pool_depletion_enabled = TRUE;
-+ fm_vsp_params.buf_pool_depletion.pools_grp_mode_enable = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsGrpModeEnable;
-+ fm_vsp_params.buf_pool_depletion.num_pools = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->numOfPools;
-+ fm_vsp_params.buf_pool_depletion.pools_to_consider = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsToConsider;
-+ fm_vsp_params.buf_pool_depletion.single_pool_mode_enable = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->singlePoolModeEnable;
-+ fm_vsp_params.buf_pool_depletion.pools_to_consider_for_single_mode = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsToConsiderForSingleMode;
-+ fm_vsp_params.buf_pool_depletion.has_pfc_priorities = TRUE;
-+ fm_vsp_params.buf_pool_depletion.pfc_priorities_en = p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->pfcPrioritiesEn;
-+ }
-+ else
-+ fm_vsp_params.buf_pool_depletion.buf_pool_depletion_enabled = FALSE;
-+
-+ if (p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools)
-+ {
-+ fm_vsp_params.backup_pools.num_backup_pools = p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools->numOfBackupPools;
-+ fm_vsp_params.backup_pools.pool_ids = p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools->poolIds;
-+ }
-+ else
-+ fm_vsp_params.backup_pools.num_backup_pools = 0;
-+
-+ fm_vsp_params.fm_ext_pools.num_pools_used = p_FmVspEntry->extBufPools.numOfPoolsUsed;
-+ fm_vsp_params.fm_ext_pools.ext_buf_pool = (struct fman_ext_pool_params*)&p_FmVspEntry->extBufPools.extBufPool;
-+ fm_vsp_params.buf_margins = (struct fman_sp_buf_margins*)&p_FmVspEntry->bufMargins;
-+ fm_vsp_params.int_context = (struct fman_sp_int_context_data_copy*)&p_FmVspEntry->intContext;
-+
-+ /* no check on err - it was checked earlier */
-+ FmVSPGetAbsoluteProfileId(p_FmVspEntry->h_Fm,
-+ p_FmVspEntry->portType,
-+ p_FmVspEntry->portId,
-+ p_FmVspEntry->relativeProfileId,
-+ &absoluteProfileId);
-+
-+ ASSERT_COND(p_FmVspEntry->p_FmSpRegsBase);
-+ ASSERT_COND(fm_vsp_params.int_context);
-+ ASSERT_COND(fm_vsp_params.buf_margins);
-+ ASSERT_COND((absoluteProfileId <= FM_VSP_MAX_NUM_OF_ENTRIES));
-+
-+ /* Set all registers related to VSP */
-+ fman_vsp_init(p_FmVspEntry->p_FmSpRegsBase, absoluteProfileId, &fm_vsp_params,FM_PORT_MAX_NUM_OF_EXT_POOLS, BM_MAX_NUM_OF_POOLS, FM_MAX_NUM_OF_PFC_PRIORITIES);
-+
-+ p_FmVspEntry->absoluteSpId = absoluteProfileId;
-+
-+ if (p_FmVspEntry->p_FmVspEntryDriverParams)
-+ XX_Free(p_FmVspEntry->p_FmVspEntryDriverParams);
-+ p_FmVspEntry->p_FmVspEntryDriverParams = NULL;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_Free(t_Handle h_FmVsp)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry *)h_FmVsp;
-+ SANITY_CHECK_RETURN_ERROR(h_FmVsp, E_INVALID_HANDLE);
-+ XX_Free(p_FmVspEntry);
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigBufferPrefixContent(t_Handle h_FmVsp, t_FmBufferPrefixContent *p_FmBufferPrefixContent)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+
-+ memcpy(&p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent, p_FmBufferPrefixContent, sizeof(t_FmBufferPrefixContent));
-+ /* if dataAlign was not initialized by the user, fall back to the driver's default */
-+ if (!p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.dataAlign)
-+ p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.dataAlign = DEFAULT_FM_SP_bufferPrefixContent_dataAlign;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigDmaSwapData(t_Handle h_FmVsp, e_FmDmaSwapOption swapData)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData = swapData;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigDmaIcCacheAttr(t_Handle h_FmVsp, e_FmDmaCacheOption intContextCacheAttr)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr = intContextCacheAttr;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigDmaHdrAttr(t_Handle h_FmVsp, e_FmDmaCacheOption headerCacheAttr)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr = headerCacheAttr;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigDmaScatterGatherAttr(t_Handle h_FmVsp, e_FmDmaCacheOption scatterGatherCacheAttr)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr = scatterGatherCacheAttr;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigDmaWriteOptimize(t_Handle h_FmVsp, bool optimize)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize = optimize;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigNoScatherGather(t_Handle h_FmVsp, bool noScatherGather)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather = noScatherGather;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigPoolDepletion(t_Handle h_FmVsp, t_FmBufPoolDepletion *p_BufPoolDepletion)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmVsp, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_BufPoolDepletion, E_INVALID_HANDLE);
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion = (t_FmBufPoolDepletion *)XX_Malloc(sizeof(t_FmBufPoolDepletion));
-+ if (!p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BufPoolDepletion allocation failed"));
-+ memcpy(p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion, p_BufPoolDepletion, sizeof(t_FmBufPoolDepletion));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_VSP_ConfigBackupPools(t_Handle h_FmVsp, t_FmBackupBmPools *p_BackupBmPools)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmVsp, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_BackupBmPools, E_INVALID_HANDLE);
-+
-+ p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools = (t_FmBackupBmPools *)XX_Malloc(sizeof(t_FmBackupBmPools));
-+ if (!p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BackupBmPools allocation failed"));
-+ memcpy(p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools, p_BackupBmPools, sizeof(t_FmBackupBmPools));
-+
-+ return E_OK;
-+}
-+
-+uint32_t FM_VSP_GetBufferDataOffset(t_Handle h_FmVsp)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, 0);
-+
-+ return p_FmVspEntry->bufferOffsets.dataOffset;
-+}
-+
-+uint8_t * FM_VSP_GetBufferICInfo(t_Handle h_FmVsp, char *p_Data)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
-+
-+ if (p_FmVspEntry->bufferOffsets.pcdInfoOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (uint8_t *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.pcdInfoOffset);
-+}
-+
-+t_FmPrsResult * FM_VSP_GetBufferPrsResult(t_Handle h_FmVsp, char *p_Data)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
-+
-+ if (p_FmVspEntry->bufferOffsets.prsResultOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (t_FmPrsResult *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.prsResultOffset);
-+}
-+
-+uint64_t * FM_VSP_GetBufferTimeStamp(t_Handle h_FmVsp, char *p_Data)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
-+
-+ if (p_FmVspEntry->bufferOffsets.timeStampOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (uint64_t *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.timeStampOffset);
-+}
-+
-+uint8_t * FM_VSP_GetBufferHashResult(t_Handle h_FmVsp, char *p_Data)
-+{
-+ t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry*)h_FmVsp;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmVspEntry, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(!p_FmVspEntry->p_FmVspEntryDriverParams, E_INVALID_STATE, NULL);
-+
-+ if (p_FmVspEntry->bufferOffsets.hashResultOffset == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return (uint8_t *)PTR_MOVE(p_Data, p_FmVspEntry->bufferOffsets.hashResultOffset);
-+}
-+
-+#endif /* (DPAA_VERSION >= 11) */
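A minimal usage sketch of the VSP API defined above, based only on the calls and fields visible in this file; the pool id, buffer size and h_fm handle are placeholders, and the port/LIODN fields a real caller must also fill are omitted:

#include "string_ext.h"
#include "fm_vsp_ext.h"

/* Sketch only: configure, tweak and initialize one storage profile. */
static t_Handle vsp_setup_example(t_Handle h_fm)
{
	t_FmVspParams params;
	t_Handle h_vsp;

	memset(&params, 0, sizeof(params));
	params.h_Fm = h_fm;
	params.relativeProfileId = 0;
	params.extBufPools.numOfPoolsUsed = 1;
	params.extBufPools.extBufPool[0].id = 3;	/* BMan pool id (placeholder) */
	params.extBufPools.extBufPool[0].size = 2048;	/* bytes (placeholder) */
	/* portParams and liodnOffset must also be set for a real port */

	h_vsp = FM_VSP_Config(&params);
	if (!h_vsp)
		return NULL;

	/* optional FM_VSP_Config* overrides go before FM_VSP_Init() */
	FM_VSP_ConfigNoScatherGather(h_vsp, TRUE);

	if (FM_VSP_Init(h_vsp) != E_OK) {
		FM_VSP_Free(h_vsp);
		return NULL;
	}
	return h_vsp;
}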
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h
-@@ -0,0 +1,85 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_sp.h
-+
-+ @Description FM SP ...
-+*//***************************************************************************/
-+#ifndef __FM_SP_H
-+#define __FM_SP_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+
-+#include "fm_sp_common.h"
-+#include "fm_common.h"
-+
-+
-+#define __ERR_MODULE__ MODULE_FM_SP
-+
-+typedef struct {
-+ t_FmBufferPrefixContent bufferPrefixContent;
-+ e_FmDmaSwapOption dmaSwapData;
-+ e_FmDmaCacheOption dmaIntContextCacheAttr;
-+ e_FmDmaCacheOption dmaHeaderCacheAttr;
-+ e_FmDmaCacheOption dmaScatterGatherCacheAttr;
-+ bool dmaWriteOptimize;
-+ uint16_t liodnOffset;
-+ bool noScatherGather;
-+ t_FmBufPoolDepletion *p_BufPoolDepletion;
-+ t_FmBackupBmPools *p_BackupBmPools;
-+ t_FmExtPools extBufPools;
-+} t_FmVspEntryDriverParams;
-+
-+typedef struct {
-+ bool valid;
-+ volatile bool lock;
-+ uint8_t pointedOwners;
-+ uint16_t absoluteSpId;
-+ uint8_t internalBufferOffset;
-+ t_FmSpBufMargins bufMargins;
-+ t_FmSpIntContextDataCopy intContext;
-+ t_FmSpBufferOffsets bufferOffsets;
-+ t_Handle h_Fm;
-+ e_FmPortType portType; /**< Port type */
-+ uint8_t portId; /**< Port Id - relative to type */
-+ uint8_t relativeProfileId;
-+ struct fm_pcd_storage_profile_regs *p_FmSpRegsBase;
-+ t_FmExtPools extBufPools;
-+ t_FmVspEntryDriverParams *p_FmVspEntryDriverParams;
-+} t_FmVspEntry;
-+
-+
-+#endif /* __FM_SP_H */
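
For orientation, the accessor functions removed above all follow one pattern: once a VSP entry has been initialized (the getters require p_FmVspEntryDriverParams to be NULL), the stored bufferOffsets locate optional fields inside the external frame buffer via PTR_MOVE. The sketch below is illustrative only and is not part of the patch; it assumes the sdk_fman headers, and the caller name inspect_rx_buffer and the buffer pointer rx_data are hypothetical.

/* Illustrative sketch (not part of the patch): combining the
 * FM_VSP_GetBuffer* accessors defined above on a received frame buffer. */
static void inspect_rx_buffer(t_Handle h_vsp, char *rx_data)
{
    uint32_t data_off = FM_VSP_GetBufferDataOffset(h_vsp);
    t_FmPrsResult *prs = FM_VSP_GetBufferPrsResult(h_vsp, rx_data);
    uint64_t *ts = FM_VSP_GetBufferTimeStamp(h_vsp, rx_data);

    /* The pointer accessors return NULL when the corresponding field
     * was not enabled in the buffer-prefix configuration. */
    if (prs && ts) {
        /* ... consume parse result and timestamp ... */
    }
    (void)data_off;
}
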
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c
-@@ -0,0 +1,197 @@
-+/*
-+ * Copyright 2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "fsl_fman_sp.h"
-+
-+
-+uint32_t fman_vsp_get_statistics(struct fm_pcd_storage_profile_regs *regs,
-+ uint16_t index)
-+{
-+ struct fm_pcd_storage_profile_regs *sp_regs;
-+ sp_regs = &regs[index];
-+ return ioread32be(&sp_regs->fm_sp_acnt);
-+}
-+
-+void fman_vsp_set_statistics(struct fm_pcd_storage_profile_regs *regs,
-+ uint16_t index, uint32_t value)
-+{
-+ struct fm_pcd_storage_profile_regs *sp_regs;
-+ sp_regs = &regs[index];
-+ iowrite32be(value, &sp_regs->fm_sp_acnt);
-+}
-+
-+void fman_vsp_defconfig(struct fm_storage_profile_params *cfg)
-+{
-+ cfg->dma_swap_data =
-+ DEFAULT_FMAN_SP_DMA_SWAP_DATA;
-+ cfg->int_context_cache_attr =
-+ DEFAULT_FMAN_SP_DMA_INT_CONTEXT_CACHE_ATTR;
-+ cfg->header_cache_attr =
-+ DEFAULT_FMAN_SP_DMA_HEADER_CACHE_ATTR;
-+ cfg->scatter_gather_cache_attr =
-+ DEFAULT_FMAN_SP_DMA_SCATTER_GATHER_CACHE_ATTR;
-+ cfg->dma_write_optimize =
-+ DEFAULT_FMAN_SP_DMA_WRITE_OPTIMIZE;
-+ cfg->no_scather_gather =
-+ DEFAULT_FMAN_SP_NO_SCATTER_GATHER;
-+}
-+
-+static inline uint32_t calc_vec_dep(int max_pools, bool *pools,
-+ struct fman_ext_pools *ext_buf_pools, uint32_t mask)
-+{
-+ int i, j;
-+ uint32_t vector = 0;
-+ for (i = 0; i < max_pools; i++)
-+ if (pools[i])
-+ for (j = 0; j < ext_buf_pools->num_pools_used; j++)
-+ if (i == ext_buf_pools->ext_buf_pool[j].id) {
-+ vector |= mask >> j;
-+ break;
-+ }
-+ return vector;
-+}
-+
-+void fman_vsp_init(struct fm_pcd_storage_profile_regs *regs,
-+ uint16_t index, struct fm_storage_profile_params *fm_vsp_params,
-+ int port_max_num_of_ext_pools, int bm_max_num_of_pools,
-+ int max_num_of_pfc_priorities)
-+{
-+ int i = 0, j = 0;
-+ struct fm_pcd_storage_profile_regs *sp_regs;
-+ uint32_t tmp_reg, vector;
-+ struct fman_ext_pools *ext_buf_pools = &fm_vsp_params->fm_ext_pools;
-+ struct fman_buf_pool_depletion *buf_pool_depletion =
-+ &fm_vsp_params->buf_pool_depletion;
-+ struct fman_backup_bm_pools *backup_pools =
-+ &fm_vsp_params->backup_pools;
-+ struct fman_sp_int_context_data_copy *int_context_data_copy =
-+ fm_vsp_params->int_context;
-+ struct fman_sp_buf_margins *external_buffer_margins =
-+ fm_vsp_params->buf_margins;
-+ bool no_scather_gather = fm_vsp_params->no_scather_gather;
-+ uint16_t liodn_offset = fm_vsp_params->liodn_offset;
-+
-+ sp_regs = &regs[index];
-+
-+ /* fill external buffers manager pool information register*/
-+ for (i = 0; i < ext_buf_pools->num_pools_used; i++) {
-+ tmp_reg = FMAN_SP_EXT_BUF_POOL_VALID |
-+ FMAN_SP_EXT_BUF_POOL_EN_COUNTER;
-+ tmp_reg |= ((uint32_t)ext_buf_pools->ext_buf_pool[i].id <<
-+ FMAN_SP_EXT_BUF_POOL_ID_SHIFT);
-+ tmp_reg |= ext_buf_pools->ext_buf_pool[i].size;
-+ /* functionality available only for some derivatives
-+ (limited by config) */
-+ for (j = 0; j < backup_pools->num_backup_pools; j++)
-+ if (ext_buf_pools->ext_buf_pool[i].id ==
-+ backup_pools->pool_ids[j]) {
-+ tmp_reg |= FMAN_SP_EXT_BUF_POOL_BACKUP;
-+ break;
-+ }
-+ iowrite32be(tmp_reg, &sp_regs->fm_sp_ebmpi[i]);
-+ }
-+
-+ /* clear unused pools */
-+ for (i = ext_buf_pools->num_pools_used;
-+ i < port_max_num_of_ext_pools; i++)
-+ iowrite32be(0, &sp_regs->fm_sp_ebmpi[i]);
-+
-+ /* fill pool depletion register*/
-+ tmp_reg = 0;
-+ if (buf_pool_depletion->buf_pool_depletion_enabled && buf_pool_depletion->pools_grp_mode_enable) {
-+ /* calculate vector for number of pools depletion */
-+ vector = calc_vec_dep(bm_max_num_of_pools, buf_pool_depletion->
-+ pools_to_consider, ext_buf_pools, 0x80000000);
-+
-+ /* configure num of pools and vector for number of pools mode */
-+ tmp_reg |= (((uint32_t)buf_pool_depletion->num_pools - 1) <<
-+ FMAN_SP_POOL_DEP_NUM_OF_POOLS_SHIFT);
-+ tmp_reg |= vector;
-+ }
-+
-+ if (buf_pool_depletion->buf_pool_depletion_enabled && buf_pool_depletion->single_pool_mode_enable) {
-+ /* calculate vector for number of pools depletion */
-+ vector = calc_vec_dep(bm_max_num_of_pools, buf_pool_depletion->
-+ pools_to_consider_for_single_mode,
-+ ext_buf_pools, 0x00000080);
-+
-+ /* configure num of pools and vector for number of pools mode */
-+ tmp_reg |= vector;
-+ }
-+
-+ /* fill QbbPEV */
-+ if (buf_pool_depletion->buf_pool_depletion_enabled) {
-+ vector = 0;
-+ for (i = 0; i < max_num_of_pfc_priorities; i++)
-+ if (buf_pool_depletion->pfc_priorities_en[i] == TRUE)
-+ vector |= 0x00000100 << i;
-+ tmp_reg |= vector;
-+ }
-+ iowrite32be(tmp_reg, &sp_regs->fm_sp_mpd);
-+
-+ /* fill dma attributes register */
-+ tmp_reg = 0;
-+ tmp_reg |= (uint32_t)fm_vsp_params->dma_swap_data <<
-+ FMAN_SP_DMA_ATTR_SWP_SHIFT;
-+ tmp_reg |= (uint32_t)fm_vsp_params->int_context_cache_attr <<
-+ FMAN_SP_DMA_ATTR_IC_CACHE_SHIFT;
-+ tmp_reg |= (uint32_t)fm_vsp_params->header_cache_attr <<
-+ FMAN_SP_DMA_ATTR_HDR_CACHE_SHIFT;
-+ tmp_reg |= (uint32_t)fm_vsp_params->scatter_gather_cache_attr <<
-+ FMAN_SP_DMA_ATTR_SG_CACHE_SHIFT;
-+ if (fm_vsp_params->dma_write_optimize)
-+ tmp_reg |= FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE;
-+ iowrite32be(tmp_reg, &sp_regs->fm_sp_da);
-+
-+ /* IC parameters - fill internal context parameters register */
-+ tmp_reg = 0;
-+ tmp_reg |= (((uint32_t)int_context_data_copy->ext_buf_offset/
-+ OFFSET_UNITS) << FMAN_SP_IC_TO_EXT_SHIFT);
-+ tmp_reg |= (((uint32_t)int_context_data_copy->int_context_offset/
-+ OFFSET_UNITS) << FMAN_SP_IC_FROM_INT_SHIFT);
-+ tmp_reg |= (((uint32_t)int_context_data_copy->size/OFFSET_UNITS) <<
-+ FMAN_SP_IC_SIZE_SHIFT);
-+ iowrite32be(tmp_reg, &sp_regs->fm_sp_icp);
-+
-+ /* buffer margins - fill external buffer margins register */
-+ tmp_reg = 0;
-+ tmp_reg |= (((uint32_t)external_buffer_margins->start_margins) <<
-+ FMAN_SP_EXT_BUF_MARG_START_SHIFT);
-+ tmp_reg |= (((uint32_t)external_buffer_margins->end_margins) <<
-+ FMAN_SP_EXT_BUF_MARG_END_SHIFT);
-+ if (no_scather_gather)
-+ tmp_reg |= FMAN_SP_SG_DISABLE;
-+ iowrite32be(tmp_reg, &sp_regs->fm_sp_ebm);
-+
-+ /* buffer margins - fill spliodn register */
-+ iowrite32be(liodn_offset, &sp_regs->fm_sp_spliodn);
-+}
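
As a side note on the depletion logic removed above: calc_vec_dep() walks the BMan pool ids flagged for depletion, finds each id's position j in the port's external-pool list, and sets bit (mask >> j), so with the group-mode mask 0x80000000 list position j maps to bit 31 - j. The standalone snippet below reproduces that computation with made-up values; it is an illustration under those assumptions, not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical port configuration: three external pools with
	 * BMan ids 4, 7 and 9; depletion is tracked for pools 7 and 9. */
	const uint8_t pool_ids[3] = { 4, 7, 9 };
	bool consider[64] = { false };
	uint32_t mask = 0x80000000, vector = 0;
	int i, j;

	consider[7] = consider[9] = true;

	for (i = 0; i < 64; i++)
		if (consider[i])
			for (j = 0; j < 3; j++)
				if (i == pool_ids[j]) {
					vector |= mask >> j; /* bit 31 - j */
					break;
				}

	printf("vector = 0x%08x\n", (unsigned)vector); /* prints 0x60000000 */
	return 0;
}
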
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.c
-@@ -0,0 +1,5216 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm.c
-+
-+ @Description FM driver routines implementation.
-+*//***************************************************************************/
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "xx_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+#include "debug_ext.h"
-+#include "fm_muram_ext.h"
-+#include <linux/math64.h>
-+
-+#include "fm_common.h"
-+#include "fm_ipc.h"
-+#include "fm.h"
-+#ifndef CONFIG_FMAN_ARM
-+#include <linux/fsl/svr.h>
-+#endif
-+#include "fsl_fman.h"
-+
-+
-+/****************************************/
-+/* static functions */
-+/****************************************/
-+
-+static volatile bool blockingFlag = FALSE;
-+static void IpcMsgCompletionCB(t_Handle h_Fm,
-+ uint8_t *p_Msg,
-+ uint8_t *p_Reply,
-+ uint32_t replyLength,
-+ t_Error status)
-+{
-+ UNUSED(h_Fm);UNUSED(p_Msg);UNUSED(p_Reply);UNUSED(replyLength);UNUSED(status);
-+ blockingFlag = FALSE;
-+}
-+
-+static void FreeInitResources(t_Fm *p_Fm)
-+{
-+ if (p_Fm->camBaseAddr)
-+ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->camBaseAddr));
-+ if (p_Fm->fifoBaseAddr)
-+ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->fifoBaseAddr));
-+ if (p_Fm->resAddr)
-+ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->resAddr));
-+}
-+
-+static bool IsFmanCtrlCodeLoaded(t_Fm *p_Fm)
-+{
-+ t_FMIramRegs *p_Iram;
-+
-+ ASSERT_COND(p_Fm);
-+ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
-+
-+ return (bool)!!(GET_UINT32(p_Iram->iready) & IRAM_READY);
-+}
-+
-+static t_Error CheckFmParameters(t_Fm *p_Fm)
-+{
-+ if (IsFmanCtrlCodeLoaded(p_Fm) && !p_Fm->resetOnInit)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Old FMan CTRL code is loaded; FM must be reset!"));
-+#if (DPAA_VERSION < 11)
-+ if (!p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats ||
-+ (p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats > DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("axiDbgNumOfBeats has to be in the range 1 - %d", DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS));
-+#endif /* (DPAA_VERSION < 11) */
-+ if (p_Fm->p_FmDriverParam->dma_cam_num_of_entries % DMA_CAM_UNITS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_cam_num_of_entries has to be divisble by %d", DMA_CAM_UNITS));
-+// if (!p_Fm->p_FmDriverParam->dma_cam_num_of_entries || (p_Fm->p_FmDriverParam->dma_cam_num_of_entries > DMA_MODE_MAX_CAM_NUM_OF_ENTRIES))
-+// RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_cam_num_of_entries has to be in the range 1 - %d", DMA_MODE_MAX_CAM_NUM_OF_ENTRIES));
-+ if (p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer > DMA_THRESH_MAX_COMMQ)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_comm_qtsh_asrt_emer can not be larger than %d", DMA_THRESH_MAX_COMMQ));
-+ if (p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer > DMA_THRESH_MAX_COMMQ)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_comm_qtsh_clr_emer can not be larger than %d", DMA_THRESH_MAX_COMMQ));
-+ if (p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer >= p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_comm_qtsh_clr_emer must be smaller than dma_comm_qtsh_asrt_emer"));
-+#if (DPAA_VERSION < 11)
-+ if (p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer > DMA_THRESH_MAX_BUF)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_read_buf_tsh_asrt_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
-+ if (p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer > DMA_THRESH_MAX_BUF)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_read_buf_tsh_clr_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
-+ if (p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer >= p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_read_buf_tsh_clr_emer must be smaller than dma_read_buf_tsh_asrt_emer"));
-+ if (p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer > DMA_THRESH_MAX_BUF)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_write_buf_tsh_asrt_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
-+ if (p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer > DMA_THRESH_MAX_BUF)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_write_buf_tsh_clr_emer can not be larger than %d", DMA_THRESH_MAX_BUF));
-+ if (p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer >= p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_write_buf_tsh_clr_emer must be smaller than dma_write_buf_tsh_asrt_emer"));
-+#else /* (DPAA_VERSION >= 11) */
-+ if ((p_Fm->p_FmDriverParam->dma_dbg_cnt_mode == E_FMAN_DMA_DBG_CNT_INT_READ_EM)||
-+ (p_Fm->p_FmDriverParam->dma_dbg_cnt_mode == E_FMAN_DMA_DBG_CNT_INT_WRITE_EM) ||
-+ (p_Fm->p_FmDriverParam->dma_dbg_cnt_mode == E_FMAN_DMA_DBG_CNT_RAW_WAR_PROT))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_dbg_cnt_mode value not supported by this integration."));
-+ if ((p_Fm->p_FmDriverParam->dma_emergency_bus_select == FM_DMA_MURAM_READ_EMERGENCY)||
-+ (p_Fm->p_FmDriverParam->dma_emergency_bus_select == FM_DMA_MURAM_WRITE_EMERGENCY))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("emergencyBusSelect value not supported by this integration."));
-+ if (p_Fm->p_FmDriverParam->dma_stop_on_bus_error)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_stop_on_bus_error not supported by this integration."));
-+#ifdef FM_AID_MODE_NO_TNUM_SW005
-+ if (p_Fm->p_FmDriverParam->dma_aid_mode != E_FMAN_DMA_AID_OUT_PORT_ID)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_aid_mode not supported by this integration."));
-+#endif /* FM_AID_MODE_NO_TNUM_SW005 */
-+ if (p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dma_axi_dbg_num_of_beats not supported by this integration."));
-+#endif /* (DPAA_VERSION < 11) */
-+
-+ if (!p_Fm->p_FmStateStruct->fmClkFreq)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fmClkFreq must be set."));
-+ if (USEC_TO_CLK(p_Fm->p_FmDriverParam->dma_watchdog, p_Fm->p_FmStateStruct->fmClkFreq) > DMA_MAX_WATCHDOG)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("dma_watchdog depends on FM clock. dma_watchdog(in microseconds) * clk (in Mhz), may not exceed 0x08x", DMA_MAX_WATCHDOG));
-+
-+#if (DPAA_VERSION >= 11)
-+ if ((p_Fm->partVSPBase + p_Fm->partNumOfVSPs) > FM_VSP_MAX_NUM_OF_ENTRIES)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("partVSPBase+partNumOfVSPs out of range!!!"));
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (p_Fm->p_FmStateStruct->totalFifoSize % BMI_FIFO_UNITS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("totalFifoSize number has to be divisible by %d", BMI_FIFO_UNITS));
-+ if (!p_Fm->p_FmStateStruct->totalFifoSize ||
-+ (p_Fm->p_FmStateStruct->totalFifoSize > BMI_MAX_FIFO_SIZE))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("totalFifoSize (currently defined as %d) has to be in the range of 256 to %d",
-+ p_Fm->p_FmStateStruct->totalFifoSize,
-+ BMI_MAX_FIFO_SIZE));
-+ if (!p_Fm->p_FmStateStruct->totalNumOfTasks ||
-+ (p_Fm->p_FmStateStruct->totalNumOfTasks > BMI_MAX_NUM_OF_TASKS))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("totalNumOfTasks number has to be in the range 1 - %d", BMI_MAX_NUM_OF_TASKS));
-+
-+#ifdef FM_HAS_TOTAL_DMAS
-+ if (!p_Fm->p_FmStateStruct->maxNumOfOpenDmas ||
-+ (p_Fm->p_FmStateStruct->maxNumOfOpenDmas > BMI_MAX_NUM_OF_DMAS))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxNumOfOpenDmas number has to be in the range 1 - %d", BMI_MAX_NUM_OF_DMAS));
-+#endif /* FM_HAS_TOTAL_DMAS */
-+
-+ if (p_Fm->p_FmDriverParam->disp_limit_tsh > FPM_MAX_DISP_LIMIT)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("disp_limit_tsh can't be greater than %d", FPM_MAX_DISP_LIMIT));
-+
-+ if (!p_Fm->f_Exception)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
-+ if (!p_Fm->f_BusError)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided"));
-+
-+#ifdef FM_NO_WATCHDOG
-+ if ((p_Fm->p_FmStateStruct->revInfo.majorRev == 2) &&
-+ (p_Fm->p_FmDriverParam->dma_watchdog))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("watchdog!"));
-+#endif /* FM_NO_WATCHDOG */
-+
-+#ifdef FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008
-+ if ((p_Fm->p_FmStateStruct->revInfo.majorRev < 6) &&
-+ (p_Fm->p_FmDriverParam->halt_on_unrecov_ecc_err))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("HaltOnEccError!"));
-+#endif /* FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008 */
-+
-+#ifdef FM_NO_TNUM_AGING
-+ if ((p_Fm->p_FmStateStruct->revInfo.majorRev != 4) &&
-+ (p_Fm->p_FmStateStruct->revInfo.majorRev < 6))
-+ if (p_Fm->p_FmDriverParam->tnum_aging_period)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Tnum aging!"));
-+#endif /* FM_NO_TNUM_AGING */
-+
-+ /* check that user did not set revision-dependent exceptions */
-+#ifdef FM_NO_DISPATCH_RAM_ECC
-+ if ((p_Fm->p_FmStateStruct->revInfo.majorRev != 4) &&
-+ (p_Fm->p_FmStateStruct->revInfo.majorRev < 6))
-+ if (p_Fm->userSetExceptions & FM_EX_BMI_DISPATCH_RAM_ECC)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("exception e_FM_EX_BMI_DISPATCH_RAM_ECC!"));
-+#endif /* FM_NO_DISPATCH_RAM_ECC */
-+
-+#ifdef FM_QMI_NO_ECC_EXCEPTIONS
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev == 4)
-+ if (p_Fm->userSetExceptions & (FM_EX_QMI_SINGLE_ECC | FM_EX_QMI_DOUBLE_ECC))
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("exception e_FM_EX_QMI_SINGLE_ECC/e_FM_EX_QMI_DOUBLE_ECC!"));
-+#endif /* FM_QMI_NO_ECC_EXCEPTIONS */
-+
-+#ifdef FM_QMI_NO_SINGLE_ECC_EXCEPTION
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ if (p_Fm->userSetExceptions & FM_EX_QMI_SINGLE_ECC)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("exception e_FM_EX_QMI_SINGLE_ECC!"));
-+#endif /* FM_QMI_NO_SINGLE_ECC_EXCEPTION */
-+
-+ return E_OK;
-+}
-+
-+
-+static void SendIpcIsr(t_Fm *p_Fm, uint32_t macEvent, uint32_t pendingReg)
-+{
-+ ASSERT_COND(p_Fm->guestId == NCSW_MASTER_ID);
-+
-+ if (p_Fm->intrMng[macEvent].guestId == NCSW_MASTER_ID)
-+ p_Fm->intrMng[macEvent].f_Isr(p_Fm->intrMng[macEvent].h_SrcHandle);
-+
-+ /* If the MAC is running on a guest partition and we have an IPC session with it,
-+ we inform it about the event through IPC; otherwise, we ignore the event. */
-+ else if (p_Fm->h_IpcSessions[p_Fm->intrMng[macEvent].guestId])
-+ {
-+ t_Error err;
-+ t_FmIpcIsr fmIpcIsr;
-+ t_FmIpcMsg msg;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_GUEST_ISR;
-+ fmIpcIsr.pendingReg = pendingReg;
-+ fmIpcIsr.boolErr = FALSE;
-+ memcpy(msg.msgBody, &fmIpcIsr, sizeof(fmIpcIsr));
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[p_Fm->intrMng[macEvent].guestId],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(fmIpcIsr),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ }
-+ else
-+ DBG(TRACE, ("FM Guest mode, without IPC - can't call ISR!"));
-+}
-+
-+static void BmiErrEvent(t_Fm *p_Fm)
-+{
-+ uint32_t event;
-+ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
-+
-+
-+ event = fman_get_bmi_err_event(bmi_rg);
-+
-+ if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_STORAGE_PROFILE_ECC);
-+ if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_LIST_RAM_ECC);
-+ if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_STATISTICS_RAM_ECC);
-+ if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_DISPATCH_RAM_ECC);
-+}
-+
-+static void QmiErrEvent(t_Fm *p_Fm)
-+{
-+ uint32_t event;
-+ struct fman_qmi_regs *qmi_rg = p_Fm->p_FmQmiRegs;
-+
-+ event = fman_get_qmi_err_event(qmi_rg);
-+
-+ if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_DOUBLE_ECC);
-+ if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
-+}
-+
-+static void DmaErrEvent(t_Fm *p_Fm)
-+{
-+ uint32_t status, com_id;
-+ uint8_t tnum;
-+ uint8_t hardwarePortId;
-+ uint8_t relativePortId;
-+ uint16_t liodn;
-+ struct fman_dma_regs *dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ status = fman_get_dma_err_event(dma_rg);
-+
-+ if (status & DMA_STATUS_BUS_ERR)
-+ {
-+ com_id = fman_get_dma_com_id(dma_rg);
-+ hardwarePortId = (uint8_t)(((com_id & DMA_TRANSFER_PORTID_MASK) >> DMA_TRANSFER_PORTID_SHIFT));
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+ HW_PORT_ID_TO_SW_PORT_ID(relativePortId, hardwarePortId);
-+ tnum = (uint8_t)((com_id & DMA_TRANSFER_TNUM_MASK) >> DMA_TRANSFER_TNUM_SHIFT);
-+ liodn = (uint16_t)(com_id & DMA_TRANSFER_LIODN_MASK);
-+ ASSERT_COND(p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] != e_FM_PORT_TYPE_DUMMY);
-+ p_Fm->f_BusError(p_Fm->h_App,
-+ p_Fm->p_FmStateStruct->portsTypes[hardwarePortId],
-+ relativePortId,
-+ fman_get_dma_addr(dma_rg),
-+ tnum,
-+ liodn);
-+ }
-+ if (status & DMA_STATUS_FM_SPDAT_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_SINGLE_PORT_ECC);
-+ if (status & DMA_STATUS_READ_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_READ_ECC);
-+ if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_SYSTEM_WRITE_ECC);
-+ if (status & DMA_STATUS_FM_WRITE_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_FM_WRITE_ECC);
-+ }
-+
-+static void FpmErrEvent(t_Fm *p_Fm)
-+{
-+ uint32_t event;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ event = fman_get_fpm_err_event(fpm_rg);
-+
-+ if ((event & FPM_EV_MASK_DOUBLE_ECC) && (event & FPM_EV_MASK_DOUBLE_ECC_EN))
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_DOUBLE_ECC);
-+ if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_STALL_ON_TASKS);
-+ if ((event & FPM_EV_MASK_SINGLE_ECC) && (event & FPM_EV_MASK_SINGLE_ECC_EN))
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_SINGLE_ECC);
-+}
-+
-+static void MuramErrIntr(t_Fm *p_Fm)
-+{
-+ uint32_t event;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ event = fman_get_muram_err_event(fpm_rg);
-+
-+ if (event & FPM_RAM_MURAM_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_MURAM_ECC);
-+}
-+
-+static void IramErrIntr(t_Fm *p_Fm)
-+{
-+ uint32_t event;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ event = fman_get_iram_err_event(fpm_rg);
-+
-+ if (event & FPM_RAM_IRAM_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_IRAM_ECC);
-+}
-+
-+static void QmiEvent(t_Fm *p_Fm)
-+{
-+ uint32_t event;
-+ struct fman_qmi_regs *qmi_rg = p_Fm->p_FmQmiRegs;
-+
-+ event = fman_get_qmi_event(qmi_rg);
-+
-+ if (event & QMI_INTR_EN_SINGLE_ECC)
-+ p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_SINGLE_ECC);
-+}
-+
-+static void UnimplementedIsr(t_Handle h_Arg)
-+{
-+ UNUSED(h_Arg);
-+
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented ISR!"));
-+}
-+
-+static void UnimplementedFmanCtrlIsr(t_Handle h_Arg, uint32_t event)
-+{
-+ UNUSED(h_Arg); UNUSED(event);
-+
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented FmCtl ISR!"));
-+}
-+
-+static void EnableTimeStamp(t_Fm *p_Fm)
-+{
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ ASSERT_COND(p_Fm->p_FmStateStruct);
-+ ASSERT_COND(p_Fm->p_FmStateStruct->count1MicroBit);
-+
-+ fman_enable_time_stamp(fpm_rg, p_Fm->p_FmStateStruct->count1MicroBit, p_Fm->p_FmStateStruct->fmClkFreq);
-+
-+ p_Fm->p_FmStateStruct->enabledTimeStamp = TRUE;
-+}
-+
-+static t_Error ClearIRam(t_Fm *p_Fm)
-+{
-+ t_FMIramRegs *p_Iram;
-+ int i;
-+ int iram_size;
-+
-+ ASSERT_COND(p_Fm);
-+ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
-+ iram_size = FM_IRAM_SIZE(p_Fm->p_FmStateStruct->revInfo.majorRev,p_Fm->p_FmStateStruct->revInfo.minorRev);
-+
-+ /* Enable the auto-increment */
-+ WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE);
-+ while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ;
-+
-+ for (i=0; i < (iram_size/4); i++)
-+ WRITE_UINT32(p_Iram->idata, 0xffffffff);
-+
-+ WRITE_UINT32(p_Iram->iadd, iram_size - 4);
-+ CORE_MemoryBarrier();
-+ while (GET_UINT32(p_Iram->idata) != 0xffffffff) ;
-+
-+ return E_OK;
-+}
-+
-+static t_Error LoadFmanCtrlCode(t_Fm *p_Fm)
-+{
-+ t_FMIramRegs *p_Iram;
-+ int i;
-+ uint32_t tmp;
-+ uint8_t compTo16;
-+
-+ ASSERT_COND(p_Fm);
-+ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
-+
-+ /* Enable the auto-increment */
-+ WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE);
-+ while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ;
-+
-+ for (i=0; i < (p_Fm->firmware.size / 4); i++)
-+ WRITE_UINT32(p_Iram->idata, p_Fm->firmware.p_Code[i]);
-+
-+ compTo16 = (uint8_t)(p_Fm->firmware.size % 16);
-+ if (compTo16)
-+ for (i=0; i < ((16-compTo16) / 4); i++)
-+ WRITE_UINT32(p_Iram->idata, 0xffffffff);
-+
-+ WRITE_UINT32(p_Iram->iadd,p_Fm->firmware.size-4);
-+ while (GET_UINT32(p_Iram->iadd) != (p_Fm->firmware.size-4)) ;
-+
-+ /* verify that writing has completed */
-+ while (GET_UINT32(p_Iram->idata) != p_Fm->firmware.p_Code[(p_Fm->firmware.size / 4)-1]) ;
-+
-+ if (p_Fm->fwVerify)
-+ {
-+ WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE);
-+ while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ;
-+ for (i=0; i < (p_Fm->firmware.size / 4); i++)
-+ {
-+ tmp = GET_UINT32(p_Iram->idata);
-+ if (tmp != p_Fm->firmware.p_Code[i])
-+ RETURN_ERROR(MAJOR, E_WRITE_FAILED,
-+ ("UCode write error : write 0x%x, read 0x%x",
-+ p_Fm->firmware.p_Code[i],tmp));
-+ }
-+ WRITE_UINT32(p_Iram->iadd, 0x0);
-+ }
-+
-+ /* Enable patch from IRAM */
-+ WRITE_UINT32(p_Iram->iready, IRAM_READY);
-+ XX_UDelay(1000);
-+
-+ DBG(INFO, ("FMan-Controller code (ver %d.%d.%d) loaded to IRAM.",
-+ ((uint16_t *)p_Fm->firmware.p_Code)[2],
-+ ((uint8_t *)p_Fm->firmware.p_Code)[6],
-+ ((uint8_t *)p_Fm->firmware.p_Code)[7]));
-+
-+ return E_OK;
-+}
-+
-+#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
-+static t_Error FwNotResetErratumBugzilla6173WA(t_Fm *p_Fm)
-+{
-+ t_FMIramRegs *p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
-+ uint32_t tmpReg;
-+ uint32_t savedSpliodn[63];
-+
-+ /* write to IRAM first location the debug instruction */
-+ WRITE_UINT32(p_Iram->iadd, 0);
-+ while (GET_UINT32(p_Iram->iadd) != 0) ;
-+ WRITE_UINT32(p_Iram->idata, FM_FW_DEBUG_INSTRUCTION);
-+
-+ WRITE_UINT32(p_Iram->iadd, 0);
-+ while (GET_UINT32(p_Iram->iadd) != 0) ;
-+ while (GET_UINT32(p_Iram->idata) != FM_FW_DEBUG_INSTRUCTION) ;
-+
-+ /* Enable patch from IRAM */
-+ WRITE_UINT32(p_Iram->iready, IRAM_READY);
-+ CORE_MemoryBarrier();
-+ XX_UDelay(100);
-+ IO2MemCpy32((uint8_t *)savedSpliodn,
-+ (uint8_t *)p_Fm->p_FmBmiRegs->fmbm_spliodn,
-+ 63*sizeof(uint32_t));
-+
-+ /* reset FMAN */
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET);
-+ CORE_MemoryBarrier();
-+ XX_UDelay(100);
-+
-+ /* verify breakpoint debug status register */
-+ tmpReg = GET_UINT32(*(uint32_t *)UINT_TO_PTR(p_Fm->baseAddr + FM_DEBUG_STATUS_REGISTER_OFFSET));
-+ if (!tmpReg)
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid debug status register value is '0'"));
-+
-+ /*************************************/
-+ /* Load FMan-Controller code to IRAM */
-+ /*************************************/
-+ ClearIRam(p_Fm);
-+ if (p_Fm->firmware.p_Code &&
-+ (LoadFmanCtrlCode(p_Fm) != E_OK))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+ XX_UDelay(100);
-+
-+ /* reset FMAN again to start the microcode */
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET);
-+ CORE_MemoryBarrier();
-+ XX_UDelay(100);
-+ Mem2IOCpy32((uint8_t *)p_Fm->p_FmBmiRegs->fmbm_spliodn,
-+ (uint8_t *)savedSpliodn,
-+ 63*sizeof(uint32_t));
-+
-+ if (fman_is_qmi_halt_not_busy_state(p_Fm->p_FmQmiRegs))
-+ {
-+ fman_resume(p_Fm->p_FmFpmRegs);
-+ CORE_MemoryBarrier();
-+ XX_UDelay(100);
-+ }
-+
-+ return E_OK;
-+}
-+#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
-+
-+static void GuestErrorIsr(t_Fm *p_Fm, uint32_t pending)
-+{
-+#define FM_G_CALL_1G_MAC_ERR_ISR(_id) \
-+do { \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].h_SrcHandle);\
-+} while (0)
-+#define FM_G_CALL_10G_MAC_ERR_ISR(_id) \
-+do { \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].h_SrcHandle);\
-+} while (0)
-+
-+ /* error interrupts */
-+ if (pending & ERR_INTR_EN_1G_MAC0)
-+ FM_G_CALL_1G_MAC_ERR_ISR(0);
-+ if (pending & ERR_INTR_EN_1G_MAC1)
-+ FM_G_CALL_1G_MAC_ERR_ISR(1);
-+ if (pending & ERR_INTR_EN_1G_MAC2)
-+ FM_G_CALL_1G_MAC_ERR_ISR(2);
-+ if (pending & ERR_INTR_EN_1G_MAC3)
-+ FM_G_CALL_1G_MAC_ERR_ISR(3);
-+ if (pending & ERR_INTR_EN_1G_MAC4)
-+ FM_G_CALL_1G_MAC_ERR_ISR(4);
-+ if (pending & ERR_INTR_EN_1G_MAC5)
-+ FM_G_CALL_1G_MAC_ERR_ISR(5);
-+ if (pending & ERR_INTR_EN_1G_MAC6)
-+ FM_G_CALL_1G_MAC_ERR_ISR(6);
-+ if (pending & ERR_INTR_EN_1G_MAC7)
-+ FM_G_CALL_1G_MAC_ERR_ISR(7);
-+ if (pending & ERR_INTR_EN_10G_MAC0)
-+ FM_G_CALL_10G_MAC_ERR_ISR(0);
-+ if (pending & ERR_INTR_EN_10G_MAC1)
-+ FM_G_CALL_10G_MAC_ERR_ISR(1);
-+}
-+
-+static void GuestEventIsr(t_Fm *p_Fm, uint32_t pending)
-+{
-+#define FM_G_CALL_1G_MAC_ISR(_id) \
-+do { \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].h_SrcHandle);\
-+} while (0)
-+#define FM_G_CALL_10G_MAC_ISR(_id) \
-+do { \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].h_SrcHandle);\
-+} while (0)
-+
-+ if (pending & INTR_EN_1G_MAC0)
-+ FM_G_CALL_1G_MAC_ISR(0);
-+ if (pending & INTR_EN_1G_MAC1)
-+ FM_G_CALL_1G_MAC_ISR(1);
-+ if (pending & INTR_EN_1G_MAC2)
-+ FM_G_CALL_1G_MAC_ISR(2);
-+ if (pending & INTR_EN_1G_MAC3)
-+ FM_G_CALL_1G_MAC_ISR(3);
-+ if (pending & INTR_EN_1G_MAC4)
-+ FM_G_CALL_1G_MAC_ISR(4);
-+ if (pending & INTR_EN_1G_MAC5)
-+ FM_G_CALL_1G_MAC_ISR(5);
-+ if (pending & INTR_EN_1G_MAC6)
-+ FM_G_CALL_1G_MAC_ISR(6);
-+ if (pending & INTR_EN_1G_MAC7)
-+ FM_G_CALL_1G_MAC_ISR(7);
-+ if (pending & INTR_EN_10G_MAC0)
-+ FM_G_CALL_10G_MAC_ISR(0);
-+ if (pending & INTR_EN_10G_MAC1)
-+ FM_G_CALL_10G_MAC_ISR(1);
-+ if (pending & INTR_EN_TMR)
-+ p_Fm->intrMng[e_FM_EV_TMR].f_Isr(p_Fm->intrMng[e_FM_EV_TMR].h_SrcHandle);
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+static t_Error SetVSPWindow(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint8_t baseStorageProfile,
-+ uint8_t log2NumOfProfiles)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+
-+ ASSERT_COND(h_Fm);
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->p_FmBmiRegs &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcVspSetPortWindow fmIpcVspSetPortWindow;
-+ t_FmIpcMsg msg;
-+ t_Error err = E_OK;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&fmIpcVspSetPortWindow, 0, sizeof(t_FmIpcVspSetPortWindow));
-+ fmIpcVspSetPortWindow.hardwarePortId = hardwarePortId;
-+ fmIpcVspSetPortWindow.baseStorageProfile = baseStorageProfile;
-+ fmIpcVspSetPortWindow.log2NumOfProfiles = log2NumOfProfiles;
-+ msg.msgId = FM_VSP_SET_PORT_WINDOW;
-+ memcpy(msg.msgBody, &fmIpcVspSetPortWindow, sizeof(t_FmIpcVspSetPortWindow));
-+
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+ else if (!p_Fm->p_FmBmiRegs)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+
-+ fman_set_vsp_window(p_Fm->p_FmBmiRegs,
-+ hardwarePortId,
-+ baseStorageProfile,
-+ log2NumOfProfiles);
-+
-+ return E_OK;
-+}
-+
-+static uint8_t AllocVSPsForPartition(t_Handle h_Fm, uint8_t base, uint8_t numOfProfiles, uint8_t guestId)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ uint8_t profilesFound = 0;
-+ int i = 0;
-+ uint32_t intFlags;
-+
-+ if (!numOfProfiles)
-+ return E_OK;
-+
-+ if ((numOfProfiles > FM_VSP_MAX_NUM_OF_ENTRIES) ||
-+ (base + numOfProfiles > FM_VSP_MAX_NUM_OF_ENTRIES))
-+ return (uint8_t)ILLEGAL_BASE;
-+
-+ if (p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ t_Error err;
-+ uint32_t replyLength;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
-+ ipcAllocParams.guestId = p_Fm->guestId;
-+ ipcAllocParams.num = p_Fm->partNumOfVSPs;
-+ ipcAllocParams.base = p_Fm->partVSPBase;
-+ msg.msgId = FM_VSP_ALLOC;
-+ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
-+ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if ((err != E_OK) ||
-+ (replyLength != (sizeof(uint32_t) + sizeof(uint8_t))))
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ else
-+ memcpy((uint8_t*)&p_Fm->partVSPBase, reply.replyBody, sizeof(uint8_t));
-+ if (p_Fm->partVSPBase == (uint8_t)(ILLEGAL_BASE))
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ DBG(WARNING, ("FM Guest mode, without IPC - can't validate VSP range!"));
-+ return (uint8_t)ILLEGAL_BASE;
-+ }
-+
-+ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
-+ for (i = base; i < base + numOfProfiles; i++)
-+ if (p_Fm->p_FmSp->profiles[i].profilesMng.ownerId == (uint8_t)ILLEGAL_BASE)
-+ profilesFound++;
-+ else
-+ break;
-+
-+ if (profilesFound == numOfProfiles)
-+ for (i = base; i<base + numOfProfiles; i++)
-+ p_Fm->p_FmSp->profiles[i].profilesMng.ownerId = guestId;
-+ else
-+ {
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+ return (uint8_t)ILLEGAL_BASE;
-+ }
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+
-+ return base;
-+}
-+
-+static void FreeVSPsForPartition(t_Handle h_Fm, uint8_t base, uint8_t numOfProfiles, uint8_t guestId)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ int i = 0;
-+
-+ ASSERT_COND(p_Fm);
-+
-+ if (p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+ t_Error err;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams));
-+ ipcAllocParams.guestId = p_Fm->guestId;
-+ ipcAllocParams.num = p_Fm->partNumOfVSPs;
-+ ipcAllocParams.base = p_Fm->partVSPBase;
-+ msg.msgId = FM_VSP_FREE;
-+ memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams));
-+ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return;
-+ }
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ DBG(WARNING, ("FM Guest mode, without IPC - can't validate VSP range!"));
-+ return;
-+ }
-+
-+ ASSERT_COND(p_Fm->p_FmSp);
-+
-+ for (i=base; i<numOfProfiles; i++)
-+ {
-+ if (p_Fm->p_FmSp->profiles[i].profilesMng.ownerId == guestId)
-+ p_Fm->p_FmSp->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
-+ else
-+ DBG(WARNING, ("Request for freeing storage profile window which wasn't allocated to this partition"));
-+ }
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+static t_Error FmGuestHandleIpcMsgCB(t_Handle h_Fm,
-+ uint8_t *p_Msg,
-+ uint32_t msgLength,
-+ uint8_t *p_Reply,
-+ uint32_t *p_ReplyLength)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_FmIpcMsg *p_IpcMsg = (t_FmIpcMsg*)p_Msg;
-+
-+ UNUSED(p_Reply);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((msgLength > sizeof(uint32_t)), E_INVALID_VALUE);
-+
-+#ifdef DISABLE_SANITY_CHECKS
-+ UNUSED(msgLength);
-+#endif /* DISABLE_SANITY_CHECKS */
-+
-+ ASSERT_COND(p_Msg);
-+
-+ *p_ReplyLength = 0;
-+
-+ switch (p_IpcMsg->msgId)
-+ {
-+ case (FM_GUEST_ISR):
-+ {
-+ t_FmIpcIsr ipcIsr;
-+
-+ memcpy((uint8_t*)&ipcIsr, p_IpcMsg->msgBody, sizeof(t_FmIpcIsr));
-+ if (ipcIsr.boolErr)
-+ GuestErrorIsr(p_Fm, ipcIsr.pendingReg);
-+ else
-+ GuestEventIsr(p_Fm, ipcIsr.pendingReg);
-+ break;
-+ }
-+ default:
-+ *p_ReplyLength = 0;
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!"));
-+ }
-+ return E_OK;
-+}
-+
-+static t_Error FmHandleIpcMsgCB(t_Handle h_Fm,
-+ uint8_t *p_Msg,
-+ uint32_t msgLength,
-+ uint8_t *p_Reply,
-+ uint32_t *p_ReplyLength)
-+{
-+ t_Error err;
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_FmIpcMsg *p_IpcMsg = (t_FmIpcMsg*)p_Msg;
-+ t_FmIpcReply *p_IpcReply = (t_FmIpcReply*)p_Reply;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((msgLength >= sizeof(uint32_t)), E_INVALID_VALUE);
-+
-+#ifdef DISABLE_SANITY_CHECKS
-+ UNUSED(msgLength);
-+#endif /* DISABLE_SANITY_CHECKS */
-+
-+ ASSERT_COND(p_IpcMsg);
-+
-+ memset(p_IpcReply, 0, (sizeof(uint8_t) * FM_IPC_MAX_REPLY_SIZE));
-+ *p_ReplyLength = 0;
-+
-+ switch (p_IpcMsg->msgId)
-+ {
-+ case (FM_GET_SET_PORT_PARAMS):
-+ {
-+ t_FmIpcPortInInitParams ipcInitParams;
-+ t_FmInterModulePortInitParams initParams;
-+ t_FmIpcPortOutInitParams ipcOutInitParams;
-+
-+ memcpy((uint8_t*)&ipcInitParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortInInitParams));
-+ initParams.hardwarePortId = ipcInitParams.hardwarePortId;
-+ initParams.portType = (e_FmPortType)ipcInitParams.enumPortType;
-+ initParams.independentMode = (bool)(ipcInitParams.boolIndependentMode);
-+ initParams.liodnOffset = ipcInitParams.liodnOffset;
-+ initParams.numOfTasks = ipcInitParams.numOfTasks;
-+ initParams.numOfExtraTasks = ipcInitParams.numOfExtraTasks;
-+ initParams.numOfOpenDmas = ipcInitParams.numOfOpenDmas;
-+ initParams.numOfExtraOpenDmas = ipcInitParams.numOfExtraOpenDmas;
-+ initParams.sizeOfFifo = ipcInitParams.sizeOfFifo;
-+ initParams.extraSizeOfFifo = ipcInitParams.extraSizeOfFifo;
-+ initParams.deqPipelineDepth = ipcInitParams.deqPipelineDepth;
-+ initParams.maxFrameLength = ipcInitParams.maxFrameLength;
-+ initParams.liodnBase = ipcInitParams.liodnBase;
-+
-+ p_IpcReply->error = (uint32_t)FmGetSetPortParams(h_Fm, &initParams);
-+
-+ ipcOutInitParams.ipcPhysAddr.high = initParams.fmMuramPhysBaseAddr.high;
-+ ipcOutInitParams.ipcPhysAddr.low = initParams.fmMuramPhysBaseAddr.low;
-+ ipcOutInitParams.sizeOfFifo = initParams.sizeOfFifo;
-+ ipcOutInitParams.extraSizeOfFifo = initParams.extraSizeOfFifo;
-+ ipcOutInitParams.numOfTasks = initParams.numOfTasks;
-+ ipcOutInitParams.numOfExtraTasks = initParams.numOfExtraTasks;
-+ ipcOutInitParams.numOfOpenDmas = initParams.numOfOpenDmas;
-+ ipcOutInitParams.numOfExtraOpenDmas = initParams.numOfExtraOpenDmas;
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcOutInitParams, sizeof(ipcOutInitParams));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcPortOutInitParams);
-+ break;
-+ }
-+ case (FM_SET_SIZE_OF_FIFO):
-+ {
-+ t_FmIpcPortRsrcParams ipcPortRsrcParams;
-+
-+ memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams));
-+ p_IpcReply->error = (uint32_t)FmSetSizeOfFifo(h_Fm,
-+ ipcPortRsrcParams.hardwarePortId,
-+ &ipcPortRsrcParams.val,
-+ &ipcPortRsrcParams.extra,
-+ (bool)ipcPortRsrcParams.boolInitialConfig);
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_SET_NUM_OF_TASKS):
-+ {
-+ t_FmIpcPortRsrcParams ipcPortRsrcParams;
-+
-+ memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams));
-+ p_IpcReply->error = (uint32_t)FmSetNumOfTasks(h_Fm, ipcPortRsrcParams.hardwarePortId,
-+ (uint8_t*)&ipcPortRsrcParams.val,
-+ (uint8_t*)&ipcPortRsrcParams.extra,
-+ (bool)ipcPortRsrcParams.boolInitialConfig);
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_SET_NUM_OF_OPEN_DMAS):
-+ {
-+ t_FmIpcPortRsrcParams ipcPortRsrcParams;
-+
-+ memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams));
-+ p_IpcReply->error = (uint32_t)FmSetNumOfOpenDmas(h_Fm, ipcPortRsrcParams.hardwarePortId,
-+ (uint8_t*)&ipcPortRsrcParams.val,
-+ (uint8_t*)&ipcPortRsrcParams.extra,
-+ (bool)ipcPortRsrcParams.boolInitialConfig);
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_RESUME_STALLED_PORT):
-+ *p_ReplyLength = sizeof(uint32_t);
-+ p_IpcReply->error = (uint32_t)FmResumeStalledPort(h_Fm, p_IpcMsg->msgBody[0]);
-+ break;
-+ case (FM_MASTER_IS_ALIVE):
-+ {
-+ uint8_t guestId = p_IpcMsg->msgBody[0];
-+ /* build the FM master partition IPC address */
-+ memset(p_Fm->fmIpcHandlerModuleName[guestId], 0, (sizeof(char)) * MODULE_NAME_SIZE);
-+ if (Sprint (p_Fm->fmIpcHandlerModuleName[guestId], "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, guestId) != (guestId<10 ? 6:7))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+ p_Fm->h_IpcSessions[guestId] = XX_IpcInitSession(p_Fm->fmIpcHandlerModuleName[guestId], p_Fm->fmModuleName);
-+ if (p_Fm->h_IpcSessions[guestId] == NULL)
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM Master IPC session for guest %d", guestId));
-+ *(uint8_t*)(p_IpcReply->replyBody) = 1;
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ break;
-+ }
-+ case (FM_IS_PORT_STALLED):
-+ {
-+ bool tmp;
-+
-+ p_IpcReply->error = (uint32_t)FmIsPortStalled(h_Fm, p_IpcMsg->msgBody[0], &tmp);
-+ *(uint8_t*)(p_IpcReply->replyBody) = (uint8_t)tmp;
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ break;
-+ }
-+ case (FM_RESET_MAC):
-+ {
-+ t_FmIpcMacParams ipcMacParams;
-+
-+ memcpy((uint8_t*)&ipcMacParams, p_IpcMsg->msgBody, sizeof(t_FmIpcMacParams));
-+ p_IpcReply->error = (uint32_t)FmResetMac(p_Fm,
-+ (e_FmMacType)(ipcMacParams.enumType),
-+ ipcMacParams.id);
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_SET_MAC_MAX_FRAME):
-+ {
-+ t_FmIpcMacMaxFrameParams ipcMacMaxFrameParams;
-+
-+ memcpy((uint8_t*)&ipcMacMaxFrameParams, p_IpcMsg->msgBody, sizeof(t_FmIpcMacMaxFrameParams));
-+ err = FmSetMacMaxFrame(p_Fm,
-+ (e_FmMacType)(ipcMacMaxFrameParams.macParams.enumType),
-+ ipcMacMaxFrameParams.macParams.id,
-+ ipcMacMaxFrameParams.maxFrameLength);
-+ if (err != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ break;
-+ }
-+#if (DPAA_VERSION >= 11)
-+ case (FM_VSP_ALLOC) :
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ uint8_t vspBase;
-+ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
-+ vspBase = AllocVSPsForPartition(h_Fm, (uint8_t)ipcAllocParams.base, (uint8_t)ipcAllocParams.num, ipcAllocParams.guestId);
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&vspBase, sizeof(uint8_t));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ break;
-+ }
-+ case (FM_VSP_FREE) :
-+ {
-+ t_FmIpcResourceAllocParams ipcAllocParams;
-+ memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams));
-+ FreeVSPsForPartition(h_Fm, (uint8_t)ipcAllocParams.base, (uint8_t)ipcAllocParams.num, ipcAllocParams.guestId);
-+ break;
-+ }
-+ case (FM_VSP_SET_PORT_WINDOW) :
-+ {
-+ t_FmIpcVspSetPortWindow ipcVspSetPortWindow;
-+ memcpy(&ipcVspSetPortWindow, p_IpcMsg->msgBody, sizeof(t_FmIpcVspSetPortWindow));
-+ err = SetVSPWindow(h_Fm,
-+ ipcVspSetPortWindow.hardwarePortId,
-+ ipcVspSetPortWindow.baseStorageProfile,
-+ ipcVspSetPortWindow.log2NumOfProfiles);
-+ return err;
-+ }
-+ case (FM_SET_CONG_GRP_PFC_PRIO) :
-+ {
-+ t_FmIpcSetCongestionGroupPfcPriority fmIpcSetCongestionGroupPfcPriority;
-+ memcpy(&fmIpcSetCongestionGroupPfcPriority, p_IpcMsg->msgBody, sizeof(t_FmIpcSetCongestionGroupPfcPriority));
-+ err = FmSetCongestionGroupPFCpriority(h_Fm,
-+ fmIpcSetCongestionGroupPfcPriority.congestionGroupId,
-+ fmIpcSetCongestionGroupPfcPriority.priorityBitMap);
-+ return err;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ case (FM_FREE_PORT):
-+ {
-+ t_FmInterModulePortFreeParams portParams;
-+ t_FmIpcPortFreeParams ipcPortParams;
-+
-+ memcpy((uint8_t*)&ipcPortParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortFreeParams));
-+ portParams.hardwarePortId = ipcPortParams.hardwarePortId;
-+ portParams.portType = (e_FmPortType)(ipcPortParams.enumPortType);
-+ portParams.deqPipelineDepth = ipcPortParams.deqPipelineDepth;
-+ FmFreePortParams(h_Fm, &portParams);
-+ break;
-+ }
-+ case (FM_REGISTER_INTR):
-+ {
-+ t_FmIpcRegisterIntr ipcRegIntr;
-+
-+ memcpy((uint8_t*)&ipcRegIntr, p_IpcMsg->msgBody, sizeof(ipcRegIntr));
-+ p_Fm->intrMng[ipcRegIntr.event].guestId = ipcRegIntr.guestId;
-+ break;
-+ }
-+ case (FM_GET_PARAMS):
-+ {
-+ t_FmIpcParams ipcParams;
-+
-+ /* Get clock frequency */
-+ ipcParams.fmClkFreq = p_Fm->p_FmStateStruct->fmClkFreq;
-+ ipcParams.fmMacClkFreq = p_Fm->p_FmStateStruct->fmMacClkFreq;
-+
-+ fman_get_revision(p_Fm->p_FmFpmRegs,&ipcParams.majorRev,&ipcParams.minorRev);
-+
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcParams, sizeof(t_FmIpcParams));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcParams);
-+ break;
-+ }
-+ case (FM_GET_FMAN_CTRL_CODE_REV):
-+ {
-+ t_FmCtrlCodeRevisionInfo fmanCtrlRevInfo;
-+ t_FmIpcFmanCtrlCodeRevisionInfo ipcRevInfo;
-+
-+ p_IpcReply->error = (uint32_t)FM_GetFmanCtrlCodeRevision(h_Fm, &fmanCtrlRevInfo);
-+ ipcRevInfo.packageRev = fmanCtrlRevInfo.packageRev;
-+ ipcRevInfo.majorRev = fmanCtrlRevInfo.majorRev;
-+ ipcRevInfo.minorRev = fmanCtrlRevInfo.minorRev;
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcRevInfo, sizeof(t_FmIpcFmanCtrlCodeRevisionInfo));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcFmanCtrlCodeRevisionInfo);
-+ break;
-+ }
-+
-+ case (FM_DMA_STAT):
-+ {
-+ t_FmDmaStatus dmaStatus;
-+ t_FmIpcDmaStatus ipcDmaStatus;
-+
-+ FM_GetDmaStatus(h_Fm, &dmaStatus);
-+ ipcDmaStatus.boolCmqNotEmpty = (uint8_t)dmaStatus.cmqNotEmpty;
-+ ipcDmaStatus.boolBusError = (uint8_t)dmaStatus.busError;
-+ ipcDmaStatus.boolReadBufEccError = (uint8_t)dmaStatus.readBufEccError;
-+ ipcDmaStatus.boolWriteBufEccSysError = (uint8_t)dmaStatus.writeBufEccSysError;
-+ ipcDmaStatus.boolWriteBufEccFmError = (uint8_t)dmaStatus.writeBufEccFmError;
-+ ipcDmaStatus.boolSinglePortEccError = (uint8_t)dmaStatus.singlePortEccError;
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcDmaStatus, sizeof(t_FmIpcDmaStatus));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus);
-+ break;
-+ }
-+ case (FM_ALLOC_FMAN_CTRL_EVENT_REG):
-+ p_IpcReply->error = (uint32_t)FmAllocFmanCtrlEventReg(h_Fm, (uint8_t*)p_IpcReply->replyBody);
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ break;
-+ case (FM_FREE_FMAN_CTRL_EVENT_REG):
-+ FmFreeFmanCtrlEventReg(h_Fm, p_IpcMsg->msgBody[0]);
-+ break;
-+ case (FM_GET_TIMESTAMP_SCALE):
-+ {
-+ uint32_t timeStamp = FmGetTimeStampScale(h_Fm);
-+
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&timeStamp, sizeof(uint32_t));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_GET_COUNTER):
-+ {
-+ e_FmCounters inCounter;
-+ uint32_t outCounter;
-+
-+ memcpy((uint8_t*)&inCounter, p_IpcMsg->msgBody, sizeof(uint32_t));
-+ outCounter = FM_GetCounter(h_Fm, inCounter);
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&outCounter, sizeof(uint32_t));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_SET_FMAN_CTRL_EVENTS_ENABLE):
-+ {
-+ t_FmIpcFmanEvents ipcFmanEvents;
-+
-+ memcpy((uint8_t*)&ipcFmanEvents, p_IpcMsg->msgBody, sizeof(t_FmIpcFmanEvents));
-+ FmSetFmanCtrlIntr(h_Fm,
-+ ipcFmanEvents.eventRegId,
-+ ipcFmanEvents.enableEvents);
-+ break;
-+ }
-+ case (FM_GET_FMAN_CTRL_EVENTS_ENABLE):
-+ {
-+ uint32_t tmp = FmGetFmanCtrlIntr(h_Fm, p_IpcMsg->msgBody[0]);
-+
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&tmp, sizeof(uint32_t));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ break;
-+ }
-+ case (FM_GET_PHYS_MURAM_BASE):
-+ {
-+ t_FmPhysAddr physAddr;
-+ t_FmIpcPhysAddr ipcPhysAddr;
-+
-+ FmGetPhysicalMuramBase(h_Fm, &physAddr);
-+ ipcPhysAddr.high = physAddr.high;
-+ ipcPhysAddr.low = physAddr.low;
-+ memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcPhysAddr, sizeof(t_FmIpcPhysAddr));
-+ *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcPhysAddr);
-+ break;
-+ }
-+ case (FM_ENABLE_RAM_ECC):
-+ {
-+ if (((err = FM_EnableRamsEcc(h_Fm)) != E_OK) ||
-+ ((err = FM_SetException(h_Fm, e_FM_EX_IRAM_ECC, TRUE)) != E_OK) ||
-+ ((err = FM_SetException(h_Fm, e_FM_EX_MURAM_ECC, TRUE)) != E_OK))
-+#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0))
-+ UNUSED(err);
-+#else
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+#endif /* (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0)) */
-+ break;
-+ }
-+ case (FM_DISABLE_RAM_ECC):
-+ {
-+
-+ if (((err = FM_SetException(h_Fm, e_FM_EX_IRAM_ECC, FALSE)) != E_OK) ||
-+ ((err = FM_SetException(h_Fm, e_FM_EX_MURAM_ECC, FALSE)) != E_OK) ||
-+ ((err = FM_DisableRamsEcc(h_Fm)) != E_OK))
-+#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0))
-+ UNUSED(err);
-+#else
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+#endif /* (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0)) */
-+ break;
-+ }
-+ case (FM_SET_NUM_OF_FMAN_CTRL):
-+ {
-+ t_FmIpcPortNumOfFmanCtrls ipcPortNumOfFmanCtrls;
-+
-+ memcpy((uint8_t*)&ipcPortNumOfFmanCtrls, p_IpcMsg->msgBody, sizeof(t_FmIpcPortNumOfFmanCtrls));
-+ err = FmSetNumOfRiscsPerPort(h_Fm,
-+ ipcPortNumOfFmanCtrls.hardwarePortId,
-+ ipcPortNumOfFmanCtrls.numOfFmanCtrls,
-+ ipcPortNumOfFmanCtrls.orFmanCtrl);
-+ if (err != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ break;
-+ }
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+ case (FM_10G_TX_ECC_WA):
-+ p_IpcReply->error = (uint32_t)Fm10GTxEccWorkaround(h_Fm, p_IpcMsg->msgBody[0]);
-+ *p_ReplyLength = sizeof(uint32_t);
-+ break;
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+ default:
-+ *p_ReplyLength = 0;
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!"));
-+ }
-+ return E_OK;
-+}
-+
-+
-+/****************************************/
-+/* Inter-Module functions */
-+/****************************************/
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+t_Error Fm10GTxEccWorkaround(t_Handle h_Fm, uint8_t macId)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_Error err = E_OK;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+ uint8_t rxHardwarePortId, txHardwarePortId;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_10G_TX_ECC_WA;
-+ msg.msgBody[0] = macId;
-+ replyLength = sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(macId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return (t_Error)(reply.error);
-+ }
-+
-+ SANITY_CHECK_RETURN_ERROR((macId == 0), E_NOT_SUPPORTED);
-+ SANITY_CHECK_RETURN_ERROR(IsFmanCtrlCodeLoaded(p_Fm), E_INVALID_STATE);
-+
-+ rxHardwarePortId = SwPortIdToHwPortId(e_FM_PORT_TYPE_RX_10G,
-+ macId,
-+ p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+ txHardwarePortId = SwPortIdToHwPortId(e_FM_PORT_TYPE_TX_10G,
-+ macId,
-+ p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+ if ((p_Fm->p_FmStateStruct->portsTypes[rxHardwarePortId] != e_FM_PORT_TYPE_DUMMY) ||
-+ (p_Fm->p_FmStateStruct->portsTypes[txHardwarePortId] != e_FM_PORT_TYPE_DUMMY))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("MAC should be initialized prior to Rx and Tx ports!"));
-+
-+ return fman_set_erratum_10gmac_a004_wa(fpm_rg);
-+}
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+uint16_t FmGetTnumAgingPeriod(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_Fm->p_FmDriverParam, E_INVALID_STATE, 0);
-+
-+ return p_Fm->tnumAgingPeriod;
-+}
-+
-+t_Error FmSetPortPreFetchConfiguration(t_Handle h_Fm,
-+ uint8_t portNum,
-+ bool preFetchConfigured)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ p_Fm->portsPreFetchConfigured[portNum] = TRUE;
-+ p_Fm->portsPreFetchValue[portNum] = preFetchConfigured;
-+
-+ return E_OK;
-+}
-+
-+t_Error FmGetPortPreFetchConfiguration(t_Handle h_Fm,
-+ uint8_t portNum,
-+ bool *p_PortConfigured,
-+ bool *p_PreFetchConfigured)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ /* If the prefetch was not configured yet (neither enabled nor disabled),
-+ report the port as not configured and the prefetch as disabled. */
-+ if (!p_Fm->portsPreFetchConfigured[portNum])
-+ {
-+ *p_PortConfigured = FALSE;
-+ *p_PreFetchConfigured = FALSE;
-+ }
-+ else
-+ {
-+ *p_PortConfigured = TRUE;
-+ *p_PreFetchConfigured = (p_Fm->portsPreFetchConfigured[portNum]);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSetCongestionGroupPFCpriority(t_Handle h_Fm,
-+ uint32_t congestionGroupId,
-+ uint8_t priorityBitMap)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ uint32_t regNum;
-+
-+ ASSERT_COND(h_Fm);
-+
-+ if (congestionGroupId > FM_PORT_NUM_OF_CONGESTION_GRPS)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("Congestion group ID bigger than %d",
-+ FM_PORT_NUM_OF_CONGESTION_GRPS));
-+
-+ if (p_Fm->guestId == NCSW_MASTER_ID)
-+ {
-+ ASSERT_COND(p_Fm->baseAddr);
-+ regNum = (FM_PORT_NUM_OF_CONGESTION_GRPS - 1 - congestionGroupId) / 4;
-+ fman_set_congestion_group_pfc_priority((uint32_t *)((p_Fm->baseAddr+FM_MM_CGP)),
-+ congestionGroupId,
-+ priorityBitMap,
-+ regNum);
-+ }
-+ else if (p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+ t_FmIpcSetCongestionGroupPfcPriority fmIpcSetCongestionGroupPfcPriority;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&fmIpcSetCongestionGroupPfcPriority, 0, sizeof(t_FmIpcSetCongestionGroupPfcPriority));
-+ fmIpcSetCongestionGroupPfcPriority.congestionGroupId = congestionGroupId;
-+ fmIpcSetCongestionGroupPfcPriority.priorityBitMap = priorityBitMap;
-+
-+ msg.msgId = FM_SET_CONG_GRP_PFC_PRIO;
-+ memcpy(msg.msgBody, &fmIpcSetCongestionGroupPfcPriority, sizeof(t_FmIpcSetCongestionGroupPfcPriority));
-+
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("guest without IPC!"));
-+
-+ return E_OK;
-+}
-+
-+uintptr_t FmGetPcdPrsBaseAddr(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
-+
-+ if (!p_Fm->baseAddr)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE,
-+ ("No base-addr; probably Guest with IPC!"));
-+ return 0;
-+ }
-+
-+ return (p_Fm->baseAddr + FM_MM_PRS);
-+}
-+
-+uintptr_t FmGetPcdKgBaseAddr(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
-+
-+ if (!p_Fm->baseAddr)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE,
-+ ("No base-addr; probably Guest with IPC!"));
-+ return 0;
-+ }
-+
-+ return (p_Fm->baseAddr + FM_MM_KG);
-+}
-+
-+uintptr_t FmGetPcdPlcrBaseAddr(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
-+
-+ if (!p_Fm->baseAddr)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE,
-+ ("No base-addr; probably Guest with IPC!"));
-+ return 0;
-+ }
-+
-+ return (p_Fm->baseAddr + FM_MM_PLCR);
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+uintptr_t FmGetVSPBaseAddr(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
-+
-+ return p_Fm->vspBaseAddr;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+t_Handle FmGetMuramHandle(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, NULL);
-+
-+ return (p_Fm->h_FmMuram);
-+}
-+
-+void FmGetPhysicalMuramBase(t_Handle h_Fm, t_FmPhysAddr *p_FmPhysAddr)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ if (p_Fm->fmMuramPhysBaseAddr)
-+ {
-+ /* General FM driver initialization */
-+ p_FmPhysAddr->low = (uint32_t)p_Fm->fmMuramPhysBaseAddr;
-+ p_FmPhysAddr->high = (uint8_t)((p_Fm->fmMuramPhysBaseAddr & 0x000000ff00000000LL) >> 32);
-+ return;
-+ }
-+
-+ ASSERT_COND(p_Fm->guestId != NCSW_MASTER_ID);
-+
-+ if (p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+ t_FmIpcPhysAddr ipcPhysAddr;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_GET_PHYS_MURAM_BASE;
-+ replyLength = sizeof(uint32_t) + sizeof(t_FmPhysAddr);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return;
-+ }
-+ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmPhysAddr)))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_VALUE,("IPC reply length mismatch"));
-+ return;
-+ }
-+ memcpy((uint8_t*)&ipcPhysAddr, reply.replyBody, sizeof(t_FmIpcPhysAddr));
-+ p_FmPhysAddr->high = ipcPhysAddr.high;
-+ p_FmPhysAddr->low = ipcPhysAddr.low;
-+ }
-+ else
-+ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+                     ("running in guest-mode with neither IPC nor mapped register!"));
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FmVSPAllocForPort (t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint8_t numOfVSPs)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ t_Error err = E_OK;
-+ uint32_t profilesFound, intFlags;
-+ uint8_t first, i;
-+ uint8_t log2Num;
-+ uint8_t swPortIndex=0, hardwarePortId;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ if (!numOfVSPs)
-+ return E_OK;
-+
-+ if (numOfVSPs > FM_VSP_MAX_NUM_OF_ENTRIES)
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles can not be bigger than %d.",FM_VSP_MAX_NUM_OF_ENTRIES));
-+
-+ if (!POWER_OF_2(numOfVSPs))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numProfiles must be a power of 2."));
-+
-+ LOG2((uint64_t)numOfVSPs, log2Num);
-+
-+ if ((log2Num == 0) || (p_Fm->partVSPBase == 0))
-+ first = 0;
-+ else
-+ first = 1<<log2Num;
-+
-+ if (first > (p_Fm->partVSPBase + p_Fm->partNumOfVSPs))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("can not allocate storage profile port window"));
-+
-+ if (first < p_Fm->partVSPBase)
-+ while (first < p_Fm->partVSPBase)
-+ first = first + numOfVSPs;
-+
-+ if ((first + numOfVSPs) > (p_Fm->partVSPBase + p_Fm->partNumOfVSPs))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("can not allocate storage profile port window"));
-+
-+ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
-+ profilesFound = 0;
-+ for (i=first; i < p_Fm->partVSPBase + p_Fm->partNumOfVSPs; )
-+ {
-+ if (!p_Fm->p_FmSp->profiles[i].profilesMng.allocated)
-+ {
-+ profilesFound++;
-+ i++;
-+ if (profilesFound == numOfVSPs)
-+ break;
-+ }
-+ else
-+ {
-+ profilesFound = 0;
-+ /* advance i to the next aligned address */
-+ first = i = (uint8_t)(first + numOfVSPs);
-+ }
-+ }
-+ if (profilesFound == numOfVSPs)
-+ for (i = first; i<first + numOfVSPs; i++)
-+ p_Fm->p_FmSp->profiles[i].profilesMng.allocated = TRUE;
-+ else
-+ {
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+ RETURN_ERROR(MINOR, E_FULL, ("No profiles."));
-+ }
-+
-+ hardwarePortId = SwPortIdToHwPortId(portType,
-+ portId,
-+ p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ p_Fm->p_FmSp->portsMapping[swPortIndex].numOfProfiles = numOfVSPs;
-+ p_Fm->p_FmSp->portsMapping[swPortIndex].profilesBase = first;
-+
-+ if ((err = SetVSPWindow(h_Fm,hardwarePortId, first,log2Num)) != E_OK)
-+ for (i = first; i < first + numOfVSPs; i++)
-+ p_Fm->p_FmSp->profiles[i].profilesMng.allocated = FALSE;
-+
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+
-+ return err;
-+}
-+
-+t_Error FmVSPFreeForPort(t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ uint8_t swPortIndex=0, hardwarePortId, first, numOfVSPs, i;
-+ uint32_t intFlags;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ hardwarePortId = SwPortIdToHwPortId(portType,
-+ portId,
-+ p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ numOfVSPs = (uint8_t)p_Fm->p_FmSp->portsMapping[swPortIndex].numOfProfiles;
-+ first = (uint8_t)p_Fm->p_FmSp->portsMapping[swPortIndex].profilesBase;
-+
-+ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
-+ for (i = first; i < first + numOfVSPs; i++)
-+ p_Fm->p_FmSp->profiles[i].profilesMng.allocated = FALSE;
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+
-+ p_Fm->p_FmSp->portsMapping[swPortIndex].numOfProfiles = 0;
-+ p_Fm->p_FmSp->portsMapping[swPortIndex].profilesBase = 0;
-+
-+ return E_OK;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+t_Error FmAllocFmanCtrlEventReg(t_Handle h_Fm, uint8_t *p_EventId)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ uint8_t i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_ALLOC_FMAN_CTRL_EVENT_REG;
-+ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+
-+ *p_EventId = *(uint8_t*)(reply.replyBody);
-+
-+ return (t_Error)(reply.error);
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+
-+ for (i=0;i<FM_NUM_OF_FMAN_CTRL_EVENT_REGS;i++)
-+ if (!p_Fm->usedEventRegs[i])
-+ {
-+ p_Fm->usedEventRegs[i] = TRUE;
-+ *p_EventId = i;
-+ break;
-+ }
-+
-+ if (i==FM_NUM_OF_FMAN_CTRL_EVENT_REGS)
-+ RETURN_ERROR(MAJOR, E_BUSY, ("No resource - FMan controller event register."));
-+
-+ return E_OK;
-+}
-+
-+void FmFreeFmanCtrlEventReg(t_Handle h_Fm, uint8_t eventId)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_FREE_FMAN_CTRL_EVENT_REG;
-+ msg.msgBody[0] = eventId;
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(eventId),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return;
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+ return;
-+ }
-+
-+ ((t_Fm*)h_Fm)->usedEventRegs[eventId] = FALSE;
-+}
-+
-+void FmSetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, uint32_t enableEvents)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->p_FmFpmRegs &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcFmanEvents fmanCtrl;
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+
-+ fmanCtrl.eventRegId = eventRegId;
-+ fmanCtrl.enableEvents = enableEvents;
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_SET_FMAN_CTRL_EVENTS_ENABLE;
-+ memcpy(msg.msgBody, &fmanCtrl, sizeof(fmanCtrl));
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(fmanCtrl),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return;
-+ }
-+ else if (!p_Fm->p_FmFpmRegs)
-+ {
-+ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+ return;
-+ }
-+
-+ ASSERT_COND(eventRegId < FM_NUM_OF_FMAN_CTRL_EVENT_REGS);
-+ fman_set_ctrl_intr(fpm_rg, eventRegId, enableEvents);
-+}
-+
-+uint32_t FmGetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->p_FmFpmRegs &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength, ctrlIntr;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_GET_FMAN_CTRL_EVENTS_ENABLE;
-+ msg.msgBody[0] = eventRegId;
-+ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(eventRegId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return 0;
-+ }
-+ if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t)))
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return 0;
-+ }
-+ memcpy((uint8_t*)&ctrlIntr, reply.replyBody, sizeof(uint32_t));
-+ return ctrlIntr;
-+ }
-+ else if (!p_Fm->p_FmFpmRegs)
-+ {
-+ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+ return 0;
-+ }
-+
-+ return fman_get_ctrl_intr(fpm_rg, eventRegId);
-+}
-+
-+void FmRegisterIntr(t_Handle h_Fm,
-+ e_FmEventModules module,
-+ uint8_t modId,
-+ e_FmIntrType intrType,
-+ void (*f_Isr) (t_Handle h_Arg),
-+ t_Handle h_Arg)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ int event = 0;
-+
-+ ASSERT_COND(h_Fm);
-+
-+ GET_FM_MODULE_EVENT(module, modId, intrType, event);
-+ ASSERT_COND(event < e_FM_EV_DUMMY_LAST);
-+
-+ /* register in local FM structure */
-+ p_Fm->intrMng[event].f_Isr = f_Isr;
-+ p_Fm->intrMng[event].h_SrcHandle = h_Arg;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcRegisterIntr fmIpcRegisterIntr;
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+
-+ /* register in Master FM structure */
-+ fmIpcRegisterIntr.event = (uint32_t)event;
-+ fmIpcRegisterIntr.guestId = p_Fm->guestId;
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_REGISTER_INTR;
-+ memcpy(msg.msgBody, &fmIpcRegisterIntr, sizeof(fmIpcRegisterIntr));
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(fmIpcRegisterIntr),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+}
-+
-+void FmUnregisterIntr(t_Handle h_Fm,
-+ e_FmEventModules module,
-+ uint8_t modId,
-+ e_FmIntrType intrType)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ int event = 0;
-+
-+ ASSERT_COND(h_Fm);
-+
-+ GET_FM_MODULE_EVENT(module, modId,intrType, event);
-+ ASSERT_COND(event < e_FM_EV_DUMMY_LAST);
-+
-+ p_Fm->intrMng[event].f_Isr = UnimplementedIsr;
-+ p_Fm->intrMng[event].h_SrcHandle = NULL;
-+}
-+
-+void FmRegisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Arg, uint32_t event), t_Handle h_Arg)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ ASSERT_COND(eventRegId<FM_NUM_OF_FMAN_CTRL_EVENT_REGS);
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM in guest-mode"));
-+ return;
-+ }
-+
-+ p_Fm->fmanCtrlIntr[eventRegId].f_Isr = f_Isr;
-+ p_Fm->fmanCtrlIntr[eventRegId].h_SrcHandle = h_Arg;
-+}
-+
-+void FmUnregisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ ASSERT_COND(eventRegId<FM_NUM_OF_FMAN_CTRL_EVENT_REGS);
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM in guest-mode"));
-+ return;
-+ }
-+
-+ p_Fm->fmanCtrlIntr[eventRegId].f_Isr = UnimplementedFmanCtrlIsr;
-+ p_Fm->fmanCtrlIntr[eventRegId].h_SrcHandle = NULL;
-+}
-+
-+void FmRegisterPcd(t_Handle h_Fm, t_Handle h_FmPcd)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ if (p_Fm->h_Pcd)
-+ REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("PCD already set"));
-+
-+ p_Fm->h_Pcd = h_FmPcd;
-+}
-+
-+void FmUnregisterPcd(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ if (!p_Fm->h_Pcd)
-+ REPORT_ERROR(MAJOR, E_NOT_FOUND, ("PCD handle!"));
-+
-+ p_Fm->h_Pcd = NULL;
-+}
-+
-+t_Handle FmGetPcdHandle(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ return p_Fm->h_Pcd;
-+}
-+
-+uint8_t FmGetId(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0xff);
-+
-+ return p_Fm->p_FmStateStruct->fmId;
-+}
-+
-+t_Error FmReset(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rstc, FPM_RSTC_FM_RESET);
-+ CORE_MemoryBarrier();
-+ XX_UDelay(100);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSetNumOfRiscsPerPort(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint8_t numOfFmanCtrls,
-+ t_FmFmanCtrl orFmanCtrl)
-+{
-+
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_fpm_regs *fpm_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((numOfFmanCtrls > 0) && (numOfFmanCtrls < 3)) , E_INVALID_HANDLE);
-+
-+ fpm_rg = p_Fm->p_FmFpmRegs;
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->p_FmFpmRegs &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcPortNumOfFmanCtrls params;
-+ t_FmIpcMsg msg;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ params.hardwarePortId = hardwarePortId;
-+ params.numOfFmanCtrls = numOfFmanCtrls;
-+ params.orFmanCtrl = orFmanCtrl;
-+ msg.msgId = FM_SET_NUM_OF_FMAN_CTRL;
-+ memcpy(msg.msgBody, &params, sizeof(params));
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) +sizeof(params),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+ else if (!p_Fm->p_FmFpmRegs)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+
-+ fman_set_num_of_riscs_per_port(fpm_rg, hardwarePortId, numOfFmanCtrls, orFmanCtrl);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmGetSetPortParams(t_Handle h_Fm, t_FmInterModulePortInitParams *p_PortParams)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_Error err;
-+ uint32_t intFlags;
-+ uint8_t hardwarePortId = p_PortParams->hardwarePortId, macId;
-+ struct fman_rg fman_rg;
-+
-+ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
-+ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
-+ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
-+ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ t_FmIpcPortInInitParams portInParams;
-+ t_FmIpcPortOutInitParams portOutParams;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ portInParams.hardwarePortId = p_PortParams->hardwarePortId;
-+ portInParams.enumPortType = (uint32_t)p_PortParams->portType;
-+ portInParams.boolIndependentMode= (uint8_t)p_PortParams->independentMode;
-+ portInParams.liodnOffset = p_PortParams->liodnOffset;
-+ portInParams.numOfTasks = p_PortParams->numOfTasks;
-+ portInParams.numOfExtraTasks = p_PortParams->numOfExtraTasks;
-+ portInParams.numOfOpenDmas = p_PortParams->numOfOpenDmas;
-+ portInParams.numOfExtraOpenDmas = p_PortParams->numOfExtraOpenDmas;
-+ portInParams.sizeOfFifo = p_PortParams->sizeOfFifo;
-+ portInParams.extraSizeOfFifo = p_PortParams->extraSizeOfFifo;
-+ portInParams.deqPipelineDepth = p_PortParams->deqPipelineDepth;
-+ portInParams.maxFrameLength = p_PortParams->maxFrameLength;
-+ portInParams.liodnBase = p_PortParams->liodnBase;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_GET_SET_PORT_PARAMS;
-+ memcpy(msg.msgBody, &portInParams, sizeof(portInParams));
-+ replyLength = (sizeof(uint32_t) + sizeof(t_FmIpcPortOutInitParams));
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) +sizeof(portInParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmIpcPortOutInitParams)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ memcpy((uint8_t*)&portOutParams, reply.replyBody, sizeof(t_FmIpcPortOutInitParams));
-+
-+ p_PortParams->fmMuramPhysBaseAddr.high = portOutParams.ipcPhysAddr.high;
-+ p_PortParams->fmMuramPhysBaseAddr.low = portOutParams.ipcPhysAddr.low;
-+ p_PortParams->numOfTasks = portOutParams.numOfTasks;
-+ p_PortParams->numOfExtraTasks = portOutParams.numOfExtraTasks;
-+ p_PortParams->numOfOpenDmas = portOutParams.numOfOpenDmas;
-+ p_PortParams->numOfExtraOpenDmas = portOutParams.numOfExtraOpenDmas;
-+ p_PortParams->sizeOfFifo = portOutParams.sizeOfFifo;
-+ p_PortParams->extraSizeOfFifo = portOutParams.extraSizeOfFifo;
-+
-+ return (t_Error)(reply.error);
-+ }
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
-+ if (p_PortParams->independentMode)
-+ {
-+ /* set port parameters */
-+ p_Fm->independentMode = p_PortParams->independentMode;
-+ /* disable dispatch limit */
-+ fman_qmi_disable_dispatch_limit(fman_rg.fpm_rg);
-+ }
-+
-+ if (p_PortParams->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
-+ {
-+ if (p_Fm->hcPortInitialized)
-+ {
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Only one host command port is allowed."));
-+ }
-+ else
-+ p_Fm->hcPortInitialized = TRUE;
-+ }
-+ p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] = p_PortParams->portType;
-+
-+ err = FmSetNumOfTasks(p_Fm, hardwarePortId, &p_PortParams->numOfTasks, &p_PortParams->numOfExtraTasks, TRUE);
-+ if (err)
-+ {
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev != 4)
-+#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
-+ if ((p_PortParams->portType != e_FM_PORT_TYPE_RX) &&
-+ (p_PortParams->portType != e_FM_PORT_TYPE_RX_10G))
-+ /* for transmit & O/H ports */
-+ {
-+ uint8_t enqTh;
-+ uint8_t deqTh;
-+
-+ /* update qmi ENQ/DEQ threshold */
-+ p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums += p_PortParams->deqPipelineDepth;
-+ enqTh = fman_get_qmi_enq_th(fman_rg.qmi_rg);
-+ /* if enqTh is too big, we reduce it to the max value that is still OK */
-+ if (enqTh >= (QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums))
-+ {
-+ enqTh = (uint8_t)(QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums - 1);
-+ fman_set_qmi_enq_th(fman_rg.qmi_rg, enqTh);
-+ }
-+
-+ deqTh = fman_get_qmi_deq_th(fman_rg.qmi_rg);
-+ /* if deqTh is too small, we enlarge it to the min value that is still OK.
-+ deqTh may not be larger than 63 (QMI_MAX_NUM_OF_TNUMS-1). */
-+ if ((deqTh <= p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums) && (deqTh < QMI_MAX_NUM_OF_TNUMS-1))
-+ {
-+ deqTh = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums + 1);
-+ fman_set_qmi_deq_th(fman_rg.qmi_rg, deqTh);
-+ }
-+ }
-+
-+#ifdef FM_LOW_END_RESTRICTION
-+ if ((hardwarePortId==0x1) || (hardwarePortId==0x29))
-+ {
-+ if (p_Fm->p_FmStateStruct->lowEndRestriction)
-+ {
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("OP #0 cannot work with Tx Port #1."));
-+ }
-+ else
-+ p_Fm->p_FmStateStruct->lowEndRestriction = TRUE;
-+ }
-+#endif /* FM_LOW_END_RESTRICTION */
-+
-+ err = FmSetSizeOfFifo(p_Fm,
-+ hardwarePortId,
-+ &p_PortParams->sizeOfFifo,
-+ &p_PortParams->extraSizeOfFifo,
-+ TRUE);
-+ if (err)
-+ {
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ err = FmSetNumOfOpenDmas(p_Fm,
-+ hardwarePortId,
-+ &p_PortParams->numOfOpenDmas,
-+ &p_PortParams->numOfExtraOpenDmas,
-+ TRUE);
-+ if (err)
-+ {
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ fman_set_liodn_per_port(&fman_rg,
-+ hardwarePortId,
-+ p_PortParams->liodnBase,
-+ p_PortParams->liodnOffset);
-+
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev < 6)
-+ fman_set_order_restoration_per_port(fman_rg.fpm_rg,
-+ hardwarePortId,
-+ p_PortParams->independentMode,
-+ !!((p_PortParams->portType==e_FM_PORT_TYPE_RX) || (p_PortParams->portType==e_FM_PORT_TYPE_RX_10G)));
-+
-+ HW_PORT_ID_TO_SW_PORT_ID(macId, hardwarePortId);
-+
-+#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)
-+ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX_10G) ||
-+ (p_PortParams->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ ASSERT_COND(macId < FM_MAX_NUM_OF_10G_MACS);
-+ if (p_PortParams->maxFrameLength >= p_Fm->p_FmStateStruct->macMaxFrameLengths10G[macId])
-+ p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId] = p_PortParams->maxFrameLength;
-+ else
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Port maxFrameLength is smaller than MAC current MTU"));
-+ }
-+ else
-+#endif /* defined(FM_MAX_NUM_OF_10G_MACS) && ... */
-+ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX) ||
-+ (p_PortParams->portType == e_FM_PORT_TYPE_RX))
-+ {
-+ ASSERT_COND(macId < FM_MAX_NUM_OF_1G_MACS);
-+ if (p_PortParams->maxFrameLength >= p_Fm->p_FmStateStruct->macMaxFrameLengths1G[macId])
-+ p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId] = p_PortParams->maxFrameLength;
-+ else
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Port maxFrameLength is smaller than MAC current MTU"));
-+ }
-+
-+ FmGetPhysicalMuramBase(p_Fm, &p_PortParams->fmMuramPhysBaseAddr);
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+
-+ return E_OK;
-+}
-+
-+void FmFreePortParams(t_Handle h_Fm,t_FmInterModulePortFreeParams *p_PortParams)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ uint32_t intFlags;
-+ uint8_t hardwarePortId = p_PortParams->hardwarePortId;
-+ uint8_t numOfTasks, numOfDmas, macId;
-+ uint16_t sizeOfFifo;
-+ t_Error err;
-+ t_FmIpcPortFreeParams portParams;
-+ t_FmIpcMsg msg;
-+ struct fman_qmi_regs *qmi_rg = p_Fm->p_FmQmiRegs;
-+ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ portParams.hardwarePortId = p_PortParams->hardwarePortId;
-+ portParams.enumPortType = (uint32_t)p_PortParams->portType;
-+ portParams.deqPipelineDepth = p_PortParams->deqPipelineDepth;
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_FREE_PORT;
-+ memcpy(msg.msgBody, &portParams, sizeof(portParams));
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(portParams),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return;
-+ }
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ intFlags = XX_LockIntrSpinlock(p_Fm->h_Spinlock);
-+
-+ if (p_PortParams->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)
-+ {
-+ ASSERT_COND(p_Fm->hcPortInitialized);
-+ p_Fm->hcPortInitialized = FALSE;
-+ }
-+
-+ p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] = e_FM_PORT_TYPE_DUMMY;
-+
-+ /* free numOfTasks */
-+ numOfTasks = fman_get_num_of_tasks(bmi_rg, hardwarePortId);
-+ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfTasks >= numOfTasks);
-+ p_Fm->p_FmStateStruct->accumulatedNumOfTasks -= numOfTasks;
-+
-+ /* free numOfOpenDmas */
-+ numOfDmas = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
-+ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas >= numOfDmas);
-+ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas -= numOfDmas;
-+
-+#ifdef FM_HAS_TOTAL_DMAS
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev < 6)
-+ {
-+        /* update the total number of DMAs with the committed number of open DMAs, plus the max uncommitted pool. */
-+ fman_set_num_of_open_dmas(bmi_rg,
-+ hardwarePortId,
-+ 1,
-+ 0,
-+ (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize));
-+ }
-+#endif /* FM_HAS_TOTAL_DMAS */
-+
-+ /* free sizeOfFifo */
-+ sizeOfFifo = fman_get_size_of_fifo(bmi_rg, hardwarePortId);
-+ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedFifoSize >= (sizeOfFifo * BMI_FIFO_UNITS));
-+ p_Fm->p_FmStateStruct->accumulatedFifoSize -= (sizeOfFifo * BMI_FIFO_UNITS);
-+
-+#ifdef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev != 4)
-+#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
-+ if ((p_PortParams->portType != e_FM_PORT_TYPE_RX) &&
-+ (p_PortParams->portType != e_FM_PORT_TYPE_RX_10G))
-+ /* for transmit & O/H ports */
-+ {
-+ uint8_t enqTh;
-+ uint8_t deqTh;
-+
-+ /* update qmi ENQ/DEQ threshold */
-+ p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums -= p_PortParams->deqPipelineDepth;
-+
-+ /* p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums is now smaller,
-+ so we can enlarge enqTh */
-+ enqTh = (uint8_t)(QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums - 1);
-+
-+ /* p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums is now smaller,
-+ so we can reduce deqTh */
-+ deqTh = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums + 1);
-+
-+ fman_set_qmi_enq_th(qmi_rg, enqTh);
-+ fman_set_qmi_deq_th(qmi_rg, deqTh);
-+ }
-+
-+ HW_PORT_ID_TO_SW_PORT_ID(macId, hardwarePortId);
-+
-+#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)
-+ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX_10G) ||
-+ (p_PortParams->portType == e_FM_PORT_TYPE_RX_10G))
-+ {
-+ ASSERT_COND(macId < FM_MAX_NUM_OF_10G_MACS);
-+ p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId] = 0;
-+ }
-+ else
-+#endif /* defined(FM_MAX_NUM_OF_10G_MACS) && ... */
-+ if ((p_PortParams->portType == e_FM_PORT_TYPE_TX) ||
-+ (p_PortParams->portType == e_FM_PORT_TYPE_RX))
-+ {
-+ ASSERT_COND(macId < FM_MAX_NUM_OF_1G_MACS);
-+ p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId] = 0;
-+ }
-+
-+#ifdef FM_LOW_END_RESTRICTION
-+ if ((hardwarePortId==0x1) || (hardwarePortId==0x29))
-+ p_Fm->p_FmStateStruct->lowEndRestriction = FALSE;
-+#endif /* FM_LOW_END_RESTRICTION */
-+ XX_UnlockIntrSpinlock(p_Fm->h_Spinlock, intFlags);
-+}
-+
-+t_Error FmIsPortStalled(t_Handle h_Fm, uint8_t hardwarePortId, bool *p_IsStalled)
-+{
-+    /* If the prefetch wasn't configured yet (neither enabled nor disabled),
-+       report the port as not configured; otherwise return the stored value */
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+        *p_PreFetchConfigured = (p_Fm->portsPreFetchValue[portNum]);
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_IS_PORT_STALLED;
-+ msg.msgBody[0] = hardwarePortId;
-+ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(hardwarePortId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+
-+ *p_IsStalled = (bool)!!(*(uint8_t*)(reply.replyBody));
-+
-+ return (t_Error)(reply.error);
-+ }
-+ else if (!p_Fm->baseAddr)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+
-+ *p_IsStalled = fman_is_port_stalled(fpm_rg, hardwarePortId);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmResumeStalledPort(t_Handle h_Fm, uint8_t hardwarePortId)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_Error err;
-+ bool isStalled;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_RESUME_STALLED_PORT;
-+ msg.msgBody[0] = hardwarePortId;
-+ replyLength = sizeof(uint32_t);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(hardwarePortId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return (t_Error)(reply.error);
-+ }
-+ else if (!p_Fm->baseAddr)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Not available for this FM revision!"));
-+
-+ /* Get port status */
-+ err = FmIsPortStalled(h_Fm, hardwarePortId, &isStalled);
-+ if (err)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't get port status"));
-+ if (!isStalled)
-+ return E_OK;
-+
-+ fman_resume_stalled_port(fpm_rg, hardwarePortId);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmResetMac(t_Handle h_Fm, e_FmMacType type, uint8_t macId)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_Error err;
-+ struct fman_fpm_regs *fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("FMan MAC reset!"));
-+#endif /*(DPAA_VERSION >= 11)*/
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcMacParams macParams;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ macParams.id = macId;
-+ macParams.enumType = (uint32_t)type;
-+ msg.msgId = FM_RESET_MAC;
-+ memcpy(msg.msgBody, &macParams, sizeof(macParams));
-+ replyLength = sizeof(uint32_t);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(macParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return (t_Error)(reply.error);
-+ }
-+ else if (!p_Fm->baseAddr)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+
-+ err = (t_Error)fman_reset_mac(fpm_rg, macId, !!(type == e_FM_MAC_10G));
-+
-+ if (err == -EBUSY)
-+ return ERROR_CODE(E_TIMEOUT);
-+ else if (err)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal MAC ID"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSetMacMaxFrame(t_Handle h_Fm, e_FmMacType type, uint8_t macId, uint16_t mtu)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcMacMaxFrameParams macMaxFrameLengthParams;
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ macMaxFrameLengthParams.macParams.id = macId;
-+ macMaxFrameLengthParams.macParams.enumType = (uint32_t)type;
-+ macMaxFrameLengthParams.maxFrameLength = (uint16_t)mtu;
-+ msg.msgId = FM_SET_MAC_MAX_FRAME;
-+ memcpy(msg.msgBody, &macMaxFrameLengthParams, sizeof(macMaxFrameLengthParams));
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(macMaxFrameLengthParams),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+
-+    /* if the port is already initialized, check that MaxFrameLength is smaller
-+     * than or equal to the port's max */
-+#if (defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS))
-+ if (type == e_FM_MAC_10G)
-+ {
-+ if ((!p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId])
-+ || (p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId] &&
-+ (mtu <= p_Fm->p_FmStateStruct->portMaxFrameLengths10G[macId])))
-+ p_Fm->p_FmStateStruct->macMaxFrameLengths10G[macId] = mtu;
-+ else
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("MAC maxFrameLength is larger than Port maxFrameLength"));
-+
-+ }
-+ else
-+#else
-+ UNUSED(type);
-+#endif /* (defined(FM_MAX_NUM_OF_10G_MACS) && ... */
-+ if ((!p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId])
-+ || (p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId] &&
-+ (mtu <= p_Fm->p_FmStateStruct->portMaxFrameLengths1G[macId])))
-+ p_Fm->p_FmStateStruct->macMaxFrameLengths1G[macId] = mtu;
-+ else
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, ("MAC maxFrameLength is larger than Port maxFrameLength"));
-+
-+ return E_OK;
-+}
-+
-+uint16_t FmGetClockFreq(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ /* for multicore environment: this depends on the
-+ * fact that fmClkFreq was properly initialized at "init". */
-+ return p_Fm->p_FmStateStruct->fmClkFreq;
-+}
-+
-+uint16_t FmGetMacClockFreq(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ return p_Fm->p_FmStateStruct->fmMacClkFreq;
-+}
-+
-+uint32_t FmGetTimeStampScale(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength, timeStamp;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_GET_TIMESTAMP_SCALE;
-+ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return 0;
-+ }
-+ if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t)))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return 0;
-+ }
-+
-+ memcpy((uint8_t*)&timeStamp, reply.replyBody, sizeof(uint32_t));
-+ return timeStamp;
-+ }
-+ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->baseAddr)
-+ {
-+ if (!(GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_tsc1) & FPM_TS_CTL_EN))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("timestamp is not enabled!"));
-+ return 0;
-+ }
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ DBG(WARNING, ("No IPC - can't validate FM if timestamp enabled."));
-+
-+ return p_Fm->p_FmStateStruct->count1MicroBit;
-+}
-+
-+t_Error FmEnableRamsEcc(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ p_Fm->p_FmStateStruct->ramsEccOwners++;
-+ p_Fm->p_FmStateStruct->internalCall = TRUE;
-+
-+ return FM_EnableRamsEcc(p_Fm);
-+}
-+
-+t_Error FmDisableRamsEcc(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ ASSERT_COND(p_Fm->p_FmStateStruct->ramsEccOwners);
-+ p_Fm->p_FmStateStruct->ramsEccOwners--;
-+
-+ if (p_Fm->p_FmStateStruct->ramsEccOwners==0)
-+ {
-+ p_Fm->p_FmStateStruct->internalCall = TRUE;
-+ return FM_DisableRamsEcc(p_Fm);
-+ }
-+
-+ return E_OK;
-+}
-+
-+uint8_t FmGetGuestId(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ return p_Fm->guestId;
-+}
-+
-+bool FmIsMaster(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ return (p_Fm->guestId == NCSW_MASTER_ID);
-+}
-+
-+t_Error FmSetSizeOfFifo(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint32_t *p_SizeOfFifo,
-+ uint32_t *p_ExtraSizeOfFifo,
-+ bool initialConfig)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_FmIpcPortRsrcParams rsrcParams;
-+ t_Error err;
-+ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
-+ uint32_t sizeOfFifo = *p_SizeOfFifo, extraSizeOfFifo = *p_ExtraSizeOfFifo;
-+ uint16_t currentVal = 0, currentExtraVal = 0;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ rsrcParams.hardwarePortId = hardwarePortId;
-+ rsrcParams.val = sizeOfFifo;
-+ rsrcParams.extra = extraSizeOfFifo;
-+ rsrcParams.boolInitialConfig = (uint8_t)initialConfig;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_SET_SIZE_OF_FIFO;
-+ memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams));
-+ replyLength = sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(rsrcParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return (t_Error)(reply.error);
-+ }
-+ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->baseAddr)
-+ {
-+ DBG(WARNING, ("No IPC - can't validate FM total-fifo size."));
-+ fman_set_size_of_fifo(bmi_rg, hardwarePortId, sizeOfFifo, extraSizeOfFifo);
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+                     ("running in guest-mode with neither IPC nor mapped register!"));
-+
-+ if (!initialConfig)
-+ {
-+ /* !initialConfig - runtime change of existing value.
-+ * - read the current FIFO and extra FIFO size */
-+ currentExtraVal = fman_get_size_of_extra_fifo(bmi_rg, hardwarePortId);
-+ currentVal = fman_get_size_of_fifo(bmi_rg, hardwarePortId);
-+ }
-+
-+ if (extraSizeOfFifo > currentExtraVal)
-+ {
-+ if (extraSizeOfFifo && !p_Fm->p_FmStateStruct->extraFifoPoolSize)
-+ /* if this is the first time a port requires extraFifoPoolSize, the total extraFifoPoolSize
-+ * must be initialized to 1 buffer per port
-+ */
-+ p_Fm->p_FmStateStruct->extraFifoPoolSize = FM_MAX_NUM_OF_RX_PORTS*BMI_FIFO_UNITS;
-+
-+ p_Fm->p_FmStateStruct->extraFifoPoolSize = MAX(p_Fm->p_FmStateStruct->extraFifoPoolSize, extraSizeOfFifo);
-+ }
-+
-+ /* check that there are enough uncommitted fifo size */
-+ if ((p_Fm->p_FmStateStruct->accumulatedFifoSize - currentVal + sizeOfFifo) >
-+ (p_Fm->p_FmStateStruct->totalFifoSize - p_Fm->p_FmStateStruct->extraFifoPoolSize)){
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("Port request fifo size + accumulated size > total FIFO size:"));
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("port 0x%x requested %d bytes, extra size = %d, accumulated size = %d total size = %d",
-+ hardwarePortId, sizeOfFifo, p_Fm->p_FmStateStruct->extraFifoPoolSize,
-+ p_Fm->p_FmStateStruct->accumulatedFifoSize,
-+ p_Fm->p_FmStateStruct->totalFifoSize));
-+ }
-+ else
-+ {
-+ /* update accumulated */
-+ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedFifoSize >= currentVal);
-+ p_Fm->p_FmStateStruct->accumulatedFifoSize -= currentVal;
-+ p_Fm->p_FmStateStruct->accumulatedFifoSize += sizeOfFifo;
-+ fman_set_size_of_fifo(bmi_rg, hardwarePortId, sizeOfFifo, extraSizeOfFifo);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSetNumOfTasks(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint8_t *p_NumOfTasks,
-+ uint8_t *p_NumOfExtraTasks,
-+ bool initialConfig)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ t_Error err;
-+ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
-+ uint8_t currentVal = 0, currentExtraVal = 0, numOfTasks = *p_NumOfTasks, numOfExtraTasks = *p_NumOfExtraTasks;
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcPortRsrcParams rsrcParams;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ rsrcParams.hardwarePortId = hardwarePortId;
-+ rsrcParams.val = numOfTasks;
-+ rsrcParams.extra = numOfExtraTasks;
-+ rsrcParams.boolInitialConfig = (uint8_t)initialConfig;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_SET_NUM_OF_TASKS;
-+ memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams));
-+ replyLength = sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(rsrcParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return (t_Error)(reply.error);
-+ }
-+ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->baseAddr)
-+ {
-+ DBG(WARNING, ("No IPC - can't validate FM total-num-of-tasks."));
-+ fman_set_num_of_tasks(bmi_rg, hardwarePortId, numOfTasks, numOfExtraTasks);
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+                     ("running in guest-mode with neither IPC nor mapped register!"));
-+
-+ if (!initialConfig)
-+ {
-+ /* !initialConfig - runtime change of existing value.
-+ * - read the current number of tasks */
-+ currentVal = fman_get_num_of_tasks(bmi_rg, hardwarePortId);
-+ currentExtraVal = fman_get_num_extra_tasks(bmi_rg, hardwarePortId);
-+ }
-+
-+ if (numOfExtraTasks > currentExtraVal)
-+ p_Fm->p_FmStateStruct->extraTasksPoolSize =
-+ (uint8_t)MAX(p_Fm->p_FmStateStruct->extraTasksPoolSize, numOfExtraTasks);
-+
-+ /* check that there are enough uncommitted tasks */
-+ if ((p_Fm->p_FmStateStruct->accumulatedNumOfTasks - currentVal + numOfTasks) >
-+ (p_Fm->p_FmStateStruct->totalNumOfTasks - p_Fm->p_FmStateStruct->extraTasksPoolSize))
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE,
-+ ("Requested numOfTasks and extra tasks pool for fm%d exceed total numOfTasks.",
-+ p_Fm->p_FmStateStruct->fmId));
-+ else
-+ {
-+ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfTasks >= currentVal);
-+ /* update accumulated */
-+ p_Fm->p_FmStateStruct->accumulatedNumOfTasks -= currentVal;
-+ p_Fm->p_FmStateStruct->accumulatedNumOfTasks += numOfTasks;
-+ fman_set_num_of_tasks(bmi_rg, hardwarePortId, numOfTasks, numOfExtraTasks);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FmSetNumOfOpenDmas(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint8_t *p_NumOfOpenDmas,
-+ uint8_t *p_NumOfExtraOpenDmas,
-+ bool initialConfig)
-+
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ t_Error err;
-+ struct fman_bmi_regs *bmi_rg = p_Fm->p_FmBmiRegs;
-+ uint8_t numOfOpenDmas = *p_NumOfOpenDmas, numOfExtraOpenDmas = *p_NumOfExtraOpenDmas;
-+ uint8_t totalNumDmas = 0, currentVal = 0, currentExtraVal = 0;
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcPortRsrcParams rsrcParams;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ rsrcParams.hardwarePortId = hardwarePortId;
-+ rsrcParams.val = numOfOpenDmas;
-+ rsrcParams.extra = numOfExtraOpenDmas;
-+ rsrcParams.boolInitialConfig = (uint8_t)initialConfig;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_SET_NUM_OF_OPEN_DMAS;
-+ memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams));
-+ replyLength = sizeof(uint32_t);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) + sizeof(rsrcParams),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != sizeof(uint32_t))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return (t_Error)(reply.error);
-+ }
-+#ifdef FM_HAS_TOTAL_DMAS
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("running in guest-mode without IPC!"));
-+#else
-+ else if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->baseAddr &&
-+ (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6))
-+ {
-+ /*DBG(WARNING, ("No IPC - can't validate FM total-num-of-dmas."));*/
-+
-+ if (!numOfOpenDmas)
-+ {
-+            /* first config without an explicit value: do nothing - the reset value shouldn't be
-+               changed; just read the registers and save the values for the port */
-+ *p_NumOfOpenDmas = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
-+ *p_NumOfExtraOpenDmas = fman_get_num_extra_dmas(bmi_rg, hardwarePortId);
-+ }
-+ else
-+ /* whether it is the first time with explicit value, or runtime "set" - write register */
-+ fman_set_num_of_open_dmas(bmi_rg,
-+ hardwarePortId,
-+ numOfOpenDmas,
-+ numOfExtraOpenDmas,
-+ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize);
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED,
-+                     ("running in guest-mode with neither IPC nor mapped register!"));
-+#endif /* FM_HAS_TOTAL_DMAS */
-+
-+ if (!initialConfig)
-+ {
-+        /* !initialConfig - runtime change of existing value.
-+         * - read the current number of open DMAs */
-+ currentExtraVal = fman_get_num_extra_dmas(bmi_rg, hardwarePortId);
-+ currentVal = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
-+ }
-+
-+#ifdef FM_NO_GUARANTEED_RESET_VALUES
-+ /* it's illegal to be in a state where this is not the first set and no value is specified */
-+ ASSERT_COND(initialConfig || numOfOpenDmas);
-+ if (!numOfOpenDmas)
-+ {
-+ /* !numOfOpenDmas - first configuration according to values in regs.
-+         * - read the current number of open DMAs */
-+ currentExtraVal = fman_get_num_extra_dmas(bmi_rg, hardwarePortId);
-+ currentVal = fman_get_num_of_dmas(bmi_rg, hardwarePortId);
-+        /* This is the first configuration and the user did not specify a value (!numOfOpenDmas),
-+         * so the reset values will be used and we just save them for resource management */
-+ p_Fm->p_FmStateStruct->extraOpenDmasPoolSize =
-+ (uint8_t)MAX(p_Fm->p_FmStateStruct->extraOpenDmasPoolSize, currentExtraVal);
-+ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas += currentVal;
-+ *p_NumOfOpenDmas = currentVal;
-+ *p_NumOfExtraOpenDmas = currentExtraVal;
-+ return E_OK;
-+ }
-+#endif /* FM_NO_GUARANTEED_RESET_VALUES */
-+
-+ if (numOfExtraOpenDmas > currentExtraVal)
-+ p_Fm->p_FmStateStruct->extraOpenDmasPoolSize =
-+ (uint8_t)MAX(p_Fm->p_FmStateStruct->extraOpenDmasPoolSize, numOfExtraOpenDmas);
-+
-+#ifdef FM_HAS_TOTAL_DMAS
-+ if ((p_Fm->p_FmStateStruct->revInfo.majorRev < 6) &&
-+ (p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas - currentVal + numOfOpenDmas >
-+ p_Fm->p_FmStateStruct->maxNumOfOpenDmas))
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE,
-+ ("Requested numOfOpenDmas for fm%d exceeds total numOfOpenDmas.",
-+ p_Fm->p_FmStateStruct->fmId));
-+#else
-+ if ((p_Fm->p_FmStateStruct->revInfo.majorRev >= 6) &&
-+#ifdef FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
-+ !((p_Fm->p_FmStateStruct->revInfo.majorRev == 6) &&
-+ (p_Fm->p_FmStateStruct->revInfo.minorRev == 0)) &&
-+#endif /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 */
-+ (p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas - currentVal + numOfOpenDmas > DMA_THRESH_MAX_COMMQ + 1))
-+ RETURN_ERROR(MAJOR, E_NOT_AVAILABLE,
-+ ("Requested numOfOpenDmas for fm%d exceeds DMA Command queue (%d)",
-+ p_Fm->p_FmStateStruct->fmId, DMA_THRESH_MAX_COMMQ+1));
-+#endif /* FM_HAS_TOTAL_DMAS */
-+ else
-+ {
-+ ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas >= currentVal);
-+        /* update accumulated */
-+ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas -= currentVal;
-+ p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas += numOfOpenDmas;
-+
-+#ifdef FM_HAS_TOTAL_DMAS
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev < 6)
-+ totalNumDmas = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize);
-+#endif /* FM_HAS_TOTAL_DMAS */
-+ fman_set_num_of_open_dmas(bmi_rg,
-+ hardwarePortId,
-+ numOfOpenDmas,
-+ numOfExtraOpenDmas,
-+ totalNumDmas);
-+ }
-+
-+ return E_OK;
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FmVSPCheckRelativeProfile(t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint16_t relativeProfile)
-+{
-+ t_Fm *p_Fm;
-+ t_FmSp *p_FmPcdSp;
-+ uint8_t swPortIndex=0, hardwarePortId;
-+
-+ ASSERT_COND(h_Fm);
-+ p_Fm = (t_Fm*)h_Fm;
-+
-+ hardwarePortId = SwPortIdToHwPortId(portType,
-+ portId,
-+ p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+ ASSERT_COND(hardwarePortId);
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ p_FmPcdSp = p_Fm->p_FmSp;
-+ ASSERT_COND(p_FmPcdSp);
-+
-+ if (!p_FmPcdSp->portsMapping[swPortIndex].numOfProfiles)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE , ("Port has no allocated profiles"));
-+ if (relativeProfile >= p_FmPcdSp->portsMapping[swPortIndex].numOfProfiles)
-+ RETURN_ERROR(MAJOR, E_NOT_IN_RANGE , ("Profile id is out of range"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FmVSPGetAbsoluteProfileId(t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint16_t relativeProfile,
-+ uint16_t *p_AbsoluteId)
-+{
-+ t_Fm *p_Fm;
-+ t_FmSp *p_FmPcdSp;
-+ uint8_t swPortIndex=0, hardwarePortId;
-+ t_Error err;
-+
-+ ASSERT_COND(h_Fm);
-+ p_Fm = (t_Fm*)h_Fm;
-+
-+ err = FmVSPCheckRelativeProfile(h_Fm, portType, portId, relativeProfile);
-+ if (err != E_OK)
-+ return err;
-+
-+ hardwarePortId = SwPortIdToHwPortId(portType,
-+ portId,
-+ p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+ ASSERT_COND(hardwarePortId);
-+ HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId);
-+
-+ p_FmPcdSp = p_Fm->p_FmSp;
-+ ASSERT_COND(p_FmPcdSp);
-+
-+ *p_AbsoluteId = (uint16_t)(p_FmPcdSp->portsMapping[swPortIndex].profilesBase + relativeProfile);
-+
-+ return E_OK;
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+static t_Error InitFmDma(t_Fm *p_Fm)
-+{
-+ t_Error err;
-+
-+ err = (t_Error)fman_dma_init(p_Fm->p_FmDmaRegs, p_Fm->p_FmDriverParam);
-+ if (err != E_OK)
-+ return err;
-+
-+ /* Allocate MURAM for CAM */
-+ p_Fm->camBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
-+ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*DMA_CAM_SIZEOF_ENTRY),
-+ DMA_CAM_ALIGN));
-+ if (!p_Fm->camBaseAddr)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for DMA CAM failed"));
-+
-+ WRITE_BLOCK(UINT_TO_PTR(p_Fm->camBaseAddr),
-+ 0,
-+ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*DMA_CAM_SIZEOF_ENTRY));
-+
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev == 2)
-+ {
-+ FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->camBaseAddr));
-+
-+ p_Fm->camBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
-+ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*72 + 128),
-+ 64));
-+ if (!p_Fm->camBaseAddr)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for DMA CAM failed"));
-+
-+ WRITE_BLOCK(UINT_TO_PTR(p_Fm->camBaseAddr),
-+ 0,
-+ (uint32_t)(p_Fm->p_FmDriverParam->dma_cam_num_of_entries*72 + 128));
-+
-+ switch(p_Fm->p_FmDriverParam->dma_cam_num_of_entries)
-+ {
-+ case (8):
-+ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xff000000);
-+ break;
-+ case (16):
-+ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xffff0000);
-+ break;
-+ case (24):
-+ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xffffff00);
-+ break;
-+ case (32):
-+ WRITE_UINT32(*(uint32_t*)p_Fm->camBaseAddr, 0xffffffff);
-+ break;
-+ default:
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("wrong dma_cam_num_of_entries"));
-+ }
-+ }
-+
-+ p_Fm->p_FmDriverParam->cam_base_addr =
-+ (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->camBaseAddr)) - p_Fm->fmMuramPhysBaseAddr);
-+
-+ return E_OK;
-+}
-+
-+static t_Error InitFmFpm(t_Fm *p_Fm)
-+{
-+ return (t_Error)fman_fpm_init(p_Fm->p_FmFpmRegs, p_Fm->p_FmDriverParam);
-+}
-+
-+static t_Error InitFmBmi(t_Fm *p_Fm)
-+{
-+ return (t_Error)fman_bmi_init(p_Fm->p_FmBmiRegs, p_Fm->p_FmDriverParam);
-+}
-+
-+static t_Error InitFmQmi(t_Fm *p_Fm)
-+{
-+ return (t_Error)fman_qmi_init(p_Fm->p_FmQmiRegs, p_Fm->p_FmDriverParam);
-+}
-+
-+static t_Error InitGuestMode(t_Fm *p_Fm)
-+{
-+ t_Error err = E_OK;
-+ int i;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+
-+ ASSERT_COND(p_Fm);
-+ ASSERT_COND(p_Fm->guestId != NCSW_MASTER_ID);
-+
-+ /* build the FM guest partition IPC address */
-+ if (Sprint (p_Fm->fmModuleName, "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, p_Fm->guestId) != (p_Fm->guestId<10 ? 6:7))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+
-+ /* build the FM master partition IPC address */
-+ memset(p_Fm->fmIpcHandlerModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
-+ if (Sprint (p_Fm->fmIpcHandlerModuleName[0], "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, NCSW_MASTER_ID) != 6)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+
-+ for (i=0;i<e_FM_EV_DUMMY_LAST;i++)
-+ p_Fm->intrMng[i].f_Isr = UnimplementedIsr;
-+
-+ p_Fm->h_IpcSessions[0] = XX_IpcInitSession(p_Fm->fmIpcHandlerModuleName[0], p_Fm->fmModuleName);
-+ if (p_Fm->h_IpcSessions[0])
-+ {
-+ uint8_t isMasterAlive;
-+ t_FmIpcParams ipcParams;
-+
-+ err = XX_IpcRegisterMsgHandler(p_Fm->fmModuleName, FmGuestHandleIpcMsgCB, p_Fm, FM_IPC_MAX_REPLY_SIZE);
-+ if (err)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_MASTER_IS_ALIVE;
-+ msg.msgBody[0] = p_Fm->guestId;
-+ replyLength = sizeof(uint32_t) + sizeof(uint8_t);
-+ do
-+ {
-+ blockingFlag = TRUE;
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId)+sizeof(p_Fm->guestId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ IpcMsgCompletionCB,
-+ p_Fm)) != E_OK)
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ while (blockingFlag) ;
-+ if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t)))
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ isMasterAlive = *(uint8_t*)(reply.replyBody);
-+ } while (!isMasterAlive);
-+
-+ /* read FM parameters and save */
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_GET_PARAMS;
-+ replyLength = sizeof(uint32_t) + sizeof(t_FmIpcParams);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmIpcParams)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ memcpy((uint8_t*)&ipcParams, reply.replyBody, sizeof(t_FmIpcParams));
-+
-+ p_Fm->p_FmStateStruct->fmClkFreq = ipcParams.fmClkFreq;
-+ p_Fm->p_FmStateStruct->fmMacClkFreq = ipcParams.fmMacClkFreq;
-+ p_Fm->p_FmStateStruct->revInfo.majorRev = ipcParams.majorRev;
-+ p_Fm->p_FmStateStruct->revInfo.minorRev = ipcParams.minorRev;
-+ }
-+ else
-+ {
-+ DBG(WARNING, ("FM Guest mode - without IPC"));
-+ if (!p_Fm->p_FmStateStruct->fmClkFreq)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("No fmClkFreq configured for guest without IPC"));
-+ if (p_Fm->baseAddr)
-+ {
-+ fman_get_revision(p_Fm->p_FmFpmRegs,
-+ &p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ &p_Fm->p_FmStateStruct->revInfo.minorRev);
-+
-+ }
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ p_Fm->partVSPBase = AllocVSPsForPartition(p_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
-+ if (p_Fm->partVSPBase == (uint8_t)(ILLEGAL_BASE))
-+ DBG(WARNING, ("partition VSPs allocation is FAILED"));
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* General FM driver initialization */
-+ if (p_Fm->baseAddr)
-+ p_Fm->fmMuramPhysBaseAddr =
-+ (uint64_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->baseAddr + FM_MM_MURAM)));
-+
-+ XX_Free(p_Fm->p_FmDriverParam);
-+ p_Fm->p_FmDriverParam = NULL;
-+
-+ if ((p_Fm->guestId == NCSW_MASTER_ID) ||
-+ (p_Fm->h_IpcSessions[0]))
-+ {
-+ FM_DisableRamsEcc(p_Fm);
-+ FmMuramClear(p_Fm->h_FmMuram);
-+ FM_EnableRamsEcc(p_Fm);
-+ }
-+
-+ return E_OK;
-+}
-+
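-+/*
-+ * Guest-mode IPC handshake above, summarized as a sequence (editor's note,
-+ * not part of the original sources):
-+ *   1. register "FM_<fmId>_<guestId>" as this guest's IPC endpoint
-+ *   2. poll the master endpoint ("FM_<fmId>_<master>") with
-+ *      FM_MASTER_IS_ALIVE until it answers
-+ *   3. fetch fmClkFreq/fmMacClkFreq and the revision via FM_GET_PARAMS
-+ *   4. allocate the partition VSPs and resolve the MURAM physical base
-+ */
-+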
-+static __inline__ enum fman_exceptions FmanExceptionTrans(e_FmExceptions exception)
-+{
-+ switch (exception) {
-+ case e_FM_EX_DMA_BUS_ERROR:
-+ return E_FMAN_EX_DMA_BUS_ERROR;
-+ case e_FM_EX_DMA_READ_ECC:
-+ return E_FMAN_EX_DMA_READ_ECC;
-+ case e_FM_EX_DMA_SYSTEM_WRITE_ECC:
-+ return E_FMAN_EX_DMA_SYSTEM_WRITE_ECC;
-+ case e_FM_EX_DMA_FM_WRITE_ECC:
-+ return E_FMAN_EX_DMA_FM_WRITE_ECC;
-+ case e_FM_EX_FPM_STALL_ON_TASKS:
-+ return E_FMAN_EX_FPM_STALL_ON_TASKS;
-+ case e_FM_EX_FPM_SINGLE_ECC:
-+ return E_FMAN_EX_FPM_SINGLE_ECC;
-+ case e_FM_EX_FPM_DOUBLE_ECC:
-+ return E_FMAN_EX_FPM_DOUBLE_ECC;
-+ case e_FM_EX_QMI_SINGLE_ECC:
-+ return E_FMAN_EX_QMI_SINGLE_ECC;
-+ case e_FM_EX_QMI_DOUBLE_ECC:
-+ return E_FMAN_EX_QMI_DOUBLE_ECC;
-+ case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
-+ return E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
-+ case e_FM_EX_BMI_LIST_RAM_ECC:
-+ return E_FMAN_EX_BMI_LIST_RAM_ECC;
-+ case e_FM_EX_BMI_STORAGE_PROFILE_ECC:
-+ return E_FMAN_EX_BMI_STORAGE_PROFILE_ECC;
-+ case e_FM_EX_BMI_STATISTICS_RAM_ECC:
-+ return E_FMAN_EX_BMI_STATISTICS_RAM_ECC;
-+ case e_FM_EX_BMI_DISPATCH_RAM_ECC:
-+ return E_FMAN_EX_BMI_DISPATCH_RAM_ECC;
-+ case e_FM_EX_IRAM_ECC:
-+ return E_FMAN_EX_IRAM_ECC;
-+ case e_FM_EX_MURAM_ECC:
-+ return E_FMAN_EX_MURAM_ECC;
-+ default:
-+ return E_FMAN_EX_DMA_BUS_ERROR;
-+ }
-+}
-+
-+uint8_t SwPortIdToHwPortId(e_FmPortType type, uint8_t relativePortId, uint8_t majorRev, uint8_t minorRev)
-+{
-+ switch (type)
-+ {
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
-+ CHECK_PORT_ID_OH_PORTS(relativePortId);
-+ return (uint8_t)(BASE_OH_PORTID + (relativePortId));
-+ case (e_FM_PORT_TYPE_RX):
-+ CHECK_PORT_ID_1G_RX_PORTS(relativePortId);
-+ return (uint8_t)(BASE_1G_RX_PORTID + (relativePortId));
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ /* The 10G port in T1024 (FMan Version 6.4) is the first port.
-+ * This is the reason why the 1G port offset is used.
-+ */
-+ if (majorRev == 6 && minorRev == 4)
-+ {
-+ CHECK_PORT_ID_1G_RX_PORTS(relativePortId);
-+ return (uint8_t)(BASE_1G_RX_PORTID + (relativePortId));
-+ }
-+ else
-+ {
-+ CHECK_PORT_ID_10G_RX_PORTS(relativePortId);
-+ return (uint8_t)(BASE_10G_RX_PORTID + (relativePortId));
-+ }
-+ case (e_FM_PORT_TYPE_TX):
-+ CHECK_PORT_ID_1G_TX_PORTS(relativePortId);
-+ return (uint8_t)(BASE_1G_TX_PORTID + (relativePortId));
-+ case (e_FM_PORT_TYPE_TX_10G):
-+ /* The 10G port in T1024 (FMan Version 6.4) is the first port.
-+ * This is the reason why the 1G port offset is used.
-+ */
-+ if (majorRev == 6 && minorRev == 4)
-+ {
-+ CHECK_PORT_ID_1G_TX_PORTS(relativePortId);
-+ return (uint8_t)(BASE_1G_TX_PORTID + (relativePortId));
-+ }
-+ else
-+ {
-+ CHECK_PORT_ID_10G_TX_PORTS(relativePortId);
-+ return (uint8_t)(BASE_10G_TX_PORTID + (relativePortId));
-+ }
-+ default:
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal port type"));
-+ return 0;
-+ }
-+}
-+
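-+/*
-+ * Worked example for the T1024 (FMan v6.4) special case above (editor's
-+ * note, not part of the original sources): on v6.4 the single 10G port
-+ * shares the 1G port-ID space, so
-+ *   SwPortIdToHwPortId(e_FM_PORT_TYPE_RX_10G, 0, 6, 4) == BASE_1G_RX_PORTID
-+ * whereas on any other revision, e.g. v6.0,
-+ *   SwPortIdToHwPortId(e_FM_PORT_TYPE_RX_10G, 0, 6, 0) == BASE_10G_RX_PORTID
-+ */
-+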
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+t_Error FmDumpPortRegs (t_Handle h_Fm, uint8_t hardwarePortId)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+
-+ DECLARE_DUMP;
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(((p_Fm->guestId == NCSW_MASTER_ID) ||
-+ p_Fm->baseAddr), E_INVALID_OPERATION);
-+
-+ DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], ("fmbm_pp for port %u", (hardwarePortId)));
-+ DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], sizeof(uint32_t));
-+
-+ DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], ("fmbm_pfs for port %u", (hardwarePortId )));
-+ DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], sizeof(uint32_t));
-+
-+ DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId-1], ("fmbm_spliodn for port %u", (hardwarePortId)));
-+ DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId-1], sizeof(uint32_t));
-+
-+ DUMP_TITLE(&p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId], ("fmfp_ps for port %u", (hardwarePortId)));
-+ DUMP_MEMORY(&p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId], sizeof(uint32_t));
-+
-+ DUMP_TITLE(&p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId/2], ("fmdmplr for port %u", (hardwarePortId)));
-+ DUMP_MEMORY(&p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId/2], sizeof(uint32_t));
-+
-+ return E_OK;
-+}
-+#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
-+
-+
-+/*****************************************************************************/
-+/* API Init unit functions */
-+/*****************************************************************************/
-+t_Handle FM_Config(t_FmParams *p_FmParam)
-+{
-+ t_Fm *p_Fm;
-+ uint8_t i;
-+ uintptr_t baseAddr;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_FmParam, E_NULL_POINTER, NULL);
-+ SANITY_CHECK_RETURN_VALUE(((p_FmParam->firmware.p_Code && p_FmParam->firmware.size) ||
-+ (!p_FmParam->firmware.p_Code && !p_FmParam->firmware.size)),
-+ E_INVALID_VALUE, NULL);
-+
-+ baseAddr = p_FmParam->baseAddr;
-+
-+ /* Allocate FM structure */
-+ p_Fm = (t_Fm *) XX_Malloc(sizeof(t_Fm));
-+ if (!p_Fm)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM driver structure"));
-+ return NULL;
-+ }
-+ memset(p_Fm, 0, sizeof(t_Fm));
-+
-+ p_Fm->p_FmStateStruct = (t_FmStateStruct *) XX_Malloc(sizeof(t_FmStateStruct));
-+ if (!p_Fm->p_FmStateStruct)
-+ {
-+ XX_Free(p_Fm);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Status structure"));
-+ return NULL;
-+ }
-+ memset(p_Fm->p_FmStateStruct, 0, sizeof(t_FmStateStruct));
-+
-+ /* Initialize FM parameters which will be kept by the driver */
-+ p_Fm->p_FmStateStruct->fmId = p_FmParam->fmId;
-+ p_Fm->guestId = p_FmParam->guestId;
-+
-+ for (i=0; i<FM_MAX_NUM_OF_HW_PORT_IDS; i++)
-+ p_Fm->p_FmStateStruct->portsTypes[i] = e_FM_PORT_TYPE_DUMMY;
-+
-+ /* Allocate the FM driver's parameters structure */
-+ p_Fm->p_FmDriverParam = (struct fman_cfg *)XX_Malloc(sizeof(struct fman_cfg));
-+ if (!p_Fm->p_FmDriverParam)
-+ {
-+ XX_Free(p_Fm->p_FmStateStruct);
-+ XX_Free(p_Fm);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM driver parameters"));
-+ return NULL;
-+ }
-+ memset(p_Fm->p_FmDriverParam, 0, sizeof(struct fman_cfg));
-+
-+#if (DPAA_VERSION >= 11)
-+ p_Fm->p_FmSp = (t_FmSp *)XX_Malloc(sizeof(t_FmSp));
-+ if (!p_Fm->p_FmSp)
-+ {
-+ XX_Free(p_Fm->p_FmDriverParam);
-+ XX_Free(p_Fm->p_FmStateStruct);
-+ XX_Free(p_Fm);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("allocation for internal data structure failed"));
-+ return NULL;
-+ }
-+ memset(p_Fm->p_FmSp, 0, sizeof(t_FmSp));
-+
-+ for (i=0; i<FM_VSP_MAX_NUM_OF_ENTRIES; i++)
-+ p_Fm->p_FmSp->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* Initialize FM parameters which will be kept by the driver */
-+ p_Fm->p_FmStateStruct->fmId = p_FmParam->fmId;
-+ p_Fm->h_FmMuram = p_FmParam->h_FmMuram;
-+ p_Fm->h_App = p_FmParam->h_App;
-+ p_Fm->p_FmStateStruct->fmClkFreq = p_FmParam->fmClkFreq;
-+ p_Fm->p_FmStateStruct->fmMacClkFreq = p_FmParam->fmClkFreq / ((!p_FmParam->fmMacClkRatio)? 2: p_FmParam->fmMacClkRatio);
-+ p_Fm->f_Exception = p_FmParam->f_Exception;
-+ p_Fm->f_BusError = p_FmParam->f_BusError;
-+ p_Fm->p_FmFpmRegs = (struct fman_fpm_regs *)UINT_TO_PTR(baseAddr + FM_MM_FPM);
-+ p_Fm->p_FmBmiRegs = (struct fman_bmi_regs *)UINT_TO_PTR(baseAddr + FM_MM_BMI);
-+ p_Fm->p_FmQmiRegs = (struct fman_qmi_regs *)UINT_TO_PTR(baseAddr + FM_MM_QMI);
-+ p_Fm->p_FmDmaRegs = (struct fman_dma_regs *)UINT_TO_PTR(baseAddr + FM_MM_DMA);
-+ p_Fm->p_FmRegs = (struct fman_regs *)UINT_TO_PTR(baseAddr + FM_MM_BMI);
-+ p_Fm->baseAddr = baseAddr;
-+ p_Fm->p_FmStateStruct->irq = p_FmParam->irq;
-+ p_Fm->p_FmStateStruct->errIrq = p_FmParam->errIrq;
-+ p_Fm->hcPortInitialized = FALSE;
-+ p_Fm->independentMode = FALSE;
-+
-+ p_Fm->h_Spinlock = XX_InitSpinlock();
-+ if (!p_Fm->h_Spinlock)
-+ {
-+ XX_Free(p_Fm->p_FmDriverParam);
-+ XX_Free(p_Fm->p_FmStateStruct);
-+ XX_Free(p_Fm);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("can't allocate spinlock!"));
-+ return NULL;
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ p_Fm->partVSPBase = p_FmParam->partVSPBase;
-+ p_Fm->partNumOfVSPs = p_FmParam->partNumOfVSPs;
-+ p_Fm->vspBaseAddr = p_FmParam->vspBaseAddr;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ fman_defconfig(p_Fm->p_FmDriverParam,
-+ !!(p_Fm->guestId == NCSW_MASTER_ID));
-+/* override macro-dependent parameters */
-+#ifdef FM_PEDANTIC_DMA
-+ p_Fm->p_FmDriverParam->pedantic_dma = TRUE;
-+ p_Fm->p_FmDriverParam->dma_aid_override = TRUE;
-+#endif /* FM_PEDANTIC_DMA */
-+#ifndef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+ p_Fm->p_FmDriverParam->qmi_deq_option_support = TRUE;
-+#endif /* !FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
-+
-+ p_Fm->p_FmStateStruct->ramsEccEnable = FALSE;
-+ p_Fm->p_FmStateStruct->extraFifoPoolSize = 0;
-+ p_Fm->p_FmStateStruct->exceptions = DEFAULT_exceptions;
-+ p_Fm->resetOnInit = DEFAULT_resetOnInit;
-+ p_Fm->f_ResetOnInitOverride = DEFAULT_resetOnInitOverrideCallback;
-+ p_Fm->fwVerify = DEFAULT_VerifyUcode;
-+ p_Fm->firmware.size = p_FmParam->firmware.size;
-+ if (p_Fm->firmware.size)
-+ {
-+ p_Fm->firmware.p_Code = (uint32_t *)XX_Malloc(p_Fm->firmware.size);
-+ if (!p_Fm->firmware.p_Code)
-+ {
-+ XX_FreeSpinlock(p_Fm->h_Spinlock);
-+ XX_Free(p_Fm->p_FmStateStruct);
-+ XX_Free(p_Fm->p_FmDriverParam);
-+ XX_Free(p_Fm);
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM firmware code"));
-+ return NULL;
-+ }
-+ memcpy(p_Fm->firmware.p_Code, p_FmParam->firmware.p_Code ,p_Fm->firmware.size);
-+ }
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ return p_Fm;
-+
-+ /* read revision */
-+ /* Chip dependent, will be configured in Init */
-+ fman_get_revision(p_Fm->p_FmFpmRegs,
-+ &p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ &p_Fm->p_FmStateStruct->revInfo.minorRev);
-+
-+#ifdef FM_AID_MODE_NO_TNUM_SW005
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ p_Fm->p_FmDriverParam->dma_aid_mode = e_FM_DMA_AID_OUT_PORT_ID;
-+#endif /* FM_AID_MODE_NO_TNUM_SW005 */
-+#ifndef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev != 4)
-+ p_Fm->p_FmDriverParam->qmi_def_tnums_thresh = QMI_DEF_TNUMS_THRESH;
-+#endif /* FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
-+
-+ p_Fm->p_FmStateStruct->totalFifoSize = 0;
-+ p_Fm->p_FmStateStruct->totalNumOfTasks =
-+ DEFAULT_totalNumOfTasks(p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+
-+#ifdef FM_HAS_TOTAL_DMAS
-+ p_Fm->p_FmStateStruct->maxNumOfOpenDmas = BMI_MAX_NUM_OF_DMAS;
-+#endif /* FM_HAS_TOTAL_DMAS */
-+#if (DPAA_VERSION < 11)
-+ p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer = DEFAULT_dmaCommQLow;
-+ p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer = DEFAULT_dmaCommQHigh;
-+ p_Fm->p_FmDriverParam->dma_cam_num_of_entries = DEFAULT_dmaCamNumOfEntries;
-+ p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer = DEFAULT_dmaReadIntBufLow;
-+ p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer = DEFAULT_dmaReadIntBufHigh;
-+ p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer = DEFAULT_dmaWriteIntBufLow;
-+ p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer = DEFAULT_dmaWriteIntBufHigh;
-+ p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats = DEFAULT_axiDbgNumOfBeats;
-+#endif /* (DPAA_VERSION < 11) */
-+#ifdef FM_NO_TNUM_AGING
-+ p_Fm->p_FmDriverParam->tnum_aging_period = 0;
-+#endif
-+ p_Fm->tnumAgingPeriod = p_Fm->p_FmDriverParam->tnum_aging_period;
-+
-+ return p_Fm;
-+}
-+
-+/**************************************************************************//**
-+ @Function FM_Init
-+
-+ @Description Initializes the FM module
-+
-+ @Param[in] h_Fm - FM module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_Init(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_cfg *p_FmDriverParam = NULL;
-+ t_Error err = E_OK;
-+ int i;
-+ t_FmRevisionInfo revInfo;
-+ struct fman_rg fman_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+
-+ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
-+ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
-+ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
-+ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ p_Fm->p_FmStateStruct->count1MicroBit = FM_TIMESTAMP_1_USEC_BIT;
-+ p_Fm->p_FmDriverParam->num_of_fman_ctrl_evnt_regs = FM_NUM_OF_FMAN_CTRL_EVENT_REGS;
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ return InitGuestMode(p_Fm);
-+
-+ /* If the user did not configure totalFifoSize (totalFifoSize == 0), use the
-+ * chip-specific default; otherwise use the user's configuration.
-+ */
-+ if (p_Fm->p_FmStateStruct->totalFifoSize == 0)
-+ p_Fm->p_FmStateStruct->totalFifoSize = DEFAULT_totalFifoSize(p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+
-+ CHECK_INIT_PARAMETERS(p_Fm, CheckFmParameters);
-+
-+ p_FmDriverParam = p_Fm->p_FmDriverParam;
-+
-+ FM_GetRevision(p_Fm, &revInfo);
-+
-+ /* clear revision-dependent non-existing exceptions */
-+#ifdef FM_NO_DISPATCH_RAM_ECC
-+ if ((revInfo.majorRev != 4) &&
-+ (revInfo.majorRev < 6))
-+ p_Fm->p_FmStateStruct->exceptions &= ~FM_EX_BMI_DISPATCH_RAM_ECC;
-+#endif /* FM_NO_DISPATCH_RAM_ECC */
-+
-+#ifdef FM_QMI_NO_ECC_EXCEPTIONS
-+ if (revInfo.majorRev == 4)
-+ p_Fm->p_FmStateStruct->exceptions &= ~(FM_EX_QMI_SINGLE_ECC | FM_EX_QMI_DOUBLE_ECC);
-+#endif /* FM_QMI_NO_ECC_EXCEPTIONS */
-+
-+#ifdef FM_QMI_NO_SINGLE_ECC_EXCEPTION
-+ if (revInfo.majorRev >= 6)
-+ p_Fm->p_FmStateStruct->exceptions &= ~FM_EX_QMI_SINGLE_ECC;
-+#endif /* FM_QMI_NO_SINGLE_ECC_EXCEPTION */
-+
-+ FmMuramClear(p_Fm->h_FmMuram);
-+
-+ /* clear CPG */
-+ IOMemSet32(UINT_TO_PTR(p_Fm->baseAddr + FM_MM_CGP), 0, FM_PORT_NUM_OF_CONGESTION_GRPS);
-+
-+ /* add to the default exceptions the user's definitions */
-+ p_Fm->p_FmStateStruct->exceptions |= p_Fm->userSetExceptions;
-+
-+ /* Reset the FM if required */
-+ if (p_Fm->resetOnInit)
-+ {
-+#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
-+ if ((err = FwNotResetErratumBugzilla6173WA(p_Fm)) != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+#else /* not FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
-+
-+ if (p_Fm->f_ResetOnInitOverride)
-+ {
-+ /* Perform user specific FMan reset */
-+ p_Fm->f_ResetOnInitOverride(h_Fm);
-+ }
-+ else
-+ {
-+ /* Perform FMan reset */
-+ FmReset(h_Fm);
-+ }
-+
-+ if (fman_is_qmi_halt_not_busy_state(p_Fm->p_FmQmiRegs))
-+ {
-+ fman_resume(p_Fm->p_FmFpmRegs);
-+ XX_UDelay(100);
-+ }
-+#endif /* not FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
-+ }
-+
-+#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
-+ if (!p_Fm->resetOnInit) /* Skip operations done in errata workaround */
-+ {
-+#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
-+ /* Load FMan-Controller code to IRAM */
-+
-+ ClearIRam(p_Fm);
-+
-+ if (p_Fm->firmware.p_Code && (LoadFmanCtrlCode(p_Fm) != E_OK))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
-+ }
-+#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+ /* save first 256 byte in MURAM */
-+ p_Fm->resAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram, 256, 0));
-+ if (!p_Fm->resAddr)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for reserved Area failed"));
-+
-+ WRITE_BLOCK(UINT_TO_PTR(p_Fm->resAddr), 0, 256);
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+#if (DPAA_VERSION >= 11)
-+ p_Fm->partVSPBase = AllocVSPsForPartition(h_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
-+ if (p_Fm->partVSPBase == (uint8_t)(ILLEGAL_BASE))
-+ DBG(WARNING, ("partition VSPs allocation is FAILED"));
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ /* General FM driver initialization */
-+ p_Fm->fmMuramPhysBaseAddr =
-+ (uint64_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->baseAddr + FM_MM_MURAM)));
-+
-+ for (i=0;i<e_FM_EV_DUMMY_LAST;i++)
-+ p_Fm->intrMng[i].f_Isr = UnimplementedIsr;
-+ for (i=0;i<FM_NUM_OF_FMAN_CTRL_EVENT_REGS;i++)
-+ p_Fm->fmanCtrlIntr[i].f_Isr = UnimplementedFmanCtrlIsr;
-+
-+ p_FmDriverParam->exceptions = p_Fm->p_FmStateStruct->exceptions;
-+
-+ /**********************/
-+ /* Init DMA Registers */
-+ /**********************/
-+ err = InitFmDma(p_Fm);
-+ if (err != E_OK)
-+ {
-+ FreeInitResources(p_Fm);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /**********************/
-+ /* Init FPM Registers */
-+ /**********************/
-+ err = InitFmFpm(p_Fm);
-+ if (err != E_OK)
-+ {
-+ FreeInitResources(p_Fm);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /* define common resources */
-+ /* allocate MURAM for FIFO according to total size */
-+ p_Fm->fifoBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
-+ p_Fm->p_FmStateStruct->totalFifoSize,
-+ BMI_FIFO_ALIGN));
-+ if (!p_Fm->fifoBaseAddr)
-+ {
-+ FreeInitResources(p_Fm);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for BMI FIFO failed"));
-+ }
-+
-+ p_FmDriverParam->fifo_base_addr = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->fifoBaseAddr)) - p_Fm->fmMuramPhysBaseAddr);
-+ p_FmDriverParam->total_fifo_size = p_Fm->p_FmStateStruct->totalFifoSize;
-+ p_FmDriverParam->total_num_of_tasks = p_Fm->p_FmStateStruct->totalNumOfTasks;
-+ p_FmDriverParam->clk_freq = p_Fm->p_FmStateStruct->fmClkFreq;
-+
-+ /**********************/
-+ /* Init BMI Registers */
-+ /**********************/
-+ err = InitFmBmi(p_Fm);
-+ if (err != E_OK)
-+ {
-+ FreeInitResources(p_Fm);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /**********************/
-+ /* Init QMI Registers */
-+ /**********************/
-+ err = InitFmQmi(p_Fm);
-+ if (err != E_OK)
-+ {
-+ FreeInitResources(p_Fm);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /* build the FM master partition IPC address */
-+ if (Sprint (p_Fm->fmModuleName, "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, NCSW_MASTER_ID) != 6)
-+ {
-+ FreeInitResources(p_Fm);
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
-+ }
-+
-+ err = XX_IpcRegisterMsgHandler(p_Fm->fmModuleName, FmHandleIpcMsgCB, p_Fm, FM_IPC_MAX_REPLY_SIZE);
-+ if (err)
-+ {
-+ FreeInitResources(p_Fm);
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+ }
-+
-+ /* Register the FM interrupts handlers */
-+ if (p_Fm->p_FmStateStruct->irq != NO_IRQ)
-+ {
-+ XX_SetIntr(p_Fm->p_FmStateStruct->irq, FM_EventIsr, p_Fm);
-+ XX_EnableIntr(p_Fm->p_FmStateStruct->irq);
-+ }
-+
-+ if (p_Fm->p_FmStateStruct->errIrq != NO_IRQ)
-+ {
-+ XX_SetIntr(p_Fm->p_FmStateStruct->errIrq, (void (*) (t_Handle))FM_ErrorIsr, p_Fm);
-+ XX_EnableIntr(p_Fm->p_FmStateStruct->errIrq);
-+ }
-+
-+ err = (t_Error)fman_enable(&fman_rg , p_FmDriverParam);
-+ if (err != E_OK)
-+ return err; /* FIXME */
-+
-+ EnableTimeStamp(p_Fm);
-+
-+ if (p_Fm->firmware.p_Code)
-+ {
-+ XX_Free(p_Fm->firmware.p_Code);
-+ p_Fm->firmware.p_Code = NULL;
-+ }
-+
-+ XX_Free(p_Fm->p_FmDriverParam);
-+ p_Fm->p_FmDriverParam = NULL;
-+
-+ return E_OK;
-+}
-+
-+/**************************************************************************//**
-+ @Function FM_Free
-+
-+ @Description Frees all resources that were assigned to FM module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_Fm - FM module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_Free(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_rg fman_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
-+ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
-+ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
-+ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+#if (DPAA_VERSION >= 11)
-+ FreeVSPsForPartition(h_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
-+
-+ if (p_Fm->p_FmSp)
-+ {
-+ XX_Free(p_Fm->p_FmSp);
-+ p_Fm->p_FmSp = NULL;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (p_Fm->fmModuleName[0] != 0)
-+ XX_IpcUnregisterMsgHandler(p_Fm->fmModuleName);
-+
-+ if (!p_Fm->recoveryMode)
-+ XX_Free(p_Fm->p_FmStateStruct);
-+
-+ XX_Free(p_Fm);
-+
-+ return E_OK;
-+ }
-+
-+ fman_free_resources(&fman_rg);
-+
-+ if ((p_Fm->guestId == NCSW_MASTER_ID) && (p_Fm->fmModuleName[0] != 0))
-+ XX_IpcUnregisterMsgHandler(p_Fm->fmModuleName);
-+
-+ if (p_Fm->p_FmStateStruct)
-+ {
-+ if (p_Fm->p_FmStateStruct->irq != NO_IRQ)
-+ {
-+ XX_DisableIntr(p_Fm->p_FmStateStruct->irq);
-+ XX_FreeIntr(p_Fm->p_FmStateStruct->irq);
-+ }
-+ if (p_Fm->p_FmStateStruct->errIrq != NO_IRQ)
-+ {
-+ XX_DisableIntr(p_Fm->p_FmStateStruct->errIrq);
-+ XX_FreeIntr(p_Fm->p_FmStateStruct->errIrq);
-+ }
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ FreeVSPsForPartition(h_Fm, p_Fm->partVSPBase, p_Fm->partNumOfVSPs, p_Fm->guestId);
-+
-+ if (p_Fm->p_FmSp)
-+ {
-+ XX_Free(p_Fm->p_FmSp);
-+ p_Fm->p_FmSp = NULL;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ if (p_Fm->h_Spinlock)
-+ XX_FreeSpinlock(p_Fm->h_Spinlock);
-+
-+ if (p_Fm->p_FmDriverParam)
-+ {
-+ if (p_Fm->firmware.p_Code)
-+ XX_Free(p_Fm->firmware.p_Code);
-+ XX_Free(p_Fm->p_FmDriverParam);
-+ p_Fm->p_FmDriverParam = NULL;
-+ }
-+
-+ FreeInitResources(p_Fm);
-+
-+ if (!p_Fm->recoveryMode && p_Fm->p_FmStateStruct)
-+ XX_Free(p_Fm->p_FmStateStruct);
-+
-+ XX_Free(p_Fm);
-+
-+ return E_OK;
-+}
-+
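-+/*
-+ * Illustrative bring-up sketch (editor's addition, not part of the original
-+ * driver sources): the intended call order for the master partition is
-+ * FM_Config() -> optional FM_Config*() overrides -> FM_Init() -> run-time
-+ * use -> FM_Free(). All field values below are placeholders/assumptions.
-+ */
-+#if 0
-+static t_Error ExampleFmBringUp(uintptr_t fmBaseAddr, t_Handle h_FmMuram)
-+{
-+ t_FmParams fmParams;
-+ t_Handle h_Fm;
-+ t_Error err;
-+
-+ memset(&fmParams, 0, sizeof(fmParams));
-+ fmParams.fmId = 0; /* placeholder FM instance */
-+ fmParams.guestId = NCSW_MASTER_ID; /* master partition */
-+ fmParams.baseAddr = fmBaseAddr;
-+ fmParams.h_FmMuram = h_FmMuram;
-+ fmParams.fmClkFreq = 700; /* MHz, placeholder */
-+ fmParams.irq = NO_IRQ; /* placeholders: no interrupts wired */
-+ fmParams.errIrq = NO_IRQ;
-+
-+ h_Fm = FM_Config(&fmParams); /* allocate driver structures, set defaults */
-+ if (!h_Fm)
-+ return E_INVALID_HANDLE;
-+
-+ /* optional FM_Config*() calls go here, before FM_Init() */
-+ err = FM_Init(h_Fm); /* program DMA/FPM/BMI/QMI and enable the FM */
-+ if (err != E_OK)
-+ {
-+ FM_Free(h_Fm);
-+ return err;
-+ }
-+
-+ /* ... run-time use (FM_GetCounter(), FM_SetPortsBandwidth(), ...) ... */
-+
-+ return FM_Free(h_Fm); /* release all FM resources */
-+}
-+#endif
-+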
-+/*************************************************/
-+/* API Advanced Init unit functions */
-+/*************************************************/
-+
-+t_Error FM_ConfigResetOnInit(t_Handle h_Fm, bool enable)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->resetOnInit = enable;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigResetOnInitOverrideCallback(t_Handle h_Fm, t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->f_ResetOnInitOverride = f_ResetOnInitOverride;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigTotalFifoSize(t_Handle h_Fm, uint32_t totalFifoSize)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmStateStruct->totalFifoSize = totalFifoSize;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaCacheOverride(t_Handle h_Fm, e_FmDmaCacheOverride cacheOverride)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ enum fman_dma_cache_override fsl_cache_override;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ FMAN_CACHE_OVERRIDE_TRANS(fsl_cache_override, cacheOverride)
-+ p_Fm->p_FmDriverParam->dma_cache_override = fsl_cache_override;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaAidOverride(t_Handle h_Fm, bool aidOverride)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->dma_aid_override = aidOverride;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaAidMode(t_Handle h_Fm, e_FmDmaAidMode aidMode)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ enum fman_dma_aid_mode fsl_aid_mode;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ FMAN_AID_MODE_TRANS(fsl_aid_mode, aidMode);
-+ p_Fm->p_FmDriverParam->dma_aid_mode = fsl_aid_mode;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaAxiDbgNumOfBeats(t_Handle h_Fm, uint8_t axiDbgNumOfBeats)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+#if (DPAA_VERSION >= 11)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
-+#else
-+ p_Fm->p_FmDriverParam->dma_axi_dbg_num_of_beats = axiDbgNumOfBeats;
-+
-+ return E_OK;
-+#endif /* (DPAA_VERSION >= 11) */
-+}
-+
-+t_Error FM_ConfigDmaCamNumOfEntries(t_Handle h_Fm, uint8_t numOfEntries)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->dma_cam_num_of_entries = numOfEntries;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaDbgCounter(t_Handle h_Fm, e_FmDmaDbgCntMode fmDmaDbgCntMode)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ enum fman_dma_dbg_cnt_mode fsl_dma_dbg_cnt;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ FMAN_DMA_DBG_CNT_TRANS(fsl_dma_dbg_cnt, fmDmaDbgCntMode);
-+ p_Fm->p_FmDriverParam->dma_dbg_cnt_mode = fsl_dma_dbg_cnt;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaStopOnBusErr(t_Handle h_Fm, bool stop)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->dma_stop_on_bus_error = stop;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaEmergency(t_Handle h_Fm, t_FmDmaEmergency *p_Emergency)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ enum fman_dma_emergency_level fsl_dma_emer;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ FMAN_DMA_EMER_TRANS(fsl_dma_emer, p_Emergency->emergencyLevel);
-+ p_Fm->p_FmDriverParam->dma_en_emergency = TRUE;
-+ p_Fm->p_FmDriverParam->dma_emergency_bus_select = (uint32_t)p_Emergency->emergencyBusSelect;
-+ p_Fm->p_FmDriverParam->dma_emergency_level = fsl_dma_emer;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaEmergencySmoother(t_Handle h_Fm, uint32_t emergencyCnt)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->dma_en_emergency_smoother = TRUE;
-+ p_Fm->p_FmDriverParam->dma_emergency_switch_counter = emergencyCnt;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaErr(t_Handle h_Fm, e_FmDmaErr dmaErr)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ enum fman_dma_err fsl_dma_err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ FMAN_DMA_ERR_TRANS(fsl_dma_err, dmaErr);
-+ p_Fm->p_FmDriverParam->dma_err = fsl_dma_err;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigCatastrophicErr(t_Handle h_Fm, e_FmCatastrophicErr catastrophicErr)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ enum fman_catastrophic_err fsl_catastrophic_err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ FMAN_CATASTROPHIC_ERR_TRANS(fsl_catastrophic_err, catastrophicErr);
-+ p_Fm->p_FmDriverParam->catastrophic_err = fsl_catastrophic_err;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigEnableMuramTestMode(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
-+
-+ p_Fm->p_FmDriverParam->en_muram_test_mode = TRUE;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigEnableIramTestMode(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE );
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
-+
-+ p_Fm->p_FmDriverParam->en_iram_test_mode = TRUE;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigHaltOnExternalActivation(t_Handle h_Fm, bool enable)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->halt_on_external_activ = enable;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigHaltOnUnrecoverableEccError(t_Handle h_Fm, bool enable)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
-+
-+ p_Fm->p_FmDriverParam->halt_on_unrecov_ecc_err = enable;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigException(t_Handle h_Fm, e_FmExceptions exception, bool enable)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ uint32_t bitMask = 0;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Fm->userSetExceptions |= bitMask;
-+ else
-+ p_Fm->p_FmStateStruct->exceptions &= ~bitMask;
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigExternalEccRamsEnable(t_Handle h_Fm, bool enable)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->external_ecc_rams_enable = enable;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigTnumAgingPeriod(t_Handle h_Fm, uint16_t tnumAgingPeriod)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->tnum_aging_period = tnumAgingPeriod;
-+ p_Fm->tnumAgingPeriod = p_Fm->p_FmDriverParam->tnum_aging_period;
-+
-+ return E_OK;
-+}
-+
-+/****************************************************/
-+/* Hidden-DEBUG Only API */
-+/****************************************************/
-+
-+t_Error FM_ConfigThresholds(t_Handle h_Fm, t_FmThresholds *p_FmThresholds)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->disp_limit_tsh = p_FmThresholds->dispLimit;
-+ p_Fm->p_FmDriverParam->prs_disp_tsh = p_FmThresholds->prsDispTh;
-+ p_Fm->p_FmDriverParam->plcr_disp_tsh = p_FmThresholds->plcrDispTh;
-+ p_Fm->p_FmDriverParam->kg_disp_tsh = p_FmThresholds->kgDispTh;
-+ p_Fm->p_FmDriverParam->bmi_disp_tsh = p_FmThresholds->bmiDispTh;
-+ p_Fm->p_FmDriverParam->qmi_enq_disp_tsh = p_FmThresholds->qmiEnqDispTh;
-+ p_Fm->p_FmDriverParam->qmi_deq_disp_tsh = p_FmThresholds->qmiDeqDispTh;
-+ p_Fm->p_FmDriverParam->fm_ctl1_disp_tsh = p_FmThresholds->fmCtl1DispTh;
-+ p_Fm->p_FmDriverParam->fm_ctl2_disp_tsh = p_FmThresholds->fmCtl2DispTh;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaSosEmergencyThreshold(t_Handle h_Fm, uint32_t dmaSosEmergency)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->dma_sos_emergency = dmaSosEmergency;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaWriteBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds)
-+
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+#if (DPAA_VERSION >= 11)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
-+#else
-+ p_Fm->p_FmDriverParam->dma_write_buf_tsh_asrt_emer = p_FmDmaThresholds->assertEmergency;
-+ p_Fm->p_FmDriverParam->dma_write_buf_tsh_clr_emer = p_FmDmaThresholds->clearEmergency;
-+
-+ return E_OK;
-+#endif
-+}
-+
-+t_Error FM_ConfigDmaCommQThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->dma_comm_qtsh_asrt_emer = p_FmDmaThresholds->assertEmergency;
-+ p_Fm->p_FmDriverParam->dma_comm_qtsh_clr_emer = p_FmDmaThresholds->clearEmergency;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigDmaReadBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+#if (DPAA_VERSION >= 11)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Not available for this FM revision!"));
-+#else
-+ p_Fm->p_FmDriverParam->dma_read_buf_tsh_clr_emer = p_FmDmaThresholds->clearEmergency;
-+ p_Fm->p_FmDriverParam->dma_read_buf_tsh_asrt_emer = p_FmDmaThresholds->assertEmergency;
-+
-+ return E_OK;
-+#endif
-+}
-+
-+t_Error FM_ConfigDmaWatchdog(t_Handle h_Fm, uint32_t watchdogValue)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ p_Fm->p_FmDriverParam->dma_watchdog = watchdogValue;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_ConfigEnableCounters(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+UNUSED(p_Fm);
-+
-+ return E_OK;
-+}
-+
-+t_Error FmGetSetParams(t_Handle h_Fm, t_FmGetSetParams *p_Params)
-+{
-+ t_Fm* p_Fm = (t_Fm*)h_Fm;
-+ if (p_Params->setParams.type & UPDATE_FM_CLD)
-+ {
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_cld, GET_UINT32(
-+ p_Fm->p_FmFpmRegs->fm_cld) | 0x00000800);
-+ }
-+ if (p_Params->setParams.type & CLEAR_IRAM_READY)
-+ {
-+ t_FMIramRegs *p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
-+ WRITE_UINT32(p_Iram->iready,GET_UINT32(p_Iram->iready) & ~IRAM_READY);
-+ }
-+ if (p_Params->setParams.type & UPDATE_FPM_EXTC)
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_extc,0x80000000);
-+ if (p_Params->setParams.type & UPDATE_FPM_EXTC_CLEAR)
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_extc,0x00800000);
-+ if (p_Params->setParams.type & UPDATE_FPM_BRKC_SLP)
-+ {
-+ if (p_Params->setParams.sleep)
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc, GET_UINT32(
-+ p_Fm->p_FmFpmRegs->fmfp_brkc) | FPM_BRKC_SLP);
-+ else
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc, GET_UINT32(
-+ p_Fm->p_FmFpmRegs->fmfp_brkc) & ~FPM_BRKC_SLP);
-+ }
-+ if (p_Params->getParams.type & GET_FM_CLD)
-+ p_Params->getParams.fm_cld = GET_UINT32(p_Fm->p_FmFpmRegs->fm_cld);
-+ if (p_Params->getParams.type & GET_FMQM_GS)
-+ p_Params->getParams.fmqm_gs = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_gs);
-+ if (p_Params->getParams.type & GET_FM_NPI)
-+ p_Params->getParams.fm_npi = GET_UINT32(p_Fm->p_FmFpmRegs->fm_npi);
-+ if (p_Params->getParams.type & GET_FMFP_EXTC)
-+ p_Params->getParams.fmfp_extc = GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_extc);
-+ return E_OK;
-+}
-+
-+
-+/****************************************************/
-+/* API Run-time Control unit functions */
-+/****************************************************/
-+void FM_EventIsr(t_Handle h_Fm)
-+{
-+#define FM_M_CALL_1G_MAC_ISR(_id) \
-+ { \
-+ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].guestId) \
-+ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id), pending); \
-+ else \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0+_id)].h_SrcHandle);\
-+ }
-+#define FM_M_CALL_10G_MAC_ISR(_id) \
-+ { \
-+ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].guestId) \
-+ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id), pending); \
-+ else \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_10G_MAC0+_id)].h_SrcHandle);\
-+ }
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ uint32_t pending, event;
-+ struct fman_fpm_regs *fpm_rg;
-+
-+ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ /* normal interrupts */
-+ pending = fman_get_normal_pending(fpm_rg);
-+ if (!pending)
-+ return;
-+ if (pending & INTR_EN_WAKEUP) // this is a wake up from sleep interrupt
-+ {
-+ t_FmGetSetParams fmGetSetParams;
-+ memset(&fmGetSetParams, 0, sizeof (t_FmGetSetParams));
-+ fmGetSetParams.setParams.type = UPDATE_FPM_BRKC_SLP;
-+ fmGetSetParams.setParams.sleep = 0;
-+ FmGetSetParams(h_Fm, &fmGetSetParams);
-+ }
-+ if (pending & INTR_EN_QMI)
-+ QmiEvent(p_Fm);
-+ if (pending & INTR_EN_PRS)
-+ p_Fm->intrMng[e_FM_EV_PRS].f_Isr(p_Fm->intrMng[e_FM_EV_PRS].h_SrcHandle);
-+ if (pending & INTR_EN_PLCR)
-+ p_Fm->intrMng[e_FM_EV_PLCR].f_Isr(p_Fm->intrMng[e_FM_EV_PLCR].h_SrcHandle);
-+ if (pending & INTR_EN_TMR)
-+ p_Fm->intrMng[e_FM_EV_TMR].f_Isr(p_Fm->intrMng[e_FM_EV_TMR].h_SrcHandle);
-+
-+ /* MAC events may belong to different partitions */
-+ if (pending & INTR_EN_1G_MAC0)
-+ FM_M_CALL_1G_MAC_ISR(0);
-+ if (pending & INTR_EN_1G_MAC1)
-+ FM_M_CALL_1G_MAC_ISR(1);
-+ if (pending & INTR_EN_1G_MAC2)
-+ FM_M_CALL_1G_MAC_ISR(2);
-+ if (pending & INTR_EN_1G_MAC3)
-+ FM_M_CALL_1G_MAC_ISR(3);
-+ if (pending & INTR_EN_1G_MAC4)
-+ FM_M_CALL_1G_MAC_ISR(4);
-+ if (pending & INTR_EN_1G_MAC5)
-+ FM_M_CALL_1G_MAC_ISR(5);
-+ if (pending & INTR_EN_1G_MAC6)
-+ FM_M_CALL_1G_MAC_ISR(6);
-+ if (pending & INTR_EN_1G_MAC7)
-+ FM_M_CALL_1G_MAC_ISR(7);
-+ if (pending & INTR_EN_10G_MAC0)
-+ FM_M_CALL_10G_MAC_ISR(0);
-+ if (pending & INTR_EN_10G_MAC1)
-+ FM_M_CALL_10G_MAC_ISR(1);
-+
-+ /* IM port events may belong to different partitions */
-+ if (pending & INTR_EN_REV0)
-+ {
-+ event = fman_get_controller_event(fpm_rg, 0);
-+ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_0].guestId)
-+ /*TODO IPC ISR For Fman Ctrl */
-+ ASSERT_COND(0);
-+ /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_0, pending); */
-+ else
-+ p_Fm->fmanCtrlIntr[0].f_Isr(p_Fm->fmanCtrlIntr[0].h_SrcHandle, event);
-+
-+ }
-+ if (pending & INTR_EN_REV1)
-+ {
-+ event = fman_get_controller_event(fpm_rg, 1);
-+ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_1].guestId)
-+ /*TODO IPC ISR For Fman Ctrl */
-+ ASSERT_COND(0);
-+ /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_1, pending); */
-+ else
-+ p_Fm->fmanCtrlIntr[1].f_Isr(p_Fm->fmanCtrlIntr[1].h_SrcHandle, event);
-+ }
-+ if (pending & INTR_EN_REV2)
-+ {
-+ event = fman_get_controller_event(fpm_rg, 2);
-+ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_2].guestId)
-+ /*TODO IPC ISR For Fman Ctrl */
-+ ASSERT_COND(0);
-+ /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_2, pending); */
-+ else
-+ p_Fm->fmanCtrlIntr[2].f_Isr(p_Fm->fmanCtrlIntr[2].h_SrcHandle, event);
-+ }
-+ if (pending & INTR_EN_REV3)
-+ {
-+ event = fman_get_controller_event(fpm_rg, 3);
-+ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_3].guestId)
-+ /*TODO IPC ISR For Fman Ctrl */
-+ ASSERT_COND(0);
-+ /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_3, pending); */
-+ else
-+ p_Fm->fmanCtrlIntr[3].f_Isr(p_Fm->fmanCtrlIntr[3].h_SrcHandle, event);
-+ }
-+#ifdef FM_MACSEC_SUPPORT
-+ if (pending & INTR_EN_MACSEC_MAC0)
-+ {
-+ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_MACSEC_MAC0].guestId)
-+ SendIpcIsr(p_Fm, e_FM_EV_MACSEC_MAC0, pending);
-+ else
-+ p_Fm->intrMng[e_FM_EV_MACSEC_MAC0].f_Isr(p_Fm->intrMng[e_FM_EV_MACSEC_MAC0].h_SrcHandle);
-+ }
-+#endif /* FM_MACSEC_SUPPORT */
-+}
-+
-+t_Error FM_ErrorIsr(t_Handle h_Fm)
-+{
-+#define FM_M_CALL_1G_MAC_ERR_ISR(_id) \
-+ { \
-+ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].guestId) \
-+ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id), pending); \
-+ else \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].h_SrcHandle);\
-+ }
-+#define FM_M_CALL_10G_MAC_ERR_ISR(_id) \
-+ { \
-+ if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].guestId) \
-+ SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id), pending); \
-+ else \
-+ p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].h_SrcHandle);\
-+ }
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ uint32_t pending;
-+ struct fman_fpm_regs *fpm_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ /* error interrupts */
-+ pending = fman_get_fpm_error_interrupts(fpm_rg);
-+ if (!pending)
-+ return ERROR_CODE(E_EMPTY);
-+
-+ if (pending & ERR_INTR_EN_BMI)
-+ BmiErrEvent(p_Fm);
-+ if (pending & ERR_INTR_EN_QMI)
-+ QmiErrEvent(p_Fm);
-+ if (pending & ERR_INTR_EN_FPM)
-+ FpmErrEvent(p_Fm);
-+ if (pending & ERR_INTR_EN_DMA)
-+ DmaErrEvent(p_Fm);
-+ if (pending & ERR_INTR_EN_IRAM)
-+ IramErrIntr(p_Fm);
-+ if (pending & ERR_INTR_EN_MURAM)
-+ MuramErrIntr(p_Fm);
-+ if (pending & ERR_INTR_EN_PRS)
-+ p_Fm->intrMng[e_FM_EV_ERR_PRS].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_PRS].h_SrcHandle);
-+ if (pending & ERR_INTR_EN_PLCR)
-+ p_Fm->intrMng[e_FM_EV_ERR_PLCR].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_PLCR].h_SrcHandle);
-+ if (pending & ERR_INTR_EN_KG)
-+ p_Fm->intrMng[e_FM_EV_ERR_KG].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_KG].h_SrcHandle);
-+
-+ /* MAC events may belong to different partitions */
-+ if (pending & ERR_INTR_EN_1G_MAC0)
-+ FM_M_CALL_1G_MAC_ERR_ISR(0);
-+ if (pending & ERR_INTR_EN_1G_MAC1)
-+ FM_M_CALL_1G_MAC_ERR_ISR(1);
-+ if (pending & ERR_INTR_EN_1G_MAC2)
-+ FM_M_CALL_1G_MAC_ERR_ISR(2);
-+ if (pending & ERR_INTR_EN_1G_MAC3)
-+ FM_M_CALL_1G_MAC_ERR_ISR(3);
-+ if (pending & ERR_INTR_EN_1G_MAC4)
-+ FM_M_CALL_1G_MAC_ERR_ISR(4);
-+ if (pending & ERR_INTR_EN_1G_MAC5)
-+ FM_M_CALL_1G_MAC_ERR_ISR(5);
-+ if (pending & ERR_INTR_EN_1G_MAC6)
-+ FM_M_CALL_1G_MAC_ERR_ISR(6);
-+ if (pending & ERR_INTR_EN_1G_MAC7)
-+ FM_M_CALL_1G_MAC_ERR_ISR(7);
-+ if (pending & ERR_INTR_EN_10G_MAC0)
-+ FM_M_CALL_10G_MAC_ERR_ISR(0);
-+ if (pending & ERR_INTR_EN_10G_MAC1)
-+ FM_M_CALL_10G_MAC_ERR_ISR(1);
-+
-+#ifdef FM_MACSEC_SUPPORT
-+ if (pending & ERR_INTR_EN_MACSEC_MAC0)
-+ {
-+ if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_ERR_MACSEC_MAC0].guestId)
-+ SendIpcIsr(p_Fm, e_FM_EV_ERR_MACSEC_MAC0, pending);
-+ else
-+ p_Fm->intrMng[e_FM_EV_ERR_MACSEC_MAC0].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_MACSEC_MAC0].h_SrcHandle);
-+ }
-+#endif /* FM_MACSEC_SUPPORT */
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_SetPortsBandwidth(t_Handle h_Fm, t_FmPortsBandwidthParams *p_PortsBandwidth)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ int i;
-+ uint8_t sum;
-+ uint8_t hardwarePortId;
-+ uint8_t weights[64];
-+ uint8_t weight, maxPercent = 0;
-+ struct fman_bmi_regs *bmi_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ bmi_rg = p_Fm->p_FmBmiRegs;
-+
-+ memset(weights, 0, (sizeof(uint8_t) * 64));
-+
-+ /* check that all ports add up to 100% */
-+ sum = 0;
-+ for (i=0; i < p_PortsBandwidth->numOfPorts; i++)
-+ sum +=p_PortsBandwidth->portsBandwidths[i].bandwidth;
-+ if (sum != 100)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Sum of ports bandwidth differ from 100%"));
-+
-+ /* find highest percent */
-+ for (i=0; i < p_PortsBandwidth->numOfPorts; i++)
-+ {
-+ if (p_PortsBandwidth->portsBandwidths[i].bandwidth > maxPercent)
-+ maxPercent = p_PortsBandwidth->portsBandwidths[i].bandwidth;
-+ }
-+
-+ ASSERT_COND(maxPercent > 0); /* guaranteed by sum = 100 */
-+
-+ /* calculate weight for each port */
-+ for (i=0; i < p_PortsBandwidth->numOfPorts; i++)
-+ {
-+ weight = (uint8_t)((p_PortsBandwidth->portsBandwidths[i].bandwidth * PORT_MAX_WEIGHT ) / maxPercent);
-+ /* We want an even division across 1..PORT_MAX_WEIGHT, so if the division
-+ is not exact we round up, i.e.:
-+ 0 up to maxPercent/PORT_MAX_WEIGHT get "1",
-+ maxPercent/PORT_MAX_WEIGHT+1 up to (maxPercent/PORT_MAX_WEIGHT)*2 get "2",
-+ ...
-+ maxPercent - maxPercent/PORT_MAX_WEIGHT up to maxPercent get "PORT_MAX_WEIGHT". */
-+ if ((uint8_t)((p_PortsBandwidth->portsBandwidths[i].bandwidth * PORT_MAX_WEIGHT ) % maxPercent))
-+ weight++;
-+
-+ /* find the location of this port within the register */
-+ hardwarePortId =
-+ SwPortIdToHwPortId(p_PortsBandwidth->portsBandwidths[i].type,
-+ p_PortsBandwidth->portsBandwidths[i].relativePortId,
-+ p_Fm->p_FmStateStruct->revInfo.majorRev,
-+ p_Fm->p_FmStateStruct->revInfo.minorRev);
-+
-+ ASSERT_COND(IN_RANGE(1, hardwarePortId, 63));
-+ weights[hardwarePortId] = weight;
-+ }
-+
-+ fman_set_ports_bandwidth(bmi_rg, weights);
-+
-+ return E_OK;
-+}
-+
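-+/*
-+ * Worked example for the weight rounding above (editor's note; the actual
-+ * PORT_MAX_WEIGHT value is platform defined, 16 is only assumed here):
-+ * three ports at 50%/30%/20% give maxPercent = 50, so
-+ *   50% -> (50 * 16) / 50 = 16, remainder 0  -> weight 16
-+ *   30% -> (30 * 16) / 50 = 9,  remainder 30 -> weight 10
-+ *   20% -> (20 * 16) / 50 = 6,  remainder 20 -> weight 7
-+ * i.e. each port's scheduling weight is its bandwidth share scaled to the
-+ * busiest port and rounded up.
-+ */
-+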
-+t_Error FM_EnableRamsEcc(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_fpm_regs *fpm_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+
-+ fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ t_FmIpcMsg msg;
-+ t_Error err;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_ENABLE_RAM_ECC;
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+
-+ if (!p_Fm->p_FmStateStruct->internalCall)
-+ p_Fm->p_FmStateStruct->explicitEnable = TRUE;
-+ p_Fm->p_FmStateStruct->internalCall = FALSE;
-+
-+ if (p_Fm->p_FmStateStruct->ramsEccEnable)
-+ return E_OK;
-+ else
-+ {
-+ fman_enable_rams_ecc(fpm_rg);
-+ p_Fm->p_FmStateStruct->ramsEccEnable = TRUE;
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_DisableRamsEcc(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ bool explicitDisable = FALSE;
-+ struct fman_fpm_regs *fpm_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
-+
-+ fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ if (p_Fm->guestId != NCSW_MASTER_ID)
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msgId = FM_DISABLE_RAM_ECC;
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ return E_OK;
-+ }
-+
-+ if (!p_Fm->p_FmStateStruct->internalCall)
-+ explicitDisable = TRUE;
-+ p_Fm->p_FmStateStruct->internalCall = FALSE;
-+
-+ /* If the RAMs are already disabled, or if they were explicitly enabled and
-+ this call is an indirect (non-explicit) one, ignore the call. */
-+ if (!p_Fm->p_FmStateStruct->ramsEccEnable ||
-+ (p_Fm->p_FmStateStruct->explicitEnable && !explicitDisable))
-+ return E_OK;
-+ else
-+ {
-+ if (p_Fm->p_FmStateStruct->explicitEnable)
-+ /* This is the case where both explicit flags are TRUE.
-+ Clear the flag for the case where the ramsEnable
-+ routines are called next. */
-+ p_Fm->p_FmStateStruct->explicitEnable = FALSE;
-+
-+ fman_enable_rams_ecc(fpm_rg);
-+ p_Fm->p_FmStateStruct->ramsEccEnable = FALSE;
-+ }
-+
-+ return E_OK;
-+}
-+
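-+/*
-+ * Example of the explicit/internal ECC bookkeeping above (editor's note,
-+ * not from the original sources):
-+ *   FM_EnableRamsEcc(h_Fm);    application call: explicitEnable = TRUE
-+ *   FM_DisableRamsEcc(h_Fm);   internal call (internalCall was TRUE):
-+ *                              ignored, ramsEccEnable stays TRUE
-+ *   FM_DisableRamsEcc(h_Fm);   application call: latch cleared,
-+ *                              ramsEccEnable set to FALSE
-+ */
-+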
-+t_Error FM_SetException(t_Handle h_Fm, e_FmExceptions exception, bool enable)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ uint32_t bitMask = 0;
-+ enum fman_exceptions fslException;
-+ struct fman_rg fman_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
-+ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
-+ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
-+ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ GET_EXCEPTION_FLAG(bitMask, exception);
-+ if (bitMask)
-+ {
-+ if (enable)
-+ p_Fm->p_FmStateStruct->exceptions |= bitMask;
-+ else
-+ p_Fm->p_FmStateStruct->exceptions &= ~bitMask;
-+
-+ fslException = FmanExceptionTrans(exception);
-+
-+ return (t_Error)fman_set_exception(&fman_rg,
-+ fslException,
-+ enable);
-+ }
-+ else
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception"));
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_GetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ p_FmRevisionInfo->majorRev = p_Fm->p_FmStateStruct->revInfo.majorRev;
-+ p_FmRevisionInfo->minorRev = p_Fm->p_FmStateStruct->revInfo.minorRev;
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_GetFmanCtrlCodeRevision(t_Handle h_Fm, t_FmCtrlCodeRevisionInfo *p_RevisionInfo)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_FMIramRegs *p_Iram;
-+ uint32_t revInfo;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_RevisionInfo, E_NULL_POINTER);
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_Error err;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength;
-+ t_FmIpcFmanCtrlCodeRevisionInfo ipcRevInfo;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_GET_FMAN_CTRL_CODE_REV;
-+ replyLength = sizeof(uint32_t) + sizeof(t_FmCtrlCodeRevisionInfo);
-+ if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmCtrlCodeRevisionInfo)))
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ memcpy((uint8_t*)&ipcRevInfo, reply.replyBody, sizeof(t_FmCtrlCodeRevisionInfo));
-+ p_RevisionInfo->packageRev = ipcRevInfo.packageRev;
-+ p_RevisionInfo->majorRev = ipcRevInfo.majorRev;
-+ p_RevisionInfo->minorRev = ipcRevInfo.minorRev;
-+ return (t_Error)(reply.error);
-+ }
-+ else if (p_Fm->guestId != NCSW_MASTER_ID)
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("running in guest-mode without IPC!"));
-+
-+ p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
-+ WRITE_UINT32(p_Iram->iadd, 0x4);
-+ while (GET_UINT32(p_Iram->iadd) != 0x4) ;
-+ revInfo = GET_UINT32(p_Iram->idata);
-+ p_RevisionInfo->packageRev = (uint16_t)((revInfo & 0xFFFF0000) >> 16);
-+ p_RevisionInfo->majorRev = (uint8_t)((revInfo & 0x0000FF00) >> 8);
-+ p_RevisionInfo->minorRev = (uint8_t)(revInfo & 0x000000FF);
-+
-+ return E_OK;
-+}
-+
-+uint32_t FM_GetCounter(t_Handle h_Fm, e_FmCounters counter)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_Error err;
-+ uint32_t counterValue;
-+ struct fman_rg fman_rg;
-+ enum fman_counters fsl_counter;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(!p_Fm->p_FmDriverParam, E_INVALID_STATE, 0);
-+
-+ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
-+ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
-+ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
-+ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ uint32_t replyLength, outCounter;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_GET_COUNTER;
-+ memcpy(msg.msgBody, (uint8_t *)&counter, sizeof(uint32_t));
-+ replyLength = sizeof(uint32_t) + sizeof(uint32_t);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId) +sizeof(counterValue),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MAJOR, err, NO_MSG);
-+ return 0;
-+ }
-+ if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t)))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return 0;
-+ }
-+
-+ memcpy((uint8_t*)&outCounter, reply.replyBody, sizeof(uint32_t));
-+ return outCounter;
-+ }
-+ else if (!p_Fm->baseAddr)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Either IPC or 'baseAddress' is required!"));
-+ return 0;
-+ }
-+
-+ /* When applicable (when there is an 'enable counters' bit),
-+ check that the counters are enabled */
-+ switch (counter)
-+ {
-+ case (e_FM_COUNTERS_DEQ_1):
-+ case (e_FM_COUNTERS_DEQ_2):
-+ case (e_FM_COUNTERS_DEQ_3):
-+ if ((p_Fm->p_FmStateStruct->revInfo.majorRev == 4) ||
-+ (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6))
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Requested counter not supported"));
-+ return 0;
-+ }
-+ case (e_FM_COUNTERS_ENQ_TOTAL_FRAME):
-+ case (e_FM_COUNTERS_DEQ_TOTAL_FRAME):
-+ case (e_FM_COUNTERS_DEQ_0):
-+ case (e_FM_COUNTERS_DEQ_FROM_DEFAULT):
-+ case (e_FM_COUNTERS_DEQ_FROM_CONTEXT):
-+ case (e_FM_COUNTERS_DEQ_FROM_FD):
-+ case (e_FM_COUNTERS_DEQ_CONFIRM):
-+ if (!(GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc) & QMI_CFG_EN_COUNTERS))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Requested counter was not enabled"));
-+ return 0;
-+ }
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ FMAN_COUNTERS_TRANS(fsl_counter, counter);
-+ return fman_get_counter(&fman_rg, fsl_counter);
-+}
-+
-+t_Error FM_ModifyCounter(t_Handle h_Fm, e_FmCounters counter, uint32_t val)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_rg fman_rg;
-+ enum fman_counters fsl_counter;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
-+ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
-+ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
-+ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ FMAN_COUNTERS_TRANS(fsl_counter, counter);
-+ return (t_Error)fman_modify_counter(&fman_rg, fsl_counter, val);
-+}
-+
-+void FM_SetDmaEmergency(t_Handle h_Fm, e_FmDmaMuramPort muramPort, bool enable)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_dma_regs *dma_rg;
-+
-+ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ fman_set_dma_emergency(dma_rg, !!(muramPort==e_FM_DMA_MURAM_PORT_WRITE), enable);
-+}
-+
-+void FM_SetDmaExtBusPri(t_Handle h_Fm, e_FmDmaExtBusPri pri)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_dma_regs *dma_rg;
-+
-+ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ fman_set_dma_ext_bus_pri(dma_rg, pri);
-+}
-+
-+void FM_GetDmaStatus(t_Handle h_Fm, t_FmDmaStatus *p_FmDmaStatus)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ uint32_t dmaStatus;
-+ struct fman_dma_regs *dma_rg;
-+
-+ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ if ((p_Fm->guestId != NCSW_MASTER_ID) &&
-+ !p_Fm->baseAddr &&
-+ p_Fm->h_IpcSessions[0])
-+ {
-+ t_FmIpcDmaStatus ipcDmaStatus;
-+ t_FmIpcMsg msg;
-+ t_FmIpcReply reply;
-+ t_Error err;
-+ uint32_t replyLength;
-+
-+ memset(&msg, 0, sizeof(msg));
-+ memset(&reply, 0, sizeof(reply));
-+ msg.msgId = FM_DMA_STAT;
-+ replyLength = sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus);
-+ err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
-+ (uint8_t*)&msg,
-+ sizeof(msg.msgId),
-+ (uint8_t*)&reply,
-+ &replyLength,
-+ NULL,
-+ NULL);
-+ if (err != E_OK)
-+ {
-+ REPORT_ERROR(MINOR, err, NO_MSG);
-+ return;
-+ }
-+ if (replyLength != (sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus)))
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
-+ return;
-+ }
-+ memcpy((uint8_t*)&ipcDmaStatus, reply.replyBody, sizeof(t_FmIpcDmaStatus));
-+
-+ p_FmDmaStatus->cmqNotEmpty = (bool)ipcDmaStatus.boolCmqNotEmpty; /**< Command queue is not empty */
-+ p_FmDmaStatus->busError = (bool)ipcDmaStatus.boolBusError; /**< Bus error occurred */
-+ p_FmDmaStatus->readBufEccError = (bool)ipcDmaStatus.boolReadBufEccError; /**< Double ECC error on buffer Read */
-+ p_FmDmaStatus->writeBufEccSysError =(bool)ipcDmaStatus.boolWriteBufEccSysError; /**< Double ECC error on buffer write from system side */
-+ p_FmDmaStatus->writeBufEccFmError = (bool)ipcDmaStatus.boolWriteBufEccFmError; /**< Double ECC error on buffer write from FM side */
-+        p_FmDmaStatus->singlePortEccError = (bool)ipcDmaStatus.boolSinglePortEccError;  /**< Single port ECC error from FM side */
-+ return;
-+ }
-+ else if (!p_Fm->baseAddr)
-+ {
-+ REPORT_ERROR(MINOR, E_NOT_SUPPORTED,
-+ ("Either IPC or 'baseAddress' is required!"));
-+ return;
-+ }
-+
-+ dmaStatus = fman_get_dma_status(dma_rg);
-+
-+ p_FmDmaStatus->cmqNotEmpty = (bool)(dmaStatus & DMA_STATUS_CMD_QUEUE_NOT_EMPTY);
-+ p_FmDmaStatus->busError = (bool)(dmaStatus & DMA_STATUS_BUS_ERR);
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ p_FmDmaStatus->singlePortEccError = (bool)(dmaStatus & DMA_STATUS_FM_SPDAT_ECC);
-+ else
-+ {
-+ p_FmDmaStatus->readBufEccError = (bool)(dmaStatus & DMA_STATUS_READ_ECC);
-+ p_FmDmaStatus->writeBufEccSysError = (bool)(dmaStatus & DMA_STATUS_SYSTEM_WRITE_ECC);
-+ p_FmDmaStatus->writeBufEccFmError = (bool)(dmaStatus & DMA_STATUS_FM_WRITE_ECC);
-+ }
-+}
-+
-+void FM_Resume(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ struct fman_fpm_regs *fpm_rg;
-+
-+ SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ fpm_rg = p_Fm->p_FmFpmRegs;
-+
-+ fman_resume(fpm_rg);
-+}
-+
-+t_Error FM_GetSpecialOperationCoding(t_Handle h_Fm,
-+ fmSpecialOperations_t spOper,
-+ uint8_t *p_SpOperCoding)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ t_FmCtrlCodeRevisionInfo revInfo;
-+ t_Error err;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR(p_SpOperCoding, E_NULL_POINTER);
-+
-+ if (!spOper)
-+ {
-+ *p_SpOperCoding = 0;
-+ return E_OK;
-+ }
-+
-+ if ((err = FM_GetFmanCtrlCodeRevision(p_Fm, &revInfo)) != E_OK)
-+ {
-+ DBG(WARNING, ("FM in guest-mode without IPC, can't validate firmware revision."));
-+ revInfo.packageRev = IP_OFFLOAD_PACKAGE_NUMBER;
-+ }
-+ else if (!IS_OFFLOAD_PACKAGE(revInfo.packageRev))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Fman ctrl code package"));
-+
-+ switch (spOper)
-+ {
-+ case (FM_SP_OP_CAPWAP_DTLS_DEC):
-+ *p_SpOperCoding = 9;
-+ break;
-+ case (FM_SP_OP_CAPWAP_DTLS_ENC):
-+ *p_SpOperCoding = 10;
-+ break;
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_IPSEC_MANIP):
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_IPSEC_MANIP|FM_SP_OP_RPD):
-+ *p_SpOperCoding = 5;
-+ break;
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_MANIP):
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_MANIP|FM_SP_OP_RPD):
-+ *p_SpOperCoding = 6;
-+ break;
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_RPD):
-+ *p_SpOperCoding = 3;
-+ break;
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN):
-+ *p_SpOperCoding = 1;
-+ break;
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_IPSEC_UPDATE_UDP_LEN|FM_SP_OP_IPSEC_NO_ETH_HDR):
-+ *p_SpOperCoding = 12;
-+ break;
-+ case (FM_SP_OP_IPSEC|FM_SP_OP_RPD):
-+ *p_SpOperCoding = 4;
-+ break;
-+ case (FM_SP_OP_IPSEC):
-+ *p_SpOperCoding = 2;
-+ break;
-+ case (FM_SP_OP_DCL4C):
-+ *p_SpOperCoding = 7;
-+ break;
-+ case (FM_SP_OP_CLEAR_RPD):
-+ *p_SpOperCoding = 8;
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_CtrlMonStart(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ t_FmTrbRegs *p_MonRegs;
-+ uint8_t i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc,
-+ GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc) | FPM_BRKC_RDBG);
-+
-+ for (i = 0; i < FM_NUM_OF_CTRL; i++)
-+ {
-+ p_MonRegs = (t_FmTrbRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_TRB(i));
-+
-+ /* Reset control registers */
-+ WRITE_UINT32(p_MonRegs->tcrh, TRB_TCRH_RESET);
-+ WRITE_UINT32(p_MonRegs->tcrl, TRB_TCRL_RESET);
-+
-+ /* Configure: counter #1 counts all stalls in risc - ldsched stall
-+                      counter #2 counts all stalls in risc - other stall */
-+ WRITE_UINT32(p_MonRegs->tcrl, TRB_TCRL_RESET | TRB_TCRL_UTIL);
-+
-+ /* Enable monitoring */
-+ WRITE_UINT32(p_MonRegs->tcrh, TRB_TCRH_ENABLE_COUNTERS);
-+ }
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_CtrlMonStop(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ t_FmTrbRegs *p_MonRegs;
-+ uint8_t i;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+
-+ for (i = 0; i < FM_NUM_OF_CTRL; i++)
-+ {
-+ p_MonRegs = (t_FmTrbRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_TRB(i));
-+ WRITE_UINT32(p_MonRegs->tcrh, TRB_TCRH_DISABLE_COUNTERS);
-+ }
-+
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc,
-+ GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_brkc) & ~FPM_BRKC_RDBG);
-+
-+ return E_OK;
-+}
-+
-+t_Error FM_CtrlMonGetCounters(t_Handle h_Fm, uint8_t fmCtrlIndex, t_FmCtrlMon *p_Mon)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_Fm;
-+ t_FmTrbRegs *p_MonRegs;
-+ uint64_t clkCnt, utilValue, effValue;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+ SANITY_CHECK_RETURN_ERROR((p_Fm->guestId == NCSW_MASTER_ID), E_NOT_SUPPORTED);
-+ SANITY_CHECK_RETURN_ERROR(p_Mon, E_NULL_POINTER);
-+
-+ if (fmCtrlIndex >= FM_NUM_OF_CTRL)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FM Controller index"));
-+
-+ p_MonRegs = (t_FmTrbRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_TRB(fmCtrlIndex));
-+
-+ clkCnt = (uint64_t)
-+ ((uint64_t)GET_UINT32(p_MonRegs->tpcch) << 32 | GET_UINT32(p_MonRegs->tpccl));
-+
-+ utilValue = (uint64_t)
-+ ((uint64_t)GET_UINT32(p_MonRegs->tpc1h) << 32 | GET_UINT32(p_MonRegs->tpc1l));
-+
-+ effValue = (uint64_t)
-+ ((uint64_t)GET_UINT32(p_MonRegs->tpc2h) << 32 | GET_UINT32(p_MonRegs->tpc2l));
-+
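-+    /* Example (hypothetical values): with clkCnt = 1000, utilValue = 300 and
-+       effValue = 140, percentCnt[0] = (1000 - 300) * 100 / 1000 = 70, and
-+       percentCnt[1] = ((1000 - 300) - 140) * 100 / (1000 - 300) = 80. */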
-+ p_Mon->percentCnt[0] = (uint8_t)div64_u64((clkCnt - utilValue) * 100, clkCnt);
-+ if (clkCnt != utilValue)
-+ p_Mon->percentCnt[1] = (uint8_t)div64_u64(((clkCnt - utilValue) - effValue) * 100, clkCnt - utilValue);
-+ else
-+ p_Mon->percentCnt[1] = 0;
-+
-+ return E_OK;
-+}
-+
-+t_Handle FM_GetMuramHandle(t_Handle h_Fm)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, NULL);
-+
-+ return (p_Fm->h_FmMuram);
-+}
-+
-+/****************************************************/
-+/* Hidden-DEBUG Only API */
-+/****************************************************/
-+t_Error FM_ForceIntr (t_Handle h_Fm, e_FmExceptions exception)
-+{
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ enum fman_exceptions fslException;
-+ struct fman_rg fman_rg;
-+
-+ SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE);
-+
-+ fman_rg.bmi_rg = p_Fm->p_FmBmiRegs;
-+ fman_rg.qmi_rg = p_Fm->p_FmQmiRegs;
-+ fman_rg.fpm_rg = p_Fm->p_FmFpmRegs;
-+ fman_rg.dma_rg = p_Fm->p_FmDmaRegs;
-+
-+ switch (exception)
-+ {
-+ case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
-+ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_EX_QMI_SINGLE_ECC:
-+ if (p_Fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("e_FM_EX_QMI_SINGLE_ECC not supported on this integration."));
-+
-+ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_SINGLE_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_EX_QMI_DOUBLE_ECC:
-+ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_DOUBLE_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_EX_BMI_LIST_RAM_ECC:
-+ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_LIST_RAM_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_EX_BMI_STORAGE_PROFILE_ECC:
-+ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_STORAGE_PROFILE_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_EX_BMI_STATISTICS_RAM_ECC:
-+ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_STATISTICS_RAM_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ case e_FM_EX_BMI_DISPATCH_RAM_ECC:
-+ if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_DISPATCH_RAM_ECC))
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked"));
-+ break;
-+ default:
-+ RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception may not be forced"));
-+ }
-+
-+ fslException = FmanExceptionTrans(exception);
-+ fman_force_intr (&fman_rg, fslException);
-+
-+ return E_OK;
-+}
-+
-+t_Handle FmGetPcd(t_Handle h_Fm)
-+{
-+ return ((t_Fm*)h_Fm)->h_Pcd;
-+}
-+#if (DPAA_VERSION >= 11)
-+extern void *g_MemacRegs;
-+void fm_clk_down(void);
-+uint32_t fman_memac_get_event(void *regs, uint32_t ev_mask);
-+void FM_ChangeClock(t_Handle h_Fm, int hardwarePortId)
-+{
-+ int macId;
-+ uint32_t event, rcr;
-+ t_Fm *p_Fm = (t_Fm*)h_Fm;
-+ rcr = GET_UINT32(p_Fm->p_FmFpmRegs->fm_rcr);
-+ rcr |= 0x04000000;
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rcr, rcr);
-+
-+ HW_PORT_ID_TO_SW_PORT_ID(macId, hardwarePortId);
-+ do
-+ {
-+ event = fman_memac_get_event(g_MemacRegs, 0xFFFFFFFF);
-+ } while ((event & 0x00000020) == 0);
-+ fm_clk_down();
-+ rcr = GET_UINT32(p_Fm->p_FmFpmRegs->fm_rcr);
-+ rcr &= ~0x04000000;
-+ WRITE_UINT32(p_Fm->p_FmFpmRegs->fm_rcr, rcr);
-+}
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm.h
-@@ -0,0 +1,648 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm.h
-+
-+ @Description FM internal structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_H
-+#define __FM_H
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_ext.h"
-+#include "fm_ipc.h"
-+
-+#include "fsl_fman.h"
-+
-+#define __ERR_MODULE__ MODULE_FM
-+
-+#define FM_MAX_NUM_OF_HW_PORT_IDS 64
-+#define FM_MAX_NUM_OF_GUESTS 100
-+
-+/**************************************************************************//**
-+ @Description Exceptions
-+*//***************************************************************************/
-+#define FM_EX_DMA_BUS_ERROR 0x80000000 /**< DMA bus error. */
-+#define FM_EX_DMA_READ_ECC 0x40000000
-+#define FM_EX_DMA_SYSTEM_WRITE_ECC 0x20000000
-+#define FM_EX_DMA_FM_WRITE_ECC 0x10000000
-+#define FM_EX_FPM_STALL_ON_TASKS 0x08000000 /**< Stall of tasks on FPM */
-+#define FM_EX_FPM_SINGLE_ECC 0x04000000 /**< Single ECC on FPM */
-+#define FM_EX_FPM_DOUBLE_ECC 0x02000000
-+#define FM_EX_QMI_SINGLE_ECC                0x01000000  /**< Single ECC on QMI */
-+#define FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID   0x00800000  /**< Dequeue from default queue id */
-+#define FM_EX_QMI_DOUBLE_ECC 0x00400000
-+#define FM_EX_BMI_LIST_RAM_ECC 0x00200000
-+#define FM_EX_BMI_STORAGE_PROFILE_ECC 0x00100000
-+#define FM_EX_BMI_STATISTICS_RAM_ECC 0x00080000
-+#define FM_EX_IRAM_ECC 0x00040000
-+#define FM_EX_MURAM_ECC 0x00020000
-+#define FM_EX_BMI_DISPATCH_RAM_ECC 0x00010000
-+#define FM_EX_DMA_SINGLE_PORT_ECC 0x00008000
-+
-+#define DMA_EMSR_EMSTR_MASK 0x0000FFFF
-+
-+#define DMA_THRESH_COMMQ_MASK 0xFF000000
-+#define DMA_THRESH_READ_INT_BUF_MASK 0x007F0000
-+#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000007F
-+
-+#define GET_EXCEPTION_FLAG(bitMask, exception) \
-+switch (exception){ \
-+ case e_FM_EX_DMA_BUS_ERROR: \
-+ bitMask = FM_EX_DMA_BUS_ERROR; break; \
-+ case e_FM_EX_DMA_SINGLE_PORT_ECC: \
-+ bitMask = FM_EX_DMA_SINGLE_PORT_ECC; break; \
-+ case e_FM_EX_DMA_READ_ECC: \
-+ bitMask = FM_EX_DMA_READ_ECC; break; \
-+ case e_FM_EX_DMA_SYSTEM_WRITE_ECC: \
-+ bitMask = FM_EX_DMA_SYSTEM_WRITE_ECC; break; \
-+ case e_FM_EX_DMA_FM_WRITE_ECC: \
-+ bitMask = FM_EX_DMA_FM_WRITE_ECC; break; \
-+ case e_FM_EX_FPM_STALL_ON_TASKS: \
-+ bitMask = FM_EX_FPM_STALL_ON_TASKS; break; \
-+ case e_FM_EX_FPM_SINGLE_ECC: \
-+ bitMask = FM_EX_FPM_SINGLE_ECC; break; \
-+ case e_FM_EX_FPM_DOUBLE_ECC: \
-+ bitMask = FM_EX_FPM_DOUBLE_ECC; break; \
-+ case e_FM_EX_QMI_SINGLE_ECC: \
-+ bitMask = FM_EX_QMI_SINGLE_ECC; break; \
-+ case e_FM_EX_QMI_DOUBLE_ECC: \
-+ bitMask = FM_EX_QMI_DOUBLE_ECC; break; \
-+ case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: \
-+ bitMask = FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID; break; \
-+ case e_FM_EX_BMI_LIST_RAM_ECC: \
-+ bitMask = FM_EX_BMI_LIST_RAM_ECC; break; \
-+ case e_FM_EX_BMI_STORAGE_PROFILE_ECC: \
-+ bitMask = FM_EX_BMI_STORAGE_PROFILE_ECC; break; \
-+ case e_FM_EX_BMI_STATISTICS_RAM_ECC: \
-+ bitMask = FM_EX_BMI_STATISTICS_RAM_ECC; break; \
-+ case e_FM_EX_BMI_DISPATCH_RAM_ECC: \
-+ bitMask = FM_EX_BMI_DISPATCH_RAM_ECC; break; \
-+ case e_FM_EX_IRAM_ECC: \
-+ bitMask = FM_EX_IRAM_ECC; break; \
-+ case e_FM_EX_MURAM_ECC: \
-+ bitMask = FM_EX_MURAM_ECC; break; \
-+ default: bitMask = 0;break; \
-+}
-+
-+#define GET_FM_MODULE_EVENT(_mod, _id, _intrType, _event) \
-+ switch (_mod) { \
-+ case e_FM_MOD_PRS: \
-+ if (_id) _event = e_FM_EV_DUMMY_LAST; \
-+ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_PRS : e_FM_EV_PRS; \
-+ break; \
-+ case e_FM_MOD_KG: \
-+ if (_id) _event = e_FM_EV_DUMMY_LAST; \
-+ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_KG : e_FM_EV_DUMMY_LAST; \
-+ break; \
-+ case e_FM_MOD_PLCR: \
-+ if (_id) _event = e_FM_EV_DUMMY_LAST; \
-+ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_PLCR : e_FM_EV_PLCR; \
-+ break; \
-+ case e_FM_MOD_TMR: \
-+ if (_id) _event = e_FM_EV_DUMMY_LAST; \
-+ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST : e_FM_EV_TMR; \
-+ break; \
-+ case e_FM_MOD_10G_MAC: \
-+ if (_id >= FM_MAX_NUM_OF_10G_MACS) _event = e_FM_EV_DUMMY_LAST; \
-+ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? (e_FM_EV_ERR_10G_MAC0 + _id) : (e_FM_EV_10G_MAC0 + _id); \
-+ break; \
-+ case e_FM_MOD_1G_MAC: \
-+ if (_id >= FM_MAX_NUM_OF_1G_MACS) _event = e_FM_EV_DUMMY_LAST; \
-+ else _event = (_intrType == e_FM_INTR_TYPE_ERR) ? (e_FM_EV_ERR_1G_MAC0 + _id) : (e_FM_EV_1G_MAC0 + _id); \
-+ break; \
-+ case e_FM_MOD_MACSEC: \
-+ switch (_id){ \
-+ case (0): _event = (_intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_MACSEC_MAC0:e_FM_EV_MACSEC_MAC0; \
-+ break; \
-+ } \
-+ break; \
-+ case e_FM_MOD_FMAN_CTRL: \
-+ if (_intrType == e_FM_INTR_TYPE_ERR) _event = e_FM_EV_DUMMY_LAST; \
-+ else _event = (e_FM_EV_FMAN_CTRL_0 + _id); \
-+ break; \
-+ default: _event = e_FM_EV_DUMMY_LAST; \
-+ break; \
-+ }
-+
-+#define FMAN_CACHE_OVERRIDE_TRANS(fsl_cache_override, _cache_override) \
-+ switch (_cache_override){ \
-+ case e_FM_DMA_NO_CACHE_OR: \
-+ fsl_cache_override = E_FMAN_DMA_NO_CACHE_OR; break; \
-+ case e_FM_DMA_NO_STASH_DATA: \
-+ fsl_cache_override = E_FMAN_DMA_NO_STASH_DATA; break; \
-+ case e_FM_DMA_MAY_STASH_DATA: \
-+ fsl_cache_override = E_FMAN_DMA_MAY_STASH_DATA; break; \
-+ case e_FM_DMA_STASH_DATA: \
-+ fsl_cache_override = E_FMAN_DMA_STASH_DATA; break; \
-+ default: \
-+ fsl_cache_override = E_FMAN_DMA_NO_CACHE_OR; break; \
-+ }
-+
-+#define FMAN_AID_MODE_TRANS(fsl_aid_mode, _aid_mode) \
-+ switch (_aid_mode){ \
-+ case e_FM_DMA_AID_OUT_PORT_ID: \
-+ fsl_aid_mode = E_FMAN_DMA_AID_OUT_PORT_ID; break; \
-+ case e_FM_DMA_AID_OUT_TNUM: \
-+ fsl_aid_mode = E_FMAN_DMA_AID_OUT_TNUM; break; \
-+ default: \
-+ fsl_aid_mode = E_FMAN_DMA_AID_OUT_PORT_ID; break; \
-+ }
-+
-+#define FMAN_DMA_DBG_CNT_TRANS(fsl_dma_dbg_cnt, _dma_dbg_cnt) \
-+ switch (_dma_dbg_cnt){ \
-+ case e_FM_DMA_DBG_NO_CNT: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_NO_CNT; break; \
-+ case e_FM_DMA_DBG_CNT_DONE: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_DONE; break; \
-+ case e_FM_DMA_DBG_CNT_COMM_Q_EM: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_COMM_Q_EM; break; \
-+ case e_FM_DMA_DBG_CNT_INT_READ_EM: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_INT_READ_EM; break; \
-+ case e_FM_DMA_DBG_CNT_INT_WRITE_EM: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_INT_WRITE_EM ; break; \
-+ case e_FM_DMA_DBG_CNT_FPM_WAIT: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_FPM_WAIT ; break; \
-+ case e_FM_DMA_DBG_CNT_SIGLE_BIT_ECC: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_SIGLE_BIT_ECC ; break; \
-+ case e_FM_DMA_DBG_CNT_RAW_WAR_PROT: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_CNT_RAW_WAR_PROT ; break; \
-+ default: \
-+ fsl_dma_dbg_cnt = E_FMAN_DMA_DBG_NO_CNT; break; \
-+ }
-+
-+#define FMAN_DMA_EMER_TRANS(fsl_dma_emer, _dma_emer) \
-+ switch (_dma_emer){ \
-+ case e_FM_DMA_EM_EBS: \
-+ fsl_dma_emer = E_FMAN_DMA_EM_EBS; break; \
-+ case e_FM_DMA_EM_SOS: \
-+ fsl_dma_emer = E_FMAN_DMA_EM_SOS; break; \
-+ default: \
-+ fsl_dma_emer = E_FMAN_DMA_EM_EBS; break; \
-+ }
-+
-+#define FMAN_DMA_ERR_TRANS(fsl_dma_err, _dma_err) \
-+ switch (_dma_err){ \
-+ case e_FM_DMA_ERR_CATASTROPHIC: \
-+ fsl_dma_err = E_FMAN_DMA_ERR_CATASTROPHIC; break; \
-+ case e_FM_DMA_ERR_REPORT: \
-+ fsl_dma_err = E_FMAN_DMA_ERR_REPORT; break; \
-+ default: \
-+ fsl_dma_err = E_FMAN_DMA_ERR_CATASTROPHIC; break; \
-+ }
-+
-+#define FMAN_CATASTROPHIC_ERR_TRANS(fsl_catastrophic_err, _catastrophic_err) \
-+ switch (_catastrophic_err){ \
-+ case e_FM_CATASTROPHIC_ERR_STALL_PORT: \
-+ fsl_catastrophic_err = E_FMAN_CATAST_ERR_STALL_PORT; break; \
-+ case e_FM_CATASTROPHIC_ERR_STALL_TASK: \
-+ fsl_catastrophic_err = E_FMAN_CATAST_ERR_STALL_TASK; break; \
-+ default: \
-+ fsl_catastrophic_err = E_FMAN_CATAST_ERR_STALL_PORT; break; \
-+ }
-+
-+#define FMAN_COUNTERS_TRANS(fsl_counters, _counters) \
-+ switch (_counters){ \
-+ case e_FM_COUNTERS_ENQ_TOTAL_FRAME: \
-+ fsl_counters = E_FMAN_COUNTERS_ENQ_TOTAL_FRAME; break; \
-+ case e_FM_COUNTERS_DEQ_TOTAL_FRAME: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_TOTAL_FRAME; break; \
-+ case e_FM_COUNTERS_DEQ_0: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_0; break; \
-+ case e_FM_COUNTERS_DEQ_1: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_1; break; \
-+ case e_FM_COUNTERS_DEQ_2: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_2; break; \
-+ case e_FM_COUNTERS_DEQ_3: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_3; break; \
-+ case e_FM_COUNTERS_DEQ_FROM_DEFAULT: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_FROM_DEFAULT; break; \
-+ case e_FM_COUNTERS_DEQ_FROM_CONTEXT: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_FROM_CONTEXT; break; \
-+ case e_FM_COUNTERS_DEQ_FROM_FD: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_FROM_FD; break; \
-+ case e_FM_COUNTERS_DEQ_CONFIRM: \
-+ fsl_counters = E_FMAN_COUNTERS_DEQ_CONFIRM; break; \
-+ default: \
-+ fsl_counters = E_FMAN_COUNTERS_ENQ_TOTAL_FRAME; break; \
-+ }
-+
-+/**************************************************************************//**
-+ @Description defaults
-+*//***************************************************************************/
-+#define DEFAULT_exceptions (FM_EX_DMA_BUS_ERROR |\
-+ FM_EX_DMA_READ_ECC |\
-+ FM_EX_DMA_SYSTEM_WRITE_ECC |\
-+ FM_EX_DMA_FM_WRITE_ECC |\
-+ FM_EX_FPM_STALL_ON_TASKS |\
-+ FM_EX_FPM_SINGLE_ECC |\
-+ FM_EX_FPM_DOUBLE_ECC |\
-+ FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID|\
-+ FM_EX_BMI_LIST_RAM_ECC |\
-+ FM_EX_BMI_STORAGE_PROFILE_ECC |\
-+ FM_EX_BMI_STATISTICS_RAM_ECC |\
-+ FM_EX_IRAM_ECC |\
-+ FM_EX_MURAM_ECC |\
-+ FM_EX_BMI_DISPATCH_RAM_ECC |\
-+ FM_EX_QMI_DOUBLE_ECC |\
-+ FM_EX_QMI_SINGLE_ECC)
-+
-+#define DEFAULT_eccEnable FALSE
-+#ifdef FM_PEDANTIC_DMA
-+#define DEFAULT_aidOverride TRUE
-+#else
-+#define DEFAULT_aidOverride FALSE
-+#endif /* FM_PEDANTIC_DMA */
-+#define DEFAULT_aidMode e_FM_DMA_AID_OUT_TNUM
-+#define DEFAULT_dmaStopOnBusError FALSE
-+#define DEFAULT_stopAtBusError FALSE
-+#define DEFAULT_axiDbgNumOfBeats 1
-+#define DEFAULT_dmaReadIntBufLow ((DMA_THRESH_MAX_BUF+1)/2)
-+#define DEFAULT_dmaReadIntBufHigh ((DMA_THRESH_MAX_BUF+1)*3/4)
-+#define DEFAULT_dmaWriteIntBufLow ((DMA_THRESH_MAX_BUF+1)/2)
-+#define DEFAULT_dmaWriteIntBufHigh ((DMA_THRESH_MAX_BUF+1)*3/4)
-+#define DEFAULT_catastrophicErr e_FM_CATASTROPHIC_ERR_STALL_PORT
-+#define DEFAULT_dmaErr e_FM_DMA_ERR_CATASTROPHIC
-+#define DEFAULT_resetOnInit FALSE
-+#define DEFAULT_resetOnInitOverrideCallback NULL
-+#define DEFAULT_haltOnExternalActivation FALSE /* do not change! if changed, must be disabled for rev1 ! */
-+#define DEFAULT_haltOnUnrecoverableEccError FALSE /* do not change! if changed, must be disabled for rev1 ! */
-+#define DEFAULT_externalEccRamsEnable FALSE
-+#define DEFAULT_VerifyUcode FALSE
-+
-+#if (DPAA_VERSION < 11)
-+#define DEFAULT_totalFifoSize(major, minor) \
-+ (((major == 2) || (major == 5)) ? \
-+ (100*KILOBYTE) : ((major == 4) ? \
-+ (49*KILOBYTE) : (122*KILOBYTE)))
-+#define DEFAULT_totalNumOfTasks(major, minor) \
-+ BMI_MAX_NUM_OF_TASKS
-+
-+#define DEFAULT_dmaCommQLow ((DMA_THRESH_MAX_COMMQ+1)/2)
-+#define DEFAULT_dmaCommQHigh ((DMA_THRESH_MAX_COMMQ+1)*3/4)
-+#define DEFAULT_cacheOverride e_FM_DMA_NO_CACHE_OR
-+#define DEFAULT_dmaCamNumOfEntries 32
-+#define DEFAULT_dmaDbgCntMode e_FM_DMA_DBG_NO_CNT
-+#define DEFAULT_dmaEnEmergency FALSE
-+#define DEFAULT_dmaSosEmergency 0
-+#define DEFAULT_dmaWatchdog 0 /* disabled */
-+#define DEFAULT_dmaEnEmergencySmoother FALSE
-+#define DEFAULT_dmaEmergencySwitchCounter 0
-+
-+#define DEFAULT_dispLimit 0
-+#define DEFAULT_prsDispTh 16
-+#define DEFAULT_plcrDispTh 16
-+#define DEFAULT_kgDispTh 16
-+#define DEFAULT_bmiDispTh 16
-+#define DEFAULT_qmiEnqDispTh 16
-+#define DEFAULT_qmiDeqDispTh 16
-+#define DEFAULT_fmCtl1DispTh 16
-+#define DEFAULT_fmCtl2DispTh 16
-+
-+#else /* (DPAA_VERSION < 11) */
-+/* Defaults are registers' reset values */
-+#define DEFAULT_totalFifoSize(major, minor) \
-+ (((major == 6) && ((minor == 1) || (minor == 4))) ? \
-+ (156*KILOBYTE) : (295*KILOBYTE))
-+
-+/* According to the default value of FMBM_CFG2[TNTSKS] */
-+#define DEFAULT_totalNumOfTasks(major, minor) \
-+ (((major == 6) && ((minor == 1) || (minor == 4))) ? 59 : 124)
-+
-+#define DEFAULT_dmaCommQLow 0x2A
-+#define DEFAULT_dmaCommQHigh 0x3F
-+#define DEFAULT_cacheOverride e_FM_DMA_NO_CACHE_OR
-+#define DEFAULT_dmaCamNumOfEntries 64
-+#define DEFAULT_dmaDbgCntMode e_FM_DMA_DBG_NO_CNT
-+#define DEFAULT_dmaEnEmergency FALSE
-+#define DEFAULT_dmaSosEmergency 0
-+#define DEFAULT_dmaWatchdog 0 /* disabled */
-+#define DEFAULT_dmaEnEmergencySmoother FALSE
-+#define DEFAULT_dmaEmergencySwitchCounter 0
-+
-+#define DEFAULT_dispLimit 0
-+#define DEFAULT_prsDispTh 16
-+#define DEFAULT_plcrDispTh 16
-+#define DEFAULT_kgDispTh 16
-+#define DEFAULT_bmiDispTh 16
-+#define DEFAULT_qmiEnqDispTh 16
-+#define DEFAULT_qmiDeqDispTh 16
-+#define DEFAULT_fmCtl1DispTh 16
-+#define DEFAULT_fmCtl2DispTh 16
-+#endif /* (DPAA_VERSION < 11) */
-+
-+#define FM_TIMESTAMP_1_USEC_BIT 8
-+
-+/**************************************************************************//**
-+ @Collection Defines used for enabling/disabling FM interrupts
-+ @{
-+*//***************************************************************************/
-+#define ERR_INTR_EN_DMA 0x00010000
-+#define ERR_INTR_EN_FPM 0x80000000
-+#define ERR_INTR_EN_BMI 0x00800000
-+#define ERR_INTR_EN_QMI 0x00400000
-+#define ERR_INTR_EN_PRS 0x00200000
-+#define ERR_INTR_EN_KG 0x00100000
-+#define ERR_INTR_EN_PLCR 0x00080000
-+#define ERR_INTR_EN_MURAM 0x00040000
-+#define ERR_INTR_EN_IRAM 0x00020000
-+#define ERR_INTR_EN_10G_MAC0 0x00008000
-+#define ERR_INTR_EN_10G_MAC1 0x00000040
-+#define ERR_INTR_EN_1G_MAC0 0x00004000
-+#define ERR_INTR_EN_1G_MAC1 0x00002000
-+#define ERR_INTR_EN_1G_MAC2 0x00001000
-+#define ERR_INTR_EN_1G_MAC3 0x00000800
-+#define ERR_INTR_EN_1G_MAC4 0x00000400
-+#define ERR_INTR_EN_1G_MAC5 0x00000200
-+#define ERR_INTR_EN_1G_MAC6 0x00000100
-+#define ERR_INTR_EN_1G_MAC7 0x00000080
-+#define ERR_INTR_EN_MACSEC_MAC0 0x00000001
-+
-+#define INTR_EN_QMI 0x40000000
-+#define INTR_EN_PRS 0x20000000
-+#define INTR_EN_WAKEUP 0x10000000
-+#define INTR_EN_PLCR 0x08000000
-+#define INTR_EN_1G_MAC0 0x00080000
-+#define INTR_EN_1G_MAC1 0x00040000
-+#define INTR_EN_1G_MAC2 0x00020000
-+#define INTR_EN_1G_MAC3 0x00010000
-+#define INTR_EN_1G_MAC4 0x00000040
-+#define INTR_EN_1G_MAC5 0x00000020
-+#define INTR_EN_1G_MAC6 0x00000008
-+#define INTR_EN_1G_MAC7 0x00000002
-+#define INTR_EN_10G_MAC0 0x00200000
-+#define INTR_EN_10G_MAC1 0x00100000
-+#define INTR_EN_REV0 0x00008000
-+#define INTR_EN_REV1 0x00004000
-+#define INTR_EN_REV2 0x00002000
-+#define INTR_EN_REV3 0x00001000
-+#define INTR_EN_BRK 0x00000080
-+#define INTR_EN_TMR 0x01000000
-+#define INTR_EN_MACSEC_MAC0 0x00000001
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description Memory Mapped Registers
-+*//***************************************************************************/
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+typedef struct
-+{
-+ volatile uint32_t iadd; /**< FM IRAM instruction address register */
-+ volatile uint32_t idata; /**< FM IRAM instruction data register */
-+ volatile uint32_t itcfg; /**< FM IRAM timing config register */
-+ volatile uint32_t iready; /**< FM IRAM ready register */
-+ volatile uint32_t res[0x1FFFC];
-+} t_FMIramRegs;
-+
-+/* Trace buffer registers -
-+ each FM Controller has its own trace buffer residing at FM_MM_TRB(fmCtrlIndex) offset */
-+typedef struct t_FmTrbRegs
-+{
-+ volatile uint32_t tcrh;
-+ volatile uint32_t tcrl;
-+ volatile uint32_t tesr;
-+ volatile uint32_t tecr0h;
-+ volatile uint32_t tecr0l;
-+ volatile uint32_t terf0h;
-+ volatile uint32_t terf0l;
-+ volatile uint32_t tecr1h;
-+ volatile uint32_t tecr1l;
-+ volatile uint32_t terf1h;
-+ volatile uint32_t terf1l;
-+ volatile uint32_t tpcch;
-+ volatile uint32_t tpccl;
-+ volatile uint32_t tpc1h;
-+ volatile uint32_t tpc1l;
-+ volatile uint32_t tpc2h;
-+ volatile uint32_t tpc2l;
-+ volatile uint32_t twdimr;
-+ volatile uint32_t twicvr;
-+ volatile uint32_t tar;
-+ volatile uint32_t tdr;
-+ volatile uint32_t tsnum1;
-+ volatile uint32_t tsnum2;
-+ volatile uint32_t tsnum3;
-+ volatile uint32_t tsnum4;
-+} t_FmTrbRegs;
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Description General defines
-+*//***************************************************************************/
-+#define FM_DEBUG_STATUS_REGISTER_OFFSET 0x000d1084UL
-+#define FM_FW_DEBUG_INSTRUCTION 0x6ffff805UL
-+
-+/**************************************************************************//**
-+ @Description FPM defines
-+*//***************************************************************************/
-+/* masks */
-+#define FPM_BRKC_RDBG 0x00000200
-+#define FPM_BRKC_SLP 0x00000800
-+/**************************************************************************//**
-+ @Description BMI defines
-+*//***************************************************************************/
-+/* masks */
-+#define BMI_INIT_START 0x80000000
-+#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
-+#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
-+#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
-+#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
-+/**************************************************************************//**
-+ @Description QMI defines
-+*//***************************************************************************/
-+/* masks */
-+#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
-+#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
-+#define QMI_INTR_EN_SINGLE_ECC 0x80000000
-+
-+/**************************************************************************//**
-+ @Description IRAM defines
-+*//***************************************************************************/
-+/* masks */
-+#define IRAM_IADD_AIE 0x80000000
-+#define IRAM_READY 0x80000000
-+
-+/**************************************************************************//**
-+ @Description TRB defines
-+*//***************************************************************************/
-+/* masks */
-+#define TRB_TCRH_RESET 0x04000000
-+#define TRB_TCRH_ENABLE_COUNTERS 0x84008000
-+#define TRB_TCRH_DISABLE_COUNTERS 0x8400C000
-+#define TRB_TCRL_RESET 0x20000000
-+#define TRB_TCRL_UTIL 0x00000460
-+typedef struct {
-+ void (*f_Isr) (t_Handle h_Arg, uint32_t event);
-+ t_Handle h_SrcHandle;
-+} t_FmanCtrlIntrSrc;
-+
-+
-+typedef void (t_FmanCtrlIsr)( t_Handle h_Fm, uint32_t event);
-+
-+typedef struct
-+{
-+/***************************/
-+/* Master/Guest parameters */
-+/***************************/
-+ uint8_t fmId;
-+ e_FmPortType portsTypes[FM_MAX_NUM_OF_HW_PORT_IDS];
-+ uint16_t fmClkFreq;
-+ uint16_t fmMacClkFreq;
-+ t_FmRevisionInfo revInfo;
-+/**************************/
-+/* Master Only parameters */
-+/**************************/
-+ bool enabledTimeStamp;
-+ uint8_t count1MicroBit;
-+ uint8_t totalNumOfTasks;
-+ uint32_t totalFifoSize;
-+ uint8_t maxNumOfOpenDmas;
-+ uint8_t accumulatedNumOfTasks;
-+ uint32_t accumulatedFifoSize;
-+ uint8_t accumulatedNumOfOpenDmas;
-+ uint8_t accumulatedNumOfDeqTnums;
-+#ifdef FM_LOW_END_RESTRICTION
-+ bool lowEndRestriction;
-+#endif /* FM_LOW_END_RESTRICTION */
-+ uint32_t exceptions;
-+ int irq;
-+ int errIrq;
-+ bool ramsEccEnable;
-+ bool explicitEnable;
-+ bool internalCall;
-+ uint8_t ramsEccOwners;
-+ uint32_t extraFifoPoolSize;
-+ uint8_t extraTasksPoolSize;
-+ uint8_t extraOpenDmasPoolSize;
-+#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)
-+ uint16_t portMaxFrameLengths10G[FM_MAX_NUM_OF_10G_MACS];
-+ uint16_t macMaxFrameLengths10G[FM_MAX_NUM_OF_10G_MACS];
-+#endif /* defined(FM_MAX_NUM_OF_10G_MACS) && ... */
-+ uint16_t portMaxFrameLengths1G[FM_MAX_NUM_OF_1G_MACS];
-+ uint16_t macMaxFrameLengths1G[FM_MAX_NUM_OF_1G_MACS];
-+} t_FmStateStruct;
-+
-+#if (DPAA_VERSION >= 11)
-+typedef struct t_FmMapParam {
-+ uint16_t profilesBase;
-+ uint16_t numOfProfiles;
-+ t_Handle h_FmPort;
-+} t_FmMapParam;
-+
-+typedef struct t_FmAllocMng {
-+ bool allocated;
-+ uint8_t ownerId; /* guestId for KG in multi-partition only,
-+ portId for PLCR in any environment */
-+} t_FmAllocMng;
-+
-+typedef struct t_FmPcdSpEntry {
-+ bool valid;
-+ t_FmAllocMng profilesMng;
-+} t_FmPcdSpEntry;
-+
-+typedef struct t_FmSp {
-+ void *p_FmPcdStoragePrflRegs;
-+ t_FmPcdSpEntry profiles[FM_VSP_MAX_NUM_OF_ENTRIES];
-+ t_FmMapParam portsMapping[FM_MAX_NUM_OF_PORTS];
-+} t_FmSp;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+typedef struct t_Fm
-+{
-+/***************************/
-+/* Master/Guest parameters */
-+/***************************/
-+/* locals for recovery */
-+ uintptr_t baseAddr;
-+
-+/* un-needed for recovery */
-+ t_Handle h_Pcd;
-+ char fmModuleName[MODULE_NAME_SIZE];
-+ char fmIpcHandlerModuleName[FM_MAX_NUM_OF_GUESTS][MODULE_NAME_SIZE];
-+ t_Handle h_IpcSessions[FM_MAX_NUM_OF_GUESTS];
-+ t_FmIntrSrc intrMng[e_FM_EV_DUMMY_LAST]; /* FM exceptions user callback */
-+ uint8_t guestId;
-+/**************************/
-+/* Master Only parameters */
-+/**************************/
-+/* locals for recovery */
-+ struct fman_fpm_regs *p_FmFpmRegs;
-+ struct fman_bmi_regs *p_FmBmiRegs;
-+ struct fman_qmi_regs *p_FmQmiRegs;
-+ struct fman_dma_regs *p_FmDmaRegs;
-+ struct fman_regs *p_FmRegs;
-+ t_FmExceptionsCallback *f_Exception;
-+ t_FmBusErrorCallback *f_BusError;
-+ t_Handle h_App; /* Application handle */
-+ t_Handle h_Spinlock;
-+ bool recoveryMode;
-+ t_FmStateStruct *p_FmStateStruct;
-+ uint16_t tnumAgingPeriod;
-+#if (DPAA_VERSION >= 11)
-+ t_FmSp *p_FmSp;
-+ uint8_t partNumOfVSPs;
-+ uint8_t partVSPBase;
-+ uintptr_t vspBaseAddr;
-+#endif /* (DPAA_VERSION >= 11) */
-+    bool                        portsPreFetchConfigured[FM_MAX_NUM_OF_HW_PORT_IDS]; /* Prefetch configuration per Tx-port */
-+    bool                        portsPreFetchValue[FM_MAX_NUM_OF_HW_PORT_IDS];      /* Prefetch configuration per Tx-port */
-+
-+/* un-needed for recovery */
-+ struct fman_cfg *p_FmDriverParam;
-+ t_Handle h_FmMuram;
-+ uint64_t fmMuramPhysBaseAddr;
-+ bool independentMode;
-+ bool hcPortInitialized;
-+ uintptr_t camBaseAddr; /* save for freeing */
-+ uintptr_t resAddr;
-+ uintptr_t fifoBaseAddr; /* save for freeing */
-+ t_FmanCtrlIntrSrc fmanCtrlIntr[FM_NUM_OF_FMAN_CTRL_EVENT_REGS]; /* FM exceptions user callback */
-+ bool usedEventRegs[FM_NUM_OF_FMAN_CTRL_EVENT_REGS];
-+ t_FmFirmwareParams firmware;
-+ bool fwVerify;
-+ bool resetOnInit;
-+ t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride;
-+ uint32_t userSetExceptions;
-+} t_Fm;
-+
-+
-+#endif /* __FM_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_ipc.h
-@@ -0,0 +1,465 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_ipc.h
-+
-+ @Description FM Inter-Partition prototypes, structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_IPC_H
-+#define __FM_IPC_H
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_IPC_grp FM Inter-Partition messaging Unit
-+
-+ @Description FM Inter-Partition messaging unit API definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Description enum for defining MAC types
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description A structure of parameters for specifying a MAC.
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ uint8_t id;
-+ uint32_t enumType;
-+} _PackedType t_FmIpcMacParams;
-+
-+/**************************************************************************//**
-+ @Description A structure of parameters for specifying a MAC.
-+*//***************************************************************************/
-+typedef _Packed struct
-+{
-+ t_FmIpcMacParams macParams;
-+ uint16_t maxFrameLength;
-+} _PackedType t_FmIpcMacMaxFrameParams;
-+
-+/**************************************************************************//**
-+ @Description FM physical Address
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcPhysAddr
-+{
-+ volatile uint8_t high;
-+ volatile uint32_t low;
-+} _PackedType t_FmIpcPhysAddr;
-+
-+
-+typedef _Packed struct t_FmIpcPortOutInitParams {
-+ uint8_t numOfTasks; /**< OUT */
-+ uint8_t numOfExtraTasks; /**< OUT */
-+ uint8_t numOfOpenDmas; /**< OUT */
-+ uint8_t numOfExtraOpenDmas; /**< OUT */
-+ uint32_t sizeOfFifo; /**< OUT */
-+ uint32_t extraSizeOfFifo; /**< OUT */
-+ t_FmIpcPhysAddr ipcPhysAddr; /**< OUT */
-+} _PackedType t_FmIpcPortOutInitParams;
-+
-+/**************************************************************************//**
-+ @Description Structure for IPC communication during FM_PORT_Init.
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcPortInInitParams {
-+ uint8_t hardwarePortId; /**< IN. port Id */
-+ uint32_t enumPortType; /**< IN. Port type */
-+ uint8_t boolIndependentMode;/**< IN. TRUE if FM Port operates in independent mode */
-+ uint16_t liodnOffset; /**< IN. Port's requested resource */
-+ uint8_t numOfTasks; /**< IN. Port's requested resource */
-+ uint8_t numOfExtraTasks; /**< IN. Port's requested resource */
-+ uint8_t numOfOpenDmas; /**< IN. Port's requested resource */
-+ uint8_t numOfExtraOpenDmas; /**< IN. Port's requested resource */
-+ uint32_t sizeOfFifo; /**< IN. Port's requested resource */
-+ uint32_t extraSizeOfFifo; /**< IN. Port's requested resource */
-+ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
-+ uint16_t maxFrameLength; /**< IN. Port's max frame length. */
-+ uint16_t liodnBase; /**< IN. Irrelevant for P4080 rev 1.
-+ LIODN base for this port, to be
-+ used together with LIODN offset. */
-+} _PackedType t_FmIpcPortInInitParams;
-+
-+
-+/**************************************************************************//**
-+ @Description Structure for IPC communication between port and FM
-+ regarding tasks and open DMA resources management.
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcPortRsrcParams {
-+ uint8_t hardwarePortId; /**< IN. port Id */
-+ uint32_t val; /**< IN. Port's requested resource */
-+ uint32_t extra; /**< IN. Port's requested resource */
-+ uint8_t boolInitialConfig;
-+} _PackedType t_FmIpcPortRsrcParams;
-+
-+
-+/**************************************************************************//**
-+ @Description Structure for IPC communication between port and FM
-+ regarding tasks and open DMA resources management.
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcPortFifoParams {
-+ t_FmIpcPortRsrcParams rsrcParams;
-+ uint32_t enumPortType;
-+ uint8_t boolIndependentMode;
-+ uint8_t deqPipelineDepth;
-+ uint8_t numOfPools;
-+ uint16_t secondLargestBufSize;
-+ uint16_t largestBufSize;
-+ uint8_t boolInitialConfig;
-+} _PackedType t_FmIpcPortFifoParams;
-+
-+/**************************************************************************//**
-+ @Description Structure for port-FM communication during FM_PORT_Free.
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcPortFreeParams {
-+ uint8_t hardwarePortId; /**< IN. port Id */
-+ uint32_t enumPortType; /**< IN. Port type */
-+ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
-+} _PackedType t_FmIpcPortFreeParams;
-+
-+/**************************************************************************//**
-+ @Description Structure for defining DMA status
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcDmaStatus {
-+ uint8_t boolCmqNotEmpty; /**< Command queue is not empty */
-+ uint8_t boolBusError; /**< Bus error occurred */
-+ uint8_t boolReadBufEccError; /**< Double ECC error on buffer Read */
-+ uint8_t boolWriteBufEccSysError; /**< Double ECC error on buffer write from system side */
-+ uint8_t boolWriteBufEccFmError; /**< Double ECC error on buffer write from FM side */
-+ uint8_t boolSinglePortEccError; /**< Single port ECC error from FM side */
-+} _PackedType t_FmIpcDmaStatus;
-+
-+typedef _Packed struct t_FmIpcRegisterIntr
-+{
-+ uint8_t guestId; /* IN */
-+ uint32_t event; /* IN */
-+} _PackedType t_FmIpcRegisterIntr;
-+
-+typedef _Packed struct t_FmIpcIsr
-+{
-+ uint8_t boolErr; /* IN */
-+ uint32_t pendingReg; /* IN */
-+} _PackedType t_FmIpcIsr;
-+
-+/**************************************************************************//**
-+ @Description structure for returning FM parameters
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcParams {
-+ uint16_t fmClkFreq; /**< OUT: FM Clock frequency */
-+    uint16_t        fmMacClkFreq;               /**< OUT: FM MAC clock frequency */
-+ uint8_t majorRev; /**< OUT: FM Major revision */
-+ uint8_t minorRev; /**< OUT: FM Minor revision */
-+} _PackedType t_FmIpcParams;
-+
-+
-+/**************************************************************************//**
-+ @Description structure for returning Fman Ctrl Code revision information
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcFmanCtrlCodeRevisionInfo {
-+ uint16_t packageRev; /**< OUT: Package revision */
-+ uint8_t majorRev; /**< OUT: Major revision */
-+ uint8_t minorRev; /**< OUT: Minor revision */
-+} _PackedType t_FmIpcFmanCtrlCodeRevisionInfo;
-+
-+/**************************************************************************//**
-+ @Description   Structure for defining FM number of Fman controllers
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcPortNumOfFmanCtrls {
-+ uint8_t hardwarePortId; /**< IN. port Id */
-+    uint8_t     numOfFmanCtrls;         /**< IN. Number of Fman controllers */
-+    t_FmFmanCtrl    orFmanCtrl;         /**< IN. Fman controller for order restoration */
-+} t_FmIpcPortNumOfFmanCtrls;
-+
-+/**************************************************************************//**
-+ @Description   Structure for setting Fman controller events
-+*//***************************************************************************/
-+typedef _Packed struct t_FmIpcFmanEvents {
-+ uint8_t eventRegId; /**< IN: Fman controller event register id */
-+ uint32_t enableEvents; /**< IN/OUT: required enabled events mask */
-+} _PackedType t_FmIpcFmanEvents;
-+
-+typedef _Packed struct t_FmIpcResourceAllocParams {
-+ uint8_t guestId;
-+ uint16_t base;
-+ uint16_t num;
-+}_PackedType t_FmIpcResourceAllocParams;
-+
-+typedef _Packed struct t_FmIpcVspSetPortWindow {
-+ uint8_t hardwarePortId;
-+ uint8_t baseStorageProfile;
-+ uint8_t log2NumOfProfiles;
-+}_PackedType t_FmIpcVspSetPortWindow;
-+
-+typedef _Packed struct t_FmIpcSetCongestionGroupPfcPriority {
-+ uint32_t congestionGroupId;
-+ uint8_t priorityBitMap;
-+}_PackedType t_FmIpcSetCongestionGroupPfcPriority;
-+
-+#define FM_IPC_MAX_REPLY_BODY_SIZE 20
-+#define FM_IPC_MAX_REPLY_SIZE (FM_IPC_MAX_REPLY_BODY_SIZE + sizeof(uint32_t))
-+#define FM_IPC_MAX_MSG_SIZE 30
-+
-+typedef _Packed struct t_FmIpcMsg
-+{
-+ uint32_t msgId;
-+ uint8_t msgBody[FM_IPC_MAX_MSG_SIZE];
-+} _PackedType t_FmIpcMsg;
-+
-+typedef _Packed struct t_FmIpcReply
-+{
-+ uint32_t error;
-+ uint8_t replyBody[FM_IPC_MAX_REPLY_BODY_SIZE];
-+} _PackedType t_FmIpcReply;
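-+
-+/* Note: a reply always begins with the 32-bit 'error' word, so reply-length
-+   checks compare against sizeof(uint32_t) plus the expected body size (e.g.
-+   FM_GET_COUNTER expects sizeof(uint32_t) + sizeof(uint32_t)). */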
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/***************************************************************************/
-+/************************ FRONT-END-TO-BACK-END*****************************/
-+/***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_GET_TIMESTAMP_SCALE
-+
-+ @Description Used by FM front-end.
-+
-+ @Param[out] uint32_t Pointer
-+*//***************************************************************************/
-+#define FM_GET_TIMESTAMP_SCALE 1
-+
-+/**************************************************************************//**
-+ @Function FM_GET_COUNTER
-+
-+ @Description Used by FM front-end.
-+
-+ @Param[in/out] t_FmIpcGetCounter Pointer
-+*//***************************************************************************/
-+#define FM_GET_COUNTER 2
-+
-+/**************************************************************************//**
-+ @Function FM_GET_SET_PORT_PARAMS
-+
-+ @Description Used by FM front-end for the PORT module in order to set and get
-+ parameters in/from master FM module on FM PORT initialization time.
-+
-+ @Param[in/out] t_FmIcPortInitParams Pointer
-+*//***************************************************************************/
-+#define FM_GET_SET_PORT_PARAMS 4
-+
-+/**************************************************************************//**
-+ @Function FM_FREE_PORT
-+
-+ @Description Used by FM front-end for the PORT module when a port is freed
-+ to free all FM PORT resources.
-+
-+ @Param[in] uint8_t Pointer
-+*//***************************************************************************/
-+#define FM_FREE_PORT 5
-+
-+/**************************************************************************//**
-+ @Function FM_RESET_MAC
-+
-+ @Description Used by front-end for the MAC module to reset the MAC registers
-+
-+ @Param[in] t_FmIpcMacParams Pointer .
-+*//***************************************************************************/
-+#define FM_RESET_MAC 6
-+
-+/**************************************************************************//**
-+ @Function FM_RESUME_STALLED_PORT
-+
-+ @Description Used by FM front-end for the PORT module in order to
-+ release a stalled FM Port.
-+
-+ @Param[in] uint8_t Pointer
-+*//***************************************************************************/
-+#define FM_RESUME_STALLED_PORT 7
-+
-+/**************************************************************************//**
-+ @Function FM_IS_PORT_STALLED
-+
-+ @Description Used by FM front-end for the PORT module in order to check whether
-+ an FM port is stalled.
-+
-+ @Param[in/out] t_FmIcPortIsStalled Pointer
-+*//***************************************************************************/
-+#define FM_IS_PORT_STALLED 8
-+
-+/**************************************************************************//**
-+ @Function FM_GET_PARAMS
-+
-+ @Description   Used by FM front-end for the PORT module in order to
-+ return FM parameters.
-+
-+ @Param[in] uint8_t Pointer
-+*//***************************************************************************/
-+#define FM_GET_PARAMS 10
-+
-+/**************************************************************************//**
-+ @Function FM_REGISTER_INTR
-+
-+ @Description Used by FM front-end to register an interrupt handler to
-+ be called upon interrupt for guest.
-+
-+ @Param[out] t_FmIpcRegisterIntr Pointer
-+*//***************************************************************************/
-+#define FM_REGISTER_INTR 11
-+
-+/**************************************************************************//**
-+ @Function FM_DMA_STAT
-+
-+ @Description Used by FM front-end to read the FM DMA status.
-+
-+ @Param[out] t_FmIpcDmaStatus Pointer
-+*//***************************************************************************/
-+#define FM_DMA_STAT 13
-+
-+/**************************************************************************//**
-+ @Function FM_ALLOC_FMAN_CTRL_EVENT_REG
-+
-+ @Description Used by FM front-end to allocate event register.
-+
-+ @Param[out] Event register id Pointer
-+*//***************************************************************************/
-+#define FM_ALLOC_FMAN_CTRL_EVENT_REG 14
-+
-+/**************************************************************************//**
-+ @Function FM_FREE_FMAN_CTRL_EVENT_REG
-+
-+ @Description   Used by FM front-end to free an allocated event register.
-+
-+ @Param[in] uint8_t Pointer - Event register id
-+*//***************************************************************************/
-+#define FM_FREE_FMAN_CTRL_EVENT_REG 15
-+
-+/**************************************************************************//**
-+ @Function FM_SET_FMAN_CTRL_EVENTS_ENABLE
-+
-+ @Description Used by FM front-end to enable events in the FPM
-+ Fman controller event register.
-+
-+ @Param[in] t_FmIpcFmanEvents Pointer
-+*//***************************************************************************/
-+#define FM_SET_FMAN_CTRL_EVENTS_ENABLE 16
-+
-+/**************************************************************************//**
-+ @Function      FM_GET_FMAN_CTRL_EVENTS_ENABLE
-+
-+ @Description   Used by FM front-end to get the enabled events in the FPM
-+                 Fman controller event register.
-+
-+ @Param[in/out] t_FmIpcFmanEvents Pointer
-+*//***************************************************************************/
-+#define FM_GET_FMAN_CTRL_EVENTS_ENABLE 17
-+
-+/**************************************************************************//**
-+ @Function FM_SET_MAC_MAX_FRAME
-+
-+ @Description Used by FM front-end to set MAC's MTU/RTU's in
-+ back-end.
-+
-+ @Param[in/out] t_FmIpcMacMaxFrameParams Pointer
-+*//***************************************************************************/
-+#define FM_SET_MAC_MAX_FRAME 18
-+
-+/**************************************************************************//**
-+ @Function FM_GET_PHYS_MURAM_BASE
-+
-+ @Description Used by FM front-end in order to get MURAM base address
-+
-+ @Param[in/out] t_FmIpcPhysAddr Pointer
-+*//***************************************************************************/
-+#define FM_GET_PHYS_MURAM_BASE 19
-+
-+/**************************************************************************//**
-+ @Function FM_MASTER_IS_ALIVE
-+
-+ @Description Used by FM front-end in order to verify Master is up
-+
-+ @Param[in/out] bool
-+*//***************************************************************************/
-+#define FM_MASTER_IS_ALIVE 20
-+
-+#define FM_ENABLE_RAM_ECC 21
-+#define FM_DISABLE_RAM_ECC 22
-+#define FM_SET_NUM_OF_FMAN_CTRL 23
-+#define FM_SET_SIZE_OF_FIFO 24
-+#define FM_SET_NUM_OF_TASKS 25
-+#define FM_SET_NUM_OF_OPEN_DMAS 26
-+#define FM_VSP_ALLOC 27
-+#define FM_VSP_FREE 28
-+#define FM_VSP_SET_PORT_WINDOW 29
-+#define FM_GET_FMAN_CTRL_CODE_REV 30
-+#define FM_SET_CONG_GRP_PFC_PRIO 31
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+#define FM_10G_TX_ECC_WA 100
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+/***************************************************************************/
-+/************************ BACK-END-TO-FRONT-END*****************************/
-+/***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_GUEST_ISR
-+
-+ @Description Used by FM back-end to report an interrupt to the front-end.
-+
-+ @Param[out] t_FmIpcIsr Pointer
-+*//***************************************************************************/
-+#define FM_GUEST_ISR 1
-+
-+
-+
-+/** @} */ /* end of FM_IPC_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#endif /* __FM_IPC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fm_muram.c
-@@ -0,0 +1,174 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File FM_muram.c
-+
-+ @Description FM MURAM ...
-+*//***************************************************************************/
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "mm_ext.h"
-+#include "string_ext.h"
-+#include "sprint_ext.h"
-+#include "fm_muram_ext.h"
-+#include "fm_common.h"
-+
-+#define __ERR_MODULE__ MODULE_FM_MURAM
-+
-+
-+typedef struct
-+{
-+ t_Handle h_Mem;
-+ uintptr_t baseAddr;
-+ uint32_t size;
-+} t_FmMuram;
-+
-+
-+void FmMuramClear(t_Handle h_FmMuram)
-+{
-+ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
-+
-+ SANITY_CHECK_RETURN(h_FmMuram, E_INVALID_HANDLE);
-+ IOMemSet32(UINT_TO_PTR(p_FmMuram->baseAddr), 0, p_FmMuram->size);
-+}
-+
-+
-+t_Handle FM_MURAM_ConfigAndInit(uintptr_t baseAddress, uint32_t size)
-+{
-+ t_Handle h_Mem;
-+ t_FmMuram *p_FmMuram;
-+
-+ if (!baseAddress)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("baseAddress 0 is not supported"));
-+ return NULL;
-+ }
-+
-+ if (baseAddress%4)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("baseAddress not 4 bytes aligned!"));
-+ return NULL;
-+ }
-+
-+ /* Allocate FM MURAM structure */
-+ p_FmMuram = (t_FmMuram *) XX_Malloc(sizeof(t_FmMuram));
-+ if (!p_FmMuram)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MURAM driver structure"));
-+ return NULL;
-+ }
-+ memset(p_FmMuram, 0, sizeof(t_FmMuram));
-+
-+
-+ if ((MM_Init(&h_Mem, baseAddress, size) != E_OK) || (!h_Mem))
-+ {
-+ XX_Free(p_FmMuram);
-+ REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-MURAM partition!!!"));
-+ return NULL;
-+ }
-+
-+ /* Initialize FM MURAM parameters which will be kept by the driver */
-+ p_FmMuram->baseAddr = baseAddress;
-+ p_FmMuram->size = size;
-+ p_FmMuram->h_Mem = h_Mem;
-+
-+ return p_FmMuram;
-+}
-+
-+t_Error FM_MURAM_Free(t_Handle h_FmMuram)
-+{
-+ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
-+
-+ if (p_FmMuram->h_Mem)
-+ MM_Free(p_FmMuram->h_Mem);
-+
-+ XX_Free(h_FmMuram);
-+
-+ return E_OK;
-+}
-+
-+void * FM_MURAM_AllocMem(t_Handle h_FmMuram, uint32_t size, uint32_t align)
-+{
-+ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
-+ uintptr_t addr;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, NULL);
-+
-+ addr = (uintptr_t)MM_Get(p_FmMuram->h_Mem, size, align ,"FM MURAM");
-+
-+ if (addr == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return UINT_TO_PTR(addr);
-+}
-+
-+void * FM_MURAM_AllocMemForce(t_Handle h_FmMuram, uint64_t base, uint32_t size)
-+{
-+ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
-+ uintptr_t addr;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, NULL);
-+ SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, NULL);
-+
-+ addr = (uintptr_t)MM_GetForce(p_FmMuram->h_Mem, base, size, "FM MURAM");
-+
-+ if (addr == ILLEGAL_BASE)
-+ return NULL;
-+
-+ return UINT_TO_PTR(addr);
-+}
-+
-+t_Error FM_MURAM_FreeMem(t_Handle h_FmMuram, void *ptr)
-+{
-+ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
-+
-+ SANITY_CHECK_RETURN_ERROR(h_FmMuram, E_INVALID_HANDLE);
-+ SANITY_CHECK_RETURN_ERROR(p_FmMuram->h_Mem, E_INVALID_HANDLE);
-+
-+ if (MM_Put(p_FmMuram->h_Mem, PTR_TO_UINT(ptr)) == 0)
-+ RETURN_ERROR(MINOR, E_INVALID_ADDRESS, ("memory pointer!!!"));
-+
-+ return E_OK;
-+}
-+
-+uint64_t FM_MURAM_GetFreeMemSize(t_Handle h_FmMuram)
-+{
-+ t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram;
-+
-+ SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, 0);
-+ SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, 0);
-+
-+ return MM_GetFreeMemSize(p_FmMuram->h_Mem);
-+}
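Taken together, the routines above are a thin wrapper around the SDK memory manager (MM_*) covering the FM's internal MURAM. A minimal usage sketch, assuming the platform supplies a valid MURAM base address and size (muram_base and muram_size below are placeholder names, not driver symbols):

    /* Illustrative lifecycle only - error handling beyond NULL checks elided */
    t_Handle h_muram = FM_MURAM_ConfigAndInit(muram_base, muram_size);
    if (h_muram)
    {
        void *buf = FM_MURAM_AllocMem(h_muram, 0x100, 4); /* 256 bytes, 4-byte aligned */
        if (buf)
            FM_MURAM_FreeMem(h_muram, buf);
        FM_MURAM_Free(h_muram);
    }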
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/fman.c
-@@ -0,0 +1,1400 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include <linux/math64.h>
-+#include "fsl_fman.h"
-+#include "dpaa_integration_ext.h"
-+
-+uint32_t fman_get_bmi_err_event(struct fman_bmi_regs *bmi_rg)
-+{
-+ uint32_t event, mask, force;
-+
-+ event = ioread32be(&bmi_rg->fmbm_ievr);
-+ mask = ioread32be(&bmi_rg->fmbm_ier);
-+ event &= mask;
-+ /* clear the forced events */
-+ force = ioread32be(&bmi_rg->fmbm_ifr);
-+ if (force & event)
-+ iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
-+ /* clear the acknowledged events */
-+ iowrite32be(event, &bmi_rg->fmbm_ievr);
-+ return event;
-+}
-+
-+uint32_t fman_get_qmi_err_event(struct fman_qmi_regs *qmi_rg)
-+{
-+ uint32_t event, mask, force;
-+
-+ event = ioread32be(&qmi_rg->fmqm_eie);
-+ mask = ioread32be(&qmi_rg->fmqm_eien);
-+ event &= mask;
-+
-+ /* clear the forced events */
-+ force = ioread32be(&qmi_rg->fmqm_eif);
-+ if (force & event)
-+ iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
-+ /* clear the acknowledged events */
-+ iowrite32be(event, &qmi_rg->fmqm_eie);
-+ return event;
-+}
-+
-+uint32_t fman_get_dma_com_id(struct fman_dma_regs *dma_rg)
-+{
-+ return ioread32be(&dma_rg->fmdmtcid);
-+}
-+
-+uint64_t fman_get_dma_addr(struct fman_dma_regs *dma_rg)
-+{
-+ uint64_t addr;
-+
-+ addr = (uint64_t)ioread32be(&dma_rg->fmdmtal);
-+ addr |= ((uint64_t)(ioread32be(&dma_rg->fmdmtah)) << 32);
-+
-+ return addr;
-+}
-+
-+uint32_t fman_get_dma_err_event(struct fman_dma_regs *dma_rg)
-+{
-+ uint32_t status, mask;
-+
-+ status = ioread32be(&dma_rg->fmdmsr);
-+ mask = ioread32be(&dma_rg->fmdmmr);
-+
-+ /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
-+ if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
-+ status &= ~DMA_STATUS_BUS_ERR;
-+
-+ /* clear relevant bits if mask has no DMA_MODE_ECC */
-+ if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
-+ status &= ~(DMA_STATUS_FM_SPDAT_ECC |
-+ DMA_STATUS_READ_ECC |
-+ DMA_STATUS_SYSTEM_WRITE_ECC |
-+ DMA_STATUS_FM_WRITE_ECC);
-+
-+ /* clear set events */
-+ iowrite32be(status, &dma_rg->fmdmsr);
-+
-+ return status;
-+}
-+
-+uint32_t fman_get_fpm_err_event(struct fman_fpm_regs *fpm_rg)
-+{
-+ uint32_t event;
-+
-+ event = ioread32be(&fpm_rg->fmfp_ee);
-+ /* clear all occurred events */
-+ iowrite32be(event, &fpm_rg->fmfp_ee);
-+ return event;
-+}
-+
-+uint32_t fman_get_muram_err_event(struct fman_fpm_regs *fpm_rg)
-+{
-+ uint32_t event, mask;
-+
-+ event = ioread32be(&fpm_rg->fm_rcr);
-+ mask = ioread32be(&fpm_rg->fm_rie);
-+
-+ /* clear MURAM event bit (do not clear IRAM event) */
-+ iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
-+
-+ if ((mask & FPM_MURAM_ECC_ERR_EX_EN))
-+ return event;
-+ else
-+ return 0;
-+}
-+
-+uint32_t fman_get_iram_err_event(struct fman_fpm_regs *fpm_rg)
-+{
-+ uint32_t event, mask;
-+
-+ event = ioread32be(&fpm_rg->fm_rcr);
-+ mask = ioread32be(&fpm_rg->fm_rie);
-+ /* clear IRAM event bit (do not clear MURAM event) */
-+ iowrite32be(event & ~FPM_RAM_MURAM_ECC,
-+ &fpm_rg->fm_rcr);
-+
-+ if ((mask & FPM_IRAM_ECC_ERR_EX_EN))
-+ return event;
-+ else
-+ return 0;
-+}
-+
-+uint32_t fman_get_qmi_event(struct fman_qmi_regs *qmi_rg)
-+{
-+ uint32_t event, mask, force;
-+
-+ event = ioread32be(&qmi_rg->fmqm_ie);
-+ mask = ioread32be(&qmi_rg->fmqm_ien);
-+ event &= mask;
-+ /* clear the forced events */
-+ force = ioread32be(&qmi_rg->fmqm_if);
-+ if (force & event)
-+ iowrite32be(force & ~event, &qmi_rg->fmqm_if);
-+ /* clear the acknowledged events */
-+ iowrite32be(event, &qmi_rg->fmqm_ie);
-+ return event;
-+}
-+
-+void fman_enable_time_stamp(struct fman_fpm_regs *fpm_rg,
-+ uint8_t count1ubit,
-+ uint16_t fm_clk_freq)
-+{
-+ uint32_t tmp;
-+ uint64_t frac;
-+ uint32_t intgr;
-+ uint32_t ts_freq = (uint32_t)(1 << count1ubit); /* in Mhz */
-+
-+ /* configure timestamp so that bit 8 will count 1 microsecond
-+ * Find effective count rate at TIMESTAMP least significant bits:
-+ * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
-+ * Find frequency ratio between effective count rate and the clock:
-+ * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
-+ * 256/600 = 0.4266666... */
-+
-+ intgr = ts_freq / fm_clk_freq;
-+ /* we multiply by 2^16 to keep the fraction of the division
-+ * we do not div back, since we write this value as a fraction
-+ * see spec */
-+
-+ frac = ((uint64_t)ts_freq << 16) - ((uint64_t)intgr << 16) * fm_clk_freq;
-+ /* we check remainder of the division in order to round up if not int */
-+ if (do_div(frac, fm_clk_freq))
-+ frac++;
-+
-+ tmp = (intgr << FPM_TS_INT_SHIFT) | (uint16_t)frac;
-+ iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
-+
-+ /* enable timestamp with original clock */
-+ iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
-+}
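To make the fraction arithmetic in fman_enable_time_stamp() concrete, here is the computation for the figures quoted in its comment (count1ubit = 8, i.e. a 256 MHz effective count rate, against a 600 MHz FM clock); this is only a worked illustration of the rounding, not additional driver logic:

    /*
     * ts_freq = 1 << 8                        = 256
     * intgr   = 256 / 600                     = 0
     * frac    = (256 << 16) - (0 << 16) * 600 = 16777216
     * 16777216 / 600 = 27962, remainder 16    -> rounded up to 27963
     * fmfp_tsc2 is then written with (0 << FPM_TS_INT_SHIFT) | 27963,
     * i.e. a fractional count of 27963 / 65536 ~= 0.4266 per FM clock.
     */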
-+
-+uint32_t fman_get_fpm_error_interrupts(struct fman_fpm_regs *fpm_rg)
-+{
-+ return ioread32be(&fpm_rg->fm_epi);
-+}
-+
-+
-+int fman_set_erratum_10gmac_a004_wa(struct fman_fpm_regs *fpm_rg)
-+{
-+ int timeout = 100;
-+
-+ iowrite32be(0x40000000, &fpm_rg->fmfp_extc);
-+
-+ while ((ioread32be(&fpm_rg->fmfp_extc) & 0x40000000) && --timeout)
-+ udelay(10);
-+
-+ if (!timeout)
-+ return -EBUSY;
-+ return 0;
-+}
-+
-+void fman_set_ctrl_intr(struct fman_fpm_regs *fpm_rg,
-+ uint8_t event_reg_id,
-+ uint32_t enable_events)
-+{
-+ iowrite32be(enable_events, &fpm_rg->fmfp_cee[event_reg_id]);
-+}
-+
-+uint32_t fman_get_ctrl_intr(struct fman_fpm_regs *fpm_rg, uint8_t event_reg_id)
-+{
-+ return ioread32be(&fpm_rg->fmfp_cee[event_reg_id]);
-+}
-+
-+void fman_set_num_of_riscs_per_port(struct fman_fpm_regs *fpm_rg,
-+ uint8_t port_id,
-+ uint8_t num_fman_ctrls,
-+ uint32_t or_fman_ctrl)
-+{
-+ uint32_t tmp = 0;
-+
-+ tmp = (uint32_t)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
-+ /* TODO - maybe select the CTL# according to other criteria */
-+ if (num_fman_ctrls == 2)
-+ tmp = FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
-+ /* order restoration */
-+ tmp |= (or_fman_ctrl << FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | or_fman_ctrl;
-+
-+ iowrite32be(tmp, &fpm_rg->fmfp_prc);
-+}
-+
-+void fman_set_order_restoration_per_port(struct fman_fpm_regs *fpm_rg,
-+ uint8_t port_id,
-+ bool independent_mode,
-+ bool is_rx_port)
-+{
-+ uint32_t tmp = 0;
-+
-+ tmp = (uint32_t)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
-+ if (independent_mode) {
-+ if (is_rx_port)
-+ tmp |= (FPM_PRT_FM_CTL1 <<
-+ FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PRT_FM_CTL1;
-+ else
-+ tmp |= (FPM_PRT_FM_CTL2 <<
-+ FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PRT_FM_CTL2;
-+ } else {
-+ tmp |= (FPM_PRT_FM_CTL2|FPM_PRT_FM_CTL1);
-+
-+ /* order restoration */
-+ if (port_id % 2)
-+ tmp |= (FPM_PRT_FM_CTL1 <<
-+ FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
-+ else
-+ tmp |= (FPM_PRT_FM_CTL2 <<
-+ FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
-+ }
-+ iowrite32be(tmp, &fpm_rg->fmfp_prc);
-+}
-+
-+uint8_t fman_get_qmi_deq_th(struct fman_qmi_regs *qmi_rg)
-+{
-+ return (uint8_t)ioread32be(&qmi_rg->fmqm_gc);
-+}
-+
-+uint8_t fman_get_qmi_enq_th(struct fman_qmi_regs *qmi_rg)
-+{
-+ return (uint8_t)(ioread32be(&qmi_rg->fmqm_gc) >> 8);
-+}
-+
-+void fman_set_qmi_enq_th(struct fman_qmi_regs *qmi_rg, uint8_t val)
-+{
-+ uint32_t tmp_reg;
-+
-+ tmp_reg = ioread32be(&qmi_rg->fmqm_gc);
-+ tmp_reg &= ~QMI_CFG_ENQ_MASK;
-+ tmp_reg |= ((uint32_t)val << 8);
-+ iowrite32be(tmp_reg, &qmi_rg->fmqm_gc);
-+}
-+
-+void fman_set_qmi_deq_th(struct fman_qmi_regs *qmi_rg, uint8_t val)
-+{
-+ uint32_t tmp_reg;
-+
-+ tmp_reg = ioread32be(&qmi_rg->fmqm_gc);
-+ tmp_reg &= ~QMI_CFG_DEQ_MASK;
-+ tmp_reg |= (uint32_t)val;
-+ iowrite32be(tmp_reg, &qmi_rg->fmqm_gc);
-+}
-+
-+void fman_qmi_disable_dispatch_limit(struct fman_fpm_regs *fpm_rg)
-+{
-+ iowrite32be(0, &fpm_rg->fmfp_mxd);
-+}
-+
-+void fman_set_liodn_per_port(struct fman_rg *fman_rg, uint8_t port_id,
-+ uint16_t liodn_base,
-+ uint16_t liodn_ofst)
-+{
-+ uint32_t tmp;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return;
-+
-+ /* set LIODN base for this port */
-+ tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
-+ if (port_id % 2) {
-+ tmp &= ~FM_LIODN_BASE_MASK;
-+ tmp |= (uint32_t)liodn_base;
-+ } else {
-+ tmp &= ~(FM_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
-+ tmp |= (uint32_t)liodn_base << DMA_LIODN_SHIFT;
-+ }
-+ iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
-+ iowrite32be((uint32_t)liodn_ofst,
-+ &fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
-+}
-+
-+bool fman_is_port_stalled(struct fman_fpm_regs *fpm_rg, uint8_t port_id)
-+{
-+ return (bool)!!(ioread32be(&fpm_rg->fmfp_ps[port_id]) & FPM_PS_STALLED);
-+}
-+
-+void fman_resume_stalled_port(struct fman_fpm_regs *fpm_rg, uint8_t port_id)
-+{
-+ uint32_t tmp;
-+
-+ tmp = (uint32_t)((port_id << FPM_PORT_FM_CTL_PORTID_SHIFT) |
-+ FPM_PRC_REALSE_STALLED);
-+ iowrite32be(tmp, &fpm_rg->fmfp_prc);
-+}
-+
-+int fman_reset_mac(struct fman_fpm_regs *fpm_rg, uint8_t mac_id, bool is_10g)
-+{
-+ uint32_t msk, timeout = 100;
-+
-+ /* Get the relevant bit mask */
-+ if (is_10g) {
-+ switch (mac_id) {
-+ case(0):
-+ msk = FPM_RSTC_10G0_RESET;
-+ break;
-+ case(1):
-+ msk = FPM_RSTC_10G1_RESET;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ } else {
-+ switch (mac_id) {
-+ case(0):
-+ msk = FPM_RSTC_1G0_RESET;
-+ break;
-+ case(1):
-+ msk = FPM_RSTC_1G1_RESET;
-+ break;
-+ case(2):
-+ msk = FPM_RSTC_1G2_RESET;
-+ break;
-+ case(3):
-+ msk = FPM_RSTC_1G3_RESET;
-+ break;
-+ case(4):
-+ msk = FPM_RSTC_1G4_RESET;
-+ break;
-+ case (5):
-+ msk = FPM_RSTC_1G5_RESET;
-+ break;
-+ case (6):
-+ msk = FPM_RSTC_1G6_RESET;
-+ break;
-+ case (7):
-+ msk = FPM_RSTC_1G7_RESET;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ }
-+ /* reset */
-+ iowrite32be(msk, &fpm_rg->fm_rstc);
-+ while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
-+ udelay(10);
-+
-+ if (!timeout)
-+ return -EBUSY;
-+ return 0;
-+}
-+
-+uint16_t fman_get_size_of_fifo(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
-+{
-+ uint32_t tmp_reg;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return 0;
-+
-+ tmp_reg = ioread32be(&bmi_rg->fmbm_pfs[port_id - 1]);
-+ return (uint16_t)((tmp_reg & BMI_FIFO_SIZE_MASK) + 1);
-+}
-+
-+uint32_t fman_get_total_fifo_size(struct fman_bmi_regs *bmi_rg)
-+{
-+ uint32_t reg, res;
-+
-+ reg = ioread32be(&bmi_rg->fmbm_cfg1);
-+ res = (reg >> BMI_CFG1_FIFO_SIZE_SHIFT) & 0x3ff;
-+ return res * FMAN_BMI_FIFO_UNITS;
-+}
-+
-+uint16_t fman_get_size_of_extra_fifo(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id)
-+{
-+ uint32_t tmp_reg;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return 0;
-+
-+ tmp_reg = ioread32be(&bmi_rg->fmbm_pfs[port_id-1]);
-+ return (uint16_t)((tmp_reg & BMI_EXTRA_FIFO_SIZE_MASK) >>
-+ BMI_EXTRA_FIFO_SIZE_SHIFT);
-+}
-+
-+void fman_set_size_of_fifo(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint32_t sz_fifo,
-+ uint32_t extra_sz_fifo)
-+{
-+ uint32_t tmp;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return;
-+
-+ /* calculate reg */
-+ tmp = (uint32_t)((sz_fifo / FMAN_BMI_FIFO_UNITS - 1) |
-+ ((extra_sz_fifo / FMAN_BMI_FIFO_UNITS) <<
-+ BMI_EXTRA_FIFO_SIZE_SHIFT));
-+ iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
-+}
-+
-+uint8_t fman_get_num_of_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
-+{
-+ uint32_t tmp;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return 0;
-+
-+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
-+ return (uint8_t)(((tmp & BMI_NUM_OF_TASKS_MASK) >>
-+ BMI_NUM_OF_TASKS_SHIFT) + 1);
-+}
-+
-+uint8_t fman_get_num_extra_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
-+{
-+ uint32_t tmp;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return 0;
-+
-+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
-+ return (uint8_t)((tmp & BMI_NUM_OF_EXTRA_TASKS_MASK) >>
-+ BMI_EXTRA_NUM_OF_TASKS_SHIFT);
-+}
-+
-+void fman_set_num_of_tasks(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint8_t num_tasks,
-+ uint8_t num_extra_tasks)
-+{
-+ uint32_t tmp;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return;
-+
-+ /* calculate reg */
-+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
-+ ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
-+ tmp |= (uint32_t)(((num_tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
-+ (num_extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
-+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
-+}
-+
-+uint8_t fman_get_num_of_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
-+{
-+ uint32_t tmp;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return 0;
-+
-+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
-+ return (uint8_t)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
-+ BMI_NUM_OF_DMAS_SHIFT) + 1);
-+}
-+
-+uint8_t fman_get_num_extra_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
-+{
-+ uint32_t tmp;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return 0;
-+
-+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
-+ return (uint8_t)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
-+ BMI_EXTRA_NUM_OF_DMAS_SHIFT);
-+}
-+
-+void fman_set_num_of_open_dmas(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint8_t num_open_dmas,
-+ uint8_t num_extra_open_dmas,
-+ uint8_t total_num_dmas)
-+{
-+ uint32_t tmp = 0;
-+
-+ if ((port_id > 63) || (port_id < 1))
-+ return;
-+
-+ /* calculate reg */
-+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
-+ ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
-+ tmp |= (uint32_t)(((num_open_dmas-1) << BMI_NUM_OF_DMAS_SHIFT) |
-+ (num_extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
-+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
-+
-+ /* update the total number of DMAs with the committed number of open
-+ * DMAs and the maximum uncommitted pool. */
-+ if (total_num_dmas)
-+ {
-+ tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
-+ tmp |= (uint32_t)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
-+ iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
-+ }
-+}
-+
-+void fman_set_vsp_window(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint8_t base_storage_profile,
-+ uint8_t log2_num_of_profiles)
-+{
-+ uint32_t tmp = 0;
-+ if ((port_id > 63) || (port_id < 1))
-+ return;
-+
-+ tmp = ioread32be(&bmi_rg->fmbm_spliodn[port_id-1]);
-+ tmp |= (uint32_t)((uint32_t)base_storage_profile & 0x3f) << 16;
-+ tmp |= (uint32_t)log2_num_of_profiles << 28;
-+ iowrite32be(tmp, &bmi_rg->fmbm_spliodn[port_id-1]);
-+}
-+
-+void fman_set_congestion_group_pfc_priority(uint32_t *cpg_rg,
-+ uint32_t congestion_group_id,
-+ uint8_t priority_bit_map,
-+ uint32_t reg_num)
-+{
-+ uint32_t offset, tmp = 0;
-+
-+ offset = (congestion_group_id%4)*8;
-+
-+ tmp = ioread32be(&cpg_rg[reg_num]);
-+ tmp &= ~(0xFF<<offset);
-+ tmp |= (uint32_t)priority_bit_map << offset;
-+
-+ iowrite32be(tmp, &cpg_rg[reg_num]);
-+}
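The congestion-group helper above packs four 8-bit priority maps into each 32-bit CPG register. A purely illustrative example of the indexing:

    /*
     * congestion_group_id = 6 -> offset = (6 % 4) * 8 = 16,
     * so its priority_bit_map occupies bits 16..23 of cpg_rg[reg_num]
     * (the caller passes reg_num to select that group's register).
     */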
-+
-+/*****************************************************************************/
-+/* API Init unit functions */
-+/*****************************************************************************/
-+void fman_defconfig(struct fman_cfg *cfg, bool is_master)
-+{
-+ memset(cfg, 0, sizeof(struct fman_cfg));
-+
-+ cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
-+ cfg->dma_err = DEFAULT_DMA_ERR;
-+ cfg->halt_on_external_activ = DEFAULT_HALT_ON_EXTERNAL_ACTIVATION;
-+ cfg->halt_on_unrecov_ecc_err = DEFAULT_HALT_ON_UNRECOVERABLE_ECC_ERROR;
-+ cfg->en_iram_test_mode = FALSE;
-+ cfg->en_muram_test_mode = FALSE;
-+ cfg->external_ecc_rams_enable = DEFAULT_EXTERNAL_ECC_RAMS_ENABLE;
-+
-+ if (!is_master)
-+ return;
-+
-+ cfg->dma_aid_override = DEFAULT_AID_OVERRIDE;
-+ cfg->dma_aid_mode = DEFAULT_AID_MODE;
-+ cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
-+ cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
-+ cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
-+ cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
-+ cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
-+ cfg->dma_en_emergency = DEFAULT_DMA_EN_EMERGENCY;
-+ cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
-+ cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
-+ cfg->dma_en_emergency_smoother = DEFAULT_DMA_EN_EMERGENCY_SMOOTHER;
-+ cfg->dma_emergency_switch_counter = DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
-+ cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
-+ cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
-+ cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
-+ cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
-+ cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
-+ cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
-+ cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
-+ cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
-+ cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
-+
-+ cfg->pedantic_dma = FALSE;
-+ cfg->tnum_aging_period = DEFAULT_TNUM_AGING_PERIOD;
-+ cfg->dma_stop_on_bus_error = FALSE;
-+ cfg->qmi_deq_option_support = FALSE;
-+}
-+
-+void fman_regconfig(struct fman_rg *fman_rg, struct fman_cfg *cfg)
-+{
-+ uint32_t tmp_reg;
-+
-+ /* read the values from the registers as they are initialized by the HW with
-+ * the required values.
-+ */
-+ tmp_reg = ioread32be(&fman_rg->bmi_rg->fmbm_cfg1);
-+ cfg->total_fifo_size =
-+ (((tmp_reg & BMI_TOTAL_FIFO_SIZE_MASK) >> BMI_CFG1_FIFO_SIZE_SHIFT) + 1) * FMAN_BMI_FIFO_UNITS;
-+
-+ tmp_reg = ioread32be(&fman_rg->bmi_rg->fmbm_cfg2);
-+ cfg->total_num_of_tasks =
-+ (uint8_t)(((tmp_reg & BMI_TOTAL_NUM_OF_TASKS_MASK) >> BMI_CFG2_TASKS_SHIFT) + 1);
-+
-+ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmtr);
-+ cfg->dma_comm_qtsh_asrt_emer = (uint8_t)(tmp_reg >> DMA_THRESH_COMMQ_SHIFT);
-+
-+ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmhy);
-+ cfg->dma_comm_qtsh_clr_emer = (uint8_t)(tmp_reg >> DMA_THRESH_COMMQ_SHIFT);
-+
-+ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmmr);
-+ cfg->dma_cache_override = (enum fman_dma_cache_override)((tmp_reg & DMA_MODE_CACHE_OR_MASK) >> DMA_MODE_CACHE_OR_SHIFT);
-+ cfg->dma_cam_num_of_entries = (uint8_t)((((tmp_reg & DMA_MODE_CEN_MASK) >> DMA_MODE_CEN_SHIFT) +1)*DMA_CAM_UNITS);
-+ cfg->dma_aid_override = (bool)((tmp_reg & DMA_MODE_AID_OR)? TRUE:FALSE);
-+ cfg->dma_dbg_cnt_mode = (enum fman_dma_dbg_cnt_mode)((tmp_reg & DMA_MODE_DBG_MASK) >> DMA_MODE_DBG_SHIFT);
-+ cfg->dma_en_emergency = (bool)((tmp_reg & DMA_MODE_EB)? TRUE : FALSE);
-+
-+ tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_mxd);
-+ cfg->disp_limit_tsh = (uint8_t)((tmp_reg & FPM_DISP_LIMIT_MASK) >> FPM_DISP_LIMIT_SHIFT);
-+
-+ tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_dist1);
-+ cfg->prs_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_PRS_MASK ) >> FPM_THR1_PRS_SHIFT);
-+ cfg->plcr_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_KG_MASK ) >> FPM_THR1_KG_SHIFT);
-+ cfg->kg_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_PLCR_MASK ) >> FPM_THR1_PLCR_SHIFT);
-+ cfg->bmi_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_BMI_MASK ) >> FPM_THR1_BMI_SHIFT);
-+
-+ tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_dist2);
-+ cfg->qmi_enq_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_QMI_ENQ_MASK ) >> FPM_THR2_QMI_ENQ_SHIFT);
-+ cfg->qmi_deq_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_QMI_DEQ_MASK ) >> FPM_THR2_QMI_DEQ_SHIFT);
-+ cfg->fm_ctl1_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_FM_CTL1_MASK ) >> FPM_THR2_FM_CTL1_SHIFT);
-+ cfg->fm_ctl2_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_FM_CTL2_MASK ) >> FPM_THR2_FM_CTL2_SHIFT);
-+
-+ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmsetr);
-+ cfg->dma_sos_emergency = tmp_reg;
-+
-+ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmwcr);
-+ cfg->dma_watchdog = tmp_reg/cfg->clk_freq;
-+
-+ tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmemsr);
-+ cfg->dma_en_emergency_smoother = (bool)((tmp_reg & DMA_EMSR_EMSTR_MASK)? TRUE : FALSE);
-+ cfg->dma_emergency_switch_counter = (tmp_reg & DMA_EMSR_EMSTR_MASK);
-+}
-+
-+void fman_reset(struct fman_fpm_regs *fpm_rg)
-+{
-+ iowrite32be(FPM_RSTC_FM_RESET, &fpm_rg->fm_rstc);
-+}
-+
-+/**************************************************************************//**
-+ @Function FM_Init
-+
-+ @Description Initializes the FM module
-+
-+ @Param[in] h_Fm - FM module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+int fman_dma_init(struct fman_dma_regs *dma_rg, struct fman_cfg *cfg)
-+{
-+ uint32_t tmp_reg;
-+
-+ /**********************/
-+ /* Init DMA Registers */
-+ /**********************/
-+ /* clear status reg events */
-+ /* oren - check!!! */
-+ tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
-+ DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
-+ iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg,
-+ &dma_rg->fmdmsr);
-+
-+ /* configure mode register */
-+ tmp_reg = 0;
-+ tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
-+ if (cfg->dma_aid_override)
-+ tmp_reg |= DMA_MODE_AID_OR;
-+ if (cfg->exceptions & FMAN_EX_DMA_BUS_ERROR)
-+ tmp_reg |= DMA_MODE_BER;
-+ if ((cfg->exceptions & FMAN_EX_DMA_SYSTEM_WRITE_ECC) |
-+ (cfg->exceptions & FMAN_EX_DMA_READ_ECC) |
-+ (cfg->exceptions & FMAN_EX_DMA_FM_WRITE_ECC))
-+ tmp_reg |= DMA_MODE_ECC;
-+ if (cfg->dma_stop_on_bus_error)
-+ tmp_reg |= DMA_MODE_SBER;
-+ if(cfg->dma_axi_dbg_num_of_beats)
-+ tmp_reg |= (uint32_t)(DMA_MODE_AXI_DBG_MASK &
-+ ((cfg->dma_axi_dbg_num_of_beats - 1) << DMA_MODE_AXI_DBG_SHIFT));
-+
-+ if (cfg->dma_en_emergency) {
-+ tmp_reg |= cfg->dma_emergency_bus_select;
-+ tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
-+ if (cfg->dma_en_emergency_smoother)
-+ iowrite32be(cfg->dma_emergency_switch_counter,
-+ &dma_rg->fmdmemsr);
-+ }
-+ tmp_reg |= ((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) <<
-+ DMA_MODE_CEN_SHIFT;
-+ tmp_reg |= DMA_MODE_SECURE_PROT;
-+ tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
-+ tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
-+
-+ if (cfg->pedantic_dma)
-+ tmp_reg |= DMA_MODE_EMER_READ;
-+
-+ iowrite32be(tmp_reg, &dma_rg->fmdmmr);
-+
-+ /* configure thresholds register */
-+ tmp_reg = ((uint32_t)cfg->dma_comm_qtsh_asrt_emer <<
-+ DMA_THRESH_COMMQ_SHIFT) |
-+ ((uint32_t)cfg->dma_read_buf_tsh_asrt_emer <<
-+ DMA_THRESH_READ_INT_BUF_SHIFT) |
-+ ((uint32_t)cfg->dma_write_buf_tsh_asrt_emer);
-+
-+ iowrite32be(tmp_reg, &dma_rg->fmdmtr);
-+
-+ /* configure hysteresis register */
-+ tmp_reg = ((uint32_t)cfg->dma_comm_qtsh_clr_emer <<
-+ DMA_THRESH_COMMQ_SHIFT) |
-+ ((uint32_t)cfg->dma_read_buf_tsh_clr_emer <<
-+ DMA_THRESH_READ_INT_BUF_SHIFT) |
-+ ((uint32_t)cfg->dma_write_buf_tsh_clr_emer);
-+
-+ iowrite32be(tmp_reg, &dma_rg->fmdmhy);
-+
-+ /* configure emergency threshold */
-+ iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
-+
-+ /* configure Watchdog */
-+ iowrite32be((cfg->dma_watchdog * cfg->clk_freq),
-+ &dma_rg->fmdmwcr);
-+
-+ iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
-+
-+ return 0;
-+}
-+
-+int fman_fpm_init(struct fman_fpm_regs *fpm_rg, struct fman_cfg *cfg)
-+{
-+ uint32_t tmp_reg;
-+ int i;
-+
-+ /**********************/
-+ /* Init FPM Registers */
-+ /**********************/
-+ tmp_reg = (uint32_t)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
-+ iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
-+
-+ tmp_reg = (((uint32_t)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
-+ ((uint32_t)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
-+ ((uint32_t)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
-+ ((uint32_t)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
-+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
-+
-+ tmp_reg = (((uint32_t)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
-+ ((uint32_t)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
-+ ((uint32_t)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
-+ ((uint32_t)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
-+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
-+
-+ /* define exceptions and error behavior */
-+ tmp_reg = 0;
-+ /* Clear events */
-+ tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
-+ FPM_EV_MASK_SINGLE_ECC);
-+ /* enable interrupts */
-+ if (cfg->exceptions & FMAN_EX_FPM_STALL_ON_TASKS)
-+ tmp_reg |= FPM_EV_MASK_STALL_EN;
-+ if (cfg->exceptions & FMAN_EX_FPM_SINGLE_ECC)
-+ tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
-+ if (cfg->exceptions & FMAN_EX_FPM_DOUBLE_ECC)
-+ tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
-+ tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
-+ tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
-+ if (!cfg->halt_on_external_activ)
-+ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
-+ if (!cfg->halt_on_unrecov_ecc_err)
-+ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
-+ iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
-+
-+ /* clear all fmCtls event registers */
-+ for (i = 0; i < cfg->num_of_fman_ctrl_evnt_regs; i++)
-+ iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
-+
-+ /* RAM ECC - enable and clear events*/
-+ /* first we need to clear all parser memory,
-+ * as it is uninitialized and may cause ECC errors */
-+ /* event bits */
-+ tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
-+ /* RAMs ECC enable is not affected by the RCR bit, but by a COP configuration */
-+ if (cfg->external_ecc_rams_enable)
-+ tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;
-+
-+ /* enable test mode */
-+ if (cfg->en_muram_test_mode)
-+ tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
-+ if (cfg->en_iram_test_mode)
-+ tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
-+ iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
-+
-+ tmp_reg = 0;
-+ if (cfg->exceptions & FMAN_EX_IRAM_ECC) {
-+ tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
-+ fman_enable_rams_ecc(fpm_rg);
-+ }
-+ if (cfg->exceptions & FMAN_EX_NURAM_ECC) {
-+ tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
-+ fman_enable_rams_ecc(fpm_rg);
-+ }
-+ iowrite32be(tmp_reg, &fpm_rg->fm_rie);
-+
-+ return 0;
-+}
-+
-+int fman_bmi_init(struct fman_bmi_regs *bmi_rg, struct fman_cfg *cfg)
-+{
-+ uint32_t tmp_reg;
-+
-+ /**********************/
-+ /* Init BMI Registers */
-+ /**********************/
-+
-+ /* define common resources */
-+ tmp_reg = cfg->fifo_base_addr;
-+ tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
-+
-+ tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
-+ BMI_CFG1_FIFO_SIZE_SHIFT);
-+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
-+
-+ tmp_reg = ((uint32_t)(cfg->total_num_of_tasks - 1) <<
-+ BMI_CFG2_TASKS_SHIFT);
-+ /* num of DMA's will be dynamically updated when each port is set */
-+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
-+
-+ /* define unmaskable exceptions, enable and clear events */
-+ tmp_reg = 0;
-+ iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
-+ BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
-+ BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
-+ BMI_ERR_INTR_EN_DISPATCH_RAM_ECC,
-+ &bmi_rg->fmbm_ievr);
-+
-+ if (cfg->exceptions & FMAN_EX_BMI_LIST_RAM_ECC)
-+ tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
-+ if (cfg->exceptions & FMAN_EX_BMI_PIPELINE_ECC)
-+ tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
-+ if (cfg->exceptions & FMAN_EX_BMI_STATISTICS_RAM_ECC)
-+ tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
-+ if (cfg->exceptions & FMAN_EX_BMI_DISPATCH_RAM_ECC)
-+ tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
-+ iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
-+
-+ return 0;
-+}
-+
-+int fman_qmi_init(struct fman_qmi_regs *qmi_rg, struct fman_cfg *cfg)
-+{
-+ uint32_t tmp_reg;
-+ uint16_t period_in_fm_clocks;
-+ uint8_t remainder;
-+ /**********************/
-+ /* Init QMI Registers */
-+ /**********************/
-+ /* Clear error interrupt events */
-+
-+ iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
-+ &qmi_rg->fmqm_eie);
-+ tmp_reg = 0;
-+ if (cfg->exceptions & FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
-+ tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
-+ if (cfg->exceptions & FMAN_EX_QMI_DOUBLE_ECC)
-+ tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
-+ /* enable events */
-+ iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
-+
-+ if (cfg->tnum_aging_period) {
-+ /* tnum_aging_period is in units of usec, clk_freq is in MHz */
-+ period_in_fm_clocks = (uint16_t)
-+ (cfg->tnum_aging_period * cfg->clk_freq);
-+ /* period_in_fm_clocks must be a multiple of 64 */
-+ remainder = (uint8_t)(period_in_fm_clocks % 64);
-+ if (remainder)
-+ tmp_reg = (uint32_t)((period_in_fm_clocks / 64) + 1);
-+ else{
-+ tmp_reg = (uint32_t)(period_in_fm_clocks / 64);
-+ if (!tmp_reg)
-+ tmp_reg = 1;
-+ }
-+ tmp_reg <<= QMI_TAPC_TAP;
-+ iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
-+ }
-+ tmp_reg = 0;
-+ /* Clear interrupt events */
-+ iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
-+ if (cfg->exceptions & FMAN_EX_QMI_SINGLE_ECC)
-+ tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
-+ /* enable events */
-+ iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
-+
-+ return 0;
-+}
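The TNUM aging setup in fman_qmi_init() converts microseconds into FM clocks and rounds up to a multiple of 64 clocks. A worked example, assuming a 700 MHz FM clock and a 10 us aging period (illustrative values only):

    /*
     * period_in_fm_clocks = 10 * 700 = 7000
     * 7000 / 64 = 109, remainder 24  -> rounded up to 110
     * fmqm_tapc is written with 110 << QMI_TAPC_TAP
     */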
-+
-+int fman_enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
-+{
-+ uint32_t cfg_reg = 0;
-+
-+ /**********************/
-+ /* Enable all modules */
-+ /**********************/
-+ /* clear & enable global counters - calculate reg and save for later,
-+ because it's the same reg for QMI enable */
-+ cfg_reg = QMI_CFG_EN_COUNTERS;
-+ if (cfg->qmi_deq_option_support)
-+ cfg_reg |= (uint32_t)(((cfg->qmi_def_tnums_thresh) << 8) |
-+ (uint32_t)cfg->qmi_def_tnums_thresh);
-+
-+ iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
-+ iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
-+ &fman_rg->qmi_rg->fmqm_gc);
-+
-+ return 0;
-+}
-+
-+void fman_free_resources(struct fman_rg *fman_rg)
-+{
-+ /* disable BMI and QMI */
-+ iowrite32be(0, &fman_rg->bmi_rg->fmbm_init);
-+ iowrite32be(0, &fman_rg->qmi_rg->fmqm_gc);
-+
-+ /* release BMI resources */
-+ iowrite32be(0, &fman_rg->bmi_rg->fmbm_cfg2);
-+ iowrite32be(0, &fman_rg->bmi_rg->fmbm_cfg1);
-+
-+ /* disable ECC */
-+ iowrite32be(0, &fman_rg->fpm_rg->fm_rcr);
-+}
-+
-+/****************************************************/
-+/* API Run-time Control unit functions */
-+/****************************************************/
-+uint32_t fman_get_normal_pending(struct fman_fpm_regs *fpm_rg)
-+{
-+ return ioread32be(&fpm_rg->fm_npi);
-+}
-+
-+uint32_t fman_get_controller_event(struct fman_fpm_regs *fpm_rg, uint8_t reg_id)
-+{
-+ uint32_t event;
-+
-+ event = ioread32be(&fpm_rg->fmfp_fcev[reg_id]) &
-+ ioread32be(&fpm_rg->fmfp_cee[reg_id]);
-+ iowrite32be(event, &fpm_rg->fmfp_cev[reg_id]);
-+
-+ return event;
-+}
-+
-+uint32_t fman_get_error_pending(struct fman_fpm_regs *fpm_rg)
-+{
-+ return ioread32be(&fpm_rg->fm_epi);
-+}
-+
-+void fman_set_ports_bandwidth(struct fman_bmi_regs *bmi_rg, uint8_t *weights)
-+{
-+ int i;
-+ uint8_t shift;
-+ uint32_t tmp = 0;
-+
-+ for (i = 0; i < 64; i++) {
-+ if (weights[i] > 1) { /* no need to write 1 since it is 0 */
-+ /* Add this port to tmp_reg */
-+ /* (each 8 ports result in one register)*/
-+ shift = (uint8_t)(32 - 4 * ((i % 8) + 1));
-+ tmp |= ((weights[i] - 1) << shift);
-+ }
-+ if (i % 8 == 7) { /* last in this set */
-+ iowrite32be(tmp, &bmi_rg->fmbm_arb[i / 8]);
-+ tmp = 0;
-+ }
-+ }
-+}
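fman_set_ports_bandwidth() packs eight 4-bit weight fields per arbitration register, most significant nibble first, and writes weight - 1 so that the register's reset value 0 corresponds to a weight of 1. For instance (assuming weights in the 1..16 range the 4-bit field allows):

    /*
     * i = 3 -> shift = 32 - 4 * ((3 % 8) + 1) = 16,
     * so (weights[3] - 1) lands in bits 16..19 of fmbm_arb[0];
     * ports 0..7 share fmbm_arb[0], ports 8..15 share fmbm_arb[1], and so on.
     */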
-+
-+void fman_enable_rams_ecc(struct fman_fpm_regs *fpm_rg)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&fpm_rg->fm_rcr);
-+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
-+ iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN,
-+ &fpm_rg->fm_rcr);
-+ else
-+ iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
-+ FPM_RAM_IRAM_ECC_EN,
-+ &fpm_rg->fm_rcr);
-+}
-+
-+void fman_disable_rams_ecc(struct fman_fpm_regs *fpm_rg)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&fpm_rg->fm_rcr);
-+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
-+ iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN,
-+ &fpm_rg->fm_rcr);
-+ else
-+ iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
-+ &fpm_rg->fm_rcr);
-+}
-+
-+int fman_set_exception(struct fman_rg *fman_rg,
-+ enum fman_exceptions exception,
-+ bool enable)
-+{
-+ uint32_t tmp;
-+
-+ switch (exception) {
-+ case(E_FMAN_EX_DMA_BUS_ERROR):
-+ tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
-+ if (enable)
-+ tmp |= DMA_MODE_BER;
-+ else
-+ tmp &= ~DMA_MODE_BER;
-+ /* write back the updated bus-error exception enable */
-+ iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
-+ break;
-+ case(E_FMAN_EX_DMA_READ_ECC):
-+ case(E_FMAN_EX_DMA_SYSTEM_WRITE_ECC):
-+ case(E_FMAN_EX_DMA_FM_WRITE_ECC):
-+ tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
-+ if (enable)
-+ tmp |= DMA_MODE_ECC;
-+ else
-+ tmp &= ~DMA_MODE_ECC;
-+ iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
-+ break;
-+ case(E_FMAN_EX_FPM_STALL_ON_TASKS):
-+ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
-+ if (enable)
-+ tmp |= FPM_EV_MASK_STALL_EN;
-+ else
-+ tmp &= ~FPM_EV_MASK_STALL_EN;
-+ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
-+ break;
-+ case(E_FMAN_EX_FPM_SINGLE_ECC):
-+ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
-+ if (enable)
-+ tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
-+ else
-+ tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
-+ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
-+ break;
-+ case(E_FMAN_EX_FPM_DOUBLE_ECC):
-+ tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
-+ if (enable)
-+ tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
-+ else
-+ tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
-+ iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
-+ break;
-+ case(E_FMAN_EX_QMI_SINGLE_ECC):
-+ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
-+ if (enable)
-+ tmp |= QMI_INTR_EN_SINGLE_ECC;
-+ else
-+ tmp &= ~QMI_INTR_EN_SINGLE_ECC;
-+ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
-+ break;
-+ case(E_FMAN_EX_QMI_DOUBLE_ECC):
-+ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
-+ if (enable)
-+ tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
-+ else
-+ tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
-+ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
-+ break;
-+ case(E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID):
-+ tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
-+ if (enable)
-+ tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
-+ else
-+ tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
-+ iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
-+ break;
-+ case(E_FMAN_EX_BMI_LIST_RAM_ECC):
-+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
-+ if (enable)
-+ tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
-+ else
-+ tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
-+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
-+ break;
-+ case(E_FMAN_EX_BMI_STORAGE_PROFILE_ECC):
-+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
-+ if (enable)
-+ tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
-+ else
-+ tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
-+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
-+ break;
-+ case(E_FMAN_EX_BMI_STATISTICS_RAM_ECC):
-+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
-+ if (enable)
-+ tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
-+ else
-+ tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
-+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
-+ break;
-+ case(E_FMAN_EX_BMI_DISPATCH_RAM_ECC):
-+ tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
-+ if (enable)
-+ tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
-+ else
-+ tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
-+ iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
-+ break;
-+ case(E_FMAN_EX_IRAM_ECC):
-+ tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
-+ if (enable) {
-+ /* enable ECC if not enabled */
-+ fman_enable_rams_ecc(fman_rg->fpm_rg);
-+ /* enable ECC interrupts */
-+ tmp |= FPM_IRAM_ECC_ERR_EX_EN;
-+ } else {
-+ /* ECC mechanism may be disabled,
-+ * depending on driver status */
-+ fman_disable_rams_ecc(fman_rg->fpm_rg);
-+ tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
-+ }
-+ iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
-+ break;
-+ case(E_FMAN_EX_MURAM_ECC):
-+ tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
-+ if (enable) {
-+ /* enable ECC if not enabled */
-+ fman_enable_rams_ecc(fman_rg->fpm_rg);
-+ /* enable ECC interrupts */
-+ tmp |= FPM_MURAM_ECC_ERR_EX_EN;
-+ } else {
-+ /* ECC mechanism may be disabled,
-+ * depending on driver status */
-+ fman_disable_rams_ecc(fman_rg->fpm_rg);
-+ tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
-+ }
-+ iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+void fman_get_revision(struct fman_fpm_regs *fpm_rg,
-+ uint8_t *major,
-+ uint8_t *minor)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&fpm_rg->fm_ip_rev_1);
-+ *major = (uint8_t)((tmp & FPM_REV1_MAJOR_MASK) >> FPM_REV1_MAJOR_SHIFT);
-+ *minor = (uint8_t)((tmp & FPM_REV1_MINOR_MASK) >> FPM_REV1_MINOR_SHIFT);
-+
-+}
-+
-+uint32_t fman_get_counter(struct fman_rg *fman_rg,
-+ enum fman_counters reg_name)
-+{
-+ uint32_t ret_val;
-+
-+ switch (reg_name) {
-+ case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_etfc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dtfc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_0):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc0);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_1):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc1);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_2):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc2);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_3):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc3);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dfdc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dfcc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_FROM_FD):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dffc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_CONFIRM):
-+ ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dcc);
-+ break;
-+ default:
-+ ret_val = 0;
-+ }
-+ return ret_val;
-+}
-+
-+int fman_modify_counter(struct fman_rg *fman_rg,
-+ enum fman_counters reg_name,
-+ uint32_t val)
-+{
-+ /* When applicable (i.e. when there is an 'enable counters' bit),
-+ * check that counters are enabled */
-+ switch (reg_name) {
-+ case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
-+ case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
-+ case(E_FMAN_COUNTERS_DEQ_0):
-+ case(E_FMAN_COUNTERS_DEQ_1):
-+ case(E_FMAN_COUNTERS_DEQ_2):
-+ case(E_FMAN_COUNTERS_DEQ_3):
-+ case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
-+ case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
-+ case(E_FMAN_COUNTERS_DEQ_FROM_FD):
-+ case(E_FMAN_COUNTERS_DEQ_CONFIRM):
-+ if (!(ioread32be(&fman_rg->qmi_rg->fmqm_gc) &
-+ QMI_CFG_EN_COUNTERS))
-+ return -EINVAL;
-+ break;
-+ default:
-+ break;
-+ }
-+ /* Set counter */
-+ switch (reg_name) {
-+ case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_etfc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dtfc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_0):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc0);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_1):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc1);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_2):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc2);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_3):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc3);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dfdc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dfcc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_FROM_FD):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dffc);
-+ break;
-+ case(E_FMAN_COUNTERS_DEQ_CONFIRM):
-+ iowrite32be(val, &fman_rg->qmi_rg->fmqm_dcc);
-+ break;
-+ case(E_FMAN_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT):
-+ iowrite32be(val, &fman_rg->dma_rg->fmdmsefrc);
-+ break;
-+ case(E_FMAN_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT):
-+ iowrite32be(val, &fman_rg->dma_rg->fmdmsqfrc);
-+ break;
-+ case(E_FMAN_COUNTERS_SEMAPHOR_SYNC_REJECT):
-+ iowrite32be(val, &fman_rg->dma_rg->fmdmssrc);
-+ break;
-+ default:
-+ break;
-+ }
-+ return 0;
-+}
-+
-+void fman_set_dma_emergency(struct fman_dma_regs *dma_rg,
-+ bool is_write,
-+ bool enable)
-+{
-+ uint32_t msk;
-+
-+ msk = (uint32_t)(is_write ? DMA_MODE_EMER_WRITE : DMA_MODE_EMER_READ);
-+
-+ if (enable)
-+ iowrite32be(ioread32be(&dma_rg->fmdmmr) | msk,
-+ &dma_rg->fmdmmr);
-+ else /* disable */
-+ iowrite32be(ioread32be(&dma_rg->fmdmmr) & ~msk,
-+ &dma_rg->fmdmmr);
-+}
-+
-+void fman_set_dma_ext_bus_pri(struct fman_dma_regs *dma_rg, uint32_t pri)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&dma_rg->fmdmmr) |
-+ (pri << DMA_MODE_BUS_PRI_SHIFT);
-+
-+ iowrite32be(tmp, &dma_rg->fmdmmr);
-+}
-+
-+uint32_t fman_get_dma_status(struct fman_dma_regs *dma_rg)
-+{
-+ return ioread32be(&dma_rg->fmdmsr);
-+}
-+
-+void fman_force_intr(struct fman_rg *fman_rg,
-+ enum fman_exceptions exception)
-+{
-+ switch (exception) {
-+ case E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
-+ iowrite32be(QMI_ERR_INTR_EN_DEQ_FROM_DEF,
-+ &fman_rg->qmi_rg->fmqm_eif);
-+ break;
-+ case E_FMAN_EX_QMI_SINGLE_ECC:
-+ iowrite32be(QMI_INTR_EN_SINGLE_ECC,
-+ &fman_rg->qmi_rg->fmqm_if);
-+ break;
-+ case E_FMAN_EX_QMI_DOUBLE_ECC:
-+ iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC,
-+ &fman_rg->qmi_rg->fmqm_eif);
-+ break;
-+ case E_FMAN_EX_BMI_LIST_RAM_ECC:
-+ iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC,
-+ &fman_rg->bmi_rg->fmbm_ifr);
-+ break;
-+ case E_FMAN_EX_BMI_STORAGE_PROFILE_ECC:
-+ iowrite32be(BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC,
-+ &fman_rg->bmi_rg->fmbm_ifr);
-+ break;
-+ case E_FMAN_EX_BMI_STATISTICS_RAM_ECC:
-+ iowrite32be(BMI_ERR_INTR_EN_STATISTICS_RAM_ECC,
-+ &fman_rg->bmi_rg->fmbm_ifr);
-+ break;
-+ case E_FMAN_EX_BMI_DISPATCH_RAM_ECC:
-+ iowrite32be(BMI_ERR_INTR_EN_DISPATCH_RAM_ECC,
-+ &fman_rg->bmi_rg->fmbm_ifr);
-+ break;
-+ default:
-+ break;
-+ }
-+}
-+
-+bool fman_is_qmi_halt_not_busy_state(struct fman_qmi_regs *qmi_rg)
-+{
-+ return (bool)!!(ioread32be(&qmi_rg->fmqm_gs) & QMI_GS_HALT_NOT_BUSY);
-+}
-+void fman_resume(struct fman_fpm_regs *fpm_rg)
-+{
-+ uint32_t tmp;
-+
-+ tmp = ioread32be(&fpm_rg->fmfp_ee);
-+ /* clear tmp_reg event bits in order not to clear standing events */
-+ tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
-+ FPM_EV_MASK_STALL |
-+ FPM_EV_MASK_SINGLE_ECC);
-+ tmp |= FPM_EV_MASK_RELEASE_FM;
-+
-+ iowrite32be(tmp, &fpm_rg->fmfp_ee);
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_common.h
-@@ -0,0 +1,1214 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_common.h
-+
-+ @Description FM internal structures and definitions.
-+*//***************************************************************************/
-+#ifndef __FM_COMMON_H
-+#define __FM_COMMON_H
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_ext.h"
-+#include "fm_port_ext.h"
-+
-+
-+#define e_FM_PORT_TYPE_OH_HOST_COMMAND e_FM_PORT_TYPE_DUMMY
-+
-+#define CLS_PLAN_NUM_PER_GRP 8
-+
-+#define IP_OFFLOAD_PACKAGE_NUMBER 106
-+#define CAPWAP_OFFLOAD_PACKAGE_NUMBER 108
-+#define IS_OFFLOAD_PACKAGE(num) ((num == IP_OFFLOAD_PACKAGE_NUMBER) || (num == CAPWAP_OFFLOAD_PACKAGE_NUMBER))
-+
-+
-+
-+/**************************************************************************//**
-+ @Description Modules registers offsets
-+*//***************************************************************************/
-+#define FM_MM_MURAM 0x00000000
-+#define FM_MM_BMI 0x00080000
-+#define FM_MM_QMI 0x00080400
-+#define FM_MM_PRS 0x000c7000
-+#define FM_MM_KG 0x000C1000
-+#define FM_MM_DMA 0x000C2000
-+#define FM_MM_FPM 0x000C3000
-+#define FM_MM_PLCR 0x000C0000
-+#define FM_MM_IMEM 0x000C4000
-+#define FM_MM_CGP 0x000DB000
-+#define FM_MM_TRB(i) (0x000D0200 + 0x400 * (i))
-+#if (DPAA_VERSION >= 11)
-+#define FM_MM_SP 0x000dc000
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+
-+/**************************************************************************//**
-+ @Description Enum for inter-module interrupts registration
-+*//***************************************************************************/
-+typedef enum e_FmEventModules{
-+ e_FM_MOD_PRS, /**< Parser event */
-+ e_FM_MOD_KG, /**< Keygen event */
-+ e_FM_MOD_PLCR, /**< Policer event */
-+ e_FM_MOD_10G_MAC, /**< 10G MAC event */
-+ e_FM_MOD_1G_MAC, /**< 1G MAC event */
-+ e_FM_MOD_TMR, /**< Timer event */
-+ e_FM_MOD_FMAN_CTRL, /**< FMAN Controller Timer event */
-+ e_FM_MOD_MACSEC,
-+ e_FM_MOD_DUMMY_LAST
-+} e_FmEventModules;
-+
-+/**************************************************************************//**
-+ @Description Enum for interrupts types
-+*//***************************************************************************/
-+typedef enum e_FmIntrType {
-+ e_FM_INTR_TYPE_ERR,
-+ e_FM_INTR_TYPE_NORMAL
-+} e_FmIntrType;
-+
-+/**************************************************************************//**
-+ @Description Enum for inter-module interrupts registration
-+*//***************************************************************************/
-+typedef enum e_FmInterModuleEvent
-+{
-+ e_FM_EV_PRS = 0, /**< Parser event */
-+ e_FM_EV_ERR_PRS, /**< Parser error event */
-+ e_FM_EV_KG, /**< Keygen event */
-+ e_FM_EV_ERR_KG, /**< Keygen error event */
-+ e_FM_EV_PLCR, /**< Policer event */
-+ e_FM_EV_ERR_PLCR, /**< Policer error event */
-+ e_FM_EV_ERR_10G_MAC0, /**< 10G MAC 0 error event */
-+ e_FM_EV_ERR_10G_MAC1, /**< 10G MAC 1 error event */
-+ e_FM_EV_ERR_1G_MAC0, /**< 1G MAC 0 error event */
-+ e_FM_EV_ERR_1G_MAC1, /**< 1G MAC 1 error event */
-+ e_FM_EV_ERR_1G_MAC2, /**< 1G MAC 2 error event */
-+ e_FM_EV_ERR_1G_MAC3, /**< 1G MAC 3 error event */
-+ e_FM_EV_ERR_1G_MAC4, /**< 1G MAC 4 error event */
-+ e_FM_EV_ERR_1G_MAC5, /**< 1G MAC 5 error event */
-+ e_FM_EV_ERR_1G_MAC6, /**< 1G MAC 6 error event */
-+ e_FM_EV_ERR_1G_MAC7, /**< 1G MAC 7 error event */
-+ e_FM_EV_ERR_MACSEC_MAC0,
-+ e_FM_EV_TMR, /**< Timer event */
-+ e_FM_EV_10G_MAC0, /**< 10G MAC 0 event (Magic packet detection)*/
-+ e_FM_EV_10G_MAC1, /**< 10G MAC 1 event (Magic packet detection)*/
-+ e_FM_EV_1G_MAC0, /**< 1G MAC 0 event (Magic packet detection)*/
-+ e_FM_EV_1G_MAC1, /**< 1G MAC 1 event (Magic packet detection)*/
-+ e_FM_EV_1G_MAC2, /**< 1G MAC 2 (Magic packet detection)*/
-+ e_FM_EV_1G_MAC3, /**< 1G MAC 3 (Magic packet detection)*/
-+ e_FM_EV_1G_MAC4, /**< 1G MAC 4 (Magic packet detection)*/
-+ e_FM_EV_1G_MAC5, /**< 1G MAC 5 (Magic packet detection)*/
-+ e_FM_EV_1G_MAC6, /**< 1G MAC 6 (Magic packet detection)*/
-+ e_FM_EV_1G_MAC7, /**< 1G MAC 7 (Magic packet detection)*/
-+ e_FM_EV_MACSEC_MAC0, /**< MACSEC MAC 0 event */
-+ e_FM_EV_FMAN_CTRL_0, /**< Fman controller event 0 */
-+ e_FM_EV_FMAN_CTRL_1, /**< Fman controller event 1 */
-+ e_FM_EV_FMAN_CTRL_2, /**< Fman controller event 2 */
-+ e_FM_EV_FMAN_CTRL_3, /**< Fman controller event 3 */
-+ e_FM_EV_DUMMY_LAST
-+} e_FmInterModuleEvent;
-+
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Description PCD KG scheme registers
-+*//***************************************************************************/
-+typedef _Packed struct t_FmPcdPlcrProfileRegs {
-+ volatile uint32_t fmpl_pemode; /* 0x090 FMPL_PEMODE - FM Policer Profile Entry Mode*/
-+ volatile uint32_t fmpl_pegnia; /* 0x094 FMPL_PEGNIA - FM Policer Profile Entry GREEN Next Invoked Action*/
-+ volatile uint32_t fmpl_peynia; /* 0x098 FMPL_PEYNIA - FM Policer Profile Entry YELLOW Next Invoked Action*/
-+ volatile uint32_t fmpl_pernia; /* 0x09C FMPL_PERNIA - FM Policer Profile Entry RED Next Invoked Action*/
-+ volatile uint32_t fmpl_pecir; /* 0x0A0 FMPL_PECIR - FM Policer Profile Entry Committed Information Rate*/
-+ volatile uint32_t fmpl_pecbs; /* 0x0A4 FMPL_PECBS - FM Policer Profile Entry Committed Burst Size*/
-+ volatile uint32_t fmpl_pepepir_eir; /* 0x0A8 FMPL_PEPIR_EIR - FM Policer Profile Entry Peak/Excess Information Rate*/
-+ volatile uint32_t fmpl_pepbs_ebs; /* 0x0AC FMPL_PEPBS_EBS - FM Policer Profile Entry Peak/Excess Burst Size*/
-+ volatile uint32_t fmpl_pelts; /* 0x0B0 FMPL_PELTS - FM Policer Profile Entry Last TimeStamp*/
-+ volatile uint32_t fmpl_pects; /* 0x0B4 FMPL_PECTS - FM Policer Profile Entry Committed Token Status*/
-+ volatile uint32_t fmpl_pepts_ets; /* 0x0B8 FMPL_PEPTS_ETS - FM Policer Profile Entry Peak/Excess Token Status*/
-+ volatile uint32_t fmpl_pegpc; /* 0x0BC FMPL_PEGPC - FM Policer Profile Entry GREEN Packet Counter*/
-+ volatile uint32_t fmpl_peypc; /* 0x0C0 FMPL_PEYPC - FM Policer Profile Entry YELLOW Packet Counter*/
-+ volatile uint32_t fmpl_perpc; /* 0x0C4 FMPL_PERPC - FM Policer Profile Entry RED Packet Counter */
-+ volatile uint32_t fmpl_perypc; /* 0x0C8 FMPL_PERYPC - FM Policer Profile Entry Recolored YELLOW Packet Counter*/
-+ volatile uint32_t fmpl_perrpc; /* 0x0CC FMPL_PERRPC - FM Policer Profile Entry Recolored RED Packet Counter*/
-+ volatile uint32_t fmpl_res1[12]; /* 0x0D0-0x0FF Reserved */
-+} _PackedType t_FmPcdPlcrProfileRegs;
-+
-+
-+typedef _Packed struct t_FmPcdCcCapwapReassmTimeoutParams {
-+ volatile uint32_t portIdAndCapwapReassmTbl;
-+ volatile uint32_t fqidForTimeOutFrames;
-+ volatile uint32_t timeoutRequestTime;
-+}_PackedType t_FmPcdCcCapwapReassmTimeoutParams;
-+
-+/**************************************************************************//**
-+ @Description PCD CTRL Parameters Page
-+*//***************************************************************************/
-+typedef _Packed struct t_FmPcdCtrlParamsPage {
-+ volatile uint8_t reserved0[16];
-+ volatile uint32_t iprIpv4Nia;
-+ volatile uint32_t iprIpv6Nia;
-+ volatile uint8_t reserved1[24];
-+ volatile uint32_t ipfOptionsCounter;
-+ volatile uint8_t reserved2[12];
-+ volatile uint32_t misc;
-+ volatile uint32_t errorsDiscardMask;
-+ volatile uint32_t discardMask;
-+ volatile uint8_t reserved3[4];
-+ volatile uint32_t postBmiFetchNia;
-+ volatile uint8_t reserved4[172];
-+} _PackedType t_FmPcdCtrlParamsPage;
-+
-+
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/*for UNDER_CONSTRUCTION_FM_RMU_USE_SEC its defined in fm_ext.h*/
-+typedef uint32_t t_FmFmanCtrl;
-+
-+#define FPM_PORT_FM_CTL1 0x00000001
-+#define FPM_PORT_FM_CTL2 0x00000002
-+
-+
-+
-+typedef struct t_FmPcdCcFragScratchPoolCmdParams {
-+ uint32_t numOfBuffers;
-+ uint8_t bufferPoolId;
-+} t_FmPcdCcFragScratchPoolCmdParams;
-+
-+typedef struct t_FmPcdCcReassmTimeoutParams {
-+ bool activate;
-+ uint8_t tsbs;
-+ uint32_t iprcpt;
-+} t_FmPcdCcReassmTimeoutParams;
-+
-+typedef struct {
-+ uint8_t baseEntry;
-+ uint16_t numOfClsPlanEntries;
-+ uint32_t vectors[FM_PCD_MAX_NUM_OF_CLS_PLANS];
-+} t_FmPcdKgInterModuleClsPlanSet;
-+
-+/**************************************************************************//**
-+ @Description Structure for binding a port to keygen schemes.
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgInterModuleBindPortToSchemes {
-+ uint8_t hardwarePortId;
-+ uint8_t netEnvId;
-+ bool useClsPlan; /**< TRUE if this port uses the clsPlan mechanism */
-+ uint8_t numOfSchemes;
-+ uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES];
-+} t_FmPcdKgInterModuleBindPortToSchemes;
-+
-+typedef struct {
-+ uint32_t nextCcNodeInfo;
-+ t_List node;
-+} t_CcNodeInfo;
-+
-+typedef struct
-+{
-+ t_Handle h_CcNode;
-+ uint16_t index;
-+ t_List node;
-+}t_CcNodeInformation;
-+#define CC_NODE_F_OBJECT(ptr) LIST_OBJECT(ptr, t_CcNodeInformation, node)
-+
-+typedef enum e_ModifyState
-+{
-+ e_MODIFY_STATE_ADD = 0,
-+ e_MODIFY_STATE_REMOVE,
-+ e_MODIFY_STATE_CHANGE
-+} e_ModifyState;
-+
-+typedef struct
-+{
-+ t_Handle h_Manip;
-+ t_List node;
-+}t_ManipInfo;
-+#define CC_NEXT_NODE_F_OBJECT(ptr) LIST_OBJECT(ptr, t_CcNodeInfo, node)
-+
-+typedef struct {
-+ uint32_t type;
-+ uint8_t prOffset;
-+ uint16_t dataOffset;
-+ uint8_t internalBufferOffset;
-+ uint8_t numOfTasks;
-+ uint8_t numOfExtraTasks;
-+ uint8_t hardwarePortId;
-+ t_FmRevisionInfo revInfo;
-+ uint32_t nia;
-+ uint32_t discardMask;
-+} t_GetCcParams;
-+
-+typedef struct {
-+ uint32_t type;
-+ int psoSize;
-+ uint32_t nia;
-+ t_FmFmanCtrl orFmanCtrl;
-+ bool overwrite;
-+ uint8_t ofpDpde;
-+} t_SetCcParams;
-+
-+typedef struct {
-+ t_GetCcParams getCcParams;
-+ t_SetCcParams setCcParams;
-+} t_FmPortGetSetCcParams;
-+
-+typedef struct {
-+ uint32_t type;
-+ bool sleep;
-+} t_FmSetParams;
-+
-+typedef struct {
-+ uint32_t type;
-+ uint32_t fmqm_gs;
-+ uint32_t fm_npi;
-+ uint32_t fm_cld;
-+ uint32_t fmfp_extc;
-+} t_FmGetParams;
-+
-+typedef struct {
-+ t_FmSetParams setParams;
-+ t_FmGetParams getParams;
-+} t_FmGetSetParams;
-+
-+t_Error FmGetSetParams(t_Handle h_Fm, t_FmGetSetParams *p_Params);
-+
-+static __inline__ bool TRY_LOCK(t_Handle h_Spinlock, volatile bool *p_Flag)
-+{
-+ uint32_t intFlags;
-+ if (h_Spinlock)
-+ intFlags = XX_LockIntrSpinlock(h_Spinlock);
-+ else
-+ intFlags = XX_DisableAllIntr();
-+
-+ if (*p_Flag)
-+ {
-+ if (h_Spinlock)
-+ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
-+ else
-+ XX_RestoreAllIntr(intFlags);
-+ return FALSE;
-+ }
-+ *p_Flag = TRUE;
-+
-+ if (h_Spinlock)
-+ XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
-+ else
-+ XX_RestoreAllIntr(intFlags);
-+
-+ return TRUE;
-+}
-+
-+#define RELEASE_LOCK(_flag) _flag = FALSE;
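A brief usage sketch for the try-lock pair above (not part of the original driver; the ExampleProtectedWork name is invented here, and only the t_FmPcdLock structure declared later in this header is assumed):

/* Illustrative only: guard a short critical section with the driver's
 * per-object flag. TRY_LOCK() fails fast instead of spinning, so the
 * caller decides how to retry.
 */
static __inline__ bool ExampleProtectedWork(t_FmPcdLock *p_Lock)
{
    if (!TRY_LOCK(p_Lock->h_Spinlock, &p_Lock->flag))
        return FALSE;                 /* flag already held elsewhere */

    /* ... touch the state that p_Lock->flag protects ... */

    RELEASE_LOCK(p_Lock->flag);       /* plain store of FALSE, no IRQ masking */
    return TRUE;
}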
-+
-+/**************************************************************************//**
-+ @Collection Defines used for CC and BMI manipulation
-+ @{
-+*//***************************************************************************/
-+#define INTERNAL_CONTEXT_OFFSET 0x80000000
-+#define OFFSET_OF_PR 0x40000000
-+#define MANIP_EXTRA_SPACE 0x20000000
-+#define NUM_OF_TASKS 0x10000000
-+#define OFFSET_OF_DATA 0x08000000
-+#define HW_PORT_ID 0x04000000
-+#define FM_REV 0x02000000
-+#define GET_NIA_FPNE 0x01000000
-+#define GET_NIA_PNDN 0x00800000
-+#define NUM_OF_EXTRA_TASKS 0x00400000
-+#define DISCARD_MASK 0x00200000
-+
-+#define UPDATE_NIA_PNEN 0x80000000
-+#define UPDATE_PSO 0x40000000
-+#define UPDATE_NIA_PNDN 0x20000000
-+#define UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY 0x10000000
-+#define UPDATE_OFP_DPTE 0x08000000
-+#define UPDATE_NIA_FENE 0x04000000
-+#define UPDATE_NIA_CMNE 0x02000000
-+#define UPDATE_NIA_FPNE 0x01000000
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection Defines used for CC and KG manipulation
-+ @{
-+*//***************************************************************************/
-+#define UPDATE_NIA_ENQ_WITHOUT_DMA 0x80000000
-+#define UPDATE_CC_WITH_TREE 0x40000000
-+#define UPDATE_CC_WITH_DELETE_TREE 0x20000000
-+#define UPDATE_KG_NIA_CC_WA 0x10000000
-+#define UPDATE_KG_OPT_MODE 0x08000000
-+#define UPDATE_KG_NIA 0x04000000
-+#define UPDATE_CC_SHADOW_CLEAR 0x02000000
-+/* @} */
-+
-+#define UPDATE_FPM_BRKC_SLP 0x80000000
-+#define UPDATE_FPM_EXTC 0x40000000
-+#define UPDATE_FPM_EXTC_CLEAR 0x20000000
-+#define GET_FMQM_GS 0x10000000
-+#define GET_FM_NPI 0x08000000
-+#define GET_FMFP_EXTC 0x04000000
-+#define CLEAR_IRAM_READY 0x02000000
-+#define UPDATE_FM_CLD 0x01000000
-+#define GET_FM_CLD 0x00800000
-+#define FM_MAX_NUM_OF_PORTS (FM_MAX_NUM_OF_OH_PORTS + \
-+ FM_MAX_NUM_OF_1G_RX_PORTS + \
-+ FM_MAX_NUM_OF_10G_RX_PORTS + \
-+ FM_MAX_NUM_OF_1G_TX_PORTS + \
-+ FM_MAX_NUM_OF_10G_TX_PORTS)
-+
-+#define MODULE_NAME_SIZE 30
-+#define DUMMY_PORT_ID 0
-+
-+#define FM_LIODN_OFFSET_MASK 0x3FF
-+
-+/**************************************************************************//**
-+ @Description NIA Description
-+*//***************************************************************************/
-+#define NIA_ENG_MASK 0x007C0000
-+#define NIA_AC_MASK 0x0003ffff
-+
-+#define NIA_ORDER_RESTOR 0x00800000
-+#define NIA_ENG_FM_CTL 0x00000000
-+#define NIA_ENG_PRS 0x00440000
-+#define NIA_ENG_KG 0x00480000
-+#define NIA_ENG_PLCR 0x004C0000
-+#define NIA_ENG_BMI 0x00500000
-+#define NIA_ENG_QMI_ENQ 0x00540000
-+#define NIA_ENG_QMI_DEQ 0x00580000
-+
-+#define NIA_FM_CTL_AC_CC 0x00000006
-+#define NIA_FM_CTL_AC_HC 0x0000000C
-+#define NIA_FM_CTL_AC_IND_MODE_TX 0x00000008
-+#define NIA_FM_CTL_AC_IND_MODE_RX 0x0000000A
-+#define NIA_FM_CTL_AC_POP_TO_N_STEP 0x0000000e
-+#define NIA_FM_CTL_AC_PRE_BMI_FETCH_HEADER 0x00000010
-+#define NIA_FM_CTL_AC_PRE_BMI_FETCH_FULL_FRAME 0x00000018
-+#define NIA_FM_CTL_AC_POST_BMI_FETCH 0x00000012
-+#define NIA_FM_CTL_AC_PRE_BMI_ENQ_FRAME 0x0000001A
-+#define NIA_FM_CTL_AC_PRE_BMI_DISCARD_FRAME 0x0000001E
-+#define NIA_FM_CTL_AC_POST_BMI_ENQ_ORR 0x00000014
-+#define NIA_FM_CTL_AC_POST_BMI_ENQ 0x00000022
-+#define NIA_FM_CTL_AC_PRE_CC 0x00000020
-+#define NIA_FM_CTL_AC_POST_TX 0x00000024
-+/* V3 only */
-+#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
-+#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_DISCARD_FRAME 0x0000002A
-+#define NIA_FM_CTL_AC_NO_IPACC_POP_TO_N_STEP 0x0000002C
-+
-+#define NIA_BMI_AC_ENQ_FRAME 0x00000002
-+#define NIA_BMI_AC_TX_RELEASE 0x000002C0
-+#define NIA_BMI_AC_RELEASE 0x000000C0
-+#define NIA_BMI_AC_DISCARD 0x000000C1
-+#define NIA_BMI_AC_TX 0x00000274
-+#define NIA_BMI_AC_FETCH 0x00000208
-+#define NIA_BMI_AC_MASK 0x000003FF
-+
-+#define NIA_KG_DIRECT 0x00000100
-+#define NIA_KG_CC_EN 0x00000200
-+#define NIA_PLCR_ABSOLUTE 0x00008000
-+
-+#define NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA 0x00000202
-+
-+#if defined(FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675) || defined(FM_ERROR_VSP_NO_MATCH_SW006)
-+#define GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd) \
-+ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
-+ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_ENQ_FRAME) : \
-+ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME))
-+#define GET_NIA_BMI_AC_DISCARD_FRAME(h_FmPcd) \
-+ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
-+ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_DISCARD_FRAME) : \
-+ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_DISCARD_FRAME))
-+#define GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME() \
-+ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME)
-+#else
-+#define GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd) \
-+ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
-+ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_ENQ_FRAME) : \
-+ (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))
-+#define GET_NIA_BMI_AC_DISCARD_FRAME(h_FmPcd) \
-+ (uint32_t)((FmPcdIsAdvancedOffloadSupported(h_FmPcd)) ? \
-+ (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_PRE_BMI_DISCARD_FRAME) : \
-+ (NIA_ENG_BMI | NIA_BMI_AC_DISCARD))
-+#define GET_NO_PCD_NIA_BMI_AC_ENQ_FRAME() \
-+ (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)
-+#endif /* defined(FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675) || ... */
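The NIA word used throughout these macros is simply an engine selector OR'd with an action code; a small illustrative decode of the default (no advanced offload) enqueue NIA, using only the defines above (the ExampleDecodeEnqNia helper is invented for this sketch):

/* Illustrative only: how the two NIA fields separate out with the masks
 * defined above.
 */
static __inline__ void ExampleDecodeEnqNia(void)
{
    uint32_t nia = NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;  /* 0x00500002 */
    uint32_t eng = nia & NIA_ENG_MASK;                  /* 0x00500000 -> BMI engine */
    uint32_t ac  = nia & NIA_BMI_AC_MASK;               /* 0x00000002 -> enqueue frame */

    (void)eng; (void)ac;
}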
-+
-+/**************************************************************************//**
-+ @Description CTRL Parameters Page defines
-+*//***************************************************************************/
-+#define FM_CTL_PARAMS_PAGE_OP_FIX_EN 0x80000000
-+#define FM_CTL_PARAMS_PAGE_OFFLOAD_SUPPORT_EN 0x40000000
-+#define FM_CTL_PARAMS_PAGE_ALWAYS_ON 0x00000100
-+
-+#define FM_CTL_PARAMS_PAGE_ERROR_VSP_MASK 0x0000003f
-+
-+/**************************************************************************//**
-+ @Description Port Id defines
-+*//***************************************************************************/
-+#if (DPAA_VERSION == 10)
-+#define BASE_OH_PORTID 1
-+#else
-+#define BASE_OH_PORTID 2
-+#endif /* (DPAA_VERSION == 10) */
-+#define BASE_1G_RX_PORTID 8
-+#define BASE_10G_RX_PORTID 0x10
-+#define BASE_1G_TX_PORTID 0x28
-+#define BASE_10G_TX_PORTID 0x30
-+
-+#define FM_PCD_PORT_OH_BASE_INDX 0
-+#define FM_PCD_PORT_1G_RX_BASE_INDX (FM_PCD_PORT_OH_BASE_INDX+FM_MAX_NUM_OF_OH_PORTS)
-+#define FM_PCD_PORT_10G_RX_BASE_INDX (FM_PCD_PORT_1G_RX_BASE_INDX+FM_MAX_NUM_OF_1G_RX_PORTS)
-+#define FM_PCD_PORT_1G_TX_BASE_INDX (FM_PCD_PORT_10G_RX_BASE_INDX+FM_MAX_NUM_OF_10G_RX_PORTS)
-+#define FM_PCD_PORT_10G_TX_BASE_INDX (FM_PCD_PORT_1G_TX_BASE_INDX+FM_MAX_NUM_OF_1G_TX_PORTS)
-+
-+#if (FM_MAX_NUM_OF_OH_PORTS > 0)
-+#define CHECK_PORT_ID_OH_PORTS(_relativePortId) \
-+ if ((_relativePortId) >= FM_MAX_NUM_OF_OH_PORTS) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal OH_PORT port id"))
-+#else
-+#define CHECK_PORT_ID_OH_PORTS(_relativePortId) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal OH_PORT port id"))
-+#endif
-+#if (FM_MAX_NUM_OF_1G_RX_PORTS > 0)
-+#define CHECK_PORT_ID_1G_RX_PORTS(_relativePortId) \
-+ if ((_relativePortId) >= FM_MAX_NUM_OF_1G_RX_PORTS) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_RX_PORT port id"))
-+#else
-+#define CHECK_PORT_ID_1G_RX_PORTS(_relativePortId) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_RX_PORT port id"))
-+#endif
-+#if (FM_MAX_NUM_OF_10G_RX_PORTS > 0)
-+#define CHECK_PORT_ID_10G_RX_PORTS(_relativePortId) \
-+ if ((_relativePortId) >= FM_MAX_NUM_OF_10G_RX_PORTS) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_RX_PORT port id"))
-+#else
-+#define CHECK_PORT_ID_10G_RX_PORTS(_relativePortId) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_RX_PORT port id"))
-+#endif
-+#if (FM_MAX_NUM_OF_1G_TX_PORTS > 0)
-+#define CHECK_PORT_ID_1G_TX_PORTS(_relativePortId) \
-+ if ((_relativePortId) >= FM_MAX_NUM_OF_1G_TX_PORTS) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_TX_PORT port id"))
-+#else
-+#define CHECK_PORT_ID_1G_TX_PORTS(_relativePortId) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_TX_PORT port id"))
-+#endif
-+#if (FM_MAX_NUM_OF_10G_TX_PORTS > 0)
-+#define CHECK_PORT_ID_10G_TX_PORTS(_relativePortId) \
-+ if ((_relativePortId) >= FM_MAX_NUM_OF_10G_TX_PORTS) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_TX_PORT port id"))
-+#else
-+#define CHECK_PORT_ID_10G_TX_PORTS(_relativePortId) \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_TX_PORT port id"))
-+#endif
-+
-+uint8_t SwPortIdToHwPortId(e_FmPortType type, uint8_t relativePortId, uint8_t majorRev, uint8_t minorRev);
-+
-+#define HW_PORT_ID_TO_SW_PORT_ID(_relativePortId, hardwarePortId) \
-+{ if (((hardwarePortId) >= BASE_OH_PORTID) && \
-+ ((hardwarePortId) < BASE_OH_PORTID+FM_MAX_NUM_OF_OH_PORTS)) \
-+ _relativePortId = (uint8_t)((hardwarePortId)-BASE_OH_PORTID); \
-+ else if (((hardwarePortId) >= BASE_10G_TX_PORTID) && \
-+ ((hardwarePortId) < BASE_10G_TX_PORTID+FM_MAX_NUM_OF_10G_TX_PORTS)) \
-+ _relativePortId = (uint8_t)((hardwarePortId)-BASE_10G_TX_PORTID); \
-+ else if (((hardwarePortId) >= BASE_1G_TX_PORTID) && \
-+ ((hardwarePortId) < BASE_1G_TX_PORTID+FM_MAX_NUM_OF_1G_TX_PORTS)) \
-+ _relativePortId = (uint8_t)((hardwarePortId)-BASE_1G_TX_PORTID); \
-+ else if (((hardwarePortId) >= BASE_10G_RX_PORTID) && \
-+ ((hardwarePortId) < BASE_10G_RX_PORTID+FM_MAX_NUM_OF_10G_RX_PORTS)) \
-+ _relativePortId = (uint8_t)((hardwarePortId)-BASE_10G_RX_PORTID); \
-+ else if (((hardwarePortId) >= BASE_1G_RX_PORTID) && \
-+ ((hardwarePortId) < BASE_1G_RX_PORTID+FM_MAX_NUM_OF_1G_RX_PORTS)) \
-+ _relativePortId = (uint8_t)((hardwarePortId)-BASE_1G_RX_PORTID); \
-+ else { \
-+ _relativePortId = (uint8_t)DUMMY_PORT_ID; \
-+ ASSERT_COND(TRUE); \
-+ } \
-+}
-+
-+#define HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId) \
-+do { \
-+ if (((hardwarePortId) >= BASE_OH_PORTID) && ((hardwarePortId) < BASE_OH_PORTID+FM_MAX_NUM_OF_OH_PORTS)) \
-+ swPortIndex = (uint8_t)((hardwarePortId)-BASE_OH_PORTID+FM_PCD_PORT_OH_BASE_INDX); \
-+ else if (((hardwarePortId) >= BASE_1G_RX_PORTID) && \
-+ ((hardwarePortId) < BASE_1G_RX_PORTID+FM_MAX_NUM_OF_1G_RX_PORTS)) \
-+ swPortIndex = (uint8_t)((hardwarePortId)-BASE_1G_RX_PORTID+FM_PCD_PORT_1G_RX_BASE_INDX); \
-+ else if (((hardwarePortId) >= BASE_10G_RX_PORTID) && \
-+ ((hardwarePortId) < BASE_10G_RX_PORTID+FM_MAX_NUM_OF_10G_RX_PORTS)) \
-+ swPortIndex = (uint8_t)((hardwarePortId)-BASE_10G_RX_PORTID+FM_PCD_PORT_10G_RX_BASE_INDX); \
-+ else if (((hardwarePortId) >= BASE_1G_TX_PORTID) && \
-+ ((hardwarePortId) < BASE_1G_TX_PORTID+FM_MAX_NUM_OF_1G_TX_PORTS)) \
-+ swPortIndex = (uint8_t)((hardwarePortId)-BASE_1G_TX_PORTID+FM_PCD_PORT_1G_TX_BASE_INDX); \
-+ else if (((hardwarePortId) >= BASE_10G_TX_PORTID) && \
-+ ((hardwarePortId) < BASE_10G_TX_PORTID+FM_MAX_NUM_OF_10G_TX_PORTS)) \
-+ swPortIndex = (uint8_t)((hardwarePortId)-BASE_10G_TX_PORTID+FM_PCD_PORT_10G_TX_BASE_INDX); \
-+ else ASSERT_COND(FALSE); \
-+} while (0)
-+
-+#define SW_PORT_INDX_TO_HW_PORT_ID(hardwarePortId, swPortIndex) \
-+do { \
-+ if (((swPortIndex) >= FM_PCD_PORT_OH_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_1G_RX_BASE_INDX)) \
-+ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_OH_BASE_INDX+BASE_OH_PORTID); \
-+ else if (((swPortIndex) >= FM_PCD_PORT_1G_RX_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_10G_RX_BASE_INDX)) \
-+ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_1G_RX_BASE_INDX+BASE_1G_RX_PORTID); \
-+ else if (((swPortIndex) >= FM_PCD_PORT_10G_RX_BASE_INDX) && ((swPortIndex) < FM_MAX_NUM_OF_PORTS)) \
-+ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_10G_RX_BASE_INDX+BASE_10G_RX_PORTID); \
-+ else if (((swPortIndex) >= FM_PCD_PORT_1G_TX_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_10G_TX_BASE_INDX)) \
-+ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_1G_TX_BASE_INDX+BASE_1G_TX_PORTID); \
-+ else if (((swPortIndex) >= FM_PCD_PORT_10G_TX_BASE_INDX) && ((swPortIndex) < FM_MAX_NUM_OF_PORTS)) \
-+ hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_10G_TX_BASE_INDX+BASE_10G_TX_PORTID); \
-+ else ASSERT_COND(FALSE); \
-+} while (0)
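A worked round trip through the two mapping macros above (illustrative only; the ExamplePortIndexRoundTrip name is invented, the concrete numbers depend on the SoC's FM_MAX_NUM_OF_* limits, and at least two 1G Rx ports are assumed):

/* Illustrative only: hardware port BASE_1G_RX_PORTID + 1 (the second 1G Rx
 * port) maps to software index FM_PCD_PORT_1G_RX_BASE_INDX + 1 and back.
 */
static __inline__ void ExamplePortIndexRoundTrip(void)
{
    uint8_t swIdx, hwId;

    HW_PORT_ID_TO_SW_PORT_INDX(swIdx, BASE_1G_RX_PORTID + 1);
    SW_PORT_INDX_TO_HW_PORT_ID(hwId, swIdx);

    ASSERT_COND(hwId == BASE_1G_RX_PORTID + 1);  /* mapping is invertible */
}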
-+
-+#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
-+#define BMI_FIFO_UNITS 0x100
-+
-+typedef struct {
-+ void (*f_Isr) (t_Handle h_Arg);
-+ t_Handle h_SrcHandle;
-+ uint8_t guestId;
-+} t_FmIntrSrc;
-+
-+#define ILLEGAL_HDR_NUM 0xFF
-+#define NO_HDR_NUM FM_PCD_PRS_NUM_OF_HDRS
-+
-+#define IS_PRIVATE_HEADER(hdr) (((hdr) == HEADER_TYPE_USER_DEFINED_SHIM1) || \
-+ ((hdr) == HEADER_TYPE_USER_DEFINED_SHIM2))
-+#define IS_SPECIAL_HEADER(hdr) ((hdr) == HEADER_TYPE_MACSEC)
-+
-+static __inline__ uint8_t GetPrsHdrNum(e_NetHeaderType hdr)
-+{
-+ switch (hdr)
-+ { case (HEADER_TYPE_ETH): return 0;
-+ case (HEADER_TYPE_LLC_SNAP): return 1;
-+ case (HEADER_TYPE_VLAN): return 2;
-+ case (HEADER_TYPE_PPPoE): return 3;
-+ case (HEADER_TYPE_PPP): return 3;
-+ case (HEADER_TYPE_MPLS): return 4;
-+ case (HEADER_TYPE_IPv4): return 5;
-+ case (HEADER_TYPE_IPv6): return 6;
-+ case (HEADER_TYPE_GRE): return 7;
-+ case (HEADER_TYPE_MINENCAP): return 8;
-+ case (HEADER_TYPE_USER_DEFINED_L3): return 9;
-+ case (HEADER_TYPE_TCP): return 10;
-+ case (HEADER_TYPE_UDP): return 11;
-+ case (HEADER_TYPE_IPSEC_AH):
-+ case (HEADER_TYPE_IPSEC_ESP): return 12;
-+ case (HEADER_TYPE_SCTP): return 13;
-+ case (HEADER_TYPE_DCCP): return 14;
-+ case (HEADER_TYPE_USER_DEFINED_L4): return 15;
-+ case (HEADER_TYPE_USER_DEFINED_SHIM1):
-+ case (HEADER_TYPE_USER_DEFINED_SHIM2):
-+ case (HEADER_TYPE_MACSEC): return NO_HDR_NUM;
-+ default:
-+ return ILLEGAL_HDR_NUM;
-+ }
-+}
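A short illustration (not from the driver; ExampleIpv4PrsHdrNum is an invented name) of how the lookup above is typically consumed before programming per-header parser options:

/* Illustrative only: fetch the parser header number for IPv4 and verify it
 * is a real parser entry rather than a shim/special header.
 */
static __inline__ uint8_t ExampleIpv4PrsHdrNum(void)
{
    uint8_t hdrNum = GetPrsHdrNum(HEADER_TYPE_IPv4);  /* 5 per the table above */

    ASSERT_COND((hdrNum != ILLEGAL_HDR_NUM) && (hdrNum != NO_HDR_NUM));
    return hdrNum;
}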
-+
-+#define FM_PCD_MAX_NUM_OF_OPTIONS(clsPlanEntries) ((clsPlanEntries==256)? 8:((clsPlanEntries==128)? 7: ((clsPlanEntries==64)? 6: ((clsPlanEntries==32)? 5:0))))
-+
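The nested ternary above maps each supported classification-plan size to its option count, i.e. log2 of the size; a standalone host-side sketch (plain C, nothing from the SDK assumed, macro renamed locally) makes the mapping explicit:

#include <assert.h>

/* Local, renamed copy of the deleted macro, reproduced only for illustration. */
#define EXAMPLE_MAX_NUM_OF_OPTIONS(n) \
    ((n) == 256 ? 8 : ((n) == 128 ? 7 : ((n) == 64 ? 6 : ((n) == 32 ? 5 : 0))))

int main(void)
{
    assert(EXAMPLE_MAX_NUM_OF_OPTIONS(256) == 8);  /* 2^8 = 256 cls-plan entries */
    assert(EXAMPLE_MAX_NUM_OF_OPTIONS(32)  == 5);  /* 2^5 = 32 cls-plan entries  */
    assert(EXAMPLE_MAX_NUM_OF_OPTIONS(48)  == 0);  /* unsupported sizes yield 0  */
    return 0;
}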
-+
-+/**************************************************************************//**
-+ @Description A structure for initializing a keygen classification plan group
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgInterModuleClsPlanGrpParams {
-+ uint8_t netEnvId; /* IN */
-+ bool grpExists; /* OUT (unused in FmPcdKgBuildClsPlanGrp)*/
-+ uint8_t clsPlanGrpId; /* OUT */
-+ bool emptyClsPlanGrp; /* OUT */
-+ uint8_t numOfOptions; /* OUT in FmPcdGetSetClsPlanGrpParams; IN in FmPcdKgBuildClsPlanGrp */
-+ protocolOpt_t options[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
-+ /* OUT in FmPcdGetSetClsPlanGrpParams; IN in FmPcdKgBuildClsPlanGrp */
-+ uint32_t optVectors[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)];
-+ /* OUT in FmPcdGetSetClsPlanGrpParams; IN in FmPcdKgBuildClsPlanGrp */
-+} t_FmPcdKgInterModuleClsPlanGrpParams;
-+
-+typedef struct t_FmPcdLock {
-+ t_Handle h_Spinlock;
-+ volatile bool flag;
-+ t_List node;
-+} t_FmPcdLock;
-+#define FM_PCD_LOCK_OBJ(ptr) LIST_OBJECT(ptr, t_FmPcdLock, node)
-+
-+
-+typedef t_Error (t_FmPortGetSetCcParamsCallback) (t_Handle h_FmPort,
-+ t_FmPortGetSetCcParams *p_FmPortGetSetCcParams);
-+
-+
-+/***********************************************************************/
-+/* Common API for FM-PCD module */
-+/***********************************************************************/
-+t_Handle FmPcdGetHcHandle(t_Handle h_FmPcd);
-+uint32_t FmPcdGetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr);
-+uint32_t FmPcdGetLcv(t_Handle h_FmPcd, uint32_t netEnvId, uint8_t hdrNum);
-+uint32_t FmPcdGetMacsecLcv(t_Handle h_FmPcd, uint32_t netEnvId);
-+void FmPcdIncNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId);
-+void FmPcdDecNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId);
-+uint8_t FmPcdGetNetEnvId(t_Handle h_NetEnv);
-+void FmPcdPortRegister(t_Handle h_FmPcd, t_Handle h_FmPort, uint8_t hardwarePortId);
-+uint32_t FmPcdLock(t_Handle h_FmPcd);
-+void FmPcdUnlock(t_Handle h_FmPcd, uint32_t intFlags);
-+bool FmPcdNetEnvIsHdrExist(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr);
-+t_Error FmPcdFragHcScratchPoolInit(t_Handle h_FmPcd, uint8_t scratchBpid);
-+t_Error FmPcdRegisterReassmPort(t_Handle h_FmPcd, t_Handle h_IpReasmCommonPramTbl);
-+t_Error FmPcdUnregisterReassmPort(t_Handle h_FmPcd, t_Handle h_IpReasmCommonPramTbl);
-+bool FmPcdIsAdvancedOffloadSupported(t_Handle h_FmPcd);
-+bool FmPcdLockTryLockAll(t_Handle h_FmPcd);
-+void FmPcdLockUnlockAll(t_Handle h_FmPcd);
-+t_Error FmPcdHcSync(t_Handle h_FmPcd);
-+t_Handle FmGetPcd(t_Handle h_Fm);
-+/***********************************************************************/
-+/* Common API for FM-PCD KG module */
-+/***********************************************************************/
-+uint8_t FmPcdKgGetClsPlanGrpBase(t_Handle h_FmPcd, uint8_t clsPlanGrp);
-+uint16_t FmPcdKgGetClsPlanGrpSize(t_Handle h_FmPcd, uint8_t clsPlanGrp);
-+t_Error FmPcdKgBuildClsPlanGrp(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_Grp, t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet);
-+
-+uint8_t FmPcdKgGetSchemeId(t_Handle h_Scheme);
-+#if (DPAA_VERSION >= 11)
-+bool FmPcdKgGetVspe(t_Handle h_Scheme);
-+#endif /* (DPAA_VERSION >= 11) */
-+uint8_t FmPcdKgGetRelativeSchemeId(t_Handle h_FmPcd, uint8_t schemeId);
-+void FmPcdKgDestroyClsPlanGrp(t_Handle h_FmPcd, uint8_t grpId);
-+t_Error FmPcdKgCheckInvalidateSchemeSw(t_Handle h_Scheme);
-+t_Error FmPcdKgBuildBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPortToSchemes, uint32_t *p_SpReg, bool add);
-+bool FmPcdKgHwSchemeIsValid(uint32_t schemeModeReg);
-+uint32_t FmPcdKgBuildWriteSchemeActionReg(uint8_t schemeId, bool updateCounter);
-+uint32_t FmPcdKgBuildReadSchemeActionReg(uint8_t schemeId);
-+uint32_t FmPcdKgBuildWriteClsPlanBlockActionReg(uint8_t grpId);
-+uint32_t FmPcdKgBuildWritePortSchemeBindActionReg(uint8_t hardwarePortId);
-+uint32_t FmPcdKgBuildReadPortSchemeBindActionReg(uint8_t hardwarePortId);
-+uint32_t FmPcdKgBuildWritePortClsPlanBindActionReg(uint8_t hardwarePortId);
-+bool FmPcdKgIsSchemeValidSw(t_Handle h_Scheme);
-+
-+t_Error FmPcdKgBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind);
-+t_Error FmPcdKgUnbindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind);
-+uint32_t FmPcdKgGetRequiredAction(t_Handle h_FmPcd, uint8_t schemeId);
-+uint32_t FmPcdKgGetRequiredActionFlag(t_Handle h_FmPcd, uint8_t schemeId);
-+e_FmPcdDoneAction FmPcdKgGetDoneAction(t_Handle h_FmPcd, uint8_t schemeId);
-+e_FmPcdEngine FmPcdKgGetNextEngine(t_Handle h_FmPcd, uint8_t schemeId);
-+void FmPcdKgUpdateRequiredAction(t_Handle h_Scheme, uint32_t requiredAction);
-+bool FmPcdKgIsDirectPlcr(t_Handle h_FmPcd, uint8_t schemeId);
-+bool FmPcdKgIsDistrOnPlcrProfile(t_Handle h_FmPcd, uint8_t schemeId);
-+uint16_t FmPcdKgGetRelativeProfileId(t_Handle h_FmPcd, uint8_t schemeId);
-+t_Handle FmPcdKgGetSchemeHandle(t_Handle h_FmPcd, uint8_t relativeSchemeId);
-+bool FmPcdKgIsSchemeHasOwners(t_Handle h_Scheme);
-+t_Error FmPcdKgCcGetSetParams(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value);
-+t_Error FmPcdKgSetOrBindToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t netEnvId, protocolOpt_t *p_OptArray, uint8_t *p_ClsPlanGrpId, bool *p_IsEmptyClsPlanGrp);
-+t_Error FmPcdKgDeleteOrUnbindPortToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId);
-+
-+/***********************************************************************/
-+/* Common API for FM-PCD parser module */
-+/***********************************************************************/
-+t_Error FmPcdPrsIncludePortInStatistics(t_Handle p_FmPcd, uint8_t hardwarePortId, bool include);
-+
-+/***********************************************************************/
-+/* Common API for FM-PCD policer module */
-+/***********************************************************************/
-+t_Error FmPcdPlcrAllocProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles);
-+t_Error FmPcdPlcrFreeProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId);
-+bool FmPcdPlcrIsProfileValid(t_Handle h_FmPcd, uint16_t absoluteProfileId);
-+uint16_t FmPcdPlcrGetPortProfilesBase(t_Handle h_FmPcd, uint8_t hardwarePortId);
-+uint16_t FmPcdPlcrGetPortNumOfProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId);
-+uint32_t FmPcdPlcrBuildWritePlcrActionRegs(uint16_t absoluteProfileId);
-+uint32_t FmPcdPlcrBuildCounterProfileReg(e_FmPcdPlcrProfileCounters counter);
-+uint32_t FmPcdPlcrBuildWritePlcrActionReg(uint16_t absoluteProfileId);
-+uint32_t FmPcdPlcrBuildReadPlcrActionReg(uint16_t absoluteProfileId);
-+uint16_t FmPcdPlcrProfileGetAbsoluteId(t_Handle h_Profile);
-+t_Error FmPcdPlcrGetAbsoluteIdByProfileParams(t_Handle h_FmPcd,
-+ e_FmPcdProfileTypeSelection profileType,
-+ t_Handle h_FmPort,
-+ uint16_t relativeProfile,
-+ uint16_t *p_AbsoluteId);
-+void FmPcdPlcrInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
-+void FmPcdPlcrValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
-+bool FmPcdPlcrHwProfileIsValid(uint32_t profileModeReg);
-+uint32_t FmPcdPlcrGetRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId);
-+uint32_t FmPcdPlcrGetRequiredActionFlag(t_Handle h_FmPcd, uint16_t absoluteProfileId);
-+uint32_t FmPcdPlcrBuildNiaProfileReg(bool green, bool yellow, bool red);
-+void FmPcdPlcrUpdateRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId, uint32_t requiredAction);
-+t_Error FmPcdPlcrCcGetSetParams(t_Handle h_FmPcd, uint16_t profileIndx,uint32_t requiredAction);
-+
-+/***********************************************************************/
-+/* Common API for FM-PCD CC module */
-+/***********************************************************************/
-+uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode);
-+uint8_t FmPcdCcGetOffset(t_Handle h_CcNode);
-+t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint16_t keyIndex);
-+t_Error FmPcdCcAddKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint16_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPCdCcKeyParams);
-+t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint16_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask);
-+t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint16_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPcdCcKeyParams);
-+t_Error FmPcdCcModifyMissNextEngineParamNode(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+t_Error FmPcdCcModifyNextEngineParamTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd, t_Handle h_Pointer);
-+t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree);
-+void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree, t_Handle h_SavedManipParams);
-+t_Error FmPcdCcTreeAddIPR(t_Handle h_FmPcd, t_Handle h_FmTree, t_Handle h_NetEnv, t_Handle h_ReassemblyManip, bool schemes);
-+t_Error FmPcdCcTreeAddCPR(t_Handle h_FmPcd, t_Handle h_FmTree, t_Handle h_NetEnv, t_Handle h_ReassemblyManip, bool schemes);
-+t_Error FmPcdCcBindTree(t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_CcTree, uint32_t *p_Offset,t_Handle h_FmPort);
-+t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, t_Handle h_CcTree);
-+
-+/***********************************************************************/
-+/* Common API for FM-PCD Manip module */
-+/***********************************************************************/
-+t_Error FmPcdManipUpdate(t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_FmPort, t_Handle h_Manip, t_Handle h_Ad, bool validate, int level, t_Handle h_FmTree, bool modify);
-+
-+/***********************************************************************/
-+/* Common API for FM-Port module */
-+/***********************************************************************/
-+#if (DPAA_VERSION >= 11)
-+typedef enum e_FmPortGprFuncType
-+{
-+ e_FM_PORT_GPR_EMPTY = 0,
-+ e_FM_PORT_GPR_MURAM_PAGE
-+} e_FmPortGprFuncType;
-+
-+t_Error FmPortSetGprFunc(t_Handle h_FmPort, e_FmPortGprFuncType gprFunc, void **p_Value);
-+#endif /* (DPAA_VERSION >= 11) */
-+t_Error FmGetSetParams(t_Handle h_Fm, t_FmGetSetParams *p_FmGetSetParams);
-+t_Error FmPortGetSetCcParams(t_Handle h_FmPort, t_FmPortGetSetCcParams *p_FmPortGetSetCcParams);
-+uint8_t FmPortGetNetEnvId(t_Handle h_FmPort);
-+uint8_t FmPortGetHardwarePortId(t_Handle h_FmPort);
-+uint32_t FmPortGetPcdEngines(t_Handle h_FmPort);
-+void FmPortPcdKgSwUnbindClsPlanGrp (t_Handle h_FmPort);
-+
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FmPcdFrmReplicUpdate(t_Handle h_FmPcd, t_Handle h_FmPort, t_Handle h_FrmReplic);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Function FmRegisterIntr
-+
-+ @Description Used to register an inter-module event handler to be processed by FM
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] mod The module that causes the event
-+ @Param[in] modId Module id - used if more than one instantiation of this
-+ module exists; 0 otherwise.
-+ @Param[in] intrType Interrupt type (error/normal) selection.
-+ @Param[in] f_Isr The interrupt service routine.
-+ @Param[in] h_Arg Argument to be passed to f_Isr.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void FmRegisterIntr(t_Handle h_Fm,
-+ e_FmEventModules mod,
-+ uint8_t modId,
-+ e_FmIntrType intrType,
-+ void (*f_Isr) (t_Handle h_Arg),
-+ t_Handle h_Arg);
-+
-+/**************************************************************************//**
-+ @Function FmUnregisterIntr
-+
-+ @Description Used to un-register an inter-module event handler that was processed by FM
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] mod The module that causes the event
-+ @Param[in] modId Module id - used if more than one instantiation of this
-+ module exists; 0 otherwise.
-+ @Param[in] intrType Interrupt type (error/normal) selection.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void FmUnregisterIntr(t_Handle h_Fm,
-+ e_FmEventModules mod,
-+ uint8_t modId,
-+ e_FmIntrType intrType);
-+
-+/**************************************************************************//**
-+ @Function FmRegisterFmCtlIntr
-+
-+ @Description Used to register to one of the fmCtl events in the FM module
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] eventRegId FmCtl event id (0-7).
-+ @Param[in] f_Isr The interrupt service routine.
-+
-+ @Return None.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+void FmRegisterFmCtlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Fm, uint32_t event));
-+
-+
-+/**************************************************************************//**
-+ @Description enum for defining MAC types
-+*//***************************************************************************/
-+typedef enum e_FmMacType {
-+ e_FM_MAC_10G = 0, /**< 10G MAC */
-+ e_FM_MAC_1G /**< 1G MAC */
-+} e_FmMacType;
-+
-+/**************************************************************************//**
-+ @Description Structure for port-FM communication during FM_PORT_Init.
-+ Fields commented 'IN' are passed by the port module to be used
-+ by the FM module.
-+ Fields commented 'OUT' will be filled by FM before returning to port.
-+ Some fields are optional (depending on configuration) and
-+ will be analized by the port and FM modules accordingly.
-+*//***************************************************************************/
-+typedef struct t_FmInterModulePortInitParams {
-+ uint8_t hardwarePortId; /**< IN. port Id */
-+ e_FmPortType portType; /**< IN. Port type */
-+ bool independentMode; /**< IN. TRUE if FM Port operates in independent mode */
-+ uint16_t liodnOffset; /**< IN. Port's requested resource */
-+ uint8_t numOfTasks; /**< IN. Port's requested resource */
-+ uint8_t numOfExtraTasks; /**< IN. Port's requested resource */
-+ uint8_t numOfOpenDmas; /**< IN. Port's requested resource */
-+ uint8_t numOfExtraOpenDmas; /**< IN. Port's requested resource */
-+ uint32_t sizeOfFifo; /**< IN. Port's requested resource */
-+ uint32_t extraSizeOfFifo; /**< IN. Port's requested resource */
-+ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
-+ uint16_t maxFrameLength; /**< IN. Port's max frame length. */
-+ uint16_t liodnBase; /**< IN. Irrelevant for P4080 rev 1.
-+ LIODN base for this port, to be
-+ used together with LIODN offset. */
-+ t_FmPhysAddr fmMuramPhysBaseAddr;/**< OUT. FM-MURAM physical address*/
-+} t_FmInterModulePortInitParams;
-+
-+/**************************************************************************//**
-+ @Description Structure for port-FM communication during FM_PORT_Free.
-+*//***************************************************************************/
-+typedef struct t_FmInterModulePortFreeParams {
-+ uint8_t hardwarePortId; /**< IN. port Id */
-+ e_FmPortType portType; /**< IN. Port type */
-+ uint8_t deqPipelineDepth; /**< IN. Port's requested resource */
-+} t_FmInterModulePortFreeParams;
-+
-+/**************************************************************************//**
-+ @Function FmGetPcdPrsBaseAddr
-+
-+ @Description Get the base address of the Parser from the FM module
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return Base address.
-+*//***************************************************************************/
-+uintptr_t FmGetPcdPrsBaseAddr(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmGetPcdKgBaseAddr
-+
-+ @Description Get the base address of the Keygen from the FM module
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return Base address.
-+*//***************************************************************************/
-+uintptr_t FmGetPcdKgBaseAddr(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmGetPcdPlcrBaseAddr
-+
-+ @Description Get the base address of the Policer from the FM module
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return Base address.
-+*//***************************************************************************/
-+uintptr_t FmGetPcdPlcrBaseAddr(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmGetMuramHandle
-+
-+ @Description Get the handle of the MURAM from the FM module
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return MURAM module handle.
-+*//***************************************************************************/
-+t_Handle FmGetMuramHandle(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmGetPhysicalMuramBase
-+
-+ @Description Get the physical base address of the MURAM from the FM module
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[out] fmPhysAddr Returned physical MURAM base address
-+
-+ @Return None.
-+*//***************************************************************************/
-+void FmGetPhysicalMuramBase(t_Handle h_Fm, t_FmPhysAddr *fmPhysAddr);
-+
-+/**************************************************************************//**
-+ @Function FmGetTimeStampScale
-+
-+ @Description Used internally by other modules in order to get the timeStamp
-+ period as requested by the application.
-+
-+ This function returns the bit number that is incremented every 1 usec.
-+ To calculate the timestamp period in nsec, use
-+ 1000 / (1 << FmGetTimeStampScale()).
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return Bit that counts 1 usec.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+uint32_t FmGetTimeStampScale(t_Handle h_Fm);
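Putting the comment above into a formula (illustrative only, not part of the driver; ExampleTimeStampLsbNs is an invented helper): if bit n of the timestamp advances once per microsecond, the timestamp LSB period is 1000 / 2^n nanoseconds.

/* Illustrative only: a returned scale of 0 means 1000 ns per LSB, a scale of
 * 2 means 250 ns per LSB, and so on.
 */
static __inline__ uint32_t ExampleTimeStampLsbNs(t_Handle h_Fm)
{
    return (uint32_t)(1000 / (1 << FmGetTimeStampScale(h_Fm)));
}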
-+
-+/**************************************************************************//**
-+ @Function FmResumeStalledPort
-+
-+ @Description Used internally by FM port to release a stalled port.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] hardwarePortId HW port id.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FmResumeStalledPort(t_Handle h_Fm, uint8_t hardwarePortId);
-+
-+/**************************************************************************//**
-+ @Function FmIsPortStalled
-+
-+ @Description Used internally by FM port to read the port's status.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] hardwarePortId HW port id.
-+ @Param[out] p_IsStalled A pointer to return the boolean port stalled state
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FmIsPortStalled(t_Handle h_Fm, uint8_t hardwarePortId, bool *p_IsStalled);
-+
-+/**************************************************************************//**
-+ @Function FmResetMac
-+
-+ @Description Used by MAC driver to reset the MAC registers
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] type MAC type.
-+ @Param[in] macId MAC id - according to type.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FmResetMac(t_Handle h_Fm, e_FmMacType type, uint8_t macId);
-+
-+/**************************************************************************//**
-+ @Function FmGetClockFreq
-+
-+ @Description Used by MAC driver to get the FM clock frequency
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return clock-freq on success; 0 otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+uint16_t FmGetClockFreq(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmGetMacClockFreq
-+
-+ @Description Used by MAC driver to get the MAC clock frequency
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return clock-freq on success; 0 otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+uint16_t FmGetMacClockFreq(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmGetId
-+
-+ @Description Used by the PCD driver to read the FM id
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return The FM id.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+uint8_t FmGetId(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmReset
-+
-+ @Description Used to reset the FM
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FmReset(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FmGetSetPortParams
-+
-+ @Description Used by FM-PORT driver to pass and receive parameters between
-+ PORT and FM modules.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in,out] p_PortParams A structure of FM Port parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FmGetSetPortParams(t_Handle h_Fm,t_FmInterModulePortInitParams *p_PortParams);
-+
-+/**************************************************************************//**
-+ @Function FmFreePortParams
-+
-+ @Description Used by FM-PORT driver to free port's resources within the FM.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in,out] p_PortParams A structure of FM Port parameters.
-+
-+ @Return None.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+void FmFreePortParams(t_Handle h_Fm,t_FmInterModulePortFreeParams *p_PortParams);
-+
-+/**************************************************************************//**
-+ @Function FmSetNumOfRiscsPerPort
-+
-+ @Description Used by the FM-PORT driver to pass parameters between the
-+ PORT and FM modules when configuring the number of RISCs per port.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] hardwarePortId hardware port Id.
-+ @Param[in] numOfFmanCtrls number of Fman Controllers.
-+ @Param[in] orFmanCtrl Fman Controller for order restoration.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FmSetNumOfRiscsPerPort(t_Handle h_Fm, uint8_t hardwarePortId, uint8_t numOfFmanCtrls, t_FmFmanCtrl orFmanCtrl);
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/**************************************************************************//*
-+ @Function FmDumpPortRegs
-+
-+ @Description Dumps FM port registers which are part of FM common registers
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] hardwarePortId HW port id.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FmDumpPortRegs(t_Handle h_Fm,uint8_t hardwarePortId);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+void FmRegisterPcd(t_Handle h_Fm, t_Handle h_FmPcd);
-+void FmUnregisterPcd(t_Handle h_Fm);
-+t_Handle FmGetPcdHandle(t_Handle h_Fm);
-+t_Error FmEnableRamsEcc(t_Handle h_Fm);
-+t_Error FmDisableRamsEcc(t_Handle h_Fm);
-+void FmGetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo);
-+t_Error FmAllocFmanCtrlEventReg(t_Handle h_Fm, uint8_t *p_EventId);
-+void FmFreeFmanCtrlEventReg(t_Handle h_Fm, uint8_t eventId);
-+void FmSetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, uint32_t enableEvents);
-+uint32_t FmGetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId);
-+void FmRegisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Fm, uint32_t event), t_Handle h_Arg);
-+void FmUnregisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId);
-+t_Error FmSetMacMaxFrame(t_Handle h_Fm, e_FmMacType type, uint8_t macId, uint16_t mtu);
-+bool FmIsMaster(t_Handle h_Fm);
-+uint8_t FmGetGuestId(t_Handle h_Fm);
-+uint16_t FmGetTnumAgingPeriod(t_Handle h_Fm);
-+t_Error FmSetPortPreFetchConfiguration(t_Handle h_Fm, uint8_t portNum, bool preFetchConfigured);
-+t_Error FmGetPortPreFetchConfiguration(t_Handle h_Fm, uint8_t portNum, bool *p_PortConfigured, bool *p_PreFetchConfigured);
-+
-+
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+t_Error Fm10GTxEccWorkaround(t_Handle h_Fm, uint8_t macId);
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+
-+void FmMuramClear(t_Handle h_FmMuram);
-+t_Error FmSetNumOfOpenDmas(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint8_t *p_NumOfOpenDmas,
-+ uint8_t *p_NumOfExtraOpenDmas,
-+ bool initialConfig);
-+t_Error FmSetNumOfTasks(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint8_t *p_NumOfTasks,
-+ uint8_t *p_NumOfExtraTasks,
-+ bool initialConfig);
-+t_Error FmSetSizeOfFifo(t_Handle h_Fm,
-+ uint8_t hardwarePortId,
-+ uint32_t *p_SizeOfFifo,
-+ uint32_t *p_ExtraSizeOfFifo,
-+ bool initialConfig);
-+
-+t_Error FmSetCongestionGroupPFCpriority(t_Handle h_Fm,
-+ uint32_t congestionGroupId,
-+ uint8_t priorityBitMap);
-+
-+#if (DPAA_VERSION >= 11)
-+t_Error FmVSPAllocForPort(t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint8_t numOfStorageProfiles);
-+
-+t_Error FmVSPFreeForPort(t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId);
-+
-+t_Error FmVSPGetAbsoluteProfileId(t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint16_t relativeProfile,
-+ uint16_t *p_AbsoluteId);
-+t_Error FmVSPCheckRelativeProfile(t_Handle h_Fm,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint16_t relativeProfile);
-+
-+uintptr_t FmGetVSPBaseAddr(t_Handle h_Fm);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+
-+#endif /* __FM_COMMON_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h
-@@ -0,0 +1,93 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __FM_HC_H
-+#define __FM_HC_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "fsl_fman_kg.h"
-+
-+#define __ERR_MODULE__ MODULE_FM_PCD
-+
-+
-+typedef struct t_FmHcParams {
-+ t_Handle h_Fm;
-+ t_Handle h_FmPcd;
-+ t_FmPcdHcParams params;
-+} t_FmHcParams;
-+
-+
-+t_Handle FmHcConfigAndInit(t_FmHcParams *p_FmHcParams);
-+void FmHcFree(t_Handle h_FmHc);
-+t_Error FmHcSetFramesDataMemory(t_Handle h_FmHc,
-+ uint8_t memId);
-+t_Error FmHcDumpRegs(t_Handle h_FmHc);
-+
-+void FmHcTxConf(t_Handle h_FmHc, t_DpaaFD *p_Fd);
-+
-+t_Error FmHcPcdKgSetScheme(t_Handle h_FmHc,
-+ t_Handle h_Scheme,
-+ struct fman_kg_scheme_regs *p_SchemeRegs,
-+ bool updateCounter);
-+t_Error FmHcPcdKgDeleteScheme(t_Handle h_FmHc, t_Handle h_Scheme);
-+t_Error FmHcPcdCcCapwapTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcCapwapReassmTimeoutParams *p_CcCapwapReassmTimeoutParams );
-+t_Error FmHcPcdCcIpFragScratchPollCmd(t_Handle h_FmHc, bool fill, t_FmPcdCcFragScratchPoolCmdParams *p_FmPcdCcFragScratchPoolCmdParams);
-+t_Error FmHcPcdCcTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcReassmTimeoutParams *p_CcReassmTimeoutParams, uint8_t *p_Result);
-+t_Error FmHcPcdKgSetClsPlan(t_Handle h_FmHc, t_FmPcdKgInterModuleClsPlanSet *p_Set);
-+t_Error FmHcPcdKgDeleteClsPlan(t_Handle h_FmHc, uint8_t clsPlanGrpId);
-+
-+t_Error FmHcPcdKgSetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t value);
-+uint32_t FmHcPcdKgGetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme);
-+
-+t_Error FmHcPcdCcDoDynamicChange(t_Handle h_FmHc, uint32_t oldAdAddrOffset, uint32_t newAdAddrOffset);
-+
-+t_Error FmHcPcdPlcrSetProfile(t_Handle h_FmHc, t_Handle h_Profile, t_FmPcdPlcrProfileRegs *p_PlcrRegs);
-+t_Error FmHcPcdPlcrDeleteProfile(t_Handle h_FmHc, t_Handle h_Profile);
-+
-+t_Error FmHcPcdPlcrSetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value);
-+uint32_t FmHcPcdPlcrGetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter);
-+
-+t_Error FmHcKgWriteSp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t spReg, bool add);
-+t_Error FmHcKgWriteCpp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t cppReg);
-+
-+t_Error FmHcPcdKgCcGetSetParams(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t requiredAction, uint32_t value);
-+t_Error FmHcPcdPlcrCcGetSetParams(t_Handle h_FmHc,uint16_t absoluteProfileId, uint32_t requiredAction);
-+
-+t_Error FmHcPcdSync(t_Handle h_FmHc);
-+t_Handle FmHcGetPort(t_Handle h_FmHc);
-+
-+
-+
-+
-+#endif /* __FM_HC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc/fm_sp_common.h
-@@ -0,0 +1,117 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_sp_common.h
-+
-+ @Description FM storage profile (SP) common internal definitions.
-+*//***************************************************************************/
-+#ifndef __FM_SP_COMMON_H
-+#define __FM_SP_COMMON_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+
-+#include "fm_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fsl_fman.h"
-+
-+/**************************************************************************//**
-+ @Description defaults
-+*//***************************************************************************/
-+#define DEFAULT_FM_SP_bufferPrefixContent_privDataSize 0
-+#define DEFAULT_FM_SP_bufferPrefixContent_passPrsResult FALSE
-+#define DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp FALSE
-+#define DEFAULT_FM_SP_bufferPrefixContent_allOtherPCDInfo FALSE
-+#define DEFAULT_FM_SP_bufferPrefixContent_dataAlign 64
-+
-+/**************************************************************************//**
-+ @Description structure for defining internal context copying
-+*//***************************************************************************/
-+typedef struct
-+{
-+ uint16_t extBufOffset; /**< Offset in the external buffer to which the internal
-+ context is copied (Rx) or from which it is taken (Tx, Op). */
-+ uint8_t intContextOffset; /**< Offset within internal context to copy from
-+ (Rx) or to copy to (Tx, Op). */
-+ uint16_t size; /**< Internal offset size to be copied */
-+} t_FmSpIntContextDataCopy;
-+
-+/**************************************************************************//**
-+ @Description struct for defining external buffer margins
-+*//***************************************************************************/
-+typedef struct {
-+ uint16_t startMargins; /**< Number of bytes to be left at the beginning
-+ of the external buffer (must be divisible by 16) */
-+ uint16_t endMargins; /**< Number of bytes to be left at the end
-+ of the external buffer (must be divisible by 16) */
-+} t_FmSpBufMargins;
-+
-+typedef struct {
-+ uint32_t dataOffset;
-+ uint32_t prsResultOffset;
-+ uint32_t timeStampOffset;
-+ uint32_t hashResultOffset;
-+ uint32_t pcdInfoOffset;
-+ uint32_t manipOffset;
-+} t_FmSpBufferOffsets;
-+
-+
-+t_Error FmSpBuildBufferStructure(t_FmSpIntContextDataCopy *p_FmPortIntContextDataCopy,
-+ t_FmBufferPrefixContent *p_BufferPrefixContent,
-+ t_FmSpBufMargins *p_FmPortBufMargins,
-+ t_FmSpBufferOffsets *p_FmPortBufferOffsets,
-+ uint8_t *internalBufferOffset);
-+
-+t_Error FmSpCheckIntContextParams(t_FmSpIntContextDataCopy *p_FmSpIntContextDataCopy);
-+t_Error FmSpCheckBufPoolsParams(t_FmExtPools *p_FmExtPools,
-+ t_FmBackupBmPools *p_FmBackupBmPools,
-+ t_FmBufPoolDepletion *p_FmBufPoolDepletion);
-+t_Error FmSpCheckBufMargins(t_FmSpBufMargins *p_FmSpBufMargins);
-+void FmSpSetBufPoolsInAscOrderOfBufSizes(t_FmExtPools *p_FmExtPools, uint8_t *orderedArray, uint16_t *sizesArray);
-+
-+t_Error FmPcdSpAllocProfiles(t_Handle h_FmPcd,
-+ uint8_t hardwarePortId,
-+ uint16_t numOfStorageProfiles,
-+ uint16_t *base,
-+ uint8_t *log2Num);
-+t_Error FmPcdSpGetAbsoluteProfileId(t_Handle h_FmPcd,
-+ t_Handle h_FmPort,
-+ uint16_t relativeProfile,
-+ uint16_t *p_AbsoluteId);
-+void SpInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
-+void SpValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId);
-+
-+
-+#endif /* __FM_SP_COMMON_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/etc/Makefile
-@@ -0,0 +1,12 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+obj-y += fsl-ncsw-etc.o
-+
-+fsl-ncsw-etc-objs := mm.o memcpy.o sprint.o list.o error.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/etc/error.c
-@@ -0,0 +1,95 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/*
-+
-+ @File error.c
-+
-+ @Description General errors and events reporting utilities.
-+*//***************************************************************************/
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+#include "error_ext.h"
-+
-+
-+const char *dbgLevelStrings[] =
-+{
-+ "CRITICAL"
-+ ,"MAJOR"
-+ ,"MINOR"
-+ ,"WARNING"
-+ ,"INFO"
-+ ,"TRACE"
-+};
-+
-+
-+char * ErrTypeStrings (e_ErrorType err)
-+{
-+ switch (err)
-+ {
-+ case (E_OK): return "OK";
-+ case (E_WRITE_FAILED): return "Write Access Failed";
-+ case (E_NO_DEVICE): return "No Device";
-+ case (E_NOT_AVAILABLE): return "Resource Is Unavailable";
-+ case (E_NO_MEMORY): return "Memory Allocation Failed";
-+ case (E_INVALID_ADDRESS): return "Invalid Address";
-+ case (E_BUSY): return "Resource Is Busy";
-+ case (E_ALREADY_EXISTS): return "Resource Already Exists";
-+ case (E_INVALID_OPERATION): return "Invalid Operation";
-+ case (E_INVALID_VALUE): return "Invalid Value";
-+ case (E_NOT_IN_RANGE): return "Value Out Of Range";
-+ case (E_NOT_SUPPORTED): return "Unsupported Operation";
-+ case (E_INVALID_STATE): return "Invalid State";
-+ case (E_INVALID_HANDLE): return "Invalid Handle";
-+ case (E_INVALID_ID): return "Invalid ID";
-+ case (E_NULL_POINTER): return "Unexpected NULL Pointer";
-+ case (E_INVALID_SELECTION): return "Invalid Selection";
-+ case (E_INVALID_COMM_MODE): return "Invalid Communication Mode";
-+ case (E_INVALID_MEMORY_TYPE): return "Invalid Memory Type";
-+ case (E_INVALID_CLOCK): return "Invalid Clock";
-+ case (E_CONFLICT): return "Conflict In Settings";
-+ case (E_NOT_ALIGNED): return "Incorrect Alignment";
-+ case (E_NOT_FOUND): return "Resource Not Found";
-+ case (E_FULL): return "Resource Is Full";
-+ case (E_EMPTY): return "Resource Is Empty";
-+ case (E_ALREADY_FREE): return "Resource Already Free";
-+ case (E_READ_FAILED): return "Read Access Failed";
-+ case (E_INVALID_FRAME): return "Invalid Frame";
-+ case (E_SEND_FAILED): return "Send Operation Failed";
-+ case (E_RECEIVE_FAILED): return "Receive Operation Failed";
-+ case (E_TIMEOUT): return "Operation Timed Out";
-+ default:
-+ break;
-+ }
-+ return NULL;
-+}
-+#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/etc/list.c
-@@ -0,0 +1,71 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+
-+ @File list.c
-+
-+ @Description Implementation of list.
-+*//***************************************************************************/
-+#include "std_ext.h"
-+#include "list_ext.h"
-+
-+
-+void LIST_Append(t_List *p_NewList, t_List *p_Head)
-+{
-+ t_List *p_First = LIST_FIRST(p_NewList);
-+
-+ if (p_First != p_NewList)
-+ {
-+ t_List *p_Last = LIST_LAST(p_NewList);
-+ t_List *p_Cur = LIST_NEXT(p_Head);
-+
-+ LIST_PREV(p_First) = p_Head;
-+ LIST_FIRST(p_Head) = p_First;
-+ LIST_NEXT(p_Last) = p_Cur;
-+ LIST_LAST(p_Cur) = p_Last;
-+ }
-+}
-+
-+
-+int LIST_NumOfObjs(t_List *p_List)
-+{
-+ t_List *p_Tmp;
-+ int numOfObjs = 0;
-+
-+ if (!LIST_IsEmpty(p_List))
-+ LIST_FOR_EACH(p_Tmp, p_List)
-+ numOfObjs++;
-+
-+ return numOfObjs;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/etc/memcpy.c
-@@ -0,0 +1,620 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+
-+#include "std_ext.h"
-+#include "xx_ext.h"
-+#include "memcpy_ext.h"
-+
-+void * MemCpy8(void* pDst, void* pSrc, uint32_t size)
-+{
-+ int i;
-+
-+ for(i = 0; i < size; ++i)
-+ *(((uint8_t*)(pDst)) + i) = *(((uint8_t*)(pSrc)) + i);
-+
-+ return pDst;
-+}
-+
-+void * MemSet8(void* pDst, int c, uint32_t size)
-+{
-+ int i;
-+
-+ for(i = 0; i < size; ++i)
-+ *(((uint8_t*)(pDst)) + i) = (uint8_t)(c);
-+
-+ return pDst;
-+}
-+
-+void * MemCpy32(void* pDst,void* pSrc, uint32_t size)
-+{
-+ uint32_t leftAlign;
-+ uint32_t rightAlign;
-+ uint32_t lastWord;
-+ uint32_t currWord;
-+ uint32_t *p_Src32;
-+ uint32_t *p_Dst32;
-+ uint8_t *p_Src8;
-+ uint8_t *p_Dst8;
-+
-+ p_Src8 = (uint8_t*)(pSrc);
-+ p_Dst8 = (uint8_t*)(pDst);
-+ /* first copy byte by byte till the source first alignment
-+ * this step is necessary to ensure we do not even try to access
-+ * data which is before the source buffer, hence it is not ours.
-+ */
-+ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
-+ {
-+ *p_Dst8++ = *p_Src8++;
-+ size--;
-+ }
-+
-+ /* align destination (possibly disaligning source)*/
-+ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
-+ {
-+ *p_Dst8++ = *p_Src8++;
-+ size--;
-+ }
-+
-+ /* dest is aligned and source is not necessarily aligned */
-+ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
-+ rightAlign = 32 - leftAlign;
-+
-+
-+ if (leftAlign == 0)
-+ {
-+ /* source is also aligned */
-+ p_Src32 = (uint32_t*)(p_Src8);
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ while (size >> 2) /* size >= 4 */
-+ {
-+ *p_Dst32++ = *p_Src32++;
-+ size -= 4;
-+ }
-+ p_Src8 = (uint8_t*)(p_Src32);
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ }
-+ else
-+ {
-+ /* source is not aligned (destination is aligned)*/
-+ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ lastWord = *p_Src32++;
-+ while(size >> 3) /* size >= 8 */
-+ {
-+ currWord = *p_Src32;
-+ *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign);
-+ lastWord = currWord;
-+ p_Src32++;
-+ p_Dst32++;
-+ size -= 4;
-+ }
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
-+ }
-+
-+ /* complete the left overs */
-+ while (size--)
-+ *p_Dst8++ = *p_Src8++;
-+
-+ return pDst;
-+}
-+
-+void * IO2IOCpy32(void* pDst,void* pSrc, uint32_t size)
-+{
-+ uint32_t leftAlign;
-+ uint32_t rightAlign;
-+ uint32_t lastWord;
-+ uint32_t currWord;
-+ uint32_t *p_Src32;
-+ uint32_t *p_Dst32;
-+ uint8_t *p_Src8;
-+ uint8_t *p_Dst8;
-+
-+ p_Src8 = (uint8_t*)(pSrc);
-+ p_Dst8 = (uint8_t*)(pDst);
-+ /* first copy byte by byte till the source first alignment
-+ * this step is necessary to ensure we do not even try to access
-+ * data which is before the source buffer, hence it is not ours.
-+ */
-+ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
-+ {
-+ WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8));
-+ p_Dst8++;p_Src8++;
-+ size--;
-+ }
-+
-+ /* align destination (possibly disaligning source)*/
-+ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
-+ {
-+ WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8));
-+ p_Dst8++;p_Src8++;
-+ size--;
-+ }
-+
-+ /* dest is aligned and source is not necessarily aligned */
-+ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
-+ rightAlign = 32 - leftAlign;
-+
-+ if (leftAlign == 0)
-+ {
-+ /* source is also aligned */
-+ p_Src32 = (uint32_t*)(p_Src8);
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ while (size >> 2) /* size >= 4 */
-+ {
-+ WRITE_UINT32(*p_Dst32, GET_UINT32(*p_Src32));
-+ p_Dst32++;p_Src32++;
-+ size -= 4;
-+ }
-+ p_Src8 = (uint8_t*)(p_Src32);
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ }
-+ else
-+ {
-+ /* source is not aligned (destination is aligned)*/
-+ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ lastWord = GET_UINT32(*p_Src32);
-+ p_Src32++;
-+ while(size >> 3) /* size >= 8 */
-+ {
-+ currWord = GET_UINT32(*p_Src32);
-+ WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign));
-+ lastWord = currWord;
-+ p_Src32++;p_Dst32++;
-+ size -= 4;
-+ }
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
-+ }
-+
-+ /* complete the left overs */
-+ while (size--)
-+ {
-+ WRITE_UINT8(*p_Dst8, GET_UINT8(*p_Src8));
-+ p_Dst8++;p_Src8++;
-+ }
-+
-+ return pDst;
-+}
-+
-+void * Mem2IOCpy32(void* pDst,void* pSrc, uint32_t size)
-+{
-+ uint32_t leftAlign;
-+ uint32_t rightAlign;
-+ uint32_t lastWord;
-+ uint32_t currWord;
-+ uint32_t *p_Src32;
-+ uint32_t *p_Dst32;
-+ uint8_t *p_Src8;
-+ uint8_t *p_Dst8;
-+
-+ p_Src8 = (uint8_t*)(pSrc);
-+ p_Dst8 = (uint8_t*)(pDst);
-+ /* first copy byte by byte till the source first alignment
-+ * this step is necessary to ensure we do not even try to access
-+ * data which is before the source buffer, hence it is not ours.
-+ */
-+ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
-+ {
-+ WRITE_UINT8(*p_Dst8, *p_Src8);
-+ p_Dst8++;p_Src8++;
-+ size--;
-+ }
-+
-+ /* align destination (possibly disaligning source)*/
-+ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
-+ {
-+ WRITE_UINT8(*p_Dst8, *p_Src8);
-+ p_Dst8++;p_Src8++;
-+ size--;
-+ }
-+
-+ /* dest is aligned and source is not necessarily aligned */
-+ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
-+ rightAlign = 32 - leftAlign;
-+
-+ if (leftAlign == 0)
-+ {
-+ /* source is also aligned */
-+ p_Src32 = (uint32_t*)(p_Src8);
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ while (size >> 2) /* size >= 4 */
-+ {
-+ WRITE_UINT32(*p_Dst32, *p_Src32);
-+ p_Dst32++;p_Src32++;
-+ size -= 4;
-+ }
-+ p_Src8 = (uint8_t*)(p_Src32);
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ }
-+ else
-+ {
-+ /* source is not aligned (destination is aligned)*/
-+ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ lastWord = *p_Src32++;
-+ while(size >> 3) /* size >= 8 */
-+ {
-+ currWord = *p_Src32;
-+ WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign));
-+ lastWord = currWord;
-+ p_Src32++;p_Dst32++;
-+ size -= 4;
-+ }
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
-+ }
-+
-+ /* complete the left overs */
-+ while (size--)
-+ {
-+ WRITE_UINT8(*p_Dst8, *p_Src8);
-+ p_Dst8++;p_Src8++;
-+ }
-+
-+ return pDst;
-+}
-+
-+void * IO2MemCpy32(void* pDst,void* pSrc, uint32_t size)
-+{
-+ uint32_t leftAlign;
-+ uint32_t rightAlign;
-+ uint32_t lastWord;
-+ uint32_t currWord;
-+ uint32_t *p_Src32;
-+ uint32_t *p_Dst32;
-+ uint8_t *p_Src8;
-+ uint8_t *p_Dst8;
-+
-+ p_Src8 = (uint8_t*)(pSrc);
-+ p_Dst8 = (uint8_t*)(pDst);
-+ /* first copy byte by byte till the source first alignment
-+ * this step is necessary to ensure we do not even try to access
-+ * data which is before the source buffer, hence it is not ours.
-+ */
-+ while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */
-+ {
-+ *p_Dst8 = GET_UINT8(*p_Src8);
-+ p_Dst8++;p_Src8++;
-+ size--;
-+ }
-+
-+ /* align destination (possibly disaligning source)*/
-+ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
-+ {
-+ *p_Dst8 = GET_UINT8(*p_Src8);
-+ p_Dst8++;p_Src8++;
-+ size--;
-+ }
-+
-+ /* dest is aligned and source is not necessarily aligned */
-+ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */
-+ rightAlign = 32 - leftAlign;
-+
-+ if (leftAlign == 0)
-+ {
-+ /* source is also aligned */
-+ p_Src32 = (uint32_t*)(p_Src8);
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ while (size >> 2) /* size >= 4 */
-+ {
-+ *p_Dst32 = GET_UINT32(*p_Src32);
-+ p_Dst32++;p_Src32++;
-+ size -= 4;
-+ }
-+ p_Src8 = (uint8_t*)(p_Src32);
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ }
-+ else
-+ {
-+ /* source is not aligned (destination is aligned)*/
-+ p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3));
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ lastWord = GET_UINT32(*p_Src32);
-+ p_Src32++;
-+ while(size >> 3) /* size >= 8 */
-+ {
-+ currWord = GET_UINT32(*p_Src32);
-+ *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign);
-+ lastWord = currWord;
-+ p_Src32++;p_Dst32++;
-+ size -= 4;
-+ }
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3);
-+ }
-+
-+ /* complete the left overs */
-+ while (size--)
-+ {
-+ *p_Dst8 = GET_UINT8(*p_Src8);
-+ p_Dst8++;p_Src8++;
-+ }
-+
-+ return pDst;
-+}
-+
-+void * MemCpy64(void* pDst,void* pSrc, uint32_t size)
-+{
-+ uint32_t leftAlign;
-+ uint32_t rightAlign;
-+ uint64_t lastWord;
-+ uint64_t currWord;
-+ uint64_t *pSrc64;
-+ uint64_t *pDst64;
-+ uint8_t *p_Src8;
-+ uint8_t *p_Dst8;
-+
-+ p_Src8 = (uint8_t*)(pSrc);
-+ p_Dst8 = (uint8_t*)(pDst);
-+ /* first copy byte by byte till the source first alignment
-+ * this step is necessary to ensure we do not even try to access
-+ * data which is before the source buffer, hence it is not ours.
-+ */
-+ while((PTR_TO_UINT(p_Src8) & 7) && size) /* (pSrc mod 8) > 0 and size > 0 */
-+ {
-+ *p_Dst8++ = *p_Src8++;
-+ size--;
-+ }
-+
-+ /* align destination (possibly disaligning source)*/
-+ while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */
-+ {
-+ *p_Dst8++ = *p_Src8++;
-+ size--;
-+ }
-+
-+ /* dest is aligned and source is not necessarily aligned */
-+ leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 7) << 3); /* leftAlign = (pSrc mod 8)*8 */
-+ rightAlign = 64 - leftAlign;
-+
-+
-+ if (leftAlign == 0)
-+ {
-+ /* source is also aligned */
-+ pSrc64 = (uint64_t*)(p_Src8);
-+ pDst64 = (uint64_t*)(p_Dst8);
-+ while (size >> 3) /* size >= 8 */
-+ {
-+ *pDst64++ = *pSrc64++;
-+ size -= 8;
-+ }
-+ p_Src8 = (uint8_t*)(pSrc64);
-+ p_Dst8 = (uint8_t*)(pDst64);
-+ }
-+ else
-+ {
-+ /* source is not aligned (destination is aligned)*/
-+ pSrc64 = (uint64_t*)(p_Src8 - (leftAlign >> 3));
-+ pDst64 = (uint64_t*)(p_Dst8);
-+ lastWord = *pSrc64++;
-+ while(size >> 4) /* size >= 16 */
-+ {
-+ currWord = *pSrc64;
-+ *pDst64 = (lastWord << leftAlign) | (currWord >> rightAlign);
-+ lastWord = currWord;
-+ pSrc64++;
-+ pDst64++;
-+ size -= 8;
-+ }
-+ p_Dst8 = (uint8_t*)(pDst64);
-+ p_Src8 = (uint8_t*)(pSrc64) - 8 + (leftAlign >> 3);
-+ }
-+
-+ /* complete the left overs */
-+ while (size--)
-+ *p_Dst8++ = *p_Src8++;
-+
-+ return pDst;
-+}
-+
-+void * MemSet32(void* pDst, uint8_t val, uint32_t size)
-+{
-+ uint32_t val32;
-+ uint32_t *p_Dst32;
-+ uint8_t *p_Dst8;
-+
-+ p_Dst8 = (uint8_t*)(pDst);
-+
-+ /* generate four 8-bit val's in 32-bit container */
-+ val32 = (uint32_t) val;
-+ val32 |= (val32 << 8);
-+ val32 |= (val32 << 16);
-+
-+ /* align destination to 32 */
-+ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
-+ {
-+ *p_Dst8++ = val;
-+ size--;
-+ }
-+
-+ /* 32-bit chunks */
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ while (size >> 2) /* size >= 4 */
-+ {
-+ *p_Dst32++ = val32;
-+ size -= 4;
-+ }
-+
-+ /* complete the leftovers */
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ while (size--)
-+ *p_Dst8++ = val;
-+
-+ return pDst;
-+}
-+
-+void * IOMemSet32(void* pDst, uint8_t val, uint32_t size)
-+{
-+ uint32_t val32;
-+ uint32_t *p_Dst32;
-+ uint8_t *p_Dst8;
-+
-+ p_Dst8 = (uint8_t*)(pDst);
-+
-+ /* generate four 8-bit val's in 32-bit container */
-+ val32 = (uint32_t) val;
-+ val32 |= (val32 << 8);
-+ val32 |= (val32 << 16);
-+
-+ /* align destination to 32 */
-+ while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */
-+ {
-+ WRITE_UINT8(*p_Dst8, val);
-+ p_Dst8++;
-+ size--;
-+ }
-+
-+ /* 32-bit chunks */
-+ p_Dst32 = (uint32_t*)(p_Dst8);
-+ while (size >> 2) /* size >= 4 */
-+ {
-+ WRITE_UINT32(*p_Dst32, val32);
-+ p_Dst32++;
-+ size -= 4;
-+ }
-+
-+ /* complete the leftovers */
-+ p_Dst8 = (uint8_t*)(p_Dst32);
-+ while (size--)
-+ {
-+ WRITE_UINT8(*p_Dst8, val);
-+ p_Dst8++;
-+ }
-+
-+ return pDst;
-+}
-+
-+void * MemSet64(void* pDst, uint8_t val, uint32_t size)
-+{
-+ uint64_t val64;
-+ uint64_t *pDst64;
-+ uint8_t *p_Dst8;
-+
-+ p_Dst8 = (uint8_t*)(pDst);
-+
-+ /* generate eight 8-bit vals in a 64-bit container */
-+ val64 = (uint64_t) val;
-+ val64 |= (val64 << 8);
-+ val64 |= (val64 << 16);
-+ val64 |= (val64 << 24);
-+ val64 |= (val64 << 32);
-+
-+ /* align destination to 64 */
-+ while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */
-+ {
-+ *p_Dst8++ = val;
-+ size--;
-+ }
-+
-+ /* 64-bit chunks */
-+ pDst64 = (uint64_t*)(p_Dst8);
-+ while (size >> 4) /* size >= 16 */
-+ {
-+ *pDst64++ = val64;
-+ size -= 8;
-+ }
-+
-+ /* complete the leftovers */
-+ p_Dst8 = (uint8_t*)(pDst64);
-+ while (size--)
-+ *p_Dst8++ = val;
-+
-+ return pDst;
-+}
-+
-+void MemDisp(uint8_t *p, int size)
-+{
-+ uint32_t space = (uint32_t)(PTR_TO_UINT(p) & 0x3);
-+ uint8_t *p_Limit;
-+
-+ if (space)
-+ {
-+ p_Limit = (p - space + 4);
-+
-+ XX_Print("0x%08X: ", (p - space));
-+
-+ while (space--)
-+ {
-+ XX_Print("--");
-+ }
-+ while (size && (p < p_Limit))
-+ {
-+ XX_Print("%02x", *(uint8_t*)p);
-+ size--;
-+ p++;
-+ }
-+
-+ XX_Print(" ");
-+ p_Limit += 12;
-+
-+ while ((size > 3) && (p < p_Limit))
-+ {
-+ XX_Print("%08x ", *(uint32_t*)p);
-+ size -= 4;
-+ p += 4;
-+ }
-+ XX_Print("\r\n");
-+ }
-+
-+ while (size > 15)
-+ {
-+ XX_Print("0x%08X: %08x %08x %08x %08x\r\n",
-+ p, *(uint32_t *)p, *(uint32_t *)(p + 4),
-+ *(uint32_t *)(p + 8), *(uint32_t *)(p + 12));
-+ size -= 16;
-+ p += 16;
-+ }
-+
-+ if (size)
-+ {
-+ XX_Print("0x%08X: ", p);
-+
-+ while (size > 3)
-+ {
-+ XX_Print("%08x ", *(uint32_t *)p);
-+ size -= 4;
-+ p += 4;
-+ }
-+ while (size)
-+ {
-+ XX_Print("%02x", *(uint8_t *)p);
-+ size--;
-+ p++;
-+ }
-+
-+ XX_Print("\r\n");
-+ }
-+}
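
The copy routines in this file all follow the same pattern: copy single bytes until the source and then the destination are word aligned, move whole words (shifting and OR-ing adjacent source words when the source remains misaligned), and finish the tail byte by byte. The following is a minimal standalone sketch of that shift-and-merge scheme in plain C; the function name is invented, uintptr_t stands in for the SDK's PTR_TO_UINT macro, and, like the driver code, the shift directions assume a big-endian core.

#include <stdint.h>

/* copy32_sketch: illustration of the word-at-a-time copy used above
 * (big-endian byte order assumed, as on the PowerPC/Layerscape targets) */
static void *copy32_sketch(void *dst, const void *src, uint32_t size)
{
    uint8_t *d = (uint8_t *)dst;
    const uint8_t *s = (const uint8_t *)src;
    uint32_t left, right;

    /* byte copies until the source is word aligned, so the word reads
     * below never touch memory before the start of the source buffer */
    while (((uintptr_t)s & 3) && size) {
        *d++ = *s++;
        size--;
    }
    /* byte copies until the destination is word aligned as well
     * (this may leave the source misaligned again) */
    while (((uintptr_t)d & 3) && size) {
        *d++ = *s++;
        size--;
    }

    left = ((uintptr_t)s & 3) * 8;   /* misalignment of the source, in bits */
    right = 32 - left;

    if (left == 0) {
        /* both pointers aligned: plain word copy */
        while (size >= 4) {
            *(uint32_t *)d = *(const uint32_t *)s;
            d += 4; s += 4; size -= 4;
        }
    } else {
        /* read aligned words around the misaligned source and stitch
         * each output word from two neighbouring input words */
        const uint32_t *sw = (const uint32_t *)(s - left / 8);
        uint32_t last = *sw++;

        while (size >= 8) {          /* one word of look-ahead is needed */
            uint32_t cur = *sw++;
            *(uint32_t *)d = (last << left) | (cur >> right);
            last = cur;
            d += 4; size -= 4;
        }
        s = (const uint8_t *)sw - 4 + left / 8;
    }

    while (size--)                   /* trailing bytes */
        *d++ = *s++;

    return dst;
}

The 64-bit variant works the same way with 8-byte words, and the IO2IOCpy32/Mem2IOCpy32/IO2MemCpy32 variants differ only in using the GET_UINT/WRITE_UINT accessors on the I/O side.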
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/etc/mm.c
-@@ -0,0 +1,1155 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#include "string_ext.h"
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "part_ext.h"
-+#include "xx_ext.h"
-+
-+#include "mm.h"
-+
-+
-+
-+
-+/**********************************************************************
-+ * MM internal routines set *
-+ **********************************************************************/
-+
-+/****************************************************************
-+ * Routine: CreateBusyBlock
-+ *
-+ * Description:
-+ * Initializes a new busy block of "size" bytes starting
-+ * from "base" address. Each busy block has a name that
-+ * specifies the purpose of the memory allocation.
-+ *
-+ * Arguments:
-+ * base - base address of the busy block
-+ * size - size of the busy block
-+ * name - name that specifies the busy block
-+ *
-+ * Return value:
-+ * A pointer to the newly created structure is returned on success;
-+ * Otherwise, NULL.
-+ ****************************************************************/
-+static t_BusyBlock * CreateBusyBlock(uint64_t base, uint64_t size, char *name)
-+{
-+ t_BusyBlock *p_BusyBlock;
-+ uint32_t n;
-+
-+ p_BusyBlock = (t_BusyBlock *)XX_Malloc(sizeof(t_BusyBlock));
-+ if ( !p_BusyBlock )
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ return NULL;
-+ }
-+
-+ p_BusyBlock->base = base;
-+ p_BusyBlock->end = base + size;
-+
-+ n = strlen(name);
-+ if (n >= MM_MAX_NAME_LEN)
-+ n = MM_MAX_NAME_LEN - 1;
-+ strncpy(p_BusyBlock->name, name, MM_MAX_NAME_LEN-1);
-+ p_BusyBlock->name[n] = '\0';
-+ p_BusyBlock->p_Next = 0;
-+
-+ return p_BusyBlock;
-+}
-+
-+/****************************************************************
-+ * Routine: CreateNewBlock
-+ *
-+ * Description:
-+ * Initializes a new memory block of "size" bytes starting
-+ * from "base" address.
-+ *
-+ * Arguments:
-+ * base - base address of the memory block
-+ * size - size of the memory block
-+ *
-+ * Return value:
-+ * A pointer to the newly created structure is returned on success;
-+ * Otherwise, NULL.
-+ ****************************************************************/
-+static t_MemBlock * CreateNewBlock(uint64_t base, uint64_t size)
-+{
-+ t_MemBlock *p_MemBlock;
-+
-+ p_MemBlock = (t_MemBlock *)XX_Malloc(sizeof(t_MemBlock));
-+ if ( !p_MemBlock )
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ return NULL;
-+ }
-+
-+ p_MemBlock->base = base;
-+ p_MemBlock->end = base+size;
-+ p_MemBlock->p_Next = 0;
-+
-+ return p_MemBlock;
-+}
-+
-+/****************************************************************
-+ * Routine: CreateFreeBlock
-+ *
-+ * Description:
-+ * Initializes a new free block of "size" bytes starting
-+ * from "base" address.
-+ *
-+ * Arguments:
-+ * base - base address of the free block
-+ * size - size of the free block
-+ *
-+ * Return value:
-+ * A pointer to the newly created structure is returned on success;
-+ * Otherwise, NULL.
-+ ****************************************************************/
-+static t_FreeBlock * CreateFreeBlock(uint64_t base, uint64_t size)
-+{
-+ t_FreeBlock *p_FreeBlock;
-+
-+ p_FreeBlock = (t_FreeBlock *)XX_Malloc(sizeof(t_FreeBlock));
-+ if ( !p_FreeBlock )
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ return NULL;
-+ }
-+
-+ p_FreeBlock->base = base;
-+ p_FreeBlock->end = base + size;
-+ p_FreeBlock->p_Next = 0;
-+
-+ return p_FreeBlock;
-+}
-+
-+/****************************************************************
-+ * Routine: AddFree
-+ *
-+ * Description:
-+ * Adds a new free block to the free lists. It updates each
-+ * free list to include the new free block.
-+ * Note that all free blocks in each free list are ordered
-+ * by their base address.
-+ *
-+ * Arguments:
-+ * p_MM - pointer to the MM object
-+ * base - base address of a given free block
-+ * end - end address of a given free block
-+ *
-+ * Return value:
-+ * E_OK on success;
-+ * E_NO_MEMORY if a new free block could not be allocated.
-+ ****************************************************************/
-+static t_Error AddFree(t_MM *p_MM, uint64_t base, uint64_t end)
-+{
-+ t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB;
-+ uint64_t alignment;
-+ uint64_t alignBase;
-+ int i;
-+
-+ /* Updates free lists to include a just released block */
-+ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
-+ {
-+ p_PrevB = p_NewB = 0;
-+ p_CurrB = p_MM->freeBlocks[i];
-+
-+ alignment = (uint64_t)(0x1 << i);
-+ alignBase = MAKE_ALIGNED(base, alignment);
-+
-+ /* Goes to the next free list if there is no block to free */
-+ if (alignBase >= end)
-+ continue;
-+
-+ /* Looks for a free block that should be updated */
-+ while ( p_CurrB )
-+ {
-+ if ( alignBase <= p_CurrB->end )
-+ {
-+ if ( end > p_CurrB->end )
-+ {
-+ t_FreeBlock *p_NextB;
-+ while ( p_CurrB->p_Next && end > p_CurrB->p_Next->end )
-+ {
-+ p_NextB = p_CurrB->p_Next;
-+ p_CurrB->p_Next = p_CurrB->p_Next->p_Next;
-+ XX_Free(p_NextB);
-+ }
-+
-+ p_NextB = p_CurrB->p_Next;
-+ if ( !p_NextB || (p_NextB && end < p_NextB->base) )
-+ {
-+ p_CurrB->end = end;
-+ }
-+ else
-+ {
-+ p_CurrB->end = p_NextB->end;
-+ p_CurrB->p_Next = p_NextB->p_Next;
-+ XX_Free(p_NextB);
-+ }
-+ }
-+ else if ( (end < p_CurrB->base) && ((end-alignBase) >= alignment) )
-+ {
-+ if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+
-+ p_NewB->p_Next = p_CurrB;
-+ if (p_PrevB)
-+ p_PrevB->p_Next = p_NewB;
-+ else
-+ p_MM->freeBlocks[i] = p_NewB;
-+ break;
-+ }
-+
-+ if ((alignBase < p_CurrB->base) && (end >= p_CurrB->base))
-+ {
-+ p_CurrB->base = alignBase;
-+ }
-+
-+ /* if the size of the free block is less than the alignment,
-+ * delete that free block from the free list. */
-+ if ( (p_CurrB->end - p_CurrB->base) < alignment)
-+ {
-+ if ( p_PrevB )
-+ p_PrevB->p_Next = p_CurrB->p_Next;
-+ else
-+ p_MM->freeBlocks[i] = p_CurrB->p_Next;
-+ XX_Free(p_CurrB);
-+ p_CurrB = NULL;
-+ }
-+ break;
-+ }
-+ else
-+ {
-+ p_PrevB = p_CurrB;
-+ p_CurrB = p_CurrB->p_Next;
-+ }
-+ }
-+
-+ /* If no free block was found to be updated, insert a new free block
-+ * at the end of the free list.
-+ */
-+ if ( !p_CurrB && ((((uint64_t)(end-base)) & ((uint64_t)(alignment-1))) == 0) )
-+ {
-+ if ((p_NewB = CreateFreeBlock(alignBase, end-base)) == NULL)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+
-+ if (p_PrevB)
-+ p_PrevB->p_Next = p_NewB;
-+ else
-+ p_MM->freeBlocks[i] = p_NewB;
-+ }
-+
-+ /* Update boundaries of the new free block */
-+ if ((alignment == 1) && !p_NewB)
-+ {
-+ if ( p_CurrB && base > p_CurrB->base )
-+ base = p_CurrB->base;
-+ if ( p_CurrB && end < p_CurrB->end )
-+ end = p_CurrB->end;
-+ }
-+ }
-+
-+ return (E_OK);
-+}
-+
-+/****************************************************************
-+ * Routine: CutFree
-+ *
-+ * Description:
-+ * Cuts a free block from holdBase to holdEnd from the free lists.
-+ * That is, it updates all free lists of the MM object so that they
-+ * do not include a block of memory from holdBase to holdEnd.
-+ * For each free list it seeks a free block that holds
-+ * either holdBase or holdEnd. If such a block is found, it is updated.
-+ *
-+ * Arguments:
-+ * p_MM - pointer to the MM object
-+ * holdBase - base address of the allocated block
-+ * holdEnd - end address of the allocated block
-+ *
-+ * Return value:
-+ * E_OK is returned on success,
-+ * otherwise returns an error code.
-+ *
-+ ****************************************************************/
-+static t_Error CutFree(t_MM *p_MM, uint64_t holdBase, uint64_t holdEnd)
-+{
-+ t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB;
-+ uint64_t alignBase, base, end;
-+ uint64_t alignment;
-+ int i;
-+
-+ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
-+ {
-+ p_PrevB = p_NewB = 0;
-+ p_CurrB = p_MM->freeBlocks[i];
-+
-+ alignment = (uint64_t)(0x1 << i);
-+ alignBase = MAKE_ALIGNED(holdEnd, alignment);
-+
-+ while ( p_CurrB )
-+ {
-+ base = p_CurrB->base;
-+ end = p_CurrB->end;
-+
-+ if ( (holdBase <= base) && (holdEnd <= end) && (holdEnd > base) )
-+ {
-+ if ( alignBase >= end ||
-+ (alignBase < end && ((end-alignBase) < alignment)) )
-+ {
-+ if (p_PrevB)
-+ p_PrevB->p_Next = p_CurrB->p_Next;
-+ else
-+ p_MM->freeBlocks[i] = p_CurrB->p_Next;
-+ XX_Free(p_CurrB);
-+ }
-+ else
-+ {
-+ p_CurrB->base = alignBase;
-+ }
-+ break;
-+ }
-+ else if ( (holdBase > base) && (holdEnd <= end) )
-+ {
-+ if ( (holdBase-base) >= alignment )
-+ {
-+ if ( (alignBase < end) && ((end-alignBase) >= alignment) )
-+ {
-+ if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ p_NewB->p_Next = p_CurrB->p_Next;
-+ p_CurrB->p_Next = p_NewB;
-+ }
-+ p_CurrB->end = holdBase;
-+ }
-+ else if ( (alignBase < end) && ((end-alignBase) >= alignment) )
-+ {
-+ p_CurrB->base = alignBase;
-+ }
-+ else
-+ {
-+ if (p_PrevB)
-+ p_PrevB->p_Next = p_CurrB->p_Next;
-+ else
-+ p_MM->freeBlocks[i] = p_CurrB->p_Next;
-+ XX_Free(p_CurrB);
-+ }
-+ break;
-+ }
-+ else
-+ {
-+ p_PrevB = p_CurrB;
-+ p_CurrB = p_CurrB->p_Next;
-+ }
-+ }
-+ }
-+
-+ return (E_OK);
-+}
-+
-+/****************************************************************
-+ * Routine: AddBusy
-+ *
-+ * Description:
-+ * Adds a new busy block to the list of busy blocks. Note
-+ * that all busy blocks are ordered by their base address in
-+ * the busy list.
-+ *
-+ * Arguments:
-+ * MM - handle to the MM object
-+ * p_NewBusyB - pointer to a busy block
-+ *
-+ * Return value:
-+ * None.
-+ *
-+ ****************************************************************/
-+static void AddBusy(t_MM *p_MM, t_BusyBlock *p_NewBusyB)
-+{
-+ t_BusyBlock *p_CurrBusyB, *p_PrevBusyB;
-+
-+ /* finds a place of a new busy block in the list of busy blocks */
-+ p_PrevBusyB = 0;
-+ p_CurrBusyB = p_MM->busyBlocks;
-+
-+ while ( p_CurrBusyB && p_NewBusyB->base > p_CurrBusyB->base )
-+ {
-+ p_PrevBusyB = p_CurrBusyB;
-+ p_CurrBusyB = p_CurrBusyB->p_Next;
-+ }
-+
-+ /* insert the new busy block into the list of busy blocks */
-+ if ( p_CurrBusyB )
-+ p_NewBusyB->p_Next = p_CurrBusyB;
-+ if ( p_PrevBusyB )
-+ p_PrevBusyB->p_Next = p_NewBusyB;
-+ else
-+ p_MM->busyBlocks = p_NewBusyB;
-+}
-+
-+/****************************************************************
-+ * Routine: CutBusy
-+ *
-+ * Description:
-+ * Cuts a block from base to end from the list of busy blocks.
-+ * This is done by updating the list of busy blocks so that it does
-+ * not include the given block, which is going to be freed. If the
-+ * given block is part of some other busy block, that
-+ * busy block is updated. If a number of busy blocks are
-+ * included in the given block, all those blocks are removed
-+ * from the busy list and the end blocks are updated.
-+ * If the given block divides some block into two parts, a new
-+ * busy block is added to the busy list.
-+ *
-+ * Arguments:
-+ * p_MM - pointer to the MM object
-+ * base - base address of a given busy block
-+ * end - end address of a given busy block
-+ *
-+ * Return value:
-+ * E_OK on success, E_NO_MEMORY otherwise.
-+ *
-+ ****************************************************************/
-+static t_Error CutBusy(t_MM *p_MM, uint64_t base, uint64_t end)
-+{
-+ t_BusyBlock *p_CurrB, *p_PrevB, *p_NewB;
-+
-+ p_CurrB = p_MM->busyBlocks;
-+ p_PrevB = p_NewB = 0;
-+
-+ while ( p_CurrB )
-+ {
-+ if ( base < p_CurrB->end )
-+ {
-+ if ( end > p_CurrB->end )
-+ {
-+ t_BusyBlock *p_NextB;
-+ while ( p_CurrB->p_Next && end >= p_CurrB->p_Next->end )
-+ {
-+ p_NextB = p_CurrB->p_Next;
-+ p_CurrB->p_Next = p_CurrB->p_Next->p_Next;
-+ XX_Free(p_NextB);
-+ }
-+
-+ p_NextB = p_CurrB->p_Next;
-+ if ( p_NextB && end > p_NextB->base )
-+ {
-+ p_NextB->base = end;
-+ }
-+ }
-+
-+ if ( base <= p_CurrB->base )
-+ {
-+ if ( end < p_CurrB->end && end > p_CurrB->base )
-+ {
-+ p_CurrB->base = end;
-+ }
-+ else if ( end >= p_CurrB->end )
-+ {
-+ if ( p_PrevB )
-+ p_PrevB->p_Next = p_CurrB->p_Next;
-+ else
-+ p_MM->busyBlocks = p_CurrB->p_Next;
-+ XX_Free(p_CurrB);
-+ }
-+ }
-+ else
-+ {
-+ if ( end < p_CurrB->end && end > p_CurrB->base )
-+ {
-+ if ((p_NewB = CreateBusyBlock(end,
-+ p_CurrB->end-end,
-+ p_CurrB->name)) == NULL)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ p_NewB->p_Next = p_CurrB->p_Next;
-+ p_CurrB->p_Next = p_NewB;
-+ }
-+ p_CurrB->end = base;
-+ }
-+ break;
-+ }
-+ else
-+ {
-+ p_PrevB = p_CurrB;
-+ p_CurrB = p_CurrB->p_Next;
-+ }
-+ }
-+
-+ return (E_OK);
-+}
-+
-+/****************************************************************
-+ * Routine: MmGetGreaterAlignment
-+ *
-+ * Description:
-+ * Allocates a block of memory according to the given size
-+ * and the alignment. This routine is called from the MM_Get
-+ * routine if the required alignment is greater than MM_MAX_ALIGNMENT.
-+ * In that case, it goes over the free blocks of the 64-byte-aligned list
-+ * and checks for a block of the required size at the required
-+ * alignment. If no such block is found, it returns ILLEGAL_BASE.
-+ * After the block is found and the data is allocated, it calls
-+ * the internal CutFree routine to update all free lists so that
-+ * they do not include the just-allocated block. Of course, each
-+ * free list contains free blocks with the same alignment.
-+ * It also creates a busy block that holds
-+ * information about the allocated block.
-+ *
-+ * Arguments:
-+ * MM - handle to the MM object
-+ * size - size of the requested block
-+ * alignment - a required alignment (power of two)
-+ * that is greater than 64.
-+ * name - the name that specifies an allocated block.
-+ *
-+ * Return value:
-+ * base address of an allocated block.
-+ * ILLEGAL_BASE if a block cannot be allocated
-+ *
-+ ****************************************************************/
-+static uint64_t MmGetGreaterAlignment(t_MM *p_MM, uint64_t size, uint64_t alignment, char* name)
-+{
-+ t_FreeBlock *p_FreeB;
-+ t_BusyBlock *p_NewBusyB;
-+ uint64_t holdBase, holdEnd, alignBase = 0;
-+
-+ /* goes over free blocks of the 64 byte alignment list
-+ and look for a block of the suitable size and
-+ base address according to the alignment. */
-+ p_FreeB = p_MM->freeBlocks[MM_MAX_ALIGNMENT];
-+
-+ while ( p_FreeB )
-+ {
-+ alignBase = MAKE_ALIGNED(p_FreeB->base, alignment);
-+
-+ /* the block is found if the aligned base is inside the block
-+ * and the remaining size is large enough. */
-+ if ( alignBase >= p_FreeB->base &&
-+ alignBase < p_FreeB->end &&
-+ size <= (p_FreeB->end - alignBase) )
-+ break;
-+ else
-+ p_FreeB = p_FreeB->p_Next;
-+ }
-+
-+ /* If such block isn't found */
-+ if ( !p_FreeB )
-+ return (uint64_t)(ILLEGAL_BASE);
-+
-+ holdBase = alignBase;
-+ holdEnd = alignBase + size;
-+
-+ /* init a new busy block */
-+ if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL)
-+ return (uint64_t)(ILLEGAL_BASE);
-+
-+ /* calls the CutFree routine to update the lists of free blocks */
-+ if ( CutFree ( p_MM, holdBase, holdEnd ) != E_OK )
-+ {
-+ XX_Free(p_NewBusyB);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* insert the new busy block into the list of busy blocks */
-+ AddBusy ( p_MM, p_NewBusyB );
-+
-+ return (holdBase);
-+}
-+
-+
-+/**********************************************************************
-+ * MM API routines set *
-+ **********************************************************************/
-+
-+/*****************************************************************************/
-+t_Error MM_Init(t_Handle *h_MM, uint64_t base, uint64_t size)
-+{
-+ t_MM *p_MM;
-+ uint64_t newBase, newSize;
-+ int i;
-+
-+ if (!size)
-+ {
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Size (should be positive)"));
-+ }
-+
-+ /* Initializes a new MM object */
-+ p_MM = (t_MM *)XX_Malloc(sizeof(t_MM));
-+ if (!p_MM)
-+ {
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ }
-+
-+ p_MM->h_Spinlock = XX_InitSpinlock();
-+ if (!p_MM->h_Spinlock)
-+ {
-+ XX_Free(p_MM);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MM spinlock!"));
-+ }
-+
-+ /* Initializes counter of free memory to total size */
-+ p_MM->freeMemSize = size;
-+
-+ /* A busy list is empty */
-+ p_MM->busyBlocks = 0;
-+
-+ /* Initializes a new memory block */
-+ if ((p_MM->memBlocks = CreateNewBlock(base, size)) == NULL)
-+ {
-+ MM_Free(p_MM);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ }
-+
-+ /* Initializes a new free block for each free list*/
-+ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
-+ {
-+ newBase = MAKE_ALIGNED( base, (0x1 << i) );
-+ newSize = size - (newBase - base);
-+
-+ if ((p_MM->freeBlocks[i] = CreateFreeBlock(newBase, newSize)) == NULL)
-+ {
-+ MM_Free(p_MM);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ }
-+ }
-+
-+ *h_MM = p_MM;
-+
-+ return (E_OK);
-+}
-+
-+/*****************************************************************************/
-+void MM_Free(t_Handle h_MM)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ t_MemBlock *p_MemBlock;
-+ t_BusyBlock *p_BusyBlock;
-+ t_FreeBlock *p_FreeBlock;
-+ void *p_Block;
-+ int i;
-+
-+ ASSERT_COND(p_MM);
-+
-+ /* release memory allocated for busy blocks */
-+ p_BusyBlock = p_MM->busyBlocks;
-+ while ( p_BusyBlock )
-+ {
-+ p_Block = p_BusyBlock;
-+ p_BusyBlock = p_BusyBlock->p_Next;
-+ XX_Free(p_Block);
-+ }
-+
-+ /* release memory allocated for free blocks */
-+ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
-+ {
-+ p_FreeBlock = p_MM->freeBlocks[i];
-+ while ( p_FreeBlock )
-+ {
-+ p_Block = p_FreeBlock;
-+ p_FreeBlock = p_FreeBlock->p_Next;
-+ XX_Free(p_Block);
-+ }
-+ }
-+
-+ /* release memory allocated for memory blocks */
-+ p_MemBlock = p_MM->memBlocks;
-+ while ( p_MemBlock )
-+ {
-+ p_Block = p_MemBlock;
-+ p_MemBlock = p_MemBlock->p_Next;
-+ XX_Free(p_Block);
-+ }
-+
-+ if (p_MM->h_Spinlock)
-+ XX_FreeSpinlock(p_MM->h_Spinlock);
-+
-+ /* release memory allocated for MM object itself */
-+ XX_Free(p_MM);
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_Get(t_Handle h_MM, uint64_t size, uint64_t alignment, char* name)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ t_FreeBlock *p_FreeB;
-+ t_BusyBlock *p_NewBusyB;
-+ uint64_t holdBase, holdEnd, j, i = 0;
-+ uint32_t intFlags;
-+
-+ SANITY_CHECK_RETURN_VALUE(p_MM, E_INVALID_HANDLE, (uint64_t)ILLEGAL_BASE);
-+
-+ /* checks that the alignment value is greater than zero */
-+ if (alignment == 0)
-+ {
-+ alignment = 1;
-+ }
-+
-+ j = alignment;
-+
-+ /* checks whether the given alignment is a power of two and
-+ computes i = log2(alignment). */
-+ while ((j & 0x1) == 0)
-+ {
-+ i++;
-+ j = j >> 1;
-+ }
-+
-+ /* if the given alignment isn't power of two, returns an error */
-+ if (j != 1)
-+ {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("alignment (should be power of 2)"));
-+ return (uint64_t)ILLEGAL_BASE;
-+ }
-+
-+ if (i > MM_MAX_ALIGNMENT)
-+ {
-+ return (MmGetGreaterAlignment(p_MM, size, alignment, name));
-+ }
-+
-+ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
-+ /* look for a block of size greater than or equal to the required size. */
-+ p_FreeB = p_MM->freeBlocks[i];
-+ while ( p_FreeB && (p_FreeB->end - p_FreeB->base) < size )
-+ p_FreeB = p_FreeB->p_Next;
-+
-+ /* If no such block is found */
-+ if ( !p_FreeB )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ holdBase = p_FreeB->base;
-+ holdEnd = holdBase + size;
-+
-+ /* init a new busy block */
-+ if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL)
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* calls the CutFree routine to update the lists of free blocks */
-+ if ( CutFree ( p_MM, holdBase, holdEnd ) != E_OK )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ XX_Free(p_NewBusyB);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* Decreasing the allocated memory size from free memory size */
-+ p_MM->freeMemSize -= size;
-+
-+ /* insert the new busy block into the list of busy blocks */
-+ AddBusy ( p_MM, p_NewBusyB );
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+
-+ return (holdBase);
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_GetForce(t_Handle h_MM, uint64_t base, uint64_t size, char* name)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ t_FreeBlock *p_FreeB;
-+ t_BusyBlock *p_NewBusyB;
-+ uint32_t intFlags;
-+ bool blockIsFree = FALSE;
-+
-+ ASSERT_COND(p_MM);
-+
-+ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
-+ p_FreeB = p_MM->freeBlocks[0]; /* The biggest free blocks are in the
-+ free list with alignment 1 */
-+
-+ while ( p_FreeB )
-+ {
-+ if ( base >= p_FreeB->base && (base+size) <= p_FreeB->end )
-+ {
-+ blockIsFree = TRUE;
-+ break;
-+ }
-+ else
-+ p_FreeB = p_FreeB->p_Next;
-+ }
-+
-+ if ( !blockIsFree )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* init a new busy block */
-+ if ((p_NewBusyB = CreateBusyBlock(base, size, name)) == NULL)
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* calls the CutFree routine to update the lists of free blocks */
-+ if ( CutFree ( p_MM, base, base+size ) != E_OK )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ XX_Free(p_NewBusyB);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* Decreasing the allocated memory size from free memory size */
-+ p_MM->freeMemSize -= size;
-+
-+ /* insert the new busy block into the list of busy blocks */
-+ AddBusy ( p_MM, p_NewBusyB );
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+
-+ return (base);
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_GetForceMin(t_Handle h_MM, uint64_t size, uint64_t alignment, uint64_t min, char* name)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ t_FreeBlock *p_FreeB;
-+ t_BusyBlock *p_NewBusyB;
-+ uint64_t holdBase, holdEnd, j = alignment, i=0;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_MM);
-+
-+ /* checks whether the given alignment is a power of two and
-+ computes i = log2(alignment). */
-+ while ((j & 0x1) == 0)
-+ {
-+ i++;
-+ j = j >> 1;
-+ }
-+
-+ if ( (j != 1) || (i > MM_MAX_ALIGNMENT) )
-+ {
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
-+ p_FreeB = p_MM->freeBlocks[i];
-+
-+ /* look for the first block that contains the minimum
-+ base address. If the whole required size fits
-+ into it, use that block; otherwise look for the next
-+ block of size greater than or equal to the required size. */
-+ while ( p_FreeB && (min >= p_FreeB->end))
-+ p_FreeB = p_FreeB->p_Next;
-+
-+ /* If no such block is found */
-+ if ( !p_FreeB )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* if this block is large enough, use this block */
-+ holdBase = ( min <= p_FreeB->base ) ? p_FreeB->base : min;
-+ if ((holdBase + size) <= p_FreeB->end )
-+ {
-+ holdEnd = holdBase + size;
-+ }
-+ else
-+ {
-+ p_FreeB = p_FreeB->p_Next;
-+ while ( p_FreeB && ((p_FreeB->end - p_FreeB->base) < size) )
-+ p_FreeB = p_FreeB->p_Next;
-+
-+ /* If no such block is found */
-+ if ( !p_FreeB )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ holdBase = p_FreeB->base;
-+ holdEnd = holdBase + size;
-+ }
-+
-+ /* init a new busy block */
-+ if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL)
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* calls the CutFree routine to update the lists of free blocks */
-+ if ( CutFree( p_MM, holdBase, holdEnd ) != E_OK )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ XX_Free(p_NewBusyB);
-+ return (uint64_t)(ILLEGAL_BASE);
-+ }
-+
-+ /* Decreasing the allocated memory size from free memory size */
-+ p_MM->freeMemSize -= size;
-+
-+ /* insert the new busy block into the list of busy blocks */
-+ AddBusy( p_MM, p_NewBusyB );
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+
-+ return (holdBase);
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_Put(t_Handle h_MM, uint64_t base)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ t_BusyBlock *p_BusyB, *p_PrevBusyB;
-+ uint64_t size;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_MM);
-+
-+ /* Look for a busy block that has the given base value.
-+ * That block will be returned to the free memory.
-+ */
-+ p_PrevBusyB = 0;
-+
-+ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
-+ p_BusyB = p_MM->busyBlocks;
-+ while ( p_BusyB && base != p_BusyB->base )
-+ {
-+ p_PrevBusyB = p_BusyB;
-+ p_BusyB = p_BusyB->p_Next;
-+ }
-+
-+ if ( !p_BusyB )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(0);
-+ }
-+
-+ if ( AddFree( p_MM, p_BusyB->base, p_BusyB->end ) != E_OK )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(0);
-+ }
-+
-+ /* removes a busy block from the list of busy blocks */
-+ if ( p_PrevBusyB )
-+ p_PrevBusyB->p_Next = p_BusyB->p_Next;
-+ else
-+ p_MM->busyBlocks = p_BusyB->p_Next;
-+
-+ size = p_BusyB->end - p_BusyB->base;
-+
-+ /* Adding the deallocated memory size to free memory size */
-+ p_MM->freeMemSize += size;
-+
-+ XX_Free(p_BusyB);
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+
-+ return (size);
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_PutForce(t_Handle h_MM, uint64_t base, uint64_t size)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ uint64_t end = base + size;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_MM);
-+
-+ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
-+
-+ if ( CutBusy( p_MM, base, end ) != E_OK )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(0);
-+ }
-+
-+ if ( AddFree ( p_MM, base, end ) != E_OK )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ return (uint64_t)(0);
-+ }
-+
-+ /* Adding the deallocated memory size to free memory size */
-+ p_MM->freeMemSize += size;
-+
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+
-+ return (size);
-+}
-+
-+/*****************************************************************************/
-+t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ t_MemBlock *p_MemB, *p_NewMemB;
-+ t_Error errCode;
-+ uint32_t intFlags;
-+
-+ ASSERT_COND(p_MM);
-+
-+ /* find the last block in the list of memory blocks, in order to
-+ * append the new memory block after it
-+ */
-+ intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
-+
-+ p_MemB = p_MM->memBlocks;
-+ while ( p_MemB->p_Next )
-+ {
-+ if ( base >= p_MemB->base && base < p_MemB->end )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
-+ }
-+ p_MemB = p_MemB->p_Next;
-+ }
-+ /* check for a last memory block */
-+ if ( base >= p_MemB->base && base < p_MemB->end )
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
-+ }
-+
-+ /* create a new memory block */
-+ if ((p_NewMemB = CreateNewBlock(base, size)) == NULL)
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ }
-+
-+ /* append a new memory block to the end of the list of memory blocks */
-+ p_MemB->p_Next = p_NewMemB;
-+
-+ /* add a new free block to the free lists */
-+ errCode = AddFree(p_MM, base, base+size);
-+ if (errCode)
-+ {
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+ p_MemB->p_Next = 0;
-+ XX_Free(p_NewMemB);
-+ return ((t_Error)errCode);
-+ }
-+
-+ /* Adding the new block size to free memory size */
-+ p_MM->freeMemSize += size;
-+
-+ XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
-+
-+ return (E_OK);
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_GetMemBlock(t_Handle h_MM, int index)
-+{
-+ t_MM *p_MM = (t_MM*)h_MM;
-+ t_MemBlock *p_MemBlock;
-+ int i;
-+
-+ ASSERT_COND(p_MM);
-+
-+ p_MemBlock = p_MM->memBlocks;
-+ for (i=0; i < index; i++)
-+ p_MemBlock = p_MemBlock->p_Next;
-+
-+ if ( p_MemBlock )
-+ return (p_MemBlock->base);
-+ else
-+ return (uint64_t)ILLEGAL_BASE;
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_GetBase(t_Handle h_MM)
-+{
-+ t_MM *p_MM = (t_MM*)h_MM;
-+ t_MemBlock *p_MemBlock;
-+
-+ ASSERT_COND(p_MM);
-+
-+ p_MemBlock = p_MM->memBlocks;
-+ return p_MemBlock->base;
-+}
-+
-+/*****************************************************************************/
-+bool MM_InRange(t_Handle h_MM, uint64_t addr)
-+{
-+ t_MM *p_MM = (t_MM*)h_MM;
-+ t_MemBlock *p_MemBlock;
-+
-+ ASSERT_COND(p_MM);
-+
-+ p_MemBlock = p_MM->memBlocks;
-+
-+ if ((addr >= p_MemBlock->base) && (addr < p_MemBlock->end))
-+ return TRUE;
-+ else
-+ return FALSE;
-+}
-+
-+/*****************************************************************************/
-+uint64_t MM_GetFreeMemSize(t_Handle h_MM)
-+{
-+ t_MM *p_MM = (t_MM*)h_MM;
-+
-+ ASSERT_COND(p_MM);
-+
-+ return p_MM->freeMemSize;
-+}
-+
-+/*****************************************************************************/
-+void MM_Dump(t_Handle h_MM)
-+{
-+ t_MM *p_MM = (t_MM *)h_MM;
-+ t_FreeBlock *p_FreeB;
-+ t_BusyBlock *p_BusyB;
-+ int i;
-+
-+ p_BusyB = p_MM->busyBlocks;
-+ XX_Print("List of busy blocks:\n");
-+ while (p_BusyB)
-+ {
-+ XX_Print("\t0x%p: (%s: b=0x%llx, e=0x%llx)\n", p_BusyB, p_BusyB->name, p_BusyB->base, p_BusyB->end );
-+ p_BusyB = p_BusyB->p_Next;
-+ }
-+
-+ XX_Print("\nLists of free blocks according to alignment:\n");
-+ for (i=0; i <= MM_MAX_ALIGNMENT; i++)
-+ {
-+ XX_Print("%d alignment:\n", (0x1 << i));
-+ p_FreeB = p_MM->freeBlocks[i];
-+ while (p_FreeB)
-+ {
-+ XX_Print("\t0x%p: (b=0x%llx, e=0x%llx)\n", p_FreeB, p_FreeB->base, p_FreeB->end);
-+ p_FreeB = p_FreeB->p_Next;
-+ }
-+ XX_Print("\n");
-+ }
-+}
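
Taken together, the routines above implement a simple range allocator: MM_Init() seeds one memory block plus a free list per power-of-two alignment up to MM_MAX_ALIGNMENT, MM_Get()/MM_GetForce()/MM_GetForceMin() carve busy blocks out of those lists, and MM_Put()/MM_PutForce() return them. A minimal usage sketch, assuming the SDK's mm_ext.h and XX_* platform layer are available; the base address, sizes, and block names are invented for illustration:

#include "mm_ext.h"          /* MM_* API, t_Handle, t_Error, ILLEGAL_BASE */

static t_Error mm_usage_sketch(void)
{
    t_Handle h_Mem = NULL;
    uint64_t bufs, desc;
    t_Error err;

    /* manage a 1 MiB window starting at 0x01000000 (illustrative values) */
    err = MM_Init(&h_Mem, 0x01000000ULL, 0x100000ULL);
    if (err != E_OK)
        return err;

    /* 64 KiB with 64-byte alignment: served from the ordinary free lists */
    bufs = MM_Get(h_Mem, 0x10000, 64, "rx buffers");

    /* 4 KiB with 4 KiB alignment: larger than MM_MAX_ALIGNMENT, so this
     * request goes through the MmGetGreaterAlignment() path shown above */
    desc = MM_Get(h_Mem, 0x1000, 0x1000, "queue descriptors");

    if (bufs == (uint64_t)ILLEGAL_BASE || desc == (uint64_t)ILLEGAL_BASE) {
        MM_Free(h_Mem);
        return E_NO_MEMORY;
    }

    MM_Dump(h_Mem);            /* print the busy list and per-alignment free lists */

    MM_Put(h_Mem, desc);       /* returns the number of bytes released */
    MM_Free(h_Mem);            /* releases busy/free/memory block bookkeeping */
    return E_OK;
}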
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/etc/mm.h
-@@ -0,0 +1,105 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/****************************************************************
-+ *
-+ * File: mm.h
-+ *
-+ *
-+ * Description:
-+ * MM (Memory Management) object definitions.
-+ * It also includes definitions of the Free Block, Busy Block
-+ * and Memory Block structures used by the MM object.
-+ *
-+ ****************************************************************/
-+
-+#ifndef __MM_H
-+#define __MM_H
-+
-+
-+#include "mm_ext.h"
-+
-+#define __ERR_MODULE__ MODULE_MM
-+
-+
-+#define MAKE_ALIGNED(addr, align) \
-+ (((uint64_t)(addr) + ((align) - 1)) & (~(((uint64_t)align) - 1)))
-+
-+
-+/* t_MemBlock data structure defines parameters of the Memory Block */
-+typedef struct t_MemBlock
-+{
-+ struct t_MemBlock *p_Next; /* Pointer to the next memory block */
-+
-+ uint64_t base; /* Base address of the memory block */
-+ uint64_t end; /* End address of the memory block */
-+} t_MemBlock;
-+
-+
-+/* t_FreeBlock data structure defines parameters of the Free Block */
-+typedef struct t_FreeBlock
-+{
-+ struct t_FreeBlock *p_Next; /* Pointer to the next free block */
-+
-+ uint64_t base; /* Base address of the block */
-+ uint64_t end; /* End address of the block */
-+} t_FreeBlock;
-+
-+
-+/* t_BusyBlock data structure defines parameters of the Busy Block */
-+typedef struct t_BusyBlock
-+{
-+ struct t_BusyBlock *p_Next; /* Pointer to the next busy block */
-+
-+ uint64_t base; /* Base address of the block */
-+ uint64_t end; /* End address of the block */
-+ char name[MM_MAX_NAME_LEN]; /* That block of memory was allocated for
-+ something specified by the Name */
-+} t_BusyBlock;
-+
-+
-+/* t_MM data structure defines parameters of the MM object */
-+typedef struct t_MM
-+{
-+ t_Handle h_Spinlock;
-+
-+ t_MemBlock *memBlocks; /* List of memory blocks (Memory list) */
-+ t_BusyBlock *busyBlocks; /* List of busy blocks (Busy list) */
-+ t_FreeBlock *freeBlocks[MM_MAX_ALIGNMENT + 1];
-+ /* Alignment lists of free blocks (Free lists) */
-+
-+ uint64_t freeMemSize; /* Total size of free memory (in bytes) */
-+} t_MM;
-+
-+
-+#endif /* __MM_H */
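
MAKE_ALIGNED() above is the usual round-up trick for power-of-two alignments: add align-1, then clear the low bits. A couple of worked values make it concrete; this is a standalone check with the macro copied from mm.h, and it only holds for power-of-two alignments.

#include <assert.h>
#include <stdint.h>

#define MAKE_ALIGNED(addr, align) \
    (((uint64_t)(addr) + ((align) - 1)) & (~(((uint64_t)align) - 1)))

int main(void)
{
    /* 0x1003 + 0xFF = 0x1102; 0x1102 & ~0xFF = 0x1100,
     * the next 256-byte boundary at or above 0x1003 */
    assert(MAKE_ALIGNED(0x1003, 0x100) == 0x1100);

    /* addresses that are already aligned are left unchanged */
    assert(MAKE_ALIGNED(0x2000, 0x100) == 0x2000);

    return 0;
}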
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/etc/sprint.c
-@@ -0,0 +1,81 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/*------------------------------------------------------*/
-+/* File: sprint.c */
-+/* */
-+/* Description: */
-+/* Debug routines (externals) */
-+/*------------------------------------------------------*/
-+#include "string_ext.h"
-+#include "stdlib_ext.h"
-+#include "stdarg_ext.h"
-+#include "sprint_ext.h"
-+#include "std_ext.h"
-+#include "xx_ext.h"
-+
-+
-+int Sprint(char * buf, const char *fmt, ...)
-+{
-+ va_list args;
-+ int i;
-+
-+ va_start(args, fmt);
-+ i=vsprintf(buf,fmt,args);
-+ va_end(args);
-+ return i;
-+}
-+
-+int Snprint(char * buf, uint32_t size, const char *fmt, ...)
-+{
-+ va_list args;
-+ int i;
-+
-+ va_start(args, fmt);
-+ i=vsnprintf(buf,size,fmt,args);
-+ va_end(args);
-+ return i;
-+}
-+
-+#ifndef NCSW_VXWORKS
-+int Sscan(const char * buf, const char * fmt, ...)
-+{
-+ va_list args;
-+ int i;
-+
-+ va_start(args,fmt);
-+ i = vsscanf(buf,fmt,args);
-+ va_end(args);
-+ return i;
-+}
-+#endif /* NCSW_VXWORKS */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/fmanv3h_dflags.h
-@@ -0,0 +1,57 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __dflags_h
-+#define __dflags_h
-+
-+
-+#define NCSW_LINUX
-+
-+#define T4240
-+#define NCSW_PPC_CORE
-+
-+#define DEBUG_ERRORS 1
-+
-+#if defined(DEBUG)
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
-+
-+#define DEBUG_XX_MALLOC
-+#define DEBUG_MEM_LEAKS
-+
-+#else
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
-+#endif /* (DEBUG) */
-+
-+#define REPORT_EVENTS 1
-+#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
-+
-+#endif /* __dflags_h */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/fmanv3l_dflags.h
-@@ -0,0 +1,56 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __dflags_h
-+#define __dflags_h
-+
-+
-+#define NCSW_LINUX
-+
-+#define NCSW_PPC_CORE
-+
-+#define DEBUG_ERRORS 1
-+
-+#if defined(DEBUG)
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
-+
-+#define DEBUG_XX_MALLOC
-+#define DEBUG_MEM_LEAKS
-+
-+#else
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
-+#endif /* (DEBUG) */
-+
-+#define REPORT_EVENTS 1
-+#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
-+
-+#endif /* __dflags_h */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/crc_mac_addr_ext.h
-@@ -0,0 +1,364 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/*------------------------------------------------------*/
-+/* */
-+/* File: crc_mac_addr_ext.h */
-+/* */
-+/* Description: */
-+/* Define a macro that calculates the CRC value of */
-+/* an Ethernet MAC address (48-bit address) */
-+/*------------------------------------------------------*/
-+
-+#ifndef __crc_mac_addr_ext_h
-+#define __crc_mac_addr_ext_h
-+
-+#include "std_ext.h"
-+
-+
-+static uint32_t crc_table[256] =
-+{
-+ 0x00000000,
-+ 0x77073096,
-+ 0xee0e612c,
-+ 0x990951ba,
-+ 0x076dc419,
-+ 0x706af48f,
-+ 0xe963a535,
-+ 0x9e6495a3,
-+ 0x0edb8832,
-+ 0x79dcb8a4,
-+ 0xe0d5e91e,
-+ 0x97d2d988,
-+ 0x09b64c2b,
-+ 0x7eb17cbd,
-+ 0xe7b82d07,
-+ 0x90bf1d91,
-+ 0x1db71064,
-+ 0x6ab020f2,
-+ 0xf3b97148,
-+ 0x84be41de,
-+ 0x1adad47d,
-+ 0x6ddde4eb,
-+ 0xf4d4b551,
-+ 0x83d385c7,
-+ 0x136c9856,
-+ 0x646ba8c0,
-+ 0xfd62f97a,
-+ 0x8a65c9ec,
-+ 0x14015c4f,
-+ 0x63066cd9,
-+ 0xfa0f3d63,
-+ 0x8d080df5,
-+ 0x3b6e20c8,
-+ 0x4c69105e,
-+ 0xd56041e4,
-+ 0xa2677172,
-+ 0x3c03e4d1,
-+ 0x4b04d447,
-+ 0xd20d85fd,
-+ 0xa50ab56b,
-+ 0x35b5a8fa,
-+ 0x42b2986c,
-+ 0xdbbbc9d6,
-+ 0xacbcf940,
-+ 0x32d86ce3,
-+ 0x45df5c75,
-+ 0xdcd60dcf,
-+ 0xabd13d59,
-+ 0x26d930ac,
-+ 0x51de003a,
-+ 0xc8d75180,
-+ 0xbfd06116,
-+ 0x21b4f4b5,
-+ 0x56b3c423,
-+ 0xcfba9599,
-+ 0xb8bda50f,
-+ 0x2802b89e,
-+ 0x5f058808,
-+ 0xc60cd9b2,
-+ 0xb10be924,
-+ 0x2f6f7c87,
-+ 0x58684c11,
-+ 0xc1611dab,
-+ 0xb6662d3d,
-+ 0x76dc4190,
-+ 0x01db7106,
-+ 0x98d220bc,
-+ 0xefd5102a,
-+ 0x71b18589,
-+ 0x06b6b51f,
-+ 0x9fbfe4a5,
-+ 0xe8b8d433,
-+ 0x7807c9a2,
-+ 0x0f00f934,
-+ 0x9609a88e,
-+ 0xe10e9818,
-+ 0x7f6a0dbb,
-+ 0x086d3d2d,
-+ 0x91646c97,
-+ 0xe6635c01,
-+ 0x6b6b51f4,
-+ 0x1c6c6162,
-+ 0x856530d8,
-+ 0xf262004e,
-+ 0x6c0695ed,
-+ 0x1b01a57b,
-+ 0x8208f4c1,
-+ 0xf50fc457,
-+ 0x65b0d9c6,
-+ 0x12b7e950,
-+ 0x8bbeb8ea,
-+ 0xfcb9887c,
-+ 0x62dd1ddf,
-+ 0x15da2d49,
-+ 0x8cd37cf3,
-+ 0xfbd44c65,
-+ 0x4db26158,
-+ 0x3ab551ce,
-+ 0xa3bc0074,
-+ 0xd4bb30e2,
-+ 0x4adfa541,
-+ 0x3dd895d7,
-+ 0xa4d1c46d,
-+ 0xd3d6f4fb,
-+ 0x4369e96a,
-+ 0x346ed9fc,
-+ 0xad678846,
-+ 0xda60b8d0,
-+ 0x44042d73,
-+ 0x33031de5,
-+ 0xaa0a4c5f,
-+ 0xdd0d7cc9,
-+ 0x5005713c,
-+ 0x270241aa,
-+ 0xbe0b1010,
-+ 0xc90c2086,
-+ 0x5768b525,
-+ 0x206f85b3,
-+ 0xb966d409,
-+ 0xce61e49f,
-+ 0x5edef90e,
-+ 0x29d9c998,
-+ 0xb0d09822,
-+ 0xc7d7a8b4,
-+ 0x59b33d17,
-+ 0x2eb40d81,
-+ 0xb7bd5c3b,
-+ 0xc0ba6cad,
-+ 0xedb88320,
-+ 0x9abfb3b6,
-+ 0x03b6e20c,
-+ 0x74b1d29a,
-+ 0xead54739,
-+ 0x9dd277af,
-+ 0x04db2615,
-+ 0x73dc1683,
-+ 0xe3630b12,
-+ 0x94643b84,
-+ 0x0d6d6a3e,
-+ 0x7a6a5aa8,
-+ 0xe40ecf0b,
-+ 0x9309ff9d,
-+ 0x0a00ae27,
-+ 0x7d079eb1,
-+ 0xf00f9344,
-+ 0x8708a3d2,
-+ 0x1e01f268,
-+ 0x6906c2fe,
-+ 0xf762575d,
-+ 0x806567cb,
-+ 0x196c3671,
-+ 0x6e6b06e7,
-+ 0xfed41b76,
-+ 0x89d32be0,
-+ 0x10da7a5a,
-+ 0x67dd4acc,
-+ 0xf9b9df6f,
-+ 0x8ebeeff9,
-+ 0x17b7be43,
-+ 0x60b08ed5,
-+ 0xd6d6a3e8,
-+ 0xa1d1937e,
-+ 0x38d8c2c4,
-+ 0x4fdff252,
-+ 0xd1bb67f1,
-+ 0xa6bc5767,
-+ 0x3fb506dd,
-+ 0x48b2364b,
-+ 0xd80d2bda,
-+ 0xaf0a1b4c,
-+ 0x36034af6,
-+ 0x41047a60,
-+ 0xdf60efc3,
-+ 0xa867df55,
-+ 0x316e8eef,
-+ 0x4669be79,
-+ 0xcb61b38c,
-+ 0xbc66831a,
-+ 0x256fd2a0,
-+ 0x5268e236,
-+ 0xcc0c7795,
-+ 0xbb0b4703,
-+ 0x220216b9,
-+ 0x5505262f,
-+ 0xc5ba3bbe,
-+ 0xb2bd0b28,
-+ 0x2bb45a92,
-+ 0x5cb36a04,
-+ 0xc2d7ffa7,
-+ 0xb5d0cf31,
-+ 0x2cd99e8b,
-+ 0x5bdeae1d,
-+ 0x9b64c2b0,
-+ 0xec63f226,
-+ 0x756aa39c,
-+ 0x026d930a,
-+ 0x9c0906a9,
-+ 0xeb0e363f,
-+ 0x72076785,
-+ 0x05005713,
-+ 0x95bf4a82,
-+ 0xe2b87a14,
-+ 0x7bb12bae,
-+ 0x0cb61b38,
-+ 0x92d28e9b,
-+ 0xe5d5be0d,
-+ 0x7cdcefb7,
-+ 0x0bdbdf21,
-+ 0x86d3d2d4,
-+ 0xf1d4e242,
-+ 0x68ddb3f8,
-+ 0x1fda836e,
-+ 0x81be16cd,
-+ 0xf6b9265b,
-+ 0x6fb077e1,
-+ 0x18b74777,
-+ 0x88085ae6,
-+ 0xff0f6a70,
-+ 0x66063bca,
-+ 0x11010b5c,
-+ 0x8f659eff,
-+ 0xf862ae69,
-+ 0x616bffd3,
-+ 0x166ccf45,
-+ 0xa00ae278,
-+ 0xd70dd2ee,
-+ 0x4e048354,
-+ 0x3903b3c2,
-+ 0xa7672661,
-+ 0xd06016f7,
-+ 0x4969474d,
-+ 0x3e6e77db,
-+ 0xaed16a4a,
-+ 0xd9d65adc,
-+ 0x40df0b66,
-+ 0x37d83bf0,
-+ 0xa9bcae53,
-+ 0xdebb9ec5,
-+ 0x47b2cf7f,
-+ 0x30b5ffe9,
-+ 0xbdbdf21c,
-+ 0xcabac28a,
-+ 0x53b39330,
-+ 0x24b4a3a6,
-+ 0xbad03605,
-+ 0xcdd70693,
-+ 0x54de5729,
-+ 0x23d967bf,
-+ 0xb3667a2e,
-+ 0xc4614ab8,
-+ 0x5d681b02,
-+ 0x2a6f2b94,
-+ 0xb40bbe37,
-+ 0xc30c8ea1,
-+ 0x5a05df1b,
-+ 0x2d02ef8d
-+};
-+
-+
-+#define GET_MAC_ADDR_CRC(addr, crc) \
-+{ \
-+ uint32_t i; \
-+ uint8_t data; \
-+ \
-+ /* CRC calculation */ \
-+ crc = 0xffffffff; \
-+ for (i=0; i < 6; i++) \
-+ { \
-+ data = (uint8_t)(addr >> ((5-i)*8)); \
-+ crc = crc^data; \
-+ crc = crc_table[crc&0xff] ^ (crc>>8); \
-+ } \
-+} \
-+
-+/* Define a helper for getting the bit-mirrored value of */
-+/* a byte-sized number (e.g. binary 11010011 --> 11001011). */
-+/* Sometimes the mirrored value of the CRC is required. */
-+static __inline__ uint8_t GetMirror(uint8_t n)
-+{
-+ uint8_t mirror[16] =
-+ {
-+ 0x00,
-+ 0x08,
-+ 0x04,
-+ 0x0c,
-+ 0x02,
-+ 0x0a,
-+ 0x06,
-+ 0x0e,
-+ 0x01,
-+ 0x09,
-+ 0x05,
-+ 0x0d,
-+ 0x03,
-+ 0x0b,
-+ 0x07,
-+ 0x0f
-+ };
-+ return ((uint8_t)(((mirror[n & 0x0f] << 4) | (mirror[n >> 4]))));
-+}
-+
-+static __inline__ uint32_t GetMirror32(uint32_t n)
-+{
-+ return (((uint32_t)GetMirror((uint8_t)(n))<<24) |
-+ ((uint32_t)GetMirror((uint8_t)(n>>8))<<16) |
-+ ((uint32_t)GetMirror((uint8_t)(n>>16))<<8) |
-+ ((uint32_t)GetMirror((uint8_t)(n>>24))));
-+}
-+
-+#define MIRROR GetMirror
-+#define MIRROR_32 GetMirror32
-+
-+
-+#endif /* __crc_mac_addr_ext_h */
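As a reading aid, a minimal sketch of how the CRC table and macros above fit together; mac_addr_crc is an illustrative name (not part of the SDK) and it assumes the 48-bit MAC address is packed MSB-first into the low six bytes of a uint64_t, which is how GET_MAC_ADDR_CRC indexes it:

#include "crc_mac_addr_ext.h"   /* GET_MAC_ADDR_CRC, MIRROR_32 (pulls in std_ext.h) */

/* Illustrative only: compute the CRC-32 of a 48-bit MAC address held in the
 * low six bytes of 'addr' (most significant byte first). */
static uint32_t mac_addr_crc(uint64_t addr)
{
    uint32_t crc, mirrored;

    GET_MAC_ADDR_CRC(addr, crc);

    /* Per the header comment, some users need the bit-mirrored CRC as well */
    mirrored = MIRROR_32(crc);
    (void)mirrored;

    return crc;
}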
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/dpaa_ext.h
-@@ -0,0 +1,210 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File dpaa_ext.h
-+
-+ @Description DPAA Application Programming Interface.
-+*//***************************************************************************/
-+#ifndef __DPAA_EXT_H
-+#define __DPAA_EXT_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group DPAA_grp Data Path Acceleration Architecture API
-+
-+ @Description DPAA API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Description Frame descriptor
-+*//***************************************************************************/
-+typedef _Packed struct t_DpaaFD {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ volatile uint8_t liodn;
-+ volatile uint8_t bpid;
-+ volatile uint8_t elion;
-+ volatile uint8_t addrh;
-+ volatile uint32_t addrl;
-+#else
-+ volatile uint32_t addrl;
-+ volatile uint8_t addrh;
-+ volatile uint8_t elion;
-+ volatile uint8_t bpid;
-+ volatile uint8_t liodn;
-+#endif
-+ volatile uint32_t length; /**< Frame length */
-+ volatile uint32_t status; /**< FD status */
-+} _PackedType t_DpaaFD;
-+
-+/**************************************************************************//**
-+ @Description enum for defining frame format
-+*//***************************************************************************/
-+typedef enum e_DpaaFDFormatType {
-+ e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF = 0x0, /**< Simple frame Single buffer; Offset and
-+ small length (9b OFFSET, 20b LENGTH) */
-+ e_DPAA_FD_FORMAT_TYPE_LONG_SBSF = 0x2, /**< Simple frame, single buffer; big length
-+ (29b LENGTH ,No OFFSET) */
-+ e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF = 0x4, /**< Simple frame, Scatter Gather table; Offset
-+ and small length (9b OFFSET, 20b LENGTH) */
-+ e_DPAA_FD_FORMAT_TYPE_LONG_MBSF = 0x6, /**< Simple frame, Scatter Gather table;
-+ big length (29b LENGTH ,No OFFSET) */
-+ e_DPAA_FD_FORMAT_TYPE_COMPOUND = 0x1, /**< Compound Frame (29b CONGESTION-WEIGHT
-+ No LENGTH or OFFSET) */
-+ e_DPAA_FD_FORMAT_TYPE_DUMMY
-+} e_DpaaFDFormatType;
-+
-+/**************************************************************************//**
-+ @Collection Frame descriptor macros
-+*//***************************************************************************/
-+#define DPAA_FD_DD_MASK 0xc0000000 /**< FD DD field mask */
-+#define DPAA_FD_PID_MASK 0x3f000000 /**< FD PID field mask */
-+#define DPAA_FD_ELIODN_MASK 0x0000f000 /**< FD ELIODN field mask */
-+#define DPAA_FD_BPID_MASK 0x00ff0000 /**< FD BPID field mask */
-+#define DPAA_FD_ADDRH_MASK 0x000000ff /**< FD ADDRH field mask */
-+#define DPAA_FD_ADDRL_MASK 0xffffffff /**< FD ADDRL field mask */
-+#define DPAA_FD_FORMAT_MASK 0xe0000000 /**< FD FORMAT field mask */
-+#define DPAA_FD_OFFSET_MASK 0x1ff00000 /**< FD OFFSET field mask */
-+#define DPAA_FD_LENGTH_MASK 0x000fffff /**< FD LENGTH field mask */
-+
-+#define DPAA_FD_GET_ADDRH(fd) ((t_DpaaFD *)fd)->addrh /**< Macro to get FD ADDRH field */
-+#define DPAA_FD_GET_ADDRL(fd) ((t_DpaaFD *)fd)->addrl /**< Macro to get FD ADDRL field */
-+#define DPAA_FD_GET_PHYS_ADDR(fd) ((physAddress_t)(((uint64_t)DPAA_FD_GET_ADDRH(fd) << 32) | (uint64_t)DPAA_FD_GET_ADDRL(fd))) /**< Macro to get FD ADDR field */
-+#define DPAA_FD_GET_FORMAT(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_FORMAT_MASK) >> (31-2)) /**< Macro to get FD FORMAT field */
-+#define DPAA_FD_GET_OFFSET(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_OFFSET_MASK) >> (31-11)) /**< Macro to get FD OFFSET field */
-+#define DPAA_FD_GET_LENGTH(fd) (((t_DpaaFD *)fd)->length & DPAA_FD_LENGTH_MASK) /**< Macro to get FD LENGTH field */
-+#define DPAA_FD_GET_STATUS(fd) ((t_DpaaFD *)fd)->status /**< Macro to get FD STATUS field */
-+#define DPAA_FD_GET_ADDR(fd) XX_PhysToVirt(DPAA_FD_GET_PHYS_ADDR(fd)) /**< Macro to get FD ADDR (virtual) */
-+
-+#define DPAA_FD_SET_ADDRH(fd,val) ((t_DpaaFD *)fd)->addrh = (val) /**< Macro to set FD ADDRH field */
-+#define DPAA_FD_SET_ADDRL(fd,val) ((t_DpaaFD *)fd)->addrl = (val) /**< Macro to set FD ADDRL field */
-+#define DPAA_FD_SET_ADDR(fd,val) \
-+do { \
-+ uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \
-+ DPAA_FD_SET_ADDRH(fd, ((uint32_t)(physAddr >> 32))); \
-+ DPAA_FD_SET_ADDRL(fd, (uint32_t)physAddr); \
-+} while (0) /**< Macro to set FD ADDR field */
-+#define DPAA_FD_SET_FORMAT(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_FORMAT_MASK) | (((val) << (31-2))& DPAA_FD_FORMAT_MASK))) /**< Macro to set FD FORMAT field */
-+#define DPAA_FD_SET_OFFSET(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_OFFSET_MASK) | (((val) << (31-11))& DPAA_FD_OFFSET_MASK) )) /**< Macro to set FD OFFSET field */
-+#define DPAA_FD_SET_LENGTH(fd,val) (((t_DpaaFD *)fd)->length = (((t_DpaaFD *)fd)->length & ~DPAA_FD_LENGTH_MASK) | ((val) & DPAA_FD_LENGTH_MASK)) /**< Macro to set FD LENGTH field */
-+#define DPAA_FD_SET_STATUS(fd,val) ((t_DpaaFD *)fd)->status = (val) /**< Macro to set FD STATUS field */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description Frame Scatter/Gather Table Entry
-+*//***************************************************************************/
-+typedef _Packed struct t_DpaaSGTE {
-+ volatile uint32_t addrh; /**< Buffer Address high */
-+ volatile uint32_t addrl; /**< Buffer Address low */
-+ volatile uint32_t length; /**< Buffer length */
-+ volatile uint32_t offset; /**< SGTE offset */
-+} _PackedType t_DpaaSGTE;
-+
-+#define DPAA_NUM_OF_SG_TABLE_ENTRY 16
-+
-+/**************************************************************************//**
-+ @Description Frame Scatter/Gather Table
-+*//***************************************************************************/
-+typedef _Packed struct t_DpaaSGT {
-+ t_DpaaSGTE tableEntry[DPAA_NUM_OF_SG_TABLE_ENTRY];
-+ /**< Structure that holds information about
-+ a single S/G entry. */
-+} _PackedType t_DpaaSGT;
-+
-+/**************************************************************************//**
-+ @Description Compound Frame Table
-+*//***************************************************************************/
-+typedef _Packed struct t_DpaaCompTbl {
-+ t_DpaaSGTE outputBuffInfo; /**< Structure that holds information about
-+ the compound-frame output buffer;
-+ NOTE: this may point to a S/G table */
-+ t_DpaaSGTE inputBuffInfo; /**< Structure that holds information about
-+ the compound-frame input buffer;
-+ NOTE: this may point to a S/G table */
-+} _PackedType t_DpaaCompTbl;
-+
-+/**************************************************************************//**
-+ @Collection Frame Scatter/Gather Table Entry macros
-+*//***************************************************************************/
-+#define DPAA_SGTE_ADDRH_MASK 0x000000ff /**< SGTE ADDRH field mask */
-+#define DPAA_SGTE_ADDRL_MASK 0xffffffff /**< SGTE ADDRL field mask */
-+#define DPAA_SGTE_E_MASK 0x80000000 /**< SGTE Extension field mask */
-+#define DPAA_SGTE_F_MASK 0x40000000 /**< SGTE Final field mask */
-+#define DPAA_SGTE_LENGTH_MASK 0x3fffffff /**< SGTE LENGTH field mask */
-+#define DPAA_SGTE_BPID_MASK 0x00ff0000 /**< SGTE BPID field mask */
-+#define DPAA_SGTE_OFFSET_MASK 0x00001fff /**< SGTE OFFSET field mask */
-+
-+#define DPAA_SGTE_GET_ADDRH(sgte) (((t_DpaaSGTE *)sgte)->addrh & DPAA_SGTE_ADDRH_MASK) /**< Macro to get SGTE ADDRH field */
-+#define DPAA_SGTE_GET_ADDRL(sgte) ((t_DpaaSGTE *)sgte)->addrl /**< Macro to get SGTE ADDRL field */
-+#define DPAA_SGTE_GET_PHYS_ADDR(sgte) ((physAddress_t)(((uint64_t)DPAA_SGTE_GET_ADDRH(sgte) << 32) | (uint64_t)DPAA_SGTE_GET_ADDRL(sgte))) /**< Macro to get FD ADDR field */
-+#define DPAA_SGTE_GET_EXTENSION(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_E_MASK) >> (31-0)) /**< Macro to get SGTE EXTENSION field */
-+#define DPAA_SGTE_GET_FINAL(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_F_MASK) >> (31-1)) /**< Macro to get SGTE FINAL field */
-+#define DPAA_SGTE_GET_LENGTH(sgte) (((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_LENGTH_MASK) /**< Macro to get SGTE LENGTH field */
-+#define DPAA_SGTE_GET_BPID(sgte) ((((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_BPID_MASK) >> (31-15)) /**< Macro to get SGTE BPID field */
-+#define DPAA_SGTE_GET_OFFSET(sgte) (((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_OFFSET_MASK) /**< Macro to get SGTE OFFSET field */
-+#define DPAA_SGTE_GET_ADDR(sgte) XX_PhysToVirt(DPAA_SGTE_GET_PHYS_ADDR(sgte))
-+
-+#define DPAA_SGTE_SET_ADDRH(sgte,val) (((t_DpaaSGTE *)sgte)->addrh = ((((t_DpaaSGTE *)sgte)->addrh & ~DPAA_SGTE_ADDRH_MASK) | ((val) & DPAA_SGTE_ADDRH_MASK))) /**< Macro to set SGTE ADDRH field */
-+#define DPAA_SGTE_SET_ADDRL(sgte,val) ((t_DpaaSGTE *)sgte)->addrl = (val) /**< Macro to set SGTE ADDRL field */
-+#define DPAA_SGTE_SET_ADDR(sgte,val) \
-+do { \
-+ uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \
-+ DPAA_SGTE_SET_ADDRH(sgte, ((uint32_t)(physAddr >> 32))); \
-+ DPAA_SGTE_SET_ADDRL(sgte, (uint32_t)physAddr); \
-+} while (0) /**< Macro to set SGTE ADDR field */
-+#define DPAA_SGTE_SET_EXTENSION(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_E_MASK) | (((val) << (31-0))& DPAA_SGTE_E_MASK))) /**< Macro to set SGTE EXTENSION field */
-+#define DPAA_SGTE_SET_FINAL(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_F_MASK) | (((val) << (31-1))& DPAA_SGTE_F_MASK))) /**< Macro to set SGTE FINAL field */
-+#define DPAA_SGTE_SET_LENGTH(sgte,val) (((t_DpaaSGTE *)sgte)->length = (((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_LENGTH_MASK) | ((val) & DPAA_SGTE_LENGTH_MASK)) /**< Macro to set SGTE LENGTH field */
-+#define DPAA_SGTE_SET_BPID(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_BPID_MASK) | (((val) << (31-15))& DPAA_SGTE_BPID_MASK))) /**< Macro to set SGTE BPID field */
-+#define DPAA_SGTE_SET_OFFSET(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_OFFSET_MASK) | (((val) << (31-31))& DPAA_SGTE_OFFSET_MASK) )) /**< Macro to set SGTE OFFSET field */
-+/* @} */
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+#define DPAA_LIODN_DONT_OVERRIDE (-1)
-+
-+/** @} */ /* end of DPAA_grp group */
-+
-+
-+#endif /* __DPAA_EXT_H */
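Purely as an illustration of the accessor macros above (the function name and parameters are hypothetical), a sketch that fills a simple single-buffer frame descriptor and reads a couple of fields back:

#include "dpaa_ext.h"   /* t_DpaaFD and the DPAA_FD_* macros */

/* Illustrative only: FORMAT, OFFSET and LENGTH together cover the whole
 * 'length' word, so the three setters leave no stale bits behind.
 * BPID/LIODN handling is omitted here. */
static void build_fd_example(t_DpaaFD *fd, uint8_t addr_high, uint32_t addr_low,
                             uint32_t frame_len, uint32_t offset)
{
    DPAA_FD_SET_ADDRH(fd, addr_high);
    DPAA_FD_SET_ADDRL(fd, addr_low);
    DPAA_FD_SET_FORMAT(fd, e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF); /* single buffer, 9b offset / 20b length */
    DPAA_FD_SET_OFFSET(fd, offset);
    DPAA_FD_SET_LENGTH(fd, frame_len);
    DPAA_FD_SET_STATUS(fd, 0);

    /* Read-back, e.g. for tracing */
    (void)DPAA_FD_GET_PHYS_ADDR(fd);
    (void)DPAA_FD_GET_LENGTH(fd);
}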
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_ext.h
-@@ -0,0 +1,1731 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_ext.h
-+
-+ @Description FM Application Programming Interface.
-+*//***************************************************************************/
-+#ifndef __FM_EXT
-+#define __FM_EXT
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "dpaa_ext.h"
-+#include "fsl_fman_sp.h"
-+
-+/**************************************************************************//**
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_lib_grp FM library
-+
-+ @Description FM API functions, definitions and enums.
-+
-+ The FM module is the main driver module and is mandatory for FM
-+ driver users. This module must be initialized first, prior to any
-+ other driver modules.
-+ The FM is a "singleton" module. It is responsible for the common
-+ HW modules: FPM, DMA, common QMI and common BMI initialization and
-+ run-time control routines. This module must always be initialized
-+ when working with any of the FM modules.
-+ NOTE - We assume that the FM library will be initialized only by core No. 0!
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Enum for defining port types
-+*//***************************************************************************/
-+typedef enum e_FmPortType {
-+ e_FM_PORT_TYPE_OH_OFFLINE_PARSING = 0, /**< Offline parsing port */
-+ e_FM_PORT_TYPE_RX, /**< 1G Rx port */
-+ e_FM_PORT_TYPE_RX_10G, /**< 10G Rx port */
-+ e_FM_PORT_TYPE_TX, /**< 1G Tx port */
-+ e_FM_PORT_TYPE_TX_10G, /**< 10G Tx port */
-+ e_FM_PORT_TYPE_DUMMY
-+} e_FmPortType;
-+
-+/**************************************************************************//**
-+ @Collection General FM defines
-+*//***************************************************************************/
-+#define FM_MAX_NUM_OF_PARTITIONS 64 /**< Maximum number of partitions */
-+#define FM_PHYS_ADDRESS_SIZE 6 /**< FM Physical address size */
-+/* @} */
-+
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(push,1)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Description FM physical Address
-+*//***************************************************************************/
-+typedef _Packed struct t_FmPhysAddr {
-+ volatile uint8_t high; /**< High part of the physical address */
-+ volatile uint32_t low; /**< Low part of the physical address */
-+} _PackedType t_FmPhysAddr;
-+
-+/**************************************************************************//**
-+ @Description Parse results memory layout
-+*//***************************************************************************/
-+typedef _Packed struct t_FmPrsResult {
-+ volatile uint8_t lpid; /**< Logical port id */
-+ volatile uint8_t shimr; /**< Shim header result */
-+ volatile uint16_t l2r; /**< Layer 2 result */
-+ volatile uint16_t l3r; /**< Layer 3 result */
-+ volatile uint8_t l4r; /**< Layer 4 result */
-+ volatile uint8_t cplan; /**< Classification plan id */
-+ volatile uint16_t nxthdr; /**< Next Header */
-+ volatile uint16_t cksum; /**< Running-sum */
-+ volatile uint16_t flags_frag_off; /**< Flags & fragment-offset field of the last IP-header */
-+ volatile uint8_t route_type; /**< Routing type field of an IPv6 routing extension header */
-+ volatile uint8_t rhp_ip_valid; /**< Routing Extension Header Present; last bit is IP valid */
-+ volatile uint8_t shim_off[2]; /**< Shim offset */
-+ volatile uint8_t ip_pid_off; /**< IP PID (last IP-proto) offset */
-+ volatile uint8_t eth_off; /**< ETH offset */
-+ volatile uint8_t llc_snap_off; /**< LLC_SNAP offset */
-+ volatile uint8_t vlan_off[2]; /**< VLAN offset */
-+ volatile uint8_t etype_off; /**< ETYPE offset */
-+ volatile uint8_t pppoe_off; /**< PPP offset */
-+ volatile uint8_t mpls_off[2]; /**< MPLS offset */
-+ volatile uint8_t ip_off[2]; /**< IP offset */
-+ volatile uint8_t gre_off; /**< GRE offset */
-+ volatile uint8_t l4_off; /**< Layer 4 offset */
-+ volatile uint8_t nxthdr_off; /**< Parser end point */
-+} _PackedType t_FmPrsResult;
-+
-+/**************************************************************************//**
-+ @Collection FM Parser results
-+*//***************************************************************************/
-+#define FM_PR_L2_VLAN_STACK 0x00000100 /**< Parse Result: VLAN stack */
-+#define FM_PR_L2_ETHERNET 0x00008000 /**< Parse Result: Ethernet*/
-+#define FM_PR_L2_VLAN 0x00004000 /**< Parse Result: VLAN */
-+#define FM_PR_L2_LLC_SNAP 0x00002000 /**< Parse Result: LLC_SNAP */
-+#define FM_PR_L2_MPLS 0x00001000 /**< Parse Result: MPLS */
-+#define FM_PR_L2_PPPoE 0x00000800 /**< Parse Result: PPPoE */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection FM Frame descriptor macros
-+*//***************************************************************************/
-+#define FM_FD_CMD_FCO 0x80000000 /**< Frame queue Context Override */
-+#define FM_FD_CMD_RPD 0x40000000 /**< Read Prepended Data */
-+#define FM_FD_CMD_UPD 0x20000000 /**< Update Prepended Data */
-+#define FM_FD_CMD_DTC 0x10000000 /**< Do L4 Checksum */
-+#define FM_FD_CMD_DCL4C 0x10000000 /**< Didn't calculate L4 Checksum */
-+#define FM_FD_CMD_CFQ 0x00ffffff /**< Confirmation Frame Queue */
-+
-+#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000 /**< Not for Rx-Port! Unsupported Format */
-+#define FM_FD_ERR_LENGTH 0x02000000 /**< Not for Rx-Port! Length Error */
-+#define FM_FD_ERR_DMA 0x01000000 /**< DMA Data error */
-+
-+#define FM_FD_IPR 0x00000001 /**< IPR frame (not error) */
-+
-+#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR) /**< IPR non-consistent-sp */
-+#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR) /**< IPR error */
-+#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR) /**< IPR timeout */
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+#define FM_FD_ERR_CRE 0x00200000
-+#define FM_FD_ERR_CHE 0x00100000
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+#define FM_FD_ERR_PHYSICAL 0x00080000 /**< Rx FIFO overflow, FCS error, code error, running disparity
-+ error (SGMII and TBI modes), FIFO parity error. PHY
-+ Sequence error, PHY error control character detected. */
-+#define FM_FD_ERR_SIZE 0x00040000 /**< Frame too long OR Frame size exceeds max_length_frame */
-+#define FM_FD_ERR_CLS_DISCARD 0x00020000 /**< classification discard */
-+#define FM_FD_ERR_EXTRACTION 0x00008000 /**< Extract Out of Frame */
-+#define FM_FD_ERR_NO_SCHEME 0x00004000 /**< No Scheme Selected */
-+#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000 /**< Keysize Overflow */
-+#define FM_FD_ERR_COLOR_RED 0x00000800 /**< Frame color is red */
-+#define FM_FD_ERR_COLOR_YELLOW 0x00000400 /**< Frame color is yellow */
-+#define FM_FD_ERR_ILL_PLCR 0x00000200 /**< Illegal Policer Profile selected */
-+#define FM_FD_ERR_PLCR_FRAME_LEN 0x00000100 /**< Policer frame length error */
-+#define FM_FD_ERR_PRS_TIMEOUT 0x00000080 /**< Parser Time out Exceed */
-+#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040 /**< Invalid Soft Parser instruction */
-+#define FM_FD_ERR_PRS_HDR_ERR 0x00000020 /**< Header error was identified during parsing */
-+#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008 /**< Frame parsed beyond the first 256 bytes */
-+
-+#define FM_FD_TX_STATUS_ERR_MASK (FM_FD_ERR_UNSUPPORTED_FORMAT | \
-+ FM_FD_ERR_LENGTH | \
-+ FM_FD_ERR_DMA) /**< TX Error FD bits */
-+
-+#define FM_FD_RX_STATUS_ERR_MASK (FM_FD_ERR_UNSUPPORTED_FORMAT | \
-+ FM_FD_ERR_LENGTH | \
-+ FM_FD_ERR_DMA | \
-+ FM_FD_ERR_IPR | \
-+ FM_FD_ERR_IPR_TO | \
-+ FM_FD_ERR_IPR_NCSP | \
-+ FM_FD_ERR_PHYSICAL | \
-+ FM_FD_ERR_SIZE | \
-+ FM_FD_ERR_CLS_DISCARD | \
-+ FM_FD_ERR_COLOR_RED | \
-+ FM_FD_ERR_COLOR_YELLOW | \
-+ FM_FD_ERR_ILL_PLCR | \
-+ FM_FD_ERR_PLCR_FRAME_LEN | \
-+ FM_FD_ERR_EXTRACTION | \
-+ FM_FD_ERR_NO_SCHEME | \
-+ FM_FD_ERR_KEYSIZE_OVERFLOW | \
-+ FM_FD_ERR_PRS_TIMEOUT | \
-+ FM_FD_ERR_PRS_ILL_INSTRUCT | \
-+ FM_FD_ERR_PRS_HDR_ERR | \
-+ FM_FD_ERR_BLOCK_LIMIT_EXCEEDED) /**< RX Error FD bits */
-+
-+#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000 /**< non Frame-Manager error */
-+/* @} */
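For orientation, a minimal illustrative check of a received frame descriptor's status word against the Rx error bits defined above; fd_rx_has_error is a hypothetical name, and fm_ext.h is assumed to be in scope (it pulls in dpaa_ext.h for t_DpaaFD):

#include "fm_ext.h"   /* FM_FD_RX_STATUS_ERR_* masks; includes dpaa_ext.h for t_DpaaFD */

/* Illustrative only: non-zero when any Rx error bit is set in the FD status. */
static int fd_rx_has_error(t_DpaaFD *fd)
{
    uint32_t status = DPAA_FD_GET_STATUS(fd);

    return (status & (FM_FD_RX_STATUS_ERR_MASK | FM_FD_RX_STATUS_ERR_NON_FM)) != 0;
}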
-+
-+/**************************************************************************//**
-+ @Description Context A
-+*//***************************************************************************/
-+typedef _Packed struct t_FmContextA {
-+ volatile uint32_t command; /**< ContextA Command */
-+ volatile uint8_t res0[4]; /**< ContextA Reserved bits */
-+} _PackedType t_FmContextA;
-+
-+/**************************************************************************//**
-+ @Description Context B
-+*//***************************************************************************/
-+typedef uint32_t t_FmContextB;
-+
-+/**************************************************************************//**
-+ @Collection Special Operation options
-+*//***************************************************************************/
-+typedef uint32_t fmSpecialOperations_t; /**< typedef for defining Special Operation options */
-+
-+#define FM_SP_OP_IPSEC 0x80000000 /**< activate features related to IPSec (e.g. fix Eth-type) */
-+#define FM_SP_OP_IPSEC_UPDATE_UDP_LEN 0x40000000 /**< update the UDP-Len after Encryption */
-+#define FM_SP_OP_IPSEC_MANIP 0x20000000 /**< handle the IPSec-manip options */
-+#define FM_SP_OP_RPD 0x10000000 /**< Set the RPD bit */
-+#define FM_SP_OP_DCL4C 0x08000000 /**< Set the DCL4C bit */
-+#define FM_SP_OP_CHECK_SEC_ERRORS 0x04000000 /**< Check SEC errors */
-+#define FM_SP_OP_CLEAR_RPD 0x02000000 /**< Clear the RPD bit */
-+#define FM_SP_OP_CAPWAP_DTLS_ENC 0x01000000 /**< activate features related to CAPWAP-DTLS post Encryption */
-+#define FM_SP_OP_CAPWAP_DTLS_DEC 0x00800000 /**< activate features related to CAPWAP-DTLS post Decryption */
-+#define FM_SP_OP_IPSEC_NO_ETH_HDR 0x00400000 /**< activate features related to IPSec without Eth hdr */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection Context A macros
-+*//***************************************************************************/
-+#define FM_CONTEXTA_OVERRIDE_MASK 0x80000000
-+#define FM_CONTEXTA_ICMD_MASK 0x40000000
-+#define FM_CONTEXTA_A1_VALID_MASK 0x20000000
-+#define FM_CONTEXTA_MACCMD_MASK 0x00ff0000
-+#define FM_CONTEXTA_MACCMD_VALID_MASK 0x00800000
-+#define FM_CONTEXTA_MACCMD_SECURED_MASK 0x00100000
-+#define FM_CONTEXTA_MACCMD_SC_MASK 0x000f0000
-+#define FM_CONTEXTA_A1_MASK 0x0000ffff
-+
-+#define FM_CONTEXTA_GET_OVERRIDE(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_OVERRIDE_MASK) >> (31-0))
-+#define FM_CONTEXTA_GET_ICMD(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_ICMD_MASK) >> (31-1))
-+#define FM_CONTEXTA_GET_A1_VALID(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_A1_VALID_MASK) >> (31-2))
-+#define FM_CONTEXTA_GET_A1(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_A1_MASK) >> (31-31))
-+#define FM_CONTEXTA_GET_MACCMD(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_MASK) >> (31-15))
-+#define FM_CONTEXTA_GET_MACCMD_VALID(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_VALID_MASK) >> (31-8))
-+#define FM_CONTEXTA_GET_MACCMD_SECURED(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_SECURED_MASK) >> (31-11))
-+#define FM_CONTEXTA_GET_MACCMD_SECURE_CHANNEL(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_SC_MASK) >> (31-15))
-+
-+#define FM_CONTEXTA_SET_OVERRIDE(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_OVERRIDE_MASK) | (((uint32_t)(val) << (31-0)) & FM_CONTEXTA_OVERRIDE_MASK) ))
-+#define FM_CONTEXTA_SET_ICMD(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_ICMD_MASK) | (((val) << (31-1)) & FM_CONTEXTA_ICMD_MASK) ))
-+#define FM_CONTEXTA_SET_A1_VALID(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_A1_VALID_MASK) | (((val) << (31-2)) & FM_CONTEXTA_A1_VALID_MASK) ))
-+#define FM_CONTEXTA_SET_A1(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_A1_MASK) | (((val) << (31-31)) & FM_CONTEXTA_A1_MASK) ))
-+#define FM_CONTEXTA_SET_MACCMD(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_MASK) | (((val) << (31-15)) & FM_CONTEXTA_MACCMD_MASK) ))
-+#define FM_CONTEXTA_SET_MACCMD_VALID(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_VALID_MASK) | (((val) << (31-8)) & FM_CONTEXTA_MACCMD_VALID_MASK) ))
-+#define FM_CONTEXTA_SET_MACCMD_SECURED(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_SECURED_MASK) | (((val) << (31-11)) & FM_CONTEXTA_MACCMD_SECURED_MASK) ))
-+#define FM_CONTEXTA_SET_MACCMD_SECURE_CHANNEL(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_SC_MASK) | (((val) << (31-15)) & FM_CONTEXTA_MACCMD_SC_MASK) ))
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection Context B macros
-+*//***************************************************************************/
-+#define FM_CONTEXTB_FQID_MASK 0x00ffffff
-+
-+#define FM_CONTEXTB_GET_FQID(contextB) (*((t_FmContextB *)contextB) & FM_CONTEXTB_FQID_MASK)
-+#define FM_CONTEXTB_SET_FQID(contextB,val) (*((t_FmContextB *)contextB) = ((*((t_FmContextB *)contextB) & ~FM_CONTEXTB_FQID_MASK) | ((val) & FM_CONTEXTB_FQID_MASK)))
-+/* @} */
-+
-+#if defined(__MWERKS__) && !defined(__GNUC__)
-+#pragma pack(pop)
-+#endif /* defined(__MWERKS__) && ... */
-+
-+
-+/**************************************************************************//**
-+ @Description FM Exceptions
-+*//***************************************************************************/
-+typedef enum e_FmExceptions {
-+ e_FM_EX_DMA_BUS_ERROR = 0, /**< DMA bus error. */
-+ e_FM_EX_DMA_READ_ECC, /**< Read Buffer ECC error (Valid for FM rev < 6)*/
-+ e_FM_EX_DMA_SYSTEM_WRITE_ECC, /**< Write Buffer ECC error on system side (Valid for FM rev < 6)*/
-+ e_FM_EX_DMA_FM_WRITE_ECC, /**< Write Buffer ECC error on FM side (Valid for FM rev < 6)*/
-+ e_FM_EX_DMA_SINGLE_PORT_ECC, /**< Single Port ECC error on FM side (Valid for FM rev > 6)*/
-+ e_FM_EX_FPM_STALL_ON_TASKS, /**< Stall of tasks on FPM */
-+ e_FM_EX_FPM_SINGLE_ECC, /**< Single ECC on FPM. */
-+ e_FM_EX_FPM_DOUBLE_ECC, /**< Double ECC error on FPM ram access */
-+ e_FM_EX_QMI_SINGLE_ECC, /**< Single ECC on QMI. */
-+ e_FM_EX_QMI_DOUBLE_ECC, /**< Double bit ECC occurred on QMI */
-+ e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/**< Dequeue from unknown port id */
-+ e_FM_EX_BMI_LIST_RAM_ECC, /**< Linked List RAM ECC error */
-+ e_FM_EX_BMI_STORAGE_PROFILE_ECC, /**< Storage Profile ECC Error */
-+ e_FM_EX_BMI_STATISTICS_RAM_ECC, /**< Statistics Count RAM ECC Error Enable */
-+ e_FM_EX_BMI_DISPATCH_RAM_ECC, /**< Dispatch RAM ECC Error Enable */
-+ e_FM_EX_IRAM_ECC, /**< Double bit ECC occurred on IRAM*/
-+ e_FM_EX_MURAM_ECC /**< Double bit ECC occurred on MURAM*/
-+} e_FmExceptions;
-+
-+/**************************************************************************//**
-+ @Description Enum for defining port DMA swap mode
-+*//***************************************************************************/
-+typedef enum e_FmDmaSwapOption {
-+ e_FM_DMA_NO_SWP = FMAN_DMA_NO_SWP, /**< No swap, transfer data as is.*/
-+ e_FM_DMA_SWP_PPC_LE = FMAN_DMA_SWP_PPC_LE, /**< The transferred data should be swapped
-+ in PowerPC Little Endian mode. */
-+ e_FM_DMA_SWP_BE = FMAN_DMA_SWP_BE /**< The transferred data should be swapped
-+ in Big Endian mode */
-+} e_FmDmaSwapOption;
-+
-+/**************************************************************************//**
-+ @Description Enum for defining port DMA cache attributes
-+*//***************************************************************************/
-+typedef enum e_FmDmaCacheOption {
-+ e_FM_DMA_NO_STASH = FMAN_DMA_NO_STASH, /**< Cacheable, no Allocate (No Stashing) */
-+ e_FM_DMA_STASH = FMAN_DMA_STASH /**< Cacheable and Allocate (Stashing on) */
-+} e_FmDmaCacheOption;
-+
-+
-+/**************************************************************************//**
-+ @Group FM_init_grp FM Initialization Unit
-+
-+ @Description FM Initialization Unit
-+
-+ Initialization Flow
-+ Initialization of the FM Module will be carried out by the application
-+ according to the following sequence (a minimal usage sketch follows
-+ the FM_Config/FM_Init/FM_Free declarations below):
-+ - Calling the configuration routine with basic parameters.
-+ - Calling the advanced configuration routines to change the driver's defaults.
-+ - Calling the initialization routine.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function t_FmExceptionsCallback
-+
-+ @Description Exceptions user callback routine, will be called upon an
-+ exception passing the exception identification.
-+
-+ @Param[in] h_App - User's application descriptor.
-+ @Param[in] exception - The exception.
-+*//***************************************************************************/
-+typedef void (t_FmExceptionsCallback)(t_Handle h_App,
-+ e_FmExceptions exception);
-+
-+
-+/**************************************************************************//**
-+ @Function t_FmBusErrorCallback
-+
-+ @Description Bus error user callback routine, will be called upon a
-+ bus error, passing parameters describing the errors and the owner.
-+
-+ @Param[in] h_App - User's application descriptor.
-+ @Param[in] portType - Port type (e_FmPortType)
-+ @Param[in] portId - Port id - relative to type.
-+ @Param[in] addr - Address that caused the error
-+ @Param[in] tnum - Owner of error
-+ @Param[in] liodn - Logical IO device number
-+*//***************************************************************************/
-+typedef void (t_FmBusErrorCallback) (t_Handle h_App,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint64_t addr,
-+ uint8_t tnum,
-+ uint16_t liodn);
-+
-+/**************************************************************************//**
-+ @Description A structure for defining buffer prefix area content.
-+*//***************************************************************************/
-+typedef struct t_FmBufferPrefixContent {
-+ uint16_t privDataSize; /**< Number of bytes to be left at the beginning
-+ of the external buffer; Note that the private-area will
-+ start from the base of the buffer address. */
-+ bool passPrsResult; /**< TRUE to pass the parse result to/from the FM;
-+ User may use FM_PORT_GetBufferPrsResult() in order to
-+ get the parser-result from a buffer. */
-+ bool passTimeStamp; /**< TRUE to pass the timeStamp to/from the FM
-+ User may use FM_PORT_GetBufferTimeStamp() in order to
-+ get the time-stamp from a buffer. */
-+ bool passHashResult; /**< TRUE to pass the KG hash result to/from the FM
-+ User may use FM_PORT_GetBufferHashResult() in order to
-+ get the hash-result from a buffer. */
-+ bool passAllOtherPCDInfo;/**< Add all other Internal-Context information:
-+ AD, hash-result, key, etc. */
-+ uint16_t dataAlign; /**< 0 to use driver's default alignment [DEFAULT_FM_SP_bufferPrefixContent_dataAlign],
-+ other value for selecting a data alignment (must be a power of 2);
-+ if write optimization is used, must be >= 16. */
-+ uint8_t manipExtraSpace; /**< Maximum extra size needed (insertion-size minus removal-size);
-+ Note that this field impacts the size of the buffer-prefix
-+ (i.e. it pushes the data offset);
-+ This field is irrelevant if DPAA_VERSION==10 */
-+} t_FmBufferPrefixContent;
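As a small illustration of this structure, a sketch that requests parse results and time stamps in the buffer prefix; fill_buf_prefix and all values are placeholders, and TRUE/FALSE are assumed to come from the SDK's std_ext.h environment:

#include "fm_ext.h"   /* t_FmBufferPrefixContent */

/* Illustrative only: 64 bytes of private data, parse result and time stamp
 * passed in the prefix, 64-byte data alignment. */
static void fill_buf_prefix(t_FmBufferPrefixContent *prefix)
{
    prefix->privDataSize        = 64;
    prefix->passPrsResult       = TRUE;
    prefix->passTimeStamp       = TRUE;
    prefix->passHashResult      = FALSE;
    prefix->passAllOtherPCDInfo = FALSE;
    prefix->dataAlign           = 64;  /* power of 2; 0 selects the driver default */
    prefix->manipExtraSpace     = 0;
}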
-+
-+/**************************************************************************//**
-+ @Description A structure of information about each of the external
-+ buffer pools used by a port or storage-profile.
-+*//***************************************************************************/
-+typedef struct t_FmExtPoolParams {
-+ uint8_t id; /**< External buffer pool id */
-+ uint16_t size; /**< External buffer pool buffer size */
-+} t_FmExtPoolParams;
-+
-+/**************************************************************************//**
-+ @Description A structure for informing the driver about the external
-+ buffer pools allocated in the BM and used by a port or a
-+ storage-profile.
-+*//***************************************************************************/
-+typedef struct t_FmExtPools {
-+ uint8_t numOfPoolsUsed; /**< Number of pools use by this port */
-+ t_FmExtPoolParams extBufPool[FM_PORT_MAX_NUM_OF_EXT_POOLS];
-+ /**< Parameters for each port */
-+} t_FmExtPools;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining backup BM Pools.
-+*//***************************************************************************/
-+typedef struct t_FmBackupBmPools {
-+ uint8_t numOfBackupPools; /**< Number of BM backup pools -
-+ must be smaller than the total number of
-+ pools defined for the specified port.*/
-+ uint8_t poolIds[FM_PORT_MAX_NUM_OF_EXT_POOLS];
-+ /**< numOfBackupPools pool id's, specifying which
-+ pools should be used only as backup. Pool
-+ id's specified here must be a subset of the
-+ pools used by the specified port.*/
-+} t_FmBackupBmPools;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining BM pool depletion criteria
-+*//***************************************************************************/
-+typedef struct t_FmBufPoolDepletion {
-+ bool poolsGrpModeEnable; /**< select mode in which pause frames will be sent after
-+ a number of pools (all together!) are depleted */
-+ uint8_t numOfPools; /**< the number of depleted pools that will invoke
-+ pause frames transmission. */
-+ bool poolsToConsider[BM_MAX_NUM_OF_POOLS];
-+ /**< For each pool, TRUE if it should be considered for
-+ depletion (Note - this pool must be used by this port!). */
-+ bool singlePoolModeEnable; /**< select mode in which pause frames will be sent after
-+ a single-pool is depleted; */
-+ bool poolsToConsiderForSingleMode[BM_MAX_NUM_OF_POOLS];
-+ /**< For each pool, TRUE if it should be considered for
-+ depletion (Note - this pool must be used by this port!) */
-+#if (DPAA_VERSION >= 11)
-+ bool pfcPrioritiesEn[FM_MAX_NUM_OF_PFC_PRIORITIES];
-+ /**< This field is used by the MAC as the Priority Enable Vector in the PFC frame which is transmitted */
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmBufPoolDepletion;
-+
-+/**************************************************************************//**
-+ @Description A Structure for defining Ucode patch for loading.
-+*//***************************************************************************/
-+typedef struct t_FmFirmwareParams {
-+ uint32_t size; /**< Size of uCode */
-+ uint32_t *p_Code; /**< A pointer to the uCode */
-+} t_FmFirmwareParams;
-+
-+/**************************************************************************//**
-+ @Description A Structure for defining FM initialization parameters
-+*//***************************************************************************/
-+typedef struct t_FmParams {
-+ uint8_t fmId; /**< Index of the FM */
-+ uint8_t guestId; /**< FM Partition Id */
-+ uintptr_t baseAddr; /**< A pointer to base of memory mapped FM registers (virtual);
-+ this field is optional when the FM runs in "guest-mode"
-+ (i.e. guestId != NCSW_MASTER_ID); in that case, the driver will
-+ use the memory-map instead of calling the IPC where possible;
-+ NOTE that this should include ALL common registers of the FM including
-+ the PCD registers area (i.e. until the VSP pages - 880KB). */
-+ t_Handle h_FmMuram; /**< A handle of an initialized MURAM object,
-+ to be used by the FM. */
-+ uint16_t fmClkFreq; /**< In MHz;
-+ Relevant when the FM does not run in "guest-mode". */
-+ uint16_t fmMacClkRatio; /**< FM MAC clock ratio, for backward compatibility:
-+ when fmMacClkRatio = 0, ratio is 2:1
-+ when fmMacClkRatio = 1, ratio is 1:1 */
-+ t_FmExceptionsCallback *f_Exception; /**< An application callback routine to handle exceptions;
-+ Relevant when the FM does not run in "guest-mode". */
-+ t_FmBusErrorCallback *f_BusError; /**< An application callback routine to handle bus errors;
-+ Relevant when the FM does not run in "guest-mode". */
-+ t_Handle h_App; /**< A handle to an application layer object; this handle will
-+ be passed by the driver upon calling the above callbacks;
-+ Relevant when the FM does not run in "guest-mode". */
-+ int irq; /**< FM interrupt source for normal events;
-+ Relevant when the FM does not run in "guest-mode". */
-+ int errIrq; /**< FM interrupt source for errors;
-+ Relevant when the FM does not run in "guest-mode". */
-+ t_FmFirmwareParams firmware; /**< The firmware parameters structure;
-+ Relevant when the FM does not run in "guest-mode". */
-+
-+#if (DPAA_VERSION >= 11)
-+ uintptr_t vspBaseAddr; /**< A pointer to base of memory mapped FM VSP registers (virtual);
-+ i.e. up to 24KB, depending on the specific chip. */
-+ uint8_t partVSPBase; /**< The first Virtual-Storage-Profile-id dedicated to this partition.
-+ NOTE: this parameter is relevant only when working with multiple partitions. */
-+ uint8_t partNumOfVSPs; /**< Number of VSPs dedicated to this partition.
-+ NOTE: this parameter is relevant only when working with multiple partitions. */
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_Config
-+
-+ @Description Creates the FM module and returns its handle (descriptor).
-+ This descriptor must be passed as first parameter to all other
-+ FM function calls.
-+
-+ No actual initialization or configuration of FM hardware is
-+ done by this routine. All FM parameters get default values that
-+ may be changed by calling one or more of the advanced config routines.
-+
-+ @Param[in] p_FmParams - A pointer to a data structure of mandatory FM parameters
-+
-+ @Return A handle to the FM object, or NULL for Failure.
-+*//***************************************************************************/
-+t_Handle FM_Config(t_FmParams *p_FmParams);
-+
-+/**************************************************************************//**
-+ @Function FM_Init
-+
-+ @Description Initializes the FM module by defining the software structure
-+ and configuring the hardware registers.
-+
-+ @Param[in] h_Fm - FM module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_Init(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_Free
-+
-+ @Description Frees all resources that were assigned to FM module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_Fm - FM module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_Free(t_Handle h_Fm);
-+
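A minimal sketch of the Config -> Init (-> Free on error) sequence described earlier; fm_setup and all parameter values are placeholders, h_Muram is assumed to be an already-initialized MURAM handle, and IRQ/callback wiring is omitted:

#include "fm_ext.h"   /* t_FmParams, FM_Config, FM_Init, FM_Free */

static t_Handle fm_setup(uintptr_t fm_base, t_Handle h_Muram,
                         uint32_t *ucode, uint32_t ucode_size)
{
    t_FmParams params = { 0 };
    t_Handle h_Fm;

    params.fmId            = 0;
    params.guestId         = NCSW_MASTER_ID;   /* master partition, not "guest-mode" */
    params.baseAddr        = fm_base;          /* virtual base of the FM register map */
    params.h_FmMuram       = h_Muram;
    params.fmClkFreq       = 700;              /* MHz, placeholder */
    params.firmware.p_Code = ucode;
    params.firmware.size   = ucode_size;

    h_Fm = FM_Config(&params);                 /* create the object; defaults applied */
    if (!h_Fm)
        return NULL;

    /* ...optional advanced-configuration calls would go here... */

    if (FM_Init(h_Fm) != E_OK) {               /* program the hardware */
        FM_Free(h_Fm);
        return NULL;
    }

    return h_Fm;
}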
-+
-+/**************************************************************************//**
-+ @Group FM_advanced_init_grp FM Advanced Configuration Unit
-+
-+ @Description Advanced configuration routines are optional routines that may
-+ be called in order to change the default driver settings.
-+
-+ Note: Advanced configuration routines are not available for the guest partition.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Enum for selecting DMA debug mode
-+*//***************************************************************************/
-+typedef enum e_FmDmaDbgCntMode {
-+ e_FM_DMA_DBG_NO_CNT = 0, /**< No counting */
-+ e_FM_DMA_DBG_CNT_DONE, /**< Count DONE commands */
-+ e_FM_DMA_DBG_CNT_COMM_Q_EM, /**< count command queue emergency signals */
-+ e_FM_DMA_DBG_CNT_INT_READ_EM, /**< Count Internal Read buffer emergency signal */
-+ e_FM_DMA_DBG_CNT_INT_WRITE_EM, /**< Count Internal Write buffer emergency signal */
-+ e_FM_DMA_DBG_CNT_FPM_WAIT, /**< Count FPM WAIT signal */
-+ e_FM_DMA_DBG_CNT_SIGLE_BIT_ECC, /**< Single bit ECC errors. */
-+ e_FM_DMA_DBG_CNT_RAW_WAR_PROT /**< Number of times there was a need for RAW & WAR protection. */
-+} e_FmDmaDbgCntMode;
-+
-+/**************************************************************************//**
-+ @Description Enum for selecting DMA Cache Override
-+*//***************************************************************************/
-+typedef enum e_FmDmaCacheOverride {
-+ e_FM_DMA_NO_CACHE_OR = 0, /**< No override of the Cache field */
-+ e_FM_DMA_NO_STASH_DATA, /**< Data should not be stashed in system level cache */
-+ e_FM_DMA_MAY_STASH_DATA, /**< Data may be stashed in system level cache */
-+ e_FM_DMA_STASH_DATA /**< Data should be stashed in system level cache */
-+} e_FmDmaCacheOverride;
-+
-+/**************************************************************************//**
-+ @Description Enum for selecting DMA External Bus Priority
-+*//***************************************************************************/
-+typedef enum e_FmDmaExtBusPri {
-+ e_FM_DMA_EXT_BUS_NORMAL = 0, /**< Normal priority */
-+ e_FM_DMA_EXT_BUS_EBS, /**< AXI extended bus service priority */
-+ e_FM_DMA_EXT_BUS_SOS, /**< AXI sos priority */
-+ e_FM_DMA_EXT_BUS_EBS_AND_SOS /**< AXI ebs + sos priority */
-+} e_FmDmaExtBusPri;
-+
-+/**************************************************************************//**
-+ @Description Enum for choosing the field that will be output on AID
-+*//***************************************************************************/
-+typedef enum e_FmDmaAidMode {
-+ e_FM_DMA_AID_OUT_PORT_ID = 0, /**< 4 LSB of PORT_ID */
-+ e_FM_DMA_AID_OUT_TNUM /**< 4 LSB of TNUM */
-+} e_FmDmaAidMode;
-+
-+/**************************************************************************//**
-+ @Description Enum for selecting FPM Catastrophic error behavior
-+*//***************************************************************************/
-+typedef enum e_FmCatastrophicErr {
-+ e_FM_CATASTROPHIC_ERR_STALL_PORT = 0, /**< Port_ID is stalled (only reset can release it) */
-+ e_FM_CATASTROPHIC_ERR_STALL_TASK /**< Only erroneous task is stalled */
-+} e_FmCatastrophicErr;
-+
-+/**************************************************************************//**
-+ @Description Enum for selecting FPM DMA Error behavior
-+*//***************************************************************************/
-+typedef enum e_FmDmaErr {
-+ e_FM_DMA_ERR_CATASTROPHIC = 0, /**< Dma error is treated as a catastrophic
-+ error (e_FmCatastrophicErr)*/
-+ e_FM_DMA_ERR_REPORT /**< Dma error is just reported */
-+} e_FmDmaErr;
-+
-+/**************************************************************************//**
-+ @Description Enum for selecting DMA Emergency level by BMI emergency signal
-+*//***************************************************************************/
-+typedef enum e_FmDmaEmergencyLevel {
-+ e_FM_DMA_EM_EBS = 0, /**< EBS emergency */
-+ e_FM_DMA_EM_SOS /**< SOS emergency */
-+} e_FmDmaEmergencyLevel;
-+
-+/**************************************************************************//**
-+ @Collection Enum for selecting DMA Emergency options
-+*//***************************************************************************/
-+typedef uint32_t fmEmergencyBus_t; /**< DMA emergency options */
-+
-+#define FM_DMA_MURAM_READ_EMERGENCY 0x00800000 /**< Enable emergency for MURAM1 */
-+#define FM_DMA_MURAM_WRITE_EMERGENCY 0x00400000 /**< Enable emergency for MURAM2 */
-+#define FM_DMA_EXT_BUS_EMERGENCY 0x00100000 /**< Enable emergency for external bus */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description A structure for defining DMA emergency level
-+*//***************************************************************************/
-+typedef struct t_FmDmaEmergency {
-+ fmEmergencyBus_t emergencyBusSelect; /**< An OR of the busses where emergency
-+ should be enabled */
-+ e_FmDmaEmergencyLevel emergencyLevel; /**< EBS/SOS */
-+} t_FmDmaEmergency;
-+
-+/**************************************************************************//*
-+ @Description structure for defining FM threshold
-+*//***************************************************************************/
-+typedef struct t_FmThresholds {
-+    uint8_t             dispLimit;              /**< The number of times a frame may
-+                                                     be passed in the FM before it is
-+                                                     assumed to be looping. */
-+    uint8_t             prsDispTh;              /**< This is the number of packets that may be
-+                                                     queued in the parser dispatch queue */
-+    uint8_t             plcrDispTh;             /**< This is the number of packets that may be
-+                                                     queued in the policer dispatch queue */
-+    uint8_t             kgDispTh;               /**< This is the number of packets that may be
-+                                                     queued in the keygen dispatch queue */
-+    uint8_t             bmiDispTh;              /**< This is the number of packets that may be
-+                                                     queued in the BMI dispatch queue */
-+    uint8_t             qmiEnqDispTh;           /**< This is the number of packets that may be
-+                                                     queued in the QMI enqueue dispatch queue */
-+    uint8_t             qmiDeqDispTh;           /**< This is the number of packets that may be
-+                                                     queued in the QMI dequeue dispatch queue */
-+    uint8_t             fmCtl1DispTh;           /**< This is the number of packets that may be
-+                                                     queued in the fmCtl1 dispatch queue */
-+    uint8_t             fmCtl2DispTh;           /**< This is the number of packets that may be
-+                                                     queued in the fmCtl2 dispatch queue */
-+} t_FmThresholds;
-+
-+/**************************************************************************//*
-+ @Description structure for defining DMA thresholds
-+*//***************************************************************************/
-+typedef struct t_FmDmaThresholds {
-+ uint8_t assertEmergency; /**< When this value is reached,
-+ assert emergency (Threshold)*/
-+ uint8_t clearEmergency; /**< After emergency is asserted, it is held
-+                                                 until this value is reached (hysteresis) */
-+} t_FmDmaThresholds;
-+
-+/**************************************************************************//**
-+ @Function t_FmResetOnInitOverrideCallback
-+
-+ @Description FMan specific reset on init user callback routine,
-+ will be used to override the standard FMan reset on init procedure
-+
-+ @Param[in]     h_Fm            - FMan handle
-+*//***************************************************************************/
-+typedef void (t_FmResetOnInitOverrideCallback)(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigResetOnInit
-+
-+ @Description Define whether to reset the FM before initialization.
-+ Change the default configuration [DEFAULT_resetOnInit].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] enable When TRUE, FM will be reset before any initialization.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigResetOnInit(t_Handle h_Fm, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigResetOnInitOverrideCallback
-+
-+ @Description Define a special reset of FM before initialization.
-+ Change the default configuration [DEFAULT_resetOnInitOverrideCallback].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] f_ResetOnInitOverride FM specific reset on init user callback routine.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigResetOnInitOverrideCallback(t_Handle h_Fm, t_FmResetOnInitOverrideCallback *f_ResetOnInitOverride);
-+
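-+/*
-+ * Illustrative sketch (not taken from the SDK): registering a custom reset-on-init
-+ * callback between FM_Config() and FM_Init(). "AppFmResetOverride" is a hypothetical
-+ * application routine; error handling is omitted for brevity.
-+ *
-+ *     static void AppFmResetOverride(t_Handle h_Fm)
-+ *     {
-+ *         ... perform the platform-specific FMan reset here ...
-+ *     }
-+ *
-+ *     FM_ConfigResetOnInit(h_Fm, TRUE);
-+ *     FM_ConfigResetOnInitOverrideCallback(h_Fm, AppFmResetOverride);
-+ */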
-+/**************************************************************************//**
-+ @Function FM_ConfigTotalFifoSize
-+
-+ @Description Define Total FIFO size for the whole FM.
-+ Calling this routine changes the total Fifo size in the internal driver
-+ data base from its default configuration [DEFAULT_totalFifoSize]
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] totalFifoSize The selected new value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigTotalFifoSize(t_Handle h_Fm, uint32_t totalFifoSize);
-+
-+ /**************************************************************************//**
-+ @Function FM_ConfigDmaCacheOverride
-+
-+ @Description Define cache override mode.
-+ Calling this routine changes the cache override mode
-+ in the internal driver data base from its default configuration [DEFAULT_cacheOverride]
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] cacheOverride The selected new value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaCacheOverride(t_Handle h_Fm, e_FmDmaCacheOverride cacheOverride);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaAidOverride
-+
-+ @Description Define DMA AID override mode.
-+ Calling this routine changes the AID override mode
-+ in the internal driver data base from its default configuration [DEFAULT_aidOverride]
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] aidOverride The selected new value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaAidOverride(t_Handle h_Fm, bool aidOverride);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaAidMode
-+
-+ @Description Define DMA AID mode.
-+ Calling this routine changes the AID mode in the internal
-+ driver data base from its default configuration [DEFAULT_aidMode]
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] aidMode The selected new value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaAidMode(t_Handle h_Fm, e_FmDmaAidMode aidMode);
-+
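-+/*
-+ * Illustrative sketch of a typical advanced-configuration sequence (the chosen
-+ * settings are arbitrary examples, not recommendations); all calls must sit
-+ * between FM_Config() and FM_Init(), and error handling is omitted:
-+ *
-+ *     FM_ConfigDmaCacheOverride(h_Fm, e_FM_DMA_MAY_STASH_DATA);
-+ *     FM_ConfigDmaAidOverride(h_Fm, TRUE);
-+ *     FM_ConfigDmaAidMode(h_Fm, e_FM_DMA_AID_OUT_TNUM);
-+ */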
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaAxiDbgNumOfBeats
-+
-+ @Description Define DMA AXI number of beats.
-+ Calling this routine changes the AXI number of beats in the internal
-+ driver data base from its default configuration [DEFAULT_axiDbgNumOfBeats]
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] axiDbgNumOfBeats The selected new value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaAxiDbgNumOfBeats(t_Handle h_Fm, uint8_t axiDbgNumOfBeats);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaCamNumOfEntries
-+
-+ @Description Define number of CAM entries.
-+ Calling this routine changes the number of CAM entries in the internal
-+ driver data base from its default configuration [DEFAULT_dmaCamNumOfEntries].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] numOfEntries The selected new value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaCamNumOfEntries(t_Handle h_Fm, uint8_t numOfEntries);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigEnableCounters
-+
-+ @Description Obsolete, always return E_OK.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_ConfigEnableCounters(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaDbgCounter
-+
-+ @Description Define DMA debug counter.
-+ Calling this routine changes the number of the DMA debug counter in the internal
-+ driver data base from its default configuration [DEFAULT_dmaDbgCntMode].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] fmDmaDbgCntMode An enum selecting the debug counter mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaDbgCounter(t_Handle h_Fm, e_FmDmaDbgCntMode fmDmaDbgCntMode);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaStopOnBusErr
-+
-+ @Description Define bus error behavior.
-+ Calling this routine changes the bus error behavior definition
-+ in the internal driver data base from its default
-+ configuration [DEFAULT_dmaStopOnBusError].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] stop TRUE to stop on bus error, FALSE to continue.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ Only if bus error is enabled.
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaStopOnBusErr(t_Handle h_Fm, bool stop);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaEmergency
-+
-+ @Description Define DMA emergency.
-+ Calling this routine changes the DMA emergency definition
-+ in the internal driver data base from its default
-+                        configuration, where it is disabled.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] p_Emergency An OR mask of all required options.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaEmergency(t_Handle h_Fm, t_FmDmaEmergency *p_Emergency);
-+
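-+/*
-+ * Illustrative sketch: enabling DMA emergency for the MURAM read port and the
-+ * external bus at EBS level (the option mask and level shown are arbitrary
-+ * examples; error handling is omitted):
-+ *
-+ *     t_FmDmaEmergency emergency;
-+ *
-+ *     emergency.emergencyBusSelect = FM_DMA_MURAM_READ_EMERGENCY |
-+ *                                    FM_DMA_EXT_BUS_EMERGENCY;
-+ *     emergency.emergencyLevel     = e_FM_DMA_EM_EBS;
-+ *     FM_ConfigDmaEmergency(h_Fm, &emergency);
-+ */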
-+/**************************************************************************//**
-+ @Function FM_ConfigDmaErr
-+
-+ @Description DMA error treatment.
-+ Calling this routine changes the DMA error treatment
-+ in the internal driver data base from its default
-+ configuration [DEFAULT_dmaErr].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] dmaErr The selected new choice.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaErr(t_Handle h_Fm, e_FmDmaErr dmaErr);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigCatastrophicErr
-+
-+ @Description Define FM behavior on catastrophic error.
-+ Calling this routine changes the FM behavior on catastrophic
-+ error in the internal driver data base from its default
-+ [DEFAULT_catastrophicErr].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] catastrophicErr The selected new choice.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigCatastrophicErr(t_Handle h_Fm, e_FmCatastrophicErr catastrophicErr);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigEnableMuramTestMode
-+
-+ @Description Enable MURAM test mode.
-+ Calling this routine changes the internal driver data base
-+ from its default selection of test mode where it's disabled.
-+                  This routine is only available on old FM revisions (FMan v2).
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigEnableMuramTestMode(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigEnableIramTestMode
-+
-+ @Description Enable IRAM test mode.
-+ Calling this routine changes the internal driver data base
-+ from its default selection of test mode where it's disabled.
-+                  This routine is only available on old FM revisions (FMan v2).
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigEnableIramTestMode(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigHaltOnExternalActivation
-+
-+ @Description Define FM behavior on external halt activation.
-+ Calling this routine changes the FM behavior on external halt
-+ activation in the internal driver data base from its default
-+ [DEFAULT_haltOnExternalActivation].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] enable TRUE to enable halt on external halt
-+ activation.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigHaltOnExternalActivation(t_Handle h_Fm, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigHaltOnUnrecoverableEccError
-+
-+ @Description   Define FM behavior on unrecoverable ECC error.
-+ Calling this routine changes the FM behavior on unrecoverable
-+ ECC error in the internal driver data base from its default
-+ [DEFAULT_haltOnUnrecoverableEccError].
-+                  This routine is only available on old FM revisions (FMan v2).
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] enable TRUE to enable halt on unrecoverable Ecc error
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigHaltOnUnrecoverableEccError(t_Handle h_Fm, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigException
-+
-+ @Description Define FM exceptions.
-+ Calling this routine changes the exceptions defaults in the
-+ internal driver data base where all exceptions are enabled.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigException(t_Handle h_Fm, e_FmExceptions exception, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigExternalEccRamsEnable
-+
-+ @Description Select external ECC enabling.
-+ Calling this routine changes the ECC enabling control in the internal
-+ driver data base from its default [DEFAULT_externalEccRamsEnable].
-+                      When this option is enabled, RAM ECC enabling is not controlled
-+                      by FM_EnableRamsEcc/FM_DisableRamsEcc, but by JTAG.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] enable TRUE to enable this option.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigExternalEccRamsEnable(t_Handle h_Fm, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_ConfigTnumAgingPeriod
-+
-+ @Description Define Tnum aging period.
-+ Calling this routine changes the Tnum aging of dequeue TNUMs
-+ in the QMI in the internal driver data base from its default
-+ [DEFAULT_tnumAgingPeriod].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] tnumAgingPeriod Tnum Aging Period in microseconds.
-+ Note that period is recalculated in units of
-+ 64 FM clocks. Driver will pick the closest
-+ possible period.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+ NOTE that if some MAC is configured for PFC, '0' value is NOT
-+ allowed.
-+*//***************************************************************************/
-+t_Error FM_ConfigTnumAgingPeriod(t_Handle h_Fm, uint16_t tnumAgingPeriod);
-+
-+/**************************************************************************//*
-+ @Function FM_ConfigDmaEmergencySmoother
-+
-+ @Description Define DMA emergency smoother.
-+ Calling this routine changes the definition of the minimum
-+ amount of DATA beats transferred on the AXI READ and WRITE
-+ ports before lowering the emergency level.
-+ By default smoother is disabled.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] emergencyCnt emergency switching counter.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaEmergencySmoother(t_Handle h_Fm, uint32_t emergencyCnt);
-+
-+/**************************************************************************//*
-+ @Function FM_ConfigThresholds
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default FM threshold configuration:
-+ dispLimit: [DEFAULT_dispLimit]
-+ prsDispTh: [DEFAULT_prsDispTh]
-+ plcrDispTh: [DEFAULT_plcrDispTh]
-+ kgDispTh: [DEFAULT_kgDispTh]
-+ bmiDispTh: [DEFAULT_bmiDispTh]
-+ qmiEnqDispTh: [DEFAULT_qmiEnqDispTh]
-+ qmiDeqDispTh: [DEFAULT_qmiDeqDispTh]
-+ fmCtl1DispTh: [DEFAULT_fmCtl1DispTh]
-+ fmCtl2DispTh: [DEFAULT_fmCtl2DispTh]
-+
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] p_FmThresholds A structure of threshold parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigThresholds(t_Handle h_Fm, t_FmThresholds *p_FmThresholds);
-+
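-+/*
-+ * Illustrative sketch: overriding the dispatch limit and the parser dispatch
-+ * threshold (the numeric values are placeholders, not tuned recommendations);
-+ * since the whole structure is applied at once, the remaining fields should be
-+ * set to the desired values as well:
-+ *
-+ *     t_FmThresholds thresholds = { 0 };
-+ *
-+ *     thresholds.dispLimit = 16;
-+ *     thresholds.prsDispTh = 8;
-+ *     ... set the remaining dispatch thresholds as required ...
-+ *     FM_ConfigThresholds(h_Fm, &thresholds);
-+ */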
-+/**************************************************************************//*
-+ @Function FM_ConfigDmaSosEmergencyThreshold
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default dma SOS emergency configuration [DEFAULT_dmaSosEmergency]
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] dmaSosEmergency The selected new value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaSosEmergencyThreshold(t_Handle h_Fm, uint32_t dmaSosEmergency);
-+
-+/**************************************************************************//*
-+ @Function FM_ConfigDmaWriteBufThresholds
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default configuration of DMA write buffer threshold
-+ assertEmergency: [DEFAULT_dmaWriteIntBufLow]
-+ clearEmergency: [DEFAULT_dmaWriteIntBufHigh]
-+                            This routine is only available on old FM revisions (FMan v2).
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior -
-+ When 'assertEmergency' value is reached, emergency is asserted,
-+ then it is held until 'clearEmergency' value is reached.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaWriteBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds);
-+
-+ /**************************************************************************//*
-+ @Function FM_ConfigDmaCommQThresholds
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default configuration of DMA command queue threshold
-+ assertEmergency: [DEFAULT_dmaCommQLow]
-+ clearEmergency: [DEFAULT_dmaCommQHigh]
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior -
-+ When 'assertEmergency' value is reached, emergency is asserted,
-+                                        then it is held until the 'clearEmergency' value is reached.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaCommQThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds);
-+
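-+/*
-+ * Illustrative sketch: asserting DMA command-queue emergency at a fill level of
-+ * 60 and clearing it again at 40 (both values are arbitrary examples):
-+ *
-+ *     t_FmDmaThresholds dmaCommQ;
-+ *
-+ *     dmaCommQ.assertEmergency = 60;
-+ *     dmaCommQ.clearEmergency  = 40;
-+ *     FM_ConfigDmaCommQThresholds(h_Fm, &dmaCommQ);
-+ */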
-+/**************************************************************************//*
-+ @Function FM_ConfigDmaReadBufThresholds
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default configuration of DMA read buffer threshold
-+ assertEmergency: [DEFAULT_dmaReadIntBufLow]
-+ clearEmergency: [DEFAULT_dmaReadIntBufHigh]
-+                            This routine is only available on old FM revisions (FMan v2).
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior -
-+ When 'assertEmergency' value is reached, emergency is asserted,
-+                                        then it is held until the 'clearEmergency' value is reached.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaReadBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds);
-+
-+/**************************************************************************//*
-+ @Function FM_ConfigDmaWatchdog
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default watchdog configuration, which is disabled
-+ [DEFAULT_dmaWatchdog].
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] watchDogValue The selected new value - in microseconds.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ConfigDmaWatchdog(t_Handle h_Fm, uint32_t watchDogValue);
-+
-+/** @} */ /* end of FM_advanced_init_grp group */
-+/** @} */ /* end of FM_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_runtime_control_grp FM Runtime Control Unit
-+
-+ @Description FM Runtime control unit API functions, definitions and enums.
-+ The FM driver provides a set of control routines.
-+ These routines may only be called after the module was fully
-+ initialized (both configuration and initialization routines were
-+ called). They are typically used to get information from hardware
-+ (status, counters/statistics, revision etc.), to modify a current
-+ state or to force/enable a required action. Run-time control may
-+ be called whenever necessary and as many times as needed.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection General FM defines.
-+*//***************************************************************************/
-+#define FM_MAX_NUM_OF_VALID_PORTS (FM_MAX_NUM_OF_OH_PORTS + \
-+ FM_MAX_NUM_OF_1G_RX_PORTS + \
-+ FM_MAX_NUM_OF_10G_RX_PORTS + \
-+ FM_MAX_NUM_OF_1G_TX_PORTS + \
-+ FM_MAX_NUM_OF_10G_TX_PORTS) /**< Number of available FM ports */
-+/* @} */
-+
-+/**************************************************************************//*
-+ @Description A Structure for Port bandwidth requirement. Port is identified
-+ by type and relative id.
-+*//***************************************************************************/
-+typedef struct t_FmPortBandwidth {
-+ e_FmPortType type; /**< FM port type */
-+ uint8_t relativePortId; /**< Type relative port id */
-+    uint8_t         bandwidth;          /**< Bandwidth (in percent) */
-+} t_FmPortBandwidth;
-+
-+/**************************************************************************//*
-+ @Description A Structure containing an array of Port bandwidth requirements.
-+ The user should state the ports requiring bandwidth in terms of
-+                  percentage - i.e. all ports' bandwidths in the array must add
-+ up to 100.
-+*//***************************************************************************/
-+typedef struct t_FmPortsBandwidthParams {
-+ uint8_t numOfPorts; /**< The number of relevant ports, which is the
-+ number of valid entries in the array below */
-+ t_FmPortBandwidth portsBandwidths[FM_MAX_NUM_OF_VALID_PORTS];
-+                                            /**< For each port, its bandwidth (all ports'
-+                                                 bandwidths must add up to 100). */
-+} t_FmPortsBandwidthParams;
-+
-+/**************************************************************************//**
-+ @Description DMA Emergency control on MURAM
-+*//***************************************************************************/
-+typedef enum e_FmDmaMuramPort {
-+ e_FM_DMA_MURAM_PORT_WRITE, /**< MURAM write port */
-+ e_FM_DMA_MURAM_PORT_READ /**< MURAM read port */
-+} e_FmDmaMuramPort;
-+
-+/**************************************************************************//**
-+ @Description Enum for defining FM counters
-+*//***************************************************************************/
-+typedef enum e_FmCounters {
-+ e_FM_COUNTERS_ENQ_TOTAL_FRAME = 0, /**< QMI total enqueued frames counter */
-+ e_FM_COUNTERS_DEQ_TOTAL_FRAME, /**< QMI total dequeued frames counter */
-+ e_FM_COUNTERS_DEQ_0, /**< QMI 0 frames from QMan counter */
-+ e_FM_COUNTERS_DEQ_1, /**< QMI 1 frames from QMan counter */
-+ e_FM_COUNTERS_DEQ_2, /**< QMI 2 frames from QMan counter */
-+ e_FM_COUNTERS_DEQ_3, /**< QMI 3 frames from QMan counter */
-+ e_FM_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI dequeue from default queue counter */
-+ e_FM_COUNTERS_DEQ_FROM_CONTEXT, /**< QMI dequeue from FQ context counter */
-+ e_FM_COUNTERS_DEQ_FROM_FD, /**< QMI dequeue from FD command field counter */
-+ e_FM_COUNTERS_DEQ_CONFIRM /**< QMI dequeue confirm counter */
-+} e_FmCounters;
-+
-+/**************************************************************************//**
-+ @Description A Structure for returning FM revision information
-+*//***************************************************************************/
-+typedef struct t_FmRevisionInfo {
-+ uint8_t majorRev; /**< Major revision */
-+ uint8_t minorRev; /**< Minor revision */
-+} t_FmRevisionInfo;
-+
-+/**************************************************************************//**
-+ @Description A Structure for returning FM ctrl code revision information
-+*//***************************************************************************/
-+typedef struct t_FmCtrlCodeRevisionInfo {
-+ uint16_t packageRev; /**< Package revision */
-+ uint8_t majorRev; /**< Major revision */
-+ uint8_t minorRev; /**< Minor revision */
-+} t_FmCtrlCodeRevisionInfo;
-+
-+/**************************************************************************//**
-+ @Description A Structure for defining DMA status
-+*//***************************************************************************/
-+typedef struct t_FmDmaStatus {
-+ bool cmqNotEmpty; /**< Command queue is not empty */
-+ bool busError; /**< Bus error occurred */
-+ bool readBufEccError; /**< Double ECC error on buffer Read (Valid for FM rev < 6)*/
-+ bool writeBufEccSysError; /**< Double ECC error on buffer write from system side (Valid for FM rev < 6)*/
-+ bool writeBufEccFmError; /**< Double ECC error on buffer write from FM side (Valid for FM rev < 6) */
-+ bool singlePortEccError; /**< Single Port ECC error from FM side (Valid for FM rev >= 6)*/
-+} t_FmDmaStatus;
-+
-+/**************************************************************************//**
-+ @Description A Structure for obtaining FM controller monitor values
-+*//***************************************************************************/
-+typedef struct t_FmCtrlMon {
-+ uint8_t percentCnt[2]; /**< Percentage value */
-+} t_FmCtrlMon;
-+
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/**************************************************************************//**
-+ @Function FM_DumpRegs
-+
-+ @Description Dumps all FM registers
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success;
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FM_DumpRegs(t_Handle h_Fm);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+/**************************************************************************//**
-+ @Function FM_SetException
-+
-+ @Description Calling this routine enables/disables the specified exception.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_SetException(t_Handle h_Fm, e_FmExceptions exception, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_EnableRamsEcc
-+
-+ @Description   Enables the ECC mechanism for all the different FM RAMs, e.g. IRAM,
-+                MURAM, Parser, Keygen, Policer, etc.
-+                Note:
-+                If FM_ConfigExternalEccRamsEnable was called to enable external
-+                setting of ECC, this routine affects IRAM ECC only.
-+ This routine is also called by the driver if an ECC exception is
-+ enabled.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_EnableRamsEcc(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_DisableRamsEcc
-+
-+ @Description   Disables the ECC mechanism for all the different FM RAMs, e.g. IRAM,
-+                MURAM, Parser, Keygen, Policer, etc.
-+                Note:
-+                If FM_ConfigExternalEccRamsEnable was called to enable external
-+                setting of ECC, this routine affects IRAM ECC only.
-+                As opposed to FM_EnableRamsEcc, this routine must be called
-+                explicitly to disable all RAM ECC.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Config() and before FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_DisableRamsEcc(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_GetRevision
-+
-+ @Description Returns the FM revision
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[out] p_FmRevisionInfo A structure of revision information parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FM_GetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo);
-+
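-+/*
-+ * Illustrative sketch: querying the FM revision after FM_Init(); error handling
-+ * is reduced to a single check:
-+ *
-+ *     t_FmRevisionInfo revInfo;
-+ *
-+ *     if (FM_GetRevision(h_Fm, &revInfo) == E_OK)
-+ *         ... revInfo.majorRev / revInfo.minorRev now hold the revision ...
-+ */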
-+/**************************************************************************//**
-+ @Function FM_GetFmanCtrlCodeRevision
-+
-+ @Description Returns the Fman controller code revision
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[out] p_RevisionInfo A structure of revision information parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FM_GetFmanCtrlCodeRevision(t_Handle h_Fm, t_FmCtrlCodeRevisionInfo *p_RevisionInfo);
-+
-+/**************************************************************************//**
-+ @Function FM_GetCounter
-+
-+ @Description Reads one of the FM counters.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] counter The requested counter.
-+
-+ @Return Counter's current value.
-+
-+ @Cautions Allowed only following FM_Init().
-+                 Note that it is the user's responsibility to call this routine only
-+ for enabled counters, and there will be no indication if a
-+ disabled counter is accessed.
-+*//***************************************************************************/
-+uint32_t FM_GetCounter(t_Handle h_Fm, e_FmCounters counter);
-+
-+/**************************************************************************//**
-+ @Function FM_ModifyCounter
-+
-+ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] counter The requested counter.
-+ @Param[in] val The requested value to be written into the counter.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ModifyCounter(t_Handle h_Fm, e_FmCounters counter, uint32_t val);
-+
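-+/*
-+ * Illustrative sketch: sampling and then clearing the QMI total-enqueue counter
-+ * (remember that reading a disabled counter is not flagged in any way):
-+ *
-+ *     uint32_t enqFrames;
-+ *
-+ *     enqFrames = FM_GetCounter(h_Fm, e_FM_COUNTERS_ENQ_TOTAL_FRAME);
-+ *     FM_ModifyCounter(h_Fm, e_FM_COUNTERS_ENQ_TOTAL_FRAME, 0);
-+ */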
-+/**************************************************************************//**
-+ @Function FM_Resume
-+
-+ @Description   Releases the FM after a halt FM command or after an unrecoverable ECC error.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return        None.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+void FM_Resume(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_SetDmaEmergency
-+
-+ @Description Manual emergency set
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] muramPort MURAM direction select.
-+ @Param[in] enable TRUE to manually enable emergency, FALSE to disable.
-+
-+ @Return None.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+void FM_SetDmaEmergency(t_Handle h_Fm, e_FmDmaMuramPort muramPort, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_SetDmaExtBusPri
-+
-+ @Description Set the DMA external bus priority
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] pri External bus priority select
-+
-+ @Return None.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+void FM_SetDmaExtBusPri(t_Handle h_Fm, e_FmDmaExtBusPri pri);
-+
-+/**************************************************************************//**
-+ @Function FM_GetDmaStatus
-+
-+ @Description Reads the DMA current status
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[out] p_FmDmaStatus A structure of DMA status parameters.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+void FM_GetDmaStatus(t_Handle h_Fm, t_FmDmaStatus *p_FmDmaStatus);
-+
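-+/*
-+ * Illustrative sketch: polling the DMA status and reacting to a bus error
-+ * (the reaction shown is only a placeholder):
-+ *
-+ *     t_FmDmaStatus dmaStatus;
-+ *
-+ *     FM_GetDmaStatus(h_Fm, &dmaStatus);
-+ *     if (dmaStatus.busError)
-+ *         ... report or recover from the DMA bus error ...
-+ */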
-+/**************************************************************************//**
-+ @Function FM_ErrorIsr
-+
-+ @Description FM interrupt-service-routine for errors.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; E_EMPTY if no errors found in register, other
-+ error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ErrorIsr(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_EventIsr
-+
-+ @Description FM interrupt-service-routine for normal events.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+void FM_EventIsr(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_GetSpecialOperationCoding
-+
-+ @Description Return a specific coding according to the input mask.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] spOper special operation mask.
-+ @Param[out] p_SpOperCoding special operation code.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FM_GetSpecialOperationCoding(t_Handle h_Fm,
-+ fmSpecialOperations_t spOper,
-+ uint8_t *p_SpOperCoding);
-+
-+/**************************************************************************//**
-+ @Function FM_CtrlMonStart
-+
-+ @Description Start monitoring utilization of all available FM controllers.
-+
-+ In order to obtain FM controllers utilization the following sequence
-+ should be used:
-+ -# FM_CtrlMonStart()
-+ -# FM_CtrlMonStop()
-+ -# FM_CtrlMonGetCounters() - issued for each FM controller
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID).
-+*//***************************************************************************/
-+t_Error FM_CtrlMonStart(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_CtrlMonStop
-+
-+ @Description Stop monitoring utilization of all available FM controllers.
-+
-+ In order to obtain FM controllers utilization the following sequence
-+ should be used:
-+ -# FM_CtrlMonStart()
-+ -# FM_CtrlMonStop()
-+ -# FM_CtrlMonGetCounters() - issued for each FM controller
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID).
-+*//***************************************************************************/
-+t_Error FM_CtrlMonStop(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_CtrlMonGetCounters
-+
-+ @Description Obtain FM controller utilization parameters.
-+
-+ In order to obtain FM controllers utilization the following sequence
-+ should be used:
-+ -# FM_CtrlMonStart()
-+ -# FM_CtrlMonStop()
-+ -# FM_CtrlMonGetCounters() - issued for each FM controller
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in]      fmCtrlIndex     FM Controller index for which utilization results
-+ are requested.
-+ @Param[in] p_Mon Pointer to utilization results structure.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID).
-+*//***************************************************************************/
-+t_Error FM_CtrlMonGetCounters(t_Handle h_Fm, uint8_t fmCtrlIndex, t_FmCtrlMon *p_Mon);
-+
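-+/*
-+ * Illustrative sketch of the monitoring sequence described above, shown for
-+ * controller index 0 (the index and the measurement window are arbitrary):
-+ *
-+ *     t_FmCtrlMon mon;
-+ *
-+ *     FM_CtrlMonStart(h_Fm);
-+ *     ... let traffic run for the measurement window ...
-+ *     FM_CtrlMonStop(h_Fm);
-+ *     if (FM_CtrlMonGetCounters(h_Fm, 0, &mon) == E_OK)
-+ *         ... mon.percentCnt[] now holds the utilization figures ...
-+ */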
-+
-+/**************************************************************************//*
-+ @Function FM_ForceIntr
-+
-+ @Description Causes an interrupt event on the requested source.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] exception An exception to be forced.
-+
-+ @Return E_OK on success; Error code if the exception is not enabled,
-+ or is not able to create interrupt.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_ForceIntr (t_Handle h_Fm, e_FmExceptions exception);
-+
-+/**************************************************************************//*
-+ @Function FM_SetPortsBandwidth
-+
-+ @Description Sets relative weights between ports when accessing common resources.
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+ @Param[in] p_PortsBandwidth A structure of ports bandwidths in percentage, i.e.
-+ total must equal 100.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_SetPortsBandwidth(t_Handle h_Fm, t_FmPortsBandwidthParams *p_PortsBandwidth);
-+
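-+/*
-+ * Illustrative sketch: splitting the bandwidth between two ports. The port-type
-+ * enumerators shown (e_FM_PORT_TYPE_RX, e_FM_PORT_TYPE_TX) are assumed names
-+ * from e_FmPortType and may differ; the percentages must total 100:
-+ *
-+ *     t_FmPortsBandwidthParams bw;
-+ *
-+ *     bw.numOfPorts = 2;
-+ *     bw.portsBandwidths[0].type           = e_FM_PORT_TYPE_RX;
-+ *     bw.portsBandwidths[0].relativePortId = 0;
-+ *     bw.portsBandwidths[0].bandwidth      = 60;
-+ *     bw.portsBandwidths[1].type           = e_FM_PORT_TYPE_TX;
-+ *     bw.portsBandwidths[1].relativePortId = 0;
-+ *     bw.portsBandwidths[1].bandwidth      = 40;
-+ *     FM_SetPortsBandwidth(h_Fm, &bw);
-+ */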
-+/**************************************************************************//*
-+ @Function FM_GetMuramHandle
-+
-+ @Description Gets the corresponding MURAM handle
-+
-+ @Param[in] h_Fm A handle to an FM Module.
-+
-+ @Return MURAM handle; NULL otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Handle FM_GetMuramHandle(t_Handle h_Fm);
-+
-+/** @} */ /* end of FM_runtime_control_grp group */
-+/** @} */ /* end of FM_lib_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#ifdef NCSW_BACKWARD_COMPATIBLE_API
-+typedef t_FmFirmwareParams t_FmPcdFirmwareParams;
-+typedef t_FmBufferPrefixContent t_FmPortBufferPrefixContent;
-+typedef t_FmExtPoolParams t_FmPortExtPoolParams;
-+typedef t_FmExtPools t_FmPortExtPools;
-+typedef t_FmBackupBmPools t_FmPortBackupBmPools;
-+typedef t_FmBufPoolDepletion t_FmPortBufPoolDepletion;
-+typedef e_FmDmaSwapOption e_FmPortDmaSwapOption;
-+typedef e_FmDmaCacheOption e_FmPortDmaCacheOption;
-+
-+#define FM_CONTEXTA_GET_OVVERIDE FM_CONTEXTA_GET_OVERRIDE
-+#define FM_CONTEXTA_SET_OVVERIDE FM_CONTEXTA_SET_OVERRIDE
-+
-+#define e_FM_EX_BMI_PIPELINE_ECC e_FM_EX_BMI_STORAGE_PROFILE_ECC
-+#define e_FM_PORT_DMA_NO_SWP e_FM_DMA_NO_SWP
-+#define e_FM_PORT_DMA_SWP_PPC_LE e_FM_DMA_SWP_PPC_LE
-+#define e_FM_PORT_DMA_SWP_BE e_FM_DMA_SWP_BE
-+#define e_FM_PORT_DMA_NO_STASH e_FM_DMA_NO_STASH
-+#define e_FM_PORT_DMA_STASH e_FM_DMA_STASH
-+#endif /* NCSW_BACKWARD_COMPATIBLE_API */
-+
-+
-+#endif /* __FM_EXT */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_mac_ext.h
-@@ -0,0 +1,887 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_mac_ext.h
-+
-+ @Description FM MAC ...
-+*//***************************************************************************/
-+#ifndef __FM_MAC_EXT_H
-+#define __FM_MAC_EXT_H
-+
-+#include "std_ext.h"
-+#include "enet_ext.h"
-+
-+
-+/**************************************************************************//**
-+
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_mac_grp FM MAC
-+
-+ @Description FM MAC API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define FM_MAC_NO_PFC 0xff
-+
-+
-+/**************************************************************************//**
-+ @Description FM MAC Exceptions
-+*//***************************************************************************/
-+typedef enum e_FmMacExceptions {
-+ e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO = 0 /**< 10GEC MDIO scan event interrupt */
-+ ,e_FM_MAC_EX_10G_MDIO_CMD_CMPL /**< 10GEC MDIO command completion interrupt */
-+ ,e_FM_MAC_EX_10G_REM_FAULT /**< 10GEC, mEMAC Remote fault interrupt */
-+ ,e_FM_MAC_EX_10G_LOC_FAULT /**< 10GEC, mEMAC Local fault interrupt */
-+ ,e_FM_MAC_EX_10G_1TX_ECC_ER /**< 10GEC, mEMAC Transmit frame ECC error interrupt */
-+ ,e_FM_MAC_EX_10G_TX_FIFO_UNFL /**< 10GEC, mEMAC Transmit FIFO underflow interrupt */
-+ ,e_FM_MAC_EX_10G_TX_FIFO_OVFL /**< 10GEC, mEMAC Transmit FIFO overflow interrupt */
-+ ,e_FM_MAC_EX_10G_TX_ER /**< 10GEC Transmit frame error interrupt */
-+ ,e_FM_MAC_EX_10G_RX_FIFO_OVFL /**< 10GEC, mEMAC Receive FIFO overflow interrupt */
-+ ,e_FM_MAC_EX_10G_RX_ECC_ER /**< 10GEC, mEMAC Receive frame ECC error interrupt */
-+ ,e_FM_MAC_EX_10G_RX_JAB_FRM /**< 10GEC Receive jabber frame interrupt */
-+ ,e_FM_MAC_EX_10G_RX_OVRSZ_FRM /**< 10GEC Receive oversized frame interrupt */
-+ ,e_FM_MAC_EX_10G_RX_RUNT_FRM /**< 10GEC Receive runt frame interrupt */
-+ ,e_FM_MAC_EX_10G_RX_FRAG_FRM /**< 10GEC Receive fragment frame interrupt */
-+ ,e_FM_MAC_EX_10G_RX_LEN_ER /**< 10GEC Receive payload length error interrupt */
-+ ,e_FM_MAC_EX_10G_RX_CRC_ER /**< 10GEC Receive CRC error interrupt */
-+ ,e_FM_MAC_EX_10G_RX_ALIGN_ER /**< 10GEC Receive alignment error interrupt */
-+ ,e_FM_MAC_EX_1G_BAB_RX /**< dTSEC Babbling receive error */
-+ ,e_FM_MAC_EX_1G_RX_CTL /**< dTSEC Receive control (pause frame) interrupt */
-+ ,e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET /**< dTSEC Graceful transmit stop complete */
-+ ,e_FM_MAC_EX_1G_BAB_TX /**< dTSEC Babbling transmit error */
-+ ,e_FM_MAC_EX_1G_TX_CTL /**< dTSEC Transmit control (pause frame) interrupt */
-+ ,e_FM_MAC_EX_1G_TX_ERR /**< dTSEC Transmit error */
-+ ,e_FM_MAC_EX_1G_LATE_COL /**< dTSEC Late collision */
-+ ,e_FM_MAC_EX_1G_COL_RET_LMT /**< dTSEC Collision retry limit */
-+ ,e_FM_MAC_EX_1G_TX_FIFO_UNDRN /**< dTSEC Transmit FIFO underrun */
-+ ,e_FM_MAC_EX_1G_MAG_PCKT /**< dTSEC Magic Packet detection */
-+ ,e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET /**< dTSEC MII management read completion */
-+ ,e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET /**< dTSEC MII management write completion */
-+ ,e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET /**< dTSEC Graceful receive stop complete */
-+ ,e_FM_MAC_EX_1G_TX_DATA_ERR /**< dTSEC Internal data error on transmit */
-+ ,e_FM_MAC_EX_1G_RX_DATA_ERR /**< dTSEC Internal data error on receive */
-+ ,e_FM_MAC_EX_1G_1588_TS_RX_ERR /**< dTSEC Time-Stamp Receive Error */
-+ ,e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL /**< dTSEC MIB counter overflow */
-+ ,e_FM_MAC_EX_TS_FIFO_ECC_ERR /**< mEMAC Time-stamp FIFO ECC error interrupt;
-+ not supported on T4240/B4860 rev1 chips */
-+ ,e_FM_MAC_EX_MAGIC_PACKET_INDICATION = e_FM_MAC_EX_1G_MAG_PCKT
-+ /**< mEMAC Magic Packet Indication Interrupt */
-+} e_FmMacExceptions;
-+
-+/**************************************************************************//**
-+ @Description   FM MAC statistics level
-+*//***************************************************************************/
-+typedef enum e_FmMacStatisticsLevel {
-+ e_FM_MAC_NONE_STATISTICS = 0, /**< No statistics */
-+ e_FM_MAC_PARTIAL_STATISTICS, /**< Only error counters are available; Optimized for performance */
-+ e_FM_MAC_FULL_STATISTICS /**< All counters available; Not optimized for performance */
-+} e_FmMacStatisticsLevel;
-+
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Priority Flow Control Parameters
-+*//***************************************************************************/
-+typedef struct t_FmMacPfcParams {
-+ bool pfcEnable; /**< Enable/Disable PFC */
-+
-+    uint16_t    pauseQuanta[FM_MAX_NUM_OF_PFC_PRIORITIES]; /**< Pause Quanta per priority to be sent in a pause frame. Each quantum represents 512 bit-times */
-+
-+    uint16_t    pauseThresholdQuanta[FM_MAX_NUM_OF_PFC_PRIORITIES];/**< Pause threshold per priority; when the timer passes this threshold time, a PFC frame is sent again if the port is still congested or the BM pool is in depletion */
-+
-+
-+} t_FmMacPfcParams;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Function t_FmMacExceptionCallback
-+
-+ @Description Fm Mac Exception Callback from FM MAC to the user
-+
-+ @Param[in] h_App - Handle to the upper layer handler
-+
-+ @Param[in] exceptions - The exception that occurred
-+
-+ @Return void.
-+*//***************************************************************************/
-+typedef void (t_FmMacExceptionCallback)(t_Handle h_App, e_FmMacExceptions exceptions);
-+
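-+/*
-+ * Illustrative sketch of an application-side exception callback matching this
-+ * typedef; "AppMacExceptionCb" is a hypothetical name, and how the callback is
-+ * registered (typically through the MAC initialization parameters) is outside
-+ * the scope of this sketch:
-+ *
-+ *     static void AppMacExceptionCb(t_Handle h_App, e_FmMacExceptions exceptions)
-+ *     {
-+ *         if (exceptions == e_FM_MAC_EX_10G_RX_FIFO_OVFL)
-+ *             ... handle the receive FIFO overflow ...
-+ *     }
-+ */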
-+
-+/**************************************************************************//**
-+ @Description   FM MAC statistics (RFC 3635)
-+*//***************************************************************************/
-+typedef struct t_FmMacStatistics {
-+/* RMON */
-+ uint64_t eStatPkts64; /**< r-10G tr-DT 64 byte frame counter */
-+ uint64_t eStatPkts65to127; /**< r-10G 65 to 127 byte frame counter */
-+ uint64_t eStatPkts128to255; /**< r-10G 128 to 255 byte frame counter */
-+ uint64_t eStatPkts256to511; /**< r-10G 256 to 511 byte frame counter */
-+ uint64_t eStatPkts512to1023; /**< r-10G 512 to 1023 byte frame counter */
-+ uint64_t eStatPkts1024to1518; /**< r-10G 1024 to 1518 byte frame counter */
-+ uint64_t eStatPkts1519to1522; /**< r-10G 1519 to 1522 byte good frame count */
-+/* */
-+ uint64_t eStatFragments; /**< Total number of packets that were less than 64 octets long with a wrong CRC.*/
-+ uint64_t eStatJabbers; /**< Total number of packets longer than valid maximum length octets */
-+ uint64_t eStatsDropEvents; /**< number of dropped packets due to internal errors of the MAC Client (during receive). */
-+ uint64_t eStatCRCAlignErrors; /**< Incremented when frames of correct length but with CRC error are received.*/
-+ uint64_t eStatUndersizePkts; /**< Incremented for frames under 64 bytes with a valid FCS and otherwise well formed;
-+ This count does not include range length errors */
-+ uint64_t eStatOversizePkts; /**< Incremented for frames which exceed 1518 (non VLAN) or 1522 (VLAN) and contains
-+ a valid FCS and otherwise well formed */
-+/* Pause */
-+ uint64_t teStatPause; /**< Pause MAC Control received */
-+ uint64_t reStatPause; /**< Pause MAC Control sent */
-+/* MIB II */
-+ uint64_t ifInOctets; /**< Total number of bytes received. */
-+ uint64_t ifInPkts; /**< Total number of packets received. */
-+ uint64_t ifInUcastPkts; /**< Total number of unicast frame received;
-+ NOTE: this counter is not supported on dTSEC MAC */
-+ uint64_t ifInMcastPkts; /**< Total number of multicast frame received*/
-+ uint64_t ifInBcastPkts; /**< Total number of broadcast frame received */
-+ uint64_t ifInDiscards; /**< Frames received, but discarded due to problems within the MAC RX. */
-+ uint64_t ifInErrors; /**< Number of frames received with error:
-+ - FIFO Overflow Error
-+ - CRC Error
-+ - Frame Too Long Error
-+ - Alignment Error
-+ - The dedicated Error Code (0xfe, not a code error) was received */
-+ uint64_t ifOutOctets; /**< Total number of bytes sent. */
-+ uint64_t ifOutPkts; /**< Total number of packets sent. */
-+ uint64_t ifOutUcastPkts; /**< Total number of unicast frames sent;
-+ NOTE: this counter is not supported on dTSEC MAC */
-+ uint64_t ifOutMcastPkts; /**< Total number of multicast frames sent */
-+ uint64_t ifOutBcastPkts; /**< Total number of broadcast frames sent */
-+ uint64_t ifOutDiscards; /**< Frames received, but discarded due to problems within the MAC TX N/A!.*/
-+ uint64_t ifOutErrors; /**< Number of frames transmitted with error:
-+ - FIFO Overflow Error
-+ - FIFO Underflow Error
-+ - Other */
-+} t_FmMacStatistics;
-+
-+/**************************************************************************//**
-+ @Description FM MAC Frame Size Counters
-+*//***************************************************************************/
-+typedef struct t_FmMacFrameSizeCounters {
-+
-+ uint64_t count_pkts_64; /**< 64 byte frame counter */
-+ uint64_t count_pkts_65_to_127; /**< 65 to 127 byte frame counter */
-+ uint64_t count_pkts_128_to_255; /**< 128 to 255 byte frame counter */
-+ uint64_t count_pkts_256_to_511; /**< 256 to 511 byte frame counter */
-+ uint64_t count_pkts_512_to_1023; /**< 512 to 1023 byte frame counter */
-+ uint64_t count_pkts_1024_to_1518; /**< 1024 to 1518 byte frame counter */
-+ uint64_t count_pkts_1519_to_1522; /**< 1519 to 1522 byte good frame count */
-+} t_FmMacFrameSizeCounters;
-+
-+/**************************************************************************//**
-+ @Group FM_mac_init_grp FM MAC Initialization Unit
-+
-+ @Description FM MAC Initialization Unit
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description FM MAC config input
-+*//***************************************************************************/
-+typedef struct t_FmMacParams {
-+ uintptr_t baseAddr; /**< Base of memory mapped FM MAC registers */
-+ t_EnetAddr addr; /**< MAC address of device; First octet is sent first */
-+ uint8_t macId; /**< MAC ID;
-+ numbering of dTSEC and 1G-mEMAC:
-+ 0 - FM_MAX_NUM_OF_1G_MACS;
-+ numbering of 10G-MAC (TGEC) and 10G-mEMAC:
-+ 0 - FM_MAX_NUM_OF_10G_MACS */
-+ e_EnetMode enetMode; /**< Ethernet operation mode (MAC-PHY interface and speed);
-+ Note that the speed should indicate the maximum rate that
-+ this MAC should support rather than the actual speed;
-+ i.e. user should use the FM_MAC_AdjustLink() routine to
-+ provide accurate speed;
-+ In case of mEMAC RGMII mode, the MAC is configured to RGMII
-+ automatic mode, where actual speed/duplex mode information
-+ is provided by PHY automatically in-band; FM_MAC_AdjustLink()
-+ function should be used to switch to manual RGMII speed/duplex mode
-+ configuration if RGMII PHY doesn't support in-band status signaling;
-+ In addition, in mEMAC, in case where user is using the higher MACs
-+ (i.e. the MACs that should support 10G), user should pass here
-+ speed=10000 even if the interface is not allowing that (e.g. SGMII). */
-+ t_Handle h_Fm; /**< A handle to the FM object this port related to */
-+ int mdioIrq; /**< MDIO exceptions interrupt source - not valid for all
-+ MACs; MUST be set to 'NO_IRQ' for MACs that don't have
-+ mdio-irq, or for polling */
-+ t_FmMacExceptionCallback *f_Event; /**< MDIO Events Callback Routine */
-+ t_FmMacExceptionCallback *f_Exception; /**< Exception Callback Routine */
-+ t_Handle h_App; /**< A handle to an application layer object; This handle will
-+ be passed by the driver upon calling the above callbacks */
-+} t_FmMacParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Config
-+
-+ @Description Creates descriptor for the FM MAC module.
-+
-+ The routine returns a handle (descriptor) to the FM MAC object.
-+ This descriptor must be passed as first parameter to all other
-+ FM MAC function calls.
-+
-+ No actual initialization or configuration of FM MAC hardware is
-+ done by this routine.
-+
-+ @Param[in] p_FmMacParam - Pointer to data structure of parameters
-+
-+ @Retval Handle to FM MAC object, or NULL for Failure.
-+*//***************************************************************************/
-+t_Handle FM_MAC_Config(t_FmMacParams *p_FmMacParam);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Init
-+
-+ @Description Initializes the FM MAC module
-+
-+ @Param[in] h_FmMac - FM module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MAC_Init(t_Handle h_FmMac);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Free
-+
-+ @Description Frees all resources that were assigned to FM MAC module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmMac - FM module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MAC_Free(t_Handle h_FmMac);
-+
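For illustration only (this is not part of the removed patch): a minimal bring-up sketch of the Config/Init/Free lifecycle documented above, assuming the header's types, a valid FM handle, base address and MAC address; the include path, local names and the SGMII enet-mode value are assumptions, and error handling is abbreviated.

    #include <string.h>            /* memset/memcpy */
    #include "fm_mac_ext.h"        /* assumed include path for this header */

    /* Minimal sketch, assuming h_Fm, baseAddr and macAddr are already valid. */
    static t_Handle example_mac_bringup(t_Handle h_Fm, uintptr_t baseAddr,
                                        const t_EnetAddr *macAddr)
    {
        t_FmMacParams params;
        t_Handle      h_FmMac;

        memset(&params, 0, sizeof(params));
        params.baseAddr = baseAddr;               /* memory-mapped MAC registers  */
        memcpy(params.addr, macAddr, sizeof(params.addr));
        params.macId    = 0;                      /* first 1G MAC, for example    */
        params.enetMode = e_ENET_MODE_SGMII_1000; /* assumed enum value           */
        params.h_Fm     = h_Fm;
        params.mdioIrq  = NO_IRQ;                 /* no MDIO interrupt (polling)  */

        h_FmMac = FM_MAC_Config(&params);         /* descriptor only, no HW setup */
        if (!h_FmMac)
            return NULL;

        /* Optional FM_MAC_Config*() calls go here, before FM_MAC_Init(). */
        if (FM_MAC_Init(h_FmMac) != E_OK) {       /* actual HW initialization     */
            FM_MAC_Free(h_FmMac);
            return NULL;
        }
        return h_FmMac;
    }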
-+
-+/**************************************************************************//**
-+ @Group FM_mac_advanced_init_grp FM MAC Advanced Configuration Unit
-+
-+ @Description Configuration functions used to change default values.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigResetOnInit
-+
-+ @Description Tell the driver whether to reset the FM MAC before initialization or
-+ not. It changes the default configuration [DEFAULT_resetOnInit].
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] enable When TRUE, FM will be reset before any initialization.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigResetOnInit(t_Handle h_FmMac, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigLoopback
-+
-+ @Description Enable/Disable internal loopback mode
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] enable TRUE to enable or FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigLoopback(t_Handle h_FmMac, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigMaxFrameLength
-+
-+ @Description Setup maximum Rx Frame Length (in 1G MAC, also affects Tx)
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] newVal MAX Frame length
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigMaxFrameLength(t_Handle h_FmMac, uint16_t newVal);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigWan
-+
-+ @Description ENABLE WAN mode in 10G-MAC
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] enable TRUE to enable or FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigWan(t_Handle h_FmMac, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigPadAndCrc
-+
-+ @Description Config PAD and CRC mode
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] enable TRUE to enable or FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+ Not supported on 10G-MAC (i.e. CRC & PAD are added automatically
-+ by HW); on mEMAC, this routine supports only PAD (i.e. CRC is
-+ added automatically by HW).
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigPadAndCrc(t_Handle h_FmMac, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigHalfDuplex
-+
-+ @Description Config Half Duplex Mode
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] enable TRUE to enable or FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigHalfDuplex(t_Handle h_FmMac, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigTbiPhyAddr
-+
-+ @Description Configures the address of internal TBI PHY.
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] newVal TBI PHY address (1-31).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigTbiPhyAddr(t_Handle h_FmMac, uint8_t newVal);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigLengthCheck
-+
-+ @Description Configure the frame length checking.
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] enable TRUE to enable or FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigLengthCheck(t_Handle h_FmMac, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ConfigException
-+
-+ @Description Change Exception selection from default
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] ex Type of the desired exceptions
-+ @Param[in] enable TRUE to enable the specified exception, FALSE to disable it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ConfigException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable);
-+
-+#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+t_Error FM_MAC_ConfigSkipFman11Workaround (t_Handle h_FmMac);
-+#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */
-+/** @} */ /* end of FM_mac_advanced_init_grp group */
-+/** @} */ /* end of FM_mac_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_mac_runtime_control_grp FM MAC Runtime Control Unit
-+
-+ @Description FM MAC Runtime control unit API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Enable
-+
-+ @Description Enable the MAC
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] mode Mode of operation (RX, TX, Both)
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_Enable(t_Handle h_FmMac, e_CommMode mode);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Disable
-+
-+ @Description DISABLE the MAC
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+ @Param[in] mode Define what part to Disable (RX, TX or BOTH)
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_Disable(t_Handle h_FmMac, e_CommMode mode);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Resume
-+
-+ @Description Re-init the MAC after suspend
-+
-+ @Param[in] h_FmMac A handle to a FM MAC Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_Resume(t_Handle h_FmMac);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Enable1588TimeStamp
-+
-+ @Description Enables the TSU operation.
-+
-+ @Param[in] h_Fm - Handle to the PTP as returned from the FM_MAC_PtpConfig.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_Enable1588TimeStamp(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_Disable1588TimeStamp
-+
-+ @Description Disables the TSU operation.
-+
-+ @Param[in] h_Fm - Handle to the PTP as returned from the FM_MAC_PtpConfig.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_Disable1588TimeStamp(t_Handle h_Fm);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_SetTxAutoPauseFrames
-+
-+ @Description Enable/Disable transmission of Pause-Frames.
-+ The routine changes the default configuration [DEFAULT_TX_PAUSE_TIME].
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] pauseTime - Pause quanta value used with transmitted pause frames.
-+ Each quantum represents 512 bit-times; note that a value
-+ of '0' here disables the transmission of pause-frames.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_SetTxAutoPauseFrames(t_Handle h_FmMac,
-+ uint16_t pauseTime);
-+
-+ /**************************************************************************//**
-+ @Function FM_MAC_SetTxPauseFrames
-+
-+ @Description Enable/Disable transmission of Pause-Frames.
-+ The routine changes the default configuration:
-+ pause-time - [DEFAULT_TX_PAUSE_TIME]
-+ threshold-time - [0]
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] priority - the PFC class of service; use 'FM_MAC_NO_PFC'
-+ to indicate legacy pause support (i.e. no PFC).
-+ @Param[in] pauseTime - Pause quanta value used with transmitted pause frames.
-+ Each quantum represents 512 bit-times;
-+ note that a value of '0' here disables the
-+ transmission of pause-frames.
-+ @Param[in] threshTime - Pause threshold quanta value used by the MAC to retransmit a pause frame.
-+ If the situation that caused a pause frame to be sent has not ended when the timer
-+ reaches the threshold quanta, the MAC will retransmit the pause frame.
-+ Each quanta represents a 512 bit-times.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+ In order for PFC to work properly, the user must configure
-+ TNUM-aging in the Tx port; it is recommended that pre-fetch and
-+ rate limit in the Tx port be disabled;
-+ PFC is supported only on new mEMAC; i.e. in MACs that don't have
-+ PFC support (10G-MAC and dTSEC), user should use 'FM_MAC_NO_PFC'
-+ in the 'priority' field.
-+*//***************************************************************************/
-+t_Error FM_MAC_SetTxPauseFrames(t_Handle h_FmMac,
-+ uint8_t priority,
-+ uint16_t pauseTime,
-+ uint16_t threshTime);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_SetRxIgnorePauseFrames
-+
-+ @Description Enable/Disable ignoring of Pause-Frames.
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] en - boolean indicating whether to ignore incoming pause
-+ frames or not.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_SetRxIgnorePauseFrames(t_Handle h_FmMac, bool en);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_SetWakeOnLan
-+
-+ @Description Enable/Disable Wake On Lan support
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] en - boolean indicating whether to enable Wake On Lan
-+ support or not.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_SetWakeOnLan(t_Handle h_FmMac, bool en);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ResetCounters
-+
-+ @Description reset all statistics counters
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ResetCounters(t_Handle h_FmMac);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_SetException
-+
-+ @Description Enable/Disable a specific Exception
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] ex - Type of the desired exceptions
-+ @Param[in] enable - TRUE to enable the specified exception, FALSE to disable it.
-+
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_SetException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_SetStatistics
-+
-+ @Description Define Statistics level.
-+ Where applicable, the routine also enables the MIB counters
-+ overflow interrupt in order to keep counters accurate
-+ and account for overflows.
-+ This routine is relevant only for dTSEC.
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] statisticsLevel - Full statistics level provides all standard counters but may
-+ reduce performance. Partial statistics provides only special
-+ event counters (errors etc.). If selected, regular counters (such as
-+ byte/packet) will be invalid and will return -1.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_SetStatistics(t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_GetStatistics
-+
-+ @Description get all statistics counters
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] p_Statistics - Structure with statistics
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_GetStatistics(t_Handle h_FmMac, t_FmMacStatistics *p_Statistics);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_GetFrameSizeCounters
-+
-+ @Description get MAC statistics counters for different frame sizes
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] p_FrameSizeCounters - Structure with counters
-+ @Param[in] type - Type of counters to be read
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_GetFrameSizeCounters(t_Handle h_FmMac, t_FmMacFrameSizeCounters *p_FrameSizeCounters, e_CommMode type);
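A short readout sketch (illustrative only) combining the two query routines above; pr_info() is merely a stand-in for whatever logging the caller prefers, and e_COMM_MODE_RX is assumed to be one of the e_CommMode values.

    /* Sketch: query RFC 3635-style and per-frame-size counters after FM_MAC_Init(). */
    t_FmMacStatistics        stats;
    t_FmMacFrameSizeCounters sizes;

    if (FM_MAC_GetStatistics(h_FmMac, &stats) == E_OK)
        pr_info("rx pkts %llu, tx pkts %llu\n",
                (unsigned long long)stats.ifInPkts,
                (unsigned long long)stats.ifOutPkts);

    if (FM_MAC_GetFrameSizeCounters(h_FmMac, &sizes, e_COMM_MODE_RX) == E_OK)
        pr_info("64-byte rx frames: %llu\n",
                (unsigned long long)sizes.count_pkts_64);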
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_ModifyMacAddr
-+
-+ @Description Replace the main MAC Address
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] p_EnetAddr - Ethernet Mac address
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_ModifyMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_AddHashMacAddr
-+
-+ @Description Add an address to the hash table. This is for filtering purposes only.
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] p_EnetAddr - Ethernet Mac address
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init(). It is a filter-only address.
-+ @Cautions Some addresses need to be filtered out in upper FM blocks.
-+*//***************************************************************************/
-+t_Error FM_MAC_AddHashMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_RemoveHashMacAddr
-+
-+ @Description Delete an address from the hash table. This is for filtering purposes only.
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] p_EnetAddr - Ethernet Mac address
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_RemoveHashMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_AddExactMatchMacAddr
-+
-+ @Description Add a unicast or multicast mac address for exact-match filtering
-+ (8 on dTSEC, 2 for 10G-MAC)
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] p_EnetAddr - MAC Address to ADD
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_AddExactMatchMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_RemovelExactMatchMacAddr
-+
-+ @Description Remove a unicast or multicast MAC address.
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] p_EnetAddr - MAC Address to remove
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_RemovelExactMatchMacAddr(t_Handle h_FmMac, t_EnetAddr *p_EnetAddr);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_SetPromiscuous
-+
-+ @Description Enable/Disable MAC Promiscuous mode for ALL mac addresses.
-+
-+ @Param[in] h_FmMac - A handle to a FM MAC Module.
-+ @Param[in] enable - TRUE to enable or FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_SetPromiscuous(t_Handle h_FmMac, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_AdjustLink
-+
-+ @Description Adjusts the Ethernet link with new speed/duplex setup.
-+ This routine is relevant for dTSEC and mEMAC.
-+ In case of mEMAC, this routine is also used for manual
-+ re-configuration of RGMII speed and duplex mode for
-+ RGMII PHYs not supporting in-band status information
-+ to MAC.
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] speed - Ethernet speed.
-+ @Param[in] fullDuplex - TRUE for full-duplex mode;
-+ FALSE for half-duplex mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MAC_AdjustLink(t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_RestartAutoneg
-+
-+ @Description Restarts the auto-negotiation process.
-+ When the auto-negotiation process is invoked under traffic, the
-+ auto-negotiation between the internal SGMII PHY and the
-+ external PHY does not always complete successfully. Calling this
-+ function restarts the auto-negotiation process so that it
-+ completes successfully. It is recommended to call this function
-+ after issuing an auto-negotiation restart command to the Ethernet PHY.
-+ This routine is relevant only for dTSEC.
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MAC_RestartAutoneg(t_Handle h_FmMac);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_GetId
-+
-+ @Description Return the MAC ID
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[out] p_MacId - MAC ID of device
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_GetId(t_Handle h_FmMac, uint32_t *p_MacId);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_GetVesrion
-+
-+ @Description Return Mac HW chip version
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[out] p_MacVresion - Mac version as defined by the chip
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_GetVesrion(t_Handle h_FmMac, uint32_t *p_MacVresion);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_MII_WritePhyReg
-+
-+ @Description Write data into Phy Register
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] phyAddr - Phy Address on the MII bus
-+ @Param[in] reg - Register Number.
-+ @Param[in] data - Data to write.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_MII_WritePhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data);
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_MII_ReadPhyReg
-+
-+ @Description Read data from Phy Register
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+ @Param[in] phyAddr - Phy Address on the MII bus
-+ @Param[in] reg - Register Number.
-+ @Param[out] p_Data - Data from PHY.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_MII_ReadPhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data);
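As a small illustration (not part of the patch), checking link status by reading the PHY's BMSR over the MAC's MII interface could look like the sketch below; the PHY address 0x01 is an arbitrary example.

    /* Sketch: read MII register 1 (BMSR) and test the link-status bit (0x0004). */
    uint16_t bmsr;

    if (FM_MAC_MII_ReadPhyReg(h_FmMac, 0x01 /* example PHY addr */, 1, &bmsr) == E_OK &&
        (bmsr & 0x0004))
        pr_info("PHY reports link up\n");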
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/**************************************************************************//**
-+ @Function FM_MAC_DumpRegs
-+
-+ @Description Dump internal registers
-+
-+ @Param[in] h_FmMac - A handle to a FM Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MAC_Init().
-+*//***************************************************************************/
-+t_Error FM_MAC_DumpRegs(t_Handle h_FmMac);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+/** @} */ /* end of FM_mac_runtime_control_grp group */
-+/** @} */ /* end of FM_mac_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#endif /* __FM_MAC_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_macsec_ext.h
-@@ -0,0 +1,1271 @@
-+/*
-+ * Copyright 2008-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File fm_macsec_ext.h
-+
-+ @Description FM MACSEC ...
-+*//***************************************************************************/
-+#ifndef __FM_MACSEC_EXT_H
-+#define __FM_MACSEC_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_grp FM MACSEC
-+
-+ @Description FM MACSEC API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description MACSEC Exceptions
-+*//***************************************************************************/
-+typedef enum e_FmMacsecExceptions {
-+ e_FM_MACSEC_EX_SINGLE_BIT_ECC, /**< Single bit ECC error */
-+ e_FM_MACSEC_EX_MULTI_BIT_ECC /**< Multi bit ECC error */
-+} e_FmMacsecExceptions;
-+
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_init_grp FM-MACSEC Initialization Unit
-+
-+ @Description FM MACSEC Initialization Unit
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function t_FmMacsecExceptionsCallback
-+
-+ @Description Exceptions user callback routine, will be called upon an
-+ exception passing the exception identification.
-+
-+ @Param[in] h_App A handle to an application layer object; This handle
-+ will be passed by the driver upon calling this callback.
-+ @Param[in] exception The exception.
-+*//***************************************************************************/
-+typedef void (t_FmMacsecExceptionsCallback) ( t_Handle h_App,
-+ e_FmMacsecExceptions exception);
-+
-+
-+/**************************************************************************//**
-+ @Description FM MACSEC config input
-+*//***************************************************************************/
-+typedef struct t_FmMacsecParams {
-+ t_Handle h_Fm; /**< A handle to the FM object related to */
-+ bool guestMode; /**< TRUE when the MACSEC module is used from a guest partition */
-+ union {
-+ struct {
-+ uint8_t fmMacId; /**< FM MAC id */
-+ } guestParams;
-+
-+ struct {
-+ uintptr_t baseAddr; /**< Base of memory mapped FM MACSEC registers */
-+ t_Handle h_FmMac; /**< A handle to the FM MAC object related to */
-+ t_FmMacsecExceptionsCallback *f_Exception; /**< Exception Callback Routine */
-+ t_Handle h_App; /**< A handle to an application layer object; This handle will
-+ be passed by the driver upon calling the above callbacks */
-+ } nonGuestParams;
-+ };
-+} t_FmMacsecParams;
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_Config
-+
-+ @Description Creates descriptor for the FM MACSEC module;
-+
-+ The routine returns a handle (descriptor) to the FM MACSEC object;
-+ This descriptor must be passed as first parameter to all other
-+ FM MACSEC function calls;
-+
-+ No actual initialization or configuration of FM MACSEC hardware is
-+ done by this routine.
-+
-+ @Param[in] p_FmMacsecParam Pointer to data structure of parameters.
-+
-+ @Retval Handle to FM MACSEC object, or NULL for Failure.
-+*//***************************************************************************/
-+t_Handle FM_MACSEC_Config(t_FmMacsecParams *p_FmMacsecParam);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_Init
-+
-+ @Description Initializes the FM MACSEC module.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MACSEC_Init(t_Handle h_FmMacsec);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_Free
-+
-+ @Description Frees all resources that were assigned to FM MACSEC module;
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MACSEC_Free(t_Handle h_FmMacsec);
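For orientation, a non-guest MACSEC bring-up following the Config/Init order documented here might look like the sketch below; macsecBase, h_FmMac, exception_cb and my_ctx are hypothetical caller-provided values, and this is a sketch rather than a definitive sequence.

    /* Sketch only: non-guest MACSEC attached to an already-initialized FM MAC. */
    t_FmMacsecParams p = {
        .h_Fm      = h_Fm,
        .guestMode = FALSE,
        .nonGuestParams = {
            .baseAddr    = macsecBase,     /* memory-mapped MACSEC registers */
            .h_FmMac     = h_FmMac,
            .f_Exception = exception_cb,   /* t_FmMacsecExceptionsCallback   */
            .h_App       = my_ctx,
        },
    };
    t_Handle h_FmMacsec = FM_MACSEC_Config(&p);

    if (h_FmMacsec && FM_MACSEC_Init(h_FmMacsec) == E_OK)
        FM_MACSEC_Enable(h_FmMacsec);      /* see the runtime-control group below */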
-+
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_advanced_init_grp FM-MACSEC Advanced Configuration Unit
-+
-+ @Description Configuration functions used to change default values.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description enum for unknown sci frame treatment
-+*//***************************************************************************/
-+typedef enum e_FmMacsecUnknownSciFrameTreatment {
-+ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_BOTH = 0, /**< Controlled port - Strict mode */
-+ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED, /**< If C bit clear deliver on controlled port, else discard
-+ Controlled port - Check or Disable mode */
-+ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED, /**< Controlled port - Strict mode */
-+ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_OR_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED /**< If C bit set deliver on uncontrolled port and discard on controlled port,
-+ else discard on uncontrolled port and deliver on controlled port
-+ Controlled port - Check or Disable mode */
-+} e_FmMacsecUnknownSciFrameTreatment;
-+
-+/**************************************************************************//**
-+ @Description enum for untag frame treatment
-+*//***************************************************************************/
-+typedef enum e_FmMacsecUntagFrameTreatment {
-+ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED = 0, /**< Controlled port - Strict mode */
-+ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_BOTH, /**< Controlled port - Strict mode */
-+ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_CONTROLLED_UNMODIFIED /**< Controlled port - Strict mode */
-+} e_FmMacsecUntagFrameTreatment;
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigUnknownSciFrameTreatment
-+
-+ @Description Change the treatment for received frames with unknown sci from its default
-+ configuration [DEFAULT_unknownSciFrameTreatment].
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] treatMode The selected mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigUnknownSciFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUnknownSciFrameTreatment treatMode);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigInvalidTagsFrameTreatment
-+
-+ @Description Change the treatment for received frames with invalid tags or
-+ a zero value PN or an invalid ICV from its default configuration
-+ [DEFAULT_invalidTagsFrameTreatment].
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] deliverUncontrolled If True deliver on the uncontrolled port, else discard;
-+ In both cases discard on the controlled port;
-+ this provides Strict, Check or Disable mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigInvalidTagsFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment
-+
-+ @Description Change the treatment for received frames with the Encryption bit
-+ set and the Changed Text bit clear from its default configuration
-+ [DEFAULT_encryptWithNoChangedTextFrameTreatment].
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] discardUncontrolled If True discard on the uncontrolled port, else deliver;
-+ In both cases discard on the controlled port;
-+ this provides Strict, Check or Disable mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(t_Handle h_FmMacsec, bool discardUncontrolled);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment
-+
-+ @Description Change the treatment for received frames with the Encryption bit
-+ clear and the Changed Text bit set from its default configuration
-+ [DEFAULT_changedTextWithNoEncryptFrameTreatment].
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] deliverUncontrolled If True deliver on the uncontrolled port, else discard;
-+ In both cases discard on the controlled port;
-+ this provides Strict, Check or Disable mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigChangedTextWithNoEncryptFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigUntagFrameTreatment
-+
-+ @Description Change the treatment for received frames without the MAC security tag (SecTAG)
-+ from its default configuration [DEFAULT_untagFrameTreatment].
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] treatMode The selected mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigUntagFrameTreatment(t_Handle h_FmMacsec, e_FmMacsecUntagFrameTreatment treatMode);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment
-+
-+ @Description Change the treatment for received frames with only SCB bit set
-+ from its default configuration [DEFAULT_onlyScbIsSetFrameTreatment].
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] deliverUncontrolled If True deliver on the uncontrolled port, else discard;
-+ In both cases discard on the controlled port;
-+ this provides Strict, Check or Disable mode.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigOnlyScbIsSetFrameTreatment(t_Handle h_FmMacsec, bool deliverUncontrolled);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigPnExhaustionThreshold
-+
-+ @Description Provides the ability to configure a PN exhaustion threshold;
-+ when the NextPn crosses this value, an interrupt event
-+ is asserted to warn that the active SA should re-key.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] pnExhThr If the threshold is reached, an interrupt event
-+ is asserted to re-key.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigPnExhaustionThreshold(t_Handle h_FmMacsec, uint32_t pnExhThr);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigKeysUnreadable
-+
-+ @Description Turn on privacy mode; all the keys and their hash values can no longer be read;
-+ this setting cannot be cleared except by a hard reset.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigKeysUnreadable(t_Handle h_FmMacsec);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigSectagWithoutSCI
-+
-+ @Description Ensure that every generated SecTAG is created without the SCI included.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigSectagWithoutSCI(t_Handle h_FmMacsec);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_ConfigException
-+
-+ @Description Calling this routine changes the internal driver database
-+ from its default selection of enabled exceptions;
-+ by default all exceptions are enabled.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Config() and before FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_ConfigException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
-+
-+/** @} */ /* end of FM_MACSEC_advanced_init_grp group */
-+/** @} */ /* end of FM_MACSEC_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_runtime_control_grp FM-MACSEC Runtime Control Data Unit
-+
-+ @Description FM MACSEC runtime control data unit API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_GetRevision
-+
-+ @Description Return MACSEC HW chip revision
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[out] p_MacsecRevision MACSEC revision as defined by the chip.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_GetRevision(t_Handle h_FmMacsec, uint32_t *p_MacsecRevision);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_Enable
-+
-+ @Description This routine should be called after MACSEC is initialized for enabling all
-+ MACSEC engines according to their existing configuration.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Init() and when MACSEC is disabled.
-+*//***************************************************************************/
-+t_Error FM_MACSEC_Enable(t_Handle h_FmMacsec);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_Disable
-+
-+ @Description This routine may be called when MACSEC is enabled in order to
-+ disable all MACSEC engines; The MACSEC is working in bypass mode.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Init() and when MACSEC is enabled.
-+*//***************************************************************************/
-+t_Error FM_MACSEC_Disable(t_Handle h_FmMacsec);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SetException
-+
-+ @Description Calling this routine enables/disables the specified exception.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SetException(t_Handle h_FmMacsec, e_FmMacsecExceptions exception, bool enable);
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/**************************************************************************//**
-+ @Function FM_MACSEC_DumpRegs
-+
-+ @Description Dump internal registers.
-+
-+ @Param[in] h_FmMacsec - FM MACSEC module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_DumpRegs(t_Handle h_FmMacsec);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+#ifdef VERIFICATION_SUPPORT
-+/********************* VERIFICATION ONLY ********************************/
-+/**************************************************************************//**
-+ @Function FM_MACSEC_BackdoorSet
-+
-+ @Description Set register of the MACSEC memory map
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] offset Register offset.
-+ @Param[in] value Value to write.
-+
-+
-+ @Return None
-+
-+ @Cautions Allowed only following FM_MACSEC_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_BackdoorSet(t_Handle h_FmMacsec, uint32_t offset, uint32_t value);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_BackdoorGet
-+
-+ @Description Read from register of the MACSEC memory map.
-+
-+ @Param[in] h_FmMacsec FM MACSEC module descriptor.
-+ @Param[in] offset Register offset.
-+
-+ @Return Value read
-+
-+ @Cautions Allowed only following FM_MACSEC_Init().
-+*//***************************************************************************/
-+uint32_t FM_MACSEC_BackdoorGet(t_Handle h_FmMacsec, uint32_t offset);
-+#endif /* VERIFICATION_SUPPORT */
-+
-+/** @} */ /* end of FM_MACSEC_runtime_control_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_SECY_grp FM-MACSEC SecY
-+
-+ @Description FM-MACSEC SecY API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+typedef uint8_t macsecSAKey_t[32];
-+typedef uint64_t macsecSCI_t;
-+typedef uint8_t macsecAN_t;
-+
-+/**************************************************************************//**
-+@Description MACSEC SECY Cipher Suite
-+*//***************************************************************************/
-+typedef enum e_FmMacsecSecYCipherSuite {
-+ e_FM_MACSEC_SECY_GCM_AES_128 = 0, /**< GCM-AES-128 */
-+#if (DPAA_VERSION >= 11)
-+ e_FM_MACSEC_SECY_GCM_AES_256 /**< GCM-AES-256 */
-+#endif /* (DPAA_VERSION >= 11) */
-+} e_FmMacsecSecYCipherSuite;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SECY Exceptions
-+*//***************************************************************************/
-+typedef enum e_FmMacsecSecYExceptions {
-+ e_FM_MACSEC_SECY_EX_FRAME_DISCARDED /**< Frame Discarded */
-+} e_FmMacsecSecYExceptions;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SECY Events
-+*//***************************************************************************/
-+typedef enum e_FmMacsecSecYEvents {
-+ e_FM_MACSEC_SECY_EV_NEXT_PN /**< Next Packet Number exhaustion threshold reached */
-+} e_FmMacsecSecYEvents;
-+
-+/**************************************************************************//**
-+ @Collection MACSEC SECY Frame Discarded Descriptor error
-+*//***************************************************************************/
-+typedef uint8_t macsecTxScFrameDiscardedErrSelect_t; /**< typedef for defining Frame Discarded Descriptor errors */
-+
-+#define FM_MACSEC_SECY_TX_SC_FRM_DISCAR_ERR_NEXT_PN_ZERO 0x8000 /**< NextPn == 0 */
-+#define FM_MACSEC_SECY_TX_SC_FRM_DISCAR_ERR_SC_DISBALE 0x4000 /**< SC is disabled */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Function t_FmMacsecSecYExceptionsCallback
-+
-+ @Description Exceptions user callback routine, will be called upon an
-+ exception passing the exception identification.
-+
-+ @Param[in] h_App A handle to an application layer object; This handle
-+ will be passed by the driver upon calling this callback.
-+ @Param[in] exception The exception.
-+*//***************************************************************************/
-+typedef void (t_FmMacsecSecYExceptionsCallback) ( t_Handle h_App,
-+ e_FmMacsecSecYExceptions exception);
-+
-+/**************************************************************************//**
-+ @Function t_FmMacsecSecYEventsCallback
-+
-+ @Description Events user callback routine, will be called upon an
-+ event passing the event identification.
-+
-+ @Param[in] h_App A handle to an application layer object; This handle
-+ will be passed by the driver upon calling this callback.
-+ @Param[in] event The event.
-+*//***************************************************************************/
-+typedef void (t_FmMacsecSecYEventsCallback) ( t_Handle h_App,
-+ e_FmMacsecSecYEvents event);
-+
-+/**************************************************************************//**
-+ @Description RFC2863 MIB
-+*//***************************************************************************/
-+typedef struct t_MIBStatistics {
-+ uint64_t ifInOctets; /**< Total number of byte received */
-+ uint64_t ifInPkts; /**< Total number of packets received */
-+ uint64_t ifInMcastPkts; /**< Total number of multicast frame received */
-+ uint64_t ifInBcastPkts; /**< Total number of broadcast frame received */
-+ uint64_t ifInDiscards; /**< Frames received, but discarded due to problems within the MAC RX :
-+ - InPktsNoTag,
-+ - InPktsLate,
-+ - InPktsOverrun */
-+ uint64_t ifInErrors; /**< Number of frames received with error:
-+ - InPktsBadTag,
-+ - InPktsNoSCI,
-+ - InPktsNotUsingSA
-+ - InPktsNotValid */
-+ uint64_t ifOutOctets; /**< Total number of byte sent */
-+ uint64_t ifOutPkts; /**< Total number of packets sent */
-+ uint64_t ifOutMcastPkts; /**< Total number of multicast frame sent */
-+ uint64_t ifOutBcastPkts; /**< Total number of broadcast frames sent */
-+ uint64_t ifOutDiscards; /**< Frames received, but discarded due to problems within the MAC TX N/A! */
-+ uint64_t ifOutErrors; /**< Number of frames transmitted with error:
-+ - FIFO Overflow Error
-+ - FIFO Underflow Error
-+ - Other */
-+} t_MIBStatistics;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SecY Rx SA Statistics
-+*//***************************************************************************/
-+typedef struct t_FmMacsecSecYRxSaStatistics {
-+ uint32_t inPktsOK; /**< The number of frames with resolved SCI that have passed all
-+ frame validation with the validateFrame not set to disable */
-+ uint32_t inPktsInvalid; /**< The number of frames with resolved SCI, that have failed frame
-+ validation with the validateFrame set to check */
-+ uint32_t inPktsNotValid; /**< The number of frames with resolved SCI, discarded on the controlled port,
-+ that have failed frame validation with the validateFrame set to strict or the c bit is set */
-+ uint32_t inPktsNotUsingSA; /**< The number of frames received with resolved SCI and discarded on disabled or
-+ not provisioned SA with validateFrame in the strict mode or the C bit is set */
-+ uint32_t inPktsUnusedSA; /**< The number of frames received with resolved SCI on disabled or not provisioned SA
-+ with validateFrame not in the strict mode and the C bit is cleared */
-+} t_FmMacsecSecYRxSaStatistics;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SecY Tx SA Statistics
-+*//***************************************************************************/
-+typedef struct t_FmMacsecSecYTxSaStatistics {
-+ uint64_t outPktsProtected; /**< The number of frames, that the user of the controlled port requested to
-+ be transmitted, which were integrity protected */
-+ uint64_t outPktsEncrypted; /**< The number of frames, that the user of the controlled port requested to
-+ be transmitted, which were confidentiality protected */
-+} t_FmMacsecSecYTxSaStatistics;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SecY Rx SC Statistics
-+*//***************************************************************************/
-+typedef struct t_FmMacsecSecYRxScStatistics {
-+ uint64_t inPktsUnchecked; /**< The number of frames with resolved SCI, delivered to the user of a controlled port,
-+ that are not validated with the validateFrame set to disable */
-+ uint64_t inPktsDelayed; /**< The number of frames with resolved SCI, delivered to the user of a controlled port,
-+ that have their PN smaller than the lowest_PN with the validateFrame set to
-+ disable or replayProtect disabled */
-+ uint64_t inPktsLate; /**< The number of frames with resolved SCI, discarded on the controlled port,
-+ that have their PN smaller than the lowest_PN with the validateFrame set to
-+ Check or Strict and replayProtect enabled */
-+ uint64_t inPktsOK; /**< The number of frames with resolved SCI that have passed all
-+ frame validation with the validateFrame not set to disable */
-+ uint64_t inPktsInvalid; /**< The number of frames with resolved SCI, that have failed frame
-+ validation with the validateFrame set to check */
-+ uint64_t inPktsNotValid; /**< The number of frames with resolved SCI, discarded on the controlled port,
-+ that have failed frame validation with the validateFrame set to strict or the c bit is set */
-+ uint64_t inPktsNotUsingSA; /**< The number of frames received with resolved SCI and discarded on disabled or
-+ not provisioned SA with validateFrame in the strict mode or the C bit is set */
-+ uint64_t inPktsUnusedSA; /**< The number of frames received with resolved SCI on disabled or not provisioned SA
-+ with validateFrame not in the strict mode and the C bit is cleared */
-+} t_FmMacsecSecYRxScStatistics;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SecY Tx SC Statistics
-+*//***************************************************************************/
-+typedef struct t_FmMacsecSecYTxScStatistics {
-+ uint64_t outPktsProtected; /**< The number of frames, that the user of the controlled port requested to
-+ be transmitted, which were integrity protected */
-+ uint64_t outPktsEncrypted; /**< The number of frames, that the user of the controlled port requested to
-+ be transmitted, which were confidentiality protected */
-+} t_FmMacsecSecYTxScStatistics;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SecY Statistics
-+*//***************************************************************************/
-+typedef struct t_FmMacsecSecYStatistics {
-+ t_MIBStatistics mibCtrlStatistics; /**< Controlled port MIB statistics */
-+ t_MIBStatistics mibNonCtrlStatistics; /**< Uncontrolled port MIB statistics */
-+/* Frame verification statistics */
-+ uint64_t inPktsUntagged; /**< The number of received packets without the MAC security tag
-+ (SecTAG) with validateFrames which is not in the strict mode */
-+ uint64_t inPktsNoTag; /**< The number of received packets discarded without the
-+ MAC security tag (SecTAG) with validateFrames which is in the strict mode */
-+ uint64_t inPktsBadTag; /**< The number of received packets discarded with an invalid
-+ SecTAG or a zero value PN or an invalid ICV */
-+ uint64_t inPktsUnknownSCI; /**< The number of received packets with unknown SCI with the
-+ condition : validateFrames is not in the strict mode and the
-+ C bit in the SecTAG is not set */
-+ uint64_t inPktsNoSCI; /**< The number of received packets discarded with unknown SCI
-+ information with the condition : validateFrames is in the strict mode
-+ or the C bit in the SecTAG is set */
-+ uint64_t inPktsOverrun; /**< The number of packets discarded because the number of
-+ received packets exceeded the cryptographic performance capabilities */
-+/* Frame validation statistics */
-+ uint64_t inOctetsValidated; /**< The number of octets of plaintext recovered from received frames with
-+ resolved SCI that were integrity protected but not encrypted */
-+ uint64_t inOctetsDecrypted; /**< The number of octets of plaintext recovered from received frames with
-+ resolved SCI that were integrity protected and encrypted */
-+/* Frame generation statistics */
-+ uint64_t outPktsUntagged; /**< The number of frames, that the user of the controlled port requested to
-+ be transmitted, with protectFrame false */
-+ uint64_t outPktsTooLong; /**< The number of frames, that the user of the controlled port requested to
-+ be transmitted, discarded due to length being larger than Maximum Frame Length (MACSEC_MFL) */
-+/* Frame protection statistics */
-+ uint64_t outOctetsProtected; /**< The number of octets of User Data in transmitted frames that were
-+ integrity protected but not encrypted */
-+ uint64_t outOctetsEncrypted; /**< The number of octets of User Data in transmitted frames that were
-+ both integrity protected and encrypted */
-+} t_FmMacsecSecYStatistics;
-+
-+
-+/**************************************************************************//**
-+ @Description MACSEC SecY SC Params
-+*//***************************************************************************/
-+typedef struct t_FmMacsecSecYSCParams {
-+ macsecSCI_t sci; /**< The secure channel identification of the SC */
-+ e_FmMacsecSecYCipherSuite cipherSuite; /**< Cipher suite to be used for the SC */
-+} t_FmMacsecSecYSCParams;
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_SECY_init_grp FM-MACSEC SecY Initialization Unit
-+
-+ @Description FM-MACSEC SecY Initialization Unit
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Enumeration for frame validation behavior
-+*//***************************************************************************/
-+typedef enum e_FmMacsecValidFrameBehavior {
-+ e_FM_MACSEC_VALID_FRAME_BEHAVIOR_DISABLE = 0, /**< disable the validation function */
-+ e_FM_MACSEC_VALID_FRAME_BEHAVIOR_CHECK, /**< enable the validation function but only for checking
-+ without filtering out invalid frames */
-+ e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT /**< enable the validation function and also strictly filter
-+ out those invalid frames */
-+} e_FmMacsecValidFrameBehavior;
-+
-+/**************************************************************************//**
-+ @Description Enumeration for SCI insertion mode
-+*//***************************************************************************/
-+typedef enum e_FmMacsecSciInsertionMode {
-+ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG = 0, /**< Explicit SCI in the SecTAG */
-+ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_MAC_SA, /**< MAC SA is overwritten with the SCI */
-+ e_FM_MACSEC_SCI_INSERTION_MODE_IMPLICT_PTP /**< Implicit point-to-point SCI (pre-shared) */
-+} e_FmMacsecSciInsertionMode;
-+
-+/**************************************************************************//**
-+ @Description FM MACSEC SecY config input
-+*//***************************************************************************/
-+typedef struct t_FmMacsecSecYParams {
-+ t_Handle h_FmMacsec; /**< A handle to the FM MACSEC object */
-+ t_FmMacsecSecYSCParams txScParams; /**< Tx SC Params */
-+ uint32_t numReceiveChannels; /**< Number of receive channels dedicated to this SecY */
-+ t_FmMacsecSecYExceptionsCallback *f_Exception; /**< Callback routine to be called by the driver upon SecY exception */
-+ t_FmMacsecSecYEventsCallback *f_Event; /**< Callback routine to be called by the driver upon SecY event */
-+ t_Handle h_App; /**< A handle to an application layer object; This handle will
-+ be passed by the driver upon calling the above callbacks */
-+} t_FmMacsecSecYParams;
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_Config
-+
-+ @Description Creates descriptor for the FM MACSEC SECY module;
-+
-+ The routine returns a handle (descriptor) to the FM MACSEC SECY object;
-+ This descriptor must be passed as first parameter to all other
-+ FM MACSEC SECY function calls;
-+ No actual initialization or configuration of FM MACSEC SecY hardware is
-+ done by this routine.
-+
-+ @Param[in] p_FmMacsecSecYParam Pointer to data structure of parameters.
-+
-+ @Return Handle to FM MACSEC SECY object, or NULL for Failure.
-+*//***************************************************************************/
-+t_Handle FM_MACSEC_SECY_Config(t_FmMacsecSecYParams *p_FmMacsecSecYParam);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_Init
-+
-+ @Description Initializes the FM MACSEC SECY module.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_Init(t_Handle h_FmMacsecSecY);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_Free
-+
-+ @Description Frees all resources that were assigned to FM MACSEC SECY module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_Free(t_Handle h_FmMacsecSecY);
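To tie Config/Init/Free together, a hedged bring-up sketch that reuses the hypothetical callbacks from the earlier sketch; the cipher-suite enumerator is an assumed value (the e_FmMacsecSecYCipherSuite members are not shown here), and error handling is reduced to NULL/E_OK checks.

#include <string.h>                 /* memset */

static t_Handle secy_bring_up(t_Handle h_fm_macsec, macsecSCI_t tx_sci,
                              app_secy_ctx *ctx)
{
    t_FmMacsecSecYParams params;
    t_Handle             h_secy;

    memset(&params, 0, sizeof(params));
    params.h_FmMacsec             = h_fm_macsec;
    params.txScParams.sci         = tx_sci;
    params.txScParams.cipherSuite = e_FM_MACSEC_SECY_GCM_AES_128; /* assumed enumerator */
    params.numReceiveChannels     = 1;
    params.f_Exception            = app_secy_exception;  /* from the earlier sketch */
    params.f_Event                = app_secy_event;
    params.h_App                  = ctx;

    h_secy = FM_MACSEC_SECY_Config(&params);
    if (!h_secy)
        return NULL;

    /* Any FM_MACSEC_SECY_Config*() tuning must happen here, before Init(). */

    if (FM_MACSEC_SECY_Init(h_secy) != E_OK) {
        FM_MACSEC_SECY_Free(h_secy);
        return NULL;
    }
    return h_secy;
}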
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_SECY_advanced_init_grp FM-MACSEC SecY Advanced Configuration Unit
-+
-+ @Description Configuration functions used to change default values.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigSciInsertionMode
-+
-+ @Description Calling this routine changes the SCI-insertion-mode in the
-+ internal driver data base from its default configuration
-+ [DEFAULT_sciInsertionMode]
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] sciInsertionMode Sci insertion mode
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
-+
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigSciInsertionMode(t_Handle h_FmMacsecSecY, e_FmMacsecSciInsertionMode sciInsertionMode);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigProtectFrames
-+
-+ @Description Calling this routine changes the protect-frame mode in the
-+ internal driver data base from its default configuration
-+ [DEFAULT_protectFrames]
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] protectFrames If FALSE, frames are transmitted without modification
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
-+
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigProtectFrames(t_Handle h_FmMacsecSecY, bool protectFrames);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigReplayWindow
-+
-+ @Description Calling this routine changes the replay-window settings in the
-+ internal driver data base from its default configuration
-+ [DEFAULT_replayEnable], [DEFAULT_replayWindow]
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] replayProtect Replay protection function mode
-+ @Param[in] replayWindow The size of the replay window
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
-+
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigReplayWindow(t_Handle h_FmMacsecSecY, bool replayProtect, uint32_t replayWindow);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigValidationMode
-+
-+ @Description Calling this routine changes the frame-validation-behavior mode
-+ in the internal driver data base from its default configuration
-+ [DEFAULT_validateFrames]
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] validateFrames Validation function mode
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
-+
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigValidationMode(t_Handle h_FmMacsecSecY, e_FmMacsecValidFrameBehavior validateFrames);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigConfidentiality
-+
-+ @Description Calling this routine changes the confidentiality settings in the
-+ internal driver data base from its default configuration
-+ [DEFAULT_confidentialityEnable], [DEFAULT_confidentialityOffset]
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] confidentialityEnable TRUE - confidentiality protection and integrity protection
-+ FALSE - no confidentiality protection, only integrity protection
-+ @Param[in] confidentialityOffset The number of initial octets of each MSDU without confidentiality protection
-+ common values are 0, 30, and 50
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
-+
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigConfidentiality(t_Handle h_FmMacsecSecY, bool confidentialityEnable, uint16_t confidentialityOffset);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigPointToPoint
-+
-+ @Description Configures this SecY to work in point-to-point mode, meaning that
-+ it will have only one Rx SC;
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init();
-+ Can be called only once in a system; only the first SecY that calls this
-+ routine will be able to operate in Point-To-Point mode.
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigPointToPoint(t_Handle h_FmMacsecSecY);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigException
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default selection of exceptions enablement;
-+ By default all exceptions are enabled.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigException(t_Handle h_FmMacsecSecY, e_FmMacsecSecYExceptions exception, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_ConfigEvent
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default selection of events enablement;
-+ By default all events are enabled.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] event The event to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_ConfigEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable);
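A small sketch making the @Cautions ordering concrete: the advanced configuration calls below are only legal in the window between FM_MACSEC_SECY_Config() and FM_MACSEC_SECY_Init(). The chosen validation mode, replay window and confidentiality offset are illustrative values, not recommendations.

static t_Error secy_tune_defaults(t_Handle h_secy)
{
    t_Error err;

    /* Legal only after FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init(). */
    err = FM_MACSEC_SECY_ConfigValidationMode(h_secy,
                                              e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT);
    if (err != E_OK)
        return err;

    err = FM_MACSEC_SECY_ConfigReplayWindow(h_secy, TRUE, 64 /* illustrative window */);
    if (err != E_OK)
        return err;

    /* Integrity plus confidentiality, with no clear-text confidentiality offset. */
    return FM_MACSEC_SECY_ConfigConfidentiality(h_secy, TRUE, 0);
}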
-+
-+/** @} */ /* end of FM_MACSEC_SECY_advanced_init_grp group */
-+/** @} */ /* end of FM_MACSEC_SECY_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_MACSEC_SECY_runtime_control_grp FM-MACSEC SecY Runtime Control Unit
-+
-+ @Description FM MACSEC SECY Runtime control unit API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_CreateRxSc
-+
-+ @Description Create a receive secure channel.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] p_ScParams Secure channel parameters.
-+
-+ @Return Handle to the created Rx SC on success; NULL otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Handle FM_MACSEC_SECY_CreateRxSc(t_Handle h_FmMacsecSecY, t_FmMacsecSecYSCParams *p_ScParams);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_DeleteRxSc
-+
-+ @Description Deleting an initialized secure channel.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSc().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_DeleteRxSc(t_Handle h_FmMacsecSecY, t_Handle h_Sc);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_CreateRxSa
-+
-+ @Description Create a receive secure association for the secure channel;
-+ the SA cannot be used to receive frames until FM_MACSEC_SECY_RxSaEnableReceive is called.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[in] an Association number representing the SA.
-+ @Param[in] lowestPn the lowest acceptable PN value for a received frame.
-+ @Param[in] key the desired key for this SA.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSc().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_CreateRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t lowestPn, macsecSAKey_t key);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_DeleteRxSa
-+
-+ @Description Deleting an initialized secure association.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[in] an Association number representing the SA.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_DeleteRxSa(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_RxSaEnableReceive
-+
-+ @Description Enabling the SA to receive frames.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[in] an Association number representing the SA.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_RxSaEnableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an);
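An illustrative Rx-side sequence built from the calls above, assuming an initialized SecY: create an Rx SC for the peer's SCI, add an SA on a chosen AN with an initial key, then enable reception on that SA. The cipher-suite enumerator and the lowestPn of 1 are assumptions.

#include <string.h>                 /* memset */

static t_Handle secy_add_rx_path(t_Handle h_secy, macsecSCI_t peer_sci,
                                 macsecAN_t an, macsecSAKey_t key)
{
    t_FmMacsecSecYSCParams sc_params;
    t_Handle               h_rx_sc;

    memset(&sc_params, 0, sizeof(sc_params));
    sc_params.sci         = peer_sci;
    sc_params.cipherSuite = e_FM_MACSEC_SECY_GCM_AES_128; /* assumed enumerator */

    h_rx_sc = FM_MACSEC_SECY_CreateRxSc(h_secy, &sc_params);
    if (!h_rx_sc)
        return NULL;

    /* lowestPn = 1: accept whatever PN the peer starts from (assumption). */
    if (FM_MACSEC_SECY_CreateRxSa(h_secy, h_rx_sc, an, 1, key) != E_OK ||
        FM_MACSEC_SECY_RxSaEnableReceive(h_secy, h_rx_sc, an) != E_OK) {
        FM_MACSEC_SECY_DeleteRxSc(h_secy, h_rx_sc);
        return NULL;
    }
    return h_rx_sc;
}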
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_RxSaDisableReceive
-+
-+ @Description Disable the SA from receiving frames.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[in] an Association number representing the SA.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_RxSaDisableReceive(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_RxSaUpdateNextPn
-+
-+ @Description Update the next packet number expected on RX;
-+ The value of nextPN shall be set to the greater of its existing value and the
-+ supplied updtNextPN (802.1AE-2006 10.7.15).
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[in] an Association number representing the SA.
-+ @Param[in] updtNextPN the next PN value for a received frame.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_RxSaUpdateNextPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtNextPN);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_RxSaUpdateLowestPn
-+
-+ @Description Update the lowest packet number expected on RX;
-+ The value of lowestPN shall be set to the greater of its existing value and the
-+ supplied updtLowestPN (802.1AE-2006 10.7.15).
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[in] an Association number representing the SA.
-+ @Param[in] updtLowestPN the lowest PN acceptable value for a received frame.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_RxSaUpdateLowestPn(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, uint32_t updtLowestPN);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_RxSaModifyKey
-+
-+ @Description Modify the current key of the SA with a new one.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[in] an Association number representing the SA.
-+ @Param[in] key new key to replace the current key.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSa().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_RxSaModifyKey(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, macsecSAKey_t key);
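One plausible Rx rekey sequence composed from the calls above: quiesce the SA, swap the key, then re-enable it. Whether the disable/enable framing is strictly required by the hardware is not stated in this header, so treat the ordering as an assumption.

static t_Error secy_rx_rekey(t_Handle h_secy, t_Handle h_rx_sc,
                             macsecAN_t an, macsecSAKey_t new_key)
{
    t_Error err;

    err = FM_MACSEC_SECY_RxSaDisableReceive(h_secy, h_rx_sc, an);
    if (err != E_OK)
        return err;

    err = FM_MACSEC_SECY_RxSaModifyKey(h_secy, h_rx_sc, an, new_key);
    if (err != E_OK)
        return err;

    return FM_MACSEC_SECY_RxSaEnableReceive(h_secy, h_rx_sc, an);
}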
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_CreateTxSa
-+
-+ @Description Create a transmit secure association for the secure channel;
-+ the SA cannot be used to transmit frames until FM_MACSEC_SECY_TxSaSetActive is called;
-+ Only one SA can be active at a time.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] an Association number representing the SA.
-+ @Param[in] key the desired key for this SA.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_CreateTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an, macsecSAKey_t key);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_DeleteTxSa
-+
-+ @Description Deleting an initialized secure association.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] an Association number representing the SA.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_DeleteTxSa(t_Handle h_FmMacsecSecY, macsecAN_t an);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_TxSaModifyKey
-+
-+ @Description Modify the key of the inactive SA with a new one.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] nextActiveAn Association number representing the next SA to be activated.
-+ @Param[in] key new key to replace the current key.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_TxSaModifyKey(t_Handle h_FmMacsecSecY, macsecAN_t nextActiveAn, macsecSAKey_t key);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_TxSaSetActive
-+
-+ @Description Set this SA as the active SA to be used for TX on the SC;
-+ only one SA can be active at a time.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] an Association number representing the SA.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_TxSaSetActive(t_Handle h_FmMacsecSecY, macsecAN_t an);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_TxSaGetActive
-+
-+ @Description Get the active SA that is being used for TX.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[out] p_An the active an.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_TxSaGetActive(t_Handle h_FmMacsecSecY, macsecAN_t *p_An);
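A Tx-side sketch: install an SA and make it active, and later rotate to another AN by modifying the inactive SA's key and then activating it. Whether the next AN must already have been created with FM_MACSEC_SECY_CreateTxSa() is an assumption here.

static t_Error secy_tx_start(t_Handle h_secy, macsecAN_t an, macsecSAKey_t key)
{
    t_Error err = FM_MACSEC_SECY_CreateTxSa(h_secy, an, key);

    if (err != E_OK)
        return err;
    return FM_MACSEC_SECY_TxSaSetActive(h_secy, an);
}

static t_Error secy_tx_rotate(t_Handle h_secy, macsecAN_t next_an,
                              macsecSAKey_t next_key)
{
    /* next_an is assumed to name an already-created, currently inactive SA. */
    t_Error err = FM_MACSEC_SECY_TxSaModifyKey(h_secy, next_an, next_key);

    if (err != E_OK)
        return err;
    return FM_MACSEC_SECY_TxSaSetActive(h_secy, next_an);
}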
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_GetStatistics
-+
-+ @Description get all statistics counters.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] p_Statistics Structure with statistics.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_GetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYStatistics *p_Statistics);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_RxScGetStatistics
-+
-+ @Description get all statistics counters.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc Rx Sc handle.
-+ @Param[in] p_Statistics Structure with statistics.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_RxScGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, t_FmMacsecSecYRxScStatistics *p_Statistics);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_RxSaGetStatistics
-+
-+ @Description get all statistics counters
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc Rx Sc handle.
-+ @Param[in] an Association number representing the SA.
-+ @Param[in] p_Statistics Structure with statistics.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_RxSaGetStatistics(t_Handle h_FmMacsecSecY, t_Handle h_Sc, macsecAN_t an, t_FmMacsecSecYRxSaStatistics *p_Statistics);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_TxScGetStatistics
-+
-+ @Description get all statistics counters.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] p_Statistics Structure with statistics.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_TxScGetStatistics(t_Handle h_FmMacsecSecY, t_FmMacsecSecYTxScStatistics *p_Statistics);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_TxSaGetStatistics
-+
-+ @Description get all statistics counters.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] an Association number representing the SA.
-+ @Param[in] p_Statistics Structure with statistics.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_TxSaGetStatistics(t_Handle h_FmMacsecSecY, macsecAN_t an, t_FmMacsecSecYTxSaStatistics *p_Statistics);
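A short sketch of reading the SecY-level and Tx SC counters defined earlier in this header; the user-space-style printf is purely for illustration of which fields exist.

#include <stdio.h>
#include <inttypes.h>

static void secy_dump_stats(t_Handle h_secy)
{
    t_FmMacsecSecYStatistics     stats;
    t_FmMacsecSecYTxScStatistics tx_sc;

    if (FM_MACSEC_SECY_GetStatistics(h_secy, &stats) == E_OK)
        printf("inPktsNoTag=%" PRIu64 " outPktsTooLong=%" PRIu64 "\n",
               stats.inPktsNoTag, stats.outPktsTooLong);

    if (FM_MACSEC_SECY_TxScGetStatistics(h_secy, &tx_sc) == E_OK)
        printf("outPktsProtected=%" PRIu64 " outPktsEncrypted=%" PRIu64 "\n",
               tx_sc.outPktsProtected, tx_sc.outPktsEncrypted);
}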
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_SetException
-+
-+ @Description Calling this routine enables/disables the specified exception.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_SetException(t_Handle h_FmMacsecSecY, e_FmMacsecExceptions exception, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_SetEvent
-+
-+ @Description Calling this routine enables/disables the specified event.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] event The event to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Config() and before FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_SetEvent(t_Handle h_FmMacsecSecY, e_FmMacsecSecYEvents event, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_GetRxScPhysId
-+
-+ @Description return the physical id of the Secure Channel.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[in] h_Sc SC handle as returned by FM_MACSEC_SECY_CreateRxSc.
-+ @Param[out] p_ScPhysId the SC physical id.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_CreateRxSc().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_GetRxScPhysId(t_Handle h_FmMacsecSecY, t_Handle h_Sc, uint32_t *p_ScPhysId);
-+
-+/**************************************************************************//**
-+ @Function FM_MACSEC_SECY_GetTxScPhysId
-+
-+ @Description return the physical id of the Secure Channel.
-+
-+ @Param[in] h_FmMacsecSecY FM MACSEC SECY module descriptor.
-+ @Param[out] p_ScPhysId the SC physical id.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MACSEC_SECY_Init().
-+*//***************************************************************************/
-+t_Error FM_MACSEC_SECY_GetTxScPhysId(t_Handle h_FmMacsecSecY, uint32_t *p_ScPhysId);
-+
-+/** @} */ /* end of FM_MACSEC_SECY_runtime_control_grp group */
-+/** @} */ /* end of FM_MACSEC_SECY_grp group */
-+/** @} */ /* end of FM_MACSEC_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#endif /* __FM_MACSEC_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_muram_ext.h
-@@ -0,0 +1,170 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_muram_ext.h
-+
-+ @Description FM MURAM Application Programming Interface.
-+*//***************************************************************************/
-+#ifndef __FM_MURAM_EXT
-+#define __FM_MURAM_EXT
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_muram_grp FM MURAM
-+
-+ @Description FM MURAM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_muram_init_grp FM MURAM Initialization Unit
-+
-+ @Description FM MURAM initialization API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_MURAM_ConfigAndInit
-+
-+ @Description Creates a partition in the MURAM.
-+
-+ The routine returns a handle (descriptor) to the MURAM partition.
-+ This descriptor must be passed as first parameter to all other
-+ FM-MURAM function calls.
-+
-+ No actual initialization or configuration of FM_MURAM hardware is
-+ done by this routine.
-+
-+ @Param[in] baseAddress - Pointer to base of memory mapped FM-MURAM.
-+ @Param[in] size - Size of the FM-MURAM partition.
-+
-+ @Return Handle to FM-MURAM object, or NULL for Failure.
-+*//***************************************************************************/
-+t_Handle FM_MURAM_ConfigAndInit(uintptr_t baseAddress, uint32_t size);
-+
-+/**************************************************************************//**
-+ @Function FM_MURAM_Free
-+
-+ @Description Frees all resources that were assigned to FM-MURAM module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmMuram - FM-MURAM module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MURAM_Free(t_Handle h_FmMuram);
-+
-+/** @} */ /* end of FM_muram_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_muram_ctrl_grp FM MURAM Control Unit
-+
-+ @Description FM MURAM control API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_MURAM_AllocMem
-+
-+ @Description Allocate some memory from FM-MURAM partition.
-+
-+ @Param[in] h_FmMuram - FM-MURAM module descriptor.
-+ @Param[in] size - size of the memory to be allocated.
-+ @Param[in] align - Alignment of the memory.
-+
-+ @Return address of the allocated memory; NULL otherwise.
-+*//***************************************************************************/
-+void * FM_MURAM_AllocMem(t_Handle h_FmMuram, uint32_t size, uint32_t align);
-+
-+/**************************************************************************//**
-+ @Function FM_MURAM_AllocMemForce
-+
-+ @Description Allocate some specific memory from FM-MURAM partition (according
-+ to base).
-+
-+ @Param[in] h_FmMuram - FM-MURAM module descriptor.
-+ @Param[in] base - the desired base-address to be allocated.
-+ @Param[in] size - size of the memory to be allocated.
-+
-+ @Return address of the allocated memory; NULL otherwise.
-+*//***************************************************************************/
-+void * FM_MURAM_AllocMemForce(t_Handle h_FmMuram, uint64_t base, uint32_t size);
-+
-+/**************************************************************************//**
-+ @Function FM_MURAM_FreeMem
-+
-+ @Description Free allocated memory from the FM-MURAM partition.
-+
-+ @Param[in] h_FmMuram - FM-MURAM module descriptor.
-+ @Param[in] ptr - A pointer to an allocated memory.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_MURAM_FreeMem(t_Handle h_FmMuram, void *ptr);
-+
-+/**************************************************************************//**
-+ @Function FM_MURAM_GetFreeMemSize
-+
-+ @Description Returns the size (in bytes) of free MURAM memory.
-+
-+ @Param[in] h_FmMuram - FM-MURAM module descriptor.
-+
-+ @Return Free MURAM memory size in bytes.
-+*//***************************************************************************/
-+uint64_t FM_MURAM_GetFreeMemSize(t_Handle h_FmMuram);
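A minimal MURAM lifecycle sketch built only from the calls above; the base address, size and alignment values are placeholders for whatever the platform actually provides.

#include "fm_muram_ext.h"           /* assumed include path for this header */

static void muram_example(uintptr_t muram_base, uint32_t muram_size)
{
    t_Handle h_muram;
    void    *buf;

    h_muram = FM_MURAM_ConfigAndInit(muram_base, muram_size);
    if (!h_muram)
        return;

    /* 256 bytes, 64-byte aligned - values are illustrative only. */
    buf = FM_MURAM_AllocMem(h_muram, 256, 64);
    if (buf) {
        /* ... use the MURAM-backed buffer ... */
        FM_MURAM_FreeMem(h_muram, buf);
    }

    FM_MURAM_Free(h_muram);
}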
-+
-+/** @} */ /* end of FM_muram_ctrl_grp group */
-+/** @} */ /* end of FM_muram_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+
-+#endif /* __FM_MURAM_EXT */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_pcd_ext.h
-@@ -0,0 +1,3974 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_pcd_ext.h
-+
-+ @Description FM PCD API definitions
-+*//***************************************************************************/
-+#ifndef __FM_PCD_EXT
-+#define __FM_PCD_EXT
-+
-+#include "std_ext.h"
-+#include "net_ext.h"
-+#include "list_ext.h"
-+#include "fm_ext.h"
-+#include "fsl_fman_kg.h"
-+
-+
-+/**************************************************************************//**
-+ @Group FM_grp Frame Manager API
-+
-+ @Description Frame Manager Application Programming Interface
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_PCD_grp FM PCD
-+
-+ @Description Frame Manager PCD (Parse-Classify-Distribute) API.
-+
-+ The FM PCD module is responsible for the initialization of all
-+ global classifying FM modules. This includes the parser general and
-+ common registers, the key generator global and common registers,
-+ and the policer global and common registers.
-+ In addition, the FM PCD SW module will initialize all required
-+ key generator schemes, coarse classification flows, and policer
-+ profiles. When the FM module is configured to work with one of these
-+ entities, it registers with it using the FM PORT API. The PCD
-+ module manages the PCD resources - i.e. resource management of
-+ KeyGen schemes, etc.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection General PCD defines
-+*//***************************************************************************/
-+#define FM_PCD_MAX_NUM_OF_PRIVATE_HDRS 2 /**< Number of units/headers saved for user */
-+
-+#define FM_PCD_PRS_NUM_OF_HDRS 16 /**< Number of headers supported by HW parser */
-+#define FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS (32 - FM_PCD_MAX_NUM_OF_PRIVATE_HDRS)
-+ /**< Number of distinction units is limited by
-+ register size (32 bits) minus reserved bits
-+ for private headers. */
-+#define FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS 4 /**< Maximum number of interchangeable headers
-+ in a distinction unit */
-+#define FM_PCD_KG_NUM_OF_GENERIC_REGS FM_KG_NUM_OF_GENERIC_REGS /**< Total number of generic KeyGen registers */
-+#define FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY 35 /**< Max number allowed on any configuration;
-+ For HW implementation reasons, in most
-+ cases less than this will be allowed; The
-+ driver will return an initialization error
-+ if resource is unavailable. */
-+#define FM_PCD_KG_NUM_OF_EXTRACT_MASKS 4 /**< Total number of masks allowed on KeyGen extractions. */
-+#define FM_PCD_KG_NUM_OF_DEFAULT_GROUPS 16 /**< Number of default value logical groups */
-+
-+#define FM_PCD_PRS_NUM_OF_LABELS 32 /**< Maximum number of SW parser labels */
-+#define FM_SW_PRS_MAX_IMAGE_SIZE (FM_PCD_SW_PRS_SIZE /*- FM_PCD_PRS_SW_OFFSET -FM_PCD_PRS_SW_TAIL_SIZE*/-FM_PCD_PRS_SW_PATCHES_SIZE)
-+ /**< Maximum size of SW parser code */
-+
-+#define FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE 128 /**< Maximum size of insertion template for
-+ insert manipulation */
-+
-+#if (DPAA_VERSION >= 11)
-+#define FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES 64 /**< Maximum possible entries for frame replicator group */
-+#endif /* (DPAA_VERSION >= 11) */
-+/* @} */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_PCD_init_grp FM PCD Initialization Unit
-+
-+ @Description Frame Manager PCD Initialization Unit API
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description PCD counters
-+*//***************************************************************************/
-+typedef enum e_FmPcdCounters {
-+ e_FM_PCD_KG_COUNTERS_TOTAL, /**< KeyGen counter */
-+ e_FM_PCD_PLCR_COUNTERS_RED, /**< Policer counter - counts the total number of RED packets that exit the Policer. */
-+ e_FM_PCD_PLCR_COUNTERS_YELLOW, /**< Policer counter - counts the total number of YELLOW packets that exit the Policer. */
-+ e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED, /**< Policer counter - counts the number of packets that changed color to RED by the Policer;
-+ This is a subset of e_FM_PCD_PLCR_COUNTERS_RED packet count, indicating active color changes. */
-+ e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW, /**< Policer counter - counts the number of packets that changed color to YELLOW by the Policer;
-+ This is a subset of e_FM_PCD_PLCR_COUNTERS_YELLOW packet count, indicating active color changes. */
-+ e_FM_PCD_PLCR_COUNTERS_TOTAL, /**< Policer counter - counts the total number of packets passed in the Policer. */
-+ e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH, /**< Policer counter - counts the number of packets with length mismatch. */
-+ e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH, /**< Parser counter - counts the number of times the parser block is dispatched. */
-+ e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L2 parse result is returned (including errors). */
-+ e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L3 parse result is returned (including errors). */
-+ e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L4 parse result is returned (including errors). */
-+ e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times SHIM parse result is returned (including errors). */
-+ e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L2 parse result is returned with errors. */
-+ e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L3 parse result is returned with errors. */
-+ e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L4 parse result is returned with errors. */
-+ e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times SHIM parse result is returned with errors. */
-+ e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES, /**< Parser counter - counts the number of cycles spent executing soft parser instruction (including stall cycles). */
-+ e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES, /**< Parser counter - counts the number of cycles stalled waiting for parser internal memory reads while executing soft parser instruction. */
-+ e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES, /**< Parser counter - counts the number of cycles spent executing hard parser (including stall cycles). */
-+ e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory read. */
-+ e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory read. */
-+ e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory write. */
-+ e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory write. */
-+ e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES /**< FPM counter - counts the number of cycles stalled while performing a FPM Command. */
-+} e_FmPcdCounters;
-+
-+/**************************************************************************//**
-+ @Description PCD interrupts
-+*//***************************************************************************/
-+typedef enum e_FmPcdExceptions {
-+ e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, /**< KeyGen double-bit ECC error is detected on internal memory read access. */
-+ e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, /**< KeyGen scheme configuration error indicating a key size larger than 56 bytes. */
-+ e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC, /**< Policer double-bit ECC error has been detected on PRAM read access. */
-+ e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR, /**< Policer access to a non-initialized profile has been detected. */
-+ e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE, /**< Policer RAM self-initialization complete */
-+ e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE, /**< Policer atomic action complete */
-+ e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC, /**< Parser double-bit ECC error */
-+ e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC /**< Parser single-bit ECC error */
-+} e_FmPcdExceptions;
-+
-+
-+/**************************************************************************//**
-+ @Description Exceptions user callback routine, will be called upon an
-+ exception passing the exception identification.
-+
-+ @Param[in] h_App - User's application descriptor.
-+ @Param[in] exception - The exception.
-+ *//***************************************************************************/
-+typedef void (t_FmPcdExceptionCallback) (t_Handle h_App, e_FmPcdExceptions exception);
-+
-+/**************************************************************************//**
-+ @Description Exceptions user callback routine, will be called upon an exception
-+ passing the exception identification.
-+
-+ @Param[in] h_App - User's application descriptor.
-+ @Param[in] exception - The exception.
-+ @Param[in] index - id of the relevant source (may be scheme or profile id).
-+ *//***************************************************************************/
-+typedef void (t_FmPcdIdExceptionCallback) ( t_Handle h_App,
-+ e_FmPcdExceptions exception,
-+ uint16_t index);
-+
-+/**************************************************************************//**
-+ @Description A callback for enqueuing frame onto a QM queue.
-+
-+ @Param[in] h_QmArg - Application's handle passed to QM module on enqueue.
-+ @Param[in] p_Fd - Frame descriptor for the frame.
-+
-+ @Return E_OK on success; Error code otherwise.
-+ *//***************************************************************************/
-+typedef t_Error (t_FmPcdQmEnqueueCallback) (t_Handle h_QmArg, void *p_Fd);
-+
-+/**************************************************************************//**
-+ @Description Host-Command parameters structure.
-+
-+ When using Host command for PCD functionalities, a dedicated port
-+ must be used. If this routine is called for a PCD in a single partition
-+ environment, or it is the Master partition in a Multi-partition
-+ environment, the port will be initialized by the PCD driver
-+ initialization routine.
-+ *//***************************************************************************/
-+typedef struct t_FmPcdHcParams {
-+ uintptr_t portBaseAddr; /**< Virtual Address of Host-Command Port memory mapped registers.*/
-+ uint8_t portId; /**< Port Id (0-6 relative to Host-Command/Offline-Parsing ports);
-+ NOTE: When configuring Host Command port for
-+ FMANv3 devices (DPAA_VERSION 11 and higher),
-+ portId=0 MUST be used. */
-+ uint16_t liodnBase; /**< LIODN base for this port, to be used together with LIODN offset
-+ (irrelevant for P4080 revision 1.0) */
-+ uint32_t errFqid; /**< Host-Command Port error queue Id. */
-+ uint32_t confFqid; /**< Host-Command Port confirmation queue Id. */
-+ uint32_t qmChannel; /**< QM channel dedicated to this Host-Command port;
-+ will be used by the FM for dequeue. */
-+ t_FmPcdQmEnqueueCallback *f_QmEnqueue; /**< Callback routine for enqueuing a frame to the QM */
-+ t_Handle h_QmArg; /**< Application's handle passed to QM module on enqueue */
-+} t_FmPcdHcParams;
-+
-+/**************************************************************************//**
-+ @Description The main structure for PCD initialization
-+ *//***************************************************************************/
-+typedef struct t_FmPcdParams {
-+ bool prsSupport; /**< TRUE if Parser will be used for any of the FM ports. */
-+ bool ccSupport; /**< TRUE if Coarse Classification will be used for any
-+ of the FM ports. */
-+ bool kgSupport; /**< TRUE if KeyGen will be used for any of the FM ports. */
-+ bool plcrSupport; /**< TRUE if Policer will be used for any of the FM ports. */
-+ t_Handle h_Fm; /**< A handle to the FM module. */
-+ uint8_t numOfSchemes; /**< Number of schemes dedicated to this partition.
-+ This parameter is relevant if 'kgSupport'=TRUE. */
-+ bool useHostCommand; /**< Optional for single partition, Mandatory for Multi partition */
-+ t_FmPcdHcParams hc; /**< Host Command parameters, relevant only if 'useHostCommand'=TRUE;
-+ Relevant when FM does not run in "guest-mode". */
-+
-+ t_FmPcdExceptionCallback *f_Exception; /**< Callback routine for general PCD exceptions;
-+ Relevant when FM does not run in "guest-mode". */
-+ t_FmPcdIdExceptionCallback *f_ExceptionId; /**< Callback routine for specific KeyGen scheme or
-+ Policer profile exceptions;
-+ Relevant when FM does not run in "guest-mode". */
-+ t_Handle h_App; /**< A handle to an application layer object; This handle will
-+ be passed by the driver upon calling the above callbacks;
-+ Relevant when FM is not running in "guest-mode". */
-+ uint8_t partPlcrProfilesBase; /**< The first policer-profile-id dedicated to this partition.
-+ This parameter is relevant if 'plcrSupport'=TRUE.
-+ NOTE: this parameter is relevant only when working with multiple partitions. */
-+ uint16_t partNumOfPlcrProfiles; /**< Number of policer-profiles dedicated to this partition.
-+ This parameter is relevant if 'plcrSupport'=TRUE.
-+ NOTE: this parameter is relevant only when working with multiple partitions. */
-+} t_FmPcdParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_Config
-+
-+ @Description Basic configuration of the PCD module.
-+ Creates descriptor for the FM PCD module.
-+
-+ @Param[in] p_FmPcdParams A structure of parameters for the initialization of PCD.
-+
-+ @Return A handle to the initialized module.
-+*//***************************************************************************/
-+t_Handle FM_PCD_Config(t_FmPcdParams *p_FmPcdParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_Init
-+
-+ @Description Initialization of the PCD module.
-+
-+ @Param[in] h_FmPcd - FM PCD module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PCD_Init(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_Free
-+
-+ @Description Frees all resources that were assigned to FM module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmPcd - FM PCD module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PCD_Free(t_Handle h_FmPcd);
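The three routines above define the PCD lifecycle: FM_PCD_Config() creates the descriptor, any FM_PCD_Config*() tuning happens next, FM_PCD_Init() commits the configuration, and FM_PCD_Free() invalidates the descriptor. A minimal sketch of that sequence, assuming a single-partition (master) setup with parser and KeyGen support only, an FM handle obtained elsewhere from the FM driver, and the usual C runtime in scope; the helper name pcd_bring_up and the error handling are illustrative, not part of this header:

    /* Sketch only: bring up a PCD instance with parser + KeyGen support in a
     * single-partition (master) setup; no Host-Command port is configured. */
    static t_Handle pcd_bring_up(t_Handle h_Fm)
    {
        t_FmPcdParams pcdParams;
        t_Handle      h_FmPcd;

        memset(&pcdParams, 0, sizeof(pcdParams));
        pcdParams.h_Fm         = h_Fm;
        pcdParams.prsSupport   = TRUE;  /* Parser used by at least one port */
        pcdParams.kgSupport    = TRUE;  /* KeyGen used by at least one port */
        pcdParams.numOfSchemes = 1;     /* KeyGen schemes for this partition */
        /* useHostCommand stays FALSE: optional for a single partition */

        h_FmPcd = FM_PCD_Config(&pcdParams);    /* create the descriptor */
        if (!h_FmPcd)
            return NULL;

        /* advanced FM_PCD_Config*() calls belong here, before FM_PCD_Init() */

        if (FM_PCD_Init(h_FmPcd) != E_OK) {
            FM_PCD_Free(h_FmPcd);               /* invalidates the descriptor */
            return NULL;
        }
        return h_FmPcd;
    }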
-+
-+/**************************************************************************//**
-+ @Group FM_PCD_advanced_cfg_grp FM PCD Advanced Configuration Unit
-+
-+ @Description Frame Manager PCD Advanced Configuration API.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ConfigException
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default selection of enabled exceptions.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_ConfigException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ConfigHcFramesDataMemory
-+
-+ @Description Configures memory-partition-id for FMan-Controller Host-Command
-+ frames. Calling this routine changes the internal driver data
-+ base from its default configuration [0].
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] memId Memory partition ID.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions This routine may be called only if 'useHostCommand' was TRUE
-+ when FM_PCD_Config() routine was called.
-+*//***************************************************************************/
-+t_Error FM_PCD_ConfigHcFramesDataMemory(t_Handle h_FmPcd, uint8_t memId);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ConfigPlcrNumOfSharedProfiles
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default number of shared policer profiles
-+ [DEFAULT_numOfSharedPlcrProfiles].
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] numOfSharedPlcrProfiles Number of profiles to
-+ be shared between ports on this partition
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PCD_ConfigPlcrNumOfSharedProfiles(t_Handle h_FmPcd, uint16_t numOfSharedPlcrProfiles);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ConfigPlcrAutoRefreshMode
-+
-+ @Description Calling this routine changes the internal driver data base
-+ from its default policer auto-refresh mode.
-+ By default, auto-refresh is [DEFAULT_plcrAutoRefresh].
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] enable TRUE to enable, FALSE to disable
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_ConfigPlcrAutoRefreshMode(t_Handle h_FmPcd, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ConfigPrsMaxCycleLimit
-+
-+ @Description Calling this routine changes the internal data structure for
-+ the maximum parsing time from its default value
-+ [DEFAULT_MAX_PRS_CYC_LIM].
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] value 0 to disable the mechanism, or new
-+ maximum parsing time.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_ConfigPrsMaxCycleLimit(t_Handle h_FmPcd, uint16_t value);
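All routines in this advanced group share the same placement rule: they are only legal between FM_PCD_Config() and FM_PCD_Init(), and most of them only from the master partition. A hedged sketch of that configuration slot; the helper name and the numeric values are placeholders, not recommendations:

    /* Sketch only: tune the PCD between FM_PCD_Config() and FM_PCD_Init(). */
    static t_Error pcd_tune(t_Handle h_FmPcd)
    {
        t_Error err;

        err = FM_PCD_ConfigPlcrNumOfSharedProfiles(h_FmPcd, 4);
        if (err != E_OK)
            return err;

        err = FM_PCD_ConfigPlcrAutoRefreshMode(h_FmPcd, TRUE); /* master only */
        if (err != E_OK)
            return err;

        return FM_PCD_ConfigPrsMaxCycleLimit(h_FmPcd, 1024);   /* 0 disables */
    }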
-+
-+/** @} */ /* end of FM_PCD_advanced_cfg_grp group */
-+/** @} */ /* end of FM_PCD_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_PCD_Runtime_grp FM PCD Runtime Unit
-+
-+ @Description Frame Manager PCD Runtime Unit API
-+
-+ The runtime control allows creation of PCD infrastructure modules
-+ such as Network Environment Characteristics, Classification Plan
-+ Groups and Coarse Classification Trees.
-+ It also allows on-the-fly initialization, modification and removal
-+ of PCD modules such as KeyGen schemes, coarse classification nodes
-+ and Policer profiles.
-+
-+ In order to explain the programming model of the PCD driver interface
-+ a few terms should be defined first; they are used below.
-+ - Distinction Header - One of the 16 protocols supported by the FM parser,
-+ or one of the SHIM headers (1 or 2). May be a header with a special
-+ option (see below).
-+ - Interchangeable Headers Group - This is a group of Headers recognized
-+ by either one of them. For example, if in a specific context the user
-+ chooses to treat IPv4 and IPV6 in the same way, they may create an
-+ interchangeable Headers Unit consisting of these 2 headers.
-+ - A Distinction Unit - a Distinction Header or an Interchangeable Headers
-+ Group.
-+ - Header with special option - applies to Ethernet, MPLS, VLAN, IPv4 and
-+ IPv6, includes multicast, broadcast and other protocol specific options.
-+ In terms of hardware it relates to the options available in the classification
-+ plan.
-+ - Network Environment Characteristics - a set of Distinction Units that define
-+ the total recognizable header selection for a certain environment. This is
-+ NOT the list of all headers that will ever appear in a flow, but rather
-+ everything that needs distinction in a flow, where distinction is made by KeyGen
-+ schemes and coarse classification action descriptors.
-+
-+ The PCD runtime modules initialization is done in stages. The first stage after
-+ initializing the PCD module itself is to establish a Network Flows Environment
-+ Definition. The application may choose to establish one or more such environments.
-+ Later, when needed, the application will have to state, for some of its modules,
-+ to which single environment it belongs.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description A structure for SW parser labels
-+ *//***************************************************************************/
-+typedef struct t_FmPcdPrsLabelParams {
-+ uint32_t instructionOffset; /**< SW parser label instruction offset (2 bytes
-+ resolution), relative to Parser RAM. */
-+ e_NetHeaderType hdr; /**< The existence of this header will invoke
-+ the SW parser code; Use HEADER_TYPE_NONE
-+ to indicate that the SW parser is to run
-+ independently of the existence of any protocol
-+ (i.e. run before the HW parser). */
-+ uint8_t indexPerHdr; /**< Normally 0, if more than one SW parser
-+ attachments for the same header, use this
-+ index to distinguish between them. */
-+} t_FmPcdPrsLabelParams;
-+
-+/**************************************************************************//**
-+ @Description A structure for SW parser
-+ *//***************************************************************************/
-+typedef struct t_FmPcdPrsSwParams {
-+ bool override; /**< FALSE to invoke a check that nothing else
-+ was loaded to this address, including
-+ internal patches.
-+ TRUE to override any existing code.*/
-+ uint32_t size; /**< SW parser code size */
-+ uint16_t base; /**< SW parser base (in instruction counts!
-+ must be larger than 0x20)*/
-+ uint8_t *p_Code; /**< SW parser code */
-+ uint32_t swPrsDataParams[FM_PCD_PRS_NUM_OF_HDRS];
-+ /**< SW parser data (parameters) */
-+ uint8_t numOfLabels; /**< Number of labels for SW parser. */
-+ t_FmPcdPrsLabelParams labelsTable[FM_PCD_PRS_NUM_OF_LABELS];
-+ /**< SW parser labels table, containing
-+ numOfLabels entries */
-+} t_FmPcdPrsSwParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_Enable
-+
-+ @Description This routine should be called after the PCD is initialized, to enable all
-+ PCD engines according to their existing configuration.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
-+*//***************************************************************************/
-+t_Error FM_PCD_Enable(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_Disable
-+
-+ @Description This routine may be called when PCD is enabled in order to
-+ disable all PCD engines. It may be called
-+ only when none of the ports in the system are using the PCD.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init() and when PCD is enabled.
-+*//***************************************************************************/
-+t_Error FM_PCD_Disable(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_GetCounter
-+
-+ @Description Reads one of the FM PCD counters.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] counter The requested counter.
-+
-+ @Return Counter's current value.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ Note that it is the user's responsibility to call this routine only
-+ for enabled counters, and there will be no indication if a
-+ disabled counter is accessed.
-+*//***************************************************************************/
-+uint32_t FM_PCD_GetCounter(t_Handle h_FmPcd, e_FmPcdCounters counter);
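After initialization the engines are switched on as a whole, and counters are read per e_FmPcdCounters selector. A small sketch, taking the selector as a parameter because the individual enumerator names are defined elsewhere in the header; remember the caution above that only enabled counters return meaningful values:

    /* Sketch only: enable all configured PCD engines and read one counter. */
    static uint32_t pcd_enable_and_sample(t_Handle h_FmPcd, e_FmPcdCounters counter)
    {
        if (FM_PCD_Enable(h_FmPcd) != E_OK)
            return 0;
        /* the caller must pass a counter that is actually enabled */
        return FM_PCD_GetCounter(h_FmPcd, counter);
    }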
-+
-+/**************************************************************************//**
-+@Function FM_PCD_PrsLoadSw
-+
-+@Description This routine may be called in order to load software parsing code.
-+
-+
-+@Param[in] h_FmPcd FM PCD module descriptor.
-+@Param[in] p_SwPrs A pointer to a structure of software
-+ parser parameters, including the software
-+ parser image.
-+
-+@Return E_OK on success; Error code otherwise.
-+
-+@Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_PrsLoadSw(t_Handle h_FmPcd, t_FmPcdPrsSwParams *p_SwPrs);
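Loading software parser code therefore means wrapping a pre-built microcode image in t_FmPcdPrsSwParams and declaring at least one label. A sketch under stated assumptions: the image pointer and size come from a hypothetical build step, the base and offsets are placeholders, HEADER_TYPE_IPv4 is an e_NetHeaderType value defined elsewhere in the driver headers, and the call is master-partition-only with the PCD disabled:

    /* Sketch only: attach a hypothetical SW parser image that runs after the
     * HW parser has recognized IPv4; offsets and sizes are placeholders. */
    static t_Error pcd_load_sw_parser(t_Handle h_FmPcd,
                                      uint8_t *p_Image, uint32_t imageSize)
    {
        t_FmPcdPrsSwParams swPrs;

        memset(&swPrs, 0, sizeof(swPrs));
        swPrs.override    = FALSE;       /* refuse to clobber existing code */
        swPrs.p_Code      = p_Image;
        swPrs.size        = imageSize;
        swPrs.base        = 0x40;        /* instruction counts, must be > 0x20 */
        swPrs.numOfLabels = 1;
        swPrs.labelsTable[0].instructionOffset = 0;
        swPrs.labelsTable[0].hdr               = HEADER_TYPE_IPv4;
        swPrs.labelsTable[0].indexPerHdr       = 0;

        return FM_PCD_PrsLoadSw(h_FmPcd, &swPrs);
    }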
-+
-+/**************************************************************************//**
-+@Function FM_PCD_SetAdvancedOffloadSupport
-+
-+@Description This routine must be called in order to support the following features:
-+ IP-fragmentation, IP-reassembly, IPsec, Header-manipulation, frame-replicator.
-+
-+@Param[in] h_FmPcd FM PCD module descriptor.
-+
-+@Return E_OK on success; Error code otherwise.
-+
-+@Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_SetAdvancedOffloadSupport(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSetDfltValue
-+
-+ @Description Calling this routine sets a global default value to be used
-+ by the KeyGen when the parser does not recognize a required
-+ field/header.
-+ By default, both global default values are 0.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] valueId 0,1 - one of 2 global default values.
-+ @Param[in] value The requested default value.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_KgSetDfltValue(t_Handle h_FmPcd, uint8_t valueId, uint32_t value);
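A common pattern is to make the two global defaults easy to tell apart, for example all-ones in one register and zero in the other; which one a given extraction falls back to is chosen per extraction via e_FmPcdKgExtractDfltSelect further below. A hedged snippet, values arbitrary:

    /* Sketch only: program the two KeyGen global default registers. */
    static t_Error pcd_set_kg_defaults(t_Handle h_FmPcd)
    {
        t_Error err = FM_PCD_KgSetDfltValue(h_FmPcd, 0, 0xFFFFFFFF);
        if (err != E_OK)
            return err;
        return FM_PCD_KgSetDfltValue(h_FmPcd, 1, 0x00000000);
    }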
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSetAdditionalDataAfterParsing
-+
-+ @Description Calling this routine allows the KeyGen to access data past
-+ the parser finishing point.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] payloadOffset the number of bytes beyond the parser location.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled.
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_KgSetAdditionalDataAfterParsing(t_Handle h_FmPcd, uint8_t payloadOffset);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_SetException
-+
-+ @Description Calling this routine enables/disables PCD interrupts.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_SetException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ModifyCounter
-+
-+ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] counter The requested counter.
-+ @Param[in] value The requested value to be written into the counter.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_ModifyCounter(t_Handle h_FmPcd, e_FmPcdCounters counter, uint32_t value);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_SetPlcrStatistics
-+
-+ @Description This routine may be used to enable/disable the policer statistics
-+ counter. By default, statistics are enabled.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor
-+ @Param[in] enable TRUE to enable, FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_SetPlcrStatistics(t_Handle h_FmPcd, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_SetPrsStatistics
-+
-+ @Description Defines whether to gather parser statistics for all ports.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] enable TRUE to enable, FALSE to disable.
-+
-+ @Return None
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+void FM_PCD_SetPrsStatistics(t_Handle h_FmPcd, bool enable);
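The two statistics switches are independent and differ in return type: the policer one reports an error code, the parser one returns nothing. A small sketch that keeps policer statistics on and turns parser statistics off; the choice itself is arbitrary:

    /* Sketch only: both calls are master-partition-only, after FM_PCD_Init(). */
    static t_Error pcd_setup_statistics(t_Handle h_FmPcd)
    {
        FM_PCD_SetPrsStatistics(h_FmPcd, FALSE);        /* returns void */
        return FM_PCD_SetPlcrStatistics(h_FmPcd, TRUE);
    }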
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HcTxConf
-+
-+ @Description This routine should be called to confirm frames that were
-+ received on the HC confirmation queue.
-+
-+ @Param[in] h_FmPcd A handle to an FM PCD Module.
-+ @Param[in] p_Fd Frame descriptor of the received frame.
-+
-+ @Cautions Allowed only following FM_PCD_Init(). Allowed only if 'useHostCommand'
-+ option was selected in the initialization.
-+*//***************************************************************************/
-+void FM_PCD_HcTxConf(t_Handle h_FmPcd, t_DpaaFD *p_Fd);
-+
-+/**************************************************************************//*
-+ @Function FM_PCD_ForceIntr
-+
-+ @Description Causes an interrupt event on the requested source.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] exception An exception to be forced.
-+
-+ @Return E_OK on success; Error code if the exception is not enabled,
-+ or is not able to create interrupt.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PCD_ForceIntr (t_Handle h_FmPcd, e_FmPcdExceptions exception);
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/**************************************************************************//**
-+ @Function FM_PCD_DumpRegs
-+
-+ @Description Dumps all PCD registers
-+
-+ @Param[in] h_FmPcd A handle to an FM PCD Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ NOTE: this routine may be called only for FM in master mode
-+ (i.e. 'guestId'=NCSW_MASTER_ID) or in a case that the registers
-+ are mapped.
-+*//***************************************************************************/
-+t_Error FM_PCD_DumpRegs(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgDumpRegs
-+
-+ @Description Dumps all PCD KG registers
-+
-+ @Param[in] h_FmPcd A handle to an FM PCD Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ NOTE: this routine may be called only for FM in master mode
-+ (i.e. 'guestId'=NCSW_MASTER_ID) or in a case that the registers
-+ are mapped.
-+*//***************************************************************************/
-+t_Error FM_PCD_KgDumpRegs(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrDumpRegs
-+
-+ @Description Dumps all PCD Policer registers
-+
-+ @Param[in] h_FmPcd A handle to an FM PCD Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ NOTE: this routine may be called only for FM in master mode
-+ (i.e. 'guestId'=NCSW_MASTER_ID) or in a case that the registers
-+ are mapped.
-+*//***************************************************************************/
-+t_Error FM_PCD_PlcrDumpRegs(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrProfileDumpRegs
-+
-+ @Description Dumps all PCD Policer profile registers
-+
-+ @Param[in] h_Profile A handle to a Policer profile.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ NOTE: this routine may be called only for FM in master mode
-+ (i.e. 'guestId'=NCSW_MASTER_ID) or in a case that the registers
-+ are mapped.
-+*//***************************************************************************/
-+t_Error FM_PCD_PlcrProfileDumpRegs(t_Handle h_Profile);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PrsDumpRegs
-+
-+ @Description Dumps all PCD Parser registers
-+
-+ @Param[in] h_FmPcd A handle to an FM PCD Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ NOTE: this routine may be called only for FM in master mode
-+ (i.e. 'guestId'=NCSW_MASTER_ID) or in a case that the registers
-+ are mapped.
-+*//***************************************************************************/
-+t_Error FM_PCD_PrsDumpRegs(t_Handle h_FmPcd);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HcDumpRegs
-+
-+ @Description Dumps HC Port registers
-+
-+ @Param[in] h_FmPcd A handle to an FM PCD Module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+ NOTE: this routine may be called only for FM in master mode
-+ (i.e. 'guestId'=NCSW_MASTER_ID).
-+*//***************************************************************************/
-+t_Error FM_PCD_HcDumpRegs(t_Handle h_FmPcd);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+
-+
-+/**************************************************************************//**
-+ @Group FM_PCD_Runtime_build_grp FM PCD Runtime Building Unit
-+
-+ @Description Frame Manager PCD Runtime Building API
-+
-+ This group contains routines for setting, deleting and modifying
-+ PCD resources, for defining the total PCD tree.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection Definitions of coarse classification
-+ parameters as required by KeyGen (when coarse classification
-+ is the next engine after this scheme).
-+*//***************************************************************************/
-+#define FM_PCD_MAX_NUM_OF_CC_TREES 8
-+#define FM_PCD_MAX_NUM_OF_CC_GROUPS 16
-+#define FM_PCD_MAX_NUM_OF_CC_UNITS 4
-+#define FM_PCD_MAX_NUM_OF_KEYS 256
-+#define FM_PCD_MAX_NUM_OF_FLOWS (4*KILOBYTE)
-+#define FM_PCD_MAX_SIZE_OF_KEY 56
-+#define FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP 16
-+#define FM_PCD_LAST_KEY_INDEX 0xffff
-+
-+#define FM_PCD_MAX_NUM_OF_CC_NODES 255 /* Obsolete, not used - will be removed in the future */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection A set of definitions to allow protocol
-+ special option description.
-+*//***************************************************************************/
-+typedef uint32_t protocolOpt_t; /**< A general type to define a protocol option. */
-+
-+typedef protocolOpt_t ethProtocolOpt_t; /**< Ethernet protocol options. */
-+#define ETH_BROADCAST 0x80000000 /**< Ethernet Broadcast. */
-+#define ETH_MULTICAST 0x40000000 /**< Ethernet Multicast. */
-+
-+typedef protocolOpt_t vlanProtocolOpt_t; /**< VLAN protocol options. */
-+#define VLAN_STACKED 0x20000000 /**< Stacked VLAN. */
-+
-+typedef protocolOpt_t mplsProtocolOpt_t; /**< MPLS protocol options. */
-+#define MPLS_STACKED 0x10000000 /**< Stacked MPLS. */
-+
-+typedef protocolOpt_t ipv4ProtocolOpt_t; /**< IPv4 protocol options. */
-+#define IPV4_BROADCAST_1 0x08000000 /**< IPv4 Broadcast. */
-+#define IPV4_MULTICAST_1 0x04000000 /**< IPv4 Multicast. */
-+#define IPV4_UNICAST_2 0x02000000 /**< Tunneled IPv4 - Unicast. */
-+#define IPV4_MULTICAST_BROADCAST_2 0x01000000 /**< Tunneled IPv4 - Broadcast/Multicast. */
-+
-+#define IPV4_FRAG_1 0x00000008 /**< IPV4 reassembly option.
-+ IPV4 Reassembly manipulation requires network
-+ environment with IPV4 header and IPV4_FRAG_1 option */
-+
-+typedef protocolOpt_t ipv6ProtocolOpt_t; /**< IPv6 protocol options. */
-+#define IPV6_MULTICAST_1 0x00800000 /**< IPv6 Multicast. */
-+#define IPV6_UNICAST_2 0x00400000 /**< Tunneled IPv6 - Unicast. */
-+#define IPV6_MULTICAST_2 0x00200000 /**< Tunneled IPv6 - Multicast. */
-+
-+#define IPV6_FRAG_1 0x00000004 /**< IPV6 reassembly option.
-+ IPV6 Reassembly manipulation requires network
-+ environment with IPV6 header and IPV6_FRAG_1 option;
-+ if a fragment is found, the fragment-extension offset
-+ may be found at 'shim2' (in parser-result). */
-+#if (DPAA_VERSION >= 11)
-+typedef protocolOpt_t capwapProtocolOpt_t; /**< CAPWAP protocol options. */
-+#define CAPWAP_FRAG_1 0x00000008 /**< CAPWAP reassembly option.
-+ CAPWAP Reassembly manipulation requires network
-+ environment with CAPWAP header and CAPWAP_FRAG_1 option;
-+ if a fragment is found, the fragment-extension offset
-+ may be found at 'shim2' (in parser-result). */
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+
-+/* @} */
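These option masks are plain bit flags carried in a protocolOpt_t, so several options for the same header are OR-ed together, and reassembly options such as IPV4_FRAG_1 must be present in the network environment that the reassembly manipulation will later use. A hedged illustration of the typing only:

    /* Sketch only: protocol options are OR-ed bit masks of protocolOpt_t. */
    ethProtocolOpt_t  ethOpt  = ETH_BROADCAST | ETH_MULTICAST; /* both L2 casts */
    ipv4ProtocolOpt_t ipv4Opt = IPV4_FRAG_1;   /* required for IPv4 reassembly */
    vlanProtocolOpt_t vlanOpt = VLAN_STACKED;  /* stacked VLAN recognition */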
-+
-+#define FM_PCD_MANIP_MAX_HDR_SIZE 256
-+#define FM_PCD_MANIP_DSCP_TO_VLAN_TRANS 64
-+
-+/**************************************************************************//**
-+ @Collection A set of definitions to support Header Manipulation selection.
-+*//***************************************************************************/
-+typedef uint32_t hdrManipFlags_t; /**< A general type to define a HMan update command flags. */
-+
-+typedef hdrManipFlags_t ipv4HdrManipUpdateFlags_t; /**< IPv4 protocol HMan update command flags. */
-+
-+#define HDR_MANIP_IPV4_TOS 0x80000000 /**< update TOS with the given value ('tos' field
-+ of t_FmPcdManipHdrFieldUpdateIpv4) */
-+#define HDR_MANIP_IPV4_ID 0x40000000 /**< update IP ID with the given value ('id' field
-+ of t_FmPcdManipHdrFieldUpdateIpv4) */
-+#define HDR_MANIP_IPV4_TTL 0x20000000 /**< Decrement TTL by 1 */
-+#define HDR_MANIP_IPV4_SRC 0x10000000 /**< update IP source address with the given value
-+ ('src' field of t_FmPcdManipHdrFieldUpdateIpv4) */
-+#define HDR_MANIP_IPV4_DST 0x08000000 /**< update IP destination address with the given value
-+ ('dst' field of t_FmPcdManipHdrFieldUpdateIpv4) */
-+
-+typedef hdrManipFlags_t ipv6HdrManipUpdateFlags_t; /**< IPv6 protocol HMan update command flags. */
-+
-+#define HDR_MANIP_IPV6_TC 0x80000000 /**< update Traffic Class with the given value
-+ ('trafficClass' field of t_FmPcdManipHdrFieldUpdateIpv6) */
-+#define HDR_MANIP_IPV6_HL 0x40000000 /**< Decrement Hop Limit by 1 */
-+#define HDR_MANIP_IPV6_SRC 0x20000000 /**< update IP source address with the given value
-+ ('src' field of t_FmPcdManipHdrFieldUpdateIpv6) */
-+#define HDR_MANIP_IPV6_DST 0x10000000 /**< update IP destination address with the given value
-+ ('dst' field of t_FmPcdManipHdrFieldUpdateIpv6) */
-+
-+typedef hdrManipFlags_t tcpUdpHdrManipUpdateFlags_t;/**< TCP/UDP protocol HMan update command flags. */
-+
-+#define HDR_MANIP_TCP_UDP_SRC 0x80000000 /**< update TCP/UDP source port with the given value
-+ ('src' field of t_FmPcdManipHdrFieldUpdateTcpUdp) */
-+#define HDR_MANIP_TCP_UDP_DST 0x40000000 /**< update TCP/UDP destination port with the given value
-+ ('dst' field of t_FmPcdManipHdrFieldUpdateTcpUdp) */
-+#define HDR_MANIP_TCP_UDP_CHECKSUM 0x20000000 /**< update TCP/UDP checksum */
-+
-+/* @} */
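The header-manipulation flags follow the same pattern: a hdrManipFlags_t value lists the fields one update command should touch, while the replacement values themselves travel in the t_FmPcdManipHdrFieldUpdate* structures referenced in the comments (defined later in this header). Illustration of the flag composition only:

    /* Sketch only: one IPv4 field-update command that decrements TTL and
     * rewrites the destination address (the address value itself is supplied
     * through t_FmPcdManipHdrFieldUpdateIpv4, not shown here). */
    ipv4HdrManipUpdateFlags_t ipv4UpdateFlags = HDR_MANIP_IPV4_TTL | HDR_MANIP_IPV4_DST;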
-+
-+/**************************************************************************//**
-+ @Description A type used for returning the order of the key extraction.
-+ Each value in this array represents the index of the extraction
-+ command as defined by the user in the initialization extraction array.
-+ The valid size of this array is the user-defined number of extractions
-+ required (also marked by the second '0' in this array).
-+*//***************************************************************************/
-+typedef uint8_t t_FmPcdKgKeyOrder [FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
-+
-+/**************************************************************************//**
-+ @Description All PCD engines
-+*//***************************************************************************/
-+typedef enum e_FmPcdEngine {
-+ e_FM_PCD_INVALID = 0, /**< Invalid PCD engine */
-+ e_FM_PCD_DONE, /**< No PCD Engine indicated */
-+ e_FM_PCD_KG, /**< KeyGen */
-+ e_FM_PCD_CC, /**< Coarse classifier */
-+ e_FM_PCD_PLCR, /**< Policer */
-+ e_FM_PCD_PRS, /**< Parser */
-+#if (DPAA_VERSION >= 11)
-+ e_FM_PCD_FR, /**< Frame-Replicator */
-+#endif /* (DPAA_VERSION >= 11) */
-+ e_FM_PCD_HASH /**< Hash table */
-+} e_FmPcdEngine;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting extraction by header types
-+*//***************************************************************************/
-+typedef enum e_FmPcdExtractByHdrType {
-+ e_FM_PCD_EXTRACT_FROM_HDR, /**< Extract bytes from header */
-+ e_FM_PCD_EXTRACT_FROM_FIELD, /**< Extract bytes from header field */
-+ e_FM_PCD_EXTRACT_FULL_FIELD /**< Extract a full field */
-+} e_FmPcdExtractByHdrType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting extraction source
-+ (when it is not the header)
-+*//***************************************************************************/
-+typedef enum e_FmPcdExtractFrom {
-+ e_FM_PCD_EXTRACT_FROM_FRAME_START, /**< KG & CC: Extract from beginning of frame */
-+ e_FM_PCD_EXTRACT_FROM_DFLT_VALUE, /**< KG only: Extract from a default value */
-+ e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE, /**< KG & CC: Extract from the point where parsing had finished */
-+ e_FM_PCD_EXTRACT_FROM_KEY, /**< CC only: Field where the KEY is saved */
-+ e_FM_PCD_EXTRACT_FROM_HASH, /**< CC only: Field where the HASH is saved */
-+ e_FM_PCD_EXTRACT_FROM_PARSE_RESULT, /**< KG only: Extract from the parser result */
-+ e_FM_PCD_EXTRACT_FROM_ENQ_FQID, /**< KG & CC: Extract from enqueue FQID */
-+ e_FM_PCD_EXTRACT_FROM_FLOW_ID /**< CC only: Field where the dequeue FQID is saved */
-+} e_FmPcdExtractFrom;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting extraction type
-+*//***************************************************************************/
-+typedef enum e_FmPcdExtractType {
-+ e_FM_PCD_EXTRACT_BY_HDR, /**< Extract according to header */
-+ e_FM_PCD_EXTRACT_NON_HDR, /**< Extract from data that is not the header */
-+ e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO /**< Extract private info as specified by user */
-+} e_FmPcdExtractType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting default extraction value
-+*//***************************************************************************/
-+typedef enum e_FmPcdKgExtractDfltSelect {
-+ e_FM_PCD_KG_DFLT_GBL_0, /**< Default selection is KG register 0 */
-+ e_FM_PCD_KG_DFLT_GBL_1, /**< Default selection is KG register 1 */
-+ e_FM_PCD_KG_DFLT_PRIVATE_0, /**< Default selection is a per scheme register 0 */
-+ e_FM_PCD_KG_DFLT_PRIVATE_1, /**< Default selection is a per scheme register 1 */
-+ e_FM_PCD_KG_DFLT_ILLEGAL /**< Illegal selection */
-+} e_FmPcdKgExtractDfltSelect;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type defining all default groups - each group shares
-+ a default value, one of four user-initialized values.
-+*//***************************************************************************/
-+typedef enum e_FmPcdKgKnownFieldsDfltTypes {
-+ e_FM_PCD_KG_MAC_ADDR, /**< MAC Address */
-+ e_FM_PCD_KG_TCI, /**< TCI field */
-+ e_FM_PCD_KG_ENET_TYPE, /**< ENET Type */
-+ e_FM_PCD_KG_PPP_SESSION_ID, /**< PPP Session id */
-+ e_FM_PCD_KG_PPP_PROTOCOL_ID, /**< PPP Protocol id */
-+ e_FM_PCD_KG_MPLS_LABEL, /**< MPLS label */
-+ e_FM_PCD_KG_IP_ADDR, /**< IP address */
-+ e_FM_PCD_KG_PROTOCOL_TYPE, /**< Protocol type */
-+ e_FM_PCD_KG_IP_TOS_TC, /**< TOS or TC */
-+ e_FM_PCD_KG_IPV6_FLOW_LABEL, /**< IPV6 flow label */
-+ e_FM_PCD_KG_IPSEC_SPI, /**< IPSEC SPI */
-+ e_FM_PCD_KG_L4_PORT, /**< L4 Port */
-+ e_FM_PCD_KG_TCP_FLAG, /**< TCP Flag */
-+ e_FM_PCD_KG_GENERIC_FROM_DATA, /**< grouping implemented by SW,
-+ any data extraction that is not the full
-+ field described above */
-+ e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V, /**< grouping implemented by SW,
-+ any data extraction without validation */
-+ e_FM_PCD_KG_GENERIC_NOT_FROM_DATA /**< grouping implemented by SW,
-+ extraction from parser result or
-+ direct use of default value */
-+} e_FmPcdKgKnownFieldsDfltTypes;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for defining header index for scenarios with
-+ multiple (tunneled) headers
-+*//***************************************************************************/
-+typedef enum e_FmPcdHdrIndex {
-+ e_FM_PCD_HDR_INDEX_NONE = 0, /**< used when multiple headers are not used; also
-+ to specify regular IP (not tunneled). */
-+ e_FM_PCD_HDR_INDEX_1, /**< may be used for VLAN, MPLS, tunneled IP */
-+ e_FM_PCD_HDR_INDEX_2, /**< may be used for MPLS, tunneled IP */
-+ e_FM_PCD_HDR_INDEX_3, /**< may be used for MPLS */
-+ e_FM_PCD_HDR_INDEX_LAST = 0xFF /**< may be used for VLAN, MPLS */
-+} e_FmPcdHdrIndex;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile functional type
-+*//***************************************************************************/
-+typedef enum e_FmPcdProfileTypeSelection {
-+ e_FM_PCD_PLCR_PORT_PRIVATE, /**< Port dedicated profile */
-+ e_FM_PCD_PLCR_SHARED /**< Shared profile (shared within partition) */
-+} e_FmPcdProfileTypeSelection;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile algorithm
-+*//***************************************************************************/
-+typedef enum e_FmPcdPlcrAlgorithmSelection {
-+ e_FM_PCD_PLCR_PASS_THROUGH, /**< Policer pass through */
-+ e_FM_PCD_PLCR_RFC_2698, /**< Policer algorithm RFC 2698 */
-+ e_FM_PCD_PLCR_RFC_4115 /**< Policer algorithm RFC 4115 */
-+} e_FmPcdPlcrAlgorithmSelection;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting a policer profile color mode
-+*//***************************************************************************/
-+typedef enum e_FmPcdPlcrColorMode {
-+ e_FM_PCD_PLCR_COLOR_BLIND, /**< Color blind */
-+ e_FM_PCD_PLCR_COLOR_AWARE /**< Color aware */
-+} e_FmPcdPlcrColorMode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting a policer profile color
-+*//***************************************************************************/
-+typedef enum e_FmPcdPlcrColor {
-+ e_FM_PCD_PLCR_GREEN, /**< Green color code */
-+ e_FM_PCD_PLCR_YELLOW, /**< Yellow color code */
-+ e_FM_PCD_PLCR_RED, /**< Red color code */
-+ e_FM_PCD_PLCR_OVERRIDE /**< Color override code */
-+} e_FmPcdPlcrColor;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile packet frame length selector
-+*//***************************************************************************/
-+typedef enum e_FmPcdPlcrFrameLengthSelect {
-+ e_FM_PCD_PLCR_L2_FRM_LEN, /**< L2 frame length */
-+ e_FM_PCD_PLCR_L3_FRM_LEN, /**< L3 frame length */
-+ e_FM_PCD_PLCR_L4_FRM_LEN, /**< L4 frame length */
-+ e_FM_PCD_PLCR_FULL_FRM_LEN /**< Full frame length */
-+} e_FmPcdPlcrFrameLengthSelect;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting roll-back frame
-+*//***************************************************************************/
-+typedef enum e_FmPcdPlcrRollBackFrameSelect {
-+ e_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN, /**< Roll-back L2 frame length */
-+ e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN /**< Roll-back Full frame length */
-+} e_FmPcdPlcrRollBackFrameSelect;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile packet or byte mode
-+*//***************************************************************************/
-+typedef enum e_FmPcdPlcrRateMode {
-+ e_FM_PCD_PLCR_BYTE_MODE, /**< Byte mode */
-+ e_FM_PCD_PLCR_PACKET_MODE /**< Packet mode */
-+} e_FmPcdPlcrRateMode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for defining action of frame
-+*//***************************************************************************/
-+typedef enum e_FmPcdDoneAction {
-+ e_FM_PCD_ENQ_FRAME = 0, /**< Enqueue frame */
-+ e_FM_PCD_DROP_FRAME /**< Mark this frame as error frame and continue
-+ to error flow; 'FM_PORT_FRM_ERR_CLS_DISCARD'
-+ flag will be set for this frame. */
-+} e_FmPcdDoneAction;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer counter
-+*//***************************************************************************/
-+typedef enum e_FmPcdPlcrProfileCounters {
-+ e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER, /**< Green packets counter */
-+ e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER, /**< Yellow packets counter */
-+ e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER, /**< Red packets counter */
-+ e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER, /**< Recolored yellow packets counter */
-+ e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER /**< Recolored red packets counter */
-+} e_FmPcdPlcrProfileCounters;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the PCD action after extraction
-+*//***************************************************************************/
-+typedef enum e_FmPcdAction {
-+ e_FM_PCD_ACTION_NONE, /**< NONE */
-+ e_FM_PCD_ACTION_EXACT_MATCH, /**< Exact match on the selected extraction */
-+ e_FM_PCD_ACTION_INDEXED_LOOKUP /**< Indexed lookup on the selected extraction */
-+} e_FmPcdAction;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of insert manipulation
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrInsrtType {
-+ e_FM_PCD_MANIP_INSRT_GENERIC, /**< Insert according to offset & size */
-+ e_FM_PCD_MANIP_INSRT_BY_HDR, /**< Insert according to protocol */
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ e_FM_PCD_MANIP_INSRT_BY_TEMPLATE /**< Insert template to start of frame */
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+} e_FmPcdManipHdrInsrtType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of remove manipulation
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrRmvType {
-+ e_FM_PCD_MANIP_RMV_GENERIC, /**< Remove according to offset & size */
-+ e_FM_PCD_MANIP_RMV_BY_HDR /**< Remove according to protocol */
-+} e_FmPcdManipHdrRmvType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific L2 fields removal
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrRmvSpecificL2 {
-+ e_FM_PCD_MANIP_HDR_RMV_ETHERNET, /**< Ethernet/802.3 MAC */
-+ e_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS, /**< stacked QTags */
-+ e_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS, /**< MPLS and Ethernet/802.3 MAC header until
-+ the header which follows the MPLS header */
-+ e_FM_PCD_MANIP_HDR_RMV_MPLS, /**< Remove MPLS header (Unlimited MPLS labels) */
-+ e_FM_PCD_MANIP_HDR_RMV_PPPOE /**< Remove the PPPoE header and PPP protocol field. */
-+} e_FmPcdManipHdrRmvSpecificL2;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific fields updates
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrFieldUpdateType {
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN, /**< VLAN updates */
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4, /**< IPV4 updates */
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6, /**< IPV6 updates */
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP, /**< TCP_UDP updates */
-+} e_FmPcdManipHdrFieldUpdateType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting VLAN updates
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrFieldUpdateVlan {
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI, /**< Replace VPri of outer most VLAN tag. */
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN /**< DSCP to VLAN priority bits translation */
-+} e_FmPcdManipHdrFieldUpdateVlan;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific L2 header insertion
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrInsrtSpecificL2 {
-+ e_FM_PCD_MANIP_HDR_INSRT_MPLS, /**< Insert MPLS header (Unlimited MPLS labels) */
-+ e_FM_PCD_MANIP_HDR_INSRT_PPPOE /**< Insert PPPOE */
-+} e_FmPcdManipHdrInsrtSpecificL2;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting QoS mapping mode
-+
-+ Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE'
-+ the user should instruct the port to read the hash-result
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrQosMappingMode {
-+ e_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE = 0, /**< No mapping, QoS field will not be changed */
-+ e_FM_PCD_MANIP_HDR_QOS_MAPPING_AS_IS, /**< QoS field will be overwritten by the last byte in the hash-result. */
-+} e_FmPcdManipHdrQosMappingMode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting QoS source
-+
-+ Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_SRC_NONE'
-+ the user should leave room for the hash-result in the input/output buffer
-+ and instruct the port to read/write the hash-result to the buffer (RPD should be set)
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrQosSrc {
-+ e_FM_PCD_MANIP_HDR_QOS_SRC_NONE = 0, /**< TODO */
-+ e_FM_PCD_MANIP_HDR_QOS_SRC_USER_DEFINED, /**< QoS will be taken from the last byte in the hash-result. */
-+} e_FmPcdManipHdrQosSrc;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of header insertion
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrInsrtByHdrType {
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2, /**< Specific L2 fields insertion */
-+#if (DPAA_VERSION >= 11)
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_IP, /**< IP insertion */
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP, /**< UDP insertion */
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, /**< UDP lite insertion */
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP /**< CAPWAP insertion */
-+#endif /* (DPAA_VERSION >= 11) */
-+} e_FmPcdManipHdrInsrtByHdrType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific customCommand
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrCustomType {
-+ e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE, /**< Replace IPv4/IPv6 */
-+ e_FM_PCD_MANIP_HDR_CUSTOM_GEN_FIELD_REPLACE, /**< Generic field replacement */
-+} e_FmPcdManipHdrCustomType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the direction of the IP-replace customCommand
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrCustomIpReplace {
-+ e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV4_BY_IPV6, /**< Replace IPv4 by IPv6 */
-+ e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 /**< Replace IPv6 by IPv4 */
-+} e_FmPcdManipHdrCustomIpReplace;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of header removal
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipHdrRmvByHdrType {
-+ e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2 = 0, /**< Specific L2 fields removal */
-+#if (DPAA_VERSION >= 11)
-+ e_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP, /**< CAPWAP removal */
-+#endif /* (DPAA_VERSION >= 11) */
-+#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START, /**< Locate from data that is not the header */
-+#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+} e_FmPcdManipHdrRmvByHdrType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of timeout mode
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipReassemTimeOutMode {
-+ e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES, /**< Limits the time of the reassembly process
-+ from the first fragment to the last */
-+ e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG /**< Limits the time of receiving the fragment */
-+} e_FmPcdManipReassemTimeOutMode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of WaysNumber mode
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipReassemWaysNumber {
-+ e_FM_PCD_MANIP_ONE_WAY_HASH = 1, /**< One way hash */
-+ e_FM_PCD_MANIP_TWO_WAYS_HASH, /**< Two ways hash */
-+ e_FM_PCD_MANIP_THREE_WAYS_HASH, /**< Three ways hash */
-+ e_FM_PCD_MANIP_FOUR_WAYS_HASH, /**< Four ways hash */
-+ e_FM_PCD_MANIP_FIVE_WAYS_HASH, /**< Five ways hash */
-+ e_FM_PCD_MANIP_SIX_WAYS_HASH, /**< Six ways hash */
-+ e_FM_PCD_MANIP_SEVEN_WAYS_HASH, /**< Seven ways hash */
-+ e_FM_PCD_MANIP_EIGHT_WAYS_HASH /**< Eight ways hash */
-+} e_FmPcdManipReassemWaysNumber;
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of statistics mode
-+*//***************************************************************************/
-+typedef enum e_FmPcdStatsType {
-+ e_FM_PCD_STATS_PER_FLOWID = 0 /**< Flow ID is used as index for getting statistics */
-+} e_FmPcdStatsType;
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting manipulation type
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipType {
-+ e_FM_PCD_MANIP_HDR = 0, /**< Header manipulation */
-+ e_FM_PCD_MANIP_REASSEM, /**< Reassembly */
-+ e_FM_PCD_MANIP_FRAG, /**< Fragmentation */
-+ e_FM_PCD_MANIP_SPECIAL_OFFLOAD /**< Special Offloading */
-+} e_FmPcdManipType;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of statistics mode
-+*//***************************************************************************/
-+typedef enum e_FmPcdCcStatsMode {
-+ e_FM_PCD_CC_STATS_MODE_NONE = 0, /**< No statistics support */
-+ e_FM_PCD_CC_STATS_MODE_FRAME, /**< Frame count statistics */
-+ e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME, /**< Byte and frame count statistics */
-+#if (DPAA_VERSION >= 11)
-+ e_FM_PCD_CC_STATS_MODE_RMON, /**< Byte and frame length range count statistics;
-+ This mode is supported only on B4860 device */
-+#endif /* (DPAA_VERSION >= 11) */
-+} e_FmPcdCcStatsMode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for determining the action in case an IP packet
-+ is larger than MTU but its DF (Don't Fragment) bit is set.
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipDontFragAction {
-+ e_FM_PCD_MANIP_DISCARD_PACKET = 0, /**< Discard packet */
-+ e_FM_PCD_MANIP_ENQ_TO_ERR_Q_OR_DISCARD_PACKET = e_FM_PCD_MANIP_DISCARD_PACKET,
-+ /**< Obsolete, cannot enqueue to error queue;
-+ In practice, selects to discard packets;
-+ Will be removed in the future */
-+ e_FM_PCD_MANIP_FRAGMENT_PACKET, /**< Fragment packet and continue normal processing */
-+ e_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG /**< Continue normal processing without fragmenting the packet */
-+} e_FmPcdManipDontFragAction;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of special offload manipulation
-+*//***************************************************************************/
-+typedef enum e_FmPcdManipSpecialOffloadType {
-+ e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC, /**< IPSec offload manipulation */
-+#if (DPAA_VERSION >= 11)
-+ e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP /**< CAPWAP offload manipulation */
-+#endif /* (DPAA_VERSION >= 11) */
-+} e_FmPcdManipSpecialOffloadType;
-+
-+
-+/**************************************************************************//**
-+ @Description A Union of protocol dependent special options
-+*//***************************************************************************/
-+typedef union u_FmPcdHdrProtocolOpt {
-+ ethProtocolOpt_t ethOpt; /**< Ethernet options */
-+ vlanProtocolOpt_t vlanOpt; /**< VLAN options */
-+ mplsProtocolOpt_t mplsOpt; /**< MPLS options */
-+ ipv4ProtocolOpt_t ipv4Opt; /**< IPv4 options */
-+ ipv6ProtocolOpt_t ipv6Opt; /**< IPv6 options */
-+#if (DPAA_VERSION >= 11)
-+ capwapProtocolOpt_t capwapOpt; /**< CAPWAP options */
-+#endif /* (DPAA_VERSION >= 11) */
-+} u_FmPcdHdrProtocolOpt;
-+
-+/**************************************************************************//**
-+ @Description A union holding protocol fields
-+
-+
-+ Fields supported as "full fields":
-+ HEADER_TYPE_ETH:
-+ NET_HEADER_FIELD_ETH_DA
-+ NET_HEADER_FIELD_ETH_SA
-+ NET_HEADER_FIELD_ETH_TYPE
-+
-+ HEADER_TYPE_LLC_SNAP:
-+ NET_HEADER_FIELD_LLC_SNAP_TYPE
-+
-+ HEADER_TYPE_VLAN:
-+ NET_HEADER_FIELD_VLAN_TCI
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
-+ e_FM_PCD_HDR_INDEX_LAST)
-+
-+ HEADER_TYPE_MPLS:
-+ NET_HEADER_FIELD_MPLS_LABEL_STACK
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
-+ e_FM_PCD_HDR_INDEX_2,
-+ e_FM_PCD_HDR_INDEX_LAST)
-+
-+ HEADER_TYPE_IPv4:
-+ NET_HEADER_FIELD_IPv4_SRC_IP
-+ NET_HEADER_FIELD_IPv4_DST_IP
-+ NET_HEADER_FIELD_IPv4_PROTO
-+ NET_HEADER_FIELD_IPv4_TOS
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
-+ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
-+
-+ HEADER_TYPE_IPv6:
-+ NET_HEADER_FIELD_IPv6_SRC_IP
-+ NET_HEADER_FIELD_IPv6_DST_IP
-+ NET_HEADER_FIELD_IPv6_NEXT_HDR
-+ NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC (must come together!)
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
-+ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
-+
-+ (Note that starting from DPAA 1.1, NET_HEADER_FIELD_IPv6_NEXT_HDR applies to
-+ the last next-header indication, meaning the next L4, which may be
-+ present at the last IPv6 extension header. On earlier revisions this field
-+ applies to the Next-Header field of the main IPv6 header)
-+
-+ HEADER_TYPE_IP:
-+ NET_HEADER_FIELD_IP_PROTO
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_LAST)
-+ NET_HEADER_FIELD_IP_DSCP
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1)
-+ HEADER_TYPE_GRE:
-+ NET_HEADER_FIELD_GRE_TYPE
-+
-+ HEADER_TYPE_MINENCAP
-+ NET_HEADER_FIELD_MINENCAP_SRC_IP
-+ NET_HEADER_FIELD_MINENCAP_DST_IP
-+ NET_HEADER_FIELD_MINENCAP_TYPE
-+
-+ HEADER_TYPE_TCP:
-+ NET_HEADER_FIELD_TCP_PORT_SRC
-+ NET_HEADER_FIELD_TCP_PORT_DST
-+ NET_HEADER_FIELD_TCP_FLAGS
-+
-+ HEADER_TYPE_UDP:
-+ NET_HEADER_FIELD_UDP_PORT_SRC
-+ NET_HEADER_FIELD_UDP_PORT_DST
-+
-+ HEADER_TYPE_UDP_LITE:
-+ NET_HEADER_FIELD_UDP_LITE_PORT_SRC
-+ NET_HEADER_FIELD_UDP_LITE_PORT_DST
-+
-+ HEADER_TYPE_IPSEC_AH:
-+ NET_HEADER_FIELD_IPSEC_AH_SPI
-+ NET_HEADER_FIELD_IPSEC_AH_NH
-+
-+ HEADER_TYPE_IPSEC_ESP:
-+ NET_HEADER_FIELD_IPSEC_ESP_SPI
-+
-+ HEADER_TYPE_SCTP:
-+ NET_HEADER_FIELD_SCTP_PORT_SRC
-+ NET_HEADER_FIELD_SCTP_PORT_DST
-+
-+ HEADER_TYPE_DCCP:
-+ NET_HEADER_FIELD_DCCP_PORT_SRC
-+ NET_HEADER_FIELD_DCCP_PORT_DST
-+
-+ HEADER_TYPE_PPPoE:
-+ NET_HEADER_FIELD_PPPoE_PID
-+ NET_HEADER_FIELD_PPPoE_SID
-+
-+ *****************************************************************
-+ Fields supported as "from fields":
-+ HEADER_TYPE_ETH (with or without validation):
-+ NET_HEADER_FIELD_ETH_TYPE
-+
-+ HEADER_TYPE_VLAN (with or without validation):
-+ NET_HEADER_FIELD_VLAN_TCI
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
-+ e_FM_PCD_HDR_INDEX_LAST)
-+
-+ HEADER_TYPE_IPv4 (without validation):
-+ NET_HEADER_FIELD_IPv4_PROTO
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
-+ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
-+
-+ HEADER_TYPE_IPv6 (without validation):
-+ NET_HEADER_FIELD_IPv6_NEXT_HDR
-+ (index may apply:
-+ e_FM_PCD_HDR_INDEX_NONE/e_FM_PCD_HDR_INDEX_1,
-+ e_FM_PCD_HDR_INDEX_2/e_FM_PCD_HDR_INDEX_LAST)
-+
-+*//***************************************************************************/
-+typedef union t_FmPcdFields {
-+ headerFieldEth_t eth; /**< Ethernet */
-+ headerFieldVlan_t vlan; /**< VLAN */
-+ headerFieldLlcSnap_t llcSnap; /**< LLC SNAP */
-+ headerFieldPppoe_t pppoe; /**< PPPoE */
-+ headerFieldMpls_t mpls; /**< MPLS */
-+ headerFieldIp_t ip; /**< IP */
-+ headerFieldIpv4_t ipv4; /**< IPv4 */
-+ headerFieldIpv6_t ipv6; /**< IPv6 */
-+ headerFieldUdp_t udp; /**< UDP */
-+ headerFieldUdpLite_t udpLite; /**< UDP Lite */
-+ headerFieldTcp_t tcp; /**< TCP */
-+ headerFieldSctp_t sctp; /**< SCTP */
-+ headerFieldDccp_t dccp; /**< DCCP */
-+ headerFieldGre_t gre; /**< GRE */
-+ headerFieldMinencap_t minencap; /**< Minimal Encapsulation */
-+ headerFieldIpsecAh_t ipsecAh; /**< IPSec AH */
-+ headerFieldIpsecEsp_t ipsecEsp; /**< IPSec ESP */
-+ headerFieldUdpEncapEsp_t udpEncapEsp; /**< UDP Encapsulation ESP */
-+} t_FmPcdFields;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header extraction for key generation
-+*//***************************************************************************/
-+typedef struct t_FmPcdFromHdr {
-+ uint8_t size; /**< Size in byte */
-+ uint8_t offset; /**< Byte offset */
-+} t_FmPcdFromHdr;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining field extraction for key generation
-+*//***************************************************************************/
-+typedef struct t_FmPcdFromField {
-+ t_FmPcdFields field; /**< Field selection */
-+ uint8_t size; /**< Size in byte */
-+ uint8_t offset; /**< Byte offset */
-+} t_FmPcdFromField;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a single network environment unit
-+
-+ A distinction unit should be defined if it will later be used
-+ by one or more PCD engines to distinguish between flows.
-+*//***************************************************************************/
-+typedef struct t_FmPcdDistinctionUnit {
-+ struct {
-+ e_NetHeaderType hdr; /**< One of the headers supported by the FM */
-+ u_FmPcdHdrProtocolOpt opt; /**< Select only one option ! */
-+ } hdrs[FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS];
-+} t_FmPcdDistinctionUnit;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining all different distinction units supported
-+ by a specific PCD Network Environment Characteristics module.
-+
-+                Each unit represents a protocol or a group of protocols that may
-+ be used later by the different PCD engines to distinguish
-+ between flows.
-+*//***************************************************************************/
-+typedef struct t_FmPcdNetEnvParams {
-+ uint8_t numOfDistinctionUnits; /**< Number of different units to be identified */
-+ t_FmPcdDistinctionUnit units[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /**< An array of numOfDistinctionUnits of the
-+ different units to be identified */
-+} t_FmPcdNetEnvParams;
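-+/* Usage sketch (illustration only): a minimal example of how a network
-+ * environment with two distinction units - IPv4 and UDP - might be described
-+ * with the structures above; the handle returned by
-+ * FM_PCD_NetEnvCharacteristicsSet() would then be referenced by schemes and
-+ * CC trees. */
-+#if 0 /* usage sketch */
-+static void example_BuildNetEnv(t_FmPcdNetEnvParams *p_NetEnv)
-+{
-+    memset(p_NetEnv, 0, sizeof(*p_NetEnv));
-+    p_NetEnv->numOfDistinctionUnits = 2;
-+    /* Unit 0 distinguishes IPv4 frames; no protocol option is selected,
-+       so the 'opt' union is left zeroed. */
-+    p_NetEnv->units[0].hdrs[0].hdr = HEADER_TYPE_IPv4;
-+    /* Unit 1 distinguishes UDP frames. */
-+    p_NetEnv->units[1].hdrs[0].hdr = HEADER_TYPE_UDP;
-+}
-+#endif /* usage sketch */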
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a single extraction action when
-+ creating a key
-+*//***************************************************************************/
-+typedef struct t_FmPcdExtractEntry {
-+ e_FmPcdExtractType type; /**< Extraction type select */
-+ union {
-+ struct {
-+ e_NetHeaderType hdr; /**< Header selection */
-+ bool ignoreProtocolValidation;
-+ /**< Ignore protocol validation */
-+ e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled
-+ IP. Otherwise should be cleared. */
-+ e_FmPcdExtractByHdrType type; /**< Header extraction type select */
-+ union {
-+ t_FmPcdFromHdr fromHdr; /**< Extract bytes from header parameters */
-+ t_FmPcdFromField fromField; /**< Extract bytes from field parameters */
-+                    t_FmPcdFields           fullField;      /**< Extract full field parameters */
-+ } extractByHdrType;
-+ } extractByHdr; /**< used when type = e_FM_PCD_KG_EXTRACT_BY_HDR */
-+ struct {
-+ e_FmPcdExtractFrom src; /**< Non-header extraction source */
-+ e_FmPcdAction action; /**< Relevant for CC Only */
-+ uint16_t icIndxMask; /**< Relevant only for CC when
-+ action = e_FM_PCD_ACTION_INDEXED_LOOKUP;
-+ Note that the number of bits that are set within
-+ this mask must be log2 of the CC-node 'numOfKeys'.
-+ Note that the mask cannot be set on the lower bits. */
-+ uint8_t offset; /**< Byte offset */
-+ uint8_t size; /**< Size in byte */
-+ } extractNonHdr; /**< used when type = e_FM_PCD_KG_EXTRACT_NON_HDR */
-+ };
-+} t_FmPcdExtractEntry;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining masks for each extracted field in the key.
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgExtractMask {
-+ uint8_t extractArrayIndex; /**< Index in the extraction array, as initialized by user */
-+ uint8_t offset; /**< Byte offset */
-+ uint8_t mask; /**< A byte mask (selected bits will be used) */
-+} t_FmPcdKgExtractMask;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining default selection per groups of fields
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgExtractDflt {
-+ e_FmPcdKgKnownFieldsDfltTypes type; /**< Default type select */
-+ e_FmPcdKgExtractDfltSelect dfltSelect; /**< Default register select */
-+} t_FmPcdKgExtractDflt;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining key extraction and hashing
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgKeyExtractAndHashParams {
-+ uint32_t privateDflt0; /**< Scheme default register 0 */
-+ uint32_t privateDflt1; /**< Scheme default register 1 */
-+ uint8_t numOfUsedExtracts; /**< defines the valid size of the following array */
-+ t_FmPcdExtractEntry extractArray [FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY]; /**< An array of extractions definition. */
-+ uint8_t numOfUsedDflts; /**< defines the valid size of the following array */
-+ t_FmPcdKgExtractDflt dflts[FM_PCD_KG_NUM_OF_DEFAULT_GROUPS];
-+                                                /**< For each extraction used in this scheme, specify the required
-+                                                     default register to be used when the header is not found.
-+                                                     Types not specified in this array will get an undefined value. */
-+ uint8_t numOfUsedMasks; /**< defines the valid size of the following array */
-+ t_FmPcdKgExtractMask masks[FM_PCD_KG_NUM_OF_EXTRACT_MASKS];
-+    uint8_t                     hashShift;          /**< Hash result right shift; selects the 24 bits out of the 64-bit hash
-+                                                         result. 0 means using the 24 LSBs, otherwise the
-+                                                         24 LSBs after shifting right are used.*/
-+ uint32_t hashDistributionNumOfFqids; /**< must be > 1 and a power of 2. Represents the range
-+ of queues for the key and hash functionality */
-+ uint8_t hashDistributionFqidsShift; /**< selects the FQID bits that will be effected by the hash */
-+ bool symmetricHash; /**< TRUE to generate the same hash for frames with swapped source and
-+ destination fields on all layers; If TRUE, driver will check that for
-+ all layers, if SRC extraction is selected, DST extraction must also be
-+ selected, and vice versa. */
-+} t_FmPcdKgKeyExtractAndHashParams;
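-+/* Usage sketch (illustration only): the hashing-related fields above with the
-+ * hash distributed over 64 FQIDs (a power of 2, as required) using the 24
-+ * LSBs of the hash result; the extraction selector enumerations are defined
-+ * elsewhere in this header, so the single extraction entry is only indicated
-+ * by a comment. */
-+#if 0 /* usage sketch */
-+static void example_KeyHashParams(t_FmPcdKgKeyExtractAndHashParams *p_Params)
-+{
-+    memset(p_Params, 0, sizeof(*p_Params));
-+    p_Params->numOfUsedExtracts          = 1;
-+    /* extractArray[0] would be filled as a header extraction
-+       (type = e_FM_PCD_KG_EXTRACT_BY_HDR), e.g. an IPv4 field. */
-+    p_Params->hashShift                  = 0;   /* use the 24 LSBs of the hash    */
-+    p_Params->hashDistributionNumOfFqids = 64;  /* must be > 1 and a power of 2   */
-+    p_Params->hashDistributionFqidsShift = 0;   /* hash affects the low FQID bits */
-+    p_Params->symmetricHash              = FALSE;
-+}
-+#endif /* usage sketch */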
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a single FQID mask (extracted OR).
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgExtractedOrParams {
-+ e_FmPcdExtractType type; /**< Extraction type select */
-+ union {
-+ struct { /**< used when type = e_FM_PCD_KG_EXTRACT_BY_HDR */
-+ e_NetHeaderType hdr;
-+ e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled
-+ IP. Otherwise should be cleared.*/
-+ bool ignoreProtocolValidation;
-+ /**< continue extraction even if protocol is not recognized */
-+ } extractByHdr; /**< Header to extract by */
-+ e_FmPcdExtractFrom src; /**< used when type = e_FM_PCD_KG_EXTRACT_NON_HDR */
-+ };
-+ uint8_t extractionOffset; /**< Offset for extraction (in bytes). */
-+ e_FmPcdKgExtractDfltSelect dfltValue; /**< Select register from which extraction is taken if
-+ field not found */
-+ uint8_t mask; /**< Extraction mask (specified bits are used) */
-+    uint8_t                             bitOffsetInFqid;    /**< 0-31, Selects which bits of the 24 FQID bits to affect using
-+                                                                 the extracted byte; Assume the byte is placed as the 8 MSBs in
-+                                                                 a 32 bit word where the lower bits
-+                                                                 are the FQID; i.e. if bitOffsetInFqid=1 then its LSB
-+                                                                 will affect the FQID MSB, if bitOffsetInFqid=24 then the
-+                                                                 extracted byte will affect the 8 LSBs of the FQID,
-+                                                                 if bitOffsetInFqid=31 then the byte's MSB will affect
-+                                                                 the FQID's LSB; 0 means no effect on FQID;
-+                                                                 Note that one, and only one, of
-+                                                                 bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e.,
-+                                                                 the extracted byte must affect either the FQID or the Policer profile).*/
-+    uint8_t                             bitOffsetInPlcrProfile;
-+                                                            /**< 0-15, Selects which bits of the 8 policer profile id bits to
-+                                                                 affect using the extracted byte; Assume the byte is placed
-+                                                                 as the 8 MSBs in a 16 bit word where the lower bits
-+                                                                 are the policer profile id; i.e. if bitOffsetInPlcrProfile=1
-+                                                                 then its LSB will affect the profile MSB, if bitOffsetInPlcrProfile=8
-+                                                                 then the extracted byte will affect the whole policer profile id,
-+                                                                 if bitOffsetInPlcrProfile=15 then the byte's MSB will affect
-+                                                                 the Policer Profile id's LSB;
-+                                                                 0 means no effect on the policer profile; Note that one, and only one, of
-+                                                                 bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e.,
-+                                                                 the extracted byte must affect either the FQID or the Policer profile).*/
-+} t_FmPcdKgExtractedOrParams;
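-+/* Worked example (illustration only) of the bit-offset rules above, for a
-+ * t_FmPcdKgExtractedOrParams instance named 'extractedOr': */
-+#if 0 /* usage sketch */
-+    /* OR the extracted byte onto the 8 LSBs of the 24-bit FQID: */
-+    extractedOr.bitOffsetInFqid        = 24;   /* byte lands on FQID bits 7..0       */
-+    extractedOr.bitOffsetInPlcrProfile = 0;    /* must stay 0 - only one may be set  */
-+    extractedOr.mask                   = 0xFF; /* use all bits of the extracted byte */
-+#endif /* usage sketch */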
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring a scheme counter
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgSchemeCounter {
-+ bool update; /**< FALSE to keep the current counter state
-+ and continue from that point, TRUE to update/reset
-+ the counter when the scheme is written. */
-+ uint32_t value; /**< If update=TRUE, this value will be written into the
-+ counter. clear this field to reset the counter. */
-+} t_FmPcdKgSchemeCounter;
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring a policer profile for a KeyGen scheme
-+ (when policer is the next engine after this scheme).
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgPlcrProfile {
-+ bool sharedProfile; /**< TRUE if this profile is shared between ports
-+ (managed by master partition); Must not be TRUE
-+ if profile is after Coarse Classification*/
-+ bool direct; /**< if TRUE, directRelativeProfileId only selects the profile
-+ id, if FALSE fqidOffsetRelativeProfileIdBase is used
-+ together with fqidOffsetShift and numOfProfiles
-+ parameters, to define a range of profiles from
-+ which the KeyGen result will determine the
-+ destination policer profile. */
-+ union {
-+ uint16_t directRelativeProfileId; /**< Used if 'direct' is TRUE, to select policer profile.
-+ should indicate the policer profile offset within the
-+ port's policer profiles or shared window. */
-+ struct {
-+ uint8_t fqidOffsetShift; /**< Shift on the KeyGen create FQID offset (i.e. not the
-+ final FQID - without the FQID base). */
-+ uint8_t fqidOffsetRelativeProfileIdBase;
-+                                                /**< The base of the FMan Port's relative policer-profile ID;
-+                                                     this value will be "OR'ed" with the KeyGen-created FQID
-+                                                     offset (i.e. not the final FQID - without the FQID base);
-+                                                     the final result should indicate the policer-profile offset
-+                                                     within the FMan Port's relative policer-profiles window
-+                                                     (or the SHARED window, depending on 'sharedProfile'). */
-+ uint8_t numOfProfiles; /**< Range of profiles starting at base */
-+ } indirectProfile; /**< Indirect profile parameters */
-+ } profileSelect; /**< Direct/indirect profile selection and parameters */
-+} t_FmPcdKgPlcrProfile;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Parameters for configuring a storage profile for a KeyGen scheme.
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgStorageProfile {
-+ bool direct; /**< If TRUE, directRelativeProfileId only selects the
-+ profile id;
-+ If FALSE, fqidOffsetRelativeProfileIdBase is used
-+ together with fqidOffsetShift and numOfProfiles
-+ parameters to define a range of profiles from which
-+ the KeyGen result will determine the destination
-+ storage profile. */
-+ union {
-+ uint16_t directRelativeProfileId; /**< Used when 'direct' is TRUE, to select a storage profile;
-+ should indicate the storage profile offset within the
-+ port's storage profiles window. */
-+ struct {
-+ uint8_t fqidOffsetShift; /**< Shift on the KeyGen create FQID offset (i.e. not the
-+ final FQID - without the FQID base). */
-+ uint8_t fqidOffsetRelativeProfileIdBase;
-+ /**< The base of the FMan Port's relative Storage-Profile ID;
-+ this value will be "OR'ed" with the KeyGen create FQID
-+ offset (i.e. not the final FQID - without the FQID base);
-+ the final result should indicate the Storage-Profile offset
-+ within the FMan Port's relative Storage-Profiles window. */
-+ uint8_t numOfProfiles; /**< Range of profiles starting at base. */
-+ } indirectProfile; /**< Indirect profile parameters. */
-+ } profileSelect; /**< Direct/indirect profile selection and parameters. */
-+} t_FmPcdKgStorageProfile;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CC as the next engine after KeyGen
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgCc {
-+ t_Handle h_CcTree; /**< A handle to a CC Tree */
-+ uint8_t grpId; /**< CC group id within the CC tree */
-+ bool plcrNext; /**< TRUE if after CC, in case of data frame,
-+ policing is required. */
-+ bool bypassPlcrProfileGeneration; /**< TRUE to bypass KeyGen policer profile generation;
-+ selected profile is the one set at port initialization. */
-+ t_FmPcdKgPlcrProfile plcrProfile; /**< Valid only if plcrNext = TRUE and
-+ bypassPlcrProfileGeneration = FALSE */
-+} t_FmPcdKgCc;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining initializing a KeyGen scheme
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgSchemeParams {
-+ bool modify; /**< TRUE to change an existing scheme */
-+ union
-+ {
-+        uint8_t                 relativeSchemeId;       /**< if modify=FALSE: Partition relative scheme id */
-+ t_Handle h_Scheme; /**< if modify=TRUE: a handle of the existing scheme */
-+ } id;
-+ bool alwaysDirect; /**< This scheme is reached only directly, i.e. no need
-+ for match vector; KeyGen will ignore it when matching */
-+ struct { /**< HL Relevant only if alwaysDirect = FALSE */
-+ t_Handle h_NetEnv; /**< A handle to the Network environment as returned
-+ by FM_PCD_NetEnvCharacteristicsSet() */
-+ uint8_t numOfDistinctionUnits; /**< Number of NetEnv units listed in unitIds array */
-+ uint8_t unitIds[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
-+ /**< Indexes as passed to SetNetEnvCharacteristics array*/
-+ } netEnvParams;
-+ bool useHash; /**< use the KeyGen Hash functionality */
-+ t_FmPcdKgKeyExtractAndHashParams keyExtractAndHashParams;
-+ /**< used only if useHash = TRUE */
-+ bool bypassFqidGeneration; /**< Normally - FALSE, TRUE to avoid FQID update in the IC;
-+ In such a case FQID after KeyGen will be the default FQID
-+ defined for the relevant port, or the FQID defined by CC
-+ in cases where CC was the previous engine. */
-+ uint32_t baseFqid; /**< Base FQID; Relevant only if bypassFqidGeneration = FALSE;
-+ If hash is used and an even distribution is expected
-+ according to hashDistributionNumOfFqids, baseFqid must be aligned to
-+ hashDistributionNumOfFqids. */
-+ uint8_t numOfUsedExtractedOrs; /**< Number of FQID masks listed in extractedOrs array */
-+ t_FmPcdKgExtractedOrParams extractedOrs[FM_PCD_KG_NUM_OF_GENERIC_REGS];
-+ /**< FM_PCD_KG_NUM_OF_GENERIC_REGS
-+ registers are shared between qidMasks
-+ functionality and some of the extraction
-+ actions; Normally only some will be used
-+ for qidMask. Driver will return error if
-+ resource is full at initialization time. */
-+
-+#if (DPAA_VERSION >= 11)
-+    bool                        overrideStorageProfile;     /**< TRUE if KeyGen overrides the previously decided storage profile */
-+    t_FmPcdKgStorageProfile     storageProfile;             /**< Used when overrideStorageProfile = TRUE */
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ e_FmPcdEngine nextEngine; /**< may be BMI, PLCR or CC */
-+ union { /**< depends on nextEngine */
-+ e_FmPcdDoneAction doneAction; /**< Used when next engine is BMI (done) */
-+ t_FmPcdKgPlcrProfile plcrProfile; /**< Used when next engine is PLCR */
-+ t_FmPcdKgCc cc; /**< Used when next engine is CC */
-+ } kgNextEngineParams;
-+ t_FmPcdKgSchemeCounter schemeCounter; /**< A structure of parameters for updating
-+ the scheme counter */
-+} t_FmPcdKgSchemeParams;
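-+/* Usage sketch (illustration only): a newly created, directly reached scheme
-+ * that hashes frames over 64 FQIDs starting at an aligned base FQID and then
-+ * terminates PCD processing; the scheme id and 0x400 are arbitrary example
-+ * values, and the extraction entries are omitted for brevity. */
-+#if 0 /* usage sketch */
-+static void example_SchemeParams(t_FmPcdKgSchemeParams *p_Scheme)
-+{
-+    memset(p_Scheme, 0, sizeof(*p_Scheme));
-+    p_Scheme->modify               = FALSE;
-+    p_Scheme->id.relativeSchemeId  = 0;      /* partition relative scheme id      */
-+    p_Scheme->alwaysDirect         = TRUE;   /* reached directly, no match vector */
-+    p_Scheme->useHash              = TRUE;
-+    p_Scheme->keyExtractAndHashParams.hashDistributionNumOfFqids = 64;
-+    p_Scheme->bypassFqidGeneration = FALSE;
-+    p_Scheme->baseFqid             = 0x400;  /* aligned to hashDistributionNumOfFqids */
-+    p_Scheme->nextEngine           = e_FM_PCD_DONE;
-+    p_Scheme->kgNextEngineParams.doneAction = e_FM_PCD_ENQ_FRAME;
-+}
-+#endif /* usage sketch */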
-+
-+/**************************************************************************//**
-+ @Collection Definitions for CC statistics
-+*//***************************************************************************/
-+#if (DPAA_VERSION >= 11)
-+#define FM_PCD_CC_STATS_MAX_NUM_OF_FLR 10 /* Maximal supported number of frame length ranges */
-+#define FM_PCD_CC_STATS_FLR_SIZE 2 /* Size in bytes of a frame length range limit */
-+#endif /* (DPAA_VERSION >= 11) */
-+#define FM_PCD_CC_STATS_COUNTER_SIZE 4 /* Size in bytes of a frame length range counter */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CC as the next engine after a CC node.
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcNextCcParams {
-+ t_Handle h_CcNode; /**< A handle of the next CC node */
-+} t_FmPcdCcNextCcParams;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Parameters for defining Frame replicator as the next engine after a CC node.
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcNextFrParams {
-+ t_Handle h_FrmReplic; /**< A handle of the next frame replicator group */
-+} t_FmPcdCcNextFrParams;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining Policer as the next engine after a CC node.
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcNextPlcrParams {
-+    bool        overrideParams;         /**< TRUE if CC overrides previously decided parameters*/
-+ bool sharedProfile; /**< Relevant only if overrideParams=TRUE:
-+ TRUE if this profile is shared between ports */
-+ uint16_t newRelativeProfileId; /**< Relevant only if overrideParams=TRUE:
-+ (otherwise profile id is taken from KeyGen);
-+ This parameter should indicate the policer
-+ profile offset within the port's
-+ policer profiles or from SHARED window.*/
-+ uint32_t newFqid; /**< Relevant only if overrideParams=TRUE:
-+ FQID for enqueuing the frame;
-+ In earlier chips if policer next engine is KEYGEN,
-+ this parameter can be 0, because the KEYGEN
-+ always decides the enqueue FQID.*/
-+#if (DPAA_VERSION >= 11)
-+ uint8_t newRelativeStorageProfileId;
-+ /**< Indicates the relative storage profile offset within
-+ the port's storage profiles window;
-+ Relevant only if the port was configured with VSP. */
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmPcdCcNextPlcrParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining enqueue as the next action after a CC node.
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcNextEnqueueParams {
-+ e_FmPcdDoneAction action; /**< Action - when next engine is BMI (done) */
-+    bool                    overrideFqid;       /**< TRUE if CC overrides the previously decided fqid and vspid,
-+ relevant if action = e_FM_PCD_ENQ_FRAME */
-+ uint32_t newFqid; /**< Valid if overrideFqid=TRUE, FQID for enqueuing the frame
-+ (otherwise FQID is taken from KeyGen),
-+ relevant if action = e_FM_PCD_ENQ_FRAME */
-+#if (DPAA_VERSION >= 11)
-+ uint8_t newRelativeStorageProfileId;
-+ /**< Valid if overrideFqid=TRUE, Indicates the relative virtual
-+ storage profile offset within the port's storage profiles
-+ window; Relevant only if the port was configured with VSP. */
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmPcdCcNextEnqueueParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining KeyGen as the next engine after a CC node.
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcNextKgParams {
-+    bool            overrideFqid;       /**< TRUE if CC overrides the previously decided fqid and vspid,
-+                                             Note - this parameter is irrelevant for earlier chips */
-+    uint32_t        newFqid;            /**< Valid if overrideFqid=TRUE, FQID for enqueuing the frame
-+                                             (otherwise FQID is taken from KeyGen),
-+                                             Note - this parameter is irrelevant for earlier chips */
-+#if (DPAA_VERSION >= 11)
-+ uint8_t newRelativeStorageProfileId;
-+ /**< Valid if overrideFqid=TRUE, Indicates the relative virtual
-+ storage profile offset within the port's storage profiles
-+ window; Relevant only if the port was configured with VSP. */
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ t_Handle h_DirectScheme; /**< Direct scheme handle to go to. */
-+} t_FmPcdCcNextKgParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the next engine after a CC node.
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcNextEngineParams {
-+ e_FmPcdEngine nextEngine; /**< User has to initialize parameters
-+ according to nextEngine definition */
-+ union {
-+ t_FmPcdCcNextCcParams ccParams; /**< Parameters in case next engine is CC */
-+ t_FmPcdCcNextPlcrParams plcrParams; /**< Parameters in case next engine is PLCR */
-+ t_FmPcdCcNextEnqueueParams enqueueParams; /**< Parameters in case next engine is BMI */
-+ t_FmPcdCcNextKgParams kgParams; /**< Parameters in case next engine is KG */
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdCcNextFrParams frParams; /**< Parameters in case next engine is FR */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } params; /**< union used for all the next-engine parameters options */
-+
-+ t_Handle h_Manip; /**< Handle to Manipulation object.
-+ Relevant if next engine is of type result
-+ (e_FM_PCD_PLCR, e_FM_PCD_KG, e_FM_PCD_DONE) */
-+
-+ bool statisticsEn; /**< If TRUE, statistics counters are incremented
-+ for each frame passing through this
-+ Coarse Classification entry. */
-+} t_FmPcdCcNextEngineParams;
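-+/* Usage sketch (illustration only): a next-engine descriptor that simply
-+ * enqueues matching frames to a specific FQID; 0x500 is an arbitrary example
-+ * value. */
-+#if 0 /* usage sketch */
-+static void example_NextEngineEnqueue(t_FmPcdCcNextEngineParams *p_Next)
-+{
-+    memset(p_Next, 0, sizeof(*p_Next));
-+    p_Next->nextEngine                        = e_FM_PCD_DONE;
-+    p_Next->params.enqueueParams.action       = e_FM_PCD_ENQ_FRAME;
-+    p_Next->params.enqueueParams.overrideFqid = TRUE;
-+    p_Next->params.enqueueParams.newFqid      = 0x500;
-+    p_Next->h_Manip                           = NULL;   /* no header manipulation */
-+    p_Next->statisticsEn                      = FALSE;  /* no per-key statistics  */
-+}
-+#endif /* usage sketch */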
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a single CC key
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcKeyParams {
-+ uint8_t *p_Key; /**< Relevant only if 'action' = e_FM_PCD_ACTION_EXACT_MATCH;
-+ pointer to the key of the size defined in keySize */
-+    uint8_t                     *p_Mask;        /**< Relevant only if 'action' = e_FM_PCD_ACTION_EXACT_MATCH;
-+                                                     pointer to the mask per key, of the size defined
-+                                                     in keySize. p_Key and p_Mask (if defined) have to be
-+                                                     of the same size, as defined in keySize;
-+                                                     NOTE that if this value is equal for all entries within
-+                                                     this table, the driver will automatically use a global mask
-+                                                     (i.e. one common mask for all entries) instead of a private
-+                                                     one; that is done to save some memory and for
-+                                                     better performance. */
-+    t_FmPcdCcNextEngineParams   ccNextEngineParams;
-+                                                /**< Parameters for the next engine for the key defined in
-+                                                     p_Key */
-+} t_FmPcdCcKeyParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CC keys parameters
-+ The driver supports two methods for CC node allocation: dynamic and static.
-+ Static mode was created in order to prevent runtime alloc/free
-+ of FMan memory (MURAM), which may cause fragmentation; in this mode,
-+ the driver automatically allocates the memory according to
-+ 'maxNumOfKeys' parameter. The driver calculates the maximal memory
-+ size that may be used for this CC-Node taking into consideration
-+ 'maskSupport' and 'statisticsMode' parameters.
-+ When 'action' = e_FM_PCD_ACTION_INDEXED_LOOKUP in the extraction
-+ parameters of this node, 'maxNumOfKeys' must be equal to 'numOfKeys'.
-+ In dynamic mode, 'maxNumOfKeys' must be zero. At initialization,
-+ all required structures are allocated according to 'numOfKeys'
-+ parameter. During runtime modification, these structures are
-+ re-allocated according to the updated number of keys.
-+
-+ Please note that 'action' and 'icIndxMask' mentioned in the
-+ specific parameter explanations are passed in the extraction
-+ parameters of the node (fields of extractCcParams.extractNonHdr).
-+*//***************************************************************************/
-+typedef struct t_KeysParams {
-+ uint16_t maxNumOfKeys; /**< Maximum number of keys that will (ever) be used in this CC-Node;
-+ A value of zero may be used for dynamic memory allocation. */
-+ bool maskSupport; /**< This parameter is relevant only if a node is initialized with
-+ 'action' = e_FM_PCD_ACTION_EXACT_MATCH and maxNumOfKeys > 0;
-+ Should be TRUE to reserve table memory for key masks, even if
-+ initial keys do not contain masks, or if the node was initialized
-+ as 'empty' (without keys); this will allow user to add keys with
-+ masks at runtime.
-+                                                         NOTE that if the user wants to use only global masks (i.e. one common mask
-+                                                         for all the entries within this table), this parameter should be set to 'FALSE'. */
-+ e_FmPcdCcStatsMode statisticsMode; /**< Determines the supported statistics mode for all node's keys.
-+                                                         To enable statistics gathering, statistics should be enabled per
-+                                                         key, using 'statisticsEn' in the next-engine parameters structure
-+ of that key;
-+ If 'maxNumOfKeys' is set, all required structures will be
-+ preallocated for all keys. */
-+#if (DPAA_VERSION >= 11)
-+ uint16_t frameLengthRanges[FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
-+ /**< Relevant only for 'RMON' statistics mode
-+ (this feature is supported only on B4860 device);
-+ Holds a list of programmable thresholds - for each received frame,
-+ its length in bytes is examined against these range thresholds and
-+ the appropriate counter is incremented by 1 - for example, to belong
-+ to range i, the following should hold:
-+                                                         range i-1 threshold < frame length <= range i threshold.
-+                                                         Each range threshold must be larger than its preceding range
-+                                                         threshold, and the last range threshold must be 0xFFFF. */
-+#endif /* (DPAA_VERSION >= 11) */
-+ uint16_t numOfKeys; /**< Number of initial keys;
-+                                                         Note that in case of 'action' = e_FM_PCD_ACTION_INDEXED_LOOKUP,
-+                                                         this field must equal 2 raised to the power of the number of bits
-+                                                         that are set in 'icIndxMask'. */
-+ uint8_t keySize; /**< Size of key - for extraction of type FULL_FIELD, 'keySize' has
-+ to be the standard size of the selected key; For other extraction
-+ types, 'keySize' has to be as size of extraction; When 'action' =
-+ e_FM_PCD_ACTION_INDEXED_LOOKUP, 'keySize' must be 2. */
-+ t_FmPcdCcKeyParams keyParams[FM_PCD_MAX_NUM_OF_KEYS];
-+ /**< An array with 'numOfKeys' entries, each entry specifies the
-+ corresponding key parameters;
-+ When 'action' = e_FM_PCD_ACTION_EXACT_MATCH, this value must not
-+ exceed 255 (FM_PCD_MAX_NUM_OF_KEYS-1) as the last entry is saved
-+ for the 'miss' entry. */
-+ t_FmPcdCcNextEngineParams ccNextEngineParamsForMiss;
-+ /**< Parameters for defining the next engine when a key is not matched;
-+ Not relevant if action = e_FM_PCD_ACTION_INDEXED_LOOKUP. */
-+} t_KeysParams;
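-+/* Worked example (illustration only) of the indexed-lookup sizing rules above,
-+ * for a t_KeysParams instance named 'keysParams': an 'icIndxMask' with 4 bits
-+ * set addresses 2^4 = 16 entries. */
-+#if 0 /* usage sketch */
-+    /* extractCcParams.extractNonHdr.action = e_FM_PCD_ACTION_INDEXED_LOOKUP,
-+       with 4 bits set in icIndxMask (not on the lower bits): */
-+    keysParams.maxNumOfKeys = 16;   /* must equal numOfKeys for indexed lookup */
-+    keysParams.numOfKeys    = 16;   /* 2^(number of bits set in icIndxMask)    */
-+    keysParams.keySize      = 2;    /* mandatory for indexed lookup            */
-+#endif /* usage sketch */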
-+
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a CC node
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcNodeParams {
-+ t_FmPcdExtractEntry extractCcParams; /**< Extraction parameters */
-+ t_KeysParams keysParams; /**< Keys definition matching the selected extraction */
-+} t_FmPcdCcNodeParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a hash table
-+*//***************************************************************************/
-+typedef struct t_FmPcdHashTableParams {
-+ uint16_t maxNumOfKeys; /**< Maximum Number Of Keys that will (ever) be used in this Hash-table */
-+ e_FmPcdCcStatsMode statisticsMode; /**< If not e_FM_PCD_CC_STATS_MODE_NONE, the required structures for the
-+ requested statistics mode will be allocated according to maxNumOfKeys. */
-+ uint8_t kgHashShift; /**< KG-Hash-shift as it was configured in the KG-scheme
-+ that leads to this hash-table. */
-+ uint16_t hashResMask; /**< Mask that will be used on the hash-result;
-+ The number-of-sets for this hash will be calculated
-+ as (2^(number of bits set in 'hashResMask'));
-+ The 4 lower bits must be cleared. */
-+ uint8_t hashShift; /**< Byte offset from the beginning of the KeyGen hash result to the
-+ 2-bytes to be used as hash index. */
-+ uint8_t matchKeySize; /**< Size of the exact match keys held by the hash buckets */
-+
-+ t_FmPcdCcNextEngineParams ccNextEngineParamsForMiss; /**< Parameters for defining the next engine when a key is not matched */
-+
-+} t_FmPcdHashTableParams;
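-+/* Usage sketch (illustration only) of the hash-table sizing rules above: a
-+ * 'hashResMask' of 0x03F0 has its 4 lower bits cleared (as required) and 6
-+ * bits set, giving 2^6 = 64 sets; the key size and maximum key count are
-+ * arbitrary example values. */
-+#if 0 /* usage sketch */
-+static void example_HashTableParams(t_FmPcdHashTableParams *p_Ht)
-+{
-+    memset(p_Ht, 0, sizeof(*p_Ht));
-+    p_Ht->maxNumOfKeys   = 256;
-+    p_Ht->statisticsMode = e_FM_PCD_CC_STATS_MODE_NONE;
-+    p_Ht->kgHashShift    = 0;       /* as configured in the leading KG scheme  */
-+    p_Ht->hashResMask    = 0x03F0;  /* 6 bits set -> 64 sets; low 4 bits clear */
-+    p_Ht->hashShift      = 0;       /* use the first 2 bytes of the hash       */
-+    p_Ht->matchKeySize   = 6;       /* e.g. a MAC address                      */
-+    /* ccNextEngineParamsForMiss would be filled as in the enqueue sketch above. */
-+}
-+#endif /* usage sketch */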
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a CC tree group.
-+
-+ This structure defines a CC group in terms of NetEnv units
-+ and the action to be taken in each case. The unitIds list must
-+ be given in order from low to high indices.
-+
-+                t_FmPcdCcNextEngineParams is a list of 2^numOfDistinctionUnits
-+                structures where each defines the next action to be taken for
-+                each combination of units. For example:
-+ numOfDistinctionUnits = 2
-+ unitIds = {1,3}
-+ p_NextEnginePerEntriesInGrp[0] = t_FmPcdCcNextEngineParams for the case that
-+ unit 1 - not found; unit 3 - not found;
-+ p_NextEnginePerEntriesInGrp[1] = t_FmPcdCcNextEngineParams for the case that
-+ unit 1 - not found; unit 3 - found;
-+ p_NextEnginePerEntriesInGrp[2] = t_FmPcdCcNextEngineParams for the case that
-+ unit 1 - found; unit 3 - not found;
-+ p_NextEnginePerEntriesInGrp[3] = t_FmPcdCcNextEngineParams for the case that
-+ unit 1 - found; unit 3 - found;
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcGrpParams {
-+ uint8_t numOfDistinctionUnits; /**< Up to 4 */
-+ uint8_t unitIds[FM_PCD_MAX_NUM_OF_CC_UNITS];
-+ /**< Indices of the units as defined in
-+ FM_PCD_NetEnvCharacteristicsSet() */
-+ t_FmPcdCcNextEngineParams nextEnginePerEntriesInGrp[FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP];
-+ /**< Maximum entries per group is 16 */
-+} t_FmPcdCcGrpParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CC tree groups
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcTreeParams {
-+ t_Handle h_NetEnv; /**< A handle to the Network environment as returned
-+ by FM_PCD_NetEnvCharacteristicsSet() */
-+ uint8_t numOfGrps; /**< Number of CC groups within the CC tree */
-+ t_FmPcdCcGrpParams ccGrpParams[FM_PCD_MAX_NUM_OF_CC_GROUPS];
-+ /**< Parameters for each group. */
-+} t_FmPcdCcTreeParams;
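-+/* Usage sketch (illustration only): a CC tree with a single group over two
-+ * NetEnv units; the 2^2 = 4 per-entry next-engine descriptors would each be
-+ * filled like the enqueue sketch shown earlier. */
-+#if 0 /* usage sketch */
-+static void example_CcTreeParams(t_FmPcdCcTreeParams *p_Tree, t_Handle h_NetEnv)
-+{
-+    memset(p_Tree, 0, sizeof(*p_Tree));
-+    p_Tree->h_NetEnv                             = h_NetEnv; /* from FM_PCD_NetEnvCharacteristicsSet() */
-+    p_Tree->numOfGrps                            = 1;
-+    p_Tree->ccGrpParams[0].numOfDistinctionUnits = 2;
-+    p_Tree->ccGrpParams[0].unitIds[0]            = 0;   /* e.g. an IPv4 unit */
-+    p_Tree->ccGrpParams[0].unitIds[1]            = 1;   /* e.g. a UDP unit   */
-+    /* nextEnginePerEntriesInGrp[0..3] cover all found/not-found combinations. */
-+}
-+#endif /* usage sketch */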
-+
-+
-+/**************************************************************************//**
-+ @Description CC key statistics structure
-+*//***************************************************************************/
-+typedef struct t_FmPcdCcKeyStatistics {
-+ uint32_t byteCount; /**< This counter reflects byte count of frames that
-+ were matched by this key. */
-+ uint32_t frameCount; /**< This counter reflects count of frames that
-+ were matched by this key. */
-+#if (DPAA_VERSION >= 11)
-+ uint32_t frameLengthRangeCount[FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
-+ /**< These counters reflect how many frames matched
-+ this key in 'RMON' statistics mode:
-+ Each counter holds the number of frames of a
-+ specific frames length range, according to the
-+ ranges provided at initialization. */
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmPcdCcKeyStatistics;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining policer byte rate
-+*//***************************************************************************/
-+typedef struct t_FmPcdPlcrByteRateModeParams {
-+ e_FmPcdPlcrFrameLengthSelect frameLengthSelection; /**< Frame length selection */
-+    e_FmPcdPlcrRollBackFrameSelect  rollBackFrameSelection; /**< Relevant only for the e_FM_PCD_PLCR_L2_FRM_LEN and
-+                                                                 e_FM_PCD_PLCR_FULL_FRM_LEN options */
-+} t_FmPcdPlcrByteRateModeParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the policer profile (based on
-+ RFC-2698 or RFC-4115 attributes).
-+*//***************************************************************************/
-+typedef struct t_FmPcdPlcrNonPassthroughAlgParams {
-+ e_FmPcdPlcrRateMode rateMode; /**< Byte mode or Packet mode */
-+    t_FmPcdPlcrByteRateModeParams       byteModeParams;         /**< Valid for Byte mode; ignored in Packet mode */
-+ uint32_t committedInfoRate; /**< KBits/Second or Packets/Second */
-+ uint32_t committedBurstSize; /**< Bytes/Packets */
-+ uint32_t peakOrExcessInfoRate; /**< KBits/Second or Packets/Second */
-+ uint32_t peakOrExcessBurstSize; /**< Bytes/Packets */
-+} t_FmPcdPlcrNonPassthroughAlgParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the next engine after policer
-+*//***************************************************************************/
-+typedef union u_FmPcdPlcrNextEngineParams {
-+ e_FmPcdDoneAction action; /**< Action - when next engine is BMI (done) */
-+ t_Handle h_Profile; /**< Policer profile handle - used when next engine
-+ is Policer, must be a SHARED profile */
-+ t_Handle h_DirectScheme; /**< Direct scheme select - when next engine is KeyGen */
-+} u_FmPcdPlcrNextEngineParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the policer profile entry
-+*//***************************************************************************/
-+typedef struct t_FmPcdPlcrProfileParams {
-+ bool modify; /**< TRUE to change an existing profile */
-+ union {
-+ struct {
-+ e_FmPcdProfileTypeSelection profileType; /**< Type of policer profile */
-+ t_Handle h_FmPort; /**< Relevant for per-port profiles only */
-+ uint16_t relativeProfileId; /**< Profile id - relative to shared group or to port */
-+ } newParams; /**< use it when modify = FALSE */
-+ t_Handle h_Profile; /**< A handle to a profile - use it when modify=TRUE */
-+ } id;
-+ e_FmPcdPlcrAlgorithmSelection algSelection; /**< Profile Algorithm PASS_THROUGH, RFC_2698, RFC_4115 */
-+ e_FmPcdPlcrColorMode colorMode; /**< COLOR_BLIND, COLOR_AWARE */
-+
-+ union {
-+ e_FmPcdPlcrColor dfltColor; /**< For Color-Blind Pass-Through mode; the policer will re-color
-+ any incoming packet with the default value. */
-+ e_FmPcdPlcrColor override; /**< For Color-Aware modes; the profile response to a
-+ pre-color value of 2'b11. */
-+ } color;
-+
-+ t_FmPcdPlcrNonPassthroughAlgParams nonPassthroughAlgParams; /**< RFC2698 or RFC4115 parameters */
-+
-+ e_FmPcdEngine nextEngineOnGreen; /**< Next engine for green-colored frames */
-+ u_FmPcdPlcrNextEngineParams paramsOnGreen; /**< Next engine parameters for green-colored frames */
-+
-+ e_FmPcdEngine nextEngineOnYellow; /**< Next engine for yellow-colored frames */
-+ u_FmPcdPlcrNextEngineParams paramsOnYellow; /**< Next engine parameters for yellow-colored frames */
-+
-+ e_FmPcdEngine nextEngineOnRed; /**< Next engine for red-colored frames */
-+ u_FmPcdPlcrNextEngineParams paramsOnRed; /**< Next engine parameters for red-colored frames */
-+
-+ bool trapProfileOnFlowA; /**< Obsolete - do not use */
-+ bool trapProfileOnFlowB; /**< Obsolete - do not use */
-+ bool trapProfileOnFlowC; /**< Obsolete - do not use */
-+} t_FmPcdPlcrProfileParams;
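-+/* Usage sketch (illustration only): a byte-rate RFC-2698 policer profile; the
-+ * rates and burst sizes are arbitrary example values, and the exact
-+ * enumeration entries for the algorithm, color mode and rate mode are defined
-+ * elsewhere in this header, so they are only indicated by comments here. */
-+#if 0 /* usage sketch */
-+static void example_PlcrProfile(t_FmPcdPlcrProfileParams *p_Profile)
-+{
-+    memset(p_Profile, 0, sizeof(*p_Profile));
-+    p_Profile->modify = FALSE;
-+    /* id.newParams: profile type, port handle and relative profile id go here. */
-+    /* algSelection = RFC_2698, colorMode = COLOR_BLIND, rateMode = byte mode.  */
-+    p_Profile->nonPassthroughAlgParams.byteModeParams.frameLengthSelection
-+        = e_FM_PCD_PLCR_FULL_FRM_LEN;
-+    p_Profile->nonPassthroughAlgParams.committedInfoRate     = 100000; /* KBits/s */
-+    p_Profile->nonPassthroughAlgParams.committedBurstSize    = 64000;  /* Bytes   */
-+    p_Profile->nonPassthroughAlgParams.peakOrExcessInfoRate  = 200000; /* KBits/s */
-+    p_Profile->nonPassthroughAlgParams.peakOrExcessBurstSize = 64000;  /* Bytes   */
-+    p_Profile->nextEngineOnGreen     = e_FM_PCD_DONE;
-+    p_Profile->paramsOnGreen.action  = e_FM_PCD_ENQ_FRAME;
-+    p_Profile->nextEngineOnYellow    = e_FM_PCD_DONE;
-+    p_Profile->paramsOnYellow.action = e_FM_PCD_ENQ_FRAME;
-+    p_Profile->nextEngineOnRed       = e_FM_PCD_DONE;
-+    p_Profile->paramsOnRed.action    = e_FM_PCD_ENQ_FRAME;
-+}
-+#endif /* usage sketch */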
-+
-+/**************************************************************************//**
-+ @Description Parameters for selecting a location for requested manipulation
-+*//***************************************************************************/
-+typedef struct t_FmManipHdrInfo {
-+ e_NetHeaderType hdr; /**< Header selection */
-+ e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled IP. Otherwise should be cleared. */
-+ bool byField; /**< TRUE if the location of manipulation is according to some field in the specific header*/
-+ t_FmPcdFields fullField; /**< Relevant only when byField = TRUE: Extract field */
-+} t_FmManipHdrInfo;
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+/**************************************************************************//**
-+ @Description Parameters for defining an insertion manipulation
-+ of type e_FM_PCD_MANIP_INSRT_TO_START_OF_FRAME_TEMPLATE
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrInsrtByTemplateParams {
-+ uint8_t size; /**< Size of insert template to the start of the frame. */
-+ uint8_t hdrTemplate[FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE];
-+ /**< Array of the insertion template. */
-+
-+    bool    modifyOuterIp;                      /**< TRUE if the user wants to modify some fields in the outer IP header. */
-+    struct {
-+        uint16_t    ipOuterOffset;              /**< Offset of the outer IP header in the insert template, relevant if modifyOuterIp = TRUE.*/
-+        uint16_t    dscpEcn;                    /**< Value of the DSCP/ECN field in the outer IP header, relevant if modifyOuterIp = TRUE.
-+                                                     In IPv4 the DSCP/ECN field is only one byte - it has to be right-adjusted.*/
-+        bool        udpPresent;                 /**< TRUE if UDP is present in the insert template, relevant if modifyOuterIp = TRUE.*/
-+        uint8_t     udpOffset;                  /**< Offset of UDP in the insert template, relevant if modifyOuterIp = TRUE and udpPresent=TRUE.*/
-+        uint8_t     ipIdentGenId;               /**< Used by FMan-CTRL to calculate the IP-identification field, relevant if modifyOuterIp = TRUE.*/
-+        bool        recalculateLength;          /**< TRUE if length recalculation has to be performed because engines later in the path may change the frame, relevant if modifyOuterIp = TRUE.*/
-+ struct {
-+ uint8_t blockSize; /**< The CAAM block-size; Used by FMan-CTRL to calculate the IP Total Length field.*/
-+ uint8_t extraBytesAddedAlignedToBlockSize; /**< Used by FMan-CTRL to calculate the IP Total Length field and UDP length*/
-+ uint8_t extraBytesAddedNotAlignedToBlockSize;/**< Used by FMan-CTRL to calculate the IP Total Length field and UDP length.*/
-+ } recalculateLengthParams; /**< Recalculate length parameters - relevant if modifyOuterIp = TRUE and recalculateLength = TRUE */
-+ } modifyOuterIpParams; /**< Outer IP modification parameters - ignored if modifyOuterIp is FALSE */
-+
-+    bool    modifyOuterVlan;                    /**< TRUE if the user wants to modify the VPri field in the outer VLAN header*/
-+    struct {
-+        uint8_t     vpri;                       /**< Value of VPri, relevant if modifyOuterVlan = TRUE;
-+                                                     VPri is only 3 bits, so it has to be right-adjusted*/
-+ } modifyOuterVlanParams;
-+} t_FmPcdManipHdrInsrtByTemplateParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CAPWAP fragmentation
-+*//***************************************************************************/
-+typedef struct t_CapwapFragmentationParams {
-+ uint16_t sizeForFragmentation; /**< if length of the frame is greater than this value, CAPWAP fragmentation will be executed.*/
-+    bool        headerOptionsCompr;             /**< TRUE - the first fragment includes the CAPWAP header options field,
-+                                                     and all other fragments exclude the CAPWAP options field;
-+                                                     FALSE - all fragments include the CAPWAP header options field. */
-+} t_CapwapFragmentationParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CAPWAP reassembly
-+*//***************************************************************************/
-+typedef struct t_CapwapReassemblyParams {
-+ uint16_t maxNumFramesInProcess; /**< Number of frames which can be reassembled concurrently; must be power of 2.
-+ In case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 4 - 512,
-+ In case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 8 - 2048 */
-+ bool haltOnDuplicationFrag; /**< If TRUE, reassembly process will be halted due to duplicated fragment,
-+ and all processed fragments will be enqueued with error indication;
-+ If FALSE, only duplicated fragments will be enqueued with error indication. */
-+
-+ e_FmPcdManipReassemTimeOutMode timeOutMode; /**< Expiration delay initialized by the reassembly process */
-+ uint32_t fqidForTimeOutFrames; /**< FQID in which time out frames will enqueue during Time Out Process */
-+ uint32_t timeoutRoutineRequestTime;
-+                                                    /**< Represents the time interval in microseconds between consecutive
-+                                                         timeout routine requests; it has to be a power of 2. */
-+ uint32_t timeoutThresholdForReassmProcess;
-+ /**< Time interval (microseconds) for marking frames in process as too old;
-+ Frames in process are those for which at least one fragment was received
-+ but not all fragments. */
-+
-+ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry;/**< Number of frames per hash entry (needed for the reassembly process) */
-+} t_CapwapReassemblyParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining fragmentation/reassembly manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipFragOrReasmParams {
-+ bool frag; /**< TRUE if using the structure for fragmentation,
-+ otherwise this structure is used for reassembly */
-+ uint8_t sgBpid; /**< Scatter/Gather buffer pool id;
-+ Same LIODN number is used for these buffers as for
-+ the received frames buffers, so buffers of this pool
-+ need to be allocated in the same memory area as the
-+ received buffers. If the received buffers arrive
-+ from different sources, the Scatter/Gather BP id
-+ should be mutual to all these sources. */
-+ e_NetHeaderType hdr; /**< Header selection */
-+ union {
-+ t_CapwapFragmentationParams capwapFragParams; /**< Structure for CAPWAP fragmentation,
-+ relevant if 'frag' = TRUE, 'hdr' = HEADER_TYPE_CAPWAP */
-+ t_CapwapReassemblyParams capwapReasmParams; /**< Structure for CAPWAP reassembly,
-+ relevant if 'frag' = FALSE, 'hdr' = HEADER_TYPE_CAPWAP */
-+ } u;
-+} t_FmPcdManipFragOrReasmParams;
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header removal by header type
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrRmvByHdrParams {
-+ e_FmPcdManipHdrRmvByHdrType type; /**< Selection of header removal location */
-+ union {
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ struct {
-+ bool include; /**< If FALSE, remove until the specified header (not including the header);
-+ If TRUE, remove also the specified header. */
-+ t_FmManipHdrInfo hdrInfo;
-+ } fromStartByHdr; /**< Relevant when type = e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+#if (DPAA_VERSION >= 11)
-+ t_FmManipHdrInfo hdrInfo; /**< Relevant when type = e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
-+#endif /* (DPAA_VERSION >= 11) */
-+ e_FmPcdManipHdrRmvSpecificL2 specificL2; /**< Relevant when type = e_FM_PCD_MANIP_BY_HDR_SPECIFIC_L2;
-+ Defines which L2 headers to remove. */
-+ } u;
-+} t_FmPcdManipHdrRmvByHdrParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring IP fragmentation manipulation
-+
-+ Restrictions:
-+ - IP Fragmentation output fragments must not be forwarded to application directly.
-+ - Maximum number of fragments per frame is 16.
-+ - Fragmentation of IP fragments is not supported.
-+ - IPv4 packets containing header Option fields are fragmented by copying all option
-+ fields to each fragment, regardless of the copy bit value.
-+ - Transmit confirmation is not supported.
-+ - Fragmentation after SEC can't handle S/G frames.
-+ - Fragmentation nodes must be set as the last PCD action (i.e. the
-+ corresponding CC node key must have next engine set to e_FM_PCD_DONE).
-+ - Only BMan buffers shall be used for frames to be fragmented.
-+          - NOTE: The following restriction is relevant only for FMAN v3 devices: IPF
-+            does not support VSP. Therefore, VSP cannot be supported on the same
-+            port where IPF is used.
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipFragIpParams {
-+ uint16_t sizeForFragmentation; /**< If length of the frame is greater than this value,
-+ IP fragmentation will be executed.*/
-+#if (DPAA_VERSION == 10)
-+ uint8_t scratchBpid; /**< Absolute buffer pool id according to BM configuration.*/
-+#endif /* (DPAA_VERSION == 10) */
-+ bool sgBpidEn; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
-+ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
-+ received frame's buffer. */
-+ uint8_t sgBpid; /**< Scatter/Gather buffer pool id;
-+                                                This parameter is relevant when 'sgBpidEn=TRUE';
-+ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
-+ of this pool need to be allocated in the same memory area as the received buffers.
-+ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
-+ mutual to all these sources. */
-+ e_FmPcdManipDontFragAction dontFragAction; /**< Don't Fragment Action - If an IP packet is larger
-+ than MTU and its DF bit is set, then this field will
-+ determine the action to be taken.*/
-+} t_FmPcdManipFragIpParams;
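-+/* Usage sketch (illustration only): an IP fragmentation descriptor that
-+ * fragments frames longer than 1500 bytes and takes its Scatter/Gather
-+ * buffers from the received frame's pool; the Don't-Fragment action is
-+ * selected from e_FmPcdManipDontFragAction, whose entries are defined
-+ * elsewhere in this header. */
-+#if 0 /* usage sketch */
-+static void example_IpFragParams(t_FmPcdManipFragIpParams *p_Frag)
-+{
-+    memset(p_Frag, 0, sizeof(*p_Frag));
-+    p_Frag->sizeForFragmentation = 1500;   /* fragment frames longer than this */
-+    p_Frag->sgBpidEn             = FALSE;  /* reuse the received frame's pool  */
-+    /* p_Frag->dontFragAction: pick the desired e_FmPcdManipDontFragAction entry */
-+}
-+#endif /* usage sketch */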
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring IP reassembly manipulation.
-+
-+ This is a common structure for both IPv4 and IPv6 reassembly
-+ manipulation. For reassembly of both IPv4 and IPv6, make sure to
-+ set the 'hdr' field in t_FmPcdManipReassemParams to HEADER_TYPE_IPv6.
-+
-+ Restrictions:
-+ - Application must define at least one scheme to catch the reassembled frames.
-+ - Maximum number of fragments per frame is 16.
-+ - Reassembly of IPv4 fragments containing Option fields is supported.
-+
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipReassemIpParams {
-+ uint8_t relativeSchemeId[2]; /**< Partition relative scheme id:
-+ relativeSchemeId[0] - Relative scheme ID for IPV4 Reassembly manipulation;
-+ relativeSchemeId[1] - Relative scheme ID for IPV6 Reassembly manipulation;
-+ NOTE: The following comment is relevant only for FMAN v2 devices:
-+                                                             Relative scheme ID for IPv4/IPv6 Reassembly manipulation must be smaller than
-+                                                             the user schemes' IDs to ensure that the reassembly schemes will be matched first;
-+                                                             Remaining schemes, if defined, should have a higher relative scheme ID. */
-+#if (DPAA_VERSION >= 11)
-+    uint32_t                        nonConsistentSpFqid;        /**< In case other fragments of the frame correspond to a different storage
-+                                                                     profile than the opening fragment (Non-Consistent-SP state),
-+                                                                     one of two possible scenarios occurs:
-+                                                                     if 'nonConsistentSpFqid != 0', the reassembled frame will be enqueued to
-+                                                                     this fqid, otherwise a 'Non Consistent SP' bit will be set in the FD[status].*/
-+#else
-+ uint8_t sgBpid; /**< Buffer pool id for the S/G frame created by the reassembly process */
-+#endif /* (DPAA_VERSION >= 11) */
-+ uint8_t dataMemId; /**< Memory partition ID for the IPR's external tables structure */
-+ uint16_t dataLiodnOffset; /**< LIODN offset for access the IPR's external tables structure. */
-+ uint16_t minFragSize[2]; /**< Minimum fragment size:
-+ minFragSize[0] - for ipv4, minFragSize[1] - for ipv6 */
-+ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry[2];
-+ /**< Number of frames per hash entry needed for reassembly process:
-+ numOfFramesPerHashEntry[0] - for ipv4 (max value is e_FM_PCD_MANIP_EIGHT_WAYS_HASH);
-+ numOfFramesPerHashEntry[1] - for ipv6 (max value is e_FM_PCD_MANIP_SIX_WAYS_HASH). */
-+    uint16_t                        maxNumFramesInProcess;      /**< Number of frames which can be processed by Reassembly at the same time;
-+ Must be power of 2;
-+ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 4 - 512;
-+ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 8 - 2048. */
-+ e_FmPcdManipReassemTimeOutMode timeOutMode; /**< Expiration delay initialized by Reassembly process */
-+ uint32_t fqidForTimeOutFrames; /**< FQID in which time out frames will enqueue during Time Out Process;
-+ Recommended value for this field is 0; in this way timed-out frames will be discarded */
-+ uint32_t timeoutThresholdForReassmProcess;
-+                                                                /**< Represents the time interval in microseconds after which an open
-+                                                                     frame (at least one fragment was processed but not all the fragments) is considered too old*/
-+} t_FmPcdManipReassemIpParams;
-+
-+/**************************************************************************//**
-+ @Description structure for defining IPSEC manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipSpecialOffloadIPSecParams {
-+ bool decryption; /**< TRUE if being used in decryption direction;
-+ FALSE if being used in encryption direction. */
-+ bool ecnCopy; /**< TRUE to copy the ECN bits from inner/outer to outer/inner
-+ (direction depends on the 'decryption' field). */
-+ bool dscpCopy; /**< TRUE to copy the DSCP bits from inner/outer to outer/inner
-+ (direction depends on the 'decryption' field). */
-+ bool variableIpHdrLen; /**< TRUE for supporting variable IP header length in decryption. */
-+ bool variableIpVersion; /**< TRUE for supporting both IP version on the same SA in encryption */
-+    uint8_t     outerIPHdrLen;  /**< If 'variableIpVersion == TRUE' then this field must be set to a non-zero value;
-+                                     it specifies the length of the outer IP header that was configured in the
-+                                     corresponding SA. */
-+    uint16_t    arwSize;        /**< If non-zero, an ARW check will be performed for this SA;
-+                                     the value must be a multiple of 16 */
-+    uintptr_t   arwAddr;        /**< If arwSize is non-zero then this field must be set to a non-zero value;
-+                                     MUST be allocated from the FMAN's MURAM that the post-SEC op-port belongs to;
-+                                     Must be 4B aligned. Required MURAM size is '(NEXT_POWER_OF_2(arwSize+32))/8+4' Bytes */
-+} t_FmPcdManipSpecialOffloadIPSecParams;
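-+/* Worked example (illustration only) of the anti-replay-window sizing above:
-+ * for arwSize = 128 (a multiple of 16), the required MURAM allocation is
-+ * (NEXT_POWER_OF_2(128+32))/8+4 = 256/8+4 = 36 bytes, 4B aligned, taken from
-+ * the MURAM of the FMAN that the post-SEC op-port belongs to. */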
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Parameters for configuring CAPWAP fragmentation manipulation
-+
-+ Restrictions:
-+ - Maximum number of fragments per frame is 16.
-+ - Transmit confirmation is not supported.
-+ - Fragmentation nodes must be set as the last PCD action (i.e. the
-+ corresponding CC node key must have next engine set to e_FM_PCD_DONE).
-+ - Only BMan buffers shall be used for frames to be fragmented.
-+ - NOTE: The following comment is relevant only for FMAN v3 devices: IPF
-+ does not support VSP. Therefore, on the same port where we have IPF we
-+ cannot support VSP.
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipFragCapwapParams {
-+ uint16_t sizeForFragmentation; /**< If length of the frame is greater than this value,
-+ CAPWAP fragmentation will be executed.*/
-+ bool sgBpidEn; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
-+ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
-+ received frame's buffer. */
-+ uint8_t sgBpid; /**< Scatter/Gather buffer pool id;
-+                                                         This parameter is relevant when 'sgBpidEn=TRUE';
-+ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
-+ of this pool need to be allocated in the same memory area as the received buffers.
-+ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
-+ mutual to all these sources. */
-+ bool compressModeEn; /**< CAPWAP Header Options Compress Enable mode;
-+ When this mode is enabled then only the first fragment include the CAPWAP header options
-+ field (if user provides it in the input frame) and all other fragments exclude the CAPWAP
-+ options field (CAPWAP header is updated accordingly).*/
-+} t_FmPcdManipFragCapwapParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring CAPWAP reassembly manipulation.
-+
-+ Restrictions:
-+ - Application must define one scheme to catch the reassembled frames.
-+ - Maximum number of fragments per frame is 16.
-+
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipReassemCapwapParams {
-+ uint8_t relativeSchemeId; /**< Partition relative scheme id;
-+                                                        NOTE: this id must be smaller than the user schemes' IDs to ensure that the reassembly scheme will be matched first;
-+                                                        Remaining schemes, if defined, should have a higher relative scheme ID. */
-+ uint8_t dataMemId; /**< Memory partition ID for the IPR's external tables structure */
-+ uint16_t dataLiodnOffset; /**< LIODN offset for access the IPR's external tables structure. */
-+ uint16_t maxReassembledFrameLength;/**< The maximum CAPWAP reassembled frame length in bytes;
-+                                                        If maxReassembledFrameLength == 0, any successfully reassembled frame length is
-+                                                        considered a valid length;
-+                                                        if maxReassembledFrameLength > 0, a successfully reassembled frame whose length
-+                                                        exceeds this value is considered an error frame (the FD status[CRE] bit is set). */
-+ e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry;
-+ /**< Number of frames per hash entry needed for reassembly process */
-+    uint16_t                        maxNumFramesInProcess;  /**< Number of frames which can be processed by reassembly at the same time;
-+ Must be power of 2;
-+ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 4 - 512;
-+ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 8 - 2048. */
-+ e_FmPcdManipReassemTimeOutMode timeOutMode; /**< Expiration delay initialized by Reassembly process */
-+ uint32_t fqidForTimeOutFrames; /**< FQID in which time out frames will enqueue during Time Out Process;
-+ Recommended value for this field is 0; in this way timed-out frames will be discarded */
-+ uint32_t timeoutThresholdForReassmProcess;
-+                                                    /**< Represents the time interval in microseconds after which an open
-+                                                         frame (at least one fragment was processed but not all the fragments) is considered too old*/
-+} t_FmPcdManipReassemCapwapParams;
-+
-+/**************************************************************************//**
-+ @Description structure for defining CAPWAP manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipSpecialOffloadCapwapParams {
-+ bool dtls; /**< TRUE if continue to SEC DTLS encryption */
-+ e_FmPcdManipHdrQosSrc qosSrc; /**< TODO */
-+} t_FmPcdManipSpecialOffloadCapwapParams;
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining special offload manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipSpecialOffloadParams {
-+ e_FmPcdManipSpecialOffloadType type; /**< Type of special offload manipulation */
-+ union
-+ {
-+ t_FmPcdManipSpecialOffloadIPSecParams ipsec; /**< Parameters for IPSec; Relevant when
-+ type = e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC */
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdManipSpecialOffloadCapwapParams capwap; /**< Parameters for CAPWAP; Relevant when
-+ type = e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} t_FmPcdManipSpecialOffloadParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining insertion manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrInsrt {
-+ uint8_t size; /**< size of inserted section */
-+ uint8_t *p_Data; /**< data to be inserted */
-+} t_FmPcdManipHdrInsrt;
-+
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining generic removal manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrRmvGenericParams {
-+ uint8_t offset; /**< Offset from beginning of header to the start
-+ location of the removal */
-+ uint8_t size; /**< Size of removed section */
-+} t_FmPcdManipHdrRmvGenericParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining generic insertion manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrInsrtGenericParams {
-+ uint8_t offset; /**< Offset from beginning of header to the start
-+ location of the insertion */
-+ uint8_t size; /**< Size of inserted section */
-+ bool replace; /**< TRUE to override (replace) existing data at
-+ 'offset', FALSE to insert */
-+ uint8_t *p_Data; /**< Pointer to data to be inserted */
-+} t_FmPcdManipHdrInsrtGenericParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation VLAN DSCP To Vpri translation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrFieldUpdateVlanDscpToVpri {
-+ uint8_t dscpToVpriTable[FM_PCD_MANIP_DSCP_TO_VLAN_TRANS];
-+ /**< A table of VPri values for each DSCP value;
-+ The index is the DSCP value (0-0x3F) and the
-+ value is the corresponding VPRI (0-15). */
-+ uint8_t vpriDefVal; /**< 0-7, Relevant only if updateType =
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN,
-+ this field is the Q Tag default value if the
-+ IP header is not found. */
-+} t_FmPcdManipHdrFieldUpdateVlanDscpToVpri;
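-+
-+/*
-+ * A minimal usage sketch (not part of the original API): filling the
-+ * DSCP-to-VPri translation table so that the VLAN priority follows the three
-+ * most significant DSCP bits, with a default of 0 when no IP header is found.
-+ * Only the fields of the structure above are used.
-+ */
-+#if 0 /* illustrative only */
-+static void example_fill_dscp_to_vpri(t_FmPcdManipHdrFieldUpdateVlanDscpToVpri *p_Trans)
-+{
-+    int dscp;
-+
-+    for (dscp = 0; dscp < FM_PCD_MANIP_DSCP_TO_VLAN_TRANS; dscp++)
-+        p_Trans->dscpToVpriTable[dscp] = (uint8_t)(dscp >> 3); /* 0..7 */
-+    p_Trans->vpriDefVal = 0;
-+}
-+#endif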
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation VLAN fields updates
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrFieldUpdateVlan {
-+ e_FmPcdManipHdrFieldUpdateVlan updateType; /**< Selects VLAN update type */
-+ union {
-+ uint8_t vpri; /**< 0-7, Relevant only if updateType =
-+ e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_PRI, this
-+ is the new VLAN pri. */
-+ t_FmPcdManipHdrFieldUpdateVlanDscpToVpri dscpToVpri; /**< Parameters structure, Relevant only if updateType
-+ = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN. */
-+ } u;
-+} t_FmPcdManipHdrFieldUpdateVlan;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation IPV4 fields updates
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrFieldUpdateIpv4 {
-+ ipv4HdrManipUpdateFlags_t validUpdates; /**< ORed flag, selecting the required updates */
-+ uint8_t tos; /**< 8 bit New TOS; Relevant if validUpdates contains
-+ HDR_MANIP_IPV4_TOS */
-+ uint16_t id; /**< 16 bit New IP ID; Relevant only if validUpdates
-+ contains HDR_MANIP_IPV4_ID */
-+ uint32_t src; /**< 32 bit New IP SRC; Relevant only if validUpdates
-+ contains HDR_MANIP_IPV4_SRC */
-+ uint32_t dst; /**< 32 bit New IP DST; Relevant only if validUpdates
-+ contains HDR_MANIP_IPV4_DST */
-+} t_FmPcdManipHdrFieldUpdateIpv4;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation IPV6 fields updates
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrFieldUpdateIpv6 {
-+ ipv6HdrManipUpdateFlags_t validUpdates; /**< ORed flag, selecting the required updates */
-+ uint8_t trafficClass; /**< 8 bit New Traffic Class; Relevant if validUpdates contains
-+ HDR_MANIP_IPV6_TC */
-+ uint8_t src[NET_HEADER_FIELD_IPv6_ADDR_SIZE];
-+ /**< 16 byte new IP SRC; Relevant only if validUpdates
-+ contains HDR_MANIP_IPV6_SRC */
-+ uint8_t dst[NET_HEADER_FIELD_IPv6_ADDR_SIZE];
-+ /**< 16 byte new IP DST; Relevant only if validUpdates
-+ contains HDR_MANIP_IPV6_DST */
-+} t_FmPcdManipHdrFieldUpdateIpv6;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation TCP/UDP fields updates
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrFieldUpdateTcpUdp {
-+ tcpUdpHdrManipUpdateFlags_t validUpdates; /**< ORed flag, selecting the required updates */
-+ uint16_t src; /**< 16 bit New TCP/UDP SRC; Relevant only if validUpdates
-+ contains HDR_MANIP_TCP_UDP_SRC */
-+ uint16_t dst; /**< 16 bit New TCP/UDP DST; Relevant only if validUpdates
-+ contains HDR_MANIP_TCP_UDP_DST */
-+} t_FmPcdManipHdrFieldUpdateTcpUdp;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation fields updates
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrFieldUpdateParams {
-+ e_FmPcdManipHdrFieldUpdateType type; /**< Type of header field update manipulation */
-+ union {
-+ t_FmPcdManipHdrFieldUpdateVlan vlan; /**< Parameters for VLAN update. Relevant when
-+ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN */
-+ t_FmPcdManipHdrFieldUpdateIpv4 ipv4; /**< Parameters for IPv4 update. Relevant when
-+ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4 */
-+ t_FmPcdManipHdrFieldUpdateIpv6 ipv6; /**< Parameters for IPv6 update. Relevant when
-+ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6 */
-+ t_FmPcdManipHdrFieldUpdateTcpUdp tcpUdp; /**< Parameters for TCP/UDP update. Relevant when
-+ type = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP */
-+ } u;
-+} t_FmPcdManipHdrFieldUpdateParams;
-+
-+
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining custom header manipulation for generic field replacement
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrCustomGenFieldReplace {
-+ uint8_t srcOffset; /**< Location of new data - Offset from
-+ Parse Result (srcOffset >= 16, srcOffset + size <= 32). */
-+ uint8_t dstOffset; /**< Location of data to be overwritten - Offset from
-+ start of frame (dstOffset + size <= 256). */
-+ uint8_t size; /**< The number of bytes (<=16) to be replaced */
-+ uint8_t mask; /**< Optional 1 byte mask. Set to select bits for
-+ replacement (1 - bit will be replaced);
-+ Clear to use field as is. */
-+ uint8_t maskOffset; /**< Relevant if mask != 0;
-+ Mask offset within the replaced 'size'. */
-+} t_FmPcdManipHdrCustomGenFieldReplace;
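-+
-+/*
-+ * A minimal usage sketch (not part of the original API): a generic field
-+ * replacement copying 4 bytes from offset 16 of the Parse Result over the
-+ * bytes at frame offset 24, honouring the documented limits
-+ * (srcOffset >= 16, srcOffset + size <= 32, dstOffset + size <= 256, size <= 16).
-+ */
-+#if 0 /* illustrative only */
-+static void example_gen_field_replace(t_FmPcdManipHdrCustomGenFieldReplace *p_Replace)
-+{
-+    p_Replace->srcOffset  = 16; /* within the Parse Result window */
-+    p_Replace->dstOffset  = 24; /* offset from the start of the frame */
-+    p_Replace->size       = 4;  /* 16 + 4 <= 32 and 24 + 4 <= 256 */
-+    p_Replace->mask       = 0;  /* no per-bit masking: use the field as-is */
-+    p_Replace->maskOffset = 0;  /* only relevant when mask != 0 */
-+}
-+#endif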
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining custom header manipulation for IP replacement
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrCustomIpHdrReplace {
-+ e_FmPcdManipHdrCustomIpReplace replaceType; /**< Selects replace update type */
-+ bool decTtlHl; /**< Decrement TTL (IPV4) or Hop limit (IPV6) by 1 */
-+ bool updateIpv4Id; /**< Relevant when replaceType =
-+ e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 */
-+ uint16_t id; /**< 16 bit New IP ID; Relevant only if
-+ updateIpv4Id = TRUE */
-+ uint8_t hdrSize; /**< The size of the new IP header */
-+ uint8_t hdr[FM_PCD_MANIP_MAX_HDR_SIZE];
-+ /**< The new IP header */
-+} t_FmPcdManipHdrCustomIpHdrReplace;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining custom header manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrCustomParams {
-+ e_FmPcdManipHdrCustomType type; /**< Type of header field update manipulation */
-+ union {
-+ t_FmPcdManipHdrCustomIpHdrReplace ipHdrReplace; /**< Parameters for IP header replacement */
-+ t_FmPcdManipHdrCustomGenFieldReplace genFieldReplace; /**< Parameters for generic field replacement */
-+ } u;
-+} t_FmPcdManipHdrCustomParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining specific L2 insertion manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrInsrtSpecificL2Params {
-+ e_FmPcdManipHdrInsrtSpecificL2 specificL2; /**< Selects which L2 headers to insert */
-+ bool update; /**< TRUE to update MPLS header */
-+ uint8_t size; /**< size of inserted section */
-+ uint8_t *p_Data; /**< data to be inserted */
-+} t_FmPcdManipHdrInsrtSpecificL2Params;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Parameters for defining IP insertion manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrInsrtIpParams {
-+ bool calcL4Checksum; /**< Calculate L4 checksum. */
-+ e_FmPcdManipHdrQosMappingMode mappingMode; /**< TODO */
-+ uint8_t lastPidOffset; /**< the offset of the last Protocol within
-+ the inserted header */
-+ uint16_t id; /**< 16 bit New IP ID */
-+ bool dontFragOverwrite;
-+ /**< IPv4 only. DF is overwritten with the hash-result next-to-last byte.
-+ * This byte is configured to be overwritten when RPD is set. */
-+ uint8_t lastDstOffset;
-+ /**< IPv6 only. If a routing extension exists, the user should set the offset of the destination address
-+ * in order to calculate the UDP checksum pseudo-header;
-+ * otherwise set it to '0'. */
-+ t_FmPcdManipHdrInsrt insrt; /**< size and data to be inserted. */
-+} t_FmPcdManipHdrInsrtIpParams;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header insertion manipulation by header type
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrInsrtByHdrParams {
-+ e_FmPcdManipHdrInsrtByHdrType type; /**< Selects manipulation type */
-+ union {
-+
-+ t_FmPcdManipHdrInsrtSpecificL2Params specificL2Params;
-+ /**< Used when type = e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2:
-+ Selects which L2 headers to insert */
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdManipHdrInsrtIpParams ipParams; /**< Used when type = e_FM_PCD_MANIP_INSRT_BY_HDR_IP */
-+ t_FmPcdManipHdrInsrt insrt; /**< Used when type is one of e_FM_PCD_MANIP_INSRT_BY_HDR_UDP,
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, or
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} t_FmPcdManipHdrInsrtByHdrParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header insertion manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrInsrtParams {
-+ e_FmPcdManipHdrInsrtType type; /**< Type of insertion manipulation */
-+ union {
-+ t_FmPcdManipHdrInsrtByHdrParams byHdr; /**< Parameters for defining header insertion manipulation by header type,
-+ relevant if 'type' = e_FM_PCD_MANIP_INSRT_BY_HDR */
-+ t_FmPcdManipHdrInsrtGenericParams generic; /**< Parameters for defining generic header insertion manipulation,
-+ relevant if 'type' = e_FM_PCD_MANIP_INSRT_GENERIC */
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ t_FmPcdManipHdrInsrtByTemplateParams byTemplate; /**< Parameters for defining header insertion manipulation by template,
-+ relevant if 'type' = e_FM_PCD_MANIP_INSRT_BY_TEMPLATE */
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+ } u;
-+} t_FmPcdManipHdrInsrtParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header removal manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrRmvParams {
-+ e_FmPcdManipHdrRmvType type; /**< Type of header removal manipulation */
-+ union {
-+ t_FmPcdManipHdrRmvByHdrParams byHdr; /**< Parameters for defining header removal manipulation by header type,
-+ relevant if type = e_FM_PCD_MANIP_RMV_BY_HDR */
-+ t_FmPcdManipHdrRmvGenericParams generic; /**< Parameters for defining generic header removal manipulation,
-+ relevant if type = e_FM_PCD_MANIP_RMV_GENERIC */
-+ } u;
-+} t_FmPcdManipHdrRmvParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation node
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipHdrParams {
-+ bool rmv; /**< TRUE, to define removal manipulation */
-+ t_FmPcdManipHdrRmvParams rmvParams; /**< Parameters for removal manipulation, relevant if 'rmv' = TRUE */
-+
-+ bool insrt; /**< TRUE, to define insertion manipulation */
-+ t_FmPcdManipHdrInsrtParams insrtParams; /**< Parameters for insertion manipulation, relevant if 'insrt' = TRUE */
-+
-+ bool fieldUpdate; /**< TRUE, to define field update manipulation */
-+ t_FmPcdManipHdrFieldUpdateParams fieldUpdateParams; /**< Parameters for field update manipulation, relevant if 'fieldUpdate' = TRUE */
-+
-+ bool custom; /**< TRUE, to define custom manipulation */
-+ t_FmPcdManipHdrCustomParams customParams; /**< Parameters for custom manipulation, relevant if 'custom' = TRUE */
-+
-+ bool dontParseAfterManip;/**< TRUE to de-activate the parser after the manipulation defined in this node.
-+ Restrictions:
-+ 1. MUST be set if the next engine after the CC is not another CC node
-+ (but rather Policer or Keygen), and this is the last (no h_NextManip) in a chain
-+ of manipulation nodes. This includes single nodes (i.e. nodes with no h_NextManip
-+ that are also never pointed to as the h_NextManip of another manipulation node);
-+ 2. MUST be set if the next engine after the CC is another CC node, and
-+ this is NOT the last manipulation node (i.e. it has h_NextManip).*/
-+} t_FmPcdManipHdrParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining fragmentation manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipFragParams {
-+ e_NetHeaderType hdr; /**< Header selection */
-+ union {
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdManipFragCapwapParams capwapFrag; /**< Parameters for defining CAPWAP fragmentation,
-+ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+ t_FmPcdManipFragIpParams ipFrag; /**< Parameters for defining IP fragmentation,
-+ relevant if 'hdr' = HEADER_TYPE_Ipv4 or HEADER_TYPE_Ipv6 */
-+ } u;
-+} t_FmPcdManipFragParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining reassembly manipulation
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipReassemParams {
-+ e_NetHeaderType hdr; /**< Header selection */
-+ union {
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdManipReassemCapwapParams capwapReassem; /**< Parameters for defining CAPWAP reassembly,
-+ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ t_FmPcdManipReassemIpParams ipReassem; /**< Parameters for defining IP reassembly,
-+ relevant if 'hdr' = HEADER_TYPE_Ipv4 or HEADER_TYPE_Ipv6 */
-+ } u;
-+} t_FmPcdManipReassemParams;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a manipulation node
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipParams {
-+ e_FmPcdManipType type; /**< Selects type of manipulation node */
-+ union{
-+ t_FmPcdManipHdrParams hdr; /**< Parameters for defining header manipulation node */
-+ t_FmPcdManipReassemParams reassem; /**< Parameters for defining reassembly manipulation node */
-+ t_FmPcdManipFragParams frag; /**< Parameters for defining fragmentation manipulation node */
-+ t_FmPcdManipSpecialOffloadParams specialOffload; /**< Parameters for defining special offload manipulation node */
-+ } u;
-+
-+ t_Handle h_NextManip; /**< Supported for Header Manipulation only;
-+ Handle to another (previously defined) manipulation node;
-+ Allows concatenation of manipulation actions;
-+ This parameter is optional and may be NULL. */
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ bool fragOrReasm; /**< TRUE, if defined fragmentation/reassembly manipulation */
-+ t_FmPcdManipFragOrReasmParams fragOrReasmParams; /**< Parameters for fragmentation/reassembly manipulation,
-+ relevant if fragOrReasm = TRUE */
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+} t_FmPcdManipParams;
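-+
-+/*
-+ * A minimal usage sketch (not part of the original API): a header manipulation
-+ * node that removes 8 bytes from the start of the header and is not chained to
-+ * any further manipulation (h_NextManip = NULL). The enum value
-+ * e_FM_PCD_MANIP_HDR and the creation routine FM_PCD_ManipNodeSet() are
-+ * assumptions; only the structure fields shown above are taken from this header.
-+ */
-+#if 0 /* illustrative only */
-+static t_Handle example_generic_rmv_manip(t_Handle h_FmPcd)
-+{
-+    t_FmPcdManipParams manipParams;
-+
-+    memset(&manipParams, 0, sizeof(manipParams));
-+    manipParams.type                             = e_FM_PCD_MANIP_HDR;       /* assumed enum value */
-+    manipParams.u.hdr.rmv                        = TRUE;
-+    manipParams.u.hdr.rmvParams.type             = e_FM_PCD_MANIP_RMV_GENERIC;
-+    manipParams.u.hdr.rmvParams.u.generic.offset = 0;  /* strip from the start of the header */
-+    manipParams.u.hdr.rmvParams.u.generic.size   = 8;  /* remove 8 bytes */
-+    manipParams.u.hdr.dontParseAfterManip        = TRUE;
-+    manipParams.h_NextManip                      = NULL; /* no concatenation */
-+
-+    return FM_PCD_ManipNodeSet(h_FmPcd, &manipParams);   /* assumed creation routine */
-+}
-+#endif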
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving IP reassembly statistics
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipReassemIpStats {
-+ /* common counters for both IPv4 and IPv6 */
-+ uint32_t timeout; /**< Counts the number of timeout occurrences */
-+ uint32_t rfdPoolBusy; /**< Counts the number of failed attempts to allocate
-+ a Reassembly Frame Descriptor */
-+ uint32_t internalBufferBusy; /**< Counts the number of times an internal buffer busy occurred */
-+ uint32_t externalBufferBusy; /**< Counts the number of times external buffer busy occurred */
-+ uint32_t sgFragments; /**< Counts the number of Scatter/Gather fragments */
-+ uint32_t dmaSemaphoreDepletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
-+#if (DPAA_VERSION >= 11)
-+ uint32_t nonConsistentSp; /**< Counts the number of Non Consistent Storage Profile events for
-+ successfully reassembled frames */
-+#endif /* (DPAA_VERSION >= 11) */
-+ struct {
-+ uint32_t successfullyReassembled; /**< Counts the number of successfully reassembled frames */
-+ uint32_t validFragments; /**< Counts the total number of valid fragments that
-+ have been processed for all frames */
-+ uint32_t processedFragments; /**< Counts the number of processed fragments
-+ (valid and error fragments) for all frames */
-+ uint32_t malformedFragments; /**< Counts the number of malformed fragments processed for all frames */
-+ uint32_t discardedFragments; /**< Counts the number of fragments discarded by the reassembly process */
-+ uint32_t autoLearnBusy; /**< Counts the number of times a busy condition occurs when attempting
-+ to access an IP-Reassembly Automatic Learning Hash set */
-+ uint32_t moreThan16Fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
-+ exceeds 16 */
-+ } specificHdrStatistics[2]; /**< slot '0' is for IPv4, slot '1' is for IPv6 */
-+} t_FmPcdManipReassemIpStats;
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving IP fragmentation statistics
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipFragIpStats {
-+ uint32_t totalFrames; /**< Number of frames that passed through the manipulation node */
-+ uint32_t fragmentedFrames; /**< Number of frames that were fragmented */
-+ uint32_t generatedFragments; /**< Number of fragments that were generated */
-+} t_FmPcdManipFragIpStats;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Structure for retrieving CAPWAP reassembly statistics
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipReassemCapwapStats {
-+ uint32_t timeout; /**< Counts the number of timeout occurrences */
-+ uint32_t rfdPoolBusy; /**< Counts the number of failed attempts to allocate
-+ a Reassembly Frame Descriptor */
-+ uint32_t internalBufferBusy; /**< Counts the number of times an internal buffer busy occurred */
-+ uint32_t externalBufferBusy; /**< Counts the number of times external buffer busy occurred */
-+ uint32_t sgFragments; /**< Counts the number of Scatter/Gather fragments */
-+ uint32_t dmaSemaphoreDepletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
-+ uint32_t successfullyReassembled; /**< Counts the number of successfully reassembled frames */
-+ uint32_t validFragments; /**< Counts the total number of valid fragments that
-+ have been processed for all frames */
-+ uint32_t processedFragments; /**< Counts the number of processed fragments
-+ (valid and error fragments) for all frames */
-+ uint32_t malformedFragments; /**< Counts the number of malformed fragments processed for all frames */
-+ uint32_t autoLearnBusy; /**< Counts the number of times a busy condition occurs when attempting
-+ to access a Reassembly Automatic Learning Hash set */
-+ uint32_t discardedFragments; /**< Counts the number of fragments discarded by the reassembly process */
-+ uint32_t moreThan16Fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
-+ exceeds 16 */
-+ uint32_t exceedMaxReassemblyFrameLen;/**< Counts the number of times that a successfully reassembled frame
-+ length exceeds the MaxReassembledFrameLength value */
-+} t_FmPcdManipReassemCapwapStats;
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving CAPWAP fragmentation statistics
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipFragCapwapStats {
-+ uint32_t totalFrames; /**< Number of frames that passed through the manipulation node */
-+ uint32_t fragmentedFrames; /**< Number of frames that were fragmented */
-+ uint32_t generatedFragments; /**< Number of fragments that were generated */
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ uint8_t sgAllocationFailure; /**< Number of allocation failure of s/g buffers */
-+#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
-+} t_FmPcdManipFragCapwapStats;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving reassembly statistics
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipReassemStats {
-+ union {
-+ t_FmPcdManipReassemIpStats ipReassem; /**< Structure for IP reassembly statistics */
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdManipReassemCapwapStats capwapReassem; /**< Structure for CAPWAP reassembly statistics */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} t_FmPcdManipReassemStats;
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving fragmentation statistics
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipFragStats {
-+ union {
-+ t_FmPcdManipFragIpStats ipFrag; /**< Structure for IP fragmentation statistics */
-+#if (DPAA_VERSION >= 11)
-+ t_FmPcdManipFragCapwapStats capwapFrag; /**< Structure for CAPWAP fragmentation statistics */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} t_FmPcdManipFragStats;
-+
-+/**************************************************************************//**
-+ @Description Structure for selecting manipulation statistics
-+*//***************************************************************************/
-+typedef struct t_FmPcdManipStats {
-+ union {
-+ t_FmPcdManipReassemStats reassem; /**< Structure for reassembly statistics */
-+ t_FmPcdManipFragStats frag; /**< Structure for fragmentation statistics */
-+ } u;
-+} t_FmPcdManipStats;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Parameters for defining frame replicator group and its members
-+*//***************************************************************************/
-+typedef struct t_FmPcdFrmReplicGroupParams {
-+ uint8_t maxNumOfEntries; /**< Maximal number of members in the group;
-+ Must be at least 2. */
-+ uint8_t numOfEntries; /**< Number of members in the group;
-+ Must be at least 1. */
-+ t_FmPcdCcNextEngineParams nextEngineParams[FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES];
-+ /**< Array of members' parameters */
-+} t_FmPcdFrmReplicGroupParams;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+/**************************************************************************//**
-+ @Description structure for defining statistics node
-+*//***************************************************************************/
-+typedef struct t_FmPcdStatsParams {
-+ e_FmPcdStatsType type; /**< type of statistics node */
-+} t_FmPcdStatsParams;
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_NetEnvCharacteristicsSet
-+
-+ @Description Define a set of Network Environment Characteristics.
-+
-+ When setting an environment it is important to understand its
-+ application. It is not meant to describe the flows that will run
-+ on the ports using this environment, but rather what the user intends
-+ to do with the PCD mechanisms in order to parse-classify-distribute those
-+ frames.
-+ By specifying a distinction unit, the user declares that this option will
-+ be used to distinguish between frames at either a KeyGen scheme or a coarse
-+ classification action descriptor. Using interchangeable headers to define a
-+ unit means that the user is indifferent to which of the interchangeable
-+ headers is present in the frame, and wants the distinction to be based
-+ on the presence of either one of them.
-+
-+ Depending on context, there are limitations to the use of environments. A
-+ port using the PCD functionality is bound to an environment. Some or even
-+ all ports may share an environment, but an environment per port is also
-+ possible. When initializing a scheme, a classification plan group (see below),
-+ or a coarse classification tree, one of the initialized environments must be
-+ stated and related to. When a port is bound to a scheme, a classification
-+ plan group, or a coarse classification tree, it MUST be bound to the same
-+ environment.
-+
-+ The different PCD modules may rely (for flow definition) ONLY on
-+ distinction units as defined by their environment. When initializing a
-+ scheme, for example, IPV4 may not be selected as a match for
-+ recognizing flows unless it was defined in the related environment. In
-+ fact, to guide the user through the configuration of the PCD, each module's
-+ characterization in terms of flows is not done using protocol names, but using
-+ environment indexes.
-+
-+ In terms of HW implementation, the list of distinction units sets the LCV vectors
-+ and is later used for the match vector, classification plan vectors and coarse classification
-+ indexing.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_NetEnvParams A structure of parameters for the initialization of
-+ the network environment.
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_NetEnvCharacteristicsSet(t_Handle h_FmPcd, t_FmPcdNetEnvParams *p_NetEnvParams);
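-+
-+/*
-+ * A minimal usage sketch (not part of the original API): the basic call pattern
-+ * for creating a network environment. The members of t_FmPcdNetEnvParams (the
-+ * list of distinction units) are not shown in this excerpt and are therefore
-+ * only indicated by a placeholder comment.
-+ */
-+#if 0 /* illustrative only */
-+static t_Handle example_net_env(t_Handle h_FmPcd)
-+{
-+    t_FmPcdNetEnvParams netEnvParams;
-+
-+    memset(&netEnvParams, 0, sizeof(netEnvParams));
-+    /* ... fill in the distinction units the PCD will classify on ... */
-+
-+    return FM_PCD_NetEnvCharacteristicsSet(h_FmPcd, &netEnvParams); /* NULL on failure */
-+}
-+#endif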
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_NetEnvCharacteristicsDelete
-+
-+ @Description Deletes a set of Network Environment Characteristics.
-+
-+ @Param[in] h_NetEnv A handle to the Network environment.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PCD_NetEnvCharacteristicsDelete(t_Handle h_NetEnv);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSchemeSet
-+
-+ @Description Initializing or modifying and enabling a scheme for the KeyGen.
-+ This routine should be called for adding or modifying a scheme.
-+ When a scheme needs modifying, the API requires that it be
-+ rewritten. In such a case 'modify' should be TRUE. If the
-+ routine is called for an existing (valid) scheme and 'modify' is FALSE,
-+ it will return an error.
-+
-+ @Param[in] h_FmPcd If this is a new scheme - A handle to an FM PCD Module.
-+ Otherwise NULL (ignored by driver).
-+ @Param[in,out] p_SchemeParams A structure of parameters for defining the scheme
-+
-+ @Return A handle to the initialized scheme on success; NULL code otherwise.
-+ When used as "modify" (rather than for setting a new scheme),
-+ p_SchemeParams->id.h_Scheme will return NULL if action fails due to scheme
-+ BUSY state.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_KgSchemeSet(t_Handle h_FmPcd,
-+ t_FmPcdKgSchemeParams *p_SchemeParams);
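-+
-+/*
-+ * A minimal usage sketch (not part of the original API): modifying an existing
-+ * KeyGen scheme. Per the description above, the scheme must be fully rewritten
-+ * with 'modify' set to TRUE, and h_FmPcd is ignored (NULL) in that case. The
-+ * layout of t_FmPcdKgSchemeParams beyond the members referenced in the text
-+ * ('modify', 'id.h_Scheme') is an assumption.
-+ */
-+#if 0 /* illustrative only */
-+static t_Handle example_modify_scheme(t_Handle h_ExistingScheme)
-+{
-+    t_FmPcdKgSchemeParams schemeParams;
-+
-+    memset(&schemeParams, 0, sizeof(schemeParams));
-+    schemeParams.modify      = TRUE;             /* rewrite an existing scheme */
-+    schemeParams.id.h_Scheme = h_ExistingScheme; /* assumed member: scheme to modify */
-+    /* ... re-populate the remaining scheme parameters ... */
-+
-+    return FM_PCD_KgSchemeSet(NULL, &schemeParams); /* NULL on failure or BUSY */
-+}
-+#endif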
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSchemeDelete
-+
-+ @Description Deleting an initialized scheme.
-+
-+ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet()
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_KgSchemeDelete(t_Handle h_Scheme);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSchemeGetCounter
-+
-+ @Description Reads scheme packet counter.
-+
-+ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet().
-+
-+ @Return Counter's current value.
-+
-+ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
-+*//***************************************************************************/
-+uint32_t FM_PCD_KgSchemeGetCounter(t_Handle h_Scheme);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSchemeSetCounter
-+
-+ @Description Writes scheme packet counter.
-+
-+ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet().
-+ @Param[in] value New scheme counter value - typically '0' for
-+ resetting the counter.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_KgSchemeSetCounter(t_Handle h_Scheme, uint32_t value);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrProfileSet
-+
-+ @Description Sets a profile entry in the policer profile table.
-+ The routine overrides any existing value.
-+
-+ @Param[in] h_FmPcd A handle to an FM PCD Module.
-+ @Param[in] p_Profile A structure of parameters for defining a
-+ policer profile entry.
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+ When used as "modify" (rather than for setting a new profile),
-+ p_Profile->id.h_Profile will return NULL if action fails due to profile
-+ BUSY state.
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_PlcrProfileSet(t_Handle h_FmPcd,
-+ t_FmPcdPlcrProfileParams *p_Profile);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrProfileDelete
-+
-+ @Description Delete a profile entry in the policer profile table.
-+ The routine sets the entry to invalid.
-+
-+ @Param[in] h_Profile A handle to the profile.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Error FM_PCD_PlcrProfileDelete(t_Handle h_Profile);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrProfileGetCounter
-+
-+ @Description Reads the value of the selected policer profile counter.
-+
-+ @Param[in] h_Profile A handle to the profile.
-+ @Param[in] counter Counter selector.
-+
-+ @Return specific counter value.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+uint32_t FM_PCD_PlcrProfileGetCounter(t_Handle h_Profile,
-+ e_FmPcdPlcrProfileCounters counter);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrProfileSetCounter
-+
-+ @Description Writes a value to the selected policer profile counter.
-+ The routine overrides any existing value.
-+
-+ @Param[in] h_Profile A handle to the profile.
-+ @Param[in] counter Counter selector.
-+ @Param[in] value value to set counter with.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Error FM_PCD_PlcrProfileSetCounter(t_Handle h_Profile,
-+ e_FmPcdPlcrProfileCounters counter,
-+ uint32_t value);
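-+
-+/*
-+ * A minimal usage sketch (not part of the original API): reading a policer
-+ * profile counter and then clearing it by writing zero, using only the two
-+ * routines declared above.
-+ */
-+#if 0 /* illustrative only */
-+static uint32_t example_read_and_clear_counter(t_Handle h_Profile,
-+                                               e_FmPcdPlcrProfileCounters counter)
-+{
-+    uint32_t value = FM_PCD_PlcrProfileGetCounter(h_Profile, counter);
-+
-+    (void)FM_PCD_PlcrProfileSetCounter(h_Profile, counter, 0); /* reset */
-+    return value;
-+}
-+#endif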
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_CcRootBuild
-+
-+ @Description This routine must be called to define a complete coarse
-+ classification tree. This is the way to define coarse
-+ classification to a certain flow - the KeyGen schemes
-+ may point only to trees defined in this way.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_Params A structure of parameters to define the tree.
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_CcRootBuild (t_Handle h_FmPcd,
-+ t_FmPcdCcTreeParams *p_Params);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_CcRootDelete
-+
-+ @Description Deleting a built tree.
-+
-+ @Param[in] h_CcTree A handle to a CC tree.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_CcRootModifyNextEngine
-+
-+ @Description Modify the Next Engine Parameters in the entry of the tree.
-+
-+ @Param[in] h_CcTree A handle to the tree
-+ @Param[in] grpId A Group index in the tree
-+ @Param[in] index Entry index in the group defined by grpId
-+ @Param[in] p_FmPcdCcNextEngineParams Pointer to new next engine parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_CcRootBuild().
-+*//***************************************************************************/
-+t_Error FM_PCD_CcRootModifyNextEngine(t_Handle h_CcTree,
-+ uint8_t grpId,
-+ uint8_t index,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableSet
-+
-+ @Description This routine should be called for each CC (coarse classification)
-+ node. The whole CC tree should be built bottom up so that each
-+ node points to already defined nodes.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_Param A structure of parameters defining the CC node
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_Param);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableDelete
-+
-+ @Description Deleting a built node.
-+
-+ @Param[in] h_CcNode A handle to a CC node.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyMissNextEngine
-+
-+ @Description Modify the Next Engine Parameters of the Miss key case of the node.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] p_FmPcdCcNextEngineParams Parameters for defining next engine
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet();
-+ Not relevant in the case the node is of type 'INDEXED_LOOKUP'.
-+ When configuring nextEngine = e_FM_PCD_CC, note that
-+ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
-+ from the currently changed table.
-+
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableModifyMissNextEngine(t_Handle h_CcNode,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableRemoveKey
-+
-+ @Description Remove the key (including next engine parameters of this key)
-+ defined by the index of the relevant node.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for removing
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
-+ node and the nodes that lead to it.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableRemoveKey(t_Handle h_CcNode, uint16_t keyIndex);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableAddKey
-+
-+ @Description Add the key (including the next engine parameters of this key) at the
-+ index defined by keyIndex. Note that 'FM_PCD_LAST_KEY_INDEX'
-+ may be used by users that don't care about the position of the
-+ key in the table - in that case, the key will be automatically
-+ added by the driver in the last available entry.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for adding.
-+ @Param[in] keySize Key size of added key
-+ @Param[in] p_KeyParams A pointer to the parameters that include the
-+ new key with its Next Engine Parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
-+ node and the nodes that lead to it.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableAddKey(t_Handle h_CcNode,
-+ uint16_t keyIndex,
-+ uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_KeyParams);
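-+
-+/*
-+ * A minimal usage sketch (not part of the original API): appending a key
-+ * without caring about its position by passing FM_PCD_LAST_KEY_INDEX, as
-+ * allowed by the description above. The internal layout of t_FmPcdCcKeyParams
-+ * is not shown in this excerpt, so its population is only indicated by a comment.
-+ */
-+#if 0 /* illustrative only */
-+static t_Error example_append_key(t_Handle h_CcNode, uint8_t keySize)
-+{
-+    t_FmPcdCcKeyParams keyParams;
-+
-+    memset(&keyParams, 0, sizeof(keyParams));
-+    /* ... set the key data, optional mask and next-engine parameters ... */
-+
-+    return FM_PCD_MatchTableAddKey(h_CcNode, FM_PCD_LAST_KEY_INDEX,
-+                                   keySize, &keyParams);
-+}
-+#endif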
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyNextEngine
-+
-+ @Description Modify the Next Engine Parameters in the relevant key entry of the node.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for Next Engine modifications
-+ @Param[in] p_FmPcdCcNextEngineParams Parameters for defining next engine
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+ When configuring nextEngine = e_FM_PCD_CC, note that
-+ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
-+ from the currently changed table.
-+
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableModifyNextEngine(t_Handle h_CcNode,
-+ uint16_t keyIndex,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyKeyAndNextEngine
-+
-+ @Description Modify the key and Next Engine Parameters of this key in the
-+ index defined by the keyIndex.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for adding
-+ @Param[in] keySize Key size of added key
-+ @Param[in] p_KeyParams A pointer to the parameters that include the
-+ modified key and modified Next Engine Parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
-+ node and the nodes that lead to it.
-+ When configuring nextEngine = e_FM_PCD_CC, note that
-+ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
-+ from the currently changed table.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableModifyKeyAndNextEngine(t_Handle h_CcNode,
-+ uint16_t keyIndex,
-+ uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_KeyParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyKey
-+
-+ @Description Modify the key in the index defined by the keyIndex.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for adding
-+ @Param[in] keySize Key size of added key
-+ @Param[in] p_Key A pointer to the new key
-+ @Param[in] p_Mask A pointer to the new mask if relevant,
-+ otherwise pointer to NULL
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
-+ node and the nodes that lead to it.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode,
-+ uint16_t keyIndex,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ uint8_t *p_Mask);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableFindNRemoveKey
-+
-+ @Description Remove the key (including next engine parameters of this key)
-+ defined by the key and mask. Note that this routine will search
-+ the node to locate the index of the required key (& mask) to remove.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keySize Key size of the one to remove.
-+ @Param[in] p_Key A pointer to the requested key to remove.
-+ @Param[in] p_Mask A pointer to the mask if relevant,
-+ otherwise pointer to NULL
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
-+ node and the nodes that lead to it.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableFindNRemoveKey(t_Handle h_CcNode,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ uint8_t *p_Mask);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableFindNModifyNextEngine
-+
-+ @Description Modify the Next Engine Parameters in the relevant key entry of
-+ the node. Note that this routine will search the node to locate
-+ the index of the required key (& mask) to modify.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keySize Key size of the one to modify.
-+ @Param[in] p_Key A pointer to the requested key to modify.
-+ @Param[in] p_Mask A pointer to the mask if relevant,
-+ otherwise pointer to NULL
-+ @Param[in] p_FmPcdCcNextEngineParams Parameters for defining next engine
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+ When configuring nextEngine = e_FM_PCD_CC, note that
-+ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
-+ from the currently changed table.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableFindNModifyNextEngine(t_Handle h_CcNode,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ uint8_t *p_Mask,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableFindNModifyKeyAndNextEngine
-+
-+ @Description Modify the key and Next Engine Parameters of this key in the
-+ index defined by the keyIndex. Note that this routine will search
-+ the node to locate the index of the required key (& mask) to modify.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keySize Key size of the one to modify.
-+ @Param[in] p_Key A pointer to the requested key to modify.
-+ @Param[in] p_Mask A pointer to the mask if relevant,
-+ otherwise pointer to NULL
-+ @Param[in] p_KeyParams A pointer to the parameters that include the
-+ modified key and modified Next Engine Parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
-+ node and the nodes that lead to it.
-+ When configuring nextEngine = e_FM_PCD_CC, note that
-+ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
-+ from the currently changed table.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableFindNModifyKeyAndNextEngine(t_Handle h_CcNode,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ uint8_t *p_Mask,
-+ t_FmPcdCcKeyParams *p_KeyParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableFindNModifyKey
-+
-+ @Description Modify the key in the index defined by the keyIndex. Note that
-+ this routine will search the node to locate the index of the
-+ required key (& mask) to modify.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keySize Key size of the one to modify.
-+ @Param[in] p_Key A pointer to the requested key to modify.
-+ @Param[in] p_Mask A pointer to the mask if relevant,
-+ otherwise pointer to NULL
-+ @Param[in] p_NewKey A pointer to the new key
-+ @Param[in] p_NewMask A pointer to the new mask if relevant,
-+ otherwise pointer to NULL
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet() was called for this
-+ node and the nodes that lead to it.
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableFindNModifyKey(t_Handle h_CcNode,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ uint8_t *p_Mask,
-+ uint8_t *p_NewKey,
-+ uint8_t *p_NewMask);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableGetKeyCounter
-+
-+ @Description This routine may be used to get the counter of a specific key in a CC
-+ Node; This counter reflects how many frames that matched
-+ this key have passed.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for adding
-+
-+ @Return The specific key counter.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+uint32_t FM_PCD_MatchTableGetKeyCounter(t_Handle h_CcNode, uint16_t keyIndex);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableGetKeyStatistics
-+
-+ @Description This routine may be used to get statistics counters of a specific key
-+ in a CC Node.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
-+ these counters reflect how many frames that matched this key have
-+ passed; The total frames count will be returned in the counter
-+ of the first range (as only one frame length range was defined).
-+ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
-+ frame count will be separated to frame length counters, based on
-+ provided frame length ranges.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for adding
-+ @Param[out] p_KeyStatistics Key statistics counters
-+
-+ @Return The specific key statistics.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode,
-+ uint16_t keyIndex,
-+ t_FmPcdCcKeyStatistics *p_KeyStatistics);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableGetMissStatistics
-+
-+ @Description This routine may be used to get statistics counters of the miss entry
-+ in a CC Node.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
-+ these counters reflect how many frames were not matched to any
-+ existing key and therefore passed through the miss entry; The
-+ total frames count will be returned in the counter of the
-+ first range (as only one frame length range was defined).
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[out] p_MissStatistics Statistics counters for 'miss'
-+
-+ @Return The statistics for 'miss'.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableGetMissStatistics(t_Handle h_CcNode,
-+ t_FmPcdCcKeyStatistics *p_MissStatistics);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableFindNGetKeyStatistics
-+
-+ @Description This routine may be used to get statistics counters of a specific key
-+ in a CC Node.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
-+ these counters reflect how many frames that matched this key have
-+ passed; The total frames count will be returned in the counter
-+ of the first range (as only one frame length range was defined).
-+ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
-+ frame count will be separated to frame length counters, based on
-+ provided frame length ranges.
-+ Note that this routine will search the node to locate the index
-+ of the required key based on received key parameters.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keySize Size of the requested key
-+ @Param[in] p_Key A pointer to the requested key
-+ @Param[in] p_Mask A pointer to the mask if relevant,
-+ otherwise pointer to NULL
-+ @Param[out] p_KeyStatistics Key statistics counters
-+
-+ @Return The specific key statistics.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableFindNGetKeyStatistics(t_Handle h_CcNode,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ uint8_t *p_Mask,
-+ t_FmPcdCcKeyStatistics *p_KeyStatistics);
-+
-+/**************************************************************************//*
-+ @Function FM_PCD_MatchTableGetNextEngine
-+
-+ @Description Gets NextEngine of the relevant keyIndex.
-+
-+ @Param[in] h_CcNode A handle to the node.
-+ @Param[in] keyIndex keyIndex in the relevant node.
-+ @Param[out] p_FmPcdCcNextEngineParams here updated nextEngine parameters for
-+ the relevant keyIndex of the CC Node
-+ received as parameter to this function
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableGetNextEngine(t_Handle h_CcNode,
-+ uint16_t keyIndex,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//*
-+ @Function FM_PCD_MatchTableGetIndexedHashBucket
-+
-+ @Description This routine simulates KeyGen operation on the provided key and
-+ calculates to which hash bucket it will be mapped.
-+
-+ @Param[in] h_CcNode A handle to the node.
-+ @Param[in] kgKeySize Key size as it was configured in the KG
-+ scheme that leads to this hash.
-+ @Param[in] p_KgKey Pointer to the key; must be built like the key
-+ that the KeyGen generates, i.e. using the same
-+ extraction, and with the mask if one exists.
-+ @Param[in] kgHashShift Hash-shift as it was configured in the KG
-+ scheme that leads to this hash.
-+ @Param[out] p_CcNodeBucketHandle Pointer to the bucket of the provided key.
-+ @Param[out] p_BucketIndex Index to the bucket of the provided key
-+ @Param[out] p_LastIndex Pointer to last index in the bucket of the
-+ provided key.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet()
-+*//***************************************************************************/
-+t_Error FM_PCD_MatchTableGetIndexedHashBucket(t_Handle h_CcNode,
-+ uint8_t kgKeySize,
-+ uint8_t *p_KgKey,
-+ uint8_t kgHashShift,
-+ t_Handle *p_CcNodeBucketHandle,
-+ uint8_t *p_BucketIndex,
-+ uint16_t *p_LastIndex);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableSet
-+
-+ @Description This routine initializes a hash table structure.
-+ KeyGen hash result determines the hash bucket.
-+ Next, KeyGen key is compared against all keys of this
-+ bucket (exact match).
-+ Number of sets (number of buckets) of the hash equals the
-+ number of 1-s in 'hashResMask' in the provided parameters.
-+ Number of hash table ways is then calculated by dividing
-+ 'maxNumOfKeys' equally between the hash sets. This is the maximal
-+ number of keys that a hash bucket may hold.
-+ The hash table is initialized empty and keys may be
-+ added to it following the initialization. Keys masks are not
-+ supported in current hash table implementation.
-+ The initialized hash table can be integrated as a node in a
-+ CC tree.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_Param A structure of parameters defining the hash table
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param);
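-+
-+/*
-+ * A minimal usage sketch (not part of the original API) of the sizing rule
-+ * described above: if the chosen 'hashResMask' yields 64 buckets and
-+ * 'maxNumOfKeys' is 1024, every bucket ("way") can hold at most
-+ * 1024 / 64 = 16 keys. The layout of t_FmPcdHashTableParams beyond the two
-+ * members named in the text, and the mask encoding, are assumptions.
-+ */
-+#if 0 /* illustrative only */
-+static t_Handle example_hash_table(t_Handle h_FmPcd)
-+{
-+    t_FmPcdHashTableParams hashParams;
-+
-+    memset(&hashParams, 0, sizeof(hashParams));
-+    hashParams.hashResMask  = 0x003F0; /* assumed encoding: hash-result bits that select the bucket */
-+    hashParams.maxNumOfKeys = 1024;    /* divided equally between the buckets */
-+    /* ... fill in the remaining parameters (miss behaviour, statistics, ...) ... */
-+
-+    return FM_PCD_HashTableSet(h_FmPcd, &hashParams);
-+}
-+#endif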
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableDelete
-+
-+ @Description This routine deletes the provided hash table and releases all
-+ its allocated resources.
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableAddKey
-+
-+ @Description This routine adds the provided key (including next engine
-+ parameters of this key) to the hash table.
-+ The key is added as the last key of the bucket that it is
-+ mapped to.
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[in] keySize Key size of added key
-+ @Param[in] p_KeyParams A pointer to the parameters that include the
-+ new key with its next engine parameters; The pointer
-+ to the key mask must be NULL, as masks are not
-+ supported in the hash table implementation.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableAddKey(t_Handle h_HashTbl,
-+ uint8_t keySize,
-+ t_FmPcdCcKeyParams *p_KeyParams);
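-+
-+/*
-+ * A minimal usage sketch (not part of the original API): adding a key to a
-+ * hash table. As noted above, the key mask must be NULL because masks are not
-+ * supported here; the member names of t_FmPcdCcKeyParams ('p_Key', 'p_Mask')
-+ * are assumptions based on the surrounding text.
-+ */
-+#if 0 /* illustrative only */
-+static t_Error example_hash_add_key(t_Handle h_HashTbl, uint8_t *p_Key, uint8_t keySize)
-+{
-+    t_FmPcdCcKeyParams keyParams;
-+
-+    memset(&keyParams, 0, sizeof(keyParams));
-+    keyParams.p_Key  = p_Key; /* assumed member name */
-+    keyParams.p_Mask = NULL;  /* masks are not supported in the hash table */
-+    /* ... set the next-engine parameters for this key ... */
-+
-+    return FM_PCD_HashTableAddKey(h_HashTbl, keySize, &keyParams);
-+}
-+#endif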
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableRemoveKey
-+
-+ @Description This routine removes the requested key (including next engine
-+ parameters of this key) from the hash table.
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[in] keySize Key size of the one to remove.
-+ @Param[in] p_Key A pointer to the requested key to remove.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableRemoveKey(t_Handle h_HashTbl,
-+ uint8_t keySize,
-+ uint8_t *p_Key);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableModifyNextEngine
-+
-+ @Description This routine modifies the next engine for the provided key. The
-+ key should be previously added to the hash table.
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[in] keySize Key size of the key to modify.
-+ @Param[in] p_Key A pointer to the requested key to modify.
-+ @Param[in] p_FmPcdCcNextEngineParams A structure for defining new next engine
-+ parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+ When configuring nextEngine = e_FM_PCD_CC, note that
-+ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
-+ from the currently changed table.
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableModifyNextEngine(t_Handle h_HashTbl,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableModifyMissNextEngine
-+
-+ @Description This routine modifies the next engine on key match miss.
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[in] p_FmPcdCcNextEngineParams A structure for defining new next engine
-+ parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+ When configuring nextEngine = e_FM_PCD_CC, note that
-+ p_FmPcdCcNextEngineParams->ccParams.h_CcNode must be different
-+ from the currently changed table.
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableModifyMissNextEngine(t_Handle h_HashTbl,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableGetMissNextEngine
-+
-+ @Description Gets NextEngine in case of key match miss.
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[out] p_FmPcdCcNextEngineParams Next engine parameters for the specified
-+ hash table.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableGetMissNextEngine(t_Handle h_HashTbl,
-+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableFindNGetKeyStatistics
-+
-+ @Description This routine may be used to get the statistics counters of a
-+ specific key in a hash table.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
-+ these counters reflect how many frames that matched this key
-+ have passed; the total frame count will be returned in the counter
-+ of the first range (as only one frame length range was defined).
-+ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
-+ frame count will be separated to frame length counters, based on
-+ provided frame length ranges.
-+ Note that this routine will identify the bucket of this key in
-+ the hash table and will search the bucket to locate the index
-+ of the required key based on received key parameters.
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[in] keySize Size of the requested key
-+ @Param[in] p_Key A pointer to the requested key
-+ @Param[out] p_KeyStatistics Key statistics counters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableFindNGetKeyStatistics(t_Handle h_HashTbl,
-+ uint8_t keySize,
-+ uint8_t *p_Key,
-+ t_FmPcdCcKeyStatistics *p_KeyStatistics);
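A brief sketch of reading the counters for a key that is already present in the table (the contents of t_FmPcdCcKeyStatistics are defined elsewhere in this header):

    static t_Error ExampleGetKeyStats(t_Handle h_HashTbl, uint8_t *p_Key, uint8_t keySize)
    {
        t_FmPcdCcKeyStatistics stats;
        t_Error                err;

        err = FM_PCD_HashTableFindNGetKeyStatistics(h_HashTbl, keySize, p_Key, &stats);
        /* on E_OK, inspect the counters in 'stats' here */
        return err;
    }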
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableGetMissStatistics
-+
-+ @Description This routine may be used to get the statistics counters of the
-+ 'miss' entry of a hash table.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
-+ these counters reflect how many frames were not matched to any
-+ existing key and therefore passed through the miss entry;
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[out] p_MissStatistics Statistics counters for 'miss'
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_HashTableGetMissStatistics(t_Handle h_HashTbl,
-+ t_FmPcdCcKeyStatistics *p_MissStatistics);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipNodeSet
-+
-+ @Description This routine should be called for defining a manipulation
-+ node. A manipulation node must be defined before the CC node
-+ that precedes it.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_FmPcdManipParams A structure of parameters defining the manipulation
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_ManipNodeSet(t_Handle h_FmPcd, t_FmPcdManipParams *p_FmPcdManipParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipNodeDelete
-+
-+ @Description Delete an existing manipulation node.
-+
-+ @Param[in] h_ManipNode A handle to a manipulation node.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_ManipNodeSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_ManipNodeDelete(t_Handle h_ManipNode);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipGetStatistics
-+
-+ @Description Retrieve the manipulation statistics.
-+
-+ @Param[in] h_ManipNode A handle to a manipulation node.
-+ @Param[out] p_FmPcdManipStats A structure for retrieving the manipulation statistics
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_ManipNodeSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_ManipGetStatistics(t_Handle h_ManipNode, t_FmPcdManipStats *p_FmPcdManipStats);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipNodeReplace
-+
-+ @Description Change an existing manipulation node according to a new requirement.
-+
-+ @Param[in] h_ManipNode A handle to a manipulation node.
-+ @Param[in] p_ManipParams A structure of parameters defining the change requirement
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_ManipNodeSet().
-+*//***************************************************************************/
-+t_Error FM_PCD_ManipNodeReplace(t_Handle h_ManipNode, t_FmPcdManipParams *p_ManipParams);
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicSetGroup
-+
-+ @Description Initialize a Frame Replicator group.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_FrmReplicGroupParam A structure of parameters for the initialization of
-+ the frame replicator group.
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_FrmReplicSetGroup(t_Handle h_FmPcd, t_FmPcdFrmReplicGroupParams *p_FrmReplicGroupParam);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicDeleteGroup
-+
-+ @Description Delete a Frame Replicator group.
-+
-+ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup().
-+*//***************************************************************************/
-+t_Error FM_PCD_FrmReplicDeleteGroup(t_Handle h_FrmReplicGroup);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicAddMember
-+
-+ @Description Add a member at the index defined by memberIndex.
-+
-+ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
-+ @Param[in] memberIndex member index for adding.
-+ @Param[in] p_MemberParams A pointer to the new member parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
-+*//***************************************************************************/
-+t_Error FM_PCD_FrmReplicAddMember(t_Handle h_FrmReplicGroup,
-+ uint16_t memberIndex,
-+ t_FmPcdCcNextEngineParams *p_MemberParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicRemoveMember
-+
-+ @Description Remove the member defined by the index from the relevant group.
-+
-+ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
-+ @Param[in] memberIndex member index for removing.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
-+*//***************************************************************************/
-+t_Error FM_PCD_FrmReplicRemoveMember(t_Handle h_FrmReplicGroup,
-+ uint16_t memberIndex);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+/**************************************************************************//**
-+ @Function FM_PCD_StatisticsSetNode
-+
-+ @Description This routine should be called for defining a statistics node.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_FmPcdstatsParams A structure of parameters defining the statistics
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+t_Handle FM_PCD_StatisticsSetNode(t_Handle h_FmPcd, t_FmPcdStatsParams *p_FmPcdstatsParams);
-+#endif /* ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+
-+/** @} */ /* end of FM_PCD_Runtime_build_grp group */
-+/** @} */ /* end of FM_PCD_Runtime_grp group */
-+/** @} */ /* end of FM_PCD_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#ifdef NCSW_BACKWARD_COMPATIBLE_API
-+#define FM_PCD_MAX_NUM_OF_INTERCHANGABLE_HDRS FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS
-+#define e_FM_PCD_MANIP_ONE_WAYS_HASH e_FM_PCD_MANIP_ONE_WAY_HASH
-+#define e_FM_PCD_MANIP_TOW_WAYS_HASH e_FM_PCD_MANIP_TWO_WAYS_HASH
-+
-+#define e_FM_PCD_MANIP_FRAGMENT_PACKECT e_FM_PCD_MANIP_FRAGMENT_PACKET /* Feb13 */
-+
-+#define FM_PCD_SetNetEnvCharacteristics(_pcd, _params) \
-+ FM_PCD_NetEnvCharacteristicsSet(_pcd, _params)
-+#define FM_PCD_KgSetScheme(_pcd, _params) FM_PCD_KgSchemeSet(_pcd, _params)
-+#define FM_PCD_CcBuildTree(_pcd, _params) FM_PCD_CcRootBuild(_pcd, _params)
-+#define FM_PCD_CcSetNode(_pcd, _params) FM_PCD_MatchTableSet(_pcd, _params)
-+#define FM_PCD_PlcrSetProfile(_pcd, _params) FM_PCD_PlcrProfileSet(_pcd, _params)
-+#define FM_PCD_ManipSetNode(_pcd, _params) FM_PCD_ManipNodeSet(_pcd, _params)
-+
-+#define FM_PCD_DeleteNetEnvCharacteristics(_pcd, ...) \
-+ FM_PCD_NetEnvCharacteristicsDelete(__VA_ARGS__)
-+#define FM_PCD_KgDeleteScheme(_pcd, ...) \
-+ FM_PCD_KgSchemeDelete(__VA_ARGS__)
-+#define FM_PCD_KgGetSchemeCounter(_pcd, ...) \
-+ FM_PCD_KgSchemeGetCounter(__VA_ARGS__)
-+#define FM_PCD_KgSetSchemeCounter(_pcd, ...) \
-+ FM_PCD_KgSchemeSetCounter(__VA_ARGS__)
-+#define FM_PCD_PlcrDeleteProfile(_pcd, ...) \
-+ FM_PCD_PlcrProfileDelete(__VA_ARGS__)
-+#define FM_PCD_PlcrGetProfileCounter(_pcd, ...) \
-+ FM_PCD_PlcrProfileGetCounter(__VA_ARGS__)
-+#define FM_PCD_PlcrSetProfileCounter(_pcd, ...) \
-+ FM_PCD_PlcrProfileSetCounter(__VA_ARGS__)
-+#define FM_PCD_CcDeleteTree(_pcd, ...) \
-+ FM_PCD_CcRootDelete(__VA_ARGS__)
-+#define FM_PCD_CcTreeModifyNextEngine(_pcd, ...) \
-+ FM_PCD_CcRootModifyNextEngine(__VA_ARGS__)
-+#define FM_PCD_CcDeleteNode(_pcd, ...) \
-+ FM_PCD_MatchTableDelete(__VA_ARGS__)
-+#define FM_PCD_CcNodeModifyMissNextEngine(_pcd, ...) \
-+ FM_PCD_MatchTableModifyMissNextEngine(__VA_ARGS__)
-+#define FM_PCD_CcNodeRemoveKey(_pcd, ...) \
-+ FM_PCD_MatchTableRemoveKey(__VA_ARGS__)
-+#define FM_PCD_CcNodeAddKey(_pcd, ...) \
-+ FM_PCD_MatchTableAddKey(__VA_ARGS__)
-+#define FM_PCD_CcNodeModifyNextEngine(_pcd, ...) \
-+ FM_PCD_MatchTableModifyNextEngine(__VA_ARGS__)
-+#define FM_PCD_CcNodeModifyKeyAndNextEngine(_pcd, ...) \
-+ FM_PCD_MatchTableModifyKeyAndNextEngine(__VA_ARGS__)
-+#define FM_PCD_CcNodeModifyKey(_pcd, ...) \
-+ FM_PCD_MatchTableModifyKey(__VA_ARGS__)
-+#define FM_PCD_CcNodeFindNRemoveKey(_pcd, ...) \
-+ FM_PCD_MatchTableFindNRemoveKey(__VA_ARGS__)
-+#define FM_PCD_CcNodeFindNModifyNextEngine(_pcd, ...) \
-+ FM_PCD_MatchTableFindNModifyNextEngine(__VA_ARGS__)
-+#define FM_PCD_CcNodeFindNModifyKeyAndNextEngine(_pcd, ...) \
-+ FM_PCD_MatchTableFindNModifyKeyAndNextEngine(__VA_ARGS__)
-+#define FM_PCD_CcNodeFindNModifyKey(_pcd, ...) \
-+ FM_PCD_MatchTableFindNModifyKey(__VA_ARGS__)
-+#define FM_PCD_CcIndexedHashNodeGetBucket(_pcd, ...) \
-+ FM_PCD_MatchTableGetIndexedHashBucket(__VA_ARGS__)
-+#define FM_PCD_CcNodeGetNextEngine(_pcd, ...) \
-+ FM_PCD_MatchTableGetNextEngine(__VA_ARGS__)
-+#define FM_PCD_CcNodeGetKeyCounter(_pcd, ...) \
-+ FM_PCD_MatchTableGetKeyCounter(__VA_ARGS__)
-+#define FM_PCD_ManipDeleteNode(_pcd, ...) \
-+ FM_PCD_ManipNodeDelete(__VA_ARGS__)
-+#endif /* NCSW_BACKWARD_COMPATIBLE_API */
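For illustration, with NCSW_BACKWARD_COMPATIBLE_API defined, a legacy call is simply forwarded to the renamed routine by the macros above; the handles below are placeholders:

    /* Legacy call                                  Expands to (per the macros above)
     * FM_PCD_CcDeleteNode(h_FmPcd, h_CcNode)       FM_PCD_MatchTableDelete(h_CcNode)
     * FM_PCD_KgSetScheme(h_FmPcd, p_Params)        FM_PCD_KgSchemeSet(h_FmPcd, p_Params)
     */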
-+
-+
-+#endif /* __FM_PCD_EXT */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_port_ext.h
-@@ -0,0 +1,2608 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_port_ext.h
-+
-+ @Description FM-Port Application Programming Interface.
-+*//***************************************************************************/
-+#ifndef __FM_PORT_EXT
-+#define __FM_PORT_EXT
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_ext.h"
-+#include "net_ext.h"
-+
-+
-+/**************************************************************************//**
-+
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_PORT_grp FM Port
-+
-+ @Description FM Port API
-+
-+ The FM uses a general module called "port" to represent a Tx port
-+ (MAC), an Rx port (MAC) or Offline Parsing port.
-+ The number of ports in an FM varies between SOCs.
-+ The SW driver manages these ports as sub-modules of the FM, i.e.
-+ after an FM is initialized, its ports may be initialized and
-+ operated upon.
-+
-+ The port is initialized aware of its type, but other functions on
-+ a port may be indifferent to its type. When necessary, the driver
-+ verifies coherence and returns an error if applicable.
-+
-+ On initialization, the user specifies the port type and its index
-+ (relative to the port's type) - always starting at 0.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description An enum for defining port PCD modes.
-+ This enum defines the superset of PCD engines support - i.e. not
-+ all engines have to be used, but all have to be enabled. The real
-+ flow of a specific frame depends on the PCD configuration and the
-+ frame headers and payload.
-+ Note: the first engine, and the first engine after the parser (if
-+ it exists), should be listed in order; the order is important as it
-+ defines the flow of the port. However, for the remaining engines
-+ (the ones that follow), the order is no longer important, as
-+ it is defined by the PCD graph itself.
-+*//***************************************************************************/
-+typedef enum e_FmPortPcdSupport {
-+ e_FM_PORT_PCD_SUPPORT_NONE = 0 /**< BMI to BMI, PCD is not used */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_ONLY /**< Use only Parser */
-+ , e_FM_PORT_PCD_SUPPORT_PLCR_ONLY /**< Use only Policer */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR /**< Use Parser and Policer */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG /**< Use Parser and Keygen */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC /**< Use Parser, Keygen and Coarse Classification */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR
-+ /**< Use all PCD engines */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR /**< Use Parser, Keygen and Policer */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_AND_CC /**< Use Parser and Coarse Classification */
-+ , e_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR /**< Use Parser and Coarse Classification and Policer */
-+ , e_FM_PORT_PCD_SUPPORT_CC_ONLY /**< Use only Coarse Classification */
-+#ifdef FM_CAPWAP_SUPPORT
-+ , e_FM_PORT_PCD_SUPPORT_CC_AND_KG /**< Use Coarse Classification and Keygen */
-+ , e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR /**< Use Coarse Classification, Keygen and Policer */
-+#endif /* FM_CAPWAP_SUPPORT */
-+} e_FmPortPcdSupport;
-+
-+/**************************************************************************//**
-+ @Description Port interrupts
-+*//***************************************************************************/
-+typedef enum e_FmPortExceptions {
-+ e_FM_PORT_EXCEPTION_IM_BUSY /**< Independent-Mode Rx-BUSY */
-+} e_FmPortExceptions;
-+
-+
-+/**************************************************************************//**
-+ @Collection General FM Port defines
-+*//***************************************************************************/
-+#define FM_PORT_PRS_RESULT_NUM_OF_WORDS 8 /**< Number of 4 bytes words in parser result */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection FM Frame error
-+*//***************************************************************************/
-+typedef uint32_t fmPortFrameErrSelect_t; /**< typedef for defining Frame Descriptor errors */
-+
-+#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT /**< Not for Rx-Port! Unsupported Format */
-+#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH /**< Not for Rx-Port! Length Error */
-+#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA /**< DMA Data error */
-+#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM /**< non Frame-Manager error; probably comes from a SEC that
-+ was chained to the FM */
-+
-+#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR) /**< IPR error */
-+#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & ~FM_FD_IPR) /**< IPR non-consistent-sp */
-+
-+#define FM_PORT_FRM_ERR_IPFE 0 /**< Obsolete; will be removed in the future */
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+#define FM_PORT_FRM_ERR_CRE FM_FD_ERR_CRE
-+#define FM_PORT_FRM_ERR_CHE FM_FD_ERR_CHE
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL /**< Rx FIFO overflow, FCS error, code error, running disparity
-+ error (SGMII and TBI modes), FIFO parity error. PHY
-+ Sequence error, PHY error control character detected. */
-+#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE /**< Frame too long OR Frame size exceeds max_length_frame */
-+#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD /**< indicates a classifier "drop" operation */
-+#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION /**< Extract Out of Frame */
-+#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME /**< No Scheme Selected */
-+#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW /**< Keysize Overflow */
-+#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED /**< Frame color is red */
-+#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW /**< Frame color is yellow */
-+#define FM_PORT_FRM_ERR_ILL_PLCR FM_FD_ERR_ILL_PLCR /**< Illegal Policer Profile selected */
-+#define FM_PORT_FRM_ERR_PLCR_FRAME_LEN FM_FD_ERR_PLCR_FRAME_LEN /**< Policer frame length error */
-+#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT /**< Parser Time out Exceed */
-+#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT /**< Invalid Soft Parser instruction */
-+#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR /**< Header error was identified during parsing */
-+#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED /**< Frame parsed beyond the first 256 bytes */
-+#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001 /**< FPM Frame Processing Timeout Exceeded */
-+/* @} */
-+
-+
-+
-+/**************************************************************************//**
-+ @Group FM_PORT_init_grp FM Port Initialization Unit
-+
-+ @Description FM Port Initialization Unit
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Exceptions user callback routine, will be called upon an
-+ exception passing the exception identification.
-+
-+ @Param[in] h_App - User's application descriptor.
-+ @Param[in] exception - The exception.
-+ *//***************************************************************************/
-+typedef void (t_FmPortExceptionCallback) (t_Handle h_App, e_FmPortExceptions exception);
-+
-+/**************************************************************************//**
-+ @Description User callback function called by driver with received data.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_App Application's handle originally specified to
-+ the API Config function
-+ @Param[in] p_Data A pointer to data received
-+ @Param[in] length length of received data
-+ @Param[in] status receive status and errors
-+ @Param[in] position position of buffer in frame
-+ @Param[in] h_BufContext A handle of the user associated with this buffer
-+
-+ @Retval e_RX_STORE_RESPONSE_CONTINUE - order the driver to continue Rx
-+ operation for all ready data.
-+ @Retval e_RX_STORE_RESPONSE_PAUSE - order the driver to stop Rx operation.
-+*//***************************************************************************/
-+typedef e_RxStoreResponse (t_FmPortImRxStoreCallback) (t_Handle h_App,
-+ uint8_t *p_Data,
-+ uint16_t length,
-+ uint16_t status,
-+ uint8_t position,
-+ t_Handle h_BufContext);
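A minimal sketch of such a callback; only the signature and the return values come from the typedef and @Retval notes above, while the body is placeholder logic:

    static e_RxStoreResponse ExampleImRxStore(t_Handle h_App,
                                              uint8_t  *p_Data,
                                              uint16_t length,
                                              uint16_t status,
                                              uint8_t  position,
                                              t_Handle h_BufContext)
    {
        /* Hand p_Data/length to the application here (omitted), then let the
         * driver keep delivering ready data; return e_RX_STORE_RESPONSE_PAUSE
         * instead to stop the Rx operation. */
        return e_RX_STORE_RESPONSE_CONTINUE;
    }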
-+
-+/**************************************************************************//**
-+ @Description User callback function called by driver when transmit completed.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_App Application's handle originally specified to
-+ the API Config function
-+ @Param[in] p_Data A pointer to data received
-+ @Param[in] status transmit status and errors
-+ @Param[in] lastBuffer is last buffer in frame
-+ @Param[in] h_BufContext A handle of the user associated with this buffer
-+ *//***************************************************************************/
-+typedef void (t_FmPortImTxConfCallback) (t_Handle h_App,
-+ uint8_t *p_Data,
-+ uint16_t status,
-+ t_Handle h_BufContext);
-+
-+/**************************************************************************//**
-+ @Description A structure for additional Rx port parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortRxParams {
-+ uint32_t errFqid; /**< Error Queue Id. */
-+ uint32_t dfltFqid; /**< Default Queue Id. */
-+ uint16_t liodnOffset; /**< Port's LIODN offset. */
-+ t_FmExtPools extBufPools; /**< Which external buffer pools are used
-+ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes. */
-+} t_FmPortRxParams;
-+
-+/**************************************************************************//**
-+ @Description A structure for additional non-Rx port parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortNonRxParams {
-+ uint32_t errFqid; /**< Error Queue Id. */
-+ uint32_t dfltFqid; /**< For Tx - Default Confirmation queue,
-+ 0 means no Tx confirmation for processed
-+ frames. For OP port - default Rx queue. */
-+ uint32_t qmChannel; /**< QM-channel dedicated to this port; will be used
-+ by the FM for dequeue. */
-+} t_FmPortNonRxParams;
-+
-+/**************************************************************************//**
-+ @Description A structure for additional Rx port parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortImRxTxParams {
-+ t_Handle h_FmMuram; /**< A handle of the FM-MURAM partition */
-+ uint16_t liodnOffset; /**< For Rx ports only. Port's LIODN Offset. */
-+ uint8_t dataMemId; /**< Memory partition ID for data buffers */
-+ uint32_t dataMemAttributes; /**< Memory attributes for data buffers */
-+ t_BufferPoolInfo rxPoolParams; /**< For Rx ports only. */
-+ t_FmPortImRxStoreCallback *f_RxStore; /**< For Rx ports only. */
-+ t_FmPortImTxConfCallback *f_TxConf; /**< For Tx ports only. */
-+} t_FmPortImRxTxParams;
-+
-+/**************************************************************************//**
-+ @Description A union for additional parameters depending on port type
-+*//***************************************************************************/
-+typedef union u_FmPortSpecificParams {
-+ t_FmPortImRxTxParams imRxTxParams; /**< Rx/Tx Independent-Mode port parameter structure */
-+ t_FmPortRxParams rxParams; /**< Rx port parameters structure */
-+ t_FmPortNonRxParams nonRxParams; /**< Non-Rx port parameters structure */
-+} u_FmPortSpecificParams;
-+
-+/**************************************************************************//**
-+ @Description A structure representing FM initialization parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortParams {
-+ uintptr_t baseAddr; /**< Virtual Address of memory mapped FM Port registers.*/
-+ t_Handle h_Fm; /**< A handle to the FM object this port is related to */
-+ e_FmPortType portType; /**< Port type */
-+ uint8_t portId; /**< Port Id - relative to type;
-+ NOTE: When configuring Offline Parsing port for
-+ FMANv3 devices (DPAA_VERSION 11 and higher),
-+ it is highly recommended NOT to use portId=0 due to lack
-+ of HW resources on portId=0. */
-+ bool independentModeEnable;
-+ /**< This port is Independent-Mode - Used for Rx/Tx ports only! */
-+ uint16_t liodnBase; /**< Irrelevant for P4080 rev 1. LIODN base for this port, to be
-+ used together with LIODN offset. */
-+ u_FmPortSpecificParams specificParams; /**< Additional parameters depending on port
-+ type. */
-+
-+ t_FmPortExceptionCallback *f_Exception; /**< Relevant for IM only; callback routine to be called on a BUSY exception */
-+ t_Handle h_App; /**< A handle to an application layer object; This handle will
-+ be passed by the driver upon calling the above callbacks */
-+} t_FmPortParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Config
-+
-+ @Description Creates a descriptor for the FM PORT module.
-+
-+ The routine returns a handle (descriptor) to the FM PORT object.
-+ This descriptor must be passed as first parameter to all other
-+ FM PORT function calls.
-+
-+ No actual initialization or configuration of FM hardware is
-+ done by this routine.
-+
-+ @Param[in] p_FmPortParams - Pointer to data structure of parameters
-+
-+ @Retval Handle to the FM Port object, or NULL on failure.
-+*//***************************************************************************/
-+t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Init
-+
-+ @Description Initializes the FM PORT module by defining the software structure
-+ and configuring the hardware registers.
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PORT_Init(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Free
-+
-+ @Description Frees all resources that were assigned to FM PORT module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PORT_Free(t_Handle h_FmPort);
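A minimal bring-up sketch under stated assumptions: the e_FmPortType value and the port-type-specific parameters are omitted, since they depend on the SoC and on the desired port:

    static t_Handle ExamplePortBringUp(t_Handle h_Fm, uintptr_t portBaseAddr)
    {
        t_FmPortParams portParams = { 0 };
        t_Handle       h_FmPort;

        portParams.baseAddr = portBaseAddr;
        portParams.h_Fm     = h_Fm;
        /* portParams.portType = ...;    e_FmPortType value for this port   */
        portParams.portId   = 1;      /* index relative to the port's type  */

        h_FmPort = FM_PORT_Config(&portParams);
        if (!h_FmPort)
            return NULL;

        /* optional FM_PORT_Config*() tuning calls go here, before FM_PORT_Init() */

        if (FM_PORT_Init(h_FmPort) != E_OK) {
            FM_PORT_Free(h_FmPort);
            return NULL;
        }
        return h_FmPort;
    }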
-+
-+
-+/**************************************************************************//**
-+ @Group FM_PORT_advanced_init_grp FM Port Advanced Configuration Unit
-+
-+ @Description Configuration functions used to change default values.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description enum for defining QM frame dequeue
-+*//***************************************************************************/
-+typedef enum e_FmPortDeqType {
-+ e_FM_PORT_DEQ_TYPE1, /**< Dequeue from the SP channel - with priority precedence,
-+ and Intra-Class Scheduling respected. */
-+ e_FM_PORT_DEQ_TYPE2, /**< Dequeue from the SP channel - with active FQ precedence,
-+ and Intra-Class Scheduling respected. */
-+ e_FM_PORT_DEQ_TYPE3 /**< Dequeue from the SP channel - with active FQ precedence,
-+ and override Intra-Class Scheduling */
-+} e_FmPortDeqType;
-+
-+/**************************************************************************//**
-+ @Description enum for defining QM frame dequeue
-+*//***************************************************************************/
-+typedef enum e_FmPortDeqPrefetchOption {
-+ e_FM_PORT_DEQ_NO_PREFETCH, /**< QMI performs a dequeue action for a single frame
-+ only when a dedicated portID Tnum is waiting. */
-+ e_FM_PORT_DEQ_PARTIAL_PREFETCH, /**< QMI performs a dequeue action for 3 frames when
-+ one dedicated portId tnum is waiting. */
-+ e_FM_PORT_DEQ_FULL_PREFETCH /**< QMI performs a dequeue action for 3 frames when
-+ no dedicated portId tnums are waiting. */
-+
-+} e_FmPortDeqPrefetchOption;
-+
-+/**************************************************************************//**
-+ @Description enum for defining port default color
-+*//***************************************************************************/
-+typedef enum e_FmPortColor {
-+ e_FM_PORT_COLOR_GREEN, /**< Default port color is green */
-+ e_FM_PORT_COLOR_YELLOW, /**< Default port color is yellow */
-+ e_FM_PORT_COLOR_RED, /**< Default port color is red */
-+ e_FM_PORT_COLOR_OVERRIDE /**< Ignore color */
-+} e_FmPortColor;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining Dual Tx rate limiting scale
-+*//***************************************************************************/
-+typedef enum e_FmPortDualRateLimiterScaleDown {
-+ e_FM_PORT_DUAL_RATE_LIMITER_NONE = 0, /**< Use only single rate limiter */
-+ e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_2, /**< Divide high rate limiter by 2 */
-+ e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_4, /**< Divide high rate limiter by 4 */
-+ e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 /**< Divide high rate limiter by 8 */
-+} e_FmPortDualRateLimiterScaleDown;
-+
-+
-+/**************************************************************************//**
-+ @Description A structure for defining FM port resources
-+*//***************************************************************************/
-+typedef struct t_FmPortRsrc {
-+ uint32_t num; /**< Committed required resource */
-+ uint32_t extra; /**< Extra (not committed) required resource */
-+} t_FmPortRsrc;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining observed pool depletion
-+*//***************************************************************************/
-+typedef struct t_FmPortObservedBufPoolDepletion {
-+ t_FmBufPoolDepletion poolDepletionParams;/**< parameters to define pool depletion */
-+ t_FmExtPools poolsParams; /**< Which external buffer pools are observed
-+ (up to FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS),
-+ and their sizes. */
-+} t_FmPortObservedBufPoolDepletion;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining Tx rate limiting
-+*//***************************************************************************/
-+typedef struct t_FmPortRateLimit {
-+ uint16_t maxBurstSize; /**< in KBytes for Tx ports, in frames
-+ for OP ports. (note that
-+ for early chips burst size is
-+ rounded up to a multiple of 1000 frames).*/
-+ uint32_t rateLimit; /**< in Kb/sec for Tx ports, in frame/sec for
-+ OP ports. Rate limit refers to
-+ data rate (rather than line rate). */
-+ e_FmPortDualRateLimiterScaleDown rateLimitDivider; /**< For OP ports only. Not-valid
-+ for some earlier chip revisions */
-+} t_FmPortRateLimit;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining the parameters of
-+ the Rx port performance counters
-+*//***************************************************************************/
-+typedef struct t_FmPortPerformanceCnt {
-+ uint8_t taskCompVal; /**< Task compare value */
-+ uint8_t queueCompVal; /**< Rx queue/Tx confirm queue compare
-+ value (unused for H/O) */
-+ uint8_t dmaCompVal; /**< Dma compare value */
-+ uint32_t fifoCompVal; /**< Fifo compare value (in bytes) */
-+} t_FmPortPerformanceCnt;
-+
-+
-+/**************************************************************************//**
-+ @Description A structure for defining the sizes of the Deep Sleep
-+ Auto Response tables
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarTablesSizes
-+{
-+ uint16_t maxNumOfArpEntries;
-+ uint16_t maxNumOfEchoIpv4Entries;
-+ uint16_t maxNumOfNdpEntries;
-+ uint16_t maxNumOfEchoIpv6Entries;
-+ uint16_t maxNumOfSnmpIPV4Entries;
-+ uint16_t maxNumOfSnmpIPV6Entries;
-+ uint16_t maxNumOfSnmpOidEntries;
-+ uint16_t maxNumOfSnmpOidChar; /* total number of characters needed for the SNMP table */
-+
-+ uint16_t maxNumOfIpProtFiltering;
-+ uint16_t maxNumOfTcpPortFiltering;
-+ uint16_t maxNumOfUdpPortFiltering;
-+} t_FmPortDsarTablesSizes;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDsarSupport
-+
-+ @Description This function will allocate the amount of MURAM needed for
-+ this maximum number of entries for Deep Sleep Auto Response.
-+ It will calculate all the MURAM needed for auto response,
-+ including the necessary common resources.
-+
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] params A pointer to a structure containing the maximum
-+ sizes of the auto response tables
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDsarSupport(t_Handle h_FmPortRx, t_FmPortDsarTablesSizes *params);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigNumOfOpenDmas
-+
-+ @Description Calling this routine changes the max number of open DMAs
-+ available for this port. It changes this parameter in the
-+ internal driver data base from its default configuration
-+ [OP: 1]
-+ [1G-RX, 1G-TX: 1 (+1)]
-+ [10G-RX, 10G-TX: 8 (+8)]
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_OpenDmas A pointer to a structure of parameters defining
-+ the open DMA allocation.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_OpenDmas);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigNumOfTasks
-+
-+ @Description Calling this routine changes the max number of tasks
-+ available for this port. It changes this parameter in the
-+ internal driver data base from its default configuration
-+ [OP: 1]
-+ [1G-RX, 1G-TX: 3 (+2)]
-+ [10G-RX, 10G-TX: 16 (+8)]
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_NumOfTasks A pointer to a structure of parameters defining
-+ the tasks allocation.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks);
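For example, a minimal sketch that raises the committed task count of a 1G Rx port above the default of 3 (+2) listed above; like the other FM_PORT_Config*() routines it must be called between FM_PORT_Config() and FM_PORT_Init():

    static t_Error ExampleSetRxTasks(t_Handle h_FmPort)
    {
        t_FmPortRsrc numOfTasks;

        numOfTasks.num   = 4;   /* committed tasks (1G default is 3) */
        numOfTasks.extra = 2;   /* extra, not committed              */
        return FM_PORT_ConfigNumOfTasks(h_FmPort, &numOfTasks);
    }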
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigSizeOfFifo
-+
-+ @Description Calling this routine changes the max FIFO size configured for this port.
-+
-+ This function changes the internal driver data base from its
-+ default configuration. Please refer to the driver's User Guide for
-+ information on default FIFO sizes in the various devices.
-+ [OP: 2KB]
-+ [1G-RX, 1G-TX: 11KB]
-+ [10G-RX, 10G-TX: 12KB]
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_SizeOfFifo A pointer to a structure of parameters defining
-+ the FIFO allocation.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDeqHighPriority
-+
-+ @Description Calling this routine changes the dequeue priority in the
-+ internal driver data base from its default configuration
-+ 1G: [DEFAULT_PORT_deqHighPriority_1G]
-+ 10G: [DEFAULT_PORT_deqHighPriority_10G]
-+
-+ May be used for Non-Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] highPri TRUE to select high priority, FALSE for normal operation.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDeqHighPriority(t_Handle h_FmPort, bool highPri);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDeqType
-+
-+ @Description Calling this routine changes the dequeue type parameter in the
-+ internal driver data base from its default configuration
-+ [DEFAULT_PORT_deqType].
-+
-+ May be used for Non-Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] deqType According to QM definition.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDeqType(t_Handle h_FmPort, e_FmPortDeqType deqType);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDeqPrefetchOption
-+
-+ @Description Calling this routine changes the dequeue prefetch option parameter in the
-+ internal driver data base from its default configuration
-+ [DEFAULT_PORT_deqPrefetchOption]
-+ Note: Available for some chips only
-+
-+ May be used for Non-Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] deqPrefetchOption New option
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDeqPrefetchOption(t_Handle h_FmPort, e_FmPortDeqPrefetchOption deqPrefetchOption);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDeqByteCnt
-+
-+ @Description Calling this routine changes the dequeue byte count parameter in
-+ the internal driver data base from its default configuration
-+ 1G:[DEFAULT_PORT_deqByteCnt_1G].
-+ 10G:[DEFAULT_PORT_deqByteCnt_10G].
-+
-+ May be used for Non-Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] deqByteCnt New byte count
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDeqByteCnt(t_Handle h_FmPort, uint16_t deqByteCnt);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigBufferPrefixContent
-+
-+ @Description Defines the structure, size and content of the application buffer.
-+ The FM will reserve the first 'privDataSize' bytes of the
-+ buffer prefix; then, depending on 'passPrsResult' and
-+ 'passTimeStamp', it will copy the parse result and the
-+ timestamp, followed by the packet itself (in this order),
-+ into the application buffer, each at its own offset.
-+ In Tx ports, if 'passPrsResult' is set, the application
-+ should write the parse result at its offset in the prefix.
-+ Calling this routine changes the buffer margins definitions
-+ in the internal driver data base from its default
-+ configuration: Data size: [DEFAULT_PORT_bufferPrefixContent_privDataSize]
-+ Pass Parser result: [DEFAULT_PORT_bufferPrefixContent_passPrsResult].
-+ Pass timestamp: [DEFAULT_PORT_bufferPrefixContent_passTimeStamp].
-+
-+ May be used for all ports
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in,out] p_FmBufferPrefixContent A structure of parameters describing the
-+ structure of the buffer.
-+ Out parameter: Start margin - offset
-+ of data from start of external buffer.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigBufferPrefixContent(t_Handle h_FmPort,
-+ t_FmBufferPrefixContent *p_FmBufferPrefixContent);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigCheksumLastBytesIgnore
-+
-+ @Description Calling this routine changes the number of checksum bytes to ignore
-+ parameter in the internal driver data base from its default configuration
-+ [DEFAULT_PORT_cheksumLastBytesIgnore]
-+
-+ May be used by Tx & Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] cheksumLastBytesIgnore New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigCheksumLastBytesIgnore(t_Handle h_FmPort, uint8_t cheksumLastBytesIgnore);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigCutBytesFromEnd
-+
-+ @Description Calling this routine changes the number of bytes to cut from a
-+ frame's end parameter in the internal driver data base
-+ from its default configuration [DEFAULT_PORT_cutBytesFromEnd]
-+ Note that if the result of (frame length before chop - cutBytesFromEnd) is
-+ less than 14 bytes, the chop operation is not executed.
-+
-+ May be used for Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] cutBytesFromEnd New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigCutBytesFromEnd(t_Handle h_FmPort, uint8_t cutBytesFromEnd);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigPoolDepletion
-+
-+ @Description Calling this routine enables pause frame generation depending on the
-+ depletion status of BM pools. It also defines the conditions to activate
-+ this functionality. By default, this functionality is disabled.
-+
-+ May be used for Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_BufPoolDepletion A structure of pool depletion parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigPoolDepletion(t_Handle h_FmPort, t_FmBufPoolDepletion *p_BufPoolDepletion);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigObservedPoolDepletion
-+
-+ @Description Calling this routine enables a mechanism to stop port enqueue
-+ depending on the depletion status of selected BM pools.
-+ It also defines the conditions to activate
-+ this functionality. By default, this functionality is disabled.
-+
-+ Note: Available for some chips only
-+
-+ May be used for OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_FmPortObservedBufPoolDepletion A structure of parameters for pool depletion.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigObservedPoolDepletion(t_Handle h_FmPort,
-+ t_FmPortObservedBufPoolDepletion *p_FmPortObservedBufPoolDepletion);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigExtBufPools
-+
-+ @Description This routine should be called for OP ports
-+ that internally use BM buffer pools. In such cases, e.g. for fragmentation and
-+ re-assembly, the FM needs new BM buffers. By calling this routine the user
-+ specifies the BM buffer pools that should be used.
-+
-+ Note: Available for some chips only
-+
-+ May be used for OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_FmExtPools A structure of parameters for the external pools.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigExtBufPools(t_Handle h_FmPort, t_FmExtPools *p_FmExtPools);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigBackupPools
-+
-+ @Description Calling this routine allows the configuration of some of the BM pools
-+ defined for this port as backup pools.
-+ A pool configured to be a backup pool will be used only if all other
-+ enabled non-backup pools are depleted.
-+
-+ May be used for Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_FmPortBackupBmPools An array of pool id's. All pools specified here will
-+ be defined as backup pools.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigBackupPools(t_Handle h_FmPort, t_FmBackupBmPools *p_FmPortBackupBmPools);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigFrmDiscardOverride
-+
-+ @Description Calling this routine changes the error frames destination parameter
-+ in the internal driver data base from its default configuration:
-+ override = [DEFAULT_PORT_frmDiscardOverride]
-+
-+ May be used for Rx and OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] override TRUE to override discarding of error frames and
-+ enqueueing them to error queue.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigFrmDiscardOverride(t_Handle h_FmPort, bool override);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigErrorsToDiscard
-+
-+ @Description Calling this routine changes the behaviour on error parameter
-+ in the internal driver data base from its default configuration:
-+ [DEFAULT_PORT_errorsToDiscard].
-+ If a requested error was previously defined as "ErrorsToEnqueue", its
-+ definition will change and the frame will be discarded.
-+ Errors that were defined neither as "ErrorsToEnqueue" nor as
-+ "ErrorsToDiscard" will be forwarded to the CPU.
-+
-+ May be used for Rx and OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] errs A list of errors to discard
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigErrorsToDiscard(t_Handle h_FmPort, fmPortFrameErrSelect_t errs);
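A short sketch, assuming an Rx port and using two of the FM_PORT_FRM_ERR_* flags defined earlier in this header:

    static t_Error ExampleDiscardErrors(t_Handle h_FmPort)
    {
        /* discard oversize frames and frames with physical-layer errors
         * instead of enqueueing them to the error queue */
        fmPortFrameErrSelect_t errs = FM_PORT_FRM_ERR_PHYSICAL | FM_PORT_FRM_ERR_SIZE;

        return FM_PORT_ConfigErrorsToDiscard(h_FmPort, errs);
    }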
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDmaSwapData
-+
-+ @Description Calling this routine changes the DMA swap data parameter
-+ in the internal driver data base from its default
-+ configuration [DEFAULT_PORT_dmaSwapData]
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] swapData New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDmaSwapData(t_Handle h_FmPort, e_FmDmaSwapOption swapData);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDmaIcCacheAttr
-+
-+ @Description Calling this routine changes the internal context cache
-+ attribute parameter in the internal driver data base
-+ from its default configuration [DEFAULT_PORT_dmaIntContextCacheAttr]
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] intContextCacheAttr New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDmaIcCacheAttr(t_Handle h_FmPort, e_FmDmaCacheOption intContextCacheAttr);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDmaHdrAttr
-+
-+ @Description Calling this routine changes the header cache
-+ attribute parameter in the internal driver data base
-+ from its default configuration [DEFAULT_PORT_dmaHeaderCacheAttr]
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] headerCacheAttr New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDmaHdrAttr(t_Handle h_FmPort, e_FmDmaCacheOption headerCacheAttr);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDmaScatterGatherAttr
-+
-+ @Description Calling this routine changes the scatter gather cache
-+ attribute parameter in the internal driver data base
-+ from its default configuration [DEFAULT_PORT_dmaScatterGatherCacheAttr]
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] scatterGatherCacheAttr New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDmaScatterGatherAttr(t_Handle h_FmPort, e_FmDmaCacheOption scatterGatherCacheAttr);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDmaWriteOptimize
-+
-+ @Description Calling this routine changes the write optimization
-+ parameter in the internal driver data base
-+ from its default configuration: By default optimize = [DEFAULT_PORT_dmaWriteOptimize].
-+ Note:
-+
-+ 1. For head optimization, data alignment must be >= 16 (supported by default).
-+
-+ 3. For tail optimization, note that the optimization is performed by extending the write transaction
-+ of the frame payload at the tail as needed to achieve optimal bus transfers, so that the last write
-+ is extended to be on 16/64 bytes aligned block (chip dependent).
-+
-+ Relevant for non-Tx port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] optimize TRUE to enable optimization, FALSE for normal operation
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDmaWriteOptimize(t_Handle h_FmPort, bool optimize);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigNoScatherGather
-+
-+ @Description Calling this routine changes the noScatherGather parameter in internal driver data base
-+ from its default configuration.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+  @Param[in]     noScatherGather (TRUE - the frame is discarded if it cannot be stored in a single buffer;
-+                                  FALSE - the frame may be stored in scatter/gather (S/G) format).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigNoScatherGather(t_Handle h_FmPort, bool noScatherGather);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDfltColor
-+
-+ @Description Calling this routine changes the internal default color parameter
-+ in the internal driver data base
-+ from its default configuration [DEFAULT_PORT_color]
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] color New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDfltColor(t_Handle h_FmPort, e_FmPortColor color);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigSyncReq
-+
-+ @Description Calling this routine changes the synchronization attribute parameter
-+ in the internal driver data base from its default configuration:
-+ syncReq = [DEFAULT_PORT_syncReq]
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+  @Param[in]     syncReq         TRUE to request synchronization, FALSE otherwise.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigSyncReq(t_Handle h_FmPort, bool syncReq);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigForwardReuseIntContext
-+
-+ @Description This routine is relevant for Rx ports that are routed to OP port.
-+ It changes the internal context reuse option in the internal
-+ driver data base from its default configuration:
-+ reuse = [DEFAULT_PORT_forwardIntContextReuse]
-+
-+ May be used for Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] reuse TRUE to reuse internal context on frames
-+ forwarded to OP port.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigForwardReuseIntContext(t_Handle h_FmPort, bool reuse);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigDontReleaseTxBufToBM
-+
-+ @Description This routine should be called if no Tx confirmation
-+ is done, and yet buffers should not be released to the BM.
-+ Normally, buffers are returned using the Tx confirmation
-+ process. When Tx confirmation is not used (defFqid=0),
-+ buffers are typically released to the BM. This routine
-+ may be called to avoid this behavior and not release the
-+ buffers.
-+
-+ May be used for Tx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigDontReleaseTxBufToBM(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigIMMaxRxBufLength
-+
-+ @Description Changes the maximum receive buffer length from its default
-+ configuration: Closest rounded down power of 2 value of the
-+ data buffer size.
-+
-+ The maximum receive buffer length directly affects the structure
-+ of received frames (single- or multi-buffered) and the performance
-+ of both the FM and the driver.
-+
-+ The selection between single- or multi-buffered frames should be
-+ done according to the characteristics of the specific application.
-+ The recommended mode is to use a single data buffer per packet,
-+ as this mode provides the best performance. However, the user can
-+ select to use multiple data buffers per packet.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] newVal Maximum receive buffer length (in bytes).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+ This routine is to be used only if Independent-Mode is enabled.
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigIMMaxRxBufLength(t_Handle h_FmPort, uint16_t newVal);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigIMRxBdRingLength
-+
-+ @Description Changes the receive BD ring length from its default
-+ configuration:[DEFAULT_PORT_rxBdRingLength]
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] newVal The desired BD ring length.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+ This routine is to be used only if Independent-Mode is enabled.
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigIMRxBdRingLength(t_Handle h_FmPort, uint16_t newVal);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigIMTxBdRingLength
-+
-+ @Description Changes the transmit BD ring length from its default
-+ configuration:[DEFAULT_PORT_txBdRingLength]
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] newVal The desired BD ring length.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+ This routine is to be used only if Independent-Mode is enabled.
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigIMTxBdRingLength(t_Handle h_FmPort, uint16_t newVal);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigIMFmanCtrlExternalStructsMemory
-+
-+ @Description Configures memory partition and attributes for FMan-Controller
-+ data structures (e.g. BD rings).
-+ Calling this routine changes the internal driver data base
-+ from its default configuration
-+ [DEFAULT_PORT_ImfwExtStructsMemId, DEFAULT_PORT_ImfwExtStructsMemAttr].
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] memId Memory partition ID.
-+ @Param[in] memAttributes Memory attributes mask (a combination of MEMORY_ATTR_x flags).
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigIMFmanCtrlExternalStructsMemory(t_Handle h_FmPort,
-+ uint8_t memId,
-+ uint32_t memAttributes);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigIMPolling
-+
-+ @Description Changes the Rx flow from interrupt driven (default) to polling.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+ This routine is to be used only if Independent-Mode is enabled.
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigIMPolling(t_Handle h_FmPort);
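A short Independent-Mode tuning sketch, again between FM_PORT_Config() and FM_PORT_Init(); h_FmPort is an assumed handle and the numeric values are placeholders, not recommendations:

    t_Error err;

    err = FM_PORT_ConfigIMMaxRxBufLength(h_FmPort, 1536);       /* max Rx buffer length, bytes */
    if (err == E_OK)
        err = FM_PORT_ConfigIMRxBdRingLength(h_FmPort, 128);    /* Rx BD ring length */
    if (err == E_OK)
        err = FM_PORT_ConfigIMTxBdRingLength(h_FmPort, 64);     /* Tx BD ring length */
    if (err == E_OK)
        err = FM_PORT_ConfigIMPolling(h_FmPort);                /* Rx flow: interrupt driven -> polling */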
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigMaxFrameLength
-+
-+ @Description Changes the definition of the max size of frame that should be
-+ transmitted/received on this port from its default value [DEFAULT_PORT_maxFrameLength].
-+ This parameter is used for confirmation of the minimum Fifo
-+ size calculations and only for Tx ports or ports working in
-+ independent mode. This should be larger than the maximum possible
-+ MTU that will be used for this port (i.e. its MAC).
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] length Max size of frame
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+ This routine is to be used only if Independent-Mode is enabled.
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigMaxFrameLength(t_Handle h_FmPort, uint16_t length);
-+
-+/**************************************************************************//*
-+ @Function FM_PORT_ConfigTxFifoMinFillLevel
-+
-+ @Description Calling this routine changes the fifo minimum
-+ fill level parameter in the internal driver data base
-+ from its default configuration [DEFAULT_PORT_txFifoMinFillLevel]
-+
-+ May be used for Tx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] minFillLevel New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigTxFifoMinFillLevel(t_Handle h_FmPort, uint32_t minFillLevel);
-+
-+/**************************************************************************//*
-+ @Function FM_PORT_ConfigFifoDeqPipelineDepth
-+
-+ @Description Calling this routine changes the fifo dequeue
-+ pipeline depth parameter in the internal driver data base
-+
-+ from its default configuration: 1G ports: [DEFAULT_PORT_fifoDeqPipelineDepth_1G],
-+ 10G port: [DEFAULT_PORT_fifoDeqPipelineDepth_10G],
-+ OP port: [DEFAULT_PORT_fifoDeqPipelineDepth_OH]
-+
-+ May be used for Tx/OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] deqPipelineDepth New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigFifoDeqPipelineDepth(t_Handle h_FmPort, uint8_t deqPipelineDepth);
-+
-+/**************************************************************************//*
-+ @Function FM_PORT_ConfigTxFifoLowComfLevel
-+
-+ @Description Calling this routine changes the fifo low comfort level
-+ parameter in internal driver data base
-+ from its default configuration [DEFAULT_PORT_txFifoLowComfLevel]
-+
-+ May be used for Tx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] fifoLowComfLevel New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigTxFifoLowComfLevel(t_Handle h_FmPort, uint32_t fifoLowComfLevel);
-+
-+/**************************************************************************//*
-+ @Function FM_PORT_ConfigRxFifoThreshold
-+
-+ @Description Calling this routine changes the threshold of the FIFO
-+ fill level parameter in the internal driver data base
-+ from its default configuration [DEFAULT_PORT_rxFifoThreshold]
-+
-+ If the total number of buffers which are
-+ currently in use and associated with the
-+                 specific RX port exceeds this threshold, the
-+ BMI will signal the MAC to send a pause frame
-+ over the link.
-+
-+ May be used for Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] fifoThreshold New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigRxFifoThreshold(t_Handle h_FmPort, uint32_t fifoThreshold);
-+
-+/**************************************************************************//*
-+ @Function FM_PORT_ConfigRxFifoPriElevationLevel
-+
-+ @Description Calling this routine changes the priority elevation level
-+ parameter in the internal driver data base from its default
-+ configuration [DEFAULT_PORT_rxFifoPriElevationLevel]
-+
-+ If the total number of buffers which are currently in use and
-+                 associated with the specific RX port exceeds the amount specified
-+ in priElevationLevel, BMI will signal the main FM's DMA to
-+ elevate the FM priority on the system bus.
-+
-+ May be used for Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] priElevationLevel New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigRxFifoPriElevationLevel(t_Handle h_FmPort, uint32_t priElevationLevel);
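A sketch of the two Rx FIFO tuning calls above (Rx ports only, configuration stage); h_FmPort is an assumed handle and the fill-level values are placeholders:

    t_Error err;

    err = FM_PORT_ConfigRxFifoThreshold(h_FmPort, 0x1000);              /* pause-frame signalling threshold */
    if (err == E_OK)
        err = FM_PORT_ConfigRxFifoPriElevationLevel(h_FmPort, 0x2000);  /* DMA priority elevation level */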
-+
-+#ifdef FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
-+/**************************************************************************//*
-+ @Function FM_PORT_ConfigBCBWorkaround
-+
-+ @Description Configures BCB errata workaround.
-+
-+                 When the BCB errata is applicable, the workaround is always
-+                 performed by the FM controller. Thus, this function doesn't
-+                 actually enable the errata workaround, but rather allows the
-+                 driver to perform the adjustments required by the workaround's
-+                 execution in the FM controller.
-+
-+ Applying BCB workaround also configures FM_PORT_FRM_ERR_PHYSICAL
-+ errors to be discarded. Thus FM_PORT_FRM_ERR_PHYSICAL can't be
-+ set by FM_PORT_SetErrorsRoute() function.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigBCBWorkaround(t_Handle h_FmPort);
-+#endif /* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 */
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//*
-+ @Function FM_PORT_ConfigInternalBuffOffset
-+
-+ @Description Configures internal buffer offset.
-+
-+ May be used for Rx and OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] val New value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ConfigInternalBuffOffset(t_Handle h_FmPort, uint8_t val);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/** @} */ /* end of FM_PORT_advanced_init_grp group */
-+/** @} */ /* end of FM_PORT_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_PORT_runtime_control_grp FM Port Runtime Control Unit
-+
-+ @Description FM Port Runtime control unit API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description enum for defining FM Port counters
-+*//***************************************************************************/
-+typedef enum e_FmPortCounters {
-+ e_FM_PORT_COUNTERS_CYCLE, /**< BMI performance counter */
-+ e_FM_PORT_COUNTERS_TASK_UTIL, /**< BMI performance counter */
-+ e_FM_PORT_COUNTERS_QUEUE_UTIL, /**< BMI performance counter */
-+ e_FM_PORT_COUNTERS_DMA_UTIL, /**< BMI performance counter */
-+ e_FM_PORT_COUNTERS_FIFO_UTIL, /**< BMI performance counter */
-+ e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION, /**< BMI Rx only performance counter */
-+ e_FM_PORT_COUNTERS_FRAME, /**< BMI statistics counter */
-+ e_FM_PORT_COUNTERS_DISCARD_FRAME, /**< BMI statistics counter */
-+ e_FM_PORT_COUNTERS_DEALLOC_BUF, /**< BMI deallocate buffer statistics counter */
-+ e_FM_PORT_COUNTERS_RX_BAD_FRAME, /**< BMI Rx only statistics counter */
-+ e_FM_PORT_COUNTERS_RX_LARGE_FRAME, /**< BMI Rx only statistics counter */
-+ e_FM_PORT_COUNTERS_RX_FILTER_FRAME, /**< BMI Rx & OP only statistics counter */
-+ e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR, /**< BMI Rx, OP & HC only statistics counter */
-+ e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD, /**< BMI Rx, OP & HC statistics counter */
-+ e_FM_PORT_COUNTERS_PREPARE_TO_ENQUEUE_COUNTER, /**< BMI Rx, OP & HC only statistics counter */
-+ e_FM_PORT_COUNTERS_WRED_DISCARD, /**< BMI OP & HC only statistics counter */
-+ e_FM_PORT_COUNTERS_LENGTH_ERR, /**< BMI non-Rx statistics counter */
-+ e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT, /**< BMI non-Rx statistics counter */
-+ e_FM_PORT_COUNTERS_DEQ_TOTAL, /**< QMI total QM dequeues counter */
-+ e_FM_PORT_COUNTERS_ENQ_TOTAL, /**< QMI total QM enqueues counter */
-+ e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI counter */
-+ e_FM_PORT_COUNTERS_DEQ_CONFIRM /**< QMI counter */
-+} e_FmPortCounters;
-+
-+typedef struct t_FmPortBmiStats {
-+ uint32_t cntCycle;
-+ uint32_t cntTaskUtil;
-+ uint32_t cntQueueUtil;
-+ uint32_t cntDmaUtil;
-+ uint32_t cntFifoUtil;
-+ uint32_t cntRxPauseActivation;
-+ uint32_t cntFrame;
-+ uint32_t cntDiscardFrame;
-+ uint32_t cntDeallocBuf;
-+ uint32_t cntRxBadFrame;
-+ uint32_t cntRxLargeFrame;
-+ uint32_t cntRxFilterFrame;
-+ uint32_t cntRxListDmaErr;
-+ uint32_t cntRxOutOfBuffersDiscard;
-+ uint32_t cntWredDiscard;
-+ uint32_t cntLengthErr;
-+ uint32_t cntUnsupportedFormat;
-+} t_FmPortBmiStats;
-+
-+/**************************************************************************//**
-+  @Description   A structure for defining the congestion groups relevant to a port.
-+*//***************************************************************************/
-+typedef struct t_FmPortCongestionGrps {
-+ uint16_t numOfCongestionGrpsToConsider; /**< The number of required CGs
-+ to define the size of the following array */
-+ uint8_t congestionGrpsToConsider[FM_PORT_NUM_OF_CONGESTION_GRPS];
-+ /**< An array of CG indexes;
-+ Note that the size of the array should be
-+ 'numOfCongestionGrpsToConsider'. */
-+#if (DPAA_VERSION >= 11)
-+ bool pfcPrioritiesEn[FM_PORT_NUM_OF_CONGESTION_GRPS][FM_MAX_NUM_OF_PFC_PRIORITIES];
-+ /**< a matrix that represents the map between the CG ids
-+                                                     defined in 'congestionGrpsToConsider' to the priorities
-+ mapping array. */
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmPortCongestionGrps;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response ARP Entry
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarArpEntry
-+{
-+ uint32_t ipAddress;
-+ uint8_t mac[6];
-+ bool isVlan;
-+ uint16_t vid;
-+} t_FmPortDsarArpEntry;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response ARP info
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarArpInfo
-+{
-+ uint8_t tableSize;
-+ t_FmPortDsarArpEntry *p_AutoResTable;
-+    bool          enableConflictDetection; /* when TRUE, Conflict Detection is performed and the host is woken if needed */
-+} t_FmPortDsarArpInfo;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response NDP Entry
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarNdpEntry
-+{
-+ uint32_t ipAddress[4];
-+ uint8_t mac[6];
-+ bool isVlan;
-+ uint16_t vid;
-+} t_FmPortDsarNdpEntry;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response NDP info
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarNdpInfo
-+{
-+ uint32_t multicastGroup;
-+
-+ uint8_t tableSizeAssigned;
-+    t_FmPortDsarNdpEntry  *p_AutoResTableAssigned; /* This list refers to solicitation IP addresses.
-+                                                      Note that all IP addresses must be from the same multicast group.
-+                                                      This is checked, and the operation fails if it does not hold. */
-+ uint8_t tableSizeTmp;
-+    t_FmPortDsarNdpEntry  *p_AutoResTableTmp;      /* This list refers to temp IP addresses.
-+                                                      Note that all temp IP addresses must be from the same multicast group.
-+                                                      This is checked, and the operation fails if it does not hold. */
-+
-+    bool          enableConflictDetection; /* when TRUE, Conflict Detection is performed and the host is woken if needed */
-+
-+} t_FmPortDsarNdpInfo;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response ICMPV4 info
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarEchoIpv4Info
-+{
-+ uint8_t tableSize;
-+ t_FmPortDsarArpEntry *p_AutoResTable;
-+} t_FmPortDsarEchoIpv4Info;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response ICMPV6 info
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarEchoIpv6Info
-+{
-+ uint8_t tableSize;
-+ t_FmPortDsarNdpEntry *p_AutoResTable;
-+} t_FmPortDsarEchoIpv6Info;
-+
-+/**************************************************************************//**
-+@Description Deep Sleep Auto Response SNMP OIDs table entry
-+
-+*//***************************************************************************/
-+typedef struct {
-+ uint16_t oidSize;
-+ uint8_t *oidVal; /* only the oid string */
-+ uint16_t resSize;
-+ uint8_t *resVal; /* resVal will be the entire reply,
-+ i.e. "Type|Length|Value" */
-+} t_FmPortDsarOidsEntry;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP IPv4 Addresses Table Entry
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+typedef struct
-+{
-+ uint32_t ipv4Addr; /*!< 32 bit IPv4 Address. */
-+ bool isVlan;
-+ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+} t_FmPortDsarSnmpIpv4AddrTblEntry;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP IPv6 Addresses Table Entry
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+typedef struct
-+{
-+ uint32_t ipv6Addr[4]; /*!< 4 * 32 bit IPv6 Address. */
-+ bool isVlan;
-+ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+} t_FmPortDsarSnmpIpv6AddrTblEntry;
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP Descriptor
-+
-+*//***************************************************************************/
-+typedef struct
-+{
-+ uint16_t control; /**< Control bits [0-15]. */
-+ uint16_t maxSnmpMsgLength; /**< Maximal allowed SNMP message length. */
-+ uint16_t numOfIpv4Addresses; /**< Number of entries in IPv4 addresses table. */
-+ uint16_t numOfIpv6Addresses; /**< Number of entries in IPv6 addresses table. */
-+ t_FmPortDsarSnmpIpv4AddrTblEntry *p_Ipv4AddrTbl; /**< Pointer to IPv4 addresses table. */
-+ t_FmPortDsarSnmpIpv6AddrTblEntry *p_Ipv6AddrTbl; /**< Pointer to IPv6 addresses table. */
-+ uint8_t *p_RdOnlyCommunityStr; /**< Pointer to the Read Only Community String. */
-+ uint8_t *p_RdWrCommunityStr; /**< Pointer to the Read Write Community String. */
-+ t_FmPortDsarOidsEntry *p_OidsTbl; /**< Pointer to OIDs table. */
-+ uint32_t oidsTblSize; /**< Number of entries in OIDs table. */
-+} t_FmPortDsarSnmpInfo;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response filtering Entry
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarFilteringEntry
-+{
-+ uint16_t srcPort;
-+ uint16_t dstPort;
-+ uint16_t srcPortMask;
-+ uint16_t dstPortMask;
-+} t_FmPortDsarFilteringEntry;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response filtering info
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarFilteringInfo
-+{
-+ /* IP protocol filtering parameters */
-+ uint8_t ipProtTableSize;
-+ uint8_t *p_IpProtTablePtr;
-+    bool        ipProtPassOnHit;    /* when TRUE, a miss in the table causes the packet to be dropped,
-+                                       while a hit passes the packet to the UDP/TCP filters if needed,
-+                                       and otherwise to the classification tree. If the classification
-+                                       tree passes the packet to a queue, a wake interrupt is raised.
-+                                       When FALSE, it is the other way around. */
-+ /* UDP port filtering parameters */
-+ uint8_t udpPortsTableSize;
-+ t_FmPortDsarFilteringEntry *p_UdpPortsTablePtr;
-+    bool        udpPortPassOnHit;   /* when TRUE, a miss in the table causes the packet to be dropped,
-+                                       while a hit passes the packet to the classification tree.
-+                                       If the classification tree passes the packet to a queue,
-+                                       a wake interrupt is raised.
-+                                       When FALSE, it is the other way around. */
-+ /* TCP port filtering parameters */
-+ uint16_t tcpFlagsMask;
-+ uint8_t tcpPortsTableSize;
-+ t_FmPortDsarFilteringEntry *p_TcpPortsTablePtr;
-+    bool        tcpPortPassOnHit;   /* when TRUE, a miss in the table causes the packet to be dropped,
-+                                       while a hit passes the packet to the classification tree.
-+                                       If the classification tree passes the packet to a queue,
-+                                       a wake interrupt is raised.
-+                                       When FALSE, it is the other way around. */
-+} t_FmPortDsarFilteringInfo;
-+
-+/**************************************************************************//**
-+ @Description Structure for Deep Sleep Auto Response parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortDsarParams
-+{
-+ t_Handle h_FmPortTx;
-+ t_FmPortDsarArpInfo *p_AutoResArpInfo;
-+ t_FmPortDsarEchoIpv4Info *p_AutoResEchoIpv4Info;
-+ t_FmPortDsarNdpInfo *p_AutoResNdpInfo;
-+ t_FmPortDsarEchoIpv6Info *p_AutoResEchoIpv6Info;
-+ t_FmPortDsarSnmpInfo *p_AutoResSnmpInfo;
-+ t_FmPortDsarFilteringInfo *p_AutoResFilteringInfo;
-+} t_FmPortDsarParams;
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_EnterDsar
-+
-+ @Description Enter Deep Sleep Auto Response mode.
-+                 This function writes the appropriate values into the relevant
-+                 tables in the MURAM.
-+
-+ @Param[in] h_FmPortRx - FM PORT module descriptor
-+ @Param[in] params - Auto Response parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_EnterDsar(t_Handle h_FmPortRx, t_FmPortDsarParams *params);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_EnterDsarFinal
-+
-+ @Description Enter Deep Sleep Auto Response mode.
-+                 This function sets the Tx port in independent mode as needed
-+                 and redirects the receive flow through the
-+                 DSAR FMan-ctrl code.
-+
-+ @Param[in] h_DsarRxPort - FM Rx PORT module descriptor
-+ @Param[in] h_DsarTxPort - FM Tx PORT module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_EnterDsarFinal(t_Handle h_DsarRxPort, t_Handle h_DsarTxPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ExitDsar
-+
-+ @Description Exit Deep Sleep Auto Response mode.
-+                 This function reverses the AR mode and puts the ports back into
-+                 their original wake mode.
-+
-+ @Param[in] h_FmPortRx - FM PORT Rx module descriptor
-+ @Param[in] h_FmPortTx - FM PORT Tx module descriptor
-+
-+  @Return        None.
-+
-+ @Cautions Allowed only following FM_PORT_EnterDsar().
-+*//***************************************************************************/
-+void FM_PORT_ExitDsar(t_Handle h_FmPortRx, t_Handle h_FmPortTx);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_IsInDsar
-+
-+  @Description   This function returns TRUE if the port was set as Auto Response
-+                 and FALSE if not. Once AR mode is exited, it returns FALSE
-+                 until re-enabled once more.
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+
-+  @Return        TRUE if the port is in Auto Response mode; FALSE otherwise.
-+*//***************************************************************************/
-+bool FM_PORT_IsInDsar(t_Handle h_FmPort);
-+
-+typedef struct t_FmPortDsarStats
-+{
-+ uint32_t arpArCnt;
-+ uint32_t echoIcmpv4ArCnt;
-+ uint32_t ndpArCnt;
-+ uint32_t echoIcmpv6ArCnt;
-+ uint32_t snmpGetCnt;
-+ uint32_t snmpGetNextCnt;
-+} t_FmPortDsarStats;
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetDsarStats
-+
-+ @Description Return statistics for Deep Sleep Auto Response
-+
-+ @Param[in] h_FmPortRx - FM PORT module descriptor
-+ @Param[out] stats - structure containing the statistics counters
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_PORT_GetDsarStats(t_Handle h_FmPortRx, t_FmPortDsarStats *stats);
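A Deep Sleep Auto Response sketch tying the structures and routines above together; h_FmPortRx and h_FmPortTx are assumed to be initialized Rx/Tx port handles, and the IP/MAC values are placeholders:

    t_FmPortDsarArpEntry arpEntry = {
        .ipAddress = 0xC0A80001,                              /* placeholder: 192.168.0.1 */
        .mac       = { 0x00, 0x04, 0x9F, 0x00, 0x00, 0x01 },  /* placeholder MAC */
        .isVlan    = FALSE,
        .vid       = 0
    };
    t_FmPortDsarArpInfo arpInfo = {
        .tableSize               = 1,
        .p_AutoResTable          = &arpEntry,
        .enableConflictDetection = FALSE
    };
    t_FmPortDsarParams dsarParams = {
        .h_FmPortTx       = h_FmPortTx,
        .p_AutoResArpInfo = &arpInfo       /* other auto-response info pointers stay NULL */
    };
    t_FmPortDsarStats dsarStats;

    if ((FM_PORT_EnterDsar(h_FmPortRx, &dsarParams) == E_OK) &&
        (FM_PORT_EnterDsarFinal(h_FmPortRx, h_FmPortTx) == E_OK)) {
        /* ... system enters deep sleep; after wake-up: ... */
        FM_PORT_GetDsarStats(h_FmPortRx, &dsarStats);          /* e.g. inspect dsarStats.arpArCnt */
        FM_PORT_ExitDsar(h_FmPortRx, h_FmPortTx);
    }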
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/**************************************************************************//**
-+ @Function FM_PORT_DumpRegs
-+
-+ @Description Dump all regs.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_DumpRegs(t_Handle h_FmPort);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetBufferDataOffset
-+
-+ @Description Relevant for Rx ports.
-+ Returns the data offset from the beginning of the data buffer
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+
-+ @Return data offset.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+uint32_t FM_PORT_GetBufferDataOffset(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetBufferICInfo
-+
-+  @Description   Returns a pointer to the Internal Context within the data buffer
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+ @Return Internal context info pointer on success, NULL if 'allOtherInfo' was not
-+ configured for this port.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+uint8_t * FM_PORT_GetBufferICInfo(t_Handle h_FmPort, char *p_Data);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetBufferPrsResult
-+
-+ @Description Returns the pointer to the parse result in the data buffer.
-+ In Rx ports this is relevant after reception, if parse
-+ result is configured to be part of the data passed to the
-+ application. For non Rx ports it may be used to get the pointer
-+ of the area in the buffer where parse result should be
-+ initialized - if so configured.
-+ See FM_PORT_ConfigBufferPrefixContent for data buffer prefix
-+ configuration.
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+ @Return Parse result pointer on success, NULL if parse result was not
-+ configured for this port.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_FmPrsResult * FM_PORT_GetBufferPrsResult(t_Handle h_FmPort, char *p_Data);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetBufferTimeStamp
-+
-+ @Description Returns the time stamp in the data buffer.
-+ Relevant for Rx ports for getting the buffer time stamp.
-+ See FM_PORT_ConfigBufferPrefixContent for data buffer prefix
-+ configuration.
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+  @Return        A pointer to the time stamp on success, NULL otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+uint64_t * FM_PORT_GetBufferTimeStamp(t_Handle h_FmPort, char *p_Data);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetBufferHashResult
-+
-+ @Description Given a data buffer, on the condition that hash result was defined
-+ as a part of the buffer content (see FM_PORT_ConfigBufferPrefixContent)
-+ this routine will return the pointer to the hash result location in the
-+ buffer prefix.
-+
-+ @Param[in] h_FmPort - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+ @Return A pointer to the hash result on success, NULL otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+uint8_t * FM_PORT_GetBufferHashResult(t_Handle h_FmPort, char *p_Data);
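A sketch of the buffer-prefix accessors above; inspect_rx_buffer is a hypothetical helper, p_Data is assumed to be the buffer pointer delivered for a received frame, and the SDK header declaring these prototypes is assumed to be included:

    /* Hypothetical helper: locate the optional prefix fields of a received buffer. */
    static void inspect_rx_buffer(t_Handle h_FmPort, char *p_Data)
    {
        uint32_t       dataOffset  = FM_PORT_GetBufferDataOffset(h_FmPort);     /* offset of frame data */
        t_FmPrsResult *p_PrsResult = FM_PORT_GetBufferPrsResult(h_FmPort, p_Data);
        uint64_t      *p_TimeStamp = FM_PORT_GetBufferTimeStamp(h_FmPort, p_Data);
        uint8_t       *p_Hash      = FM_PORT_GetBufferHashResult(h_FmPort, p_Data);
        uint8_t       *p_IcInfo    = FM_PORT_GetBufferICInfo(h_FmPort, p_Data);

        /* Each pointer is NULL unless the corresponding field was enabled via
         * FM_PORT_ConfigBufferPrefixContent() before FM_PORT_Init(). */
        (void)dataOffset; (void)p_PrsResult; (void)p_TimeStamp; (void)p_Hash; (void)p_IcInfo;
    }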
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Disable
-+
-+ @Description Gracefully disable an FM port. The port will not start new tasks after all
-+ tasks associated with the port are terminated.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+                 This is a blocking routine; it returns after the port is
-+                 gracefully stopped, i.e. the port will not accept new frames,
-+                 but it will finish all frames or tasks that had already begun.
-+*//***************************************************************************/
-+t_Error FM_PORT_Disable(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Enable
-+
-+ @Description A runtime routine provided to allow disable/enable of port.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_Enable(t_Handle h_FmPort);
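A runtime disable/re-enable sketch, assuming h_FmPort is an initialized port handle:

    t_Error err;

    err = FM_PORT_Disable(h_FmPort);       /* blocks until in-flight frames/tasks complete */
    if (err == E_OK) {
        /* ... adjust runtime settings here, e.g. FM_PORT_SetStatisticsCounters() ... */
        err = FM_PORT_Enable(h_FmPort);
    }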
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetRateLimit
-+
-+ @Description Calling this routine enables rate limit algorithm.
-+ By default, this functionality is disabled.
-+ Note that rate-limit mechanism uses the FM time stamp.
-+ The selected rate limit specified here would be
-+ rounded DOWN to the nearest 16M.
-+
-+ May be used for Tx and OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_RateLimit A structure of rate limit parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+                 If rate limit is set on a port that needs to send PFC frames,
-+ it might violate the stop transmit timing.
-+*//***************************************************************************/
-+t_Error FM_PORT_SetRateLimit(t_Handle h_FmPort, t_FmPortRateLimit *p_RateLimit);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_DeleteRateLimit
-+
-+ @Description Calling this routine disables and clears rate limit
-+ initialization.
-+
-+ May be used for Tx and OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_DeleteRateLimit(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetPfcPrioritiesMappingToQmanWQ
-+
-+ @Description Calling this routine maps each PFC received priority to the transmit WQ.
-+ This WQ will be blocked upon receiving a PFC frame with this priority.
-+
-+ May be used for Tx ports only.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] prio PFC priority (0-7).
-+ @Param[in] wq Work Queue (0-7).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetPfcPrioritiesMappingToQmanWQ(t_Handle h_FmPort, uint8_t prio, uint8_t wq);
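A minimal sketch mapping the eight PFC priorities one-to-one onto Tx work queues (Tx ports only, after FM_PORT_Init()); h_FmPort is an assumed handle:

    uint8_t prio;
    t_Error err = E_OK;

    for (prio = 0; (prio < 8) && (err == E_OK); prio++)
        err = FM_PORT_SetPfcPrioritiesMappingToQmanWQ(h_FmPort, prio, prio);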
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetStatisticsCounters
-+
-+ @Description Calling this routine enables/disables port's statistics counters.
-+ By default, counters are enabled.
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] enable TRUE to enable, FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetStatisticsCounters(t_Handle h_FmPort, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetFrameQueueCounters
-+
-+ @Description Calling this routine enables/disables port's enqueue/dequeue counters.
-+ By default, counters are enabled.
-+
-+ May be used for all ports
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] enable TRUE to enable, FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetFrameQueueCounters(t_Handle h_FmPort, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_AnalyzePerformanceParams
-+
-+  @Description   The user may call this routine so that the driver analyzes whether
-+                 the basic performance parameters are correct and, where relevant,
-+                 suggests improvements; the basic parameters are the FIFO sizes, the
-+                 number of DMAs and the number of TNUMs for the port.
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_AnalyzePerformanceParams(t_Handle h_FmPort);
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetAllocBufCounter
-+
-+ @Description Calling this routine enables/disables BM pool allocate
-+ buffer counters.
-+ By default, counters are enabled.
-+
-+ May be used for Rx ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] poolId BM pool id.
-+ @Param[in] enable TRUE to enable, FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetBmiCounters
-+
-+ @Description Read port's BMI stat counters and place them into
-+ a designated structure of counters.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[out] p_BmiStats counters structure
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_GetBmiCounters(t_Handle h_FmPort, t_FmPortBmiStats *p_BmiStats);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetCounter
-+
-+ @Description Reads one of the FM PORT counters.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] fmPortCounter The requested counter.
-+
-+ @Return Counter's current value.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+                 Note that it is the user's responsibility to call this routine only
-+ for enabled counters, and there will be no indication if a
-+ disabled counter is accessed.
-+*//***************************************************************************/
-+uint32_t FM_PORT_GetCounter(t_Handle h_FmPort, e_FmPortCounters fmPortCounter);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ModifyCounter
-+
-+ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] fmPortCounter The requested counter.
-+ @Param[in] value The requested value to be written into the counter.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ModifyCounter(t_Handle h_FmPort, e_FmPortCounters fmPortCounter, uint32_t value);
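A counters sketch using the enum, stats structure and routines above; h_FmPort is an assumed handle:

    uint32_t         frames;
    t_FmPortBmiStats bmiStats;

    frames = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);   /* read frame counter */
    FM_PORT_ModifyCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME, 0);      /* "0" resets the counter */
    if (FM_PORT_GetBmiCounters(h_FmPort, &bmiStats) == E_OK)
        frames = bmiStats.cntFrame;                                    /* frame count from the BMI stats block */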
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetAllocBufCounter
-+
-+ @Description Reads one of the FM PORT buffer counters.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] poolId The requested pool.
-+
-+ @Return Counter's current value.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+                 Note that it is the user's responsibility to call this routine only
-+ for enabled counters, and there will be no indication if a
-+ disabled counter is accessed.
-+*//***************************************************************************/
-+uint32_t FM_PORT_GetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ModifyAllocBufCounter
-+
-+ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] poolId The requested pool.
-+ @Param[in] value The requested value to be written into the counter.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ModifyAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, uint32_t value);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_AddCongestionGrps
-+
-+  @Description   This routine affects the corresponding Tx port.
-+ It should be called in order to enable pause
-+ frame transmission in case of congestion in one or more
-+ of the congestion groups relevant to this port.
-+ Each call to this routine may add one or more congestion
-+ groups to be considered relevant to this port.
-+
-+ May be used for Rx, or RX+OP ports only (depending on chip)
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_CongestionGrps A pointer to an array of congestion groups
-+ id's to consider.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_AddCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_RemoveCongestionGrps
-+
-+  @Description   This routine affects the corresponding Tx port. It should be
-+                 called when congestion groups that were defined for this port
-+                 are no longer relevant, or when pause-frame transmission is no
-+                 longer required on their behalf.
-+                 Each call to this routine may remove one or more congestion
-+                 groups from being considered relevant to this port.
-+
-+ May be used for Rx, or RX+OP ports only (depending on chip)
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_CongestionGrps A pointer to an array of congestion groups
-+ id's to consider.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_RemoveCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps);
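A congestion-group sketch built from the t_FmPortCongestionGrps structure above; h_FmPort is an assumed handle and the CG ids are placeholders:

    t_FmPortCongestionGrps cgParams = {
        .numOfCongestionGrpsToConsider = 2,
        .congestionGrpsToConsider      = { 4, 7 }   /* placeholder CG ids */
    };

    if (FM_PORT_AddCongestionGrps(h_FmPort, &cgParams) == E_OK) {
        /* ... later, when these groups are no longer relevant: ... */
        FM_PORT_RemoveCongestionGrps(h_FmPort, &cgParams);
    }
    /* On DPAA 1.1, cgParams.pfcPrioritiesEn[i][prio] may additionally map each
     * listed CG to the PFC priorities it should act on. */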
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_IsStalled
-+
-+ @Description A routine for checking whether the specified port is stalled.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+  @Return        TRUE if the port is stalled, FALSE otherwise
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+bool FM_PORT_IsStalled(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ReleaseStalled
-+
-+ @Description This routine may be called in case the port was stalled and may
-+ now be released.
-+ Note that this routine is available only on older FMan revisions
-+ (FMan v2, DPAA v1.0 only).
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_ReleaseStalled(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetRxL4ChecksumVerify
-+
-+ @Description This routine is relevant for Rx ports (1G and 10G). The routine
-+                 sets/clears the L3/L4 checksum verification (on the Rx side).
-+                 Note that this takes effect only if the HW parser is enabled!
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] l4Checksum boolean indicates whether to do L3/L4 checksum
-+ on frames or not.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetRxL4ChecksumVerify(t_Handle h_FmPort, bool l4Checksum);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetErrorsRoute
-+
-+ @Description Errors selected for this routine will cause a frame with that error
-+ to be enqueued to error queue.
-+ Errors not selected for this routine will cause a frame with that error
-+ to be enqueued to the one of the other port queues.
-+ By default all errors are defined to be enqueued to error queue.
-+ Errors that were configured to be discarded (at initialization)
-+ may not be selected here.
-+
-+ May be used for Rx and OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] errs A list of errors to enqueue to error queue
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetErrorsRoute(t_Handle h_FmPort, fmPortFrameErrSelect_t errs);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetIMExceptions
-+
-+ @Description Calling this routine enables/disables FM PORT interrupts.
-+
-+ @Param[in] h_FmPort FM PORT module descriptor.
-+ @Param[in] exception The exception to be selected.
-+ @Param[in] enable TRUE to enable interrupt, FALSE to mask it.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+ This routine should NOT be called from guest-partition
-+ (i.e. guestId != NCSW_MASTER_ID)
-+*//***************************************************************************/
-+t_Error FM_PORT_SetIMExceptions(t_Handle h_FmPort, e_FmPortExceptions exception, bool enable);
-+
-+/**************************************************************************//*
-+ @Function FM_PORT_SetPerformanceCounters
-+
-+ @Description Calling this routine enables/disables port's performance counters.
-+ By default, counters are enabled.
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] enable TRUE to enable, FALSE to disable.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetPerformanceCounters(t_Handle h_FmPort, bool enable);
-+
-+/**************************************************************************//*
-+ @Function FM_PORT_SetPerformanceCountersParams
-+
-+ @Description Calling this routine defines port's performance
-+ counters parameters.
-+
-+ May be used for all port types
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_FmPortPerformanceCnt A pointer to a structure of performance
-+ counters parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetPerformanceCountersParams(t_Handle h_FmPort, t_FmPortPerformanceCnt *p_FmPortPerformanceCnt);
-+
-+/**************************************************************************//**
-+ @Group FM_PORT_pcd_runtime_control_grp FM Port PCD Runtime Control Unit
-+
-+ @Description FM Port PCD Runtime control unit API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description A structure defining the KG scheme after the parser.
-+                 This is relevant only for changing the scheme selection mode (from
-+                 direct to indirect and vice versa), or, when the scheme is selected
-+                 directly, for selecting the scheme id.
-+
-+*//***************************************************************************/
-+typedef struct t_FmPcdKgSchemeSelect {
-+ bool direct; /**< TRUE to use 'h_Scheme' directly, FALSE to use LCV. */
-+ t_Handle h_DirectScheme; /**< Scheme handle, selects the scheme after parser;
-+ Relevant only when 'direct' is TRUE. */
-+} t_FmPcdKgSchemeSelect;
-+
-+/**************************************************************************//**
-+ @Description A structure of scheme parameters
-+*//***************************************************************************/
-+typedef struct t_FmPcdPortSchemesParams {
-+ uint8_t numOfSchemes; /**< Number of schemes for port to be bound to. */
-+ t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES]; /**< Array of 'numOfSchemes' schemes for the
-+ port to be bound to */
-+} t_FmPcdPortSchemesParams;
-+
-+/**************************************************************************//**
-+ @Description Union for defining port protocol parameters for parser
-+*//***************************************************************************/
-+typedef union u_FmPcdHdrPrsOpts {
-+ /* MPLS */
-+ struct {
-+ bool labelInterpretationEnable; /**< When this bit is set, the last MPLS label will be
-+ interpreted as described in HW spec table. When the bit
-+ is cleared, the parser will advance to MPLS next parse */
-+        e_NetHeaderType nextParse;      /**< must be equal to or higher than IPv4 */
-+ } mplsPrsOptions;
-+ /* VLAN */
-+ struct {
-+ uint16_t tagProtocolId1; /**< User defined Tag Protocol Identifier, to be recognized
-+ on VLAN TAG on top of 0x8100 and 0x88A8 */
-+ uint16_t tagProtocolId2; /**< User defined Tag Protocol Identifier, to be recognized
-+ on VLAN TAG on top of 0x8100 and 0x88A8 */
-+ } vlanPrsOptions;
-+ /* PPP */
-+ struct{
-+ bool enableMTUCheck; /**< Check validity of MTU according to RFC2516 */
-+ } pppoePrsOptions;
-+
-+ /* IPV6 */
-+ struct{
-+ bool routingHdrEnable; /**< TRUE to enable routing header, otherwise ignore */
-+ } ipv6PrsOptions;
-+
-+ /* UDP */
-+ struct{
-+ bool padIgnoreChecksum; /**< TRUE to ignore pad in checksum */
-+ } udpPrsOptions;
-+
-+ /* TCP */
-+ struct {
-+ bool padIgnoreChecksum; /**< TRUE to ignore pad in checksum */
-+ } tcpPrsOptions;
-+} u_FmPcdHdrPrsOpts;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining each header for the parser
-+*//***************************************************************************/
-+typedef struct t_FmPcdPrsAdditionalHdrParams {
-+ e_NetHeaderType hdr; /**< Selected header; use HEADER_TYPE_NONE
-+ to indicate that sw parser is to run first
-+ (before HW parser, and independent of the
-+ existence of any protocol), in this case,
-+ swPrsEnable must be set, and all other
-+ parameters are irrelevant. */
-+ bool errDisable; /**< TRUE to disable error indication */
-+ bool swPrsEnable; /**< Enable jump to SW parser when this
-+ header is recognized by the HW parser. */
-+ uint8_t indexPerHdr; /**< Normally 0, if more than one sw parser
-+ attachments exists for the same header,
-+ (in the main sw parser code) use this
-+ index to distinguish between them. */
-+ bool usePrsOpts; /**< TRUE to use parser options. */
-+ u_FmPcdHdrPrsOpts prsOpts; /**< A union according to header type,
-+ defining the parser options selected.*/
-+} t_FmPcdPrsAdditionalHdrParams;
-+
-+/**************************************************************************//**
-+ @Description struct for defining port PCD parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortPcdPrsParams {
-+ uint8_t prsResultPrivateInfo; /**< The private info provides a method of inserting
-+ port information into the parser result. This information
-+ may be extracted by Keygen and be used for frames
-+ distribution when a per-port distinction is required,
-+ it may also be used as a port logical id for analyzing
-+ incoming frames. */
-+ uint8_t parsingOffset; /**< Number of bytes from beginning of packet to start parsing */
-+ e_NetHeaderType firstPrsHdr; /**< The type of the first header expected at 'parsingOffset' */
-+ bool includeInPrsStatistics; /**< TRUE to include this port in the parser statistics;
-+ NOTE: this field is not valid when the FM is in "guest" mode
-+ and IPC is not available. */
-+ uint8_t numOfHdrsWithAdditionalParams; /**< Normally 0, some headers may get
-+ special parameters */
-+ t_FmPcdPrsAdditionalHdrParams additionalParams[FM_PCD_PRS_NUM_OF_HDRS];
-+ /**< 'numOfHdrsWithAdditionalParams' structures
-+ of additional parameters
-+ for each header that requires them */
-+ bool setVlanTpid1; /**< TRUE to configure user selection of Ethertype to
-+ indicate a VLAN tag (in addition to the TPID values
-+ 0x8100 and 0x88A8). */
-+ uint16_t vlanTpid1; /**< extra tag to use if setVlanTpid1=TRUE. */
-+ bool setVlanTpid2; /**< TRUE to configure user selection of Ethertype to
-+ indicate a VLAN tag (in addition to the TPID values
-+ 0x8100 and 0x88A8). */
-+    uint16_t                vlanTpid2;              /**< extra tag to use if setVlanTpid2=TRUE. */
-+} t_FmPortPcdPrsParams;
-+
-+/**************************************************************************//**
-+ @Description struct for defining coarse classification parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortPcdCcParams {
-+ t_Handle h_CcTree; /**< A handle to a CC tree */
-+} t_FmPortPcdCcParams;
-+
-+/**************************************************************************//**
-+ @Description struct for defining keygen parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortPcdKgParams {
-+ uint8_t numOfSchemes; /**< Number of schemes for port to be bound to. */
-+ t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES];
-+ /**< Array of 'numOfSchemes' schemes handles for the
-+ port to be bound to */
-+ bool directScheme; /**< TRUE for going from parser to a specific scheme,
-+ regardless of parser result */
-+ t_Handle h_DirectScheme; /**< relevant only if direct == TRUE, Scheme handle,
-+ as returned by FM_PCD_KgSetScheme */
-+} t_FmPortPcdKgParams;
-+
-+/**************************************************************************//**
-+ @Description struct for defining policer parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortPcdPlcrParams {
-+ t_Handle h_Profile; /**< Selected profile handle */
-+} t_FmPortPcdPlcrParams;
-+
-+/**************************************************************************//**
-+ @Description struct for defining port PCD parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortPcdParams {
-+ e_FmPortPcdSupport pcdSupport; /**< Relevant for Rx and offline ports only.
-+ Describes the active PCD engines for this port. */
-+ t_Handle h_NetEnv; /**< HL Unused in PLCR only mode */
-+ t_FmPortPcdPrsParams *p_PrsParams; /**< Parser parameters for this port */
-+ t_FmPortPcdCcParams *p_CcParams; /**< Coarse classification parameters for this port */
-+ t_FmPortPcdKgParams *p_KgParams; /**< Keygen parameters for this port */
-+ t_FmPortPcdPlcrParams *p_PlcrParams; /**< Policer parameters for this port; Relevant for one of
-+ following cases:
-+ e_FM_PORT_PCD_SUPPORT_PLCR_ONLY or
-+ e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR were selected,
-+                                                          or if any flow uses a KG scheme where the policer
-+                                                          profile is not generated
-+                                                          ('bypassPlcrProfileGeneration' selected). */
-+ t_Handle h_IpReassemblyManip; /**< IP Reassembly manipulation */
-+#if (DPAA_VERSION >= 11)
-+ t_Handle h_CapwapReassemblyManip;/**< CAPWAP Reassembly manipulation */
-+#endif /* (DPAA_VERSION >= 11) */
-+} t_FmPortPcdParams;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining the Parser starting point
-+*//***************************************************************************/
-+typedef struct t_FmPcdPrsStart {
-+ uint8_t parsingOffset; /**< Number of bytes from beginning of packet to
-+ start parsing */
-+    e_NetHeaderType     firstPrsHdr;        /**< The type of the first header expected at
-+ 'parsingOffset' */
-+} t_FmPcdPrsStart;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description struct for defining VSP allocation parameters
-+*//***************************************************************************/
-+typedef struct t_FmPortVSPAllocParams {
-+ uint8_t numOfProfiles; /**< Number of Virtual Storage Profiles; must be a power of 2 */
-+    uint8_t     dfltRelativeId;     /**< The default Virtual-Storage-Profile-id dedicated to the Rx/OP port.
-+                                         The same default Virtual-Storage-Profile-id will be used for the coupled Tx port
-+                                         if the relevant function is called for an Rx port. */
-+ t_Handle h_FmTxPort; /**< Handle to coupled Tx Port; not relevant for OP port. */
-+} t_FmPortVSPAllocParams;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetPCD
-+
-+ @Description Calling this routine defines the port's PCD configuration.
-+ It changes it from its default configuration which is PCD
-+ disabled (BMI to BMI) and configures it according to the passed
-+ parameters.
-+
-+ May be used for Rx and OP ports only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_FmPortPcd A Structure of parameters defining the port's PCD
-+ configuration.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_SetPCD(t_Handle h_FmPort, t_FmPortPcdParams *p_FmPortPcd);
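/*
 * Illustrative sketch, not part of the original header: one plausible way to
 * tie the structures above together before calling FM_PORT_SetPCD(), here for
 * a parser-plus-policer flow. The handles (port, net-env, policer profile) are
 * assumed to come from the usual init flow after FM_PORT_Init(); HEADER_TYPE_ETH
 * and memset() are assumed to be available from the SDK/platform headers.
 */
static t_Error example_set_port_pcd(t_Handle h_FmPort,
                                    t_Handle h_NetEnv,
                                    t_Handle h_PlcrProfile)
{
    t_FmPortPcdParams     pcdParams;
    t_FmPortPcdPrsParams  prsParams;
    t_FmPortPcdPlcrParams plcrParams;

    memset(&pcdParams,  0, sizeof(pcdParams));
    memset(&prsParams,  0, sizeof(prsParams));
    memset(&plcrParams, 0, sizeof(plcrParams));

    prsParams.parsingOffset = 0;                 /* parse from the start of the frame */
    prsParams.firstPrsHdr   = HEADER_TYPE_ETH;   /* assumed net-header enum value */

    plcrParams.h_Profile = h_PlcrProfile;        /* previously created policer profile */

    pcdParams.pcdSupport   = e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR;
    pcdParams.h_NetEnv     = h_NetEnv;
    pcdParams.p_PrsParams  = &prsParams;
    pcdParams.p_PlcrParams = &plcrParams;

    return FM_PORT_SetPCD(h_FmPort, &pcdParams);
}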
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_DeletePCD
-+
-+ @Description Calling this routine releases the port's PCD configuration.
-+ The port returns to its default configuration which is PCD
-+ disabled (BMI to BMI) and all PCD configuration is removed.
-+
-+ May be used for Rx and OP ports which are
-+ in PCD mode only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_DeletePCD(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_AttachPCD
-+
-+ @Description This routine may be called after FM_PORT_DetachPCD was called,
-+ to return to the originally configured PCD support flow.
-+                      This pair of routines allows PCD configuration changes
-+                      that require the PCD not to be in use while the changes take place.
-+
-+ May be used for Rx and OP ports which are
-+ in PCD mode only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+t_Error FM_PORT_AttachPCD(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_DetachPCD
-+
-+ @Description Calling this routine detaches the port from its PCD functionality.
-+ The port returns to its default flow which is BMI to BMI.
-+
-+ May be used for Rx and OP ports which are
-+ in PCD mode only
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_AttachPCD().
-+*//***************************************************************************/
-+t_Error FM_PORT_DetachPCD(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdPlcrAllocProfiles
-+
-+ @Description This routine may be called only for ports that use the Policer in
-+ order to allocate private policer profiles.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] numOfProfiles The number of required policer profiles
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init() and FM_PCD_Init(),
-+ and before FM_PORT_SetPCD().
-+*//***************************************************************************/
-+t_Error FM_PORT_PcdPlcrAllocProfiles(t_Handle h_FmPort, uint16_t numOfProfiles);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdPlcrFreeProfiles
-+
-+ @Description This routine should be called for freeing private policer profiles.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init() and FM_PCD_Init(),
-+ and before FM_PORT_SetPCD().
-+*//***************************************************************************/
-+t_Error FM_PORT_PcdPlcrFreeProfiles(t_Handle h_FmPort);
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Function FM_PORT_VSPAlloc
-+
-+ @Description This routine allocates VSPs per port and forces the port to work
-+ in VSP mode. Note that the port is initialized by default with the
-+ physical-storage-profile only.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_Params A structure of parameters for allocating VSPs per port
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init(), and before FM_PORT_SetPCD()
-+ and also before FM_PORT_Enable(); i.e. the port should be disabled.
-+*//***************************************************************************/
-+t_Error FM_PORT_VSPAlloc(t_Handle h_FmPort, t_FmPortVSPAllocParams *p_Params);
-+#endif /* (DPAA_VERSION >= 11) */
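/*
 * Illustrative sketch, not part of the original header: allocating a small set
 * of VSPs for an Rx port before it is enabled, as the cautions above require.
 * The two handles are assumed to come from the normal port bring-up flow.
 */
#if (DPAA_VERSION >= 11)
static t_Error example_vsp_alloc(t_Handle h_RxPort, t_Handle h_CoupledTxPort)
{
    t_FmPortVSPAllocParams vspAlloc;

    memset(&vspAlloc, 0, sizeof(vspAlloc));
    vspAlloc.numOfProfiles  = 4;               /* must be a power of 2 */
    vspAlloc.dfltRelativeId = 0;               /* default profile for this port */
    vspAlloc.h_FmTxPort     = h_CoupledTxPort; /* not relevant for OP ports */

    /* Allowed after FM_PORT_Init(), before FM_PORT_SetPCD() and FM_PORT_Enable() */
    return FM_PORT_VSPAlloc(h_RxPort, &vspAlloc);
}
#endif /* (DPAA_VERSION >= 11) */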
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdKgModifyInitialScheme
-+
-+ @Description This routine may be called only for ports that use the keygen in
-+                  order to change the initial scheme that frames should be routed to.
-+ The change may be of a scheme id (in case of direct mode),
-+ from direct to indirect, or from indirect to direct - specifying the scheme id.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_FmPcdKgScheme A structure of parameters for defining whether
-+ a scheme is direct/indirect, and if direct - scheme id.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
-+*//***************************************************************************/
-+t_Error FM_PORT_PcdKgModifyInitialScheme (t_Handle h_FmPort, t_FmPcdKgSchemeSelect *p_FmPcdKgScheme);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdPlcrModifyInitialProfile
-+
-+ @Description This routine may be called for ports with flows
-+ e_FM_PORT_PCD_SUPPORT_PLCR_ONLY or e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR
-+                  only, to change the initial Policer profile that frames should be
-+ routed to. The change may be of a profile and/or absolute/direct
-+ mode selection.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] h_Profile Policer profile handle
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
-+*//***************************************************************************/
-+t_Error FM_PORT_PcdPlcrModifyInitialProfile (t_Handle h_FmPort, t_Handle h_Profile);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdCcModifyTree
-+
-+ @Description This routine may be called for ports that use coarse classification tree
-+ if the user wishes to replace the tree. The routine may not be called while port
-+ receives packets using the PCD functionalities, therefor port must be first detached
-+ from the PCD, only than the routine may be called, and than port be attached to PCD again.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] h_CcTree A CC tree that was already built. The tree id as returned from
-+ the BuildTree routine.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init(), FM_PORT_SetPCD() and FM_PORT_DetachPCD()
-+*//***************************************************************************/
-+t_Error FM_PORT_PcdCcModifyTree (t_Handle h_FmPort, t_Handle h_CcTree);
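/*
 * Illustrative sketch, not part of the original header: the detach / modify /
 * attach sequence described above for replacing a coarse-classification tree
 * while the port is not passing traffic through the PCD. h_NewCcTree is
 * assumed to be a tree that was already built elsewhere.
 */
static t_Error example_replace_cc_tree(t_Handle h_FmPort, t_Handle h_NewCcTree)
{
    t_Error err;

    err = FM_PORT_DetachPCD(h_FmPort);          /* stop using the PCD first */
    if (err != E_OK)
        return err;

    err = FM_PORT_PcdCcModifyTree(h_FmPort, h_NewCcTree);
    if (err != E_OK)
        return err;

    return FM_PORT_AttachPCD(h_FmPort);         /* resume the original PCD flow */
}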
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdKgBindSchemes
-+
-+ @Description This routine may be called to add more schemes for the
-+                  port to be bound to. The selected schemes are not created here;
-+                  this specific port simply starts using them.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_PortScheme A structure defining the list of schemes to be added.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
-+*//***************************************************************************/
-+t_Error FM_PORT_PcdKgBindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdKgUnbindSchemes
-+
-+ @Description This routine may be called to remove schemes that the
-+                  port is bound to. The selected schemes are not removed or invalidated;
-+                  this specific port simply stops using them.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_PortScheme A structure defining the list of schemes to be removed.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
-+*//***************************************************************************/
-+t_Error FM_PORT_PcdKgUnbindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetIPv4OptionsCount
-+
-+ @Description Retrieves the IPv4-options counter for this port.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[out] p_Ipv4OptionsCount will hold the counter value
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init()
-+*//***************************************************************************/
-+t_Error FM_PORT_GetIPv4OptionsCount(t_Handle h_FmPort, uint32_t *p_Ipv4OptionsCount);
-+
-+/** @} */ /* end of FM_PORT_pcd_runtime_control_grp group */
-+/** @} */ /* end of FM_PORT_runtime_control_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_PORT_runtime_data_grp FM Port Runtime Data-path Unit
-+
-+ @Description FM Port Runtime data unit API functions, definitions and enums.
-+ This API is valid only if working in Independent-Mode.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ImTx
-+
-+ @Description Tx function, called to transmit a data buffer on the port.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_Data A pointer to an LCP data buffer.
-+ @Param[in] length Size of data for transmission.
-+ @Param[in] lastBuffer Buffer position - TRUE for the last buffer
-+ of a frame, including a single buffer frame
-+ @Param[in] h_BufContext A handle of the user associated with this buffer
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+ NOTE - This routine can be used only when working in
-+ Independent-Mode mode.
-+                  Independent-Mode.
-+t_Error FM_PORT_ImTx( t_Handle h_FmPort,
-+ uint8_t *p_Data,
-+ uint16_t length,
-+ bool lastBuffer,
-+ t_Handle h_BufContext);
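/*
 * Illustrative sketch, not part of the original header: transmitting a
 * single-buffer frame in Independent-Mode. The buffer lifetime and the user
 * context (NULL here) are assumed to be managed by the caller.
 */
static t_Error example_im_send(t_Handle h_FmPort, uint8_t *p_Frame, uint16_t frameLen)
{
    /* A single buffer is also the last buffer of the frame */
    return FM_PORT_ImTx(h_FmPort, p_Frame, frameLen, TRUE, NULL);
}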
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ImTxConf
-+
-+ @Description Tx port confirmation routine, optional, may be called to verify
-+ transmission of all frames. The procedure performed by this
-+ routine will be performed automatically on next buffer transmission,
-+ but if desired, calling this routine will invoke this action on
-+ demand.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+ NOTE - This routine can be used only when working in
-+                  Independent-Mode.
-+*//***************************************************************************/
-+void FM_PORT_ImTxConf(t_Handle h_FmPort);
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ImRx
-+
-+ @Description Rx function, may be called to poll for received buffers.
-+ Normally, Rx process is invoked by the driver on Rx interrupt.
-+ Alternatively, this routine may be called on demand.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+ NOTE - This routine can be used only when working in
-+                  Independent-Mode.
-+*//***************************************************************************/
-+t_Error FM_PORT_ImRx(t_Handle h_FmPort);
-+
-+/** @} */ /* end of FM_PORT_runtime_data_grp group */
-+/** @} */ /* end of FM_PORT_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+
-+#ifdef NCSW_BACKWARD_COMPATIBLE_API
-+#define FM_PORT_ConfigTxFifoDeqPipelineDepth FM_PORT_ConfigFifoDeqPipelineDepth
-+#endif /* NCSW_BACKWARD_COMPATIBLE_API */
-+
-+
-+#endif /* __FM_PORT_EXT */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_rtc_ext.h
-@@ -0,0 +1,619 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_rtc_ext.h
-+
-+ @Description External definitions and API for FM RTC IEEE1588 Timer Module.
-+
-+ @Cautions None.
-+*//***************************************************************************/
-+
-+#ifndef __FM_RTC_EXT_H__
-+#define __FM_RTC_EXT_H__
-+
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "fsl_fman_rtc.h"
-+
-+/**************************************************************************//**
-+
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group fm_rtc_grp FM RTC
-+
-+ @Description FM RTC functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group fm_rtc_init_grp FM RTC Initialization Unit
-+
-+ @Description FM RTC initialization API.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description FM RTC Alarm Polarity Options.
-+*//***************************************************************************/
-+typedef enum e_FmRtcAlarmPolarity
-+{
-+ e_FM_RTC_ALARM_POLARITY_ACTIVE_HIGH = E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH, /**< Active-high output polarity */
-+ e_FM_RTC_ALARM_POLARITY_ACTIVE_LOW = E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW /**< Active-low output polarity */
-+} e_FmRtcAlarmPolarity;
-+
-+/**************************************************************************//**
-+ @Description FM RTC Trigger Polarity Options.
-+*//***************************************************************************/
-+typedef enum e_FmRtcTriggerPolarity
-+{
-+ e_FM_RTC_TRIGGER_ON_RISING_EDGE = E_FMAN_RTC_TRIGGER_ON_RISING_EDGE, /**< Trigger on rising edge */
-+ e_FM_RTC_TRIGGER_ON_FALLING_EDGE = E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE /**< Trigger on falling edge */
-+} e_FmRtcTriggerPolarity;
-+
-+/**************************************************************************//**
-+ @Description IEEE1588 Timer Module FM RTC Optional Clock Sources.
-+*//***************************************************************************/
-+typedef enum e_FmSrcClock
-+{
-+ e_FM_RTC_SOURCE_CLOCK_EXTERNAL = E_FMAN_RTC_SOURCE_CLOCK_EXTERNAL, /**< external high precision timer reference clock */
-+ e_FM_RTC_SOURCE_CLOCK_SYSTEM = E_FMAN_RTC_SOURCE_CLOCK_SYSTEM, /**< MAC system clock */
-+    e_FM_RTC_SOURCE_CLOCK_OSCILATOR = E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR    /**< RTC clock oscillator */
-+}e_FmSrcClk;
-+
-+/**************************************************************************//**
-+ @Description FM RTC configuration parameters structure.
-+
-+ This structure should be passed to FM_RTC_Config().
-+*//***************************************************************************/
-+typedef struct t_FmRtcParams
-+{
-+ t_Handle h_Fm; /**< FM Handle*/
-+ uintptr_t baseAddress; /**< Base address of FM RTC registers */
-+ t_Handle h_App; /**< A handle to an application layer object; This handle will
-+ be passed by the driver upon calling the above callbacks */
-+} t_FmRtcParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_Config
-+
-+ @Description Configures the FM RTC module according to user's parameters.
-+
-+ The driver assigns default values to some FM RTC parameters.
-+ These parameters can be overwritten using the advanced
-+ configuration routines.
-+
-+ @Param[in] p_FmRtcParam - FM RTC configuration parameters.
-+
-+ @Return Handle to the new FM RTC object; NULL pointer on failure.
-+
-+ @Cautions None
-+*//***************************************************************************/
-+t_Handle FM_RTC_Config(t_FmRtcParams *p_FmRtcParam);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_Init
-+
-+ @Description Initializes the FM RTC driver and hardware.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_Init(t_Handle h_FmRtc);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_Free
-+
-+ @Description Frees the FM RTC object and all allocated resources.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_Free(t_Handle h_FmRtc);
-+
-+
-+/**************************************************************************//**
-+ @Group fm_rtc_adv_config_grp FM RTC Advanced Configuration Unit
-+
-+ @Description FM RTC advanced configuration functions.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigPeriod
-+
-+ @Description Configures the period of the timestamp if different than
-+ default [DEFAULT_clockPeriod].
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] period - Period in nano-seconds.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigPeriod(t_Handle h_FmRtc, uint32_t period);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigSourceClock
-+
-+ @Description Configures the source clock of the RTC.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] srcClk - Source clock selection.
-+ @Param[in] freqInMhz - the source-clock frequency (in MHz).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigSourceClock(t_Handle h_FmRtc,
-+ e_FmSrcClk srcClk,
-+ uint32_t freqInMhz);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigPulseRealignment
-+
-+ @Description Configures the RTC for automatic FIPER pulse realignment in
-+              response to timer adjustments [DEFAULT_pulseRealign]
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] enable - TRUE to enable automatic realignment.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigPulseRealignment(t_Handle h_FmRtc, bool enable);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigFrequencyBypass
-+
-+ @Description Configures the RTC to bypass the frequency compensation
-+ mechanism. [DEFAULT_bypass]
-+
-+ In this mode, the RTC clock is identical to the source clock.
-+ This feature can be useful when the system contains an external
-+ RTC with inherent frequency compensation.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] enabled - TRUE to bypass frequency compensation;
-+ FALSE otherwise.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigFrequencyBypass(t_Handle h_FmRtc, bool enabled);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigInvertedInputClockPhase
-+
-+ @Description Configures the RTC to invert the source clock phase on input.
-+ [DEFAULT_invertInputClkPhase]
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] inverted - TRUE to invert the source clock phase on input.
-+ FALSE otherwise.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigInvertedInputClockPhase(t_Handle h_FmRtc, bool inverted);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigInvertedOutputClockPhase
-+
-+ @Description Configures the RTC to invert the output clock phase.
-+ [DEFAULT_invertOutputClkPhase]
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] inverted - TRUE to invert the output clock phase.
-+ FALSE otherwise.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigInvertedOutputClockPhase(t_Handle h_FmRtc, bool inverted);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigOutputClockDivisor
-+
-+ @Description Configures the divisor for generating the output clock from
-+ the RTC clock. [DEFAULT_outputClockDivisor]
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] divisor - Divisor for generation of the output clock.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigOutputClockDivisor(t_Handle h_FmRtc, uint16_t divisor);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigAlarmPolarity
-+
-+ @Description Configures the polarity (active-high/active-low) of a specific
-+ alarm signal. [DEFAULT_alarmPolarity]
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] alarmId - Alarm ID.
-+ @Param[in] alarmPolarity - Alarm polarity.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigAlarmPolarity(t_Handle h_FmRtc,
-+ uint8_t alarmId,
-+ e_FmRtcAlarmPolarity alarmPolarity);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ConfigExternalTriggerPolarity
-+
-+ @Description Configures the polarity (rising/falling edge) of a specific
-+ external trigger signal. [DEFAULT_triggerPolarity]
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] triggerId - Trigger ID.
-+ @Param[in] triggerPolarity - Trigger polarity.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously created using FM_RTC_Config().
-+*//***************************************************************************/
-+t_Error FM_RTC_ConfigExternalTriggerPolarity(t_Handle h_FmRtc,
-+ uint8_t triggerId,
-+ e_FmRtcTriggerPolarity triggerPolarity);
-+
-+/** @} */ /* end of fm_rtc_adv_config_grp */
-+/** @} */ /* end of fm_rtc_init_grp */
-+
-+
-+/**************************************************************************//**
-+ @Group fm_rtc_control_grp FM RTC Control Unit
-+
-+ @Description FM RTC runtime control API.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function t_FmRtcExceptionsCallback
-+
-+ @Description Exceptions user callback routine, used by the various RTC mechanisms.
-+
-+ @Param[in] h_App - User's application descriptor.
-+ @Param[in] id - source id.
-+*//***************************************************************************/
-+typedef void (t_FmRtcExceptionsCallback) ( t_Handle h_App, uint8_t id);
-+
-+/**************************************************************************//**
-+ @Description FM RTC alarm parameters.
-+*//***************************************************************************/
-+typedef struct t_FmRtcAlarmParams {
-+ uint8_t alarmId; /**< 0 or 1 */
-+ uint64_t alarmTime; /**< In nanoseconds, the time when the alarm
-+ should go off - must be a multiple of
-+ the RTC period */
-+ t_FmRtcExceptionsCallback *f_AlarmCallback; /**< This routine will be called when RTC
-+ reaches alarmTime */
-+ bool clearOnExpiration; /**< TRUE to turn off the alarm once expired. */
-+} t_FmRtcAlarmParams;
-+
-+/**************************************************************************//**
-+ @Description FM RTC Periodic Pulse parameters.
-+*//***************************************************************************/
-+typedef struct t_FmRtcPeriodicPulseParams {
-+ uint8_t periodicPulseId; /**< 0 or 1 */
-+ uint64_t periodicPulsePeriod; /**< In Nanoseconds. Must be
-+ a multiple of the RTC period */
-+ t_FmRtcExceptionsCallback *f_PeriodicPulseCallback; /**< This routine will be called every
-+ periodicPulsePeriod. */
-+} t_FmRtcPeriodicPulseParams;
-+
-+/**************************************************************************//**
-+ @Description FM RTC External Trigger parameters.
-+*//***************************************************************************/
-+typedef struct t_FmRtcExternalTriggerParams {
-+ uint8_t externalTriggerId; /**< 0 or 1 */
-+ bool usePulseAsInput; /**< Use the pulse interrupt instead of
-+ an external signal */
-+    t_FmRtcExceptionsCallback   *f_ExternalTriggerCallback; /**< This routine will be called upon
-+                                                                  an external trigger event. */
-+} t_FmRtcExternalTriggerParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_Enable
-+
-+ @Description Enables the RTC (time count is started).
-+
-+ The user can select to resume the time count from previous
-+ point, or to restart the time count.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] resetClock - Restart the time count from zero.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_Enable(t_Handle h_FmRtc, bool resetClock);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_Disable
-+
-+ @Description Disables the RTC (time count is stopped).
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_Disable(t_Handle h_FmRtc);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_SetClockOffset
-+
-+ @Description Sets the clock offset (usually relative to another clock).
-+
-+ The user can pass a negative offset value.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] offset - New clock offset (in nanoseconds).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_SetClockOffset(t_Handle h_FmRtc, int64_t offset);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_SetAlarm
-+
-+ @Description Schedules an alarm event to a given RTC time.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] p_FmRtcAlarmParams - Alarm parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+ Must be called only prior to FM_RTC_Enable().
-+*//***************************************************************************/
-+t_Error FM_RTC_SetAlarm(t_Handle h_FmRtc, t_FmRtcAlarmParams *p_FmRtcAlarmParams);
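/*
 * Illustrative sketch, not part of the original header: arming alarm 0 as a
 * one-shot event one second after a caller-chosen base time. Per the caution
 * above this is done before FM_RTC_Enable(); 'baseTimeNs' is assumed to be a
 * multiple of the RTC period, and memset() is assumed available.
 */
static void example_alarm_cb(t_Handle h_App, uint8_t id)
{
    /* application-specific handling of alarm 'id' */
    (void)h_App; (void)id;
}

static t_Error example_set_alarm(t_Handle h_FmRtc, uint64_t baseTimeNs)
{
    t_FmRtcAlarmParams alarm;

    memset(&alarm, 0, sizeof(alarm));
    alarm.alarmId           = 0;
    alarm.alarmTime         = baseTimeNs + 1000000000ULL;  /* +1 s, in nanoseconds */
    alarm.f_AlarmCallback   = example_alarm_cb;
    alarm.clearOnExpiration = TRUE;                         /* one-shot */

    return FM_RTC_SetAlarm(h_FmRtc, &alarm);
}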
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_SetPeriodicPulse
-+
-+ @Description Sets a periodic pulse.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] p_FmRtcPeriodicPulseParams - Periodic pulse parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+ Must be called only prior to FM_RTC_Enable().
-+*//***************************************************************************/
-+t_Error FM_RTC_SetPeriodicPulse(t_Handle h_FmRtc, t_FmRtcPeriodicPulseParams *p_FmRtcPeriodicPulseParams);
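/*
 * Illustrative sketch, not part of the original header: programming periodic
 * pulse 0 to fire every millisecond, with a caller-supplied callback of the
 * t_FmRtcExceptionsCallback type defined above. Per the caution, this is done
 * before FM_RTC_Enable(); the period must be a multiple of the RTC period.
 */
static t_Error example_set_periodic_pulse(t_Handle h_FmRtc, t_FmRtcExceptionsCallback *f_Cb)
{
    t_FmRtcPeriodicPulseParams pulse;

    memset(&pulse, 0, sizeof(pulse));
    pulse.periodicPulseId         = 0;
    pulse.periodicPulsePeriod     = 1000000ULL;   /* 1 ms expressed in nanoseconds */
    pulse.f_PeriodicPulseCallback = f_Cb;

    return FM_RTC_SetPeriodicPulse(h_FmRtc, &pulse);
}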
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ClearPeriodicPulse
-+
-+ @Description Clears a periodic pulse.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] periodicPulseId - Periodic pulse id.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_ClearPeriodicPulse(t_Handle h_FmRtc, uint8_t periodicPulseId);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_SetExternalTrigger
-+
-+ @Description Sets an external trigger indication and defines a callback
-+                  routine to be called on such an event.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] p_FmRtcExternalTriggerParams - External Trigger parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_SetExternalTrigger(t_Handle h_FmRtc, t_FmRtcExternalTriggerParams *p_FmRtcExternalTriggerParams);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_ClearExternalTrigger
-+
-+ @Description Clears external trigger indication.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] id - External Trigger id.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_ClearExternalTrigger(t_Handle h_FmRtc, uint8_t id);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_GetExternalTriggerTimeStamp
-+
-+ @Description Reads the External Trigger TimeStamp.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] triggerId - External Trigger id.
-+ @Param[out] p_TimeStamp - External Trigger timestamp (in nanoseconds).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_GetExternalTriggerTimeStamp(t_Handle h_FmRtc,
-+ uint8_t triggerId,
-+ uint64_t *p_TimeStamp);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_GetCurrentTime
-+
-+ @Description Returns the current RTC time.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[out] p_Ts - returned time stamp (in nanoseconds).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_GetCurrentTime(t_Handle h_FmRtc, uint64_t *p_Ts);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_SetCurrentTime
-+
-+ @Description Sets the current RTC time.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] ts - The new time stamp (in nanoseconds).
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_SetCurrentTime(t_Handle h_FmRtc, uint64_t ts);
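/*
 * Illustrative sketch, not part of the original header: a minimal bring-up
 * sequence that seeds the counter before starting it. The t_FmRtcParams
 * contents (FM handle, register base address) are assumed to be filled in by
 * platform code; a real driver would also call FM_RTC_Free() on error paths.
 */
static t_Handle example_rtc_start(t_FmRtcParams *p_RtcParams, uint64_t initialTimeNs)
{
    t_Handle h_FmRtc = FM_RTC_Config(p_RtcParams);

    if (!h_FmRtc)
        return NULL;

    if (FM_RTC_Init(h_FmRtc) != E_OK)
        return NULL;

    /* Seed the timestamp, then start counting without resetting it */
    if (FM_RTC_SetCurrentTime(h_FmRtc, initialTimeNs) != E_OK ||
        FM_RTC_Enable(h_FmRtc, FALSE) != E_OK)
        return NULL;

    return h_FmRtc;
}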
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_GetFreqCompensation
-+
-+ @Description Retrieves the frequency compensation value
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[out] p_Compensation - A pointer to the returned value of compensation.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_GetFreqCompensation(t_Handle h_FmRtc, uint32_t *p_Compensation);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_SetFreqCompensation
-+
-+ @Description Sets a new frequency compensation value.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] freqCompensation - The new frequency compensation value to set.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_SetFreqCompensation(t_Handle h_FmRtc, uint32_t freqCompensation);
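/*
 * Illustrative sketch, not part of the original header: a read-modify-write of
 * the frequency compensation value, as a PTP-style servo might do. The sign
 * and scaling of 'delta' are application-specific and only hinted at here.
 */
static t_Error example_adjust_compensation(t_Handle h_FmRtc, int32_t delta)
{
    uint32_t comp;
    t_Error  err = FM_RTC_GetFreqCompensation(h_FmRtc, &comp);

    if (err != E_OK)
        return err;

    return FM_RTC_SetFreqCompensation(h_FmRtc, comp + (uint32_t)delta);
}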
-+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+/**************************************************************************//**
-+ @Function FM_RTC_EnableInterrupt
-+
-+ @Description Enables the specified FM RTC interrupt events.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] events - Interrupt events.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_RTC_EnableInterrupt(t_Handle h_FmRtc, uint32_t events);
-+
-+/**************************************************************************//**
-+ @Function FM_RTC_DisableInterrupt
-+
-+ @Description Disables the specified FM RTC interrupt events.
-+
-+ @Param[in] h_FmRtc - Handle to FM RTC object.
-+ @Param[in] events - Interrupt events.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_RTC_DisableInterrupt(t_Handle h_FmRtc, uint32_t events);
-+#endif
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+/**************************************************************************//**
-+ @Function FM_RTC_DumpRegs
-+
-+ @Description Dumps all FM RTC registers
-+
-+ @Param[in] h_FmRtc A handle to an FM RTC Module.
-+
-+ @Return E_OK on success;
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+t_Error FM_RTC_DumpRegs(t_Handle h_FmRtc);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+/** @} */ /* end of fm_rtc_control_grp */
-+/** @} */ /* end of fm_rtc_grp */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#endif /* __FM_RTC_EXT_H__ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/fm_vsp_ext.h
-@@ -0,0 +1,411 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File fm_vsp_ext.h
-+
-+ @Description FM Virtual Storage-Profile ...
-+*//***************************************************************************/
-+#ifndef __FM_VSP_EXT_H
-+#define __FM_VSP_EXT_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "debug_ext.h"
-+
-+#include "fm_ext.h"
-+
-+
-+/**************************************************************************//**
-+
-+ @Group FM_grp Frame Manager API
-+
-+ @Description FM API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_VSP_grp FM Virtual-Storage-Profile
-+
-+ @Description FM Virtual-Storage-Profile API
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_VSP_init_grp FM VSP Initialization Unit
-+
-+ @Description FM VSP initialization API.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Virtual Storage Profile
-+*//***************************************************************************/
-+typedef struct t_FmVspParams {
-+ t_Handle h_Fm; /**< A handle to the FM object this VSP related to */
-+ t_FmExtPools extBufPools; /**< Which external buffer pools are used
-+ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes.
-+                                          This parameter is associated with Rx / OP ports. */
-+ uint16_t liodnOffset; /**< VSP's LIODN offset */
-+ struct {
-+ e_FmPortType portType; /**< Port type */
-+ uint8_t portId; /**< Port Id - relative to type */
-+ } portParams;
-+ uint8_t relativeProfileId; /**< VSP Id - relative to VSP's range
-+ defined in relevant FM object */
-+} t_FmVspParams;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_Config
-+
-+ @Description Creates descriptor for the FM VSP module.
-+
-+ The routine returns a handle (descriptor) to the FM VSP object.
-+ This descriptor must be passed as first parameter to all other
-+ FM VSP function calls.
-+
-+ No actual initialization or configuration of FM hardware is
-+ done by this routine.
-+
-+ @Param[in] p_FmVspParams Pointer to data structure of parameters
-+
-+ @Retval Handle to FM VSP object, or NULL for Failure.
-+*//***************************************************************************/
-+t_Handle FM_VSP_Config(t_FmVspParams *p_FmVspParams);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_Init
-+
-+ @Description Initializes the FM VSP module
-+
-+ @Param[in] h_FmVsp - FM VSP module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_VSP_Init(t_Handle h_FmVsp);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_Free
-+
-+ @Description Frees all resources that were assigned to FM VSP module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmVsp - FM VSP module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error FM_VSP_Free(t_Handle h_FmVsp);
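/*
 * Illustrative sketch, not part of the original header: the config/init flow
 * for a single VSP. The FM handle and port identifiers are placeholders;
 * e_FM_PORT_TYPE_RX is assumed to be a valid e_FmPortType value from fm_ext.h,
 * and extBufPools would have to describe the port's real buffer pools.
 */
static t_Handle example_vsp_create(t_Handle h_Fm, uint8_t rxPortId, uint8_t profileId)
{
    t_FmVspParams vspParams;
    t_Handle      h_FmVsp;

    memset(&vspParams, 0, sizeof(vspParams));
    vspParams.h_Fm                = h_Fm;
    vspParams.relativeProfileId   = profileId;
    vspParams.portParams.portType = e_FM_PORT_TYPE_RX;  /* assumed enum value */
    vspParams.portParams.portId   = rxPortId;
    /* vspParams.extBufPools / liodnOffset: omitted in this sketch */

    h_FmVsp = FM_VSP_Config(&vspParams);
    if (!h_FmVsp)
        return NULL;

    /* Optional FM_VSP_Config*() tuning calls would go here, before FM_VSP_Init() */
    if (FM_VSP_Init(h_FmVsp) != E_OK) {
        FM_VSP_Free(h_FmVsp);
        return NULL;
    }

    return h_FmVsp;
}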
-+
-+
-+/**************************************************************************//**
-+ @Group FM_VSP_adv_config_grp FM VSP Advanced Configuration Unit
-+
-+ @Description FM VSP advanced configuration functions.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigBufferPrefixContent
-+
-+ @Description Defines the structure, size and content of the application buffer.
-+
-+                  The prefix is laid out at the start of the application buffer:
-+                  the FM will save the first 'privDataSize' bytes and then, depending
-+                  on 'passPrsResult' and 'passTimeStamp', copy the parse result and
-+                  the timestamp, followed by the packet itself (in this order), into
-+                  the application buffer at the corresponding offsets. In VSPs defined
-+                  for Tx ports, if 'passPrsResult' is set, the application should write
-+                  the parse result at its offset within the prefix.
-+
-+ Calling this routine changes the buffer margins definitions
-+ in the internal driver data base from its default
-+ configuration: Data size: [DEFAULT_FM_SP_bufferPrefixContent_privDataSize]
-+ Pass Parser result: [DEFAULT_FM_SP_bufferPrefixContent_passPrsResult].
-+ Pass timestamp: [DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp].
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in,out] p_FmBufferPrefixContent A structure of parameters describing the
-+ structure of the buffer.
-+ Out parameter: Start margin - offset
-+ of data from start of external buffer.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigBufferPrefixContent(t_Handle h_FmVsp,
-+ t_FmBufferPrefixContent *p_FmBufferPrefixContent);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigDmaSwapData
-+
-+ @Description Calling this routine changes the DMA swap data parameter
-+ in the internal driver data base from its default
-+ configuration [DEFAULT_FM_SP_dmaSwapData]
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] swapData New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigDmaSwapData(t_Handle h_FmVsp, e_FmDmaSwapOption swapData);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigDmaIcCacheAttr
-+
-+ @Description Calling this routine changes the internal context cache
-+ attribute parameter in the internal driver data base
-+ from its default configuration [DEFAULT_FM_SP_dmaIntContextCacheAttr]
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] intContextCacheAttr New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigDmaIcCacheAttr(t_Handle h_FmVsp,
-+ e_FmDmaCacheOption intContextCacheAttr);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigDmaHdrAttr
-+
-+ @Description Calling this routine changes the header cache
-+ attribute parameter in the internal driver data base
-+ from its default configuration [DEFAULT_FM_SP_dmaHeaderCacheAttr]
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] headerCacheAttr New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigDmaHdrAttr(t_Handle h_FmVsp, e_FmDmaCacheOption headerCacheAttr);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigDmaScatterGatherAttr
-+
-+ @Description Calling this routine changes the scatter gather cache
-+ attribute parameter in the internal driver data base
-+ from its default configuration [DEFAULT_FM_SP_dmaScatterGatherCacheAttr]
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] scatterGatherCacheAttr New selection
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigDmaScatterGatherAttr(t_Handle h_FmVsp,
-+ e_FmDmaCacheOption scatterGatherCacheAttr);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigDmaWriteOptimize
-+
-+ @Description Calling this routine changes the write optimization
-+ parameter in the internal driver data base
-+ from its default configuration: optimize = [DEFAULT_FM_SP_dmaWriteOptimize]
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] optimize TRUE to enable optimization, FALSE for normal operation
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigDmaWriteOptimize(t_Handle h_FmVsp, bool optimize);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigNoScatherGather
-+
-+ @Description Calling this routine changes the possibility to receive scatter/gather (S/G) frames
-+              in the internal driver data base
-+              from its default configuration [DEFAULT_FM_SP_noScatherGather]
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] noScatherGather TRUE to operate without scatter/gather capability.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigNoScatherGather(t_Handle h_FmVsp, bool noScatherGather);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigPoolDepletion
-+
-+ @Description Calling this routine enables pause frame generation depending on the
-+ depletion status of BM pools. It also defines the conditions to activate
-+ this functionality. By default, this functionality is disabled.
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] p_BufPoolDepletion A structure of pool depletion parameters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigPoolDepletion(t_Handle h_FmVsp, t_FmBufPoolDepletion *p_BufPoolDepletion);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigBackupPools
-+
-+ @Description Calling this routine allows the configuration of some of the BM pools
-+ defined for this port as backup pools.
-+ A pool configured to be a backup pool will be used only if all other
-+ enabled non-backup pools are depleted.
-+
-+ @Param[in] h_FmVsp A handle to a FM VSP module.
-+ @Param[in] p_BackupBmPools An array of pool id's. All pools specified here will
-+ be defined as backup pools.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+t_Error FM_VSP_ConfigBackupPools(t_Handle h_FmVsp, t_FmBackupBmPools *p_BackupBmPools);
-+
-+/** @} */ /* end of FM_VSP_adv_config_grp group */
-+/** @} */ /* end of FM_VSP_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_VSP_control_grp FM VSP Control Unit
-+
-+ @Description FM VSP runtime control API.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_GetBufferDataOffset
-+
-+ @Description Relevant for Rx ports.
-+ Returns the data offset from the beginning of the data buffer
-+
-+ @Param[in] h_FmVsp - FM PORT module descriptor
-+
-+ @Return data offset.
-+
-+ @Cautions Allowed only following FM_VSP_Init().
-+*//***************************************************************************/
-+uint32_t FM_VSP_GetBufferDataOffset(t_Handle h_FmVsp);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_GetBufferICInfo
-+
-+ @Description   Returns a pointer to the Internal Context information in the data buffer.
-+
-+ @Param[in] h_FmVsp - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+ @Return Internal context info pointer on success, NULL if 'allOtherInfo' was not
-+ configured for this port.
-+
-+ @Cautions Allowed only following FM_VSP_Init().
-+*//***************************************************************************/
-+uint8_t * FM_VSP_GetBufferICInfo(t_Handle h_FmVsp, char *p_Data);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_GetBufferPrsResult
-+
-+ @Description Returns the pointer to the parse result in the data buffer.
-+ In Rx ports this is relevant after reception, if parse
-+ result is configured to be part of the data passed to the
-+ application. For non Rx ports it may be used to get the pointer
-+ of the area in the buffer where parse result should be
-+ initialized - if so configured.
-+ See FM_VSP_ConfigBufferPrefixContent for data buffer prefix
-+ configuration.
-+
-+ @Param[in] h_FmVsp - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+ @Return Parse result pointer on success, NULL if parse result was not
-+ configured for this port.
-+
-+ @Cautions Allowed only following FM_VSP_Init().
-+*//***************************************************************************/
-+t_FmPrsResult * FM_VSP_GetBufferPrsResult(t_Handle h_FmVsp, char *p_Data);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_GetBufferTimeStamp
-+
-+ @Description Returns the time stamp in the data buffer.
-+ Relevant for Rx ports for getting the buffer time stamp.
-+ See FM_VSP_ConfigBufferPrefixContent for data buffer prefix
-+ configuration.
-+
-+ @Param[in] h_FmVsp - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+ @Return        A pointer to the time stamp on success, NULL otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Init().
-+*//***************************************************************************/
-+uint64_t * FM_VSP_GetBufferTimeStamp(t_Handle h_FmVsp, char *p_Data);
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_GetBufferHashResult
-+
-+ @Description Given a data buffer, on the condition that hash result was defined
-+ as a part of the buffer content (see FM_VSP_ConfigBufferPrefixContent)
-+ this routine will return the pointer to the hash result location in the
-+ buffer prefix.
-+
-+ @Param[in] h_FmVsp - FM PORT module descriptor
-+ @Param[in] p_Data - A pointer to the data buffer.
-+
-+ @Return A pointer to the hash result on success, NULL otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Init().
-+*//***************************************************************************/
-+uint8_t * FM_VSP_GetBufferHashResult(t_Handle h_FmVsp, char *p_Data);
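
For reference, a hedged sketch of how these prefix accessors are typically used on a received buffer, assuming the corresponding fields were enabled through FM_VSP_ConfigBufferPrefixContent() and that XX_Print() is the SDK's printf-style output routine (the same one used by the dump macros later in this patch):

    static void vsp_inspect_rx_buffer(t_Handle h_FmVsp, char *p_Data)
    {
        uint32_t       dataOffset = FM_VSP_GetBufferDataOffset(h_FmVsp);
        t_FmPrsResult *p_PrsRes   = FM_VSP_GetBufferPrsResult(h_FmVsp, p_Data);
        uint64_t      *p_Ts       = FM_VSP_GetBufferTimeStamp(h_FmVsp, p_Data);
        uint8_t       *p_Hash     = FM_VSP_GetBufferHashResult(h_FmVsp, p_Data);

        /* Each accessor returns NULL when that field was not configured
         * into the buffer prefix for this port. */
        if (p_PrsRes)
            XX_Print("parse result at %p, data offset %u\r\n",
                     p_PrsRes, (unsigned int)dataOffset);
        if (p_Ts)
            XX_Print("time stamp: 0x%llx\r\n", (unsigned long long)*p_Ts);
        if (p_Hash)
            XX_Print("hash result at %p\r\n", p_Hash);
    }
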
-+
-+
-+/** @} */ /* end of FM_VSP_control_grp group */
-+/** @} */ /* end of FM_VSP_grp group */
-+/** @} */ /* end of FM_grp group */
-+
-+
-+#endif /* __FM_VSP_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/Peripherals/mii_acc_ext.h
-@@ -0,0 +1,76 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+
-+#ifndef __MII_ACC_EXT_H
-+#define __MII_ACC_EXT_H
-+
-+
-+/**************************************************************************//**
-+ @Function MII_ReadPhyReg
-+
-+ @Description This routine is called to read a specified PHY
-+ register value.
-+
-+ @Param[in] h_MiiAccess - Handle to MII configuration access registers
-+ @Param[in] phyAddr - PHY address (0-31).
-+ @Param[in] reg - PHY register to read
-+ @Param[out] p_Data - Gets the register value.
-+
-+ @Return Always zero (success).
-+*//***************************************************************************/
-+int MII_ReadPhyReg(t_Handle h_MiiAccess,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t *p_Data);
-+
-+/**************************************************************************//**
-+ @Function MII_WritePhyReg
-+
-+ @Description This routine is called to write data to a specified PHY
-+ register.
-+
-+ @Param[in] h_MiiAccess - Handle to MII configuration access registers
-+ @Param[in] phyAddr - PHY address (0-31).
-+ @Param[in] reg - PHY register to write
-+ @Param[in] data - Data to write in register.
-+
-+ @Return Always zero (success).
-+*//***************************************************************************/
-+int MII_WritePhyReg(t_Handle h_MiiAccess,
-+ uint8_t phyAddr,
-+ uint8_t reg,
-+ uint16_t data);
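
A small usage sketch for these two routines: a read-modify-write of PHY register 0 (the standard IEEE 802.3 BMCR), restarting auto-negotiation. h_MiiAccess is assumed to come from the MAC driver's MII setup, which is outside this header:

    #include "mii_acc_ext.h"

    /* Illustration only: set bit 9 (restart auto-negotiation) of PHY
     * register 0 (BMCR) on the PHY at address 'phyAddr'. */
    static int mii_restart_aneg(t_Handle h_MiiAccess, uint8_t phyAddr)
    {
        uint16_t bmcr;

        MII_ReadPhyReg(h_MiiAccess, phyAddr, 0 /* BMCR */, &bmcr);
        bmcr |= 0x0200;                               /* restart AN bit */
        return MII_WritePhyReg(h_MiiAccess, phyAddr, 0 /* BMCR */, bmcr);
    }
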
-+
-+
-+#endif /* __MII_ACC_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/core_ext.h
-@@ -0,0 +1,90 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File core_ext.h
-+
-+ @Description Generic interface to basic core operations.
-+
-+ The system integrator must ensure that this interface is
-+ mapped to a specific core implementation, by including the
-+ appropriate header file.
-+*//***************************************************************************/
-+#ifndef __CORE_EXT_H
-+#define __CORE_EXT_H
-+
-+#ifdef CONFIG_FMAN_ARM
-+#include "arm_ext.h"
-+#include <linux/smp.h>
-+#else
-+#ifdef NCSW_PPC_CORE
-+#include "ppc_ext.h"
-+#elif defined(NCSW_VXWORKS)
-+#include "core_vxw_ext.h"
-+#else
-+#error "Core is not defined!"
-+#endif /* NCSW_CORE */
-+
-+#if (!defined(CORE_IS_LITTLE_ENDIAN) && !defined(CORE_IS_BIG_ENDIAN))
-+#error "Must define core as little-endian or big-endian!"
-+#endif /* (!defined(CORE_IS_LITTLE_ENDIAN) && ... */
-+
-+#ifndef CORE_CACHELINE_SIZE
-+#error "Must define the core cache-line size!"
-+#endif /* !CORE_CACHELINE_SIZE */
-+
-+#endif /* CONFIG_FMAN_ARM */
-+
-+
-+/**************************************************************************//**
-+ @Function CORE_GetId
-+
-+ @Description Returns the core ID in the system.
-+
-+ @Return Core ID.
-+*//***************************************************************************/
-+uint32_t CORE_GetId(void);
-+
-+/**************************************************************************//**
-+ @Function CORE_MemoryBarrier
-+
-+ @Description This routine will cause the core to stop executing any commands
-+ until all previous memory read/write commands are completely out
-+ of the core's pipeline.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void CORE_MemoryBarrier(void);
-+#define fsl_mem_core_barrier() CORE_MemoryBarrier()
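
A hedged sketch of where such a barrier matters: publishing a descriptor to hardware before ringing a doorbell. The descriptor layout and doorbell register are hypothetical and only illustrate the ordering requirement:

    #include "core_ext.h"

    /* Hypothetical descriptor/doorbell pair, for illustration only. */
    struct hw_desc { uint64_t addr; uint32_t len; uint32_t flags; };

    static void publish_descriptor(volatile struct hw_desc *desc,
                                   volatile uint32_t *doorbell,
                                   uint64_t buf, uint32_t len)
    {
        desc->addr  = buf;
        desc->len   = len;
        desc->flags = 1;                 /* mark the descriptor valid */

        /* Drain the descriptor writes before the doorbell write. */
        fsl_mem_core_barrier();

        *doorbell = 1;                   /* notify the hardware */
    }
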
-+
-+#endif /* __CORE_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/arm_ext.h
-@@ -0,0 +1,55 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File arm_ext.h
-+
-+ @Description Core API for ARM cores
-+
-+                These routines must be implemented by each specific ARM
-+                core driver.
-+*//***************************************************************************/
-+#ifndef __ARM_EXT_H
-+#define __ARM_EXT_H
-+
-+#include "part_ext.h"
-+
-+
-+#define CORE_IS_LITTLE_ENDIAN
-+
-+static __inline__ void CORE_MemoryBarrier(void)
-+{
-+ mb();
-+}
-+
-+#endif /* __ARM_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/e500v2_ext.h
-@@ -0,0 +1,476 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File e500v2_ext.h
-+
-+ @Description    E500 external definitions and prototypes.
-+                 This file is not included by the E500
-+                 source file, as that is an assembly file. It is used
-+                 only for prototype exposure, for inclusion
-+                 by user code and other modules.
-+*//***************************************************************************/
-+
-+#ifndef __E500V2_EXT_H
-+#define __E500V2_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+/* Layer 1 Cache Manipulations
-+ *==============================
-+ * Should not be called directly by the user.
-+ */
-+void L1DCache_Invalidate (void);
-+void L1ICache_Invalidate(void);
-+void L1DCache_Enable(void);
-+void L1ICache_Enable(void);
-+void L1DCache_Disable(void);
-+void L1ICache_Disable(void);
-+void L1DCache_Flush(void);
-+void L1ICache_Flush(void);
-+uint32_t L1ICache_IsEnabled(void);
-+uint32_t L1DCache_IsEnabled(void);
-+/*
-+ *
-+ */
-+uint32_t L1DCache_LineLock(uint32_t addr);
-+uint32_t L1ICache_LineLock(uint32_t addr);
-+void L1Cache_BroadCastEnable(void);
-+void L1Cache_BroadCastDisable(void);
-+
-+
-+#define CORE_DCacheEnable E500_DCacheEnable
-+#define CORE_ICacheEnable E500_ICacheEnable
-+#define CORE_DCacheDisable E500_DCacheDisable
-+#define CORE_ICacheDisable E500_ICacheDisable
-+#define CORE_GetId E500_GetId
-+#define CORE_TestAndSet E500_TestAndSet
-+#define CORE_MemoryBarrier E500_MemoryBarrier
-+#define CORE_InstructionSync E500_InstructionSync
-+
-+#define CORE_SetDozeMode E500_SetDozeMode
-+#define CORE_SetNapMode E500_SetNapMode
-+#define CORE_SetSleepMode E500_SetSleepMode
-+#define CORE_SetJogMode E500_SetJogMode
-+#define CORE_SetDeepSleepMode E500_SetDeepSleepMode
-+
-+#define CORE_RecoverDozeMode E500_RecoverDozeMode
-+#define CORE_RecoverNapMode E500_RecoverNapMode
-+#define CORE_RecoverSleepMode E500_RecoverSleepMode
-+#define CORE_RecoverJogMode E500_RecoverJogMode
-+
-+void E500_SetDozeMode(void);
-+void E500_SetNapMode(void);
-+void E500_SetSleepMode(void);
-+void E500_SetJogMode(void);
-+t_Error E500_SetDeepSleepMode(uint32_t bptrAddress);
-+
-+void E500_RecoverDozeMode(void);
-+void E500_RecoverNapMode(void);
-+void E500_RecoverSleepMode(void);
-+void E500_RecoverJogMode(void);
-+
-+
-+/**************************************************************************//**
-+ @Group E500_id E500 Application Programming Interface
-+
-+ @Description E500 API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group E500_init_grp E500 Initialization Unit
-+
-+ @Description E500 initialization unit API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+
-+/**************************************************************************//**
-+ @Function E500_DCacheEnable
-+
-+ @Description Enables the data cache for memory pages that are
-+ not cache inhibited.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_DCacheEnable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_ICacheEnable
-+
-+ @Description Enables the instruction cache for memory pages that are
-+ not cache inhibited.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_ICacheEnable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_DCacheDisable
-+
-+ @Description Disables the data cache.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_DCacheDisable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_ICacheDisable
-+
-+ @Description Disables the instruction cache.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_ICacheDisable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_DCacheFlush
-+
-+ @Description Flushes the data cache
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_DCacheFlush(void);
-+
-+/**************************************************************************//**
-+ @Function E500_ICacheFlush
-+
-+ @Description Flushes the instruction cache.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_ICacheFlush(void);
-+
-+/**************************************************************************//**
-+ @Function E500_DCacheSetStashId
-+
-+ @Description Set Stash Id for data cache
-+
-+ @Param[in] stashId the stash id to be set.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_DCacheSetStashId(uint8_t stashId);
-+
-+/**************************************************************************//**
-+ @Description E500mc L2 Cache Operation Mode
-+*//***************************************************************************/
-+typedef enum e_E500mcL2CacheMode
-+{
-+ e_L2_CACHE_MODE_DATA_ONLY = 0x00000001, /**< Cache data only */
-+ e_L2_CACHE_MODE_INST_ONLY = 0x00000002, /**< Cache instructions only */
-+ e_L2_CACHE_MODE_DATA_AND_INST = 0x00000003 /**< Cache data and instructions */
-+} e_E500mcL2CacheMode;
-+
-+#if defined(CORE_E500MC) || defined(CORE_E5500)
-+/**************************************************************************//**
-+ @Function E500_L2CacheEnable
-+
-+ @Description Enables the cache for memory pages that are not cache inhibited.
-+
-+ @param[in] mode - L2 cache mode: data only, instruction only or instruction and data.
-+
-+ @Return None.
-+
-+ @Cautions      This routine must be called only ONCE for both caches, i.e. it is
-+                not possible to call this routine for the i-cache and then to call it
-+                again for the d-cache; the second call will override the first one.
-+*//***************************************************************************/
-+void E500_L2CacheEnable(e_E500mcL2CacheMode mode);
-+
-+/**************************************************************************//**
-+ @Function E500_L2CacheDisable
-+
-+ @Description   Disables the cache (data, instruction, or both).
-+
-+ @Return None.
-+
-+*//***************************************************************************/
-+void E500_L2CacheDisable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_L2CacheFlush
-+
-+ @Description Flushes the cache.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_L2CacheFlush(void);
-+
-+/**************************************************************************//**
-+ @Function E500_L2SetStashId
-+
-+ @Description Set Stash Id
-+
-+ @Param[in] stashId the stash id to be set.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_L2SetStashId(uint8_t stashId);
-+#endif /* defined(CORE_E500MC) || defined(CORE_E5500) */
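
A hedged sketch of the one-shot enable sequence described in the @Cautions note above, for e500mc/e5500 parts; the stash ID value is a placeholder, not a recommendation:

    #if defined(CORE_E500MC) || defined(CORE_E5500)
    static void enable_backside_l2(void)
    {
        /* Called exactly once; a second call would override this one. */
        E500_L2CacheEnable(e_L2_CACHE_MODE_DATA_AND_INST);

        /* Stash ID is platform specific; 0 is only a placeholder here. */
        E500_L2SetStashId(0);
    }
    #endif
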
-+
-+#ifdef CORE_E6500
-+/**************************************************************************//**
-+ @Function E6500_L2CacheEnable
-+
-+ @Description Enables the cache for memory pages that are not cache inhibited.
-+
-+ @param[in]     clusterBase - Cluster base address; this cache supports data & instruction only.
-+
-+ @Return None.
-+
-+ @Cautions      This routine must be called only ONCE for both caches, i.e. it is
-+                not possible to call this routine for the i-cache and then to call it
-+                again for the d-cache; the second call will override the first one.
-+*//***************************************************************************/
-+void E6500_L2CacheEnable(uintptr_t clusterBase);
-+
-+/**************************************************************************//**
-+ @Function E6500_L2CacheDisable
-+
-+ @Description   Disables the cache (data, instruction, or both).
-+
-+ @Return None.
-+
-+*//***************************************************************************/
-+void E6500_L2CacheDisable(uintptr_t clusterBase);
-+
-+/**************************************************************************//**
-+ @Function E6500_L2CacheFlush
-+
-+ @Description Flushes the cache.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E6500_L2CacheFlush(uintptr_t clusterBase);
-+
-+/**************************************************************************//**
-+ @Function E6500_L2SetStashId
-+
-+ @Description Set Stash Id
-+
-+ @Param[in] stashId the stash id to be set.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E6500_L2SetStashId(uintptr_t clusterBase, uint8_t stashId);
-+
-+/**************************************************************************//**
-+ @Function E6500_GetCcsrBase
-+
-+ @Description Obtain SoC CCSR base address
-+
-+ @Param[in] None.
-+
-+ @Return Physical CCSR base address.
-+*//***************************************************************************/
-+physAddress_t E6500_GetCcsrBase(void);
-+#endif /* CORE_E6500 */
-+
-+/**************************************************************************//**
-+ @Function E500_AddressBusStreamingEnable
-+
-+ @Description Enables address bus streaming on the CCB.
-+
-+ This setting, along with the ECM streaming configuration
-+ parameters, enables address bus streaming on the CCB.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_AddressBusStreamingEnable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_AddressBusStreamingDisable
-+
-+ @Description Disables address bus streaming on the CCB.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_AddressBusStreamingDisable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_AddressBroadcastEnable
-+
-+ @Description Enables address broadcast.
-+
-+ The e500 broadcasts cache management instructions (dcbst, dcblc
-+ (CT = 1), icblc (CT = 1), dcbf, dcbi, mbar, msync, tlbsync, icbi)
-+ based on ABE. ABE must be set to allow management of external
-+ L2 caches.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_AddressBroadcastEnable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_AddressBroadcastDisable
-+
-+ @Description Disables address broadcast.
-+
-+ The e500 broadcasts cache management instructions (dcbst, dcblc
-+ (CT = 1), icblc (CT = 1), dcbf, dcbi, mbar, msync, tlbsync, icbi)
-+ based on ABE. ABE must be set to allow management of external
-+ L2 caches.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void E500_AddressBroadcastDisable(void);
-+
-+/**************************************************************************//**
-+ @Function E500_IsTaskletSupported
-+
-+ @Description Checks if tasklets are supported by the e500 interrupt handler.
-+
-+ @Retval TRUE - Tasklets are supported.
-+ @Retval FALSE - Tasklets are not supported.
-+*//***************************************************************************/
-+bool E500_IsTaskletSupported(void);
-+
-+void E500_EnableTimeBase(void);
-+void E500_DisableTimeBase(void);
-+
-+uint64_t E500_GetTimeBaseTime(void);
-+
-+void E500_GenericIntrInit(void);
-+
-+t_Error E500_SetIntr(int ppcIntrSrc,
-+ void (* Isr)(t_Handle handle),
-+ t_Handle handle);
-+
-+t_Error E500_ClearIntr(int ppcIntrSrc);
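
A hedged sketch of hooking an interrupt handler through E500_SetIntr(); the interrupt source number, device handle and handler body are placeholders, and UNUSED() is the SDK's no-op macro used elsewhere in these headers:

    static void my_isr(t_Handle handle)
    {
        UNUSED(handle);                  /* device-specific handling goes here */
    }

    static t_Error install_my_isr(int ppcIntrSrc, t_Handle h_Dev)
    {
        t_Error err = E500_SetIntr(ppcIntrSrc, my_isr, h_Dev);
        if (err != E_OK)
            return err;
        /* on teardown: E500_ClearIntr(ppcIntrSrc); */
        return E_OK;
    }
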
-+
-+/**************************************************************************//**
-+ @Function E500_GenericIntrHandler
-+
-+ @Description This is the general e500 interrupt handler.
-+
-+ It is called by the main assembly interrupt handler
-+ when an exception occurs and no other function has been
-+ assigned to this exception.
-+
-+ @Param intrEntry - (In) The exception interrupt vector entry.
-+*//***************************************************************************/
-+void E500_GenericIntrHandler(uint32_t intrEntry);
-+
-+/**************************************************************************//**
-+ @Function CriticalIntr
-+
-+ @Description This is the specific critical e500 interrupt handler.
-+
-+                 It is called by the main assembly interrupt handler
-+                 when a critical interrupt occurs.
-+
-+ @Param intrEntry - (In) The exception interrupt vector entry.
-+*//***************************************************************************/
-+void CriticalIntr(uint32_t intrEntry);
-+
-+
-+/**************************************************************************//**
-+ @Function E500_GetId
-+
-+ @Description Returns the core ID in the system.
-+
-+ @Return Core ID.
-+*//***************************************************************************/
-+uint32_t E500_GetId(void);
-+
-+/**************************************************************************//**
-+ @Function E500_TestAndSet
-+
-+ @Description This routine tries to atomically test-and-set an integer
-+ in memory to a non-zero value.
-+
-+ The memory will be set only if it is tested as zero, in which
-+ case the routine returns the new non-zero value; otherwise the
-+ routine returns zero.
-+
-+ @Param[in] p - pointer to a volatile int in memory, on which test-and-set
-+ operation should be made.
-+
-+ @Retval Zero - Operation failed - memory was already set.
-+ @Retval Non-zero - Operation succeeded - memory has been set.
-+*//***************************************************************************/
-+int E500_TestAndSet(volatile int *p);
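
The classic use of such a test-and-set primitive is a tiny busy-wait lock. A sketch, for illustration only (real drivers would normally use the operating system's locking primitives instead):

    static volatile int my_lock = 0;

    static void my_lock_acquire(void)
    {
        /* Spin until this core atomically sets the (previously zero) flag. */
        while (!E500_TestAndSet(&my_lock))
            ;
    }

    static void my_lock_release(void)
    {
        /* Make the protected stores visible before dropping the lock. */
        E500_MemoryBarrier();
        my_lock = 0;
    }
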
-+
-+/**************************************************************************//**
-+ @Function E500_MemoryBarrier
-+
-+ @Description This routine will cause the core to stop executing any commands
-+ until all previous memory read/write commands are completely out
-+ of the core's pipeline.
-+
-+ @Return None.
-+*//***************************************************************************/
-+static __inline__ void E500_MemoryBarrier(void)
-+{
-+#ifndef CORE_E500V2
-+ __asm__ ("mbar 1");
-+#else /* CORE_E500V2 */
-+ /**** ERRATA WORK AROUND START ****/
-+ /* ERRATA num: CPU1 */
-+ /* Description: "mbar MO = 1" instruction fails to order caching-inhibited
-+ guarded loads and stores. */
-+
-+ /* "msync" instruction is used instead */
-+
-+ __asm__ ("msync");
-+
-+ /**** ERRATA WORK AROUND END ****/
-+#endif /* CORE_E500V2 */
-+}
-+
-+/**************************************************************************//**
-+ @Function E500_InstructionSync
-+
-+ @Description This routine will cause the core to wait for previous instructions
-+ (including any interrupts they generate) to complete before the
-+ synchronization command executes, which purges all instructions
-+ from the processor's pipeline and refetches the next instruction.
-+
-+ @Return None.
-+*//***************************************************************************/
-+static __inline__ void E500_InstructionSync(void)
-+{
-+ __asm__ ("isync");
-+}
-+
-+
-+/** @} */ /* end of E500_init_grp group */
-+/** @} */ /* end of E500_grp group */
-+
-+
-+#endif /* __E500V2_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/cores/ppc_ext.h
-@@ -0,0 +1,141 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File ppc_ext.h
-+
-+ @Description Core API for PowerPC cores
-+
-+ These routines must be implemented by each specific PowerPC
-+ core driver.
-+*//***************************************************************************/
-+#ifndef __PPC_EXT_H
-+#define __PPC_EXT_H
-+
-+#include "part_ext.h"
-+
-+
-+#define CORE_IS_BIG_ENDIAN
-+
-+#if defined(CORE_E300) || defined(CORE_E500V2)
-+#define CORE_CACHELINE_SIZE 32
-+#elif defined(CORE_E500MC) || defined(CORE_E5500) || defined(CORE_E6500)
-+#define CORE_CACHELINE_SIZE 64
-+#else
-+#error "Core not defined!"
-+#endif /* defined(CORE_E300) || ... */
-+
-+
-+/**************************************************************************//**
-+ @Function CORE_TestAndSet
-+
-+ @Description This routine tries to atomically test-and-set an integer
-+ in memory to a non-zero value.
-+
-+ The memory will be set only if it is tested as zero, in which
-+ case the routine returns the new non-zero value; otherwise the
-+ routine returns zero.
-+
-+ @Param[in] p - pointer to a volatile int in memory, on which test-and-set
-+ operation should be made.
-+
-+ @Retval Zero - Operation failed - memory was already set.
-+ @Retval Non-zero - Operation succeeded - memory has been set.
-+*//***************************************************************************/
-+int CORE_TestAndSet(volatile int *p);
-+
-+/**************************************************************************//**
-+ @Function CORE_InstructionSync
-+
-+ @Description This routine will cause the core to wait for previous instructions
-+ (including any interrupts they generate) to complete before the
-+ synchronization command executes, which purges all instructions
-+ from the processor's pipeline and refetches the next instruction.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void CORE_InstructionSync(void);
-+
-+/**************************************************************************//**
-+ @Function CORE_DCacheEnable
-+
-+ @Description Enables the data cache for memory pages that are
-+ not cache inhibited.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void CORE_DCacheEnable(void);
-+
-+/**************************************************************************//**
-+ @Function CORE_ICacheEnable
-+
-+ @Description Enables the instruction cache for memory pages that are
-+ not cache inhibited.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void CORE_ICacheEnable(void);
-+
-+/**************************************************************************//**
-+ @Function CORE_DCacheDisable
-+
-+ @Description Disables the data cache.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void CORE_DCacheDisable(void);
-+
-+/**************************************************************************//**
-+ @Function CORE_ICacheDisable
-+
-+ @Description Disables the instruction cache.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void CORE_ICacheDisable(void);
-+
-+
-+
-+#if defined(CORE_E300)
-+#include "e300_ext.h"
-+#elif defined(CORE_E500V2) || defined(CORE_E500MC) || defined(CORE_E5500) || defined(CORE_E6500)
-+#include "e500v2_ext.h"
-+#if !defined(NCSW_LINUX)
-+#include "e500v2_asm_ext.h"
-+#endif
-+#else
-+#error "Core not defined!"
-+#endif
-+
-+
-+#endif /* __PPC_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/ddr_std_ext.h
-@@ -0,0 +1,77 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DDR_SDT_EXT_H
-+#define __DDR_SDT_EXT_H
-+
-+
-+/**************************************************************************//**
-+ @Group ddr_Generic_Resources
-+
-+ @Description ddr generic functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+
-+/**************************************************************************//**
-+ @Description SPD maximum size
-+*//***************************************************************************/
-+#define SPD_MAX_SIZE 256
-+
-+/**************************************************************************//**
-+ @Description DDR types select
-+*//***************************************************************************/
-+typedef enum e_DdrType
-+{
-+ e_DDR_DDR1,
-+ e_DDR_DDR2,
-+ e_DDR_DDR3,
-+ e_DDR_DDR3L,
-+ e_DDR_DDR4
-+} e_DdrType;
-+
-+/**************************************************************************//**
-+ @Description DDR Mode.
-+*//***************************************************************************/
-+typedef enum e_DdrMode
-+{
-+ e_DDR_BUS_WIDTH_32BIT,
-+ e_DDR_BUS_WIDTH_64BIT
-+} e_DdrMode;
-+
-+/** @} */ /* end of ddr_Generic_Resources group */
-+
-+
-+
-+#endif /* __DDR_SDT_EXT_H */
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/debug_ext.h
-@@ -0,0 +1,233 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File debug_ext.h
-+
-+ @Description Debug mode definitions.
-+*//***************************************************************************/
-+
-+#ifndef __DEBUG_EXT_H
-+#define __DEBUG_EXT_H
-+
-+#include "std_ext.h"
-+#include "xx_ext.h"
-+#include "memcpy_ext.h"
-+#if (DEBUG_ERRORS > 0)
-+#include "sprint_ext.h"
-+#include "string_ext.h"
-+#endif /* DEBUG_ERRORS > 0 */
-+
-+
-+#if (DEBUG_ERRORS > 0)
-+
-+/* Internally used macros */
-+
-+#define DUMP_Print XX_Print
-+#define DUMP_MAX_LEVELS 6
-+#define DUMP_IDX_LEN 6
-+#define DUMP_MAX_STR 64
-+
-+
-+#define _CREATE_DUMP_SUBSTR(phrase) \
-+ dumpTmpLevel = 0; dumpSubStr[0] = '\0'; \
-+ snprintf(dumpTmpStr, DUMP_MAX_STR, "%s", #phrase); \
-+ p_DumpToken = strtok(dumpTmpStr, (dumpIsArr[0] ? "[" : ".")); \
-+ while ((p_DumpToken != NULL) && (dumpTmpLevel < DUMP_MAX_LEVELS)) \
-+ { \
-+ strlcat(dumpSubStr, p_DumpToken, DUMP_MAX_STR); \
-+ if (dumpIsArr[dumpTmpLevel]) \
-+ { \
-+ strlcat(dumpSubStr, dumpIdxStr[dumpTmpLevel], DUMP_MAX_STR); \
-+ p_DumpToken = strtok(NULL, "."); \
-+ } \
-+ if ((p_DumpToken != NULL) && \
-+ ((p_DumpToken = strtok(NULL, (dumpIsArr[++dumpTmpLevel] ? "[" : "."))) != NULL)) \
-+ strlcat(dumpSubStr, ".", DUMP_MAX_STR); \
-+ }
-+
-+
-+/**************************************************************************//**
-+ @Group gen_id General Drivers Utilities
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group dump_id Memory and Registers Dump Mechanism
-+
-+ @Description Macros for dumping memory mapped structures.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Declaration of dump mechanism variables.
-+
-+ This macro must be declared at the beginning of each routine
-+ which uses the dump mechanism macros, before the routine's code
-+ starts.
-+*//***************************************************************************/
-+#define DECLARE_DUMP \
-+ char dumpIdxStr[DUMP_MAX_LEVELS + 1][DUMP_IDX_LEN] = { "", }; \
-+ char dumpSubStr[DUMP_MAX_STR] = ""; \
-+ char dumpTmpStr[DUMP_MAX_STR] = ""; \
-+ char *p_DumpToken = NULL; \
-+ int dumpArrIdx = 0, dumpArrSize = 0, dumpLevel = 0, dumpTmpLevel = 0; \
-+ uint8_t dumpIsArr[DUMP_MAX_LEVELS + 1] = { 0 }; \
-+ /* Prevent warnings if not all used */ \
-+ UNUSED(dumpIdxStr[0][0]); \
-+ UNUSED(dumpSubStr[0]); \
-+ UNUSED(dumpTmpStr[0]); \
-+ UNUSED(p_DumpToken); \
-+ UNUSED(dumpArrIdx); \
-+ UNUSED(dumpArrSize); \
-+ UNUSED(dumpLevel); \
-+ UNUSED(dumpTmpLevel); \
-+ UNUSED(dumpIsArr[0]);
-+
-+
-+/**************************************************************************//**
-+ @Description Prints a title for a subsequent dumped structure or memory.
-+
-+                 The inputs for this macro are the structure/memory title and
-+                 its base address.
-+*//***************************************************************************/
-+#define DUMP_TITLE(addr, msg) \
-+ DUMP_Print("\r\n"); DUMP_Print msg; \
-+ if (addr) \
-+ DUMP_Print(" (%p)", (addr)); \
-+ DUMP_Print("\r\n---------------------------------------------------------\r\n");
-+
-+/**************************************************************************//**
-+ @Description Prints a subtitle for a subsequent dumped sub-structure (optional).
-+
-+                 The input for this macro is the sub-structure subtitle.
-+ A separating line with this subtitle will be printed.
-+*//***************************************************************************/
-+#define DUMP_SUBTITLE(subtitle) \
-+ DUMP_Print("----------- "); DUMP_Print subtitle; DUMP_Print("\r\n")
-+
-+
-+/**************************************************************************//**
-+ @Description Dumps a memory region in 4-bytes aligned format.
-+
-+                 The inputs for this macro are the base address and size
-+                 (in bytes) of the memory region.
-+*//***************************************************************************/
-+#define DUMP_MEMORY(addr, size) \
-+ MemDisp((uint8_t *)(addr), (int)(size))
-+
-+
-+/**************************************************************************//**
-+ @Description Declares a dump loop, for dumping a sub-structure array.
-+
-+ The inputs for this macro are:
-+ - idx: an index variable, for indexing the sub-structure items
-+ inside the loop. This variable must be declared separately
-+ in the beginning of the routine.
-+ - cnt: the number of times to repeat the loop. This number should
-+ equal the number of items in the sub-structures array.
-+
-+                 Note that the body of the loop must be written inside braces.
-+*//***************************************************************************/
-+#define DUMP_SUBSTRUCT_ARRAY(idx, cnt) \
-+ for (idx=0, dumpIsArr[dumpLevel++] = 1; \
-+ (idx < cnt) && (dumpLevel > 0) && snprintf(dumpIdxStr[dumpLevel-1], DUMP_IDX_LEN, "[%d]", idx); \
-+ idx++, ((idx < cnt) || (dumpIsArr[--dumpLevel] = 0)))
-+
-+
-+/**************************************************************************//**
-+ @Description Dumps a structure's member variable.
-+
-+ The input for this macro is the full reference for the member
-+ variable, where the structure is referenced using a pointer.
-+
-+                 Note that a member array must be dumped using the DUMP_ARR macro,
-+                 rather than this macro.
-+
-+ If the member variable is part of a sub-structure hierarchy,
-+ the full hierarchy (including array indexing) must be specified.
-+
-+ Examples: p_Struct->member
-+ p_Struct->sub.member
-+ p_Struct->sub[i].member
-+*//***************************************************************************/
-+#define DUMP_VAR(st, phrase) \
-+ do { \
-+ void *addr = (void *)&((st)->phrase); \
-+ physAddress_t physAddr = XX_VirtToPhys(addr); \
-+ _CREATE_DUMP_SUBSTR(phrase); \
-+ DUMP_Print("0x%010llX: 0x%08x%8s\t%s\r\n", \
-+ physAddr, GET_UINT32(*(uint32_t*)addr), "", dumpSubStr); \
-+ } while (0)
-+
-+
-+/**************************************************************************//**
-+ @Description Dumps a structure's members array.
-+
-+ The input for this macro is the full reference for the members
-+ array, where the structure is referenced using a pointer.
-+
-+ If the members array is part of a sub-structure hierarchy,
-+ the full hierarchy (including array indexing) must be specified.
-+
-+ Examples: p_Struct->array
-+ p_Struct->sub.array
-+ p_Struct->sub[i].array
-+*//***************************************************************************/
-+#define DUMP_ARR(st, phrase) \
-+ do { \
-+ physAddress_t physAddr; \
-+ _CREATE_DUMP_SUBSTR(phrase); \
-+ dumpArrSize = ARRAY_SIZE((st)->phrase); \
-+ for (dumpArrIdx=0; dumpArrIdx < dumpArrSize; dumpArrIdx++) { \
-+ physAddr = XX_VirtToPhys((void *)&((st)->phrase[dumpArrIdx])); \
-+ DUMP_Print("0x%010llX: 0x%08x%8s\t%s[%d]\r\n", \
-+ physAddr, GET_UINT32((st)->phrase[dumpArrIdx]), "", dumpSubStr, dumpArrIdx); \
-+ } \
-+ } while (0)
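
A hedged sketch of how these macros compose inside a register-dump routine. t_MyRegs and p_Regs are hypothetical, DECLARE_DUMP must come first as documented above, and all of this is only compiled when DEBUG_ERRORS > 0:

    /* Hypothetical memory-mapped register block, for illustration only. */
    typedef struct { uint32_t cfg; uint32_t status; uint32_t stats[4]; } t_MyRegs;

    static void DumpMyRegs(t_MyRegs *p_Regs)
    {
        DECLARE_DUMP;

        DUMP_TITLE(p_Regs, ("MyBlock registers"));
        DUMP_VAR(p_Regs, cfg);
        DUMP_VAR(p_Regs, status);

        DUMP_SUBTITLE(("statistics"));
        DUMP_ARR(p_Regs, stats);
        /* Arrays of sub-structures would use DUMP_SUBSTRUCT_ARRAY(i, n) { ... } */
    }
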
-+
-+
-+
-+#endif /* DEBUG_ERRORS > 0 */
-+
-+
-+/** @} */ /* end of dump_id group */
-+/** @} */ /* end of gen_id group */
-+
-+
-+#endif /* __DEBUG_EXT_H */
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/endian_ext.h
-@@ -0,0 +1,447 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+
-+ @File endian_ext.h
-+
-+ @Description Big/little endian swapping routines.
-+*//***************************************************************************/
-+
-+#ifndef __ENDIAN_EXT_H
-+#define __ENDIAN_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group gen_id General Drivers Utilities
-+
-+ @Description General usage API. This API is intended for usage by both the
-+ internal modules and the user's application.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group endian_id Big/Little-Endian Conversion
-+
-+ @Description Routines and macros for Big/Little-Endian conversion and
-+ general byte swapping.
-+
-+ All routines and macros are expecting unsigned values as
-+ parameters, but will generate the correct result also for
-+ signed values. Therefore, signed/unsigned casting is allowed.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection Byte-Swap Macros
-+
-+ Macros for swapping byte order.
-+
-+ @Cautions The parameters of these macros are evaluated multiple times.
-+ For calculated expressions or expressions that contain function
-+ calls it is recommended to use the byte-swap routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Swaps the byte order of a given 16-bit value.
-+
-+ @Param[in] val - The 16-bit value to swap.
-+
-+ @Return         The byte-swapped value.
-+
-+ @Cautions The given value is evaluated multiple times by this macro.
-+ For calculated expressions or expressions that contain function
-+ calls it is recommended to use the SwapUint16() routine.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define SWAP_UINT16(val) \
-+ ((uint16_t)((((val) & 0x00FF) << 8) | (((val) & 0xFF00) >> 8)))
-+
-+/**************************************************************************//**
-+ @Description Swaps the byte order of a given 32-bit value.
-+
-+ @Param[in] val - The 32-bit value to swap.
-+
-+ @Return         The byte-swapped value.
-+
-+ @Cautions The given value is evaluated multiple times by this macro.
-+ For calculated expressions or expressions that contain function
-+ calls it is recommended to use the SwapUint32() routine.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define SWAP_UINT32(val) \
-+ ((uint32_t)((((val) & 0x000000FF) << 24) | \
-+ (((val) & 0x0000FF00) << 8) | \
-+ (((val) & 0x00FF0000) >> 8) | \
-+ (((val) & 0xFF000000) >> 24)))
-+
-+/**************************************************************************//**
-+ @Description Swaps the byte order of a given 64-bit value.
-+
-+ @Param[in] val - The 64-bit value to swap.
-+
-+ @Return         The byte-swapped value.
-+
-+ @Cautions The given value is evaluated multiple times by this macro.
-+ For calculated expressions or expressions that contain function
-+ calls it is recommended to use the SwapUint64() routine.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define SWAP_UINT64(val) \
-+ ((uint64_t)((((val) & 0x00000000000000FFULL) << 56) | \
-+ (((val) & 0x000000000000FF00ULL) << 40) | \
-+ (((val) & 0x0000000000FF0000ULL) << 24) | \
-+ (((val) & 0x00000000FF000000ULL) << 8) | \
-+ (((val) & 0x000000FF00000000ULL) >> 8) | \
-+ (((val) & 0x0000FF0000000000ULL) >> 24) | \
-+ (((val) & 0x00FF000000000000ULL) >> 40) | \
-+ (((val) & 0xFF00000000000000ULL) >> 56)))
-+
-+/* @} */
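
A short worked example: SWAP_UINT16(0x1234) evaluates to 0x3412 and SWAP_UINT32(0x12345678) to 0x78563412. Because each macro evaluates its argument more than once, expressions with side effects should use the inline routines defined just below; a sketch:

    static uint32_t swap_example(const uint16_t *p_RawLe)
    {
        uint16_t id  = SWAP_UINT16(0x1234);        /* == 0x3412 */
        uint32_t len = SWAP_UINT32(0x12345678);    /* == 0x78563412 */

        /* An argument with side effects, e.g. *p++, would be evaluated
         * more than once by the macro; the SwapUint16() routine is safe. */
        uint16_t first = SwapUint16(*p_RawLe);

        return id + len + first;
    }
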
-+
-+/**************************************************************************//**
-+ @Collection Byte-Swap Routines
-+
-+ Routines for swapping the byte order of a given parameter and
-+ returning the swapped value.
-+
-+ These inline routines are safer than the byte-swap macros,
-+ because they evaluate the parameter expression only once.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function SwapUint16
-+
-+ @Description Returns the byte-swapped value of a given 16-bit value.
-+
-+ @Param[in] val - The 16-bit value.
-+
-+ @Return The byte-swapped value of the parameter.
-+*//***************************************************************************/
-+static __inline__ uint16_t SwapUint16(uint16_t val)
-+{
-+ return (uint16_t)(((val & 0x00FF) << 8) |
-+ ((val & 0xFF00) >> 8));
-+}
-+
-+/**************************************************************************//**
-+ @Function SwapUint32
-+
-+ @Description Returns the byte-swapped value of a given 32-bit value.
-+
-+ @Param[in] val - The 32-bit value.
-+
-+ @Return The byte-swapped value of the parameter.
-+*//***************************************************************************/
-+static __inline__ uint32_t SwapUint32(uint32_t val)
-+{
-+ return (uint32_t)(((val & 0x000000FF) << 24) |
-+ ((val & 0x0000FF00) << 8) |
-+ ((val & 0x00FF0000) >> 8) |
-+ ((val & 0xFF000000) >> 24));
-+}
-+
-+/**************************************************************************//**
-+ @Function SwapUint64
-+
-+ @Description Returns the byte-swapped value of a given 64-bit value.
-+
-+ @Param[in] val - The 64-bit value.
-+
-+ @Return The byte-swapped value of the parameter.
-+*//***************************************************************************/
-+static __inline__ uint64_t SwapUint64(uint64_t val)
-+{
-+ return (uint64_t)(((val & 0x00000000000000FFULL) << 56) |
-+ ((val & 0x000000000000FF00ULL) << 40) |
-+ ((val & 0x0000000000FF0000ULL) << 24) |
-+ ((val & 0x00000000FF000000ULL) << 8) |
-+ ((val & 0x000000FF00000000ULL) >> 8) |
-+ ((val & 0x0000FF0000000000ULL) >> 24) |
-+ ((val & 0x00FF000000000000ULL) >> 40) |
-+ ((val & 0xFF00000000000000ULL) >> 56));
-+}
-+
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection In-place Byte-Swap-And-Set Routines
-+
-+ Routines for swapping the byte order of a given variable and
-+ setting the swapped value back to the same variable.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function SwapUint16P
-+
-+ @Description Swaps the byte order of a given 16-bit variable.
-+
-+ @Param[in] p_Val - Pointer to the 16-bit variable.
-+
-+ @Return None.
-+*//***************************************************************************/
-+static __inline__ void SwapUint16P(uint16_t *p_Val)
-+{
-+ *p_Val = SwapUint16(*p_Val);
-+}
-+
-+/**************************************************************************//**
-+ @Function SwapUint32P
-+
-+ @Description Swaps the byte order of a given 32-bit variable.
-+
-+ @Param[in] p_Val - Pointer to the 32-bit variable.
-+
-+ @Return None.
-+*//***************************************************************************/
-+static __inline__ void SwapUint32P(uint32_t *p_Val)
-+{
-+ *p_Val = SwapUint32(*p_Val);
-+}
-+
-+/**************************************************************************//**
-+ @Function SwapUint64P
-+
-+ @Description Swaps the byte order of a given 64-bit variable.
-+
-+ @Param[in] p_Val - Pointer to the 64-bit variable.
-+
-+ @Return None.
-+*//***************************************************************************/
-+static __inline__ void SwapUint64P(uint64_t *p_Val)
-+{
-+ *p_Val = SwapUint64(*p_Val);
-+}
-+
-+/* @} */
-+
-+
-+/**************************************************************************//**
-+ @Collection Little-Endian Conversion Macros
-+
-+ These macros convert given parameters to or from Little-Endian
-+ format. Use these macros when you want to read or write a specific
-+ Little-Endian value in memory, without knowing the CPU
-+ byte order in advance.
-+
-+ These macros use the byte-swap routines. For conversion of
-+ constants in initialization structures, you may use the CONST
-+ versions of these macros (see below), which are using the
-+ byte-swap macros instead.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Converts a given 16-bit value from CPU byte order to
-+ Little-Endian byte order.
-+
-+ @Param[in] val - The 16-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CPU_TO_LE16(val) SwapUint16(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 32-bit value from CPU byte order to
-+ Little-Endian byte order.
-+
-+ @Param[in] val - The 32-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CPU_TO_LE32(val) SwapUint32(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 64-bit value from CPU byte order to
-+ Little-Endian byte order.
-+
-+ @Param[in] val - The 64-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CPU_TO_LE64(val) SwapUint64(val)
-+
-+
-+/**************************************************************************//**
-+ @Description Converts a given 16-bit value from Little-Endian byte order to
-+ CPU byte order.
-+
-+ @Param[in] val - The 16-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define LE16_TO_CPU(val) CPU_TO_LE16(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 32-bit value from Little-Endian byte order to
-+ CPU byte order.
-+
-+ @Param[in] val - The 32-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define LE32_TO_CPU(val) CPU_TO_LE32(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 64-bit value from Little-Endian byte order to
-+ CPU byte order.
-+
-+ @Param[in] val - The 64-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define LE64_TO_CPU(val) CPU_TO_LE64(val)
-+
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection Little-Endian Constant Conversion Macros
-+
-+ These macros convert given constants to or from Little-Endian
-+ format. Use these macros when you want to read or write a specific
-+ Little-Endian constant in memory, without knowing the
-+ CPU byte order in advance.
-+
-+ These macros use the byte-swap macros, therefore can be used for
-+ conversion of constants in initialization structures.
-+
-+ @Cautions The parameters of these macros are evaluated multiple times.
-+ For non-constant expressions, use the non-CONST macro versions.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Converts a given 16-bit constant from CPU byte order to
-+ Little-Endian byte order.
-+
-+ @Param[in] val - The 16-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CONST_CPU_TO_LE16(val) SWAP_UINT16(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 32-bit constant from CPU byte order to
-+ Little-Endian byte order.
-+
-+ @Param[in] val - The 32-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CONST_CPU_TO_LE32(val) SWAP_UINT32(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 64-bit constant from CPU byte order to
-+ Little-Endian byte order.
-+
-+ @Param[in] val - The 64-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CONST_CPU_TO_LE64(val) SWAP_UINT64(val)
-+
-+
-+/**************************************************************************//**
-+ @Description Converts a given 16-bit constant from Little-Endian byte order
-+ to CPU byte order.
-+
-+ @Param[in] val - The 16-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CONST_LE16_TO_CPU(val) CONST_CPU_TO_LE16(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 32-bit constant from Little-Endian byte order
-+ to CPU byte order.
-+
-+ @Param[in] val - The 32-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CONST_LE32_TO_CPU(val) CONST_CPU_TO_LE32(val)
-+
-+/**************************************************************************//**
-+ @Description Converts a given 64-bit constant from Little-Endian byte order
-+ to CPU byte order.
-+
-+ @Param[in] val - The 64-bit value to convert.
-+
-+ @Return The converted value.
-+
-+ @hideinitializer
-+*//***************************************************************************/
-+#define CONST_LE64_TO_CPU(val) CONST_CPU_TO_LE64(val)
-+
-+/* @} */
-+
-+
-+/** @} */ /* end of endian_id group */
-+/** @} */ /* end of gen_id group */
-+
-+
-+#endif /* __ENDIAN_EXT_H */
-+
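
The two conversion families above differ only in what they may be applied to: CPU_TO_LE*/LE*_TO_CPU call the inline byte-swap routines and suit run-time values, while CONST_CPU_TO_LE* expand to the SWAP_UINT* macros and therefore remain constant expressions usable in static initializers. A short sketch under those definitions; the descriptor layout and field names below are illustrative only, not part of the SDK:

    #include <stdint.h>
    #include "endian_ext.h"

    /* A descriptor that hardware expects in little-endian layout in memory. */
    struct le_descriptor {
        uint32_t addr_le;
        uint16_t len_le;
        uint16_t flags_le;
    };

    /* CONST_* variants are plain constant expressions, so they are legal in a
     * static initializer. */
    static const struct le_descriptor template_desc = {
        .addr_le  = CONST_CPU_TO_LE32(0x80000000u),
        .len_le   = CONST_CPU_TO_LE16(64),
        .flags_le = CONST_CPU_TO_LE16(0x0001),
    };

    void fill_descriptor(struct le_descriptor *d, uint32_t addr, uint16_t len)
    {
        /* Run-time values go through the routine-based macros (single evaluation). */
        d->addr_le  = CPU_TO_LE32(addr);
        d->len_le   = CPU_TO_LE16(len);
        d->flags_le = template_desc.flags_le;
    }

    uint32_t descriptor_addr(const struct le_descriptor *d)
    {
        return LE32_TO_CPU(d->addr_le);
    }
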
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/enet_ext.h
-@@ -0,0 +1,205 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File enet_ext.h
-+
-+ @Description Ethernet generic definitions and enums.
-+*//***************************************************************************/
-+
-+#ifndef __ENET_EXT_H
-+#define __ENET_EXT_H
-+
-+#include "fsl_enet.h"
-+
-+#define ENET_NUM_OCTETS_PER_ADDRESS 6 /**< Number of octets (8-bit bytes) in an ethernet address */
-+#define ENET_GROUP_ADDR 0x01 /**< Group address mask for ethernet addresses */
-+
-+
-+/**************************************************************************//**
-+ @Description Ethernet Address
-+*//***************************************************************************/
-+typedef uint8_t t_EnetAddr[ENET_NUM_OCTETS_PER_ADDRESS];
-+
-+/**************************************************************************//**
-+ @Description Ethernet Address Type.
-+*//***************************************************************************/
-+typedef enum e_EnetAddrType
-+{
-+ e_ENET_ADDR_TYPE_INDIVIDUAL, /**< Individual (unicast) address */
-+ e_ENET_ADDR_TYPE_GROUP, /**< Group (multicast) address */
-+ e_ENET_ADDR_TYPE_BROADCAST /**< Broadcast address */
-+} e_EnetAddrType;
-+
-+/**************************************************************************//**
-+ @Description Ethernet MAC-PHY Interface
-+*//***************************************************************************/
-+typedef enum e_EnetInterface
-+{
-+ e_ENET_IF_MII = E_ENET_IF_MII, /**< MII interface */
-+ e_ENET_IF_RMII = E_ENET_IF_RMII, /**< RMII interface */
-+ e_ENET_IF_SMII = E_ENET_IF_SMII, /**< SMII interface */
-+ e_ENET_IF_GMII = E_ENET_IF_GMII, /**< GMII interface */
-+ e_ENET_IF_RGMII = E_ENET_IF_RGMII, /**< RGMII interface */
-+ e_ENET_IF_TBI = E_ENET_IF_TBI, /**< TBI interface */
-+ e_ENET_IF_RTBI = E_ENET_IF_RTBI, /**< RTBI interface */
-+ e_ENET_IF_SGMII = E_ENET_IF_SGMII, /**< SGMII interface */
-+ e_ENET_IF_XGMII = E_ENET_IF_XGMII, /**< XGMII interface */
-+ e_ENET_IF_QSGMII= E_ENET_IF_QSGMII, /**< QSGMII interface */
-+ e_ENET_IF_XFI = E_ENET_IF_XFI /**< XFI interface */
-+} e_EnetInterface;
-+
-+#define ENET_IF_SGMII_BASEX 0x80000000 /**< SGMII/QSGMII interface with 1000BaseX
-+ auto-negotiation between MAC and phy
-+ or backplane;
-+ Note: 1000BaseX auto-negotiation relates
-+ only to interface between MAC and phy/backplane,
-+ SGMII phy can still synchronize with far-end phy
-+ at 10Mbps, 100Mbps or 1000Mbps */
-+
-+/**************************************************************************//**
-+ @Description Ethernet Duplex Mode
-+*//***************************************************************************/
-+typedef enum e_EnetDuplexMode
-+{
-+ e_ENET_HALF_DUPLEX, /**< Half-Duplex mode */
-+ e_ENET_FULL_DUPLEX /**< Full-Duplex mode */
-+} e_EnetDuplexMode;
-+
-+/**************************************************************************//**
-+ @Description Ethernet Speed (nominal data rate)
-+*//***************************************************************************/
-+typedef enum e_EnetSpeed
-+{
-+ e_ENET_SPEED_10 = E_ENET_SPEED_10, /**< 10 Mbps */
-+ e_ENET_SPEED_100 = E_ENET_SPEED_100, /**< 100 Mbps */
-+ e_ENET_SPEED_1000 = E_ENET_SPEED_1000, /**< 1000 Mbps = 1 Gbps */
-+ e_ENET_SPEED_2500 = E_ENET_SPEED_2500, /**< 2500 Mbps = 2.5 Gbps */
-+ e_ENET_SPEED_10000 = E_ENET_SPEED_10000 /**< 10000 Mbps = 10 Gbps */
-+} e_EnetSpeed;
-+
-+/**************************************************************************//**
-+ @Description Ethernet mode (combination of MAC-PHY interface and speed)
-+*//***************************************************************************/
-+typedef enum e_EnetMode
-+{
-+ e_ENET_MODE_INVALID = 0, /**< Invalid Ethernet mode */
-+ e_ENET_MODE_MII_10 = (e_ENET_IF_MII | e_ENET_SPEED_10), /**< 10 Mbps MII */
-+ e_ENET_MODE_MII_100 = (e_ENET_IF_MII | e_ENET_SPEED_100), /**< 100 Mbps MII */
-+ e_ENET_MODE_RMII_10 = (e_ENET_IF_RMII | e_ENET_SPEED_10), /**< 10 Mbps RMII */
-+ e_ENET_MODE_RMII_100 = (e_ENET_IF_RMII | e_ENET_SPEED_100), /**< 100 Mbps RMII */
-+ e_ENET_MODE_SMII_10 = (e_ENET_IF_SMII | e_ENET_SPEED_10), /**< 10 Mbps SMII */
-+ e_ENET_MODE_SMII_100 = (e_ENET_IF_SMII | e_ENET_SPEED_100), /**< 100 Mbps SMII */
-+ e_ENET_MODE_GMII_1000 = (e_ENET_IF_GMII | e_ENET_SPEED_1000), /**< 1000 Mbps GMII */
-+ e_ENET_MODE_RGMII_10 = (e_ENET_IF_RGMII | e_ENET_SPEED_10), /**< 10 Mbps RGMII */
-+ e_ENET_MODE_RGMII_100 = (e_ENET_IF_RGMII | e_ENET_SPEED_100), /**< 100 Mbps RGMII */
-+ e_ENET_MODE_RGMII_1000 = (e_ENET_IF_RGMII | e_ENET_SPEED_1000), /**< 1000 Mbps RGMII */
-+ e_ENET_MODE_TBI_1000 = (e_ENET_IF_TBI | e_ENET_SPEED_1000), /**< 1000 Mbps TBI */
-+ e_ENET_MODE_RTBI_1000 = (e_ENET_IF_RTBI | e_ENET_SPEED_1000), /**< 1000 Mbps RTBI */
-+ e_ENET_MODE_SGMII_10 = (e_ENET_IF_SGMII | e_ENET_SPEED_10),
-+ /**< 10 Mbps SGMII with auto-negotiation between MAC and
-+ SGMII phy according to Cisco SGMII specification */
-+ e_ENET_MODE_SGMII_100 = (e_ENET_IF_SGMII | e_ENET_SPEED_100),
-+ /**< 100 Mbps SGMII with auto-negotiation between MAC and
-+ SGMII phy according to Cisco SGMII specification */
-+ e_ENET_MODE_SGMII_1000 = (e_ENET_IF_SGMII | e_ENET_SPEED_1000),
-+ /**< 1000 Mbps SGMII with auto-negotiation between MAC and
-+ SGMII phy according to Cisco SGMII specification */
-+ e_ENET_MODE_SGMII_2500 = (e_ENET_IF_SGMII | e_ENET_SPEED_2500),
-+ e_ENET_MODE_SGMII_BASEX_10 = (ENET_IF_SGMII_BASEX | e_ENET_IF_SGMII | e_ENET_SPEED_10),
-+ /**< 10 Mbps SGMII with 1000BaseX auto-negotiation between
-+ MAC and SGMII phy or backplane */
-+ e_ENET_MODE_SGMII_BASEX_100 = (ENET_IF_SGMII_BASEX | e_ENET_IF_SGMII | e_ENET_SPEED_100),
-+ /**< 100 Mbps SGMII with 1000BaseX auto-negotiation between
-+ MAC and SGMII phy or backplane */
-+ e_ENET_MODE_SGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | e_ENET_IF_SGMII | e_ENET_SPEED_1000),
-+ /**< 1000 Mbps SGMII with 1000BaseX auto-negotiation between
-+ MAC and SGMII phy or backplane */
-+ e_ENET_MODE_QSGMII_1000 = (e_ENET_IF_QSGMII| e_ENET_SPEED_1000),
-+ /**< 1000 Mbps QSGMII with auto-negotiation between MAC and
-+ QSGMII phy according to Cisco QSGMII specification */
-+ e_ENET_MODE_QSGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | e_ENET_IF_QSGMII| e_ENET_SPEED_1000),
-+ /**< 1000 Mbps QSGMII with 1000BaseX auto-negotiation between
-+ MAC and QSGMII phy or backplane */
-+ e_ENET_MODE_XGMII_10000 = (e_ENET_IF_XGMII | e_ENET_SPEED_10000), /**< 10000 Mbps XGMII */
-+ e_ENET_MODE_XFI_10000 = (e_ENET_IF_XFI | e_ENET_SPEED_10000) /**< 10000 Mbps XFI */
-+} e_EnetMode;
-+
-+
-+#define IS_ENET_MODE_VALID(mode) \
-+ (((mode) == e_ENET_MODE_MII_10 ) || \
-+ ((mode) == e_ENET_MODE_MII_100 ) || \
-+ ((mode) == e_ENET_MODE_RMII_10 ) || \
-+ ((mode) == e_ENET_MODE_RMII_100 ) || \
-+ ((mode) == e_ENET_MODE_SMII_10 ) || \
-+ ((mode) == e_ENET_MODE_SMII_100 ) || \
-+ ((mode) == e_ENET_MODE_GMII_1000 ) || \
-+ ((mode) == e_ENET_MODE_RGMII_10 ) || \
-+ ((mode) == e_ENET_MODE_RGMII_100 ) || \
-+ ((mode) == e_ENET_MODE_RGMII_1000 ) || \
-+ ((mode) == e_ENET_MODE_TBI_1000 ) || \
-+ ((mode) == e_ENET_MODE_RTBI_1000 ) || \
-+ ((mode) == e_ENET_MODE_SGMII_10 ) || \
-+ ((mode) == e_ENET_MODE_SGMII_100 ) || \
-+ ((mode) == e_ENET_MODE_SGMII_1000 ) || \
-+ ((mode) == e_ENET_MODE_SGMII_BASEX_10 ) || \
-+ ((mode) == e_ENET_MODE_SGMII_BASEX_100 ) || \
-+ ((mode) == e_ENET_MODE_SGMII_BASEX_1000 ) || \
-+ ((mode) == e_ENET_MODE_XGMII_10000) || \
-+ ((mode) == e_ENET_MODE_QSGMII_1000) || \
-+ ((mode) == e_ENET_MODE_QSGMII_BASEX_1000) || \
-+ ((mode) == e_ENET_MODE_XFI_10000))
-+
-+
-+#define MAKE_ENET_MODE(_interface, _speed) (e_EnetMode)((_interface) | (_speed))
-+
-+#define ENET_INTERFACE_FROM_MODE(mode) (e_EnetInterface)((mode) & 0x0FFF0000)
-+#define ENET_SPEED_FROM_MODE(mode) (e_EnetSpeed)((mode) & 0x0000FFFF)
-+
-+#define ENET_ADDR_TO_UINT64(_enetAddr) \
-+ (uint64_t)(((uint64_t)(_enetAddr)[0] << 40) | \
-+ ((uint64_t)(_enetAddr)[1] << 32) | \
-+ ((uint64_t)(_enetAddr)[2] << 24) | \
-+ ((uint64_t)(_enetAddr)[3] << 16) | \
-+ ((uint64_t)(_enetAddr)[4] << 8) | \
-+ ((uint64_t)(_enetAddr)[5]))
-+
-+#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enetAddr) \
-+ do { \
-+ int i; \
-+ for (i=0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) \
-+ (_enetAddr)[i] = (uint8_t)((_addr64) >> ((5-i)*8)); \
-+ } while (0)
-+
-+
-+#endif /* __ENET_EXT_H */
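
Since an e_EnetMode is simply the bitwise OR of an interface identifier and a speed value, the helper macros at the end of the header can compose and decompose modes, and ENET_ADDR_TO_UINT64()/MAKE_ENET_ADDR_FROM_UINT64() pack a MAC address into the low six bytes of a 64-bit word. A brief usage sketch, assuming this header; printf is used only for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include "enet_ext.h"

    void enet_mode_example(void)
    {
        /* Compose a mode from interface and speed, then take it apart again. */
        e_EnetMode mode = MAKE_ENET_MODE(e_ENET_IF_RGMII, e_ENET_SPEED_1000);

        if (IS_ENET_MODE_VALID(mode)) {
            e_EnetInterface iface = ENET_INTERFACE_FROM_MODE(mode); /* e_ENET_IF_RGMII */
            e_EnetSpeed     speed = ENET_SPEED_FROM_MODE(mode);     /* e_ENET_SPEED_1000 */
            printf("iface=0x%x speed=0x%x\n", (unsigned)iface, (unsigned)speed);
        }

        /* Pack a MAC address into a 64-bit word and unpack it again. */
        t_EnetAddr mac = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
        uint64_t packed = ENET_ADDR_TO_UINT64(mac);   /* 0x000000049f010203 */

        t_EnetAddr copy;
        MAKE_ENET_ADDR_FROM_UINT64(packed, copy);     /* copy now equals mac */
        (void)copy;
    }
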
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/error_ext.h
-@@ -0,0 +1,529 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File error_ext.h
-+
-+ @Description Error definitions.
-+*//***************************************************************************/
-+
-+#ifndef __ERROR_EXT_H
-+#define __ERROR_EXT_H
-+
-+#if !defined(NCSW_LINUX)
-+#include <errno.h>
-+#endif
-+
-+#include "std_ext.h"
-+#include "xx_ext.h"
-+#include "core_ext.h"
-+
-+
-+
-+
-+/**************************************************************************//**
-+ @Group gen_id General Drivers Utilities
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group gen_error_id Errors, Events and Debug
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/******************************************************************************
-+The scheme below provides the bits description for error codes:
-+
-+ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-+| Reserved (should be zero) | Module ID |
-+
-+ 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
-+| Error Type |
-+******************************************************************************/
-+
-+#define ERROR_CODE(_err) ((((uint32_t)_err) & 0x0000FFFF) | __ERR_MODULE__)
-+
-+#define GET_ERROR_TYPE(_errcode) ((_errcode) & 0x0000FFFF)
-+ /**< Extract error type (#e_ErrorType) from error code (#t_Error) */
-+
-+#define GET_ERROR_MODULE(_errcode) ((_errcode) & 0x00FF0000)
-+ /**< Extract module code from error code (#t_Error) */
-+
-+
-+/**************************************************************************//**
-+ @Description Error Type Enumeration
-+*//***************************************************************************/
-+typedef enum e_ErrorType /* Comments / Associated Message Strings */
-+{ /* ------------------------------------------------------------ */
-+ E_OK = 0 /* Never use "RETURN_ERROR" with E_OK; Use "return E_OK;" */
-+ ,E_WRITE_FAILED = EIO /**< Write access failed on memory/device. */
-+ /* String: none, or device name. */
-+ ,E_NO_DEVICE = ENXIO /**< The associated device is not initialized. */
-+ /* String: none. */
-+ ,E_NOT_AVAILABLE = EAGAIN
-+ /**< Resource is unavailable. */
-+ /* String: none, unless the operation is not the main goal
-+ of the function (in this case add resource description). */
-+ ,E_NO_MEMORY = ENOMEM /**< External memory allocation failed. */
-+ /* String: description of item for which allocation failed. */
-+ ,E_INVALID_ADDRESS = EFAULT
-+ /**< Invalid address. */
-+ /* String: description of the specific violation. */
-+ ,E_BUSY = EBUSY /**< Resource or module is busy. */
-+ /* String: none, unless the operation is not the main goal
-+ of the function (in this case add resource description). */
-+ ,E_ALREADY_EXISTS = EEXIST
-+ /**< Requested resource or item already exists. */
-+ /* Use when resource duplication or sharing are not allowed.
-+ String: none, unless the operation is not the main goal
-+ of the function (in this case add item description). */
-+ ,E_INVALID_OPERATION = ENODEV
-+ /**< The operation/command is invalid (unrecognized). */
-+ /* String: none. */
-+ ,E_INVALID_VALUE = EDOM /**< Invalid value. */
-+ /* Use for non-enumeration parameters, and
-+ only when other error types are not suitable.
-+ String: parameter description + "(should be <attribute>)",
-+ e.g: "Maximum Rx buffer length (should be divisible by 8)",
-+ "Channel number (should be even)". */
-+ ,E_NOT_IN_RANGE = ERANGE/**< Parameter value is out of range. */
-+ /* Don't use this error for enumeration parameters.
-+ String: parameter description + "(should be %d-%d)",
-+ e.g: "Number of pad characters (should be 0-15)". */
-+ ,E_NOT_SUPPORTED = ENOSYS
-+ /**< The function is not supported or not implemented. */
-+ /* String: none. */
-+ ,E_INVALID_STATE /**< The operation is not allowed in current module state. */
-+ /* String: none. */
-+ ,E_INVALID_HANDLE /**< Invalid handle of module or object. */
-+ /* String: none, unless the function takes in more than one
-+ handle (in this case add the handle description) */
-+ ,E_INVALID_ID /**< Invalid module ID (usually enumeration or index). */
-+ /* String: none, unless the function takes in more than one
-+ ID (in this case add the ID description) */
-+ ,E_NULL_POINTER /**< Unexpected NULL pointer. */
-+ /* String: pointer description. */
-+ ,E_INVALID_SELECTION /**< Invalid selection or mode. */
-+ /* Use for enumeration values, only when other error types
-+ are not suitable.
-+ String: parameter description. */
-+ ,E_INVALID_COMM_MODE /**< Invalid communication mode. */
-+ /* String: none, unless the function takes in more than one
-+ communication mode indications (in this case add
-+ parameter description). */
-+ ,E_INVALID_MEMORY_TYPE /**< Invalid memory type. */
-+ /* String: none, unless the function takes in more than one
-+ memory types (in this case add memory description,
-+ e.g: "Data memory", "Buffer descriptors memory"). */
-+ ,E_INVALID_CLOCK /**< Invalid clock. */
-+ /* String: none, unless the function takes in more than one
-+ clocks (in this case add clock description,
-+ e.g: "Rx clock", "Tx clock"). */
-+ ,E_CONFLICT /**< Some setting conflicts with another setting. */
-+ /* String: description of the conflicting settings. */
-+ ,E_NOT_ALIGNED /**< Non-aligned address. */
-+ /* String: parameter description + "(should be %d-bytes aligned)",
-+ e.g: "Rx data buffer (should be 32-bytes aligned)". */
-+ ,E_NOT_FOUND /**< Requested resource or item was not found. */
-+ /* Use only when the resource/item is uniquely identified.
-+ String: none, unless the operation is not the main goal
-+ of the function (in this case add item description). */
-+ ,E_FULL /**< Resource is full. */
-+ /* String: none, unless the operation is not the main goal
-+ of the function (in this case add resource description). */
-+ ,E_EMPTY /**< Resource is empty. */
-+ /* String: none, unless the operation is not the main goal
-+ of the function (in this case add resource description). */
-+ ,E_ALREADY_FREE /**< Specified resource or item is already free or deleted. */
-+ /* String: none, unless the operation is not the main goal
-+ of the function (in this case add item description). */
-+ ,E_READ_FAILED /**< Read access failed on memory/device. */
-+ /* String: none, or device name. */
-+ ,E_INVALID_FRAME /**< Invalid frame object (NULL handle or missing buffers). */
-+ /* String: none. */
-+ ,E_SEND_FAILED /**< Send operation failed on device. */
-+ /* String: none, or device name. */
-+ ,E_RECEIVE_FAILED /**< Receive operation failed on device. */
-+ /* String: none, or device name. */
-+ ,E_TIMEOUT/* = ETIMEDOUT*/ /**< The operation timed out. */
-+ /* String: none. */
-+
-+ ,E_DUMMY_LAST /* NEVER USED */
-+
-+} e_ErrorType;
-+
-+/**************************************************************************//**
-+ @Description Event Type Enumeration
-+*//***************************************************************************/
-+typedef enum e_Event /* Comments / Associated Flags and Message Strings */
-+{ /* ------------------------------------------------------------ */
-+ EV_NO_EVENT = 0 /**< No event; Never used. */
-+
-+ ,EV_RX_DISCARD /**< Received packet discarded (by the driver, and only for
-+ complete packets);
-+ Flags: error flags in case of error, zero otherwise. */
-+ /* String: reason for discard, e.g: "Error in frame",
-+ "Disordered frame", "Incomplete frame", "No frame object". */
-+ ,EV_RX_ERROR /**< Receive error (by hardware/firmware);
-+ Flags: usually status flags from the buffer descriptor. */
-+ /* String: none. */
-+ ,EV_TX_ERROR /**< Transmit error (by hardware/firmware);
-+ Flags: usually status flags from the buffer descriptor. */
-+ /* String: none. */
-+ ,EV_NO_BUFFERS /**< System ran out of buffer objects;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_NO_MB_FRAMES /**< System ran out of multi-buffer frame objects;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_NO_SB_FRAMES /**< System ran out of single-buffer frame objects;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_TX_QUEUE_FULL /**< Transmit queue is full;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_RX_QUEUE_FULL /**< Receive queue is full;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_INTR_QUEUE_FULL /**< Interrupt queue overflow;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_NO_DATA_BUFFER /**< Data buffer allocation (from higher layer) failed;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_OBJ_POOL_EMPTY /**< Objects pool is empty;
-+ Flags: zero. */
-+ /* String: object description (name). */
-+ ,EV_BUS_ERROR /**< Illegal access on bus;
-+ Flags: the address (if available) or bus identifier */
-+ /* String: bus/address/module description. */
-+ ,EV_PTP_TXTS_QUEUE_FULL /**< PTP Tx timestamps queue is full;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_PTP_RXTS_QUEUE_FULL /**< PTP Rx timestamps queue is full;
-+ Flags: zero. */
-+ /* String: none. */
-+ ,EV_DUMMY_LAST
-+
-+} e_Event;
-+
-+
-+/**************************************************************************//**
-+ @Collection Debug Levels for Errors and Events
-+
-+ The level description refers to errors only.
-+ For events, classification is done by the user.
-+
-+ The TRACE, INFO and WARNING levels are allowed only when using
-+ the DBG macro, and are not allowed when using the error macros
-+ (RETURN_ERROR or REPORT_ERROR).
-+ @{
-+*//***************************************************************************/
-+#define REPORT_LEVEL_CRITICAL 1 /**< Crasher: Incorrect flow, NULL pointers/handles. */
-+#define REPORT_LEVEL_MAJOR 2 /**< Cannot proceed: Invalid operation, parameters or
-+ configuration. */
-+#define REPORT_LEVEL_MINOR 3 /**< Recoverable problem: a repeating call with the same
-+ parameters may be successful. */
-+#define REPORT_LEVEL_WARNING 4 /**< Something is not exactly right, yet it is not an error. */
-+#define REPORT_LEVEL_INFO 5 /**< Messages which may be of interest to user/programmer. */
-+#define REPORT_LEVEL_TRACE 6 /**< Program flow messages. */
-+
-+#define EVENT_DISABLED 0xFF /**< Disabled event (not reported at all) */
-+
-+/* @} */
-+
-+
-+
-+#define NO_MSG ("")
-+
-+#ifndef DEBUG_GLOBAL_LEVEL
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
-+#endif /* DEBUG_GLOBAL_LEVEL */
-+
-+#ifndef ERROR_GLOBAL_LEVEL
-+#define ERROR_GLOBAL_LEVEL DEBUG_GLOBAL_LEVEL
-+#endif /* ERROR_GLOBAL_LEVEL */
-+
-+#ifndef EVENT_GLOBAL_LEVEL
-+#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
-+#endif /* EVENT_GLOBAL_LEVEL */
-+
-+#ifdef EVENT_LOCAL_LEVEL
-+#define EVENT_DYNAMIC_LEVEL EVENT_LOCAL_LEVEL
-+#else
-+#define EVENT_DYNAMIC_LEVEL EVENT_GLOBAL_LEVEL
-+#endif /* EVENT_LOCAL_LEVEL */
-+
-+
-+#ifndef DEBUG_DYNAMIC_LEVEL
-+#define DEBUG_USING_STATIC_LEVEL
-+
-+#ifdef DEBUG_STATIC_LEVEL
-+#define DEBUG_DYNAMIC_LEVEL DEBUG_STATIC_LEVEL
-+#else
-+#define DEBUG_DYNAMIC_LEVEL DEBUG_GLOBAL_LEVEL
-+#endif /* DEBUG_STATIC_LEVEL */
-+
-+#else /* DEBUG_DYNAMIC_LEVEL */
-+#ifdef DEBUG_STATIC_LEVEL
-+#error "Please use either DEBUG_STATIC_LEVEL or DEBUG_DYNAMIC_LEVEL (not both)"
-+#else
-+int DEBUG_DYNAMIC_LEVEL = DEBUG_GLOBAL_LEVEL;
-+#endif /* DEBUG_STATIC_LEVEL */
-+#endif /* !DEBUG_DYNAMIC_LEVEL */
-+
-+
-+#ifndef ERROR_DYNAMIC_LEVEL
-+
-+#ifdef ERROR_STATIC_LEVEL
-+#define ERROR_DYNAMIC_LEVEL ERROR_STATIC_LEVEL
-+#else
-+#define ERROR_DYNAMIC_LEVEL ERROR_GLOBAL_LEVEL
-+#endif /* ERROR_STATIC_LEVEL */
-+
-+#else /* ERROR_DYNAMIC_LEVEL */
-+#ifdef ERROR_STATIC_LEVEL
-+#error "Please use either ERROR_STATIC_LEVEL or ERROR_DYNAMIC_LEVEL (not both)"
-+#else
-+int ERROR_DYNAMIC_LEVEL = ERROR_GLOBAL_LEVEL;
-+#endif /* ERROR_STATIC_LEVEL */
-+#endif /* !ERROR_DYNAMIC_LEVEL */
-+
-+#define PRINT_FORMAT "[CPU%02d, %s:%d %s]"
-+#define PRINT_FMT_PARAMS raw_smp_processor_id(), __FILE__, __LINE__, __FUNCTION__
-+
-+#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0))
-+/* No debug/error/event messages at all */
-+#define DBG(_level, _vmsg)
-+
-+#define REPORT_ERROR(_level, _err, _vmsg)
-+
-+#define RETURN_ERROR(_level, _err, _vmsg) \
-+ return ERROR_CODE(_err)
-+
-+#if (REPORT_EVENTS > 0)
-+
-+#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) \
-+ do { \
-+ if (_ev##_LEVEL <= EVENT_DYNAMIC_LEVEL) { \
-+ XX_EventById((uint32_t)(_ev), (t_Handle)(_appId), (uint16_t)(_flg), NO_MSG); \
-+ } \
-+ } while (0)
-+
-+#else
-+
-+#define REPORT_EVENT(_ev, _appId, _flg, _vmsg)
-+
-+#endif /* (REPORT_EVENTS > 0) */
-+
-+
-+#else /* DEBUG_ERRORS > 0 */
-+
-+extern const char *dbgLevelStrings[];
-+extern const char *moduleStrings[];
-+#if (REPORT_EVENTS > 0)
-+extern const char *eventStrings[];
-+#endif /* (REPORT_EVENTS > 0) */
-+
-+char * ErrTypeStrings (e_ErrorType err);
-+
-+
-+#if ((defined(DEBUG_USING_STATIC_LEVEL)) && (DEBUG_DYNAMIC_LEVEL < REPORT_LEVEL_WARNING))
-+/* No need for DBG macro - debug level is higher anyway */
-+#define DBG(_level, _vmsg)
-+#else
-+#define DBG(_level, _vmsg) \
-+ do { \
-+ if (REPORT_LEVEL_##_level <= DEBUG_DYNAMIC_LEVEL) { \
-+ XX_Print("> %s (%s) " PRINT_FORMAT ": ", \
-+ dbgLevelStrings[REPORT_LEVEL_##_level - 1], \
-+ moduleStrings[__ERR_MODULE__ >> 16], \
-+ PRINT_FMT_PARAMS); \
-+ XX_Print _vmsg; \
-+ XX_Print("\r\n"); \
-+ } \
-+ } while (0)
-+#endif /* (defined(DEBUG_USING_STATIC_LEVEL) && (DEBUG_DYNAMIC_LEVEL < WARNING)) */
-+
-+
-+#define REPORT_ERROR(_level, _err, _vmsg) \
-+ do { \
-+ if (REPORT_LEVEL_##_level <= ERROR_DYNAMIC_LEVEL) { \
-+ XX_Print("! %s %s Error " PRINT_FORMAT ": %s; ", \
-+ dbgLevelStrings[REPORT_LEVEL_##_level - 1], \
-+ moduleStrings[__ERR_MODULE__ >> 16], \
-+ PRINT_FMT_PARAMS, \
-+ ErrTypeStrings((e_ErrorType)GET_ERROR_TYPE(_err))); \
-+ XX_Print _vmsg; \
-+ XX_Print("\r\n"); \
-+ } \
-+ } while (0)
-+
-+
-+#define RETURN_ERROR(_level, _err, _vmsg) \
-+ do { \
-+ REPORT_ERROR(_level, (_err), _vmsg); \
-+ return ERROR_CODE(_err); \
-+ } while (0)
-+
-+
-+#if (REPORT_EVENTS > 0)
-+
-+#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) \
-+ do { \
-+ if (_ev##_LEVEL <= EVENT_DYNAMIC_LEVEL) { \
-+ XX_Print("~ %s %s Event " PRINT_FORMAT ": %s (flags: 0x%04x); ", \
-+ dbgLevelStrings[_ev##_LEVEL - 1], \
-+ moduleStrings[__ERR_MODULE__ >> 16], \
-+ PRINT_FMT_PARAMS, \
-+ eventStrings[((_ev) - EV_NO_EVENT - 1)], \
-+ (uint16_t)(_flg)); \
-+ XX_Print _vmsg; \
-+ XX_Print("\r\n"); \
-+ XX_EventById((uint32_t)(_ev), (t_Handle)(_appId), (uint16_t)(_flg), NO_MSG); \
-+ } \
-+ } while (0)
-+
-+#else /* not REPORT_EVENTS */
-+
-+#define REPORT_EVENT(_ev, _appId, _flg, _vmsg)
-+
-+#endif /* (REPORT_EVENTS > 0) */
-+
-+#endif /* (DEBUG_ERRORS > 0) */
-+
-+
-+/**************************************************************************//**
-+ @Function ASSERT_COND
-+
-+ @Description Assertion macro.
-+
-+ @Param[in] _cond - The condition being checked, in positive form;
-+ Failure of the condition triggers the assert.
-+*//***************************************************************************/
-+#ifdef DISABLE_ASSERTIONS
-+#define ASSERT_COND(_cond)
-+#else
-+#define ASSERT_COND(_cond) \
-+ do { \
-+ if (!(_cond)) { \
-+ XX_Print("*** ASSERT_COND failed " PRINT_FORMAT "\r\n", \
-+ PRINT_FMT_PARAMS); \
-+ XX_Exit(1); \
-+ } \
-+ } while (0)
-+#endif /* DISABLE_ASSERTIONS */
-+
-+
-+#ifdef DISABLE_INIT_PARAMETERS_CHECK
-+
-+#define CHECK_INIT_PARAMETERS(handle, f_check)
-+#define CHECK_INIT_PARAMETERS_RETURN_VALUE(handle, f_check, retval)
-+
-+#else
-+
-+#define CHECK_INIT_PARAMETERS(handle, f_check) \
-+ do { \
-+ t_Error err = f_check(handle); \
-+ if (err != E_OK) { \
-+ RETURN_ERROR(MAJOR, err, NO_MSG); \
-+ } \
-+ } while (0)
-+
-+#define CHECK_INIT_PARAMETERS_RETURN_VALUE(handle, f_check, retval) \
-+ do { \
-+ t_Error err = f_check(handle); \
-+ if (err != E_OK) { \
-+ REPORT_ERROR(MAJOR, err, NO_MSG); \
-+ return (retval); \
-+ } \
-+ } while (0)
-+
-+#endif /* DISABLE_INIT_PARAMETERS_CHECK */
-+
-+#ifdef DISABLE_SANITY_CHECKS
-+
-+#define SANITY_CHECK_RETURN_ERROR(_cond, _err)
-+#define SANITY_CHECK_RETURN_VALUE(_cond, _err, retval)
-+#define SANITY_CHECK_RETURN(_cond, _err)
-+#define SANITY_CHECK_EXIT(_cond, _err)
-+
-+#else /* DISABLE_SANITY_CHECKS */
-+
-+#define SANITY_CHECK_RETURN_ERROR(_cond, _err) \
-+ do { \
-+ if (!(_cond)) { \
-+ RETURN_ERROR(CRITICAL, (_err), NO_MSG); \
-+ } \
-+ } while (0)
-+
-+#define SANITY_CHECK_RETURN_VALUE(_cond, _err, retval) \
-+ do { \
-+ if (!(_cond)) { \
-+ REPORT_ERROR(CRITICAL, (_err), NO_MSG); \
-+ return (retval); \
-+ } \
-+ } while (0)
-+
-+#define SANITY_CHECK_RETURN(_cond, _err) \
-+ do { \
-+ if (!(_cond)) { \
-+ REPORT_ERROR(CRITICAL, (_err), NO_MSG); \
-+ return; \
-+ } \
-+ } while (0)
-+
-+#define SANITY_CHECK_EXIT(_cond, _err) \
-+ do { \
-+ if (!(_cond)) { \
-+ REPORT_ERROR(CRITICAL, (_err), NO_MSG); \
-+ XX_Exit(1); \
-+ } \
-+ } while (0)
-+
-+#endif /* DISABLE_SANITY_CHECKS */
-+
-+/** @} */ /* end of Debug/error Utils group */
-+
-+/** @} */ /* end of General Utils group */
-+
-+#endif /* __ERROR_EXT_H */
-+
-+
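
In use, a module defines __ERR_MODULE__ (one of the SDK's module identifiers, defined elsewhere) before including this header; RETURN_ERROR() then composes the module and error-type bits, logs at or above the configured level, and returns the code, while the SANITY_CHECK_* and CHECK_INIT_PARAMETERS() macros guard preconditions. A sketch of that pattern; MODULE_FM_PORT and the t_DummyPort structure are assumptions made only for illustration:

    /* __ERR_MODULE__ selects the Module ID bits of every error code reported
     * from this file. MODULE_FM_PORT is assumed to be one of the SDK-provided
     * module identifiers. */
    #define __ERR_MODULE__  MODULE_FM_PORT
    #include "error_ext.h"

    typedef struct {            /* illustrative object, not part of the SDK */
        bool     initialized;
        uint16_t maxRxBufLen;
    } t_DummyPort;

    static t_Error CheckPortParams(t_DummyPort *p_Port)
    {
        if (p_Port->maxRxBufLen % 8)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE,
                         ("Maximum Rx buffer length (should be divisible by 8)"));
        return E_OK;
    }

    t_Error DummyPortEnable(t_DummyPort *p_Port)
    {
        /* Precondition checks: report at CRITICAL level and bail out early. */
        SANITY_CHECK_RETURN_ERROR(p_Port, E_INVALID_HANDLE);
        SANITY_CHECK_RETURN_ERROR(p_Port->initialized, E_INVALID_STATE);

        /* Run the configuration checker; a failure is reported and returned. */
        CHECK_INIT_PARAMETERS(p_Port, CheckPortParams);

        DBG(TRACE, ("enabling port"));
        return E_OK;
    }
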
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/list_ext.h
-@@ -0,0 +1,358 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+
-+ @File list_ext.h
-+
-+ @Description External prototypes for list.c
-+*//***************************************************************************/
-+
-+#ifndef __LIST_EXT_H
-+#define __LIST_EXT_H
-+
-+
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group etc_id Utility Library Application Programming Interface
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group list_id List
-+
-+ @Description List module functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description List structure.
-+*//***************************************************************************/
-+typedef struct List
-+{
-+ struct List *p_Next; /**< A pointer to the next list object */
-+ struct List *p_Prev; /**< A pointer to the previous list object */
-+} t_List;
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_FIRST/LIST_LAST/LIST_NEXT/LIST_PREV
-+
-+ @Description Macro to get first/last/next/previous entry in a list.
-+
-+ @Param[in] p_List - A pointer to a list.
-+*//***************************************************************************/
-+#define LIST_FIRST(p_List) (p_List)->p_Next
-+#define LIST_LAST(p_List) (p_List)->p_Prev
-+#define LIST_NEXT LIST_FIRST
-+#define LIST_PREV LIST_LAST
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_INIT
-+
-+ @Description Macro for initialization of a list struct.
-+
-+ @Param[in] lst - The t_List object to initialize.
-+*//***************************************************************************/
-+#define LIST_INIT(lst) {&(lst), &(lst)}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST
-+
-+ @Description Macro to declare a list.
-+
-+ @Param[in] listName - The list object name.
-+*//***************************************************************************/
-+#define LIST(listName) t_List listName = LIST_INIT(listName)
-+
-+
-+/**************************************************************************//**
-+ @Function INIT_LIST
-+
-+ @Description Macro to initialize a list pointer.
-+
-+ @Param[in] p_List - The list pointer.
-+*//***************************************************************************/
-+#define INIT_LIST(p_List) LIST_FIRST(p_List) = LIST_LAST(p_List) = (p_List)
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_OBJECT
-+
-+ @Description Macro to get the struct (object) for this entry.
-+
-+ @Param[in] p_List - A pointer to the t_List member embedded in the struct (object).
-+ @Param[in] type - The type of the struct (object) this list is embedded in.
-+ @Param[in] member - The name of the t_List object within the struct.
-+
-+ @Return The structure pointer for this entry.
-+*//***************************************************************************/
-+#define MEMBER_OFFSET(type, member) (PTR_TO_UINT(&((type *)0)->member))
-+#define LIST_OBJECT(p_List, type, member) \
-+ ((type *)((char *)(p_List)-MEMBER_OFFSET(type, member)))
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_FOR_EACH
-+
-+ @Description Macro to iterate over a list.
-+
-+ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
-+ @Param[in] p_Head - A pointer to the head for your list pointer.
-+
-+ @Cautions You can't delete items with this routine.
-+ For deletion use LIST_FOR_EACH_SAFE().
-+*//***************************************************************************/
-+#define LIST_FOR_EACH(p_Pos, p_Head) \
-+ for (p_Pos = LIST_FIRST(p_Head); p_Pos != (p_Head); p_Pos = LIST_NEXT(p_Pos))
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_FOR_EACH_SAFE
-+
-+ @Description Macro to iterate over a list safe against removal of list entry.
-+
-+ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
-+ @Param[in] p_Tmp - Another pointer to a list to use as temporary storage.
-+ @Param[in] p_Head - A pointer to the head for your list pointer.
-+*//***************************************************************************/
-+#define LIST_FOR_EACH_SAFE(p_Pos, p_Tmp, p_Head) \
-+ for (p_Pos = LIST_FIRST(p_Head), p_Tmp = LIST_FIRST(p_Pos); \
-+ p_Pos != (p_Head); \
-+ p_Pos = p_Tmp, p_Tmp = LIST_NEXT(p_Pos))
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_FOR_EACH_OBJECT_SAFE
-+
-+ @Description Macro to iterate over list of given type safely.
-+
-+ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
-+ @Param[in] p_Tmp - Another pointer to a list to use as temporary storage.
-+ @Param[in] type - The type of the struct this is embedded in.
-+ @Param[in] p_Head - A pointer to the head for your list pointer.
-+ @Param[in] member - The name of the list_struct within the struct.
-+
-+ @Cautions Unlike LIST_FOR_EACH_OBJECT(), the current entry may be
-+ removed inside the loop, since the next entry is fetched in advance.
-+*//***************************************************************************/
-+#define LIST_FOR_EACH_OBJECT_SAFE(p_Pos, p_Tmp, p_Head, type, member) \
-+ for (p_Pos = LIST_OBJECT(LIST_FIRST(p_Head), type, member), \
-+ p_Tmp = LIST_OBJECT(LIST_FIRST(&p_Pos->member), type, member); \
-+ &p_Pos->member != (p_Head); \
-+ p_Pos = p_Tmp, \
-+ p_Tmp = LIST_OBJECT(LIST_FIRST(&p_Pos->member), type, member))
-+
-+/**************************************************************************//**
-+ @Function LIST_FOR_EACH_OBJECT
-+
-+ @Description Macro to iterate over list of given type.
-+
-+ @Param[in] p_Pos - A pointer to a list to use as a loop counter.
-+ @Param[in] type - The type of the struct this is embedded in.
-+ @Param[in] p_Head - A pointer to the head for your list pointer.
-+ @Param[in] member - The name of the list_struct within the struct.
-+
-+ @Cautions You can't delete items with this routine.
-+ For deletion use LIST_FOR_EACH_SAFE().
-+*//***************************************************************************/
-+#define LIST_FOR_EACH_OBJECT(p_Pos, type, p_Head, member) \
-+ for (p_Pos = LIST_OBJECT(LIST_FIRST(p_Head), type, member); \
-+ &p_Pos->member != (p_Head); \
-+ p_Pos = LIST_OBJECT(LIST_FIRST(&(p_Pos->member)), type, member))
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_Add
-+
-+ @Description Add a new entry to a list.
-+
-+ Insert a new entry after the specified head.
-+ This is good for implementing stacks.
-+
-+ @Param[in] p_New - A pointer to a new list entry to be added.
-+ @Param[in] p_Head - A pointer to a list head to add it after.
-+
-+ @Return none.
-+*//***************************************************************************/
-+static __inline__ void LIST_Add(t_List *p_New, t_List *p_Head)
-+{
-+ LIST_PREV(LIST_NEXT(p_Head)) = p_New;
-+ LIST_NEXT(p_New) = LIST_NEXT(p_Head);
-+ LIST_PREV(p_New) = p_Head;
-+ LIST_NEXT(p_Head) = p_New;
-+}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_AddToTail
-+
-+ @Description Add a new entry to a list.
-+
-+ Insert a new entry before the specified head.
-+ This is useful for implementing queues.
-+
-+ @Param[in] p_New - A pointer to a new list entry to be added.
-+ @Param[in] p_Head - A pointer to a list head to add it before.
-+
-+ @Return none.
-+*//***************************************************************************/
-+static __inline__ void LIST_AddToTail(t_List *p_New, t_List *p_Head)
-+{
-+ LIST_NEXT(LIST_PREV(p_Head)) = p_New;
-+ LIST_PREV(p_New) = LIST_PREV(p_Head);
-+ LIST_NEXT(p_New) = p_Head;
-+ LIST_PREV(p_Head) = p_New;
-+}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_Del
-+
-+ @Description Deletes entry from a list.
-+
-+ @Param[in] p_Entry - A pointer to the element to delete from the list.
-+
-+ @Return none.
-+
-+ @Cautions LIST_IsEmpty() on the entry does not return true after this;
-+ the entry is left in an undefined state.
-+*//***************************************************************************/
-+static __inline__ void LIST_Del(t_List *p_Entry)
-+{
-+ LIST_PREV(LIST_NEXT(p_Entry)) = LIST_PREV(p_Entry);
-+ LIST_NEXT(LIST_PREV(p_Entry)) = LIST_NEXT(p_Entry);
-+}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_DelAndInit
-+
-+ @Description Deletes an entry from its list and reinitializes it.
-+
-+ @Param[in] p_Entry - A pointer to the element to delete from the list.
-+
-+ @Return none.
-+*//***************************************************************************/
-+static __inline__ void LIST_DelAndInit(t_List *p_Entry)
-+{
-+ LIST_Del(p_Entry);
-+ INIT_LIST(p_Entry);
-+}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_Move
-+
-+ @Description Delete from one list and add as another's head.
-+
-+ @Param[in] p_Entry - A pointer to the list entry to move.
-+ @Param[in] p_Head - A pointer to the list head that will precede our entry.
-+
-+ @Return none.
-+*//***************************************************************************/
-+static __inline__ void LIST_Move(t_List *p_Entry, t_List *p_Head)
-+{
-+ LIST_Del(p_Entry);
-+ LIST_Add(p_Entry, p_Head);
-+}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_MoveToTail
-+
-+ @Description Delete from one list and add as another's tail.
-+
-+ @Param[in] p_Entry - A pointer to the entry to move.
-+ @Param[in] p_Head - A pointer to the list head that will follow our entry.
-+
-+ @Return none.
-+*//***************************************************************************/
-+static __inline__ void LIST_MoveToTail(t_List *p_Entry, t_List *p_Head)
-+{
-+ LIST_Del(p_Entry);
-+ LIST_AddToTail(p_Entry, p_Head);
-+}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_IsEmpty
-+
-+ @Description Tests whether a list is empty.
-+
-+ @Param[in] p_List - A pointer to the list to test.
-+
-+ @Return 1 if the list is empty, 0 otherwise.
-+*//***************************************************************************/
-+static __inline__ int LIST_IsEmpty(t_List *p_List)
-+{
-+ return (LIST_FIRST(p_List) == p_List);
-+}
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_Append
-+
-+ @Description Join two lists.
-+
-+ @Param[in] p_NewList - A pointer to the new list to add.
-+ @Param[in] p_Head - A pointer to the place to add it in the first list.
-+
-+ @Return none.
-+*//***************************************************************************/
-+void LIST_Append(t_List *p_NewList, t_List *p_Head);
-+
-+
-+/**************************************************************************//**
-+ @Function LIST_NumOfObjs
-+
-+ @Description Counts the number of objects in the list.
-+
-+ @Param[in] p_List - A pointer to the list whose objects are to be counted.
-+
-+ @Return Number of objects in the list.
-+*//***************************************************************************/
-+int LIST_NumOfObjs(t_List *p_List);
-+
-+/** @} */ /* end of list_id group */
-+/** @} */ /* end of etc_id group */
-+
-+
-+#endif /* __LIST_EXT_H */
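
The list is intrusive: a t_List node is embedded inside the user's structure, LIST_OBJECT() recovers the enclosing structure from a node pointer, and the FOR_EACH macros walk the circular list. A short sketch, assuming this header; t_Buffer is an illustrative structure, not part of the SDK:

    #include "std_ext.h"
    #include "list_ext.h"

    typedef struct {
        uint32_t id;
        t_List   node;          /* link embedded in the object */
    } t_Buffer;

    static LIST(freeBuffers);   /* statically initialized empty list head */

    void list_example(t_Buffer *bufs, uint32_t count)
    {
        uint32_t i;
        t_Buffer *p_Buf, *p_Tmp;

        /* Add at the tail so the list behaves as a FIFO queue. */
        for (i = 0; i < count; i++)
            LIST_AddToTail(&bufs[i].node, &freeBuffers);

        /* Walk the enclosing objects (no removal inside this loop). */
        LIST_FOR_EACH_OBJECT(p_Buf, t_Buffer, &freeBuffers, node)
            p_Buf->id = (uint32_t)(p_Buf - bufs);

        /* Drain the list; the _SAFE variant tolerates removing the current entry. */
        LIST_FOR_EACH_OBJECT_SAFE(p_Buf, p_Tmp, &freeBuffers, t_Buffer, node)
            LIST_DelAndInit(&p_Buf->node);
    }
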
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mem_ext.h
-@@ -0,0 +1,318 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+
-+ @File mem_ext.h
-+
-+ @Description External prototypes for the memory manager object
-+*//***************************************************************************/
-+
-+#ifndef __MEM_EXT_H
-+#define __MEM_EXT_H
-+
-+#include "std_ext.h"
-+#include "part_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group etc_id Utility Library Application Programming Interface
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group mem_id Slab Memory Manager
-+
-+ @Description Slab Memory Manager module functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/* Each block is of the following structure:
-+ *
-+ *
-+ * +-----------+----------+---------------------------+-----------+-----------+
-+ * | Alignment | Prefix | Data | Postfix | Alignment |
-+ * | field | field | field | field | Padding |
-+ * | | | | | |
-+ * +-----------+----------+---------------------------+-----------+-----------+
-+ * In addition, optional padding may reside at the very beginning of the area,
-+ * to ensure that the first block's data field is aligned as requested.
-+ */
-+
-+
-+#define MEM_MAX_NAME_LENGTH 8
-+
-+/**************************************************************************//*
-+ @Description Memory Segment structure
-+*//***************************************************************************/
-+
-+typedef struct
-+{
-+ char name[MEM_MAX_NAME_LENGTH];
-+ /* The segment's name */
-+ uint8_t **p_Bases; /* Base addresses of the segments */
-+ uint8_t **p_BlocksStack; /* Array of pointers to blocks */
-+ t_Handle h_Spinlock;
-+ uint16_t dataSize; /* Size of each data block */
-+ uint16_t prefixSize; /* How many bytes to reserve before the data */
-+ uint16_t postfixSize; /* How many bytes to reserve after the data */
-+ uint16_t alignment; /* Requested alignment for the data field */
-+ int allocOwner; /* Memory allocation owner */
-+ uint32_t getFailures; /* Number of times get failed */
-+ uint32_t num; /* Number of blocks in segment */
-+ uint32_t current; /* Current block */
-+ bool consecutiveMem; /* Allocate consecutive data blocks memory */
-+#ifdef DEBUG_MEM_LEAKS
-+ void *p_MemDbg; /* MEM debug database (MEM leaks detection) */
-+ uint32_t blockOffset;
-+ uint32_t blockSize;
-+#endif /* DEBUG_MEM_LEAKS */
-+} t_MemorySegment;
-+
-+
-+
-+/**************************************************************************//**
-+ @Function MEM_Init
-+
-+ @Description Create a new memory segment.
-+
-+ @Param[in] name - Name of memory partition.
-+ @Param[in] p_Handle - Handle to new segment is returned through here.
-+ @Param[in] num - Number of blocks in new segment.
-+ @Param[in] dataSize - Size of blocks in segment.
-+ @Param[in] prefixSize - How many bytes to allocate before the data.
-+ @Param[in] postfixSize - How many bytes to allocate after the data.
-+ @Param[in] alignment - Requested alignment for data field (in bytes).
-+
-+ @Return E_OK - success, E_NO_MEMORY - out of memory.
-+*//***************************************************************************/
-+t_Error MEM_Init(char name[],
-+ t_Handle *p_Handle,
-+ uint32_t num,
-+ uint16_t dataSize,
-+ uint16_t prefixSize,
-+ uint16_t postfixSize,
-+ uint16_t alignment);
-+
-+/**************************************************************************//**
-+ @Function MEM_InitSmart
-+
-+ @Description Create a new memory segment.
-+
-+ @Param[in] name - Name of memory partition.
-+ @Param[in] p_Handle - Handle to new segment is returned through here.
-+ @Param[in] num - Number of blocks in new segment.
-+ @Param[in] dataSize - Size of blocks in segment.
-+ @Param[in] prefixSize - How many bytes to allocate before the data.
-+ @Param[in] postfixSize - How many bytes to allocate after the data.
-+ @Param[in] alignment - Requested alignment for data field (in bytes).
-+ @Param[in] memPartitionId - Memory partition ID for allocation.
-+ @Param[in] consecutiveMem - Whether to allocate the memory blocks
-+ continuously or not.
-+
-+ @Return E_OK - success, E_NO_MEMORY - out of memory.
-+*//***************************************************************************/
-+t_Error MEM_InitSmart(char name[],
-+ t_Handle *p_Handle,
-+ uint32_t num,
-+ uint16_t dataSize,
-+ uint16_t prefixSize,
-+ uint16_t postfixSize,
-+ uint16_t alignment,
-+ uint8_t memPartitionId,
-+ bool consecutiveMem);
-+
-+/**************************************************************************//**
-+ @Function MEM_InitByAddress
-+
-+ @Description Create a new memory segment with a specified base address.
-+
-+ @Param[in] name - Name of memory partition.
-+ @Param[in] p_Handle - Handle to new segment is returned through here.
-+ @Param[in] num - Number of blocks in new segment.
-+ @Param[in] dataSize - Size of blocks in segment.
-+ @Param[in] prefixSize - How many bytes to allocate before the data.
-+ @Param[in] postfixSize - How many bytes to allocate after the data.
-+ @Param[in] alignment - Requested alignment for data field (in bytes).
-+ @Param[in] address - The required base address.
-+
-+ @Return E_OK - success, E_NO_MEMORY - out of memory.
-+ *//***************************************************************************/
-+t_Error MEM_InitByAddress(char name[],
-+ t_Handle *p_Handle,
-+ uint32_t num,
-+ uint16_t dataSize,
-+ uint16_t prefixSize,
-+ uint16_t postfixSize,
-+ uint16_t alignment,
-+ uint8_t *address);
-+
-+/**************************************************************************//**
-+ @Function MEM_Free
-+
-+ @Description Free a specific memory segment.
-+
-+ @Param[in] h_Mem - Handle to memory segment.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void MEM_Free(t_Handle h_Mem);
-+
-+/**************************************************************************//**
-+ @Function MEM_Get
-+
-+ @Description Get a block of memory from a segment.
-+
-+ @Param[in] h_Mem - Handle to memory segment.
-+
-+ @Return Pointer to a memory block on success, 0 otherwise.
-+*//***************************************************************************/
-+void * MEM_Get(t_Handle h_Mem);
-+
-+/**************************************************************************//**
-+ @Function MEM_GetN
-+
-+ @Description Get up to N blocks of memory from a segment.
-+
-+ The blocks are assumed to be of a fixed size (one size per segment).
-+
-+ @Param[in] h_Mem - Handle to memory segment.
-+ @Param[in] num - Number of blocks to allocate.
-+ @Param[out] array - Array of at least num pointers to which the addresses
-+ of the allocated blocks are written.
-+
-+ @Return The number of blocks actually allocated.
-+
-+ @Cautions Interrupts are disabled for the duration of the allocation loop.
-+ Although this loop is very short for each block (several machine
-+ instructions), you should not allocate a very large number
-+ of blocks via this routine.
-+*//***************************************************************************/
-+uint16_t MEM_GetN(t_Handle h_Mem, uint32_t num, void *array[]);
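-+
-+/*
-+ * Illustrative usage sketch (editor's addition, not part of the original
-+ * header): bulk-allocates up to 8 blocks from an existing segment and then
-+ * returns them one by one with MEM_Put() (declared below). h_Mem is assumed
-+ * to have been created earlier with MEM_Init().
-+ */
-+#if 0
-+static void mem_getn_example(t_Handle h_Mem)
-+{
-+ void *blocks[8];
-+ uint16_t got, i;
-+
-+ got = MEM_GetN(h_Mem, 8, blocks); /* may return fewer than 8 blocks */
-+ for (i = 0; i < got; i++)
-+ MEM_Put(h_Mem, blocks[i]); /* give each block back to the segment */
-+}
-+#endif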
-+
-+/**************************************************************************//**
-+ @Function MEM_Put
-+
-+ @Description Put a block of memory back to a segment.
-+
-+ @Param[in] h_Mem - Handle to memory segment.
-+ @Param[in] p_Block - The block to return.
-+
-+ @Return E_OK on success, error code otherwise.
-+*//***************************************************************************/
-+t_Error MEM_Put(t_Handle h_Mem, void *p_Block);
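-+
-+/*
-+ * Illustrative usage sketch (editor's addition, not part of the original
-+ * header): full segment life cycle. The name, block count, block size and
-+ * alignment below are arbitrary example values.
-+ */
-+#if 0
-+static t_Error mem_example(void)
-+{
-+ t_Handle h_mem;
-+ void *p_block;
-+ t_Error err;
-+
-+ /* 64 blocks of 256 bytes, no prefix/postfix, 16-byte aligned data */
-+ err = MEM_Init("exmpl", &h_mem, 64, 256, 0, 0, 16);
-+ if (err != E_OK)
-+ return err;
-+
-+ p_block = MEM_Get(h_mem); /* take one block */
-+ if (p_block)
-+ MEM_Put(h_mem, p_block); /* return it to the segment */
-+
-+ MEM_Free(h_mem); /* release the whole segment */
-+ return E_OK;
-+}
-+#endif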
-+
-+/**************************************************************************//**
-+ @Function MEM_ComputePartitionSize
-+
-+ @Description Calculates a tight upper bound on the size of a partition with
-+ the given attributes.
-+
-+ The returned value is suitable if one wants to use MEM_InitByAddress().
-+
-+ @Param[in] num - The number of blocks in the segment.
-+ @Param[in] dataSize - Size of each data block.
-+ @Param[in] prefixSize - The prefix size.
-+ @Param[in] postfixSize - The postfix size.
-+ @Param[in] alignment - The requested alignment value (in bytes).
-+
-+ @Return The partition size (in bytes) that a segment with the given attributes requires.
-+*//***************************************************************************/
-+uint32_t MEM_ComputePartitionSize(uint32_t num,
-+ uint16_t dataSize,
-+ uint16_t prefixSize,
-+ uint16_t postfixSize,
-+ uint16_t alignment);
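-+
-+/*
-+ * Illustrative sketch (editor's addition, not part of the original header):
-+ * sizes a partition up front and then builds the segment over caller-provided
-+ * memory with MEM_InitByAddress(). The static buffer and the segment
-+ * attributes are arbitrary example values.
-+ */
-+#if 0
-+static t_Error mem_by_address_example(void)
-+{
-+ static uint8_t backing[8 * 1024];
-+ t_Handle h_mem;
-+
-+ /* 16 blocks of 128 bytes, no prefix/postfix, 64-byte aligned data */
-+ if (MEM_ComputePartitionSize(16, 128, 0, 0, 64) > sizeof(backing))
-+ return E_NO_MEMORY;
-+
-+ return MEM_InitByAddress("byaddr", &h_mem, 16, 128, 0, 0, 64, backing);
-+}
-+#endif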
-+
-+#ifdef DEBUG_MEM_LEAKS
-+#if !((defined(__MWERKS__) || defined(__GNUC__)) && (__dest_os == __ppc_eabi))
-+#error "Memory-Leaks-Debug option is supported only for freescale CodeWarrior"
-+#endif /* !(defined(__MWERKS__) && ... */
-+
-+/**************************************************************************//**
-+ @Function MEM_CheckLeaks
-+
-+ @Description Report MEM object leaks.
-+
-+ This routine is automatically called by the MEM_Free() routine,
-+ but it can also be invoked while the MEM object is alive.
-+
-+ @Param[in] h_Mem - Handle to memory segment.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void MEM_CheckLeaks(t_Handle h_Mem);
-+
-+#else /* not DEBUG_MEM_LEAKS */
-+#define MEM_CheckLeaks(h_Mem)
-+#endif /* not DEBUG_MEM_LEAKS */
-+
-+/**************************************************************************//**
-+ @Description Get base of MEM
-+*//***************************************************************************/
-+#define MEM_GetBase(h_Mem) ((t_MemorySegment *)(h_Mem))->p_Bases[0]
-+
-+/**************************************************************************//**
-+ @Description Get size of MEM block
-+*//***************************************************************************/
-+#define MEM_GetSize(h_Mem) ((t_MemorySegment *)(h_Mem))->dataSize
-+
-+/**************************************************************************//**
-+ @Description Get prefix size of MEM block
-+*//***************************************************************************/
-+#define MEM_GetPrefixSize(h_Mem) ((t_MemorySegment *)(h_Mem))->prefixSize
-+
-+/**************************************************************************//**
-+ @Description Get postfix size of MEM block
-+*//***************************************************************************/
-+#define MEM_GetPostfixSize(h_Mem) ((t_MemorySegment *)(h_Mem))->postfixSize
-+
-+/**************************************************************************//**
-+ @Description Get alignment of MEM block (in bytes)
-+*//***************************************************************************/
-+#define MEM_GetAlignment(h_Mem) ((t_MemorySegment *)(h_Mem))->alignment
-+
-+/**************************************************************************//**
-+ @Description Get the number of blocks in the segment
-+*//***************************************************************************/
-+#define MEM_GetNumOfBlocks(h_Mem) ((t_MemorySegment *)(h_Mem))->num
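-+
-+/*
-+ * Illustrative sketch (editor's addition, not part of the original header):
-+ * computes the total payload capacity of a segment from the accessor macros
-+ * above. h_Mem is assumed to have been created with MEM_Init().
-+ */
-+#if 0
-+static uint32_t mem_capacity_bytes(t_Handle h_Mem)
-+{
-+ /* data-block size multiplied by the number of blocks in the segment */
-+ return (uint32_t)MEM_GetSize(h_Mem) * MEM_GetNumOfBlocks(h_Mem);
-+}
-+#endif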
-+
-+/** @} */ /* end of MEM group */
-+/** @} */ /* end of etc_id group */
-+
-+
-+#endif /* __MEM_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/memcpy_ext.h
-@@ -0,0 +1,208 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+
-+ @File memcpy_ext.h
-+
-+ @Description Efficient functions for copying and setting blocks of memory.
-+*//***************************************************************************/
-+
-+#ifndef __MEMCPY_EXT_H
-+#define __MEMCPY_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group etc_id Utility Library Application Programming Interface
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group mem_cpy Memory Copy
-+
-+ @Description Memory Copy module functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function MemCpy32
-+
-+ @Description Copies one memory buffer into another in 4-byte chunks,
-+ which should be more efficient than copying byte by byte.
-+
-+ For large buffers (over 60 bytes) this function is about 4 times
-+ more efficient than the trivial memory copy. For short buffers
-+ it is reduced to the trivial copy and may be a bit worse.
-+
-+ @Param[in] pDst - The address of the destination buffer.
-+ @Param[in] pSrc - The address of the source buffer.
-+ @Param[in] size - The number of bytes that will be copied from pSrc to pDst.
-+
-+ @Return pDst (the address of the destination buffer).
-+
-+ @Cautions There is no parameter or boundary checking! It is up to the user
-+ to supply non-null parameters as source & destination and size
-+ that actually fits into the destination buffer.
-+*//***************************************************************************/
-+void * MemCpy32(void* pDst,void* pSrc, uint32_t size);
-+void * IO2IOCpy32(void* pDst,void* pSrc, uint32_t size);
-+void * IO2MemCpy32(void* pDst,void* pSrc, uint32_t size);
-+void * Mem2IOCpy32(void* pDst,void* pSrc, uint32_t size);
-+
-+/**************************************************************************//**
-+ @Function MemCpy64
-+
-+ @Description Copies one memory buffer into another in 8-byte chunks,
-+ which should be more efficient than copying byte by byte.
-+
-+ For large buffers (over 60 bytes) this function is about 8 times
-+ more efficient than the trivial memory copy. For short buffers
-+ it is reduced to the trivial copy and may be a bit worse.
-+
-+ Some testing suggests that MemCpy32() performs better than
-+ MemCpy64() on small buffers. On average they break even at
-+ about 100-byte buffers; for buffers larger than that,
-+ MemCpy64() is superior.
-+
-+ @Param[in] pDst - The address of the destination buffer.
-+ @Param[in] pSrc - The address of the source buffer.
-+ @Param[in] size - The number of bytes that will be copied from pSrc to pDst.
-+
-+ @Return pDst (the address of the destination buffer).
-+
-+ @Cautions There is no parameter or boundary checking! It is up to the user
-+ to supply non-null source and destination parameters and a
-+ size that actually fits into the destination buffer.
-+
-+ Do not use under Linux.
-+*//***************************************************************************/
-+void * MemCpy64(void* pDst,void* pSrc, uint32_t size);
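-+
-+/*
-+ * Illustrative sketch (editor's addition, not part of the original header):
-+ * picks a copy routine around the ~100-byte break-even point described
-+ * above. Note the caution that MemCpy64() must not be used under Linux;
-+ * there, MemCpy32() (or plain memcpy()) would be used for every size.
-+ */
-+#if 0
-+static void *copy_example(void *dst, void *src, uint32_t len)
-+{
-+ if (len < 100)
-+ return MemCpy32(dst, src, len); /* small buffers: 4-byte chunks */
-+ return MemCpy64(dst, src, len); /* large buffers: 8-byte chunks */
-+}
-+#endif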
-+
-+/**************************************************************************//**
-+ @Function MemSet32
-+
-+ @Description Sets all bytes of a memory buffer to a specific value, in
-+ 4-byte chunks.
-+
-+ @Param[in] pDst - The address of the destination buffer.
-+ @Param[in] val - Value to set destination bytes to.
-+ @Param[in] size - The number of bytes that will be set to val.
-+
-+ @Return pDst (the address of the destination buffer).
-+
-+ @Cautions There is no parameter or boundary checking! It is up to the user
-+ to supply a non-null destination parameter and a size that
-+ actually fits into the destination buffer.
-+*//***************************************************************************/
-+void * MemSet32(void* pDst, uint8_t val, uint32_t size);
-+void * IOMemSet32(void* pDst, uint8_t val, uint32_t size);
-+
-+/**************************************************************************//**
-+ @Function MemSet64
-+
-+ @Description Sets all bytes of a memory buffer to a specific value, in
-+ 8-byte chunks.
-+
-+ @Param[in] pDst - The address of the destination buffer.
-+ @Param[in] val - Value to set destination bytes to.
-+ @Param[in] size - The number of bytes that will be set to val.
-+
-+ @Return pDst (the address of the destination buffer).
-+
-+ @Cautions There is no parameter or boundary checking! It is up to the user
-+ to supply a non-null destination parameter and a size that
-+ actually fits into the destination buffer.
-+*//***************************************************************************/
-+void * MemSet64(void* pDst, uint8_t val, uint32_t size);
-+
-+/**************************************************************************//**
-+ @Function MemDisp
-+
-+ @Description Displays a block of memory in chunks of 32 bits.
-+
-+ @Param[in] addr - The address of the memory to display.
-+ @Param[in] size - The number of bytes that will be displayed.
-+
-+ @Return None.
-+
-+ @Cautions There is no parameter or boundary checking! It is up to the user
-+ to supply a valid, non-null address and a size that actually
-+ fits the memory area being displayed.
-+*//***************************************************************************/
-+void MemDisp(uint8_t *addr, int size);
-+
-+/**************************************************************************//**
-+ @Function MemCpy8
-+
-+ @Description Trivially copies one memory buffer into another, byte by byte.
-+
-+ @Param[in] pDst - The address of the destination buffer.
-+ @Param[in] pSrc - The address of the source buffer.
-+ @Param[in] size - The number of bytes that will be copied from pSrc to pDst.
-+
-+ @Return pDst (the address of the destination buffer).
-+
-+ @Cautions There is no parameter or boundary checking! It is up to the user
-+ to supply non-null parameters as source & destination and size
-+ that actually fits into the destination buffer.
-+*//***************************************************************************/
-+void * MemCpy8(void* pDst,void* pSrc, uint32_t size);
-+
-+/**************************************************************************//**
-+ @Function MemSet8
-+
-+ @Description Sets all bytes of a memory buffer to a specific value byte by byte.
-+
-+ @Param[in] pDst - The address of the destination buffer.
-+ @Param[in] c - Value to set destination bytes to.
-+ @Param[in] size - The number of bytes that will be set to c.
-+
-+ @Return pDst (the address of the destination buffer).
-+
-+ @Cautions There is no parameter or boundary checking! It is up to the user
-+ to supply a non-null destination parameter and a size that
-+ actually fits into the destination buffer.
-+*//***************************************************************************/
-+void * MemSet8(void* pDst, int c, uint32_t size);
-+
-+/** @} */ /* end of mem_cpy group */
-+/** @} */ /* end of etc_id group */
-+
-+
-+#endif /* __MEMCPY_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/mm_ext.h
-@@ -0,0 +1,310 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File mm_ext.h
-+
-+ @Description Memory Manager Application Programming Interface
-+*//***************************************************************************/
-+#ifndef __MM_EXT
-+#define __MM_EXT
-+
-+#include "std_ext.h"
-+
-+#define MM_MAX_ALIGNMENT 20 /* Alignments from 2 to 128 are available,
-+ where the maximum alignment is defined
-+ as 2 to the power of MM_MAX_ALIGNMENT */
-+
-+#define MM_MAX_NAME_LEN 32
-+
-+/**************************************************************************//**
-+ @Group etc_id Utility Library Application Programming Interface
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group mm_grp Flexible Memory Manager
-+
-+ @Description Flexible Memory Manager module functions, definitions and enums.
-+ (All of the following functions, definitions and enums can be found in mm_ext.h)
-+
-+ @{
-+*//***************************************************************************/
-+
-+
-+/**************************************************************************//**
-+ @Function MM_Init
-+
-+ @Description Initializes a new MM object.
-+
-+ It initializes a new memory block, consisting of the base
-+ address and size of the available memory, by calling the
-+ MemBlock_Init routine. It also initializes a new free block,
-+ by calling the FreeBlock_Init routine, which covers almost
-+ all of the memory, starting from the required alignment
-+ relative to the base address and extending to the end of
-+ the memory.
-+ The handle to the new MM object is returned via the h_MM
-+ argument (passed by reference).
-+
-+ @Param[out] h_MM - Handle to the new MM object is returned through here.
-+ @Param[in] base - Base address of the MM.
-+ @Param[in] size - Size of the MM.
-+
-+ @Return E_OK is returned on success. E_NOMEMORY is returned if the new MM object or a new free block can not be initialized.
-+*//***************************************************************************/
-+t_Error MM_Init(t_Handle *h_MM, uint64_t base, uint64_t size);
-+
-+/**************************************************************************//**
-+ @Function MM_Get
-+
-+ @Description Allocates a block of memory according to the given size and the alignment.
-+
-+ The alignment argument determines from which free list a
-+ block of memory is allocated. 2^alignment indicates the
-+ alignment that the base address of the allocated block
-+ should have, so only the values 1, 2, 4, 8, 16, 32 and 64
-+ are valid for the alignment argument.
-+ The routine walks the corresponding free list and looks for
-+ the first block that has enough of the required memory
-+ (best fit).
-+ After the block is found and the data is allocated, it calls
-+ the internal MM_CutFree routine to update all free lists so
-+ that they no longer include the just-allocated block (each
-+ free list contains free blocks with the same alignment).
-+ It also creates a busy block that holds information about
-+ the allocated block.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] size - Size of the requested block.
-+ @Param[in] alignment - Power-of-two value that defines the required
-+ alignment (in bytes); should be 1, 2, 4, 8, 16, 32 or 64.
-+ @Param[in] name - The name that specifies an allocated block.
-+
-+ @Return Base address of the allocated block, or ILLEGAL_BASE if a block cannot be allocated.
-+*//***************************************************************************/
-+uint64_t MM_Get(t_Handle h_MM, uint64_t size, uint64_t alignment, char *name);
-+
-+/**************************************************************************//**
-+ @Function MM_GetBase
-+
-+ @Description Gets the base address of the given MM object.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+
-+ @Return base address of the block.
-+*//***************************************************************************/
-+uint64_t MM_GetBase(t_Handle h_MM);
-+
-+/**************************************************************************//**
-+ @Function MM_GetForce
-+
-+ @Description Force memory allocation.
-+
-+ It allocates a block of memory of the given size at the
-+ given base address.
-+ The routine checks whether the required block can be
-+ allocated (that is, whether it is free) and then calls the
-+ internal MM_CutFree routine to update all free lists so that
-+ they no longer include that block.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] base - Base address of the MM.
-+ @Param[in] size - Size of the MM.
-+ @Param[in] name - Name that specifies an allocated block.
-+
-+ @Return Base address of the allocated block, or ILLEGAL_BASE if the block cannot be allocated.
-+*//***************************************************************************/
-+uint64_t MM_GetForce(t_Handle h_MM, uint64_t base, uint64_t size, char *name);
-+
-+/**************************************************************************//**
-+ @Function MM_GetForceMin
-+
-+ @Description Allocates a block of memory according to the given size, the alignment and minimum base address.
-+
-+ The alignment argument determines from which free list a
-+ block of memory is allocated. 2^alignment indicates the
-+ alignment that the base address of the allocated block
-+ should have, so only the values 1, 2, 4, 8, 16, 32 and 64
-+ are valid for the alignment argument.
-+ The minimum base address forces the location of the block
-+ to be from the given address onward.
-+ The routine walks the corresponding free list and looks for
-+ the first free block whose base address is equal to or
-+ smaller than the required minimum address and whose end
-+ address is larger than the required base + size - i.e. a
-+ block that may contain the required block.
-+ After the block is found and the data is allocated, it calls
-+ the internal MM_CutFree routine to update all free lists so
-+ that they no longer include the just-allocated block (each
-+ free list contains free blocks with the same alignment).
-+ It also creates a busy block that holds information about
-+ the allocated block.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] size - Size of the requested block.
-+ @Param[in] alignment - Power-of-two value that defines the required
-+ alignment (in bytes); should be 1, 2, 4, 8, 16, 32 or 64.
-+ @Param[in] min - The minimum base address of the block.
-+ @Param[in] name - Name that specifies an allocated block.
-+
-+ @Return Base address of the allocated block, or ILLEGAL_BASE if a block cannot be allocated.
-+*//***************************************************************************/
-+uint64_t MM_GetForceMin(t_Handle h_MM,
-+ uint64_t size,
-+ uint64_t alignment,
-+ uint64_t min,
-+ char *name);
-+
-+/**************************************************************************//**
-+ @Function MM_Put
-+
-+ @Description Puts the block of memory at the given base address back into the memory manager.
-+
-+ It checks whether there is a busy block with the given base
-+ address. If not, it returns 0, meaning the block cannot be
-+ freed. Otherwise, it retrieves the parameters of the busy
-+ block and, after updating the lists of free blocks, removes
-+ that busy block from the busy list by calling the MM_CutBusy
-+ routine.
-+ After that it calls the MM_AddFree routine to add a new free
-+ block to the free lists.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] base - Base address of the block to return.
-+
-+ @Return The number of bytes released, or 0 on failure.
-+*//***************************************************************************/
-+uint64_t MM_Put(t_Handle h_MM, uint64_t base);
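-+
-+/*
-+ * Illustrative usage sketch (editor's addition, not part of the original
-+ * header): manages a 1 MB region with the flexible memory manager. The
-+ * region size, block size, alignment and name are arbitrary example values.
-+ */
-+#if 0
-+static void mm_example(uint64_t region_base)
-+{
-+ t_Handle h_mm;
-+ uint64_t block;
-+
-+ if (MM_Init(&h_mm, region_base, 0x100000) != E_OK)
-+ return;
-+
-+ block = MM_Get(h_mm, 0x1000, 64, "example"); /* 4 KB, 64-byte aligned */
-+ if (block != ILLEGAL_BASE)
-+ MM_Put(h_mm, block); /* release the block again */
-+
-+ MM_Free(h_mm); /* MM_Free() is declared below */
-+}
-+#endif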
-+
-+/**************************************************************************//**
-+ @Function MM_PutForce
-+
-+ @Description Releases a block of memory of the required size from the required base address.
-+
-+ First, it calls the MM_CutBusy routine to cut a block from
-+ the busy list, and then calls the MM_AddFree routine to add
-+ the freed block to the free lists.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] base - Base address of the block to free.
-+ @Param[in] size - Size of a block to free.
-+
-+ @Return The number of bytes released, 0 on failure.
-+*//***************************************************************************/
-+uint64_t MM_PutForce(t_Handle h_MM, uint64_t base, uint64_t size);
-+
-+/**************************************************************************//**
-+ @Function MM_Add
-+
-+ @Description Adds a new memory block for memory allocation.
-+
-+ When a new memory block is initialized and added to the
-+ memory list, it calls the MM_AddFree routine to add the
-+ new free block to the free lists.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] base - Base address of the memory block.
-+ @Param[in] size - Size of the memory block.
-+
-+ @Return E_OK on success, otherwise returns an error code.
-+*//***************************************************************************/
-+t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size);
-+
-+/**************************************************************************//**
-+ @Function MM_Dump
-+
-+ @Description Prints results of free and busy lists.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+*//***************************************************************************/
-+void MM_Dump(t_Handle h_MM);
-+
-+/**************************************************************************//**
-+ @Function MM_Free
-+
-+ @Description Releases memory allocated for MM object.
-+
-+ @Param[in] h_MM - Handle of the MM object.
-+*//***************************************************************************/
-+void MM_Free(t_Handle h_MM);
-+
-+/**************************************************************************//**
-+ @Function MM_GetMemBlock
-+
-+ @Description Returns base address of the memory block specified by the index.
-+
-+ If index is 0, the base address of the first memory block is
-+ returned; if it is 1, the base address of the second memory
-+ block is returned, and so on.
-+ Note that those memory blocks are allocated by the
-+ application before MM_Init or MM_Add and have to be released
-+ by the application before or after invoking the MM_Free
-+ routine.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] index - Index of the memory block.
-+
-+ @Return A valid base address, or ILLEGAL_BASE if there is no memory block for the specified index.
-+*//***************************************************************************/
-+uint64_t MM_GetMemBlock(t_Handle h_MM, int index);
-+
-+/**************************************************************************//**
-+ @Function MM_InRange
-+
-+ @Description Checks if a specific address is in the memory range of the passed MM object.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+ @Param[in] addr - The address to be checked.
-+
-+ @Return TRUE if the address is in the address range of the block, FALSE otherwise.
-+*//***************************************************************************/
-+bool MM_InRange(t_Handle h_MM, uint64_t addr);
-+
-+/**************************************************************************//**
-+ @Function MM_GetFreeMemSize
-+
-+ @Description Returns the size (in bytes) of free memory.
-+
-+ @Param[in] h_MM - Handle to the MM object.
-+
-+ @Return Free memory size in bytes.
-+*//***************************************************************************/
-+uint64_t MM_GetFreeMemSize(t_Handle h_MM);
-+
-+
-+/** @} */ /* end of mm_grp group */
-+/** @} */ /* end of etc_id group */
-+
-+#endif /* __MM_EXT */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/etc/sprint_ext.h
-@@ -0,0 +1,118 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File sprint_ext.h
-+
-+ @Description Debug routines (externals).
-+
-+*//***************************************************************************/
-+
-+#ifndef __SPRINT_EXT_H
-+#define __SPRINT_EXT_H
-+
-+
-+#if defined(NCSW_LINUX) && defined(__KERNEL__)
-+#include <linux/kernel.h>
-+
-+#elif defined(NCSW_VXWORKS)
-+#include "private/stdioP.h"
-+
-+#else
-+#include <stdio.h>
-+#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
-+
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group etc_id Utility Library Application Programming Interface
-+
-+ @Description External routines.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group sprint_id Sprint
-+
-+ @Description Sprint & Sscan module functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function Sprint
-+
-+ @Description Format a string and place it in a buffer.
-+
-+ @Param[in] buff - The buffer to place the result into.
-+ @Param[in] str - The format string to use.
-+ @Param[in] ... - Arguments for the format string.
-+
-+ @Return Number of bytes formatted.
-+*//***************************************************************************/
-+int Sprint(char *buff, const char *str, ...);
-+
-+/**************************************************************************//**
-+ @Function Snprint
-+
-+ @Description Format a string and place it in a buffer.
-+
-+ @Param[in] buf - The buffer to place the result into.
-+ @Param[in] size - The size of the buffer, including the trailing null space.
-+ @Param[in] fmt - The format string to use.
-+ @Param[in] ... - Arguments for the format string.
-+
-+ @Return Number of bytes formatted.
-+*//***************************************************************************/
-+int Snprint(char * buf, uint32_t size, const char *fmt, ...);
-+
-+/**************************************************************************//**
-+ @Function Sscan
-+
-+ @Description Unformat a buffer into a list of arguments.
-+
-+ @Param[in] buf - input buffer.
-+ @Param[in] fmt - formatting of buffer.
-+ @Param[out] ... - resulting arguments.
-+
-+ @Return Number of bytes unformatted.
-+*//***************************************************************************/
-+int Sscan(const char * buf, const char * fmt, ...);
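-+
-+/*
-+ * Illustrative sketch (editor's addition, not part of the original header):
-+ * formats a value with Sprint() and reads it back with Sscan(). The buffer
-+ * size and format string are arbitrary example values.
-+ */
-+#if 0
-+static void sprint_example(void)
-+{
-+ char buf[64];
-+ int parsed = 0;
-+
-+ if (Sprint(buf, "count=%d", 42) > 0)
-+ Sscan(buf, "count=%d", &parsed); /* parsed == 42 on success */
-+}
-+#endif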
-+
-+/** @} */ /* end of sprint_id group */
-+/** @} */ /* end of etc_id group */
-+
-+
-+#endif /* __SPRINT_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/arch/ppc_access.h
-@@ -0,0 +1,37 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef FL_E500_MACROS_H
-+#define FL_E500_MACROS_H
-+
-+#endif /* FL_E500_MACROS_H */
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/common/general.h
-@@ -0,0 +1,52 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __GENERAL_H
-+#define __GENERAL_H
-+
-+#include "std_ext.h"
-+#if !defined(NCSW_LINUX)
-+#include "errno.h"
-+#endif
-+
-+
-+extern uint32_t get_mac_addr_crc(uint64_t _addr);
-+
-+#ifndef CONFIG_FMAN_ARM
-+#define iowrite32be(val, addr) WRITE_UINT32(*addr, val)
-+#define ioread32be(addr) GET_UINT32(*addr)
-+#endif
-+
-+#define ether_crc(len, addr) get_mac_addr_crc(*(uint64_t *)(addr)>>16)
-+
-+
-+#endif /* __GENERAL_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fman_common.h
-@@ -0,0 +1,78 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __FMAN_COMMON_H
-+#define __FMAN_COMMON_H
-+
-+/**************************************************************************//**
-+ @Description NIA Description
-+*//***************************************************************************/
-+#define NIA_ORDER_RESTOR 0x00800000
-+#define NIA_ENG_FM_CTL 0x00000000
-+#define NIA_ENG_PRS 0x00440000
-+#define NIA_ENG_KG 0x00480000
-+#define NIA_ENG_PLCR 0x004C0000
-+#define NIA_ENG_BMI 0x00500000
-+#define NIA_ENG_QMI_ENQ 0x00540000
-+#define NIA_ENG_QMI_DEQ 0x00580000
-+#define NIA_ENG_MASK 0x007C0000
-+
-+#define NIA_FM_CTL_AC_CC 0x00000006
-+#define NIA_FM_CTL_AC_HC 0x0000000C
-+#define NIA_FM_CTL_AC_IND_MODE_TX 0x00000008
-+#define NIA_FM_CTL_AC_IND_MODE_RX 0x0000000A
-+#define NIA_FM_CTL_AC_FRAG 0x0000000e
-+#define NIA_FM_CTL_AC_PRE_FETCH 0x00000010
-+#define NIA_FM_CTL_AC_POST_FETCH_PCD 0x00000012
-+#define NIA_FM_CTL_AC_POST_FETCH_PCD_UDP_LEN 0x00000018
-+#define NIA_FM_CTL_AC_POST_FETCH_NO_PCD 0x00000012
-+#define NIA_FM_CTL_AC_FRAG_CHECK 0x00000014
-+#define NIA_FM_CTL_AC_PRE_CC 0x00000020
-+
-+
-+#define NIA_BMI_AC_ENQ_FRAME 0x00000002
-+#define NIA_BMI_AC_TX_RELEASE 0x000002C0
-+#define NIA_BMI_AC_RELEASE 0x000000C0
-+#define NIA_BMI_AC_DISCARD 0x000000C1
-+#define NIA_BMI_AC_TX 0x00000274
-+#define NIA_BMI_AC_FETCH 0x00000208
-+#define NIA_BMI_AC_MASK 0x000003FF
-+
-+#define NIA_KG_DIRECT 0x00000100
-+#define NIA_KG_CC_EN 0x00000200
-+#define NIA_PLCR_ABSOLUTE 0x00008000
-+
-+#define NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA 0x00000202
-+#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c
-+
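-+/*
-+ * Illustrative sketch (editor's addition, not part of the original header):
-+ * a next-invoked-action (NIA) word is typically composed by OR-ing an engine
-+ * selector with an action code, e.g. "enqueue the frame through the BMI".
-+ * The composition below is an assumption based on the NIA_ENG_MASK and
-+ * NIA_BMI_AC_MASK fields above, not something stated in this header.
-+ */
-+#if 0
-+static uint32_t nia_enqueue_example(void)
-+{
-+ return NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME; /* BMI engine, enqueue frame */
-+}
-+#endif
-+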
-+#endif /* __FMAN_COMMON_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_enet.h
-@@ -0,0 +1,273 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_ENET_H
-+#define __FSL_ENET_H
-+
-+/**
-+ @Description Ethernet MAC-PHY Interface
-+*/
-+
-+enum enet_interface {
-+ E_ENET_IF_MII = 0x00010000, /**< MII interface */
-+ E_ENET_IF_RMII = 0x00020000, /**< RMII interface */
-+ E_ENET_IF_SMII = 0x00030000, /**< SMII interface */
-+ E_ENET_IF_GMII = 0x00040000, /**< GMII interface */
-+ E_ENET_IF_RGMII = 0x00050000, /**< RGMII interface */
-+ E_ENET_IF_TBI = 0x00060000, /**< TBI interface */
-+ E_ENET_IF_RTBI = 0x00070000, /**< RTBI interface */
-+ E_ENET_IF_SGMII = 0x00080000, /**< SGMII interface */
-+ E_ENET_IF_XGMII = 0x00090000, /**< XGMII interface */
-+ E_ENET_IF_QSGMII = 0x000a0000, /**< QSGMII interface */
-+ E_ENET_IF_XFI = 0x000b0000 /**< XFI interface */
-+};
-+
-+/**
-+ @Description Ethernet Speed (nominal data rate)
-+*/
-+enum enet_speed {
-+ E_ENET_SPEED_10 = 10, /**< 10 Mbps */
-+ E_ENET_SPEED_100 = 100, /**< 100 Mbps */
-+ E_ENET_SPEED_1000 = 1000, /**< 1000 Mbps = 1 Gbps */
-+ E_ENET_SPEED_2500 = 2500, /**< 2500 Mbps = 2.5 Gbps */
-+ E_ENET_SPEED_10000 = 10000 /**< 10000 Mbps = 10 Gbps */
-+};
-+
-+enum mac_type {
-+ E_MAC_DTSEC,
-+ E_MAC_TGEC,
-+ E_MAC_MEMAC
-+};
-+
-+/**************************************************************************//**
-+ @Description Enum for inter-module interrupts registration
-+*//***************************************************************************/
-+enum fman_event_modules {
-+ E_FMAN_MOD_PRS, /**< Parser event */
-+ E_FMAN_MOD_KG, /**< Keygen event */
-+ E_FMAN_MOD_PLCR, /**< Policer event */
-+ E_FMAN_MOD_10G_MAC, /**< 10G MAC event */
-+ E_FMAN_MOD_1G_MAC, /**< 1G MAC event */
-+ E_FMAN_MOD_TMR, /**< Timer event */
-+ E_FMAN_MOD_FMAN_CTRL, /**< FMAN Controller Timer event */
-+ E_FMAN_MOD_MACSEC,
-+ E_FMAN_MOD_DUMMY_LAST
-+};
-+
-+/**************************************************************************//**
-+ @Description Enum for interrupts types
-+*//***************************************************************************/
-+enum fman_intr_type {
-+ E_FMAN_INTR_TYPE_ERR,
-+ E_FMAN_INTR_TYPE_NORMAL
-+};
-+
-+/**************************************************************************//**
-+ @Description enum for defining MAC types
-+*//***************************************************************************/
-+enum fman_mac_type {
-+ E_FMAN_MAC_10G = 0, /**< 10G MAC */
-+ E_FMAN_MAC_1G /**< 1G MAC */
-+};
-+
-+enum fman_mac_exceptions {
-+ E_FMAN_MAC_EX_10G_MDIO_SCAN_EVENTMDIO = 0,
-+ /**< 10GEC MDIO scan event interrupt */
-+ E_FMAN_MAC_EX_10G_MDIO_CMD_CMPL,
-+ /**< 10GEC MDIO command completion interrupt */
-+ E_FMAN_MAC_EX_10G_REM_FAULT,
-+ /**< 10GEC, mEMAC Remote fault interrupt */
-+ E_FMAN_MAC_EX_10G_LOC_FAULT,
-+ /**< 10GEC, mEMAC Local fault interrupt */
-+ E_FMAN_MAC_EX_10G_1TX_ECC_ER,
-+ /**< 10GEC, mEMAC Transmit frame ECC error interrupt */
-+ E_FMAN_MAC_EX_10G_TX_FIFO_UNFL,
-+ /**< 10GEC, mEMAC Transmit FIFO underflow interrupt */
-+ E_FMAN_MAC_EX_10G_TX_FIFO_OVFL,
-+ /**< 10GEC, mEMAC Transmit FIFO overflow interrupt */
-+ E_FMAN_MAC_EX_10G_TX_ER,
-+ /**< 10GEC Transmit frame error interrupt */
-+ E_FMAN_MAC_EX_10G_RX_FIFO_OVFL,
-+ /**< 10GEC, mEMAC Receive FIFO overflow interrupt */
-+ E_FMAN_MAC_EX_10G_RX_ECC_ER,
-+ /**< 10GEC, mEMAC Receive frame ECC error interrupt */
-+ E_FMAN_MAC_EX_10G_RX_JAB_FRM,
-+ /**< 10GEC Receive jabber frame interrupt */
-+ E_FMAN_MAC_EX_10G_RX_OVRSZ_FRM,
-+ /**< 10GEC Receive oversized frame interrupt */
-+ E_FMAN_MAC_EX_10G_RX_RUNT_FRM,
-+ /**< 10GEC Receive runt frame interrupt */
-+ E_FMAN_MAC_EX_10G_RX_FRAG_FRM,
-+ /**< 10GEC Receive fragment frame interrupt */
-+ E_FMAN_MAC_EX_10G_RX_LEN_ER,
-+ /**< 10GEC Receive payload length error interrupt */
-+ E_FMAN_MAC_EX_10G_RX_CRC_ER,
-+ /**< 10GEC Receive CRC error interrupt */
-+ E_FMAN_MAC_EX_10G_RX_ALIGN_ER,
-+ /**< 10GEC Receive alignment error interrupt */
-+ E_FMAN_MAC_EX_1G_BAB_RX,
-+ /**< dTSEC Babbling receive error */
-+ E_FMAN_MAC_EX_1G_RX_CTL,
-+ /**< dTSEC Receive control (pause frame) interrupt */
-+ E_FMAN_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET,
-+ /**< dTSEC Graceful transmit stop complete */
-+ E_FMAN_MAC_EX_1G_BAB_TX,
-+ /**< dTSEC Babbling transmit error */
-+ E_FMAN_MAC_EX_1G_TX_CTL,
-+ /**< dTSEC Transmit control (pause frame) interrupt */
-+ E_FMAN_MAC_EX_1G_TX_ERR,
-+ /**< dTSEC Transmit error */
-+ E_FMAN_MAC_EX_1G_LATE_COL,
-+ /**< dTSEC Late collision */
-+ E_FMAN_MAC_EX_1G_COL_RET_LMT,
-+ /**< dTSEC Collision retry limit */
-+ E_FMAN_MAC_EX_1G_TX_FIFO_UNDRN,
-+ /**< dTSEC Transmit FIFO underrun */
-+ E_FMAN_MAC_EX_1G_MAG_PCKT,
-+ /**< dTSEC Magic Packet detection */
-+ E_FMAN_MAC_EX_1G_MII_MNG_RD_COMPLET,
-+ /**< dTSEC MII management read completion */
-+ E_FMAN_MAC_EX_1G_MII_MNG_WR_COMPLET,
-+ /**< dTSEC MII management write completion */
-+ E_FMAN_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET,
-+ /**< dTSEC Graceful receive stop complete */
-+ E_FMAN_MAC_EX_1G_TX_DATA_ERR,
-+ /**< dTSEC Internal data error on transmit */
-+ E_FMAN_MAC_EX_1G_RX_DATA_ERR,
-+ /**< dTSEC Internal data error on receive */
-+ E_FMAN_MAC_EX_1G_1588_TS_RX_ERR,
-+ /**< dTSEC Time-Stamp Receive Error */
-+ E_FMAN_MAC_EX_1G_RX_MIB_CNT_OVFL,
-+ /**< dTSEC MIB counter overflow */
-+ E_FMAN_MAC_EX_TS_FIFO_ECC_ERR,
-+ /**< mEMAC Time-stamp FIFO ECC error interrupt;
-+ not supported on T4240/B4860 rev1 chips */
-+};
-+
-+#define ENET_IF_SGMII_BASEX 0x80000000
-+ /**< SGMII/QSGMII interface with 1000BaseX auto-negotiation between MAC
-+ and phy or backplane;
-+ Note: 1000BaseX auto-negotiation relates only to interface between MAC
-+ and phy/backplane, SGMII phy can still synchronize with far-end phy at
-+ 10Mbps, 100Mbps or 1000Mbps */
-+
-+enum enet_mode {
-+ E_ENET_MODE_INVALID = 0,
-+ /**< Invalid Ethernet mode */
-+ E_ENET_MODE_MII_10 = (E_ENET_IF_MII | E_ENET_SPEED_10),
-+ /**< 10 Mbps MII */
-+ E_ENET_MODE_MII_100 = (E_ENET_IF_MII | E_ENET_SPEED_100),
-+ /**< 100 Mbps MII */
-+ E_ENET_MODE_RMII_10 = (E_ENET_IF_RMII | E_ENET_SPEED_10),
-+ /**< 10 Mbps RMII */
-+ E_ENET_MODE_RMII_100 = (E_ENET_IF_RMII | E_ENET_SPEED_100),
-+ /**< 100 Mbps RMII */
-+ E_ENET_MODE_SMII_10 = (E_ENET_IF_SMII | E_ENET_SPEED_10),
-+ /**< 10 Mbps SMII */
-+ E_ENET_MODE_SMII_100 = (E_ENET_IF_SMII | E_ENET_SPEED_100),
-+ /**< 100 Mbps SMII */
-+ E_ENET_MODE_GMII_1000 = (E_ENET_IF_GMII | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps GMII */
-+ E_ENET_MODE_RGMII_10 = (E_ENET_IF_RGMII | E_ENET_SPEED_10),
-+ /**< 10 Mbps RGMII */
-+ E_ENET_MODE_RGMII_100 = (E_ENET_IF_RGMII | E_ENET_SPEED_100),
-+ /**< 100 Mbps RGMII */
-+ E_ENET_MODE_RGMII_1000 = (E_ENET_IF_RGMII | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps RGMII */
-+ E_ENET_MODE_TBI_1000 = (E_ENET_IF_TBI | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps TBI */
-+ E_ENET_MODE_RTBI_1000 = (E_ENET_IF_RTBI | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps RTBI */
-+ E_ENET_MODE_SGMII_10 = (E_ENET_IF_SGMII | E_ENET_SPEED_10),
-+ /**< 10 Mbps SGMII with auto-negotiation between MAC and
-+ SGMII phy according to Cisco SGMII specification */
-+ E_ENET_MODE_SGMII_100 = (E_ENET_IF_SGMII | E_ENET_SPEED_100),
-+ /**< 100 Mbps SGMII with auto-negotiation between MAC and
-+ SGMII phy according to Cisco SGMII specification */
-+ E_ENET_MODE_SGMII_1000 = (E_ENET_IF_SGMII | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps SGMII with auto-negotiation between MAC and
-+ SGMII phy according to Cisco SGMII specification */
-+ E_ENET_MODE_SGMII_BASEX_10 = (ENET_IF_SGMII_BASEX | E_ENET_IF_SGMII
-+ | E_ENET_SPEED_10),
-+ /**< 10 Mbps SGMII with 1000BaseX auto-negotiation between
-+ MAC and SGMII phy or backplane */
-+ E_ENET_MODE_SGMII_BASEX_100 = (ENET_IF_SGMII_BASEX | E_ENET_IF_SGMII
-+ | E_ENET_SPEED_100),
-+ /**< 100 Mbps SGMII with 1000BaseX auto-negotiation between
-+ MAC and SGMII phy or backplane */
-+ E_ENET_MODE_SGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | E_ENET_IF_SGMII
-+ | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps SGMII with 1000BaseX auto-negotiation between
-+ MAC and SGMII phy or backplane */
-+ E_ENET_MODE_QSGMII_1000 = (E_ENET_IF_QSGMII | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps QSGMII with auto-negotiation between MAC and
-+ QSGMII phy according to Cisco QSGMII specification */
-+ E_ENET_MODE_QSGMII_BASEX_1000 = (ENET_IF_SGMII_BASEX | E_ENET_IF_QSGMII
-+ | E_ENET_SPEED_1000),
-+ /**< 1000 Mbps QSGMII with 1000BaseX auto-negotiation between
-+ MAC and QSGMII phy or backplane */
-+ E_ENET_MODE_XGMII_10000 = (E_ENET_IF_XGMII | E_ENET_SPEED_10000),
-+ /**< 10000 Mbps XGMII */
-+ E_ENET_MODE_XFI_10000 = (E_ENET_IF_XFI | E_ENET_SPEED_10000)
-+ /**< 10000 Mbps XFI */
-+};
-+
-+enum fmam_mac_statistics_level {
-+ E_FMAN_MAC_NONE_STATISTICS, /**< No statistics */
-+ E_FMAN_MAC_PARTIAL_STATISTICS, /**< Only error counters are available;
-+ Optimized for performance */
-+ E_FMAN_MAC_FULL_STATISTICS /**< All counters available; Not
-+ optimized for performance */
-+};
-+
-+#define _MAKE_ENET_MODE(_interface, _speed) (enum enet_mode)((_interface) \
-+ | (_speed))
-+
-+#define _ENET_INTERFACE_FROM_MODE(mode) (enum enet_interface) \
-+ ((mode) & 0x0FFF0000)
-+#define _ENET_SPEED_FROM_MODE(mode) (enum enet_speed)((mode) & 0x0000FFFF)
-+#define _ENET_ADDR_TO_UINT64(_enet_addr) \
-+ (uint64_t)(((uint64_t)(_enet_addr)[0] << 40) | \
-+ ((uint64_t)(_enet_addr)[1] << 32) | \
-+ ((uint64_t)(_enet_addr)[2] << 24) | \
-+ ((uint64_t)(_enet_addr)[3] << 16) | \
-+ ((uint64_t)(_enet_addr)[4] << 8) | \
-+ ((uint64_t)(_enet_addr)[5]))
-+
-+#define _MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
-+ do { \
-+ int i; \
-+ for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) \
-+ (_enet_addr)[i] = (uint8_t)((_addr64) >> ((5-i)*8));\
-+ } while (0)
-+
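-+/*
-+ * Illustrative sketch (editor's addition, not part of the original header):
-+ * composes an Ethernet mode and round-trips a MAC address through the helper
-+ * macros above. ENET_NUM_OCTETS_PER_ADDRESS (assumed to be 6) is expected to
-+ * come from the surrounding SDK headers; it is not defined in this file.
-+ */
-+#if 0
-+static void enet_macros_example(void)
-+{
-+ uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
-+ uint8_t mac_out[6];
-+ uint64_t mac64;
-+ enum enet_mode mode;
-+
-+ mode = _MAKE_ENET_MODE(E_ENET_IF_SGMII, E_ENET_SPEED_1000);
-+ /* _ENET_SPEED_FROM_MODE(mode) == E_ENET_SPEED_1000 */
-+ /* _ENET_INTERFACE_FROM_MODE(mode) == E_ENET_IF_SGMII */
-+
-+ mac64 = _ENET_ADDR_TO_UINT64(mac); /* 6 octets packed into 48 bits */
-+ _MAKE_ENET_ADDR_FROM_UINT64(mac64, mac_out); /* mac_out equals mac again */
-+ (void)mode; (void)mac_out;
-+}
-+#endif
-+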
-+#endif /* __FSL_ENET_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman.h
-@@ -0,0 +1,825 @@
-+/*
-+ * Copyright 2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_H
-+#define __FSL_FMAN_H
-+
-+#include "common/general.h"
-+
-+struct fman_ext_pool_params {
-+ uint8_t id; /**< External buffer pool id */
-+ uint16_t size; /**< External buffer pool buffer size */
-+};
-+
-+struct fman_ext_pools {
-+ uint8_t num_pools_used; /**< Number of pools use by this port */
-+ struct fman_ext_pool_params *ext_buf_pool;
-+ /**< Parameters for each port */
-+};
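-+
-+/*
-+ * Illustrative sketch (editor's addition, not part of the original header):
-+ * describes two external buffer pools for a port. The pool ids and buffer
-+ * sizes are arbitrary example values.
-+ */
-+#if 0
-+static struct fman_ext_pool_params example_pool_params[] = {
-+ { .id = 3, .size = 2048 }, /* small-buffer pool */
-+ { .id = 4, .size = 9600 }, /* jumbo-buffer pool */
-+};
-+
-+static struct fman_ext_pools example_pools = {
-+ .num_pools_used = 2,
-+ .ext_buf_pool = example_pool_params,
-+};
-+#endif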
-+
-+struct fman_backup_bm_pools {
-+ uint8_t num_backup_pools; /**< Number of BM backup pools -
-+ must be smaller than the total number
-+ of pools defined for the specified
-+ port.*/
-+ uint8_t *pool_ids; /**< num_backup_pools pool IDs,
-+ specifying which pools should be used
-+ only as backup. Pool IDs specified
-+ here must be a subset of the pools
-+ used by the specified port. */
-+};
-+
-+/**************************************************************************//**
-+ @Description A structure for defining BM pool depletion criteria
-+*//***************************************************************************/
-+struct fman_buf_pool_depletion {
-+ bool buf_pool_depletion_enabled;
-+ bool pools_grp_mode_enable; /**< select mode in which pause frames
-+ will be sent after a number of pools
-+ (all together!) are depleted */
-+ uint8_t num_pools; /**< the number of depleted pools that
-+ will trigger pause frame transmission.
-+ */
-+ bool *pools_to_consider; /**< For each pool, TRUE if it should be
-+ considered for depletion (Note - this
-+ pool must be used by this port!). */
-+ bool single_pool_mode_enable; /**< select mode in which pause frames
-+ will be sent after a single pool
-+ is depleted. */
-+ bool *pools_to_consider_for_single_mode;
-+ /**< For each pool, TRUE if it should be
-+ considered for depletion (Note - this
-+ pool must be used by this port!) */
-+ bool has_pfc_priorities;
-+ bool *pfc_priorities_en; /**< This field is used by the MAC as
-+ the Priority Enable Vector in the PFC
-+ frame which is transmitted */
-+};
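
A hedged illustration of how a caller might fill this configuration structure (the pool indices and array sizing used here are arbitrary examples for the sketch, not values mandated by the driver):

/* Request pause frames once two of the port's pools (here ids 3 and 7,
 * assuming at least 8 BM pools) are depleted; single-pool mode and PFC
 * priorities are left disabled. The bool arrays must be sized by the
 * caller for the total number of BM pools. */
static void buf_pool_depletion_example(struct fman_buf_pool_depletion *cfg,
				       bool *grp_pools, bool *single_pools,
				       unsigned int num_bm_pools)
{
	unsigned int i;

	for (i = 0; i < num_bm_pools; i++) {
		grp_pools[i] = false;
		single_pools[i] = false;
	}
	grp_pools[3] = true;
	grp_pools[7] = true;

	cfg->buf_pool_depletion_enabled = true;
	cfg->pools_grp_mode_enable = true;
	cfg->num_pools = 2; /* both considered pools must be depleted */
	cfg->pools_to_consider = grp_pools;
	cfg->single_pool_mode_enable = false;
	cfg->pools_to_consider_for_single_mode = single_pools;
	cfg->has_pfc_priorities = false;
	cfg->pfc_priorities_en = NULL;
}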
-+
-+/**************************************************************************//**
-+ @Description Enum for defining port DMA swap mode
-+*//***************************************************************************/
-+enum fman_dma_swap_option {
-+ FMAN_DMA_NO_SWP, /**< No swap, transfer data as is.*/
-+ FMAN_DMA_SWP_PPC_LE, /**< The transferred data should be swapped
-+ in PowerPC Little Endian mode. */
-+ FMAN_DMA_SWP_BE /**< The transferred data should be swapped
-+ in Big Endian mode */
-+};
-+
-+/**************************************************************************//**
-+ @Description Enum for defining port DMA cache attributes
-+*//***************************************************************************/
-+enum fman_dma_cache_option {
-+ FMAN_DMA_NO_STASH = 0, /**< Cacheable, no Allocate (No Stashing) */
-+ FMAN_DMA_STASH = 1 /**< Cacheable and Allocate (Stashing on) */
-+};
-+
-+typedef struct t_FmPrsResult fm_prs_result_t;
-+typedef enum e_EnetMode enet_mode_t;
-+typedef t_Handle handle_t;
-+
-+struct fman_revision_info {
-+ uint8_t majorRev; /**< Major revision */
-+ uint8_t minorRev; /**< Minor revision */
-+};
-+
-+/* sizes */
-+#define CAPWAP_FRAG_EXTRA_SPACE 32
-+#define OFFSET_UNITS 16
-+#define MAX_INT_OFFSET 240
-+#define MAX_IC_SIZE 256
-+#define MAX_EXT_OFFSET 496
-+#define MAX_EXT_BUFFER_OFFSET 511
-+
-+/**************************************************************************
-+ @Description Memory Mapped Registers
-+***************************************************************************/
-+#define FMAN_LIODN_TBL 64 /* size of LIODN table */
-+
-+struct fman_fpm_regs {
-+ uint32_t fmfp_tnc; /**< FPM TNUM Control 0x00 */
-+ uint32_t fmfp_prc; /**< FPM Port_ID FmCtl Association 0x04 */
-+ uint32_t fmfp_brkc; /**< FPM Breakpoint Control 0x08 */
-+ uint32_t fmfp_mxd; /**< FPM Flush Control 0x0c */
-+ uint32_t fmfp_dist1; /**< FPM Dispatch Thresholds1 0x10 */
-+ uint32_t fmfp_dist2; /**< FPM Dispatch Thresholds2 0x14 */
-+ uint32_t fm_epi; /**< FM Error Pending Interrupts 0x18 */
-+ uint32_t fm_rie; /**< FM Error Interrupt Enable 0x1c */
-+ uint32_t fmfp_fcev[4]; /**< FPM FMan-Controller Event 1-4 0x20-0x2f */
-+ uint32_t res0030[4]; /**< res 0x30 - 0x3f */
-+ uint32_t fmfp_cee[4]; /**< FPM FMan-Controller Event 1-4 0x40-0x4f */
-+ uint32_t res0050[4]; /**< res 0x50-0x5f */
-+ uint32_t fmfp_tsc1; /**< FPM TimeStamp Control1 0x60 */
-+ uint32_t fmfp_tsc2; /**< FPM TimeStamp Control2 0x64 */
-+ uint32_t fmfp_tsp; /**< FPM Time Stamp 0x68 */
-+ uint32_t fmfp_tsf; /**< FPM Time Stamp Fraction 0x6c */
-+ uint32_t fm_rcr; /**< FM Rams Control 0x70 */
-+ uint32_t fmfp_extc; /**< FPM External Requests Control 0x74 */
-+ uint32_t fmfp_ext1; /**< FPM External Requests Config1 0x78 */
-+ uint32_t fmfp_ext2; /**< FPM External Requests Config2 0x7c */
-+ uint32_t fmfp_drd[16]; /**< FPM Data_Ram Data 0-15 0x80 - 0xbf */
-+ uint32_t fmfp_dra; /**< FPM Data Ram Access 0xc0 */
-+ uint32_t fm_ip_rev_1; /**< FM IP Block Revision 1 0xc4 */
-+ uint32_t fm_ip_rev_2; /**< FM IP Block Revision 2 0xc8 */
-+ uint32_t fm_rstc; /**< FM Reset Command 0xcc */
-+ uint32_t fm_cld; /**< FM Classifier Debug 0xd0 */
-+ uint32_t fm_npi; /**< FM Normal Pending Interrupts 0xd4 */
-+ uint32_t fmfp_exte; /**< FPM External Requests Enable 0xd8 */
-+ uint32_t fmfp_ee; /**< FPM Event & Mask 0xdc */
-+ uint32_t fmfp_cev[4]; /**< FPM CPU Event 1-4 0xe0-0xef */
-+ uint32_t res00f0[4]; /**< res 0xf0-0xff */
-+ uint32_t fmfp_ps[64]; /**< FPM Port Status 0x100-0x1ff */
-+ uint32_t fmfp_clfabc; /**< FPM CLFABC 0x200 */
-+ uint32_t fmfp_clfcc; /**< FPM CLFCC 0x204 */
-+ uint32_t fmfp_clfaval; /**< FPM CLFAVAL 0x208 */
-+ uint32_t fmfp_clfbval; /**< FPM CLFBVAL 0x20c */
-+ uint32_t fmfp_clfcval; /**< FPM CLFCVAL 0x210 */
-+ uint32_t fmfp_clfamsk; /**< FPM CLFAMSK 0x214 */
-+ uint32_t fmfp_clfbmsk; /**< FPM CLFBMSK 0x218 */
-+ uint32_t fmfp_clfcmsk; /**< FPM CLFCMSK 0x21c */
-+ uint32_t fmfp_clfamc; /**< FPM CLFAMC 0x220 */
-+ uint32_t fmfp_clfbmc; /**< FPM CLFBMC 0x224 */
-+ uint32_t fmfp_clfcmc; /**< FPM CLFCMC 0x228 */
-+ uint32_t fmfp_decceh; /**< FPM DECCEH 0x22c */
-+ uint32_t res0230[116]; /**< res 0x230 - 0x3ff */
-+ uint32_t fmfp_ts[128]; /**< 0x400: FPM Task Status 0x400 - 0x5ff */
-+ uint32_t res0600[0x400 - 384];
-+};
-+
-+struct fman_bmi_regs {
-+ uint32_t fmbm_init; /**< BMI Initialization 0x00 */
-+ uint32_t fmbm_cfg1; /**< BMI Configuration 1 0x04 */
-+ uint32_t fmbm_cfg2; /**< BMI Configuration 2 0x08 */
-+ uint32_t res000c[5]; /**< 0x0c - 0x1f */
-+ uint32_t fmbm_ievr; /**< Interrupt Event Register 0x20 */
-+ uint32_t fmbm_ier; /**< Interrupt Enable Register 0x24 */
-+ uint32_t fmbm_ifr; /**< Interrupt Force Register 0x28 */
-+ uint32_t res002c[5]; /**< 0x2c - 0x3f */
-+ uint32_t fmbm_arb[8]; /**< BMI Arbitration 0x40 - 0x5f */
-+ uint32_t res0060[12]; /**<0x60 - 0x8f */
-+ uint32_t fmbm_dtc[3]; /**< Debug Trap Counter 0x90 - 0x9b */
-+ uint32_t res009c; /**< 0x9c */
-+ uint32_t fmbm_dcv[3][4]; /**< Debug Compare val 0xa0-0xcf */
-+ uint32_t fmbm_dcm[3][4]; /**< Debug Compare Mask 0xd0-0xff */
-+ uint32_t fmbm_gde; /**< BMI Global Debug Enable 0x100 */
-+ uint32_t fmbm_pp[63]; /**< BMI Port Parameters 0x104 - 0x1ff */
-+ uint32_t res0200; /**< 0x200 */
-+ uint32_t fmbm_pfs[63]; /**< BMI Port FIFO Size 0x204 - 0x2ff */
-+ uint32_t res0300; /**< 0x300 */
-+ uint32_t fmbm_spliodn[63]; /**< Port Partition ID 0x304 - 0x3ff */
-+};
-+
-+struct fman_qmi_regs {
-+ uint32_t fmqm_gc; /**< General Configuration Register 0x00 */
-+ uint32_t res0004; /**< 0x04 */
-+ uint32_t fmqm_eie; /**< Error Interrupt Event Register 0x08 */
-+ uint32_t fmqm_eien; /**< Error Interrupt Enable Register 0x0c */
-+ uint32_t fmqm_eif; /**< Error Interrupt Force Register 0x10 */
-+ uint32_t fmqm_ie; /**< Interrupt Event Register 0x14 */
-+ uint32_t fmqm_ien; /**< Interrupt Enable Register 0x18 */
-+ uint32_t fmqm_if; /**< Interrupt Force Register 0x1c */
-+ uint32_t fmqm_gs; /**< Global Status Register 0x20 */
-+ uint32_t fmqm_ts; /**< Task Status Register 0x24 */
-+ uint32_t fmqm_etfc; /**< Enqueue Total Frame Counter 0x28 */
-+ uint32_t fmqm_dtfc; /**< Dequeue Total Frame Counter 0x2c */
-+ uint32_t fmqm_dc0; /**< Dequeue Counter 0 0x30 */
-+ uint32_t fmqm_dc1; /**< Dequeue Counter 1 0x34 */
-+ uint32_t fmqm_dc2; /**< Dequeue Counter 2 0x38 */
-+ uint32_t fmqm_dc3; /**< Dequeue Counter 3 0x3c */
-+ uint32_t fmqm_dfdc; /**< Dequeue FQID from Default Counter 0x40 */
-+ uint32_t fmqm_dfcc; /**< Dequeue FQID from Context Counter 0x44 */
-+ uint32_t fmqm_dffc; /**< Dequeue FQID from FD Counter 0x48 */
-+ uint32_t fmqm_dcc; /**< Dequeue Confirm Counter 0x4c */
-+ uint32_t res0050[7]; /**< 0x50 - 0x6b */
-+ uint32_t fmqm_tapc; /**< Tnum Aging Period Control 0x6c */
-+ uint32_t fmqm_dmcvc; /**< Dequeue MAC Command Valid Counter 0x70 */
-+ uint32_t fmqm_difdcc; /**< Dequeue Invalid FD Command Counter 0x74 */
-+ uint32_t fmqm_da1v; /**< Dequeue A1 Valid Counter 0x78 */
-+ uint32_t res007c; /**< 0x7c */
-+ uint32_t fmqm_dtc; /**< 0x80 Debug Trap Counter 0x80 */
-+ uint32_t fmqm_efddd; /**< 0x84 Enqueue Frame desc Dynamic dbg 0x84 */
-+ uint32_t res0088[2]; /**< 0x88 - 0x8f */
-+ struct {
-+ uint32_t fmqm_dtcfg1; /**< 0x90 dbg trap cfg 1 Register 0x00 */
-+ uint32_t fmqm_dtval1; /**< Debug Trap Value 1 Register 0x04 */
-+ uint32_t fmqm_dtm1; /**< Debug Trap Mask 1 Register 0x08 */
-+ uint32_t fmqm_dtc1; /**< Debug Trap Counter 1 Register 0x0c */
-+ uint32_t fmqm_dtcfg2; /**< dbg Trap cfg 2 Register 0x10 */
-+ uint32_t fmqm_dtval2; /**< Debug Trap Value 2 Register 0x14 */
-+ uint32_t fmqm_dtm2; /**< Debug Trap Mask 2 Register 0x18 */
-+ uint32_t res001c; /**< 0x1c */
-+ } dbg_traps[3]; /**< 0x90 - 0xef */
-+ uint8_t res00f0[0x400 - 0xf0]; /**< 0xf0 - 0x3ff */
-+};
-+
-+struct fman_dma_regs {
-+ uint32_t fmdmsr; /**< FM DMA status register 0x00 */
-+ uint32_t fmdmmr; /**< FM DMA mode register 0x04 */
-+ uint32_t fmdmtr; /**< FM DMA bus threshold register 0x08 */
-+ uint32_t fmdmhy; /**< FM DMA bus hysteresis register 0x0c */
-+ uint32_t fmdmsetr; /**< FM DMA SOS emergency Threshold Register 0x10 */
-+ uint32_t fmdmtah; /**< FM DMA transfer bus address high reg 0x14 */
-+ uint32_t fmdmtal; /**< FM DMA transfer bus address low reg 0x18 */
-+ uint32_t fmdmtcid; /**< FM DMA transfer bus communication ID reg 0x1c */
-+ uint32_t fmdmra; /**< FM DMA bus internal ram address register 0x20 */
-+ uint32_t fmdmrd; /**< FM DMA bus internal ram data register 0x24 */
-+ uint32_t fmdmwcr; /**< FM DMA CAM watchdog counter value 0x28 */
-+ uint32_t fmdmebcr; /**< FM DMA CAM base in MURAM register 0x2c */
-+ uint32_t fmdmccqdr; /**< FM DMA CAM and CMD Queue Debug reg 0x30 */
-+ uint32_t fmdmccqvr1; /**< FM DMA CAM and CMD Queue Value reg #1 0x34 */
-+ uint32_t fmdmccqvr2; /**< FM DMA CAM and CMD Queue Value reg #2 0x38 */
-+ uint32_t fmdmcqvr3; /**< FM DMA CMD Queue Value register #3 0x3c */
-+ uint32_t fmdmcqvr4; /**< FM DMA CMD Queue Value register #4 0x40 */
-+ uint32_t fmdmcqvr5; /**< FM DMA CMD Queue Value register #5 0x44 */
-+ uint32_t fmdmsefrc; /**< FM DMA Semaphore Entry Full Reject Cntr 0x48 */
-+ uint32_t fmdmsqfrc; /**< FM DMA Semaphore Queue Full Reject Cntr 0x4c */
-+ uint32_t fmdmssrc; /**< FM DMA Semaphore SYNC Reject Counter 0x50 */
-+ uint32_t fmdmdcr; /**< FM DMA Debug Counter 0x54 */
-+ uint32_t fmdmemsr; /**< FM DMA Emergency Smoother Register 0x58 */
-+ uint32_t res005c; /**< 0x5c */
-+ uint32_t fmdmplr[FMAN_LIODN_TBL / 2]; /**< DMA LIODN regs 0x60-0xdf */
-+ uint32_t res00e0[0x400 - 56];
-+};
-+
-+struct fman_rg {
-+ struct fman_fpm_regs *fpm_rg;
-+ struct fman_dma_regs *dma_rg;
-+ struct fman_bmi_regs *bmi_rg;
-+ struct fman_qmi_regs *qmi_rg;
-+};
-+
-+enum fman_dma_cache_override {
-+ E_FMAN_DMA_NO_CACHE_OR = 0, /**< No override of the Cache field */
-+ E_FMAN_DMA_NO_STASH_DATA, /**< No data stashing in system level cache */
-+ E_FMAN_DMA_MAY_STASH_DATA, /**< Stashing allowed in sys level cache */
-+ E_FMAN_DMA_STASH_DATA /**< Stashing performed in system level cache */
-+};
-+
-+enum fman_dma_aid_mode {
-+ E_FMAN_DMA_AID_OUT_PORT_ID = 0, /**< 4 LSB of PORT_ID */
-+ E_FMAN_DMA_AID_OUT_TNUM /**< 4 LSB of TNUM */
-+};
-+
-+enum fman_dma_dbg_cnt_mode {
-+ E_FMAN_DMA_DBG_NO_CNT = 0, /**< No counting */
-+ E_FMAN_DMA_DBG_CNT_DONE, /**< Count DONE commands */
-+ E_FMAN_DMA_DBG_CNT_COMM_Q_EM, /**< command Q emergency signal */
-+ E_FMAN_DMA_DBG_CNT_INT_READ_EM, /**< Read buf emergency signal */
-+ E_FMAN_DMA_DBG_CNT_INT_WRITE_EM, /**< Write buf emergency signal */
-+ E_FMAN_DMA_DBG_CNT_FPM_WAIT, /**< FPM WAIT signal */
-+ E_FMAN_DMA_DBG_CNT_SIGLE_BIT_ECC, /**< Single bit ECC errors */
-+ E_FMAN_DMA_DBG_CNT_RAW_WAR_PROT /**< RAW & WAR protection counter */
-+};
-+
-+enum fman_dma_emergency_level {
-+ E_FMAN_DMA_EM_EBS = 0, /**< EBS emergency */
-+ E_FMAN_DMA_EM_SOS /**< SOS emergency */
-+};
-+
-+enum fman_catastrophic_err {
-+ E_FMAN_CATAST_ERR_STALL_PORT = 0, /**< Port_ID is stalled; reset required */
-+ E_FMAN_CATAST_ERR_STALL_TASK /**< Only erroneous task is stalled */
-+};
-+
-+enum fman_dma_err {
-+ E_FMAN_DMA_ERR_CATASTROPHIC = 0, /**< Catastrophic DMA error */
-+ E_FMAN_DMA_ERR_REPORT /**< Reported DMA error */
-+};
-+
-+struct fman_cfg {
-+ uint16_t liodn_bs_pr_port[FMAN_LIODN_TBL];/* base per port */
-+ bool en_counters;
-+ uint8_t disp_limit_tsh;
-+ uint8_t prs_disp_tsh;
-+ uint8_t plcr_disp_tsh;
-+ uint8_t kg_disp_tsh;
-+ uint8_t bmi_disp_tsh;
-+ uint8_t qmi_enq_disp_tsh;
-+ uint8_t qmi_deq_disp_tsh;
-+ uint8_t fm_ctl1_disp_tsh;
-+ uint8_t fm_ctl2_disp_tsh;
-+ enum fman_dma_cache_override dma_cache_override;
-+ enum fman_dma_aid_mode dma_aid_mode;
-+ bool dma_aid_override;
-+ uint8_t dma_axi_dbg_num_of_beats;
-+ uint8_t dma_cam_num_of_entries;
-+ uint32_t dma_watchdog;
-+ uint8_t dma_comm_qtsh_asrt_emer;
-+ uint8_t dma_write_buf_tsh_asrt_emer;
-+ uint8_t dma_read_buf_tsh_asrt_emer;
-+ uint8_t dma_comm_qtsh_clr_emer;
-+ uint8_t dma_write_buf_tsh_clr_emer;
-+ uint8_t dma_read_buf_tsh_clr_emer;
-+ uint32_t dma_sos_emergency;
-+ enum fman_dma_dbg_cnt_mode dma_dbg_cnt_mode;
-+ bool dma_stop_on_bus_error;
-+ bool dma_en_emergency;
-+ uint32_t dma_emergency_bus_select;
-+ enum fman_dma_emergency_level dma_emergency_level;
-+ bool dma_en_emergency_smoother;
-+ uint32_t dma_emergency_switch_counter;
-+ bool halt_on_external_activ;
-+ bool halt_on_unrecov_ecc_err;
-+ enum fman_catastrophic_err catastrophic_err;
-+ enum fman_dma_err dma_err;
-+ bool en_muram_test_mode;
-+ bool en_iram_test_mode;
-+ bool external_ecc_rams_enable;
-+ uint16_t tnum_aging_period;
-+ uint32_t exceptions;
-+ uint16_t clk_freq;
-+ bool pedantic_dma;
-+ uint32_t cam_base_addr;
-+ uint32_t fifo_base_addr;
-+ uint32_t total_fifo_size;
-+ uint8_t total_num_of_tasks;
-+ bool qmi_deq_option_support;
-+ uint32_t qmi_def_tnums_thresh;
-+ bool fman_partition_array;
-+ uint8_t num_of_fman_ctrl_evnt_regs;
-+};
-+
-+/**************************************************************************//**
-+ @Description Exceptions
-+*//***************************************************************************/
-+#define FMAN_EX_DMA_BUS_ERROR 0x80000000
-+#define FMAN_EX_DMA_READ_ECC 0x40000000
-+#define FMAN_EX_DMA_SYSTEM_WRITE_ECC 0x20000000
-+#define FMAN_EX_DMA_FM_WRITE_ECC 0x10000000
-+#define FMAN_EX_FPM_STALL_ON_TASKS 0x08000000
-+#define FMAN_EX_FPM_SINGLE_ECC 0x04000000
-+#define FMAN_EX_FPM_DOUBLE_ECC 0x02000000
-+#define FMAN_EX_QMI_SINGLE_ECC 0x01000000
-+#define FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
-+#define FMAN_EX_QMI_DOUBLE_ECC 0x00400000
-+#define FMAN_EX_BMI_LIST_RAM_ECC 0x00200000
-+#define FMAN_EX_BMI_PIPELINE_ECC 0x00100000
-+#define FMAN_EX_BMI_STATISTICS_RAM_ECC 0x00080000
-+#define FMAN_EX_IRAM_ECC 0x00040000
-+#define FMAN_EX_NURAM_ECC 0x00020000
-+#define FMAN_EX_BMI_DISPATCH_RAM_ECC 0x00010000
-+
-+enum fman_exceptions {
-+ E_FMAN_EX_DMA_BUS_ERROR = 0, /**< DMA bus error. */
-+ E_FMAN_EX_DMA_READ_ECC, /**< Read Buffer ECC error */
-+ E_FMAN_EX_DMA_SYSTEM_WRITE_ECC, /**< Write Buffer ECC err on sys side */
-+ E_FMAN_EX_DMA_FM_WRITE_ECC, /**< Write Buffer ECC error on FM side */
-+ E_FMAN_EX_FPM_STALL_ON_TASKS, /**< Stall of tasks on FPM */
-+ E_FMAN_EX_FPM_SINGLE_ECC, /**< Single ECC on FPM. */
-+ E_FMAN_EX_FPM_DOUBLE_ECC, /**< Double ECC error on FPM ram access */
-+ E_FMAN_EX_QMI_SINGLE_ECC, /**< Single ECC on QMI. */
-+ E_FMAN_EX_QMI_DOUBLE_ECC, /**< Double bit ECC occurred on QMI */
-+ E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/**< DeQ from unknown port id */
-+ E_FMAN_EX_BMI_LIST_RAM_ECC, /**< Linked List RAM ECC error */
-+ E_FMAN_EX_BMI_STORAGE_PROFILE_ECC, /**< storage profile */
-+ E_FMAN_EX_BMI_STATISTICS_RAM_ECC, /**< Statistics RAM ECC Err Enable */
-+ E_FMAN_EX_BMI_DISPATCH_RAM_ECC, /**< Dispatch RAM ECC Error Enable */
-+ E_FMAN_EX_IRAM_ECC, /**< Double bit ECC occurred on IRAM*/
-+ E_FMAN_EX_MURAM_ECC /**< Double bit ECC occurred on MURAM*/
-+};
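
The FMAN_EX_* bits above and enum fman_exceptions describe the same exception sources in two forms: a bitmask (used in fman_cfg.exceptions) and an enum (used by fman_set_exception()). A hedged sketch of the name-for-name translation between them follows; only a few cases are shown, and the SDK's own mapping code (which is not part of this header) may be structured differently:

static uint32_t fman_exception_to_mask_example(enum fman_exceptions ex)
{
	/* Illustrative subset; a full implementation covers every value. */
	switch (ex) {
	case E_FMAN_EX_DMA_BUS_ERROR:
		return FMAN_EX_DMA_BUS_ERROR;
	case E_FMAN_EX_DMA_READ_ECC:
		return FMAN_EX_DMA_READ_ECC;
	case E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
		return FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
	case E_FMAN_EX_BMI_DISPATCH_RAM_ECC:
		return FMAN_EX_BMI_DISPATCH_RAM_ECC;
	default:
		return 0; /* remaining exceptions omitted in this sketch */
	}
}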
-+
-+enum fman_counters {
-+ E_FMAN_COUNTERS_ENQ_TOTAL_FRAME = 0, /**< QMI tot enQ frames counter */
-+ E_FMAN_COUNTERS_DEQ_TOTAL_FRAME, /**< QMI tot deQ frames counter */
-+ E_FMAN_COUNTERS_DEQ_0, /**< QMI 0 frames from QMan counter */
-+ E_FMAN_COUNTERS_DEQ_1, /**< QMI 1 frames from QMan counter */
-+ E_FMAN_COUNTERS_DEQ_2, /**< QMI 2 frames from QMan counter */
-+ E_FMAN_COUNTERS_DEQ_3, /**< QMI 3 frames from QMan counter */
-+ E_FMAN_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI deQ from dflt queue cntr */
-+ E_FMAN_COUNTERS_DEQ_FROM_CONTEXT, /**< QMI deQ from FQ context cntr */
-+ E_FMAN_COUNTERS_DEQ_FROM_FD, /**< QMI deQ from FD command field cntr */
-+ E_FMAN_COUNTERS_DEQ_CONFIRM, /**< QMI dequeue confirm counter */
-+ E_FMAN_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT, /**< DMA full entry cntr */
-+ E_FMAN_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT, /**< DMA full CAM Q cntr */
-+ E_FMAN_COUNTERS_SEMAPHOR_SYNC_REJECT /**< DMA sync counter */
-+};
-+
-+#define FPM_PRT_FM_CTL1 0x00000001
-+#define FPM_PRT_FM_CTL2 0x00000002
-+
-+/**************************************************************************//**
-+ @Description DMA definitions
-+*//***************************************************************************/
-+
-+/* masks */
-+#define DMA_MODE_AID_OR 0x20000000
-+#define DMA_MODE_SBER 0x10000000
-+#define DMA_MODE_BER 0x00200000
-+#define DMA_MODE_EB 0x00100000
-+#define DMA_MODE_ECC 0x00000020
-+#define DMA_MODE_PRIVILEGE_PROT 0x00001000
-+#define DMA_MODE_SECURE_PROT 0x00000800
-+#define DMA_MODE_EMER_READ 0x00080000
-+#define DMA_MODE_EMER_WRITE 0x00040000
-+#define DMA_MODE_CACHE_OR_MASK 0xC0000000
-+#define DMA_MODE_CEN_MASK 0x0000E000
-+#define DMA_MODE_DBG_MASK 0x00000380
-+#define DMA_MODE_AXI_DBG_MASK 0x0F000000
-+
-+#define DMA_EMSR_EMSTR_MASK 0x0000FFFF
-+
-+#define DMA_TRANSFER_PORTID_MASK 0xFF000000
-+#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
-+#define DMA_TRANSFER_LIODN_MASK 0x00000FFF
-+
-+#define DMA_HIGH_LIODN_MASK 0x0FFF0000
-+#define DMA_LOW_LIODN_MASK 0x00000FFF
-+
-+#define DMA_STATUS_CMD_QUEUE_NOT_EMPTY 0x10000000
-+#define DMA_STATUS_BUS_ERR 0x08000000
-+#define DMA_STATUS_READ_ECC 0x04000000
-+#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
-+#define DMA_STATUS_FM_WRITE_ECC 0x01000000
-+#define DMA_STATUS_SYSTEM_DPEXT_ECC 0x00800000
-+#define DMA_STATUS_FM_DPEXT_ECC 0x00400000
-+#define DMA_STATUS_SYSTEM_DPDAT_ECC 0x00200000
-+#define DMA_STATUS_FM_DPDAT_ECC 0x00100000
-+#define DMA_STATUS_FM_SPDAT_ECC 0x00080000
-+
-+#define FM_LIODN_BASE_MASK 0x00000FFF
-+
-+/* shifts */
-+#define DMA_MODE_CACHE_OR_SHIFT 30
-+#define DMA_MODE_BUS_PRI_SHIFT 16
-+#define DMA_MODE_AXI_DBG_SHIFT 24
-+#define DMA_MODE_CEN_SHIFT 13
-+#define DMA_MODE_BUS_PROT_SHIFT 10
-+#define DMA_MODE_DBG_SHIFT 7
-+#define DMA_MODE_EMER_LVL_SHIFT 6
-+#define DMA_MODE_AID_MODE_SHIFT 4
-+#define DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS 16
-+#define DMA_MODE_MAX_CAM_NUM_OF_ENTRIES 32
-+
-+#define DMA_THRESH_COMMQ_SHIFT 24
-+#define DMA_THRESH_READ_INT_BUF_SHIFT 16
-+
-+#define DMA_LIODN_SHIFT 16
-+
-+#define DMA_TRANSFER_PORTID_SHIFT 24
-+#define DMA_TRANSFER_TNUM_SHIFT 16
-+
-+/* sizes */
-+#define DMA_MAX_WATCHDOG 0xffffffff
-+
-+/* others */
-+#define DMA_CAM_SIZEOF_ENTRY 0x40
-+#define DMA_CAM_ALIGN 0x1000
-+#define DMA_CAM_UNITS 8
-+
-+/**************************************************************************//**
-+ @Description General defines
-+*//***************************************************************************/
-+
-+#define FM_DEBUG_STATUS_REGISTER_OFFSET 0x000d1084UL
-+#define FM_UCODE_DEBUG_INSTRUCTION 0x6ffff805UL
-+
-+/**************************************************************************//**
-+ @Description FPM defines
-+*//***************************************************************************/
-+
-+/* masks */
-+#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
-+#define FPM_EV_MASK_STALL 0x40000000
-+#define FPM_EV_MASK_SINGLE_ECC 0x20000000
-+#define FPM_EV_MASK_RELEASE_FM 0x00010000
-+#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
-+#define FPM_EV_MASK_STALL_EN 0x00004000
-+#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
-+#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
-+#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004
-+
-+#define FPM_RAM_RAMS_ECC_EN 0x80000000
-+#define FPM_RAM_IRAM_ECC_EN 0x40000000
-+#define FPM_RAM_MURAM_ECC 0x00008000
-+#define FPM_RAM_IRAM_ECC 0x00004000
-+#define FPM_RAM_MURAM_TEST_ECC 0x20000000
-+#define FPM_RAM_IRAM_TEST_ECC 0x10000000
-+#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000
-+
-+#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
-+#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
-+
-+#define FPM_REV1_MAJOR_MASK 0x0000FF00
-+#define FPM_REV1_MINOR_MASK 0x000000FF
-+
-+#define FPM_REV2_INTEG_MASK 0x00FF0000
-+#define FPM_REV2_ERR_MASK 0x0000FF00
-+#define FPM_REV2_CFG_MASK 0x000000FF
-+
-+#define FPM_TS_FRACTION_MASK 0x0000FFFF
-+#define FPM_TS_CTL_EN 0x80000000
-+
-+#define FPM_PRC_REALSE_STALLED 0x00800000
-+
-+#define FPM_PS_STALLED 0x00800000
-+#define FPM_PS_FM_CTL1_SEL 0x80000000
-+#define FPM_PS_FM_CTL2_SEL 0x40000000
-+#define FPM_PS_FM_CTL_SEL_MASK (FPM_PS_FM_CTL1_SEL | FPM_PS_FM_CTL2_SEL)
-+
-+#define FPM_RSTC_FM_RESET 0x80000000
-+#define FPM_RSTC_10G0_RESET 0x04000000
-+#define FPM_RSTC_1G0_RESET 0x40000000
-+#define FPM_RSTC_1G1_RESET 0x20000000
-+#define FPM_RSTC_1G2_RESET 0x10000000
-+#define FPM_RSTC_1G3_RESET 0x08000000
-+#define FPM_RSTC_1G4_RESET 0x02000000
-+
-+
-+#define FPM_DISP_LIMIT_MASK 0x1F000000
-+#define FPM_THR1_PRS_MASK 0xFF000000
-+#define FPM_THR1_KG_MASK 0x00FF0000
-+#define FPM_THR1_PLCR_MASK 0x0000FF00
-+#define FPM_THR1_BMI_MASK 0x000000FF
-+
-+#define FPM_THR2_QMI_ENQ_MASK 0xFF000000
-+#define FPM_THR2_QMI_DEQ_MASK 0x000000FF
-+#define FPM_THR2_FM_CTL1_MASK 0x00FF0000
-+#define FPM_THR2_FM_CTL2_MASK 0x0000FF00
-+
-+/* shifts */
-+#define FPM_DISP_LIMIT_SHIFT 24
-+
-+#define FPM_THR1_PRS_SHIFT 24
-+#define FPM_THR1_KG_SHIFT 16
-+#define FPM_THR1_PLCR_SHIFT 8
-+#define FPM_THR1_BMI_SHIFT 0
-+
-+#define FPM_THR2_QMI_ENQ_SHIFT 24
-+#define FPM_THR2_QMI_DEQ_SHIFT 0
-+#define FPM_THR2_FM_CTL1_SHIFT 16
-+#define FPM_THR2_FM_CTL2_SHIFT 8
-+
-+#define FPM_EV_MASK_CAT_ERR_SHIFT 1
-+#define FPM_EV_MASK_DMA_ERR_SHIFT 0
-+
-+#define FPM_REV1_MAJOR_SHIFT 8
-+#define FPM_REV1_MINOR_SHIFT 0
-+
-+#define FPM_REV2_INTEG_SHIFT 16
-+#define FPM_REV2_ERR_SHIFT 8
-+#define FPM_REV2_CFG_SHIFT 0
-+
-+#define FPM_TS_INT_SHIFT 16
-+
-+#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
-+
-+#define FPM_PS_FM_CTL_SEL_SHIFT 30
-+#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16
-+
-+#define FPM_DISP_LIMIT_SHIFT 24
-+
-+/* Interrupts defines */
-+#define FPM_EVENT_FM_CTL_0 0x00008000
-+#define FPM_EVENT_FM_CTL 0x0000FF00
-+#define FPM_EVENT_FM_CTL_BRK 0x00000080
-+
-+/* others */
-+#define FPM_MAX_DISP_LIMIT 31
-+#define FPM_RSTC_FM_RESET 0x80000000
-+#define FPM_RSTC_1G0_RESET 0x40000000
-+#define FPM_RSTC_1G1_RESET 0x20000000
-+#define FPM_RSTC_1G2_RESET 0x10000000
-+#define FPM_RSTC_1G3_RESET 0x08000000
-+#define FPM_RSTC_10G0_RESET 0x04000000
-+#define FPM_RSTC_1G4_RESET 0x02000000
-+#define FPM_RSTC_1G5_RESET 0x01000000
-+#define FPM_RSTC_1G6_RESET 0x00800000
-+#define FPM_RSTC_1G7_RESET 0x00400000
-+#define FPM_RSTC_10G1_RESET 0x00200000
-+/**************************************************************************//**
-+ @Description BMI defines
-+*//***************************************************************************/
-+/* masks */
-+#define BMI_INIT_START 0x80000000
-+#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
-+#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
-+#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
-+#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
-+#define BMI_NUM_OF_TASKS_MASK 0x3F000000
-+#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
-+#define BMI_NUM_OF_DMAS_MASK 0x00000F00
-+#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
-+#define BMI_FIFO_SIZE_MASK 0x000003FF
-+#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
-+#define BMI_CFG2_DMAS_MASK 0x0000003F
-+#define BMI_TOTAL_FIFO_SIZE_MASK 0x07FF0000
-+#define BMI_TOTAL_NUM_OF_TASKS_MASK 0x007F0000
-+
-+/* shifts */
-+#define BMI_CFG2_TASKS_SHIFT 16
-+#define BMI_CFG2_DMAS_SHIFT 0
-+#define BMI_CFG1_FIFO_SIZE_SHIFT 16
-+#define BMI_FIFO_SIZE_SHIFT 0
-+#define BMI_EXTRA_FIFO_SIZE_SHIFT 16
-+#define BMI_NUM_OF_TASKS_SHIFT 24
-+#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
-+#define BMI_NUM_OF_DMAS_SHIFT 8
-+#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0
-+
-+/* others */
-+#define BMI_FIFO_ALIGN 0x100
-+#define FMAN_BMI_FIFO_UNITS 0x100
-+
-+
-+/**************************************************************************//**
-+ @Description QMI defines
-+*//***************************************************************************/
-+/* masks */
-+#define QMI_CFG_ENQ_EN 0x80000000
-+#define QMI_CFG_DEQ_EN 0x40000000
-+#define QMI_CFG_EN_COUNTERS 0x10000000
-+#define QMI_CFG_SOFT_RESET 0x01000000
-+#define QMI_CFG_DEQ_MASK 0x0000003F
-+#define QMI_CFG_ENQ_MASK 0x00003F00
-+
-+#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
-+#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
-+#define QMI_INTR_EN_SINGLE_ECC 0x80000000
-+
-+/* shifts */
-+#define QMI_CFG_ENQ_SHIFT 8
-+#define QMI_TAPC_TAP 22
-+
-+#define QMI_GS_HALT_NOT_BUSY 0x00000002
-+
-+/**************************************************************************//**
-+ @Description IRAM defines
-+*//***************************************************************************/
-+/* masks */
-+#define IRAM_IADD_AIE 0x80000000
-+#define IRAM_READY 0x80000000
-+
-+uint32_t fman_get_bmi_err_event(struct fman_bmi_regs *bmi_rg);
-+uint32_t fman_get_qmi_err_event(struct fman_qmi_regs *qmi_rg);
-+uint32_t fman_get_dma_com_id(struct fman_dma_regs *dma_rg);
-+uint64_t fman_get_dma_addr(struct fman_dma_regs *dma_rg);
-+uint32_t fman_get_dma_err_event(struct fman_dma_regs *dma_rg);
-+uint32_t fman_get_fpm_err_event(struct fman_fpm_regs *fpm_rg);
-+uint32_t fman_get_muram_err_event(struct fman_fpm_regs *fpm_rg);
-+uint32_t fman_get_iram_err_event(struct fman_fpm_regs *fpm_rg);
-+uint32_t fman_get_qmi_event(struct fman_qmi_regs *qmi_rg);
-+uint32_t fman_get_fpm_error_interrupts(struct fman_fpm_regs *fpm_rg);
-+uint32_t fman_get_ctrl_intr(struct fman_fpm_regs *fpm_rg,
-+ uint8_t event_reg_id);
-+uint8_t fman_get_qmi_deq_th(struct fman_qmi_regs *qmi_rg);
-+uint8_t fman_get_qmi_enq_th(struct fman_qmi_regs *qmi_rg);
-+uint16_t fman_get_size_of_fifo(struct fman_bmi_regs *bmi_rg, uint8_t port_id);
-+uint32_t fman_get_total_fifo_size(struct fman_bmi_regs *bmi_rg);
-+uint16_t fman_get_size_of_extra_fifo(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id);
-+uint8_t fman_get_num_of_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id);
-+uint8_t fman_get_num_extra_tasks(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id);
-+uint8_t fman_get_num_of_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id);
-+uint8_t fman_get_num_extra_dmas(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id);
-+uint32_t fman_get_normal_pending(struct fman_fpm_regs *fpm_rg);
-+uint32_t fman_get_controller_event(struct fman_fpm_regs *fpm_rg,
-+ uint8_t reg_id);
-+uint32_t fman_get_error_pending(struct fman_fpm_regs *fpm_rg);
-+void fman_get_revision(struct fman_fpm_regs *fpm_rg, uint8_t *major,
-+ uint8_t *minor);
-+uint32_t fman_get_counter(struct fman_rg *fman_rg,
-+ enum fman_counters reg_name);
-+uint32_t fman_get_dma_status(struct fman_dma_regs *dma_rg);
-+
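As one example of how these getters relate to the register layout and the FPM_REV1_* masks and shifts defined above, here is a sketch of what fman_get_revision() plausibly does. This is illustrative only: the real implementation lives in the SDK's C file, and ioread32be() is merely assumed here as the big-endian register accessor.

static void fman_get_revision_sketch(struct fman_fpm_regs *fpm_rg,
				     uint8_t *major, uint8_t *minor)
{
	/* fm_ip_rev_1 holds the FM IP block revision (see struct above). */
	uint32_t rev = ioread32be(&fpm_rg->fm_ip_rev_1);

	*major = (uint8_t)((rev & FPM_REV1_MAJOR_MASK) >> FPM_REV1_MAJOR_SHIFT);
	*minor = (uint8_t)((rev & FPM_REV1_MINOR_MASK) >> FPM_REV1_MINOR_SHIFT);
}
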
-+
-+int fman_set_erratum_10gmac_a004_wa(struct fman_fpm_regs *fpm_rg);
-+void fman_set_ctrl_intr(struct fman_fpm_regs *fpm_rg, uint8_t event_reg_id,
-+ uint32_t enable_events);
-+void fman_set_num_of_riscs_per_port(struct fman_fpm_regs *fpm_rg,
-+ uint8_t port_id,
-+ uint8_t num_fman_ctrls,
-+ uint32_t or_fman_ctrl);
-+void fman_set_order_restoration_per_port(struct fman_fpm_regs *fpm_rg,
-+ uint8_t port_id,
-+ bool independent_mode,
-+ bool is_rx_port);
-+void fman_set_qmi_enq_th(struct fman_qmi_regs *qmi_rg, uint8_t val);
-+void fman_set_qmi_deq_th(struct fman_qmi_regs *qmi_rg, uint8_t val);
-+void fman_set_liodn_per_port(struct fman_rg *fman_rg,
-+ uint8_t port_id,
-+ uint16_t liodn_base,
-+ uint16_t liodn_offset);
-+void fman_set_size_of_fifo(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint32_t size_of_fifo,
-+ uint32_t extra_size_of_fifo);
-+void fman_set_num_of_tasks(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint8_t num_of_tasks,
-+ uint8_t num_of_extra_tasks);
-+void fman_set_num_of_open_dmas(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint8_t num_of_open_dmas,
-+ uint8_t num_of_extra_open_dmas,
-+ uint8_t total_num_of_dmas);
-+void fman_set_ports_bandwidth(struct fman_bmi_regs *bmi_rg, uint8_t *weights);
-+int fman_set_exception(struct fman_rg *fman_rg,
-+ enum fman_exceptions exception,
-+ bool enable);
-+void fman_set_dma_emergency(struct fman_dma_regs *dma_rg, bool is_write,
-+ bool enable);
-+void fman_set_dma_ext_bus_pri(struct fman_dma_regs *dma_rg, uint32_t pri);
-+void fman_set_congestion_group_pfc_priority(uint32_t *cpg_rg,
-+ uint32_t congestion_group_id,
-+ uint8_t piority_bit_map,
-+ uint32_t reg_num);
-+
-+
-+void fman_defconfig(struct fman_cfg *cfg, bool is_master);
-+void fman_regconfig(struct fman_rg *fman_rg, struct fman_cfg *cfg);
-+int fman_fpm_init(struct fman_fpm_regs *fpm_rg, struct fman_cfg *cfg);
-+int fman_bmi_init(struct fman_bmi_regs *bmi_rg, struct fman_cfg *cfg);
-+int fman_qmi_init(struct fman_qmi_regs *qmi_rg, struct fman_cfg *cfg);
-+int fman_dma_init(struct fman_dma_regs *dma_rg, struct fman_cfg *cfg);
-+void fman_free_resources(struct fman_rg *fman_rg);
-+int fman_enable(struct fman_rg *fman_rg, struct fman_cfg *cfg);
-+void fman_reset(struct fman_fpm_regs *fpm_rg);
-+void fman_resume(struct fman_fpm_regs *fpm_rg);
-+
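Taken together, these routines suggest a bring-up flow: take the defaults, initialize each block, apply the register configuration, then enable. The exact ordering in the sketch below is an assumption of this illustration rather than something the header guarantees; error handling is reduced to simple propagation.

static int fman_bringup_sketch(struct fman_rg *rg, struct fman_cfg *cfg)
{
	int err;

	fman_defconfig(cfg, true);	/* start from the default configuration */
	fman_reset(rg->fpm_rg);		/* assumed: reset before programming */

	err = fman_dma_init(rg->dma_rg, cfg);
	if (err)
		return err;
	err = fman_fpm_init(rg->fpm_rg, cfg);
	if (err)
		return err;
	err = fman_bmi_init(rg->bmi_rg, cfg);
	if (err)
		return err;
	err = fman_qmi_init(rg->qmi_rg, cfg);
	if (err)
		return err;

	fman_regconfig(rg, cfg);	/* apply LIODN and misc register setup */

	return fman_enable(rg, cfg);	/* open BMI/QMI for traffic */
}
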
-+
-+void fman_enable_time_stamp(struct fman_fpm_regs *fpm_rg,
-+ uint8_t count1ubit,
-+ uint16_t fm_clk_freq);
-+void fman_enable_rams_ecc(struct fman_fpm_regs *fpm_rg);
-+void fman_qmi_disable_dispatch_limit(struct fman_fpm_regs *fpm_rg);
-+void fman_disable_rams_ecc(struct fman_fpm_regs *fpm_rg);
-+void fman_resume_stalled_port(struct fman_fpm_regs *fpm_rg, uint8_t port_id);
-+int fman_reset_mac(struct fman_fpm_regs *fpm_rg, uint8_t macId, bool is_10g);
-+bool fman_is_port_stalled(struct fman_fpm_regs *fpm_rg, uint8_t port_id);
-+bool fman_rams_ecc_is_external_ctl(struct fman_fpm_regs *fpm_rg);
-+bool fman_is_qmi_halt_not_busy_state(struct fman_qmi_regs *qmi_rg);
-+int fman_modify_counter(struct fman_rg *fman_rg,
-+ enum fman_counters reg_name,
-+ uint32_t val);
-+void fman_force_intr(struct fman_rg *fman_rg,
-+ enum fman_exceptions exception);
-+void fman_set_vsp_window(struct fman_bmi_regs *bmi_rg,
-+ uint8_t port_id,
-+ uint8_t base_storage_profile,
-+ uint8_t log2_num_of_profiles);
-+
-+/**************************************************************************//**
-+ @Description default values
-+*//***************************************************************************/
-+#define DEFAULT_CATASTROPHIC_ERR E_FMAN_CATAST_ERR_STALL_PORT
-+#define DEFAULT_DMA_ERR E_FMAN_DMA_ERR_CATASTROPHIC
-+#define DEFAULT_HALT_ON_EXTERNAL_ACTIVATION FALSE /* do not change! if changed, must be disabled for rev1 ! */
-+#define DEFAULT_HALT_ON_UNRECOVERABLE_ECC_ERROR FALSE /* do not change! if changed, must be disabled for rev1 ! */
-+#define DEFAULT_EXTERNAL_ECC_RAMS_ENABLE FALSE
-+#define DEFAULT_AID_OVERRIDE FALSE
-+#define DEFAULT_AID_MODE E_FMAN_DMA_AID_OUT_TNUM
-+#define DEFAULT_DMA_COMM_Q_LOW 0x2A
-+#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
-+#define DEFAULT_CACHE_OVERRIDE E_FMAN_DMA_NO_CACHE_OR
-+#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
-+#define DEFAULT_DMA_DBG_CNT_MODE E_FMAN_DMA_DBG_NO_CNT
-+#define DEFAULT_DMA_EN_EMERGENCY FALSE
-+#define DEFAULT_DMA_SOS_EMERGENCY 0
-+#define DEFAULT_DMA_WATCHDOG 0 /* disabled */
-+#define DEFAULT_DMA_EN_EMERGENCY_SMOOTHER FALSE
-+#define DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER 0
-+#define DEFAULT_DISP_LIMIT 0
-+#define DEFAULT_PRS_DISP_TH 16
-+#define DEFAULT_PLCR_DISP_TH 16
-+#define DEFAULT_KG_DISP_TH 16
-+#define DEFAULT_BMI_DISP_TH 16
-+#define DEFAULT_QMI_ENQ_DISP_TH 16
-+#define DEFAULT_QMI_DEQ_DISP_TH 16
-+#define DEFAULT_FM_CTL1_DISP_TH 16
-+#define DEFAULT_FM_CTL2_DISP_TH 16
-+#define DEFAULT_TNUM_AGING_PERIOD 4
-+
-+
-+#endif /* __FSL_FMAN_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h
-@@ -0,0 +1,1096 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_DTSEC_H
-+#define __FSL_FMAN_DTSEC_H
-+
-+#include "common/general.h"
-+#include "fsl_enet.h"
-+
-+/**
-+ * DOC: dTSEC Init sequence
-+ *
-+ * To prepare dTSEC block for transfer use the following call sequence:
-+ *
-+ * - fman_dtsec_defconfig() - This step is optional yet recommended. It is
-+ * used to obtain the default dTSEC configuration parameters.
-+ *
-+ * - Change dtsec configuration in &dtsec_cfg. This structure will be used
-+ * to customize the dTSEC behavior.
-+ *
-+ * - fman_dtsec_init() - Applies the configuration on dTSEC hardware. Note that
-+ * dTSEC is initialized while both Tx and Rx are disabled.
-+ *
-+ * - fman_dtsec_set_mac_address() - Set the station address (mac address).
-+ * This is used by dTSEC to match against received packets.
-+ *
-+ * - fman_dtsec_adjust_link() - Set the link speed and duplex parameters
-+ * after the PHY establishes the link.
-+ *
-+ * - dtsec_enable_tx() and dtsec_enable_rx() to enable transmission and
-+ * reception.
-+ */
-+
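The sequence above maps naturally onto code. The sketch below is illustrative only: struct dtsec_cfg and the prototypes it calls are declared further down this header (outside this excerpt), so the argument lists shown are assumptions that must be checked against the real declarations.

static void dtsec_init_sequence_sketch(struct dtsec_regs *regs,
				       struct dtsec_cfg *cfg,
				       enum enet_interface iface,
				       enum enet_speed speed,
				       uint8_t *mac_addr)
{
	/* 1. Start from the recommended defaults. */
	fman_dtsec_defconfig(cfg);

	/* 2. Customize *cfg here, e.g. cfg->tx_pad_crc or cfg->rx_short_frm. */

	/* 3. Apply the configuration; Tx and Rx remain disabled afterwards.
	 *    (This argument list is assumed by the sketch.) */
	fman_dtsec_init(regs, cfg, iface, speed, mac_addr);

	/* 4. Program the station address used to match received packets. */
	fman_dtsec_set_mac_address(regs, mac_addr);

	/* 5. Once the PHY reports link, set the negotiated speed and duplex.
	 *    (This argument list is assumed by the sketch.) */
	fman_dtsec_adjust_link(regs, iface, speed, true /* full duplex */);

	/* 6. Finally enable transmission and reception, as listed above. */
	dtsec_enable_tx(regs);
	dtsec_enable_rx(regs);
}
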
-+/**
-+ * DOC: dTSEC Graceful stop
-+ *
-+ * To temporarily stop dTSEC activity use fman_dtsec_stop_tx() and
-+ * fman_dtsec_stop_rx(). Note that these functions request dTSEC graceful stop
-+ * but return before this stop is complete. To query for graceful stop
-+ * completion use fman_dtsec_get_event() and check DTSEC_IEVENT_GTSC and
-+ * DTSEC_IEVENT_GRSC bits. Alternatively the dTSEC interrupt mask can be set to
-+ * enable graceful stop interrupts.
-+ *
-+ * To resume operation after graceful stop use fman_dtsec_start_tx() and
-+ * fman_dtsec_start_rx().
-+ */
-+
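A sketch of the graceful stop/resume flow described above (illustrative only; the fman_dtsec_get_event() argument list is an assumption, and DTSEC_IEVENT_GTSC/DTSEC_IEVENT_GRSC are declared elsewhere in this header, outside this excerpt):

static void dtsec_graceful_stop_sketch(struct dtsec_regs *regs)
{
	uint32_t done = DTSEC_IEVENT_GTSC | DTSEC_IEVENT_GRSC;

	fman_dtsec_stop_tx(regs);
	fman_dtsec_stop_rx(regs);

	/* Busy-wait until both graceful-stop-complete events are raised;
	 * a real driver would sleep or time out instead. */
	while ((fman_dtsec_get_event(regs, done) & done) != done)
		;

	/* ...receive/transmit registers may be reconfigured safely here... */

	/* Resume normal operation. */
	fman_dtsec_start_tx(regs);
	fman_dtsec_start_rx(regs);
}
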
-+/**
-+ * DOC: dTSEC interrupt handling
-+ *
-+ * This code does not provide an interrupt handler for dTSEC. Instead this
-+ * handler should be implemented and registered to the operating system by the
-+ * caller. Some primitives for accessing the event status and mask registers
-+ * are provided.
-+ *
-+ * See "dTSEC Events" section for a list of events that dTSEC can generate.
-+ */
-+
-+/**
-+ * DOC: dTSEC Events
-+ *
-+ * Interrupt events cause dTSEC event bits to be set. Software may poll the
-+ * event register at any time to check for pending interrupts. If an event
-+ * occurs and its corresponding enable bit is set in the interrupt mask
-+ * register, the event also causes a hardware interrupt at the PIC.
-+ *
-+ * To poll for event status use the fman_dtsec_get_event() function.
-+ * To configure the interrupt mask use fman_dtsec_enable_interrupt() and
-+ * fman_dtsec_disable_interrupt() functions.
-+ * After servicing a dTSEC interrupt use fman_dtsec_ack_event to reset the
-+ * serviced event bit.
-+ *
-+ * The following events may be signaled by dTSEC hardware:
-+ *
-+ * %DTSEC_IEVENT_BABR - Babbling receive error. This bit indicates that
-+ * a frame was received with length in excess of the MAC's maximum frame length
-+ * register.
-+ *
-+ * %DTSEC_IEVENT_RXC - Receive control (pause frame) interrupt. A pause
-+ * control frame was received while Rx pause frame handling is enabled.
-+ * Also see fman_dtsec_handle_rx_pause().
-+ *
-+ * %DTSEC_IEVENT_MSRO - MIB counter overflow. The count for one of the MIB
-+ * counters has exceeded the size of its register.
-+ *
-+ * %DTSEC_IEVENT_GTSC - Graceful transmit stop complete. Graceful stop is now
-+ * complete. The transmitter is in a stopped state, in which only pause frames
-+ * can be transmitted.
-+ * Also see fman_dtsec_stop_tx().
-+ *
-+ * %DTSEC_IEVENT_BABT - Babbling transmit error. The transmitted frame length
-+ * has exceeded the value in the MAC's Maximum Frame Length register.
-+ *
-+ * %DTSEC_IEVENT_TXC - Transmit control (pause frame) interrupt. This bit
-+ * indicates that a control frame was transmitted.
-+ *
-+ * %DTSEC_IEVENT_TXE - Transmit error. This bit indicates that an error
-+ * occurred on the transmit channel. This bit is set whenever any transmit
-+ * error occurs which causes the dTSEC to discard all or part of a frame
-+ * (LC, CRL, XFUN).
-+ *
-+ * %DTSEC_IEVENT_LC - Late collision. This bit indicates that a collision
-+ * occurred beyond the collision window (slot time) in half-duplex mode.
-+ * The frame is truncated with a bad CRC and the remainder of the frame
-+ * is discarded.
-+ *
-+ * %DTSEC_IEVENT_CRL - Collision retry limit. This bit indicates that the number
-+ * of successive transmission collisions has exceeded the MAC's half-duplex
-+ * register's retransmission maximum count. The frame is discarded without
-+ * being transmitted and transmission of the next frame commences. This only
-+ * occurs while in half-duplex mode.
-+ * The number of retransmit attempts can be set in
-+ * &dtsec_halfdup_cfg.@retransmit before calling fman_dtsec_init().
-+ *
-+ * %DTSEC_IEVENT_XFUN - Transmit FIFO underrun. This bit indicates that the
-+ * transmit FIFO became empty before the complete frame was transmitted.
-+ * The frame is truncated with a bad CRC and the remainder of the frame is
-+ * discarded.
-+ *
-+ * %DTSEC_IEVENT_MAG - TBD
-+ *
-+ * %DTSEC_IEVENT_MMRD - MII management read completion.
-+ *
-+ * %DTSEC_IEVENT_MMWR - MII management write completion.
-+ *
-+ * %DTSEC_IEVENT_GRSC - Graceful receive stop complete. It allows the user to
-+ * know if the system has completed the stop and it is safe to write to receive
-+ * registers (status, control or configuration registers) that are used by the
-+ * system during normal operation.
-+ *
-+ * %DTSEC_IEVENT_TDPE - Internal data error on transmit. This bit indicates
-+ * that the dTSEC has detected a parity error on its stored transmit data, which
-+ * is likely to compromise the validity of recently transferred frames.
-+ *
-+ * %DTSEC_IEVENT_RDPE - Internal data error on receive. This bit indicates that
-+ * the dTSEC has detected a parity error on its stored receive data, which is
-+ * likely to compromise the validity of recently transferred frames.
-+ */
-+/* Interrupt Mask Register (IMASK) */
-+#define DTSEC_IMASK_BREN 0x80000000
-+#define DTSEC_IMASK_RXCEN 0x40000000
-+#define DTSEC_IMASK_MSROEN 0x04000000
-+#define DTSEC_IMASK_GTSCEN 0x02000000
-+#define DTSEC_IMASK_BTEN 0x01000000
-+#define DTSEC_IMASK_TXCEN 0x00800000
-+#define DTSEC_IMASK_TXEEN 0x00400000
-+#define DTSEC_IMASK_LCEN 0x00040000
-+#define DTSEC_IMASK_CRLEN 0x00020000
-+#define DTSEC_IMASK_XFUNEN 0x00010000
-+#define DTSEC_IMASK_ABRTEN 0x00008000
-+#define DTSEC_IMASK_IFERREN 0x00004000
-+#define DTSEC_IMASK_MAGEN 0x00000800
-+#define DTSEC_IMASK_MMRDEN 0x00000400
-+#define DTSEC_IMASK_MMWREN 0x00000200
-+#define DTSEC_IMASK_GRSCEN 0x00000100
-+#define DTSEC_IMASK_TDPEEN 0x00000002
-+#define DTSEC_IMASK_RDPEEN 0x00000001
-+
-+#define DTSEC_EVENTS_MASK \
-+ ((uint32_t)(DTSEC_IMASK_BREN | \
-+ DTSEC_IMASK_RXCEN | \
-+ DTSEC_IMASK_BTEN | \
-+ DTSEC_IMASK_TXCEN | \
-+ DTSEC_IMASK_TXEEN | \
-+ DTSEC_IMASK_ABRTEN | \
-+ DTSEC_IMASK_LCEN | \
-+ DTSEC_IMASK_CRLEN | \
-+ DTSEC_IMASK_XFUNEN | \
-+ DTSEC_IMASK_IFERREN | \
-+ DTSEC_IMASK_MAGEN | \
-+ DTSEC_IMASK_TDPEEN | \
-+ DTSEC_IMASK_RDPEEN))
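
A sketch of the poll-and-acknowledge pattern described in the "dTSEC Events" section above (illustrative only; the argument lists of fman_dtsec_get_event() and fman_dtsec_ack_event() are assumptions, and the IEVENT bits are assumed to share positions with the IMASK bits defined here):

static void dtsec_event_poll_sketch(struct dtsec_regs *regs)
{
	/* Read the pending events, limited to those normally enabled. */
	uint32_t ev = fman_dtsec_get_event(regs, DTSEC_EVENTS_MASK);

	if (ev & DTSEC_IMASK_BREN) {
		/* Babbling receive error: frame exceeded the max length. */
	}
	if (ev & DTSEC_IMASK_XFUNEN) {
		/* Tx FIFO underrun: frame was truncated with a bad CRC. */
	}

	/* Reset only the event bits that were actually serviced. */
	fman_dtsec_ack_event(regs, ev);
}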
-+
-+/* dtsec timestamp event bits */
-+#define TMR_PEMASK_TSREEN 0x00010000
-+#define TMR_PEVENT_TSRE 0x00010000
-+
-+/* Group address bit indication */
-+#define MAC_GROUP_ADDRESS 0x0000010000000000ULL
-+/* size in bytes of L2 address */
-+#define MAC_ADDRLEN 6
-+
-+#define DEFAULT_HALFDUP_ON FALSE
-+#define DEFAULT_HALFDUP_RETRANSMIT 0xf
-+#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
-+#define DEFAULT_HALFDUP_EXCESS_DEFER TRUE
-+#define DEFAULT_HALFDUP_NO_BACKOFF FALSE
-+#define DEFAULT_HALFDUP_BP_NO_BACKOFF FALSE
-+#define DEFAULT_HALFDUP_ALT_BACKOFF_VAL 0x0A
-+#define DEFAULT_HALFDUP_ALT_BACKOFF_EN FALSE
-+#define DEFAULT_RX_DROP_BCAST FALSE
-+#define DEFAULT_RX_SHORT_FRM TRUE
-+#define DEFAULT_RX_LEN_CHECK FALSE
-+#define DEFAULT_TX_PAD_CRC TRUE
-+#define DEFAULT_TX_CRC FALSE
-+#define DEFAULT_RX_CTRL_ACC FALSE
-+#define DEFAULT_TX_PAUSE_TIME 0xf000
-+#define DEFAULT_TBIPA 5
-+#define DEFAULT_RX_PREPEND 0
-+#define DEFAULT_PTP_TSU_EN TRUE
-+#define DEFAULT_PTP_EXCEPTION_EN TRUE
-+#define DEFAULT_PREAMBLE_LEN 7
-+#define DEFAULT_RX_PREAMBLE FALSE
-+#define DEFAULT_TX_PREAMBLE FALSE
-+#define DEFAULT_LOOPBACK FALSE
-+#define DEFAULT_RX_TIME_STAMP_EN FALSE
-+#define DEFAULT_TX_TIME_STAMP_EN FALSE
-+#define DEFAULT_RX_FLOW TRUE
-+#define DEFAULT_TX_FLOW TRUE
-+#define DEFAULT_RX_GROUP_HASH_EXD FALSE
-+#define DEFAULT_TX_PAUSE_TIME_EXTD 0
-+#define DEFAULT_RX_PROMISC FALSE
-+#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
-+#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
-+#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
-+#define DEFAULT_BACK_TO_BACK_IPG 0x60
-+#define DEFAULT_MAXIMUM_FRAME 0x600
-+#define DEFAULT_TBI_PHY_ADDR 5
-+#define DEFAULT_WAKE_ON_LAN FALSE
-+
-+/* register related defines (bits, field offsets..) */
-+#define DTSEC_ID1_ID 0xffff0000
-+#define DTSEC_ID1_REV_MJ 0x0000FF00
-+#define DTSEC_ID1_REV_MN 0x000000ff
-+
-+#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
-+#define DTSEC_ID2_INT_NORMAL_OFF 0x00020000
-+
-+#define DTSEC_ECNTRL_CLRCNT 0x00004000
-+#define DTSEC_ECNTRL_AUTOZ 0x00002000
-+#define DTSEC_ECNTRL_STEN 0x00001000
-+#define DTSEC_ECNTRL_CFG_RO 0x80000000
-+#define DTSEC_ECNTRL_GMIIM 0x00000040
-+#define DTSEC_ECNTRL_TBIM 0x00000020
-+#define DTSEC_ECNTRL_SGMIIM 0x00000002
-+#define DTSEC_ECNTRL_RPM 0x00000010
-+#define DTSEC_ECNTRL_R100M 0x00000008
-+#define DTSEC_ECNTRL_RMM 0x00000004
-+#define DTSEC_ECNTRL_QSGMIIM 0x00000001
-+
-+#define DTSEC_TCTRL_THDF 0x00000800
-+#define DTSEC_TCTRL_TTSE 0x00000040
-+#define DTSEC_TCTRL_GTS 0x00000020
-+#define DTSEC_TCTRL_TFC_PAUSE 0x00000010
-+
-+/* PTV offsets */
-+#define PTV_PTE_OFST 16
-+
-+#define RCTRL_CFA 0x00008000
-+#define RCTRL_GHTX 0x00000400
-+#define RCTRL_RTSE 0x00000040
-+#define RCTRL_GRS 0x00000020
-+#define RCTRL_BC_REJ 0x00000010
-+#define RCTRL_MPROM 0x00000008
-+#define RCTRL_RSF 0x00000004
-+#define RCTRL_UPROM 0x00000001
-+#define RCTRL_PROM (RCTRL_UPROM | RCTRL_MPROM)
-+
-+#define TMR_CTL_ESFDP 0x00000800
-+#define TMR_CTL_ESFDE 0x00000400
-+
-+#define MACCFG1_SOFT_RESET 0x80000000
-+#define MACCFG1_LOOPBACK 0x00000100
-+#define MACCFG1_RX_FLOW 0x00000020
-+#define MACCFG1_TX_FLOW 0x00000010
-+#define MACCFG1_TX_EN 0x00000001
-+#define MACCFG1_RX_EN 0x00000004
-+#define MACCFG1_RESET_RxMC 0x00080000
-+#define MACCFG1_RESET_TxMC 0x00040000
-+#define MACCFG1_RESET_RxFUN 0x00020000
-+#define MACCFG1_RESET_TxFUN 0x00010000
-+
-+#define MACCFG2_NIBBLE_MODE 0x00000100
-+#define MACCFG2_BYTE_MODE 0x00000200
-+#define MACCFG2_PRE_AM_Rx_EN 0x00000080
-+#define MACCFG2_PRE_AM_Tx_EN 0x00000040
-+#define MACCFG2_LENGTH_CHECK 0x00000010
-+#define MACCFG2_MAGIC_PACKET_EN 0x00000008
-+#define MACCFG2_PAD_CRC_EN 0x00000004
-+#define MACCFG2_CRC_EN 0x00000002
-+#define MACCFG2_FULL_DUPLEX 0x00000001
-+
-+#define PREAMBLE_LENGTH_SHIFT 12
-+
-+#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
-+#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
-+#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
-+
-+#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
-+#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
-+#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
-+#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
-+
-+#define HAFDUP_ALT_BEB 0x00080000
-+#define HAFDUP_BP_NO_BACKOFF 0x00040000
-+#define HAFDUP_NO_BACKOFF 0x00020000
-+#define HAFDUP_EXCESS_DEFER 0x00010000
-+#define HAFDUP_COLLISION_WINDOW 0x000003ff
-+
-+#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT 20
-+#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
-+#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
-+
-+#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
-+
-+/* CAR1/2 bits */
-+#define DTSEC_CAR1_TR64 0x80000000
-+#define DTSEC_CAR1_TR127 0x40000000
-+#define DTSEC_CAR1_TR255 0x20000000
-+#define DTSEC_CAR1_TR511 0x10000000
-+#define DTSEC_CAR1_TRK1 0x08000000
-+#define DTSEC_CAR1_TRMAX 0x04000000
-+#define DTSEC_CAR1_TRMGV 0x02000000
-+
-+#define DTSEC_CAR1_RBYT 0x00010000
-+#define DTSEC_CAR1_RPKT 0x00008000
-+#define DTSEC_CAR1_RFCS 0x00004000
-+#define DTSEC_CAR1_RMCA 0x00002000
-+#define DTSEC_CAR1_RBCA 0x00001000
-+#define DTSEC_CAR1_RXCF 0x00000800
-+#define DTSEC_CAR1_RXPF 0x00000400
-+#define DTSEC_CAR1_RXUO 0x00000200
-+#define DTSEC_CAR1_RALN 0x00000100
-+#define DTSEC_CAR1_RFLR 0x00000080
-+#define DTSEC_CAR1_RCDE 0x00000040
-+#define DTSEC_CAR1_RCSE 0x00000020
-+#define DTSEC_CAR1_RUND 0x00000010
-+#define DTSEC_CAR1_ROVR 0x00000008
-+#define DTSEC_CAR1_RFRG 0x00000004
-+#define DTSEC_CAR1_RJBR 0x00000002
-+#define DTSEC_CAR1_RDRP 0x00000001
-+
-+#define DTSEC_CAR2_TJBR 0x00080000
-+#define DTSEC_CAR2_TFCS 0x00040000
-+#define DTSEC_CAR2_TXCF 0x00020000
-+#define DTSEC_CAR2_TOVR 0x00010000
-+#define DTSEC_CAR2_TUND 0x00008000
-+#define DTSEC_CAR2_TFRG 0x00004000
-+#define DTSEC_CAR2_TBYT 0x00002000
-+#define DTSEC_CAR2_TPKT 0x00001000
-+#define DTSEC_CAR2_TMCA 0x00000800
-+#define DTSEC_CAR2_TBCA 0x00000400
-+#define DTSEC_CAR2_TXPF 0x00000200
-+#define DTSEC_CAR2_TDFR 0x00000100
-+#define DTSEC_CAR2_TEDF 0x00000080
-+#define DTSEC_CAR2_TSCL 0x00000040
-+#define DTSEC_CAR2_TMCL 0x00000020
-+#define DTSEC_CAR2_TLCL 0x00000010
-+#define DTSEC_CAR2_TXCL 0x00000008
-+#define DTSEC_CAR2_TNCL 0x00000004
-+#define DTSEC_CAR2_TDRP 0x00000001
-+
-+#define CAM1_ERRORS_ONLY \
-+ (DTSEC_CAR1_RXPF | DTSEC_CAR1_RALN | DTSEC_CAR1_RFLR \
-+ | DTSEC_CAR1_RCDE | DTSEC_CAR1_RCSE | DTSEC_CAR1_RUND \
-+ | DTSEC_CAR1_ROVR | DTSEC_CAR1_RFRG | DTSEC_CAR1_RJBR \
-+ | DTSEC_CAR1_RDRP)
-+
-+#define CAM2_ERRORS_ONLY (DTSEC_CAR2_TFCS | DTSEC_CAR2_TXPF | DTSEC_CAR2_TDRP)
-+
-+/*
-+ * Group of dTSEC specific counters relating to the standard RMON MIB Group 1
-+ * (or Ethernet) statistics.
-+ */
-+#define CAM1_MIB_GRP_1 \
-+ (DTSEC_CAR1_RDRP | DTSEC_CAR1_RBYT | DTSEC_CAR1_RPKT | DTSEC_CAR1_RMCA\
-+ | DTSEC_CAR1_RBCA | DTSEC_CAR1_RALN | DTSEC_CAR1_RUND | DTSEC_CAR1_ROVR\
-+ | DTSEC_CAR1_RFRG | DTSEC_CAR1_RJBR \
-+ | DTSEC_CAR1_TR64 | DTSEC_CAR1_TR127 | DTSEC_CAR1_TR255 \
-+ | DTSEC_CAR1_TR511 | DTSEC_CAR1_TRMAX)
-+
-+#define CAM2_MIB_GRP_1 (DTSEC_CAR2_TNCL | DTSEC_CAR2_TDRP)
-+
-+/* memory map */
-+
-+struct dtsec_regs {
-+ /* dTSEC General Control and Status Registers */
-+ uint32_t tsec_id; /* 0x000 ETSEC_ID register */
-+ uint32_t tsec_id2; /* 0x004 ETSEC_ID2 register */
-+ uint32_t ievent; /* 0x008 Interrupt event register */
-+ uint32_t imask; /* 0x00C Interrupt mask register */
-+ uint32_t reserved0010[1];
-+ uint32_t ecntrl; /* 0x014 E control register */
-+ uint32_t ptv; /* 0x018 Pause time value register */
-+ uint32_t tbipa; /* 0x01C TBI PHY address register */
-+ uint32_t tmr_ctrl; /* 0x020 Time-stamp Control register */
-+ uint32_t tmr_pevent; /* 0x024 Time-stamp event register */
-+ uint32_t tmr_pemask; /* 0x028 Timer event mask register */
-+ uint32_t reserved002c[5];
-+ uint32_t tctrl; /* 0x040 Transmit control register */
-+ uint32_t reserved0044[3];
-+ uint32_t rctrl; /* 0x050 Receive control register */
-+ uint32_t reserved0054[11];
-+ uint32_t igaddr[8]; /* 0x080-0x09C Individual/group address */
-+ uint32_t gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
-+ uint32_t reserved00c0[16];
-+ uint32_t maccfg1; /* 0x100 MAC configuration #1 */
-+ uint32_t maccfg2; /* 0x104 MAC configuration #2 */
-+ uint32_t ipgifg; /* 0x108 IPG/IFG */
-+ uint32_t hafdup; /* 0x10C Half-duplex */
-+ uint32_t maxfrm; /* 0x110 Maximum frame */
-+ uint32_t reserved0114[10];
-+ uint32_t ifstat; /* 0x13C Interface status */
-+ uint32_t macstnaddr1; /* 0x140 Station Address,part 1 */
-+ uint32_t macstnaddr2; /* 0x144 Station Address,part 2 */
-+ struct {
-+ uint32_t exact_match1; /* octets 1-4 */
-+ uint32_t exact_match2; /* octets 5-6 */
-+ } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
-+ uint32_t reserved01c0[16];
-+ uint32_t tr64; /* 0x200 transmit and receive 64 byte frame counter */
-+ uint32_t tr127; /* 0x204 transmit and receive 65 to 127 byte frame
-+ * counter */
-+ uint32_t tr255; /* 0x208 transmit and receive 128 to 255 byte frame
-+ * counter */
-+ uint32_t tr511; /* 0x20C transmit and receive 256 to 511 byte frame
-+ * counter */
-+ uint32_t tr1k; /* 0x210 transmit and receive 512 to 1023 byte frame
-+ * counter */
-+ uint32_t trmax; /* 0x214 transmit and receive 1024 to 1518 byte frame
-+ * counter */
-+ uint32_t trmgv; /* 0x218 transmit and receive 1519 to 1522 byte good
-+ * VLAN frame count */
-+ uint32_t rbyt; /* 0x21C receive byte counter */
-+ uint32_t rpkt; /* 0x220 receive packet counter */
-+ uint32_t rfcs; /* 0x224 receive FCS error counter */
-+ uint32_t rmca; /* 0x228 RMCA receive multicast packet counter */
-+ uint32_t rbca; /* 0x22C receive broadcast packet counter */
-+ uint32_t rxcf; /* 0x230 receive control frame packet counter */
-+ uint32_t rxpf; /* 0x234 receive pause frame packet counter */
-+ uint32_t rxuo; /* 0x238 receive unknown OP code counter */
-+ uint32_t raln; /* 0x23C receive alignment error counter */
-+ uint32_t rflr; /* 0x240 receive frame length error counter */
-+ uint32_t rcde; /* 0x244 receive code error counter */
-+ uint32_t rcse; /* 0x248 receive carrier sense error counter */
-+ uint32_t rund; /* 0x24C receive undersize packet counter */
-+ uint32_t rovr; /* 0x250 receive oversize packet counter */
-+ uint32_t rfrg; /* 0x254 receive fragments counter */
-+ uint32_t rjbr; /* 0x258 receive jabber counter */
-+ uint32_t rdrp; /* 0x25C receive drop */
-+ uint32_t tbyt; /* 0x260 transmit byte counter */
-+ uint32_t tpkt; /* 0x264 transmit packet counter */
-+ uint32_t tmca; /* 0x268 transmit multicast packet counter */
-+ uint32_t tbca; /* 0x26C transmit broadcast packet counter */
-+ uint32_t txpf; /* 0x270 transmit pause control frame counter */
-+ uint32_t tdfr; /* 0x274 transmit deferral packet counter */
-+ uint32_t tedf; /* 0x278 transmit excessive deferral packet counter */
-+ uint32_t tscl; /* 0x27C transmit single collision packet counter */
-+ uint32_t tmcl; /* 0x280 transmit multiple collision packet counter */
-+ uint32_t tlcl; /* 0x284 transmit late collision packet counter */
-+ uint32_t txcl; /* 0x288 transmit excessive collision packet counter */
-+ uint32_t tncl; /* 0x28C transmit total collision counter */
-+ uint32_t reserved0290[1];
-+ uint32_t tdrp; /* 0x294 transmit drop frame counter */
-+ uint32_t tjbr; /* 0x298 transmit jabber frame counter */
-+ uint32_t tfcs; /* 0x29C transmit FCS error counter */
-+ uint32_t txcf; /* 0x2A0 transmit control frame counter */
-+ uint32_t tovr; /* 0x2A4 transmit oversize frame counter */
-+ uint32_t tund; /* 0x2A8 transmit undersize frame counter */
-+ uint32_t tfrg; /* 0x2AC transmit fragments frame counter */
-+ uint32_t car1; /* 0x2B0 carry register one register* */
-+ uint32_t car2; /* 0x2B4 carry register two register* */
-+ uint32_t cam1; /* 0x2B8 carry register one mask register */
-+ uint32_t cam2; /* 0x2BC carry register two mask register */
-+ uint32_t reserved02c0[848];
-+};
-+
-+/**
-+ * struct dtsec_mib_grp_1_counters - MIB counter overflows
-+ *
-+ * @tr64: Transmit and Receive 64 byte frame count. Increment for each
-+ * good or bad frame, of any type, transmitted or received, which
-+ * is 64 bytes in length.
-+ * @tr127: Transmit and Receive 65 to 127 byte frame count. Increments for
-+ * each good or bad frame of any type, transmitted or received,
-+ * which is 65-127 bytes in length.
-+ * @tr255: Transmit and Receive 128 to 255 byte frame count. Increments
-+ * for each good or bad frame, of any type, transmitted or
-+ * received, which is 128-255 bytes in length.
-+ * @tr511: Transmit and Receive 256 to 511 byte frame count. Increments
-+ * for each good or bad frame, of any type, transmitted or
-+ * received, which is 256-511 bytes in length.
-+ * @tr1k: Transmit and Receive 512 to 1023 byte frame count. Increments
-+ * for each good or bad frame, of any type, transmitted or
-+ * received, which is 512-1023 bytes in length.
-+ * @trmax: Transmit and Receive 1024 to 1518 byte frame count. Increments
-+ * for each good or bad frame, of any type, transmitted or
-+ * received, which is 1024-1518 bytes in length.
-+ * @rfrg: Receive fragments count. Increments for each received frame
-+ * which is less than 64 bytes in length and contains an invalid
-+ * FCS. This includes integral and non-integral lengths.
-+ * @rjbr: Receive jabber count. Increments for received frames which
-+ * exceed 1518 (non VLAN) or 1522 (VLAN) bytes and contain an
-+ * invalid FCS. This includes alignment errors.
-+ * @rdrp: Receive dropped packets count. Increments for received frames
-+ * which are streamed to system but are later dropped due to lack
-+ * of system resources. Does not increment for frames rejected due
-+ * to address filtering.
-+ * @raln: Receive alignment error count. Increments for each received
-+ * frame from 64 to 1518 (non VLAN) or 1522 (VLAN) which contains
-+ * an invalid FCS and is not an integral number of bytes.
-+ * @rund: Receive undersize packet count. Increments each time a frame is
-+ * received which is less than 64 bytes in length and contains a
-+ * valid FCS and is otherwise well formed. This count does not
-+ * include range length errors.
-+ * @rovr: Receive oversize packet count. Increments each time a frame is
-+ * received which exceeded 1518 (non VLAN) or 1522 (VLAN) and
-+ * contains a valid FCS and is otherwise well formed.
-+ * @rbyt: Receive byte count. Increments by the byte count of frames
-+ * received, including those in bad packets, excluding preamble and
-+ * SFD but including FCS bytes.
-+ * @rpkt: Receive packet count. Increments for each received frame
-+ * (including bad packets, all unicast, broadcast, and multicast
-+ * packets).
-+ * @rmca: Receive multicast packet count. Increments for each multicast
-+ * frame with valid CRC and of lengths 64 to 1518 (non VLAN) or
-+ * 1522 (VLAN), excluding broadcast frames. This count does not
-+ * include range/length errors.
-+ * @rbca: Receive broadcast packet count. Increments for each broadcast
-+ * frame with valid CRC and of lengths 64 to 1518 (non VLAN) or
-+ * 1522 (VLAN), excluding multicast frames. Does not include
-+ * range/length errors.
-+ * @tdrp: Transmit drop frame count. Increments each time a memory error
-+ * or an underrun has occurred.
-+ * @tncl: Transmit total collision counter. Increments by the number of
-+ * collisions experienced during the transmission of a frame. Does
-+ * not increment for aborted frames.
-+ *
-+ * The structure contains a group of dTSEC HW specific counters relating to the
-+ * standard RMON MIB Group 1 (or Ethernet statistics) counters. This structure
-+ * counts only the carry events of the corresponding HW counters.
-+ *
-+ * tr64 to trmax notes: Frame sizes specified are considered excluding preamble
-+ * and SFD but including FCS bytes.
-+ */
-+struct dtsec_mib_grp_1_counters {
-+ uint64_t rdrp;
-+ uint64_t tdrp;
-+ uint64_t rbyt;
-+ uint64_t rpkt;
-+ uint64_t rbca;
-+ uint64_t rmca;
-+ uint64_t raln;
-+ uint64_t rund;
-+ uint64_t rovr;
-+ uint64_t rfrg;
-+ uint64_t rjbr;
-+ uint64_t tncl;
-+ uint64_t tr64;
-+ uint64_t tr127;
-+ uint64_t tr255;
-+ uint64_t tr511;
-+ uint64_t tr1k;
-+ uint64_t trmax;
-+};
-+
-+enum dtsec_stat_counters {
-+ E_DTSEC_STAT_TR64,
-+ E_DTSEC_STAT_TR127,
-+ E_DTSEC_STAT_TR255,
-+ E_DTSEC_STAT_TR511,
-+ E_DTSEC_STAT_TR1K,
-+ E_DTSEC_STAT_TRMAX,
-+ E_DTSEC_STAT_TRMGV,
-+ E_DTSEC_STAT_RBYT,
-+ E_DTSEC_STAT_RPKT,
-+ E_DTSEC_STAT_RMCA,
-+ E_DTSEC_STAT_RBCA,
-+ E_DTSEC_STAT_RXPF,
-+ E_DTSEC_STAT_RALN,
-+ E_DTSEC_STAT_RFLR,
-+ E_DTSEC_STAT_RCDE,
-+ E_DTSEC_STAT_RCSE,
-+ E_DTSEC_STAT_RUND,
-+ E_DTSEC_STAT_ROVR,
-+ E_DTSEC_STAT_RFRG,
-+ E_DTSEC_STAT_RJBR,
-+ E_DTSEC_STAT_RDRP,
-+ E_DTSEC_STAT_TFCS,
-+ E_DTSEC_STAT_TBYT,
-+ E_DTSEC_STAT_TPKT,
-+ E_DTSEC_STAT_TMCA,
-+ E_DTSEC_STAT_TBCA,
-+ E_DTSEC_STAT_TXPF,
-+ E_DTSEC_STAT_TNCL,
-+ E_DTSEC_STAT_TDRP
-+};
-+
-+enum dtsec_stat_level {
-+ /* No statistics */
-+ E_MAC_STAT_NONE = 0,
-+ /* Only RMON MIB group 1 (ether stats). Optimized for performance */
-+ E_MAC_STAT_MIB_GRP1,
-+ /* Only error counters are available. Optimized for performance */
-+ E_MAC_STAT_PARTIAL,
-+ /* All counters available. Not optimized for performance */
-+ E_MAC_STAT_FULL
-+};
-+
-+
-+/**
-+ * struct dtsec_cfg - dTSEC configuration
-+ *
-+ * @halfdup_on: Transmit half-duplex flow control, under software
-+ * control for 10/100-Mbps half-duplex media. If set,
-+ * back pressure is applied to media by raising carrier.
-+ * @halfdup_retransmit: Number of retransmission attempts following a collision.
-+ * If this is exceeded dTSEC aborts transmission due to
-+ * excessive collisions. The standard specifies the
-+ * attempt limit to be 15.
-+ * @halfdup_coll_window: The number of bytes of the frame during which
-+ * collisions may occur. The default value of 55
-+ * corresponds to the frame byte at the end of the
-+ * standard 512-bit slot time window. If collisions are
-+ * detected after this byte, the late collision event is
-+ * asserted and transmission of current frame is aborted.
-+ * @rx_drop_bcast: Discard broadcast frames. If set, all broadcast frames
-+ * will be discarded by dTSEC.
-+ * @rx_short_frm: Accept short frames. If set, dTSEC will accept frames
-+ * of length 14..63 bytes.
-+ * @rx_len_check: Length check for received frames. If set, the MAC
-+ * checks the frame's length field on receive to ensure it
-+ * matches the actual data field length. This only works
-+ * for received frames with length field less than 1500.
-+ * No check is performed for larger frames.
-+ * @tx_pad_crc: Pad and append CRC. If set, the MAC pads all
-+ * transmitted short frames and appends a CRC to every
-+ * frame regardless of padding requirement.
-+ * @tx_crc: Transmission CRC enable. If set, the MAC appends a CRC
-+ * to all frames. If frames presented to the MAC have a
-+ * valid length and contain a valid CRC, @tx_crc should be
-+ * reset.
-+ * This field is ignored if @tx_pad_crc is set.
-+ * @rx_ctrl_acc: Control frame accept. If set, this overrides 802.3
-+ * standard control frame behavior, and all Ethernet frames
-+ * that have an ethertype of 0x8808 are treated as normal
-+ * Ethernet frames and passed up to the packet interface on
-+ * a DA match. Received pause control frames are passed to
-+ * the packet interface only if Rx flow control is also
-+ * disabled. See fman_dtsec_handle_rx_pause() function.
-+ * @tx_pause_time: Transmit pause time value. This pause value is used as
-+ * part of the pause frame to be sent when a transmit pause
-+ * frame is initiated. If set to 0 this disables
-+ * transmission of pause frames.
-+ * @rx_preamble: Receive preamble enable. If set, the MAC recovers the
-+ * received Ethernet 7-byte preamble and passes it to the
-+ * packet interface at the start of each received frame.
-+ * This field should be reset for internal MAC loop-back
-+ * mode.
-+ * @tx_preamble: User defined preamble enable for transmitted frames.
-+ * If set, a user-defined preamble must be passed to the MAC
-+ * and it is transmitted instead of the standard preamble.
-+ * @preamble_len: Length, in bytes, of the preamble field preceding each
-+ * Ethernet start-of-frame delimiter byte. The default
-+ * value of 0x7 should be used in order to guarantee
-+ * reliable operation with IEEE 802.3 compliant hardware.
-+ * @rx_prepend: Packet alignment padding length. The specified number
-+ * of bytes (1-31) of zero padding are inserted before the
-+ * start of each received frame. For Ethernet, where
-+ * optional preamble extraction is enabled, the padding
-+ * appears before the preamble, otherwise the padding
-+ * precedes the layer 2 header.
-+ *
-+ * This structure contains basic dTSEC configuration and must be passed to
-+ * fman_dtsec_init() function. A default set of configuration values can be
-+ * obtained by calling fman_dtsec_defconfig().
-+ */
-+struct dtsec_cfg {
-+ bool halfdup_on;
-+ bool halfdup_alt_backoff_en;
-+ bool halfdup_excess_defer;
-+ bool halfdup_no_backoff;
-+ bool halfdup_bp_no_backoff;
-+ uint8_t halfdup_alt_backoff_val;
-+ uint16_t halfdup_retransmit;
-+ uint16_t halfdup_coll_window;
-+ bool rx_drop_bcast;
-+ bool rx_short_frm;
-+ bool rx_len_check;
-+ bool tx_pad_crc;
-+ bool tx_crc;
-+ bool rx_ctrl_acc;
-+ unsigned short tx_pause_time;
-+ unsigned short tbipa;
-+ bool ptp_tsu_en;
-+ bool ptp_exception_en;
-+ bool rx_preamble;
-+ bool tx_preamble;
-+ unsigned char preamble_len;
-+ unsigned char rx_prepend;
-+ bool loopback;
-+ bool rx_time_stamp_en;
-+ bool tx_time_stamp_en;
-+ bool rx_flow;
-+ bool tx_flow;
-+ bool rx_group_hash_exd;
-+ bool rx_promisc;
-+ uint8_t tbi_phy_addr;
-+ uint16_t tx_pause_time_extd;
-+ uint16_t maximum_frame;
-+ uint32_t non_back_to_back_ipg1;
-+ uint32_t non_back_to_back_ipg2;
-+ uint32_t min_ifg_enforcement;
-+ uint32_t back_to_back_ipg;
-+ bool wake_on_lan;
-+};
-+
-+
-+/**
-+ * fman_dtsec_defconfig() - Get default dTSEC configuration
-+ * @cfg: pointer to configuration structure.
-+ *
-+ * Call this function to obtain a default set of configuration values for
-+ * initializing dTSEC. The user can overwrite any of the values before calling
-+ * fman_dtsec_init(), if specific configuration needs to be applied.
-+ */
-+void fman_dtsec_defconfig(struct dtsec_cfg *cfg);
-+
-+/**
-+ * fman_dtsec_init() - Init dTSEC hardware block
-+ * @regs: Pointer to dTSEC register block
-+ * @cfg: dTSEC configuration data
-+ * @iface_mode: dTSEC interface mode, the type of MAC-PHY interface.
-+ * @iface_speed: 1G or 10G
-+ * @macaddr: MAC station address to be assigned to the device
-+ * @fm_rev_maj: major rev number
-+ * @fm_rev_min: minor rev number
-+ * @exceptions_mask: initial exceptions mask
-+ *
-+ * This function initializes dTSEC and applies basic configuration.
-+ *
-+ * dTSEC initialization sequence:
-+ * Before enabling Rx/Tx, call dtsec_set_address() to set the MAC address,
-+ * fman_dtsec_adjust_link() to configure interface speed and duplex, and
-+ * finally dtsec_enable_tx()/dtsec_enable_rx() to start transmission and reception.
-+ *
-+ * Returns: 0 if successful, an error code otherwise.
-+ */
-+int fman_dtsec_init(struct dtsec_regs *regs, struct dtsec_cfg *cfg,
-+ enum enet_interface iface_mode,
-+ enum enet_speed iface_speed,
-+ uint8_t *macaddr, uint8_t fm_rev_maj,
-+ uint8_t fm_rev_min,
-+ uint32_t exception_mask);
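To make the documented sequence concrete, here is a minimal bring-up sketch. It is only an illustration: the interface/speed enumerators, the FM revision numbers and the exceptions mask are placeholders supplied by fsl_enet.h and the platform code, and error handling is reduced to the bare minimum.

#include "fsl_fman_dtsec.h"

/* Illustrative only; iface, speed, revision numbers and exceptions are
 * placeholders passed in by the caller/platform code. */
static int dtsec_bring_up(struct dtsec_regs *regs, uint8_t *mac,
			  enum enet_interface iface, enum enet_speed speed,
			  uint8_t fm_rev_maj, uint8_t fm_rev_min,
			  uint32_t exceptions)
{
	struct dtsec_cfg cfg;
	int err;

	fman_dtsec_defconfig(&cfg);		/* start from the defaults */
	cfg.maximum_frame = 1522;		/* override fields as needed */

	err = fman_dtsec_init(regs, &cfg, iface, speed, mac,
			      fm_rev_maj, fm_rev_min, exceptions);
	if (err)
		return err;

	fman_dtsec_set_mac_address(regs, mac);	/* unicast match address */
	err = fman_dtsec_adjust_link(regs, iface, speed, true);
	if (err)
		return err;

	fman_dtsec_enable(regs, true, true);	/* start Rx and Tx */
	return 0;
}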
-+
-+/**
-+ * fman_dtsec_enable() - Enable dTSEC Tx and Rx
-+ * @regs: Pointer to dTSEC register block
-+ * @apply_rx: enable rx side
-+ * @apply_tx: enable tx side
-+ *
-+ * This function resets Tx and Rx graceful stop bit and enables dTSEC Tx and Rx.
-+ */
-+void fman_dtsec_enable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx);
-+
-+/**
-+ * fman_dtsec_disable() - Disable dTSEC Tx and Rx
-+ * @regs: Pointer to dTSEC register block
-+ * @apply_rx: disable rx side
-+ * @apply_tx: disable tx side
-+ *
-+ * This function disables Tx and Rx in dTSEC.
-+ */
-+void fman_dtsec_disable(struct dtsec_regs *regs, bool apply_rx, bool apply_tx);
-+
-+/**
-+ * fman_dtsec_get_revision() - Get dTSEC hardware revision
-+ * @regs: Pointer to dTSEC register block
-+ *
-+ * Returns dtsec_id content
-+ *
-+ * Call this function to obtain the dTSEC hardware version.
-+ */
-+uint32_t fman_dtsec_get_revision(struct dtsec_regs *regs);
-+
-+/**
-+ * fman_dtsec_set_mac_address() - Set MAC station address
-+ * @regs: Pointer to dTSEC register block
-+ * @macaddr: MAC address array
-+ *
-+ * This function sets MAC station address. To enable unicast reception call
-+ * this after fman_dtsec_init(). While promiscuous mode is disabled dTSEC will
-+ * match the destination address of received unicast frames against this
-+ * address.
-+ */
-+void fman_dtsec_set_mac_address(struct dtsec_regs *regs, uint8_t *macaddr);
-+
-+/**
-+ * fman_dtsec_get_mac_address() - Query MAC station address
-+ * @regs: Pointer to dTSEC register block
-+ * @macaddr: MAC address array
-+ */
-+void fman_dtsec_get_mac_address(struct dtsec_regs *regs, uint8_t *macaddr);
-+
-+/**
-+ * fman_dtsec_set_uc_promisc() - Sets unicast promiscuous mode
-+ * @regs: Pointer to dTSEC register block
-+ * @enable: Enable unicast promiscuous mode
-+ *
-+ * Use this function to enable/disable dTSEC L2 address filtering. If the
-+ * address filtering is disabled all unicast packets are accepted.
-+ * To set dTSEC in promiscuous mode call both fman_dtsec_set_uc_promisc() and
-+ * fman_dtsec_set_mc_promisc() to disable filtering for both unicast and
-+ * multicast addresses.
-+ */
-+void fman_dtsec_set_uc_promisc(struct dtsec_regs *regs, bool enable);
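A small sketch of the pattern described above (assumed usage, not taken from the original sources): full promiscuous mode is obtained by disabling both the unicast and the multicast filter; fman_dtsec_set_mc_promisc() is declared further below.

/* Sketch: toggle full L2 promiscuous mode. */
static void dtsec_set_promisc(struct dtsec_regs *regs, bool on)
{
	fman_dtsec_set_uc_promisc(regs, on);	/* unicast filtering off/on */
	fman_dtsec_set_mc_promisc(regs, on);	/* multicast filtering off/on */
}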
-+
-+/**
-+ * fman_dtsec_set_wol() - Enable/Disable wake on lan
-+ * (magic packet support)
-+ * @regs: Pointer to dTSEC register block
-+ * @en: Enable Wake On Lan support in dTSEC
-+ *
-+ */
-+void fman_dtsec_set_wol(struct dtsec_regs *regs, bool en);
-+
-+/**
-+ * fman_dtsec_adjust_link() - Adjust dTSEC speed/duplex settings
-+ * @regs: Pointer to dTSEC register block
-+ * @iface_mode: dTSEC interface mode
-+ * @speed: Link speed
-+ * @full_dx: True for full-duplex, false for half-duplex.
-+ *
-+ * This function configures the MAC for the desired interface mode, speed and
-+ * duplex. Use it to configure dTSEC after fman_dtsec_init() and whenever the
-+ * link speed changes (for instance following PHY auto-negotiation).
-+ *
-+ * Returns: 0 if successful, an error code otherwise.
-+ */
-+int fman_dtsec_adjust_link(struct dtsec_regs *regs,
-+ enum enet_interface iface_mode,
-+ enum enet_speed speed, bool full_dx);
-+
-+/**
-+ * fman_dtsec_set_tbi_phy_addr() - Updates TBI address field
-+ * @regs: Pointer to dTSEC register block
-+ * @addr: Valid PHY address in the range of 1 to 31. 0 is reserved.
-+ *
-+ * In SGMII mode, the dTSEC's TBIPA field must contain a valid TBI PHY address
-+ * so that the associated TBI PHY (i.e. the link) may be initialized.
-+ *
-+ * Returns: 0 if successful, an error code otherwise.
-+ */
-+int fman_dtsec_set_tbi_phy_addr(struct dtsec_regs *regs,
-+ uint8_t addr);
-+
-+/**
-+ * fman_dtsec_set_max_frame_len() - Set max frame length
-+ * @regs: Pointer to dTSEC register block
-+ * @length: Max frame length.
-+ *
-+ * Sets maximum frame length for received and transmitted frames. Frames that
-+ * exceed this length are truncated.
-+ */
-+void fman_dtsec_set_max_frame_len(struct dtsec_regs *regs, uint16_t length);
-+
-+/**
-+ * fman_dtsec_get_max_frame_len() - Query max frame length
-+ * @regs: Pointer to dTSEC register block
-+ *
-+ * Returns: the current value of the maximum frame length.
-+ */
-+uint16_t fman_dtsec_get_max_frame_len(struct dtsec_regs *regs);
-+
-+/**
-+ * fman_dtsec_handle_rx_pause() - Configure pause frame handling
-+ * @regs: Pointer to dTSEC register block
-+ * @en: Enable pause frame handling in dTSEC
-+ *
-+ * If enabled, dTSEC will handle pause frames internally. This must be disabled
-+ * if dTSEC is set in half-duplex mode.
-+ * If pause frame handling is disabled and &dtsec_cfg.rx_ctrl_acc is set, pause
-+ * frames will be transferred to the packet interface just like regular Ethernet
-+ * frames.
-+ */
-+void fman_dtsec_handle_rx_pause(struct dtsec_regs *regs, bool en);
-+
-+/**
-+ * fman_dtsec_set_tx_pause_frames() - Configure Tx pause time
-+ * @regs: Pointer to dTSEC register block
-+ * @time: Time value included in pause frames
-+ *
-+ * Call this function to set the time value used in transmitted pause frames.
-+ * If time is 0, transmission of pause frames is disabled
-+ */
-+void fman_dtsec_set_tx_pause_frames(struct dtsec_regs *regs, uint16_t time);
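A short sketch combining the two pause-related calls above (assumed usage; the pause time value is arbitrary):

/* Sketch: let the MAC consume received pause frames and transmit pause
 * frames with an arbitrary pause time of 0x0100 quanta. */
static void dtsec_enable_flow_control(struct dtsec_regs *regs)
{
	fman_dtsec_handle_rx_pause(regs, true);
	fman_dtsec_set_tx_pause_frames(regs, 0x0100);	/* 0 disables Tx pause */
}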
-+
-+/**
-+ * fman_dtsec_ack_event() - Acknowledge handled events
-+ * @regs: Pointer to dTSEC register block
-+ * @ev_mask: Events to acknowledge
-+ *
-+ * After handling events signaled by dTSEC in either polling or interrupt mode,
-+ * call this function to reset the associated status bits in dTSEC event
-+ * register.
-+ */
-+void fman_dtsec_ack_event(struct dtsec_regs *regs, uint32_t ev_mask);
-+
-+/**
-+ * fman_dtsec_get_event() - Returns currently asserted events
-+ * @regs: Pointer to dTSEC register block
-+ * @ev_mask: Mask of relevant events
-+ *
-+ * Call this function to obtain a bit-mask of events that are currently asserted
-+ * in dTSEC, taken from IEVENT register.
-+ *
-+ * Returns: a bit-mask of events asserted in dTSEC.
-+ */
-+uint32_t fman_dtsec_get_event(struct dtsec_regs *regs, uint32_t ev_mask);
-+
-+/**
-+ * fman_dtsec_get_interrupt_mask() - Returns a bit-mask of enabled interrupts
-+ * @regs: Pointer to dTSEC register block
-+ *
-+ * Call this function to obtain a bit-mask of enabled interrupts
-+ * in dTSEC, taken from IMASK register.
-+ *
-+ * Returns: a bit-mask of enabled interrupts in dTSEC.
-+ */
-+uint32_t fman_dtsec_get_interrupt_mask(struct dtsec_regs *regs);
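The three calls above combine into the usual poll-and-acknowledge pattern; a minimal sketch (assumed usage) follows.

/* Sketch: poll the enabled dTSEC events, handle them, then acknowledge. */
static void dtsec_poll_events(struct dtsec_regs *regs)
{
	uint32_t mask = fman_dtsec_get_interrupt_mask(regs);
	uint32_t events = fman_dtsec_get_event(regs, mask);

	if (events) {
		/* ... handle the asserted events here ... */
		fman_dtsec_ack_event(regs, events);
	}
}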
-+
-+void fman_dtsec_clear_addr_in_paddr(struct dtsec_regs *regs,
-+ uint8_t paddr_num);
-+
-+void fman_dtsec_add_addr_in_paddr(struct dtsec_regs *regs,
-+ uint64_t addr,
-+ uint8_t paddr_num);
-+
-+void fman_dtsec_enable_tmr_interrupt (struct dtsec_regs *regs);
-+
-+void fman_dtsec_disable_tmr_interrupt(struct dtsec_regs *regs);
-+
-+/**
-+ * fman_dtsec_disable_interrupt() - Disables interrupts for the specified events
-+ * @regs: Pointer to dTSEC register block
-+ * @ev_mask: Mask of relevant events
-+ *
-+ * Call this function to disable interrupts in dTSEC for the specified events.
-+ * To enable interrupts use fman_dtsec_enable_interrupt().
-+ */
-+void fman_dtsec_disable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask);
-+
-+/**
-+ * fman_dtsec_enable_interrupt() - Enable interrupts for the specified events
-+ * @regs: Pointer to dTSEC register block
-+ * @ev_mask: Mask of relevant events
-+ *
-+ * Call this function to enable interrupts in dTSEC for the specified events.
-+ * To disable interrupts use fman_dtsec_disable_interrupt().
-+ */
-+void fman_dtsec_enable_interrupt(struct dtsec_regs *regs, uint32_t ev_mask);
-+
-+/**
-+ * fman_dtsec_set_ts() - Enables dTSEC timestamps
-+ * @regs: Pointer to dTSEC register block
-+ * @en: true to enable timestamps, false to disable them
-+ *
-+ * Call this function to enable/disable dTSEC timestamps. This affects both
-+ * Tx and Rx.
-+ */
-+void fman_dtsec_set_ts(struct dtsec_regs *regs, bool en);
-+
-+/**
-+ * fman_dtsec_set_bucket() - Enables/disables a filter bucket
-+ * @regs: Pointer to dTSEC register block
-+ * @bucket: Bucket index
-+ * @enable: true/false to enable/disable this bucket
-+ *
-+ * This function enables or disables the specified bucket. Enabling a bucket
-+ * associated with an address configures dTSEC to accept received packets
-+ * with that destination address.
-+ * Multiple addresses may be associated with the same bucket. Disabling a
-+ * bucket will affect all addresses associated with that bucket. A bucket that
-+ * is enabled requires further filtering and verification in the upper layers
-+ *
-+ */
-+void fman_dtsec_set_bucket(struct dtsec_regs *regs, int bucket, bool enable);
-+
-+/**
-+ * fman_dtsec_set_hash_table() - Insert a CRC code into the filter table
-+ * @regs: Pointer to dTSEC register block
-+ * @crc: CRC code to insert
-+ * @mcast: true if this is a multicast address
-+ * @ghtx: true if we are in ghtx mode
-+ *
-+ * This function inserts a CRC code into the filter table.
-+ */
-+void fman_dtsec_set_hash_table(struct dtsec_regs *regs, uint32_t crc,
-+ bool mcast, bool ghtx);
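For illustration, adding one multicast address to the hash filter might look like the sketch below. The CRC convention is not defined in this header, so compute_dtsec_hash_crc() is a hypothetical helper.

/* Sketch: add a multicast address to the hash filter table. */
static void dtsec_add_mc_addr(struct dtsec_regs *regs, const uint8_t *mac)
{
	/* compute_dtsec_hash_crc() is hypothetical; the hardware defines
	 * the exact CRC polynomial and bit ordering. */
	uint32_t crc = compute_dtsec_hash_crc(mac);

	fman_dtsec_set_hash_table(regs, crc, true /* mcast */, false /* ghtx */);
}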
-+
-+/**
-+ * fman_dtsec_reset_filter_table() - Resets the address filtering table
-+ * @regs: Pointer to dTSEC register block
-+ * @mcast: Reset multicast entries
-+ * @ucast: Reset unicast entries
-+ *
-+ * Resets all entries in L2 address filter table. After calling this function
-+ * all buckets enabled using fman_dtsec_set_bucket() will be disabled.
-+ * If dtsec_init_filter_table() was called with @unicast_hash set to false,
-+ * @ucast argument is ignored.
-+ * This affects neither the primary address nor the 15 additional addresses
-+ * configured using dtsec_set_address() or dtsec_set_match_address().
-+ */
-+void fman_dtsec_reset_filter_table(struct dtsec_regs *regs, bool mcast,
-+ bool ucast);
-+
-+/**
-+ * fman_dtsec_set_mc_promisc() - Set multicast promiscuous mode
-+ * @regs: Pointer to dTSEC register block
-+ * @enable: Enable multicast promiscuous mode
-+ *
-+ * Call this to enable/disable L2 address filtering for multicast packets.
-+ */
-+void fman_dtsec_set_mc_promisc(struct dtsec_regs *regs, bool enable);
-+
-+/* statistics APIs */
-+
-+/**
-+ * fman_dtsec_set_stat_level() - Enable a group of MIB statistics counters
-+ * @regs: Pointer to dTSEC register block
-+ * @level: Specifies a certain group of dTSEC MIB HW counters or _all_,
-+ * to specify all the existing counters.
-+ * If set to _none_, it disables all the counters.
-+ *
-+ * Enables the MIB statistics hw counters and sets up the carry interrupt
-+ * masks for the counters corresponding to the @level input parameter.
-+ *
-+ * Returns: error if invalid @level value given.
-+ */
-+int fman_dtsec_set_stat_level(struct dtsec_regs *regs,
-+ enum dtsec_stat_level level);
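A brief sketch of enabling the RMON group-1 counters and later reading one of them (assumed usage; fman_dtsec_get_stat_counter() is declared further below):

/* Sketch: enable MIB group 1 once at setup time ... */
static int dtsec_stats_setup(struct dtsec_regs *regs)
{
	return fman_dtsec_set_stat_level(regs, E_MAC_STAT_MIB_GRP1);
}

/* ... then read a counter, e.g. received packets, whenever queried. */
static uint32_t dtsec_stats_rx_packets(struct dtsec_regs *regs)
{
	return fman_dtsec_get_stat_counter(regs, E_DTSEC_STAT_RPKT);
}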
-+
-+/**
-+ * fman_dtsec_reset_stat() - Completely resets all dTSEC HW counters
-+ * @regs: Pointer to dTSEC register block
-+ */
-+void fman_dtsec_reset_stat(struct dtsec_regs *regs);
-+
-+/**
-+ * fman_dtsec_get_clear_carry_regs() - Read and clear carry bits (CAR1-2 registers)
-+ * @regs: Pointer to dTSEC register block
-+ * @car1: car1 register value
-+ * @car2: car2 register value
-+ *
-+ * When set, the carry bits signal that an overflow occurred on the
-+ * corresponding counters.
-+ * Note that the carry bits (CAR1-2 registers) will assert the
-+ * %DTSEC_IEVENT_MSRO interrupt if unmasked (via CAM1-2 regs).
-+ *
-+ * Returns: true if an overflow occurred, false otherwise.
-+ */
-+bool fman_dtsec_get_clear_carry_regs(struct dtsec_regs *regs,
-+ uint32_t *car1, uint32_t *car2);
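A sketch of how the carry bits might be consumed (assumed usage; the mapping from carry bits to the 64-bit software accumulators in struct dtsec_mib_grp_1_counters is hardware specific and left out):

/* Sketch: read-and-clear the MIB carry registers, e.g. on an MSRO event. */
static void dtsec_handle_mib_carry(struct dtsec_regs *regs)
{
	uint32_t car1, car2;

	if (fman_dtsec_get_clear_carry_regs(regs, &car1, &car2)) {
		/* At least one HW counter wrapped; car1/car2 say which.
		 * A driver would bump its 64-bit accumulators here. */
	}
}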
-+
-+uint32_t fman_dtsec_check_and_clear_tmr_event(struct dtsec_regs *regs);
-+
-+uint32_t fman_dtsec_get_stat_counter(struct dtsec_regs *regs,
-+ enum dtsec_stat_counters reg_name);
-+
-+void fman_dtsec_start_tx(struct dtsec_regs *regs);
-+void fman_dtsec_start_rx(struct dtsec_regs *regs);
-+void fman_dtsec_stop_tx(struct dtsec_regs *regs);
-+void fman_dtsec_stop_rx(struct dtsec_regs *regs);
-+uint32_t fman_dtsec_get_rctrl(struct dtsec_regs *regs);
-+
-+
-+#endif /* __FSL_FMAN_DTSEC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h
-@@ -0,0 +1,107 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_DTSEC_MII_ACC_H
-+#define __FSL_FMAN_DTSEC_MII_ACC_H
-+
-+#include "common/general.h"
-+
-+
-+/* MII Management Configuration Register */
-+#define MIIMCFG_RESET_MGMT 0x80000000
-+#define MIIMCFG_MGNTCLK_MASK 0x00000007
-+#define MIIMCFG_MGNTCLK_SHIFT 0
-+
-+/* MII Management Command Register */
-+#define MIIMCOM_SCAN_CYCLE 0x00000002
-+#define MIIMCOM_READ_CYCLE 0x00000001
-+
-+/* MII Management Address Register */
-+#define MIIMADD_PHY_ADDR_SHIFT 8
-+#define MIIMADD_PHY_ADDR_MASK 0x00001f00
-+
-+#define MIIMADD_REG_ADDR_SHIFT 0
-+#define MIIMADD_REG_ADDR_MASK 0x0000001f
-+
-+/* MII Management Indicator Register */
-+#define MIIMIND_BUSY 0x00000001
-+
-+
-+/* PHY Control Register */
-+#define PHY_CR_PHY_RESET 0x8000
-+#define PHY_CR_LOOPBACK 0x4000
-+#define PHY_CR_SPEED0 0x2000
-+#define PHY_CR_ANE 0x1000
-+#define PHY_CR_RESET_AN 0x0200
-+#define PHY_CR_FULLDUPLEX 0x0100
-+#define PHY_CR_SPEED1 0x0040
-+
-+#define PHY_TBICON_SRESET 0x8000
-+#define PHY_TBICON_SPEED2 0x0020
-+#define PHY_TBICON_CLK_SEL 0x0020
-+#define PHY_TBIANA_SGMII 0x4001
-+#define PHY_TBIANA_1000X 0x01a0
-+/* register map */
-+
-+/* MII Configuration Control Memory Map Registers */
-+struct dtsec_mii_reg {
-+ uint32_t reserved1[72];
-+ uint32_t miimcfg; /* MII Mgmt:configuration */
-+ uint32_t miimcom; /* MII Mgmt:command */
-+ uint32_t miimadd; /* MII Mgmt:address */
-+ uint32_t miimcon; /* MII Mgmt:control 3 */
-+ uint32_t miimstat; /* MII Mgmt:status */
-+ uint32_t miimind; /* MII Mgmt:indicators */
-+};
-+
-+/* dTSEC MII API */
-+
-+/* functions to access the mii registers for phy configuration.
-+ * this functionality may not be available for all dtsecs in the system.
-+ * consult the reference manual for details */
-+void fman_dtsec_mii_reset(struct dtsec_mii_reg *regs);
-+/* frequency is in MHz.
-+ * note that dtsec clock is 1/2 of fman clock */
-+void fman_dtsec_mii_init(struct dtsec_mii_reg *regs, uint16_t dtsec_freq);
-+int fman_dtsec_mii_write_reg(struct dtsec_mii_reg *regs,
-+ uint8_t addr,
-+ uint8_t reg,
-+ uint16_t data,
-+ uint16_t dtsec_freq);
-+
-+int fman_dtsec_mii_read_reg(struct dtsec_mii_reg *regs,
-+ uint8_t addr,
-+ uint8_t reg,
-+ uint16_t *data,
-+ uint16_t dtsec_freq);
-+
-+#endif /* __FSL_FMAN_DTSEC_MII_ACC_H */
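As a usage illustration for the MII accessors above (not from the original sources): a PHY register read-modify-write, here setting the loopback bit in the standard control register (register 0). dtsec_freq is the dTSEC clock in MHz, i.e. half the FMan clock as noted in the comment above.

#include "fsl_fman_dtsec_mii_acc.h"

/* Sketch: set the PHY loopback bit via the dTSEC MDIO block. */
static int dtsec_phy_set_loopback(struct dtsec_mii_reg *mii, uint8_t phy_addr,
				  uint16_t dtsec_freq)
{
	uint16_t cr;
	int err;

	err = fman_dtsec_mii_read_reg(mii, phy_addr, 0 /* control reg */,
				      &cr, dtsec_freq);
	if (err)
		return err;

	cr |= PHY_CR_LOOPBACK;
	return fman_dtsec_mii_write_reg(mii, phy_addr, 0, cr, dtsec_freq);
}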
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_kg.h
-@@ -0,0 +1,514 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_KG_H
-+#define __FSL_FMAN_KG_H
-+
-+#include "common/general.h"
-+
-+#define FM_KG_NUM_OF_GENERIC_REGS 8 /**< Num of generic KeyGen regs */
-+#define FMAN_MAX_NUM_OF_HW_PORTS 64
-+/**< Total num of masks allowed on KG extractions */
-+#define FM_KG_EXTRACT_MASKS_NUM 4
-+#define FM_KG_NUM_CLS_PLAN_ENTR 8 /**< Num of class. plan regs */
-+#define FM_KG_CLS_PLAN_GRPS_NUM 32 /**< Max num of class. groups */
-+
-+struct fman_kg_regs {
-+ uint32_t fmkg_gcr;
-+ uint32_t res004;
-+ uint32_t res008;
-+ uint32_t fmkg_eer;
-+ uint32_t fmkg_eeer;
-+ uint32_t res014;
-+ uint32_t res018;
-+ uint32_t fmkg_seer;
-+ uint32_t fmkg_seeer;
-+ uint32_t fmkg_gsr;
-+ uint32_t fmkg_tpc;
-+ uint32_t fmkg_serc;
-+ uint32_t res030[4];
-+ uint32_t fmkg_fdor;
-+ uint32_t fmkg_gdv0r;
-+ uint32_t fmkg_gdv1r;
-+ uint32_t res04c[6];
-+ uint32_t fmkg_feer;
-+ uint32_t res068[38];
-+ uint32_t fmkg_indirect[63];
-+ uint32_t fmkg_ar;
-+};
-+
-+struct fman_kg_scheme_regs {
-+ uint32_t kgse_mode; /**< MODE */
-+ uint32_t kgse_ekfc; /**< Extract Known Fields Command */
-+ uint32_t kgse_ekdv; /**< Extract Known Default Value */
-+ uint32_t kgse_bmch; /**< Bit Mask Command High */
-+ uint32_t kgse_bmcl; /**< Bit Mask Command Low */
-+ uint32_t kgse_fqb; /**< Frame Queue Base */
-+ uint32_t kgse_hc; /**< Hash Command */
-+ uint32_t kgse_ppc; /**< Policer Profile Command */
-+ uint32_t kgse_gec[FM_KG_NUM_OF_GENERIC_REGS];
-+ /**< Generic Extract Command */
-+ uint32_t kgse_spc; /**< KeyGen Scheme Entry Statistic Packet Counter */
-+ uint32_t kgse_dv0; /**< KeyGen Scheme Entry Default Value 0 */
-+ uint32_t kgse_dv1; /**< KeyGen Scheme Entry Default Value 1 */
-+ uint32_t kgse_ccbs; /**< KeyGen Scheme Entry Coarse Classification Bit*/
-+ uint32_t kgse_mv; /**< KeyGen Scheme Entry Match vector */
-+ uint32_t kgse_om; /**< KeyGen Scheme Entry Operation Mode bits */
-+ uint32_t kgse_vsp; /**< KeyGen Scheme Entry Virtual Storage Profile */
-+};
-+
-+struct fman_kg_pe_regs{
-+ uint32_t fmkg_pe_sp;
-+ uint32_t fmkg_pe_cpp;
-+};
-+
-+struct fman_kg_cp_regs {
-+ uint32_t kgcpe[FM_KG_NUM_CLS_PLAN_ENTR];
-+};
-+
-+
-+#define FM_KG_KGAR_GO 0x80000000
-+#define FM_KG_KGAR_READ 0x40000000
-+#define FM_KG_KGAR_WRITE 0x00000000
-+#define FM_KG_KGAR_SEL_SCHEME_ENTRY 0x00000000
-+#define FM_KG_KGAR_SCM_WSEL_UPDATE_CNT 0x00008000
-+
-+#define KG_SCH_PP_SHIFT_HIGH 0x80000000
-+#define KG_SCH_PP_NO_GEN 0x10000000
-+#define KG_SCH_PP_SHIFT_LOW 0x0000F000
-+#define KG_SCH_MODE_NIA_PLCR 0x40000000
-+#define KG_SCH_GEN_EXTRACT_TYPE 0x00008000
-+#define KG_SCH_BITMASK_MASK 0x000000FF
-+#define KG_SCH_GEN_VALID 0x80000000
-+#define KG_SCH_GEN_MASK 0x00FF0000
-+#define FM_PCD_KG_KGAR_ERR 0x20000000
-+#define FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY 0x01000000
-+#define FM_PCD_KG_KGAR_SEL_PORT_ENTRY 0x02000000
-+#define FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP 0x00008000
-+#define FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP 0x00004000
-+#define FM_PCD_KG_KGAR_WSEL_MASK 0x0000FF00
-+#define KG_SCH_HASH_CONFIG_NO_FQID 0x80000000
-+#define KG_SCH_HASH_CONFIG_SYM 0x40000000
-+
-+#define FM_EX_KG_DOUBLE_ECC 0x80000000
-+#define FM_EX_KG_KEYSIZE_OVERFLOW 0x40000000
-+
-+/* ECC capture register */
-+#define KG_FMKG_SERC_CAP 0x80000000
-+#define KG_FMKG_SERC_CET 0x40000000
-+#define KG_FMKG_SERC_CNT_MSK 0x00FF0000
-+#define KG_FMKG_SERC_CNT_SHIFT 16
-+#define KG_FMKG_SERC_ADDR_MSK 0x000003FF
-+
-+/* Masks */
-+#define FM_KG_KGGCR_EN 0x80000000
-+#define KG_SCH_GEN_VALID 0x80000000
-+#define KG_SCH_GEN_EXTRACT_TYPE 0x00008000
-+#define KG_ERR_TYPE_DOUBLE 0x40000000
-+#define KG_ERR_ADDR_MASK 0x00000FFF
-+#define KG_SCH_MODE_EN 0x80000000
-+
-+/* shifts */
-+#define FM_KG_KGAR_NUM_SHIFT 16
-+#define FM_KG_PE_CPP_MASK_SHIFT 16
-+#define FM_KG_KGAR_WSEL_SHIFT 8
-+
-+#define FM_KG_SCH_GEN_HT_INVALID 0
-+
-+#define FM_KG_MASK_SEL_GEN_BASE 0x20
-+
-+#define KG_GET_MASK_SEL_SHIFT(shift, i) \
-+switch (i) \
-+{ \
-+ case 0: (shift) = 26; break; \
-+ case 1: (shift) = 20; break; \
-+ case 2: (shift) = 10; break; \
-+ case 3: (shift) = 4; break; \
-+ default: (shift) = 0; \
-+}
-+
-+#define KG_GET_MASK_OFFSET_SHIFT(shift, i) \
-+switch (i) \
-+{ \
-+ case 0: (shift) = 16; break; \
-+ case 1: (shift) = 0; break; \
-+ case 2: (shift) = 28; break; \
-+ case 3: (shift) = 24; break; \
-+ default: (shift) = 0; \
-+}
-+
-+#define KG_GET_MASK_SHIFT(shift, i) \
-+switch (i) \
-+{ \
-+ case 0: shift = 24; break; \
-+ case 1: shift = 16; break; \
-+ case 2: shift = 8; break; \
-+ case 3: shift = 0; break; \
-+ default: shift = 0; \
-+}
-+
-+/* Port entry CPP register */
-+#define FMAN_KG_PE_CPP_MASK_SHIFT 16
-+
-+/* Scheme registers */
-+#define FMAN_KG_SCH_MODE_EN 0x80000000
-+#define FMAN_KG_SCH_MODE_NIA_PLCR 0x40000000
-+#define FMAN_KG_SCH_MODE_CCOBASE_SHIFT 24
-+
-+#define FMAN_KG_SCH_DEF_MAC_ADDR_SHIFT 30
-+#define FMAN_KG_SCH_DEF_VLAN_TCI_SHIFT 28
-+#define FMAN_KG_SCH_DEF_ETYPE_SHIFT 26
-+#define FMAN_KG_SCH_DEF_PPP_SID_SHIFT 24
-+#define FMAN_KG_SCH_DEF_PPP_PID_SHIFT 22
-+#define FMAN_KG_SCH_DEF_MPLS_SHIFT 20
-+#define FMAN_KG_SCH_DEF_IP_ADDR_SHIFT 18
-+#define FMAN_KG_SCH_DEF_PTYPE_SHIFT 16
-+#define FMAN_KG_SCH_DEF_IP_TOS_TC_SHIFT 14
-+#define FMAN_KG_SCH_DEF_IPv6_FL_SHIFT 12
-+#define FMAN_KG_SCH_DEF_IPSEC_SPI_SHIFT 10
-+#define FMAN_KG_SCH_DEF_L4_PORT_SHIFT 8
-+#define FMAN_KG_SCH_DEF_TCP_FLG_SHIFT 6
-+
-+#define FMAN_KG_SCH_GEN_VALID 0x80000000
-+#define FMAN_KG_SCH_GEN_SIZE_MAX 16
-+#define FMAN_KG_SCH_GEN_OR 0x00008000
-+
-+#define FMAN_KG_SCH_GEN_DEF_SHIFT 29
-+#define FMAN_KG_SCH_GEN_SIZE_SHIFT 24
-+#define FMAN_KG_SCH_GEN_MASK_SHIFT 16
-+#define FMAN_KG_SCH_GEN_HT_SHIFT 8
-+
-+#define FMAN_KG_SCH_HASH_HSHIFT_SHIFT 24
-+#define FMAN_KG_SCH_HASH_HSHIFT_MAX 0x28
-+#define FMAN_KG_SCH_HASH_SYM 0x40000000
-+#define FMAN_KG_SCH_HASH_NO_FQID_GEN 0x80000000
-+
-+#define FMAN_KG_SCH_PP_SH_SHIFT 27
-+#define FMAN_KG_SCH_PP_SL_SHIFT 12
-+#define FMAN_KG_SCH_PP_SH_MASK 0x80000000
-+#define FMAN_KG_SCH_PP_SL_MASK 0x0000F000
-+#define FMAN_KG_SCH_PP_SHIFT_MAX 0x17
-+#define FMAN_KG_SCH_PP_MASK_SHIFT 16
-+#define FMAN_KG_SCH_PP_NO_GEN 0x10000000
-+
-+enum fman_kg_gen_extract_src {
-+ E_FMAN_KG_GEN_EXTRACT_ETH,
-+ E_FMAN_KG_GEN_EXTRACT_ETYPE,
-+ E_FMAN_KG_GEN_EXTRACT_SNAP,
-+ E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_1,
-+ E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_N,
-+ E_FMAN_KG_GEN_EXTRACT_PPPoE,
-+ E_FMAN_KG_GEN_EXTRACT_MPLS_1,
-+ E_FMAN_KG_GEN_EXTRACT_MPLS_2,
-+ E_FMAN_KG_GEN_EXTRACT_MPLS_3,
-+ E_FMAN_KG_GEN_EXTRACT_MPLS_N,
-+ E_FMAN_KG_GEN_EXTRACT_IPv4_1,
-+ E_FMAN_KG_GEN_EXTRACT_IPv6_1,
-+ E_FMAN_KG_GEN_EXTRACT_IPv4_2,
-+ E_FMAN_KG_GEN_EXTRACT_IPv6_2,
-+ E_FMAN_KG_GEN_EXTRACT_MINENCAP,
-+ E_FMAN_KG_GEN_EXTRACT_IP_PID,
-+ E_FMAN_KG_GEN_EXTRACT_GRE,
-+ E_FMAN_KG_GEN_EXTRACT_TCP,
-+ E_FMAN_KG_GEN_EXTRACT_UDP,
-+ E_FMAN_KG_GEN_EXTRACT_SCTP,
-+ E_FMAN_KG_GEN_EXTRACT_DCCP,
-+ E_FMAN_KG_GEN_EXTRACT_IPSEC_AH,
-+ E_FMAN_KG_GEN_EXTRACT_IPSEC_ESP,
-+ E_FMAN_KG_GEN_EXTRACT_SHIM_1,
-+ E_FMAN_KG_GEN_EXTRACT_SHIM_2,
-+ E_FMAN_KG_GEN_EXTRACT_FROM_DFLT,
-+ E_FMAN_KG_GEN_EXTRACT_FROM_FRAME_START,
-+ E_FMAN_KG_GEN_EXTRACT_FROM_PARSE_RESULT,
-+ E_FMAN_KG_GEN_EXTRACT_FROM_END_OF_PARSE,
-+ E_FMAN_KG_GEN_EXTRACT_FROM_FQID
-+};
-+
-+struct fman_kg_ex_ecc_attr
-+{
-+ bool valid;
-+ bool double_ecc;
-+ uint16_t addr;
-+ uint8_t single_ecc_count;
-+};
-+
-+enum fman_kg_def_select
-+{
-+ E_FMAN_KG_DEF_GLOBAL_0,
-+ E_FMAN_KG_DEF_GLOBAL_1,
-+ E_FMAN_KG_DEF_SCHEME_0,
-+ E_FMAN_KG_DEF_SCHEME_1
-+};
-+
-+struct fman_kg_extract_def
-+{
-+ enum fman_kg_def_select mac_addr;
-+ enum fman_kg_def_select vlan_tci;
-+ enum fman_kg_def_select etype;
-+ enum fman_kg_def_select ppp_sid;
-+ enum fman_kg_def_select ppp_pid;
-+ enum fman_kg_def_select mpls;
-+ enum fman_kg_def_select ip_addr;
-+ enum fman_kg_def_select ptype;
-+ enum fman_kg_def_select ip_tos_tc;
-+ enum fman_kg_def_select ipv6_fl;
-+ enum fman_kg_def_select ipsec_spi;
-+ enum fman_kg_def_select l4_port;
-+ enum fman_kg_def_select tcp_flg;
-+};
-+
-+enum fman_kg_gen_extract_type
-+{
-+ E_FMAN_KG_HASH_EXTRACT,
-+ E_FMAN_KG_OR_EXTRACT
-+};
-+
-+struct fman_kg_gen_extract_params
-+{
-+ /* Hash or Or-ed extract */
-+ enum fman_kg_gen_extract_type type;
-+ enum fman_kg_gen_extract_src src;
-+ bool no_validation;
-+ /* Extraction offset from the header location specified above */
-+ uint8_t offset;
-+ /* Size of extraction for FMAN_KG_HASH_EXTRACT,
-+ * hash result shift for FMAN_KG_OR_EXTRACT */
-+ uint8_t extract;
-+ uint8_t mask;
-+ /* Default value to use when header specified
-+ * by fman_kg_gen_extract_src is not present */
-+ enum fman_kg_def_select def_val;
-+};
-+
-+struct fman_kg_extract_mask
-+{
-+ /**< Indication if mask is on known field extraction or
-+ * on general extraction; TRUE for known field */
-+ bool is_known;
-+ /**< One of FMAN_KG_EXTRACT_xxx defines for known fields mask and
-+ * generic register index for generic extracts mask */
-+ uint32_t field_or_gen_idx;
-+ /**< Byte offset from start of the extracted data specified
-+ * by field_or_gen_idx */
-+ uint8_t offset;
-+ /**< Byte mask (selected bits will be used) */
-+ uint8_t mask;
-+};
-+
-+struct fman_kg_extract_params
-+{
-+ /* Or-ed mask of FMAN_KG_EXTRACT_xxx defines */
-+ uint32_t known_fields;
-+ struct fman_kg_extract_def known_fields_def;
-+ /* Number of entries in gen_extract */
-+ uint8_t gen_extract_num;
-+ struct fman_kg_gen_extract_params gen_extract[FM_KG_NUM_OF_GENERIC_REGS];
-+ /* Number of entries in masks */
-+ uint8_t masks_num;
-+ struct fman_kg_extract_mask masks[FM_KG_EXTRACT_MASKS_NUM];
-+ uint32_t def_scheme_0;
-+ uint32_t def_scheme_1;
-+};
-+
-+struct fman_kg_hash_params
-+{
-+ bool use_hash;
-+ uint8_t shift_r;
-+ uint32_t mask; /**< 24-bit mask */
-+ bool sym; /**< Symmetric hash for src and dest pairs */
-+};
-+
-+struct fman_kg_pp_params
-+{
-+ uint8_t base;
-+ uint8_t shift;
-+ uint8_t mask;
-+ bool bypass_pp_gen;
-+};
-+
-+struct fman_kg_cc_params
-+{
-+ uint8_t base_offset;
-+ uint32_t qlcv_bits_sel;
-+};
-+
-+enum fman_pcd_engine
-+{
-+ E_FMAN_PCD_INVALID = 0, /**< Invalid PCD engine indicated*/
-+ E_FMAN_PCD_DONE, /**< No PCD Engine indicated */
-+ E_FMAN_PCD_KG, /**< Keygen indicated */
-+ E_FMAN_PCD_CC, /**< Coarse classification indicated */
-+ E_FMAN_PCD_PLCR, /**< Policer indicated */
-+ E_FMAN_PCD_PRS /**< Parser indicated */
-+};
-+
-+struct fman_kg_cls_plan_params
-+{
-+ uint8_t entries_mask;
-+ uint32_t mask_vector[FM_KG_NUM_CLS_PLAN_ENTR];
-+};
-+
-+struct fman_kg_scheme_params
-+{
-+ uint32_t match_vector;
-+ struct fman_kg_extract_params extract_params;
-+ struct fman_kg_hash_params hash_params;
-+ uint32_t base_fqid;
-+ /* TODO: decide what to do with features supported per FM version */
-+ bool bypass_fqid_gen;
-+ struct fman_kg_pp_params policer_params;
-+ struct fman_kg_cc_params cc_params;
-+ bool update_counter;
-+ /**< counter_value: Set scheme counter to the specified value;
-+ * relevant only when update_counter = TRUE. */
-+ uint32_t counter_value;
-+ enum fman_pcd_engine next_engine;
-+ /**< Next engine action code */
-+ uint32_t next_engine_action;
-+};
-+
-+
-+
-+int fman_kg_write_ar_wait(struct fman_kg_regs *regs, uint32_t fmkg_ar);
-+void fman_kg_write_sp(struct fman_kg_regs *regs, uint32_t sp, bool add);
-+void fman_kg_write_cpp(struct fman_kg_regs *regs, uint32_t cpp);
-+void fman_kg_get_event(struct fman_kg_regs *regs,
-+ uint32_t *event,
-+ uint32_t *scheme_idx);
-+void fman_kg_init(struct fman_kg_regs *regs,
-+ uint32_t exceptions,
-+ uint32_t dflt_nia);
-+void fman_kg_enable_scheme_interrupts(struct fman_kg_regs *regs);
-+void fman_kg_enable(struct fman_kg_regs *regs);
-+void fman_kg_disable(struct fman_kg_regs *regs);
-+int fman_kg_write_bind_cls_plans(struct fman_kg_regs *regs,
-+ uint8_t hwport_id,
-+ uint32_t bind_cls_plans);
-+int fman_kg_build_bind_cls_plans(uint8_t grp_base,
-+ uint8_t grp_mask,
-+ uint32_t *bind_cls_plans);
-+int fman_kg_write_bind_schemes(struct fman_kg_regs *regs,
-+ uint8_t hwport_id,
-+ uint32_t schemes);
-+int fman_kg_write_cls_plan(struct fman_kg_regs *regs,
-+ uint8_t grp_id,
-+ uint8_t entries_mask,
-+ uint8_t hwport_id,
-+ struct fman_kg_cp_regs *cls_plan_regs);
-+int fman_kg_build_cls_plan(struct fman_kg_cls_plan_params *params,
-+ struct fman_kg_cp_regs *cls_plan_regs);
-+uint32_t fman_kg_get_schemes_total_counter(struct fman_kg_regs *regs);
-+int fman_kg_set_scheme_counter(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id,
-+ uint32_t counter);
-+int fman_kg_get_scheme_counter(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id,
-+ uint32_t *counter);
-+int fman_kg_delete_scheme(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id);
-+int fman_kg_write_scheme(struct fman_kg_regs *regs,
-+ uint8_t scheme_id,
-+ uint8_t hwport_id,
-+ struct fman_kg_scheme_regs *scheme_regs,
-+ bool update_counter);
-+int fman_kg_build_scheme(struct fman_kg_scheme_params *params,
-+ struct fman_kg_scheme_regs *scheme_regs);
-+void fman_kg_get_capture(struct fman_kg_regs *regs,
-+ struct fman_kg_ex_ecc_attr *ecc_attr,
-+ bool clear);
-+void fman_kg_get_exception(struct fman_kg_regs *regs,
-+ uint32_t *events,
-+ uint32_t *scheme_ids,
-+ bool clear);
-+void fman_kg_set_exception(struct fman_kg_regs *regs,
-+ uint32_t exception,
-+ bool enable);
-+void fman_kg_set_dflt_val(struct fman_kg_regs *regs,
-+ uint8_t def_id,
-+ uint32_t val);
-+void fman_kg_set_data_after_prs(struct fman_kg_regs *regs, uint8_t offset);
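Pieced together from the declarations above, the scheme-programming flow might look like the sketch below. This is an assumption based only on the function names and signatures; the contents of fman_kg_scheme_params and the encoding of the bitmap passed to fman_kg_write_bind_schemes() (here, one bit per scheme id) are placeholders.

/* Sketch: build a scheme image, write it, then bind it to one port. */
static int kg_setup_scheme(struct fman_kg_regs *regs, uint8_t scheme_id,
			   uint8_t hwport_id,
			   struct fman_kg_scheme_params *params)
{
	struct fman_kg_scheme_regs scheme_regs;
	int err;

	err = fman_kg_build_scheme(params, &scheme_regs);
	if (err)
		return err;

	err = fman_kg_write_scheme(regs, scheme_id, hwport_id,
				   &scheme_regs, false /* keep counter */);
	if (err)
		return err;

	/* One-bit-per-scheme encoding is an assumption. */
	return fman_kg_write_bind_schemes(regs, hwport_id, 1u << scheme_id);
}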
-+
-+
-+
-+/**************************************************************************//**
-+ @Description NIA Description
-+*//***************************************************************************/
-+#define KG_NIA_ORDER_RESTOR 0x00800000
-+#define KG_NIA_ENG_FM_CTL 0x00000000
-+#define KG_NIA_ENG_PRS 0x00440000
-+#define KG_NIA_ENG_KG 0x00480000
-+#define KG_NIA_ENG_PLCR 0x004C0000
-+#define KG_NIA_ENG_BMI 0x00500000
-+#define KG_NIA_ENG_QMI_ENQ 0x00540000
-+#define KG_NIA_ENG_QMI_DEQ 0x00580000
-+#define KG_NIA_ENG_MASK 0x007C0000
-+
-+#define KG_NIA_AC_MASK 0x0003FFFF
-+
-+#define KG_NIA_INVALID 0xFFFFFFFF
-+
-+static __inline__ uint32_t fm_kg_build_nia(enum fman_pcd_engine next_engine,
-+ uint32_t next_engine_action)
-+{
-+ uint32_t nia;
-+
-+ if (next_engine_action & ~KG_NIA_AC_MASK)
-+ return KG_NIA_INVALID;
-+
-+ switch (next_engine) {
-+ case E_FMAN_PCD_DONE:
-+ nia = KG_NIA_ENG_BMI | next_engine_action;
-+ break;
-+
-+ case E_FMAN_PCD_KG:
-+ nia = KG_NIA_ENG_KG | next_engine_action;
-+ break;
-+
-+ case E_FMAN_PCD_CC:
-+ nia = KG_NIA_ENG_FM_CTL | next_engine_action;
-+ break;
-+
-+ case E_FMAN_PCD_PLCR:
-+ nia = KG_NIA_ENG_PLCR | next_engine_action;
-+ break;
-+
-+ default:
-+ nia = KG_NIA_INVALID;
-+ }
-+
-+ return nia;
-+}
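For completeness, a tiny usage example of the helper above (the action code value is a placeholder supplied by the caller):

/* Sketch: build a "PCD done" NIA; fm_kg_build_nia() returns KG_NIA_INVALID
 * if the action code has bits outside KG_NIA_AC_MASK. */
static uint32_t kg_done_nia(uint32_t action_code)
{
	return fm_kg_build_nia(E_FMAN_PCD_DONE, action_code);
}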
-+
-+#endif /* __FSL_FMAN_KG_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac.h
-@@ -0,0 +1,434 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __FSL_FMAN_MEMAC_H
-+#define __FSL_FMAN_MEMAC_H
-+
-+#include "common/general.h"
-+#include "fsl_enet.h"
-+
-+
-+#define MEMAC_NUM_OF_PADDRS 7 /* Num of additional exact match MAC adr regs */
-+
-+/* Control and Configuration Register (COMMAND_CONFIG) */
-+#define CMD_CFG_MG 0x80000000 /* 00 Magic Packet detection */
-+#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
-+#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
-+#define CMD_CFG_SFD_ANY 0x00200000 /* 10 Disable SFD check */
-+#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
-+#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
-+#define CMD_CFG_SEND_IDLE 0x00010000 /* 15 Force idle generation */
-+#define CMD_CFG_CNT_FRM_EN 0x00002000 /* 18 Control frame rx enable */
-+#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
-+#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
-+#define CMD_CFG_LOOPBACK_EN 0x00000400 /* 21 XGMII/GMII loopback enable */
-+#define CMD_CFG_TX_ADDR_INS 0x00000200 /* 22 Tx source MAC addr insertion */
-+#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
-+#define CMD_CFG_PAUSE_FWD 0x00000080 /* 24 Terminate/frwd Pause frames */
-+#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
-+#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
-+#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
-+#define CMD_CFG_WAN_MODE 0x00000008 /* 28 WAN mode enable */
-+#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
-+#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
-+
-+/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
-+#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000
-+#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF
-+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000
-+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000
-+#define TX_FIFO_SECTIONS_TX_EMPTY_PFC_10G 0x00360000
-+#define TX_FIFO_SECTIONS_TX_EMPTY_PFC_1G 0x00040000
-+#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019
-+#define TX_FIFO_SECTIONS_TX_AVAIL_1G 0x00000020
-+#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060
-+
-+#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \
-+_val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
-+((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
-+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) : \
-+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));
-+
-+#define GET_TX_EMPTY_PFC_VALUE(_val) \
-+_val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
-+((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
-+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_PFC_10G) : \
-+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_PFC_1G));
-+
-+/* Interface Mode Register (IF_MODE) */
-+#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
-+#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
-+#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
-+#define IF_MODE_RGMII 0x00000004
-+#define IF_MODE_RGMII_AUTO 0x00008000
-+#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */
-+#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */
-+#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */
-+#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */
-+#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
-+#define IF_MODE_HD 0x00000040 /* Half duplex operation */
-+
-+/* Hash table Control Register (HASHTABLE_CTRL) */
-+#define HASH_CTRL_MCAST_SHIFT 26
-+#define HASH_CTRL_MCAST_EN 0x00000100 /* 23 Mcast frame rx for hash */
-+#define HASH_CTRL_ADDR_MASK 0x0000003F /* 26-31 Hash table address code */
-+
-+#define GROUP_ADDRESS 0x0000010000000000LL /* MAC mcast indication */
-+#define HASH_TABLE_SIZE 64 /* Hash tbl size */
-+
-+/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
-+#define MEMAC_TX_IPG_LENGTH_MASK 0x0000003F
-+
-+/* Statistics Configuration Register (STATN_CONFIG) */
-+#define STATS_CFG_CLR 0x00000004 /* 29 Reset all counters */
-+#define STATS_CFG_CLR_ON_RD 0x00000002 /* 30 Clear on read */
-+#define STATS_CFG_SATURATE 0x00000001 /* 31 Saturate at the maximum val */
-+
-+/* Interrupt Mask Register (IMASK) */
-+#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */
-+#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */
-+#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */
-+#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */
-+
-+#define MEMAC_ALL_ERRS_IMASK \
-+ ((uint32_t)(MEMAC_IMASK_TSECC_ER | \
-+ MEMAC_IMASK_TECC_ER | \
-+ MEMAC_IMASK_RECC_ER | \
-+ MEMAC_IMASK_MGI))
-+
-+#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). Link sync (G) */
-+#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */
-+#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */
-+#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */
-+#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error */
-+#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */
-+#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
-+#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */
-+#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */
-+#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */
-+#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */
-+#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */
-+#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */
-+#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */
-+#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */
-+#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */
-+#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */
-+
-+enum memac_counters {
-+ E_MEMAC_COUNTER_R64,
-+ E_MEMAC_COUNTER_T64,
-+ E_MEMAC_COUNTER_R127,
-+ E_MEMAC_COUNTER_T127,
-+ E_MEMAC_COUNTER_R255,
-+ E_MEMAC_COUNTER_T255,
-+ E_MEMAC_COUNTER_R511,
-+ E_MEMAC_COUNTER_T511,
-+ E_MEMAC_COUNTER_R1023,
-+ E_MEMAC_COUNTER_T1023,
-+ E_MEMAC_COUNTER_R1518,
-+ E_MEMAC_COUNTER_T1518,
-+ E_MEMAC_COUNTER_R1519X,
-+ E_MEMAC_COUNTER_T1519X,
-+ E_MEMAC_COUNTER_RFRG,
-+ E_MEMAC_COUNTER_RJBR,
-+ E_MEMAC_COUNTER_RDRP,
-+ E_MEMAC_COUNTER_RALN,
-+ E_MEMAC_COUNTER_TUND,
-+ E_MEMAC_COUNTER_ROVR,
-+ E_MEMAC_COUNTER_RXPF,
-+ E_MEMAC_COUNTER_TXPF,
-+ E_MEMAC_COUNTER_ROCT,
-+ E_MEMAC_COUNTER_RMCA,
-+ E_MEMAC_COUNTER_RBCA,
-+ E_MEMAC_COUNTER_RPKT,
-+ E_MEMAC_COUNTER_RUCA,
-+ E_MEMAC_COUNTER_RERR,
-+ E_MEMAC_COUNTER_TOCT,
-+ E_MEMAC_COUNTER_TMCA,
-+ E_MEMAC_COUNTER_TBCA,
-+ E_MEMAC_COUNTER_TUCA,
-+ E_MEMAC_COUNTER_TERR
-+};
-+
-+#define DEFAULT_PAUSE_QUANTA 0xf000
-+#define DEFAULT_FRAME_LENGTH 0x600
-+#define DEFAULT_TX_IPG_LENGTH 12
-+
-+/*
-+ * memory map
-+ */
-+
-+struct mac_addr {
-+ uint32_t mac_addr_l; /* Lower 32 bits of 48-bit MAC address */
-+ uint32_t mac_addr_u; /* Upper 16 bits of 48-bit MAC address */
-+};
-+
-+struct memac_regs {
-+ /* General Control and Status */
-+ uint32_t res0000[2];
-+ uint32_t command_config; /* 0x008 Ctrl and cfg */
-+ struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */
-+ uint32_t maxfrm; /* 0x014 Max frame length */
-+ uint32_t res0018[1];
-+ uint32_t rx_fifo_sections; /* Receive FIFO configuration reg */
-+ uint32_t tx_fifo_sections; /* Transmit FIFO configuration reg */
-+ uint32_t res0024[2];
-+ uint32_t hashtable_ctrl; /* 0x02C Hash table control */
-+ uint32_t res0030[4];
-+ uint32_t ievent; /* 0x040 Interrupt event */
-+ uint32_t tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */
-+ uint32_t res0048;
-+ uint32_t imask; /* 0x04C Interrupt mask */
-+ uint32_t res0050;
-+ uint32_t pause_quanta[4]; /* 0x054 Pause quanta */
-+ uint32_t pause_thresh[4]; /* 0x064 Pause quanta threshold */
-+ uint32_t rx_pause_status; /* 0x074 Receive pause status */
-+ uint32_t res0078[2];
-+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS]; /* 0x80-0x0B4 mac padr */
-+ uint32_t lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */
-+ uint32_t sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */
-+ uint32_t res00c0[8];
-+ uint32_t statn_config; /* 0x0E0 Statistics configuration */
-+ uint32_t res00e4[7];
-+ /* Rx Statistics Counter */
-+ uint32_t reoct_l;
-+ uint32_t reoct_u;
-+ uint32_t roct_l;
-+ uint32_t roct_u;
-+ uint32_t raln_l;
-+ uint32_t raln_u;
-+ uint32_t rxpf_l;
-+ uint32_t rxpf_u;
-+ uint32_t rfrm_l;
-+ uint32_t rfrm_u;
-+ uint32_t rfcs_l;
-+ uint32_t rfcs_u;
-+ uint32_t rvlan_l;
-+ uint32_t rvlan_u;
-+ uint32_t rerr_l;
-+ uint32_t rerr_u;
-+ uint32_t ruca_l;
-+ uint32_t ruca_u;
-+ uint32_t rmca_l;
-+ uint32_t rmca_u;
-+ uint32_t rbca_l;
-+ uint32_t rbca_u;
-+ uint32_t rdrp_l;
-+ uint32_t rdrp_u;
-+ uint32_t rpkt_l;
-+ uint32_t rpkt_u;
-+ uint32_t rund_l;
-+ uint32_t rund_u;
-+ uint32_t r64_l;
-+ uint32_t r64_u;
-+ uint32_t r127_l;
-+ uint32_t r127_u;
-+ uint32_t r255_l;
-+ uint32_t r255_u;
-+ uint32_t r511_l;
-+ uint32_t r511_u;
-+ uint32_t r1023_l;
-+ uint32_t r1023_u;
-+ uint32_t r1518_l;
-+ uint32_t r1518_u;
-+ uint32_t r1519x_l;
-+ uint32_t r1519x_u;
-+ uint32_t rovr_l;
-+ uint32_t rovr_u;
-+ uint32_t rjbr_l;
-+ uint32_t rjbr_u;
-+ uint32_t rfrg_l;
-+ uint32_t rfrg_u;
-+ uint32_t rcnp_l;
-+ uint32_t rcnp_u;
-+ uint32_t rdrntp_l;
-+ uint32_t rdrntp_u;
-+ uint32_t res01d0[12];
-+ /* Tx Statistics Counter */
-+ uint32_t teoct_l;
-+ uint32_t teoct_u;
-+ uint32_t toct_l;
-+ uint32_t toct_u;
-+ uint32_t res0210[2];
-+ uint32_t txpf_l;
-+ uint32_t txpf_u;
-+ uint32_t tfrm_l;
-+ uint32_t tfrm_u;
-+ uint32_t tfcs_l;
-+ uint32_t tfcs_u;
-+ uint32_t tvlan_l;
-+ uint32_t tvlan_u;
-+ uint32_t terr_l;
-+ uint32_t terr_u;
-+ uint32_t tuca_l;
-+ uint32_t tuca_u;
-+ uint32_t tmca_l;
-+ uint32_t tmca_u;
-+ uint32_t tbca_l;
-+ uint32_t tbca_u;
-+ uint32_t res0258[2];
-+ uint32_t tpkt_l;
-+ uint32_t tpkt_u;
-+ uint32_t tund_l;
-+ uint32_t tund_u;
-+ uint32_t t64_l;
-+ uint32_t t64_u;
-+ uint32_t t127_l;
-+ uint32_t t127_u;
-+ uint32_t t255_l;
-+ uint32_t t255_u;
-+ uint32_t t511_l;
-+ uint32_t t511_u;
-+ uint32_t t1023_l;
-+ uint32_t t1023_u;
-+ uint32_t t1518_l;
-+ uint32_t t1518_u;
-+ uint32_t t1519x_l;
-+ uint32_t t1519x_u;
-+ uint32_t res02a8[6];
-+ uint32_t tcnp_l;
-+ uint32_t tcnp_u;
-+ uint32_t res02c8[14];
-+ /* Line Interface Control */
-+ uint32_t if_mode; /* 0x300 Interface Mode Control */
-+ uint32_t if_status; /* 0x304 Interface Status */
-+ uint32_t res0308[14];
-+ /* HiGig/2 */
-+ uint32_t hg_config; /* 0x340 Control and cfg */
-+ uint32_t res0344[3];
-+ uint32_t hg_pause_quanta; /* 0x350 Pause quanta */
-+ uint32_t res0354[3];
-+ uint32_t hg_pause_thresh; /* 0x360 Pause quanta threshold */
-+ uint32_t res0364[3];
-+ uint32_t hgrx_pause_status; /* 0x370 Receive pause status */
-+ uint32_t hg_fifos_status; /* 0x374 fifos status */
-+ uint32_t rhm; /* 0x378 rx messages counter */
-+ uint32_t thm; /* 0x37C tx messages counter */
-+};
-+
-+struct memac_cfg {
-+ bool reset_on_init;
-+ bool rx_error_discard;
-+ bool pause_ignore;
-+ bool pause_forward_enable;
-+ bool no_length_check_enable;
-+ bool cmd_frame_enable;
-+ bool send_idle_enable;
-+ bool wan_mode_enable;
-+ bool promiscuous_mode_enable;
-+ bool tx_addr_ins_enable;
-+ bool loopback_enable;
-+ bool lgth_check_nostdr;
-+ bool time_stamp_enable;
-+ bool pad_enable;
-+ bool phy_tx_ena_on;
-+ bool rx_sfd_any;
-+ bool rx_pbl_fwd;
-+ bool tx_pbl_fwd;
-+ bool debug_mode;
-+ bool wake_on_lan;
-+ uint16_t max_frame_length;
-+ uint16_t pause_quanta;
-+ uint32_t tx_ipg_length;
-+};
-+
-+
-+/**
-+ * fman_memac_defconfig() - Get default MEMAC configuration
-+ * @cfg: pointer to configuration structure.
-+ *
-+ * Call this function to obtain a default set of configuration values for
-+ * initializing MEMAC. The user can overwrite any of the values before calling
-+ * fman_memac_init(), if specific configuration needs to be applied.
-+ */
-+void fman_memac_defconfig(struct memac_cfg *cfg);
-+
-+int fman_memac_init(struct memac_regs *regs,
-+ struct memac_cfg *cfg,
-+ enum enet_interface enet_interface,
-+ enum enet_speed enet_speed,
-+ bool slow_10g_if,
-+ uint32_t exceptions);
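A minimal mEMAC bring-up sketch mirroring the dTSEC one earlier (assumed usage; the interface/speed enumerators come from fsl_enet.h and the exact-match slot index is a placeholder):

#include "fsl_fman_memac.h"

/* Sketch: default config, init, program one exact-match address, enable. */
static int memac_bring_up(struct memac_regs *regs, uint8_t *mac,
			  enum enet_interface iface, enum enet_speed speed)
{
	struct memac_cfg cfg;
	int err;

	fman_memac_defconfig(&cfg);
	cfg.max_frame_length = 1522;			/* override as needed */

	err = fman_memac_init(regs, &cfg, iface, speed,
			      false /* slow_10g_if */, MEMAC_ALL_ERRS_IMASK);
	if (err)
		return err;

	fman_memac_add_addr_in_paddr(regs, mac, 0);	/* slot 0 is assumed */
	fman_memac_enable(regs, true, true);		/* start Rx and Tx */
	return 0;
}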
-+
-+void fman_memac_enable(struct memac_regs *regs, bool apply_rx, bool apply_tx);
-+
-+void fman_memac_disable(struct memac_regs *regs, bool apply_rx, bool apply_tx);
-+
-+void fman_memac_set_promiscuous(struct memac_regs *regs, bool val);
-+
-+void fman_memac_add_addr_in_paddr(struct memac_regs *regs,
-+ uint8_t *adr,
-+ uint8_t paddr_num);
-+
-+void fman_memac_clear_addr_in_paddr(struct memac_regs *regs,
-+ uint8_t paddr_num);
-+
-+uint64_t fman_memac_get_counter(struct memac_regs *regs,
-+ enum memac_counters reg_name);
-+
-+void fman_memac_set_tx_pause_frames(struct memac_regs *regs,
-+ uint8_t priority, uint16_t pauseTime, uint16_t threshTime);
-+
-+uint16_t fman_memac_get_max_frame_len(struct memac_regs *regs);
-+
-+void fman_memac_set_exception(struct memac_regs *regs, uint32_t val,
-+ bool enable);
-+
-+void fman_memac_reset_stat(struct memac_regs *regs);
-+
-+void fman_memac_reset(struct memac_regs *regs);
-+
-+void fman_memac_reset_filter_table(struct memac_regs *regs);
-+
-+void fman_memac_set_hash_table_entry(struct memac_regs *regs, uint32_t crc);
-+
-+void fman_memac_set_hash_table(struct memac_regs *regs, uint32_t val);
-+
-+void fman_memac_set_rx_ignore_pause_frames(struct memac_regs *regs,
-+ bool enable);
-+
-+void fman_memac_set_wol(struct memac_regs *regs, bool enable);
-+
-+uint32_t fman_memac_get_event(struct memac_regs *regs, uint32_t ev_mask);
-+
-+void fman_memac_ack_event(struct memac_regs *regs, uint32_t ev_mask);
-+
-+uint32_t fman_memac_get_interrupt_mask(struct memac_regs *regs);
-+
-+void fman_memac_adjust_link(struct memac_regs *regs,
-+ enum enet_interface iface_mode,
-+ enum enet_speed speed, bool full_dx);
-+
-+
-+
-+#endif /*__FSL_FMAN_MEMAC_H*/
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h
-@@ -0,0 +1,78 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_MEMAC_MII_ACC_H
-+#define __FSL_FMAN_MEMAC_MII_ACC_H
-+
-+#include "common/general.h"
-+#include "fsl_enet.h"
-+/* MII Management Registers */
-+#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
-+#define MDIO_CFG_CLK_DIV_SHIFT 7
-+#define MDIO_CFG_HOLD_MASK 0x0000001c
-+#define MDIO_CFG_ENC45 0x00000040
-+#define MDIO_CFG_READ_ERR 0x00000002
-+#define MDIO_CFG_BSY 0x00000001
-+
-+#define MDIO_CTL_PHY_ADDR_SHIFT 5
-+#define MDIO_CTL_READ 0x00008000
-+
-+#define MDIO_DATA_BSY 0x80000000
-+
-+/*MEMAC Internal PHY Registers - SGMII */
-+#define PHY_SGMII_CR_PHY_RESET 0x8000
-+#define PHY_SGMII_CR_RESET_AN 0x0200
-+#define PHY_SGMII_CR_DEF_VAL 0x1140
-+#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
-+#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
-+#define PHY_SGMII_IF_MODE_AN 0x0002
-+#define PHY_SGMII_IF_MODE_SGMII 0x0001
-+#define PHY_SGMII_IF_MODE_1000X 0x0000
-+
-+/*----------------------------------------------------*/
-+/* MII Configuration Control Memory Map Registers */
-+/*----------------------------------------------------*/
-+struct memac_mii_access_mem_map {
-+ uint32_t mdio_cfg; /* 0x030 */
-+ uint32_t mdio_ctrl; /* 0x034 */
-+ uint32_t mdio_data; /* 0x038 */
-+ uint32_t mdio_addr; /* 0x03c */
-+};
-+
-+int fman_memac_mii_read_phy_reg(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t *data,
-+ enum enet_speed enet_speed);
-+int fman_memac_mii_write_phy_reg(struct memac_mii_access_mem_map *mii_regs,
-+ uint8_t phy_addr, uint8_t reg, uint16_t data,
-+ enum enet_speed enet_speed);
-+
-+#endif /* __FSL_FMAN_MEMAC_MII_ACC_H */
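As a quick illustration of the MDIO accessors declared here, a hedged sketch that reads the IEEE 802.3 clause-22 PHY Identifier 1 register (register 2); the enet_speed argument is whatever the caller already uses for the attached MAC:

#include "fsl_fman_memac_mii_acc.h"

static int read_phy_id1(struct memac_mii_access_mem_map *mii_regs,
			uint8_t phy_addr, enum enet_speed speed,
			uint16_t *id1)
{
	/* MII register 2 holds the upper bits of the PHY identifier */
	return fman_memac_mii_read_phy_reg(mii_regs, phy_addr, 2, id1, speed);
}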
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_port.h
-@@ -0,0 +1,593 @@
-+/*
-+ * Copyright 2008-2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_PORT_H
-+#define __FSL_FMAN_PORT_H
-+
-+#include "fsl_fman_sp.h"
-+
-+/** @Collection Registers bit fields */
-+
-+/** @Description BMI defines */
-+#define BMI_EBD_EN 0x80000000
-+
-+#define BMI_PORT_CFG_EN 0x80000000
-+#define BMI_PORT_CFG_FDOVR 0x02000000
-+#define BMI_PORT_CFG_IM 0x01000000
-+
-+#define BMI_PORT_STATUS_BSY 0x80000000
-+
-+#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
-+#define BMI_DMA_ATTR_IC_STASH_ON 0x10000000
-+#define BMI_DMA_ATTR_HDR_STASH_ON 0x04000000
-+#define BMI_DMA_ATTR_SG_STASH_ON 0x01000000
-+#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
-+
-+#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
-+#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000
-+
-+#define BMI_TX_FRAME_END_CS_IGNORE_SHIFT 24
-+#define BMI_RX_FRAME_END_CS_IGNORE_SHIFT 24
-+#define BMI_RX_FRAME_END_CUT_SHIFT 16
-+
-+#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT
-+#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT
-+
-+#define BMI_INT_BUF_MARG_SHIFT 28
-+#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT
-+
-+#define BMI_CMD_MR_LEAC 0x00200000
-+#define BMI_CMD_MR_SLEAC 0x00100000
-+#define BMI_CMD_MR_MA 0x00080000
-+#define BMI_CMD_MR_DEAS 0x00040000
-+#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
-+ BMI_CMD_MR_SLEAC | \
-+ BMI_CMD_MR_MA | \
-+ BMI_CMD_MR_DEAS)
-+#define BMI_CMD_TX_MR_DEF 0
-+#define BMI_CMD_OP_MR_DEF (BMI_CMD_MR_DEAS | \
-+ BMI_CMD_MR_MA)
-+
-+#define BMI_CMD_ATTR_ORDER 0x80000000
-+#define BMI_CMD_ATTR_SYNC 0x02000000
-+#define BMI_CMD_ATTR_COLOR_SHIFT 26
-+
-+#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12
-+#define BMI_NEXT_ENG_FD_BITS_SHIFT 24
-+#define BMI_FRAME_END_CS_IGNORE_SHIFT 24
-+
-+#define BMI_COUNTERS_EN 0x80000000
-+
-+#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID
-+#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER
-+#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP
-+#define BMI_EXT_BUF_POOL_ID_SHIFT 16
-+#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
-+#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16
-+
-+#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
-+#define BMI_TX_FIFO_PIPELINE_DEPTH_SHIFT 12
-+
-+#define MAX_PERFORMANCE_TASK_COMP 64
-+#define MAX_PERFORMANCE_RX_QUEUE_COMP 64
-+#define MAX_PERFORMANCE_TX_QUEUE_COMP 8
-+#define MAX_PERFORMANCE_DMA_COMP 16
-+#define MAX_PERFORMANCE_FIFO_COMP 1024
-+
-+#define BMI_PERFORMANCE_TASK_COMP_SHIFT 24
-+#define BMI_PERFORMANCE_QUEUE_COMP_SHIFT 16
-+#define BMI_PERFORMANCE_DMA_COMP_SHIFT 12
-+
-+#define BMI_RATE_LIMIT_GRAN_TX 16000 /* In Kbps */
-+#define BMI_RATE_LIMIT_GRAN_OP 10000 /* In frames */
-+#define BMI_RATE_LIMIT_MAX_RATE_IN_GRAN_UNITS 1024
-+#define BMI_RATE_LIMIT_MAX_BURST_SIZE 1024 /* In KBytes */
-+#define BMI_RATE_LIMIT_MAX_BURST_SHIFT 16
-+#define BMI_RATE_LIMIT_HIGH_BURST_SIZE_GRAN 0x80000000
-+#define BMI_RATE_LIMIT_SCALE_TSBS_SHIFT 16
-+#define BMI_RATE_LIMIT_SCALE_EN 0x80000000
-+#define BMI_SG_DISABLE FMAN_SP_SG_DISABLE
-+
-+/** @Description QMI defines */
-+#define QMI_PORT_CFG_EN 0x80000000
-+#define QMI_PORT_CFG_EN_COUNTERS 0x10000000
-+
-+#define QMI_PORT_STATUS_DEQ_TNUM_BSY 0x80000000
-+#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
-+
-+#define QMI_DEQ_CFG_PRI 0x80000000
-+#define QMI_DEQ_CFG_TYPE1 0x10000000
-+#define QMI_DEQ_CFG_TYPE2 0x20000000
-+#define QMI_DEQ_CFG_TYPE3 0x30000000
-+#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000
-+#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000
-+#define QMI_DEQ_CFG_SP_MASK 0xf
-+#define QMI_DEQ_CFG_SP_SHIFT 20
-+
-+
-+/** @Description General port defines */
-+#define FMAN_PORT_EXT_POOLS_NUM(fm_rev_maj) \
-+ (((fm_rev_maj) == 4) ? 4 : 8)
-+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8
-+#define FMAN_PORT_OBS_EXT_POOLS_NUM 2
-+#define FMAN_PORT_CG_MAP_NUM 8
-+#define FMAN_PORT_PRS_RESULT_WORDS_NUM 8
-+#define FMAN_PORT_BMI_FIFO_UNITS 0x100
-+#define FMAN_PORT_IC_OFFSET_UNITS 0x10
-+
-+
-+/** @Collection FM Port Register Map */
-+
-+/** @Description BMI Rx port register map */
-+struct fman_port_rx_bmi_regs {
-+ uint32_t fmbm_rcfg; /**< Rx Configuration */
-+ uint32_t fmbm_rst; /**< Rx Status */
-+ uint32_t fmbm_rda; /**< Rx DMA attributes*/
-+ uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
-+ uint32_t fmbm_rfed; /**< Rx Frame End Data*/
-+ uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
-+ uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
-+ uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
-+ uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
-+ uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
-+ uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
-+ uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
-+ uint32_t fmbm_rpp; /**< Rx Policer Profile */
-+ uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
-+ uint32_t fmbm_reth; /**< Rx Excessive Threshold */
-+ uint32_t reserved003c[1]; /**< (0x03C 0x03F) */
-+ uint32_t fmbm_rprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
-+ /**< Rx Parse Results Array Init*/
-+ uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
-+ uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
-+ uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
-+ uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
-+ uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
-+ uint32_t reserved0074[0x2]; /**< (0x074-0x07C) */
-+ uint32_t fmbm_rcmne; /**< Rx Frame Continuous Mode Next Engine */
-+ uint32_t reserved0080[0x20];/**< (0x080 0x0FF) */
-+ uint32_t fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
-+ /**< Buffer Manager pool Information-*/
-+ uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
-+ /**< Allocate Counter-*/
-+ uint32_t reserved0130[8];
-+ /**< 0x130/0x140 - 0x15F reserved -*/
-+ uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
-+ /**< Congestion Group Map*/
-+ uint32_t fmbm_mpd; /**< BM Pool Depletion */
-+ uint32_t reserved0184[0x1F]; /**< (0x184 0x1FF) */
-+ uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
-+ uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
-+ uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
-+ uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
-+ uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
-+ uint32_t fmbm_rfdc; /**< Rx Frame Discard Counter*/
-+ uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
-+ uint32_t fmbm_rodc; /**< Rx Out of Buffers Discard Cntr*/
-+ uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter*/
-+ uint32_t reserved0224[0x17]; /**< (0x224 0x27F) */
-+ uint32_t fmbm_rpc; /**< Rx Performance Counters*/
-+ uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
-+ uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
-+ uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
-+ uint32_t fmbm_rrquc; /**< Rx Receive Queue Utilization cntr*/
-+ uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
-+ uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
-+ uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
-+ uint32_t reserved02a0[0x18]; /**< (0x2A0 0x2FF) */
-+ uint32_t fmbm_rdbg; /**< Rx Debug-*/
-+};
-+
-+/** @Description BMI Tx port register map */
-+struct fman_port_tx_bmi_regs {
-+ uint32_t fmbm_tcfg; /**< Tx Configuration */
-+ uint32_t fmbm_tst; /**< Tx Status */
-+ uint32_t fmbm_tda; /**< Tx DMA attributes */
-+ uint32_t fmbm_tfp; /**< Tx FIFO Parameters */
-+ uint32_t fmbm_tfed; /**< Tx Frame End Data */
-+ uint32_t fmbm_ticp; /**< Tx Internal Context Parameters */
-+ uint32_t fmbm_tfdne; /**< Tx Frame Dequeue Next Engine. */
-+ uint32_t fmbm_tfca; /**< Tx Frame Command attribute. */
-+ uint32_t fmbm_tcfqid; /**< Tx Confirmation Frame Queue ID. */
-+ uint32_t fmbm_tefqid; /**< Tx Frame Error Queue ID */
-+ uint32_t fmbm_tfene; /**< Tx Frame Enqueue Next Engine */
-+ uint32_t fmbm_trlmts; /**< Tx Rate Limiter Scale */
-+ uint32_t fmbm_trlmt; /**< Tx Rate Limiter */
-+ uint32_t reserved0034[0x0e]; /**< (0x034-0x6c) */
-+ uint32_t fmbm_tccb; /**< Tx Coarse Classification base */
-+ uint32_t fmbm_tfne; /**< Tx Frame Next Engine */
-+ uint32_t fmbm_tpfcm[0x02]; /**< Tx Priority based Flow Control (PFC) Mapping */
-+ uint32_t fmbm_tcmne; /**< Tx Frame Continuous Mode Next Engine */
-+ uint32_t reserved0080[0x60]; /**< (0x080-0x200) */
-+ uint32_t fmbm_tstc; /**< Tx Statistics Counters */
-+ uint32_t fmbm_tfrc; /**< Tx Frame Counter */
-+ uint32_t fmbm_tfdc; /**< Tx Frames Discard Counter */
-+ uint32_t fmbm_tfledc; /**< Tx Frame len error discard cntr */
-+ uint32_t fmbm_tfufdc; /**< Tx Frame unsprt frmt discard cntr*/
-+ uint32_t fmbm_tbdc; /**< Tx Buffers Deallocate Counter */
-+ uint32_t reserved0218[0x1A]; /**< (0x218-0x280) */
-+ uint32_t fmbm_tpc; /**< Tx Performance Counters*/
-+ uint32_t fmbm_tpcp; /**< Tx Performance Count Parameters*/
-+ uint32_t fmbm_tccn; /**< Tx Cycle Counter*/
-+ uint32_t fmbm_ttuc; /**< Tx Tasks Utilization Counter*/
-+ uint32_t fmbm_ttcquc; /**< Tx Transmit conf Q util Counter*/
-+ uint32_t fmbm_tduc; /**< Tx DMA Utilization Counter*/
-+ uint32_t fmbm_tfuc; /**< Tx FIFO Utilization Counter*/
-+};
-+
-+/** @Description BMI O/H port register map */
-+struct fman_port_oh_bmi_regs {
-+ uint32_t fmbm_ocfg; /**< O/H Configuration */
-+ uint32_t fmbm_ost; /**< O/H Status */
-+ uint32_t fmbm_oda; /**< O/H DMA attributes */
-+ uint32_t fmbm_oicp; /**< O/H Internal Context Parameters */
-+ uint32_t fmbm_ofdne; /**< O/H Frame Dequeue Next Engine */
-+ uint32_t fmbm_ofne; /**< O/H Frame Next Engine */
-+ uint32_t fmbm_ofca; /**< O/H Frame Command Attributes. */
-+ uint32_t fmbm_ofpne; /**< O/H Frame Parser Next Engine */
-+ uint32_t fmbm_opso; /**< O/H Parse Start Offset */
-+ uint32_t fmbm_opp; /**< O/H Policer Profile */
-+ uint32_t fmbm_occb; /**< O/H Coarse Classification base */
-+ uint32_t fmbm_oim; /**< O/H Internal margins*/
-+ uint32_t fmbm_ofp; /**< O/H Fifo Parameters*/
-+ uint32_t fmbm_ofed; /**< O/H Frame End Data*/
-+ uint32_t reserved0030[2]; /**< (0x038 - 0x03F) */
-+ uint32_t fmbm_oprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
-+ /**< O/H Parse Results Array Initialization */
-+ uint32_t fmbm_ofqid; /**< O/H Frame Queue ID */
-+ uint32_t fmbm_oefqid; /**< O/H Error Frame Queue ID */
-+ uint32_t fmbm_ofsdm; /**< O/H Frame Status Discard Mask */
-+ uint32_t fmbm_ofsem; /**< O/H Frame Status Error Mask */
-+ uint32_t fmbm_ofene; /**< O/H Frame Enqueue Next Engine */
-+ uint32_t fmbm_orlmts; /**< O/H Rate Limiter Scale */
-+ uint32_t fmbm_orlmt; /**< O/H Rate Limiter */
-+ uint32_t fmbm_ocmne; /**< O/H Continuous Mode Next Engine */
-+ uint32_t reserved0080[0x20]; /**< 0x080 - 0x0FF Reserved */
-+ uint32_t fmbm_oebmpi[2]; /**< Buf Mngr Observed Pool Info */
-+ uint32_t reserved0108[0x16]; /**< 0x108 - 0x15F Reserved */
-+ uint32_t fmbm_ocgm[FMAN_PORT_CG_MAP_NUM]; /**< Observed Congestion Group Map */
-+ uint32_t fmbm_ompd; /**< Observed BMan Pool Depletion */
-+ uint32_t reserved0184[0x1F]; /**< 0x184 - 0x1FF Reserved */
-+ uint32_t fmbm_ostc; /**< O/H Statistics Counters */
-+ uint32_t fmbm_ofrc; /**< O/H Frame Counter */
-+ uint32_t fmbm_ofdc; /**< O/H Frames Discard Counter */
-+ uint32_t fmbm_ofledc; /**< O/H Frames Len Err Discard Cntr */
-+ uint32_t fmbm_ofufdc; /**< O/H Frames Unsprtd Discard Cntr */
-+ uint32_t fmbm_offc; /**< O/H Filter Frames Counter */
-+ uint32_t fmbm_ofwdc; /**< Rx Frames WRED Discard Counter */
-+ uint32_t fmbm_ofldec; /**< O/H Frames List DMA Error Cntr */
-+ uint32_t fmbm_obdc; /**< O/H Buffers Deallocate Counter */
-+ uint32_t reserved0218[0x17]; /**< (0x218 - 0x27F) */
-+ uint32_t fmbm_opc; /**< O/H Performance Counters */
-+ uint32_t fmbm_opcp; /**< O/H Performance Count Parameters */
-+ uint32_t fmbm_occn; /**< O/H Cycle Counter */
-+ uint32_t fmbm_otuc; /**< O/H Tasks Utilization Counter */
-+ uint32_t fmbm_oduc; /**< O/H DMA Utilization Counter */
-+ uint32_t fmbm_ofuc; /**< O/H FIFO Utilization Counter */
-+};
-+
-+/** @Description BMI port register map */
-+union fman_port_bmi_regs {
-+ struct fman_port_rx_bmi_regs rx;
-+ struct fman_port_tx_bmi_regs tx;
-+ struct fman_port_oh_bmi_regs oh;
-+};
-+
-+/** @Description QMI port register map */
-+struct fman_port_qmi_regs {
-+ uint32_t fmqm_pnc; /**< PortID n Configuration Register */
-+ uint32_t fmqm_pns; /**< PortID n Status Register */
-+ uint32_t fmqm_pnts; /**< PortID n Task Status Register */
-+ uint32_t reserved00c[4]; /**< 0xn00C - 0xn01B */
-+ uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
-+ uint32_t fmqm_pnetfc; /**< PortID n Enq Total Frame Counter */
-+ uint32_t reserved024[2]; /**< 0xn024 - 0x02B */
-+ uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
-+ uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
-+ uint32_t fmqm_pndtfc; /**< PortID n Dequeue tot Frame cntr */
-+ uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID Dflt Cntr */
-+ uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
-+};
-+
-+
-+enum fman_port_dma_swap {
-+ E_FMAN_PORT_DMA_NO_SWAP, /**< No swap, transfer data as is */
-+ E_FMAN_PORT_DMA_SWAP_LE,
-+ /**< The transferred data should be swapped in PPC Little Endian mode */
-+ E_FMAN_PORT_DMA_SWAP_BE
-+ /**< The transferred data should be swapped in Big Endian mode */
-+};
-+
-+/* Default port color */
-+enum fman_port_color {
-+ E_FMAN_PORT_COLOR_GREEN, /**< Default port color is green */
-+ E_FMAN_PORT_COLOR_YELLOW, /**< Default port color is yellow */
-+ E_FMAN_PORT_COLOR_RED, /**< Default port color is red */
-+ E_FMAN_PORT_COLOR_OVERRIDE /**< Ignore color */
-+};
-+
-+/* QMI dequeue from the SP channel - types */
-+enum fman_port_deq_type {
-+ E_FMAN_PORT_DEQ_BY_PRI,
-+ /**< Priority precedence and Intra-Class scheduling */
-+ E_FMAN_PORT_DEQ_ACTIVE_FQ,
-+ /**< Active FQ precedence and Intra-Class scheduling */
-+ E_FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
-+ /**< Active FQ precedence and override Intra-Class scheduling */
-+};
-+
-+/* QMI dequeue prefetch modes */
-+enum fman_port_deq_prefetch {
-+ E_FMAN_PORT_DEQ_NO_PREFETCH, /**< No prefetch mode */
-+ E_FMAN_PORT_DEQ_PART_PREFETCH, /**< Partial prefetch mode */
-+ E_FMAN_PORT_DEQ_FULL_PREFETCH /**< Full prefetch mode */
-+};
-+
-+/* Parameters for defining performance counters behavior */
-+struct fman_port_perf_cnt_params {
-+ uint8_t task_val; /**< Task compare value */
-+ uint8_t queue_val;
-+ /**< Rx or Tx conf queue compare value (unused for O/H ports) */
-+ uint8_t dma_val; /**< Dma compare value */
-+ uint32_t fifo_val; /**< Fifo compare value (in bytes) */
-+};
-+
-+/** @Description FM Port configuration structure, used at init */
-+struct fman_port_cfg {
-+ struct fman_port_perf_cnt_params perf_cnt_params;
-+ /* BMI parameters */
-+ enum fman_port_dma_swap dma_swap_data;
-+ bool dma_ic_stash_on;
-+ bool dma_header_stash_on;
-+ bool dma_sg_stash_on;
-+ bool dma_write_optimize;
-+ uint16_t ic_ext_offset;
-+ uint8_t ic_int_offset;
-+ uint16_t ic_size;
-+ enum fman_port_color color;
-+ bool sync_req;
-+ bool discard_override;
-+ uint8_t checksum_bytes_ignore;
-+ uint8_t rx_cut_end_bytes;
-+ uint32_t rx_pri_elevation;
-+ uint32_t rx_fifo_thr;
-+ uint8_t rx_fd_bits;
-+ uint8_t int_buf_start_margin;
-+ uint16_t ext_buf_start_margin;
-+ uint16_t ext_buf_end_margin;
-+ uint32_t tx_fifo_min_level;
-+ uint32_t tx_fifo_low_comf_level;
-+ uint8_t tx_fifo_deq_pipeline_depth;
-+ bool stats_counters_enable;
-+ bool perf_counters_enable;
-+ /* QMI parameters */
-+ bool deq_high_pri;
-+ enum fman_port_deq_type deq_type;
-+ enum fman_port_deq_prefetch deq_prefetch_opt;
-+ uint16_t deq_byte_cnt;
-+ bool queue_counters_enable;
-+ bool no_scatter_gather;
-+ int errata_A006675;
-+ int errata_A006320;
-+ int excessive_threshold_register;
-+ int fmbm_rebm_has_sgd;
-+ int fmbm_tfne_has_features;
-+ int qmi_deq_options_support;
-+};
-+
-+enum fman_port_type {
-+ E_FMAN_PORT_TYPE_OP = 0,
-+ /**< Offline parsing port, shares id-s with
-+ * host command, so must have exclusive id-s */
-+ E_FMAN_PORT_TYPE_RX, /**< 1G Rx port */
-+ E_FMAN_PORT_TYPE_RX_10G, /**< 10G Rx port */
-+ E_FMAN_PORT_TYPE_TX, /**< 1G Tx port */
-+ E_FMAN_PORT_TYPE_TX_10G, /**< 10G Tx port */
-+ E_FMAN_PORT_TYPE_DUMMY,
-+ E_FMAN_PORT_TYPE_HC = E_FMAN_PORT_TYPE_DUMMY
-+ /**< Host command port, shares id-s with
-+ * offline parsing ports, so must have exclusive id-s */
-+};
-+
-+struct fman_port_params {
-+ uint32_t discard_mask;
-+ uint32_t err_mask;
-+ uint32_t dflt_fqid;
-+ uint32_t err_fqid;
-+ uint8_t deq_sp;
-+ bool dont_release_buf;
-+};
-+
-+/* Port context - used by most API functions */
-+struct fman_port {
-+ enum fman_port_type type;
-+ uint8_t fm_rev_maj;
-+ uint8_t fm_rev_min;
-+ union fman_port_bmi_regs *bmi_regs;
-+ struct fman_port_qmi_regs *qmi_regs;
-+ bool im_en;
-+ uint8_t ext_pools_num;
-+};
-+
-+/** @Description External buffer pools configuration */
-+struct fman_port_bpools {
-+ uint8_t count; /**< Num of pools to set up */
-+ bool counters_enable; /**< Enable allocate counters */
-+ uint8_t grp_bp_depleted_num;
-+ /**< Number of depleted pools - if reached the BMI indicates
-+ * the MAC to send a pause frame */
-+ struct {
-+ uint8_t bpid; /**< BM pool ID */
-+ uint16_t size;
-+ /**< Pool's size - must be in ascending order */
-+ bool is_backup;
-+ /**< If this is a backup pool */
-+ bool grp_bp_depleted;
-+ /**< Consider this buffer in multiple pools depletion criteria*/
-+ bool single_bp_depleted;
-+ /**< Consider this buffer in single pool depletion criteria */
-+ bool pfc_priorities_en;
-+ } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
-+};
-+
-+enum fman_port_rate_limiter_scale_down {
-+ E_FMAN_PORT_RATE_DOWN_NONE,
-+ E_FMAN_PORT_RATE_DOWN_BY_2,
-+ E_FMAN_PORT_RATE_DOWN_BY_4,
-+ E_FMAN_PORT_RATE_DOWN_BY_8
-+};
-+
-+/* Rate limiter configuration */
-+struct fman_port_rate_limiter {
-+ uint8_t count_1micro_bit;
-+ bool high_burst_size_gran;
-+ /**< Defines burst_size granularity for OP ports; when TRUE,
-+ * burst_size below counts in frames, otherwise in 10^3 frames */
-+ uint16_t burst_size;
-+ /**< Max burst size, in KBytes for Tx port, according to
-+ * high_burst_size_gran definition for OP port */
-+ uint32_t rate;
-+ /**< In Kbps for Tx port, in frames/sec for OP port */
-+ enum fman_port_rate_limiter_scale_down rate_factor;
-+};
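The unit comments above are easy to misread (Kbps and KBytes for Tx ports, frames and frame granularity for OP ports), so here is a hedged sketch of a Tx-port setting; count_1micro_bit depends on the FM clock and is simply passed through, and fman_port_set_rate_limiter() is the setter declared further down in this header:

static int limit_tx_to_100mbps(struct fman_port *port, uint8_t count_1micro_bit)
{
	struct fman_port_rate_limiter rl;

	rl.count_1micro_bit = count_1micro_bit;	/* FM-clock dependent, not derived here */
	rl.high_burst_size_gran = FALSE;	/* only meaningful for OP ports */
	rl.burst_size = 64;			/* KBytes, since this is a Tx port */
	rl.rate = 100000;			/* Kbps, since this is a Tx port */
	rl.rate_factor = E_FMAN_PORT_RATE_DOWN_NONE;

	return fman_port_set_rate_limiter(port, &rl);
}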
-+
-+/* BMI statistics counters */
-+enum fman_port_stats_counters {
-+ E_FMAN_PORT_STATS_CNT_FRAME,
-+ /**< Number of processed frames; valid for all ports */
-+ E_FMAN_PORT_STATS_CNT_DISCARD,
-+ /**< For Rx ports - frames discarded by QMAN, for Tx or O/H ports -
-+ * frames discarded due to DMA error; valid for all ports */
-+ E_FMAN_PORT_STATS_CNT_DEALLOC_BUF,
-+ /**< Number of buffer deallocate operations; valid for all ports */
-+ E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME,
-+ /**< Number of bad Rx frames, like CRC error, Rx FIFO overflow etc;
-+ * valid for Rx ports only */
-+ E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME,
-+ /**< Number of Rx oversized frames, that is frames exceeding max frame
-+ * size configured for the corresponding ETH controller;
-+ * valid for Rx ports only */
-+ E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF,
-+ /**< Frames discarded due to lack of external buffers; valid for
-+ * Rx ports only */
-+ E_FMAN_PORT_STATS_CNT_LEN_ERR,
-+ /**< Frames discarded due to frame length error; valid for Tx and
-+ * O/H ports only */
-+ E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT,
-+ /**< Frames discarded due to unsupported FD format; valid for Tx
-+ * and O/H ports only */
-+ E_FMAN_PORT_STATS_CNT_FILTERED_FRAME,
-+ /**< Number of frames filtered out by PCD module; valid for
-+ * Rx and OP ports only */
-+ E_FMAN_PORT_STATS_CNT_DMA_ERR,
-+ /**< Frames rejected by QMAN that were not able to release their
-+ * buffers due to DMA error; valid for Rx and O/H ports only */
-+ E_FMAN_PORT_STATS_CNT_WRED_DISCARD
-+ /**< Frames going through O/H port that were not able to enter the
-+ * return queue due to WRED algorithm; valid for O/H ports only */
-+};
-+
-+/* BMI performance counters */
-+enum fman_port_perf_counters {
-+ E_FMAN_PORT_PERF_CNT_CYCLE, /**< Cycle counter */
-+ E_FMAN_PORT_PERF_CNT_TASK_UTIL, /**< Tasks utilization counter */
-+ E_FMAN_PORT_PERF_CNT_QUEUE_UTIL,
-+ /**< For Rx ports - Rx queue utilization, for Tx ports - Tx conf queue
-+ * utilization; not valid for O/H ports */
-+ E_FMAN_PORT_PERF_CNT_DMA_UTIL, /**< DMA utilization counter */
-+ E_FMAN_PORT_PERF_CNT_FIFO_UTIL, /**< FIFO utilization counter */
-+ E_FMAN_PORT_PERF_CNT_RX_PAUSE
-+ /**< Number of cycles in which Rx pause activation control is on;
-+ * valid for Rx ports only */
-+};
-+
-+/* QMI counters */
-+enum fman_port_qmi_counters {
-+ E_FMAN_PORT_ENQ_TOTAL, /**< EnQ tot frame cntr */
-+ E_FMAN_PORT_DEQ_TOTAL, /**< DeQ tot frame cntr; invalid for Rx ports */
-+ E_FMAN_PORT_DEQ_FROM_DFLT,
-+ /**< Dequeue from default FQID counter not valid for Rx ports */
-+ E_FMAN_PORT_DEQ_CONFIRM /**< DeQ confirm cntr invalid for Rx ports */
-+};
-+
-+
-+/** @Collection FM Port API */
-+void fman_port_defconfig(struct fman_port_cfg *cfg, enum fman_port_type type);
-+int fman_port_init(struct fman_port *port,
-+ struct fman_port_cfg *cfg,
-+ struct fman_port_params *params);
-+int fman_port_enable(struct fman_port *port);
-+int fman_port_disable(const struct fman_port *port);
-+int fman_port_set_bpools(const struct fman_port *port,
-+ const struct fman_port_bpools *bp);
-+int fman_port_set_rate_limiter(struct fman_port *port,
-+ struct fman_port_rate_limiter *rate_limiter);
-+int fman_port_delete_rate_limiter(struct fman_port *port);
-+int fman_port_set_err_mask(struct fman_port *port, uint32_t err_mask);
-+int fman_port_set_discard_mask(struct fman_port *port, uint32_t discard_mask);
-+int fman_port_modify_rx_fd_bits(struct fman_port *port,
-+ uint8_t rx_fd_bits,
-+ bool add);
-+int fman_port_set_perf_cnt_params(struct fman_port *port,
-+ struct fman_port_perf_cnt_params *params);
-+int fman_port_set_stats_cnt_mode(struct fman_port *port, bool enable);
-+int fman_port_set_perf_cnt_mode(struct fman_port *port, bool enable);
-+int fman_port_set_queue_cnt_mode(struct fman_port *port, bool enable);
-+int fman_port_set_bpool_cnt_mode(struct fman_port *port,
-+ uint8_t bpid,
-+ bool enable);
-+uint32_t fman_port_get_stats_counter(struct fman_port *port,
-+ enum fman_port_stats_counters counter);
-+void fman_port_set_stats_counter(struct fman_port *port,
-+ enum fman_port_stats_counters counter,
-+ uint32_t value);
-+uint32_t fman_port_get_perf_counter(struct fman_port *port,
-+ enum fman_port_perf_counters counter);
-+void fman_port_set_perf_counter(struct fman_port *port,
-+ enum fman_port_perf_counters counter,
-+ uint32_t value);
-+uint32_t fman_port_get_qmi_counter(struct fman_port *port,
-+ enum fman_port_qmi_counters counter);
-+void fman_port_set_qmi_counter(struct fman_port *port,
-+ enum fman_port_qmi_counters counter,
-+ uint32_t value);
-+uint32_t fman_port_get_bpool_counter(struct fman_port *port, uint8_t bpid);
-+void fman_port_set_bpool_counter(struct fman_port *port,
-+ uint8_t bpid,
-+ uint32_t value);
-+int fman_port_add_congestion_grps(struct fman_port *port,
-+ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM]);
-+int fman_port_remove_congestion_grps(struct fman_port *port,
-+ uint32_t grps_map[FMAN_PORT_CG_MAP_NUM]);
-+
-+
-+#endif /* __FSL_FMAN_PORT_H */
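Tying the port API together, a minimal sketch (not taken from the patch) that brings up an Rx port with the default configuration and reads one statistics counter afterwards; the fman_port context itself (register pointers, FM revision, pool count) is assumed to have been filled from platform data beforehand:

#include "fsl_fman_port.h"

static int rx_port_bring_up(struct fman_port *port,
			    uint32_t dflt_fqid, uint32_t err_fqid)
{
	struct fman_port_cfg cfg;
	struct fman_port_params params = {
		.dflt_fqid = dflt_fqid,	/* default Rx frame queue */
		.err_fqid = err_fqid,	/* error frame queue */
	};
	int err;

	fman_port_defconfig(&cfg, E_FMAN_PORT_TYPE_RX);

	err = fman_port_init(port, &cfg, &params);
	if (err)
		return err;

	return fman_port_enable(port);
}

static uint32_t rx_frames_processed(struct fman_port *port)
{
	/* E_FMAN_PORT_STATS_CNT_FRAME is documented as valid for all ports */
	return fman_port_get_stats_counter(port, E_FMAN_PORT_STATS_CNT_FRAME);
}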
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_prs.h
-@@ -0,0 +1,102 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_PRS_H
-+#define __FSL_FMAN_PRS_H
-+
-+#include "common/general.h"
-+
-+#define FM_PCD_EX_PRS_DOUBLE_ECC 0x02000000
-+#define FM_PCD_EX_PRS_SINGLE_ECC 0x01000000
-+
-+#define FM_PCD_PRS_PPSC_ALL_PORTS 0xffff0000
-+#define FM_PCD_PRS_RPIMAC_EN 0x00000001
-+#define FM_PCD_PRS_PORT_IDLE_STS 0xffff0000
-+#define FM_PCD_PRS_SINGLE_ECC 0x00004000
-+#define FM_PCD_PRS_DOUBLE_ECC 0x00004000
-+#define PRS_MAX_CYCLE_LIMIT 8191
-+
-+#define DEFAULT_MAX_PRS_CYC_LIM 0
-+
-+struct fman_prs_regs {
-+ uint32_t fmpr_rpclim;
-+ uint32_t fmpr_rpimac;
-+ uint32_t pmeec;
-+ uint32_t res00c[5];
-+ uint32_t fmpr_pevr;
-+ uint32_t fmpr_pever;
-+ uint32_t res028;
-+ uint32_t fmpr_perr;
-+ uint32_t fmpr_perer;
-+ uint32_t res034;
-+ uint32_t res038[10];
-+ uint32_t fmpr_ppsc;
-+ uint32_t res064;
-+ uint32_t fmpr_pds;
-+ uint32_t fmpr_l2rrs;
-+ uint32_t fmpr_l3rrs;
-+ uint32_t fmpr_l4rrs;
-+ uint32_t fmpr_srrs;
-+ uint32_t fmpr_l2rres;
-+ uint32_t fmpr_l3rres;
-+ uint32_t fmpr_l4rres;
-+ uint32_t fmpr_srres;
-+ uint32_t fmpr_spcs;
-+ uint32_t fmpr_spscs;
-+ uint32_t fmpr_hxscs;
-+ uint32_t fmpr_mrcs;
-+ uint32_t fmpr_mwcs;
-+ uint32_t fmpr_mrscs;
-+ uint32_t fmpr_mwscs;
-+ uint32_t fmpr_fcscs;
-+};
-+
-+struct fman_prs_cfg {
-+ uint32_t port_id_stat;
-+ uint16_t max_prs_cyc_lim;
-+ uint32_t prs_exceptions;
-+};
-+
-+uint32_t fman_prs_get_err_event(struct fman_prs_regs *regs, uint32_t ev_mask);
-+uint32_t fman_prs_get_err_ev_mask(struct fman_prs_regs *regs);
-+void fman_prs_ack_err_event(struct fman_prs_regs *regs, uint32_t event);
-+uint32_t fman_prs_get_expt_event(struct fman_prs_regs *regs, uint32_t ev_mask);
-+uint32_t fman_prs_get_expt_ev_mask(struct fman_prs_regs *regs);
-+void fman_prs_ack_expt_event(struct fman_prs_regs *regs, uint32_t event);
-+void fman_prs_defconfig(struct fman_prs_cfg *cfg);
-+int fman_prs_init(struct fman_prs_regs *regs, struct fman_prs_cfg *cfg);
-+void fman_prs_enable(struct fman_prs_regs *regs);
-+void fman_prs_disable(struct fman_prs_regs *regs);
-+int fman_prs_is_enabled(struct fman_prs_regs *regs);
-+void fman_prs_set_stst_port_msk(struct fman_prs_regs *regs, uint32_t pid_msk);
-+void fman_prs_set_stst(struct fman_prs_regs *regs, bool enable);
-+#endif /* __FSL_FMAN_PRS_H */
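A short sketch of the parser bring-up implied by these declarations, using the fman_prs_defconfig() values unchanged (not taken from the patch):

#include "fsl_fman_prs.h"

static int parser_bring_up(struct fman_prs_regs *regs)
{
	struct fman_prs_cfg cfg;
	int err;

	fman_prs_defconfig(&cfg);	/* port_id_stat, cycle limit, exceptions */

	err = fman_prs_init(regs, &cfg);
	if (err)
		return err;

	fman_prs_enable(regs);
	return fman_prs_is_enabled(regs) ? 0 : -1;
}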
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_rtc.h
-@@ -0,0 +1,449 @@
-+/*
-+ * Copyright 2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_RTC_H
-+#define __FSL_FMAN_RTC_H
-+
-+#include "common/general.h"
-+
-+/* FM RTC Registers definitions */
-+#define FMAN_RTC_TMR_CTRL_ALMP1 0x80000000
-+#define FMAN_RTC_TMR_CTRL_ALMP2 0x40000000
-+#define FMAN_RTC_TMR_CTRL_FS 0x10000000
-+#define FMAN_RTC_TMR_CTRL_PP1L 0x08000000
-+#define FMAN_RTC_TMR_CTRL_PP2L 0x04000000
-+#define FMAN_RTC_TMR_CTRL_TCLK_PERIOD_MASK 0x03FF0000
-+#define FMAN_RTC_TMR_CTRL_FRD 0x00004000
-+#define FMAN_RTC_TMR_CTRL_SLV 0x00002000
-+#define FMAN_RTC_TMR_CTRL_ETEP1 0x00000100
-+#define FMAN_RTC_TMR_CTRL_COPH 0x00000080
-+#define FMAN_RTC_TMR_CTRL_CIPH 0x00000040
-+#define FMAN_RTC_TMR_CTRL_TMSR 0x00000020
-+#define FMAN_RTC_TMR_CTRL_DBG 0x00000010
-+#define FMAN_RTC_TMR_CTRL_BYP 0x00000008
-+#define FMAN_RTC_TMR_CTRL_TE 0x00000004
-+#define FMAN_RTC_TMR_CTRL_CKSEL_OSC_CLK 0x00000003
-+#define FMAN_RTC_TMR_CTRL_CKSEL_MAC_CLK 0x00000001
-+#define FMAN_RTC_TMR_CTRL_CKSEL_EXT_CLK 0x00000000
-+#define FMAN_RTC_TMR_CTRL_TCLK_PERIOD_SHIFT 16
-+
-+#define FMAN_RTC_TMR_TEVENT_ETS2 0x02000000
-+#define FMAN_RTC_TMR_TEVENT_ETS1 0x01000000
-+#define FMAN_RTC_TMR_TEVENT_ALM2 0x00020000
-+#define FMAN_RTC_TMR_TEVENT_ALM1 0x00010000
-+#define FMAN_RTC_TMR_TEVENT_PP1 0x00000080
-+#define FMAN_RTC_TMR_TEVENT_PP2 0x00000040
-+#define FMAN_RTC_TMR_TEVENT_PP3 0x00000020
-+#define FMAN_RTC_TMR_TEVENT_ALL (FMAN_RTC_TMR_TEVENT_ETS2 |\
-+ FMAN_RTC_TMR_TEVENT_ETS1 |\
-+ FMAN_RTC_TMR_TEVENT_ALM2 |\
-+ FMAN_RTC_TMR_TEVENT_ALM1 |\
-+ FMAN_RTC_TMR_TEVENT_PP1 |\
-+ FMAN_RTC_TMR_TEVENT_PP2 |\
-+ FMAN_RTC_TMR_TEVENT_PP3)
-+
-+#define FMAN_RTC_TMR_PRSC_OCK_MASK 0x0000FFFF
-+
-+/**************************************************************************//**
-+ @Description FM RTC Alarm Polarity Options.
-+*//***************************************************************************/
-+enum fman_rtc_alarm_polarity {
-+ E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH, /**< Active-high output polarity */
-+ E_FMAN_RTC_ALARM_POLARITY_ACTIVE_LOW /**< Active-low output polarity */
-+};
-+
-+/**************************************************************************//**
-+ @Description FM RTC Trigger Polarity Options.
-+*//***************************************************************************/
-+enum fman_rtc_trigger_polarity {
-+ E_FMAN_RTC_TRIGGER_ON_RISING_EDGE, /**< Trigger on rising edge */
-+ E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE /**< Trigger on falling edge */
-+};
-+
-+/**************************************************************************//**
-+ @Description IEEE1588 Timer Module FM RTC Optional Clock Sources.
-+*//***************************************************************************/
-+enum fman_src_clock {
-+ E_FMAN_RTC_SOURCE_CLOCK_EXTERNAL, /**< external high precision timer
-+ reference clock */
-+ E_FMAN_RTC_SOURCE_CLOCK_SYSTEM, /**< MAC system clock */
-+ E_FMAN_RTC_SOURCE_CLOCK_OSCILATOR /**< RTC clock oscillator */
-+};
-+
-+/* RTC default values */
-+#define DEFAULT_SRC_CLOCK E_FMAN_RTC_SOURCE_CLOCK_SYSTEM
-+#define DEFAULT_INVERT_INPUT_CLK_PHASE FALSE
-+#define DEFAULT_INVERT_OUTPUT_CLK_PHASE FALSE
-+#define DEFAULT_ALARM_POLARITY E_FMAN_RTC_ALARM_POLARITY_ACTIVE_HIGH
-+#define DEFAULT_TRIGGER_POLARITY E_FMAN_RTC_TRIGGER_ON_FALLING_EDGE
-+#define DEFAULT_PULSE_REALIGN FALSE
-+
-+#define FMAN_RTC_MAX_NUM_OF_ALARMS 3
-+#define FMAN_RTC_MAX_NUM_OF_PERIODIC_PULSES 4
-+#define FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS 3
-+
-+/**************************************************************************//**
-+ @Description FM RTC timer alarm
-+*//***************************************************************************/
-+struct t_tmr_alarm{
-+ uint32_t tmr_alarm_h; /**< timer alarm high register */
-+ uint32_t tmr_alarm_l; /**< timer alarm low register */
-+};
-+
-+/**************************************************************************//**
-+ @Description FM RTC timer Ex trigger
-+*//***************************************************************************/
-+struct t_tmr_ext_trigger{
-+ uint32_t tmr_etts_h; /**< external trigger time stamp high register */
-+ uint32_t tmr_etts_l; /**< external trigger time stamp low register */
-+};
-+
-+struct rtc_regs {
-+ uint32_t tmr_id; /* 0x000 Module ID register */
-+ uint32_t tmr_id2; /* 0x004 Controller ID register */
-+ uint32_t reserved0008[30];
-+ uint32_t tmr_ctrl; /* 0x0080 timer control register */
-+ uint32_t tmr_tevent; /* 0x0084 timer event register */
-+ uint32_t tmr_temask; /* 0x0088 timer event mask register */
-+ uint32_t reserved008c[3];
-+ uint32_t tmr_cnt_h; /* 0x0098 timer counter high register */
-+ uint32_t tmr_cnt_l; /* 0x009c timer counter low register */
-+ uint32_t tmr_add; /* 0x00a0 timer drift compensation addend register */
-+ uint32_t tmr_acc; /* 0x00a4 timer accumulator register */
-+ uint32_t tmr_prsc; /* 0x00a8 timer prescale */
-+ uint32_t reserved00ac;
-+ uint32_t tmr_off_h; /* 0x00b0 timer offset high */
-+ uint32_t tmr_off_l; /* 0x00b4 timer offset low */
-+ struct t_tmr_alarm tmr_alarm[FMAN_RTC_MAX_NUM_OF_ALARMS]; /* 0x00b8 timer
-+ alarm */
-+ uint32_t tmr_fiper[FMAN_RTC_MAX_NUM_OF_PERIODIC_PULSES]; /* 0x00d0 timer
-+ fixed period interval */
-+ struct t_tmr_ext_trigger tmr_etts[FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS];
-+ /* 0x00e0 time stamp general purpose external */
-+ uint32_t reserved00f0[4];
-+};
-+
-+struct rtc_cfg {
-+ enum fman_src_clock src_clk;
-+ uint32_t ext_src_clk_freq;
-+ uint32_t rtc_freq_hz;
-+ bool timer_slave_mode;
-+ bool invert_input_clk_phase;
-+ bool invert_output_clk_phase;
-+ uint32_t events_mask;
-+ bool bypass; /**< Indicates if frequency compensation
-+ is bypassed */
-+ bool pulse_realign;
-+ enum fman_rtc_alarm_polarity alarm_polarity[FMAN_RTC_MAX_NUM_OF_ALARMS];
-+ enum fman_rtc_trigger_polarity trigger_polarity
-+ [FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS];
-+};
-+
-+/**
-+ * fman_rtc_defconfig() - Get default RTC configuration
-+ * @cfg: pointer to configuration structure.
-+ *
-+ * Call this function to obtain a default set of configuration values for
-+ * initializing RTC. The user can overwrite any of the values before calling
-+ * fman_rtc_init(), if specific configuration needs to be applied.
-+ */
-+void fman_rtc_defconfig(struct rtc_cfg *cfg);
-+
-+/**
-+ * fman_rtc_get_events() - Get the events
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Returns: The events
-+ */
-+uint32_t fman_rtc_get_events(struct rtc_regs *regs);
-+
-+/**
-+ * fman_rtc_get_interrupt_mask() - Get the events mask
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Returns: The events mask
-+ */
-+uint32_t fman_rtc_get_interrupt_mask(struct rtc_regs *regs);
-+
-+
-+/**
-+ * fman_rtc_set_interrupt_mask() - Set the events mask
-+ * @regs: Pointer to RTC register block
-+ * @mask: The mask to set
-+ */
-+void fman_rtc_set_interrupt_mask(struct rtc_regs *regs, uint32_t mask);
-+
-+/**
-+ * fman_rtc_get_event() - Check if specific events occurred
-+ * @regs: Pointer to RTC register block
-+ * @ev_mask: a mask of the events to check
-+ *
-+ * Returns: 0 if the events did not occur. Non zero if one of the events occurred
-+ */
-+uint32_t fman_rtc_get_event(struct rtc_regs *regs, uint32_t ev_mask);
-+
-+/**
-+ * fman_rtc_check_and_clear_event() - Clear events which are on
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Returns: A mask of the events which were cleared
-+ */
-+uint32_t fman_rtc_check_and_clear_event(struct rtc_regs *regs);
-+
-+/**
-+ * fman_rtc_ack_event() - Clear events
-+ * @regs: Pointer to RTC register block
-+ * @events: The events to clear
-+ */
-+void fman_rtc_ack_event(struct rtc_regs *regs, uint32_t events);
-+
-+/**
-+ * fman_rtc_enable_interupt() - Enable events interrupts
-+ * @regs: Pointer to RTC register block
-+ * @mask: The events to enable
-+ */
-+void fman_rtc_enable_interupt(struct rtc_regs *regs, uint32_t mask);
-+
-+/**
-+ * fman_rtc_disable_interupt() - Disable events interrupts
-+ * @regs: Pointer to RTC register block
-+ * @mask: The events to disable
-+ */
-+void fman_rtc_disable_interupt(struct rtc_regs *regs, uint32_t mask);
-+
-+/**
-+ * fman_rtc_get_timer_ctrl() - Get the control register
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Returns: The control register value
-+ */
-+uint32_t fman_rtc_get_timer_ctrl(struct rtc_regs *regs);
-+
-+/**
-+ * fman_rtc_set_timer_ctrl() - Set timer control register
-+ * @regs: Pointer to RTC register block
-+ * @val: The value to set
-+ */
-+void fman_rtc_set_timer_ctrl(struct rtc_regs *regs, uint32_t val);
-+
-+/**
-+ * fman_rtc_get_frequency_compensation() - Get the frequency compensation
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Returns: The frequency compensation value
-+ */
-+uint32_t fman_rtc_get_frequency_compensation(struct rtc_regs *regs);
-+
-+/**
-+ * fman_rtc_set_frequency_compensation() - Set frequency compensation
-+ * @regs: Pointer to RTC register block
-+ * @val: The value to set
-+ */
-+void fman_rtc_set_frequency_compensation(struct rtc_regs *regs, uint32_t val);
-+
-+/**
-+ * fman_rtc_get_trigger_stamp() - Get a trigger stamp
-+ * @regs: Pointer to RTC register block
-+ * @id: The id of the trigger stamp
-+ *
-+ * Returns: The time stamp
-+ */
-+uint64_t fman_rtc_get_trigger_stamp(struct rtc_regs *regs, int id);
-+
-+/**
-+ * fman_rtc_set_timer_alarm_l() - Set timer alarm low register
-+ * @regs: Pointer to RTC register block
-+ * @index: The index of alarm to set
-+ * @val: The value to set
-+ */
-+void fman_rtc_set_timer_alarm_l(struct rtc_regs *regs, int index,
-+ uint32_t val);
-+
-+/**
-+ * fman_rtc_set_timer_alarm() - Set timer alarm
-+ * @regs: Pointer to RTC register block
-+ * @index: The index of alarm to set
-+ * @val: The value to set
-+ */
-+void fman_rtc_set_timer_alarm(struct rtc_regs *regs, int index, int64_t val);
-+
-+/**
-+ * fman_rtc_set_timer_fiper() - Set timer fiper
-+ * @regs: Pointer to RTC register block
-+ * @index: The index of fiper to set
-+ * @val: The value to set
-+ */
-+void fman_rtc_set_timer_fiper(struct rtc_regs *regs, int index, uint32_t val);
-+
-+/**
-+ * fman_rtc_set_timer_offset() - Set timer offset
-+ * @regs: Pointer to RTC register block
-+ * @val: The value to set
-+ */
-+void fman_rtc_set_timer_offset(struct rtc_regs *regs, int64_t val);
-+
-+/**
-+ * fman_rtc_get_timer() - Get the timer counter
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Returns: The timer counter
-+ */
-+static inline uint64_t fman_rtc_get_timer(struct rtc_regs *regs)
-+{
-+ uint64_t time;
-+ /* TMR_CNT_L must be read first to get an accurate value */
-+ time = (uint64_t)ioread32be(&regs->tmr_cnt_l);
-+ time |= ((uint64_t)ioread32be(&regs->tmr_cnt_h) << 32);
-+
-+ return time;
-+}
-+
-+/**
-+ * fman_rtc_set_timer() - Set timer counter
-+ * @regs: Pointer to RTC register block
-+ * @val: The value to set
-+ */
-+static inline void fman_rtc_set_timer(struct rtc_regs *regs, int64_t val)
-+{
-+ iowrite32be((uint32_t)val, &regs->tmr_cnt_l);
-+ iowrite32be((uint32_t)(val >> 32), &regs->tmr_cnt_h);
-+}
-+
-+/**
-+ * fman_rtc_timers_soft_reset() - Soft reset
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Resets all the timer registers and state machines for the 1588 IP and
-+ * the attached client 1588
-+ */
-+void fman_rtc_timers_soft_reset(struct rtc_regs *regs);
-+
-+/**
-+ * fman_rtc_clear_external_trigger() - Clear an external trigger
-+ * @regs: Pointer to RTC register block
-+ * @id: The id of the trigger to clear
-+ */
-+void fman_rtc_clear_external_trigger(struct rtc_regs *regs, int id);
-+
-+/**
-+ * fman_rtc_clear_periodic_pulse() - Clear periodic pulse
-+ * @regs: Pointer to RTC register block
-+ * @id: The id of the fiper to clear
-+ */
-+void fman_rtc_clear_periodic_pulse(struct rtc_regs *regs, int id);
-+
-+/**
-+ * fman_rtc_enable() - Enable RTC hardware block
-+ * @regs: Pointer to RTC register block
-+ * @reset_clock: TRUE to reset the clock when enabling
-+ */
-+void fman_rtc_enable(struct rtc_regs *regs, bool reset_clock);
-+
-+/**
-+ * fman_rtc_is_enabled() - Is RTC hardware block enabled
-+ * @regs: Pointer to RTC register block
-+ *
-+ * Return: TRUE if enabled
-+ */
-+bool fman_rtc_is_enabled(struct rtc_regs *regs);
-+
-+/**
-+ * fman_rtc_disable() - Disable RTC hardware block
-+ * @regs: Pointer to RTC register block
-+ */
-+void fman_rtc_disable(struct rtc_regs *regs);
-+
-+/**
-+ * fman_rtc_init() - Init RTC hardware block
-+ * @cfg: RTC configuration data
-+ * @regs: Pointer to RTC register block
-+ * @num_alarms: Number of alarms in RTC
-+ * @num_fipers: Number of fipers in RTC
-+ * @num_ext_triggers: Number of external triggers in RTC
-+ * @init_freq_comp: Whether to apply @freq_compensation during init
-+ * @freq_compensation: Frequency compensation
-+ * @output_clock_divisor: Output clock divisor
-+ *
-+ * This function initializes RTC and applies basic configuration.
-+ */
-+void fman_rtc_init(struct rtc_cfg *cfg, struct rtc_regs *regs, int num_alarms,
-+ int num_fipers, int num_ext_triggers, bool init_freq_comp,
-+ uint32_t freq_compensation, uint32_t output_clock_divisor);
-+
-+/**
-+ * fman_rtc_set_alarm() - Set an alarm
-+ * @regs: Pointer to RTC register block
-+ * @id: id of alarm
-+ * @val: value to write
-+ * @enable: should interrupt be enabled
-+ */
-+void fman_rtc_set_alarm(struct rtc_regs *regs, int id, uint32_t val, bool enable);
-+
-+/**
-+ * fman_rtc_set_periodic_pulse() - Set a periodic pulse
-+ * @regs: Pointer to RTC register block
-+ * @id: id of fiper
-+ * @val: value to write
-+ * @enable: should interrupt be enabled
-+ */
-+void fman_rtc_set_periodic_pulse(struct rtc_regs *regs, int id, uint32_t val,
-+ bool enable);
-+
-+/**
-+ * fman_rtc_set_ext_trigger() - Set an external trigger
-+ * @regs: Pointer to RTC register block
-+ * @id: id of trigger
-+ * @enable: should interrupt be enabled
-+ * @use_pulse_as_input: use the pulse as input
-+ */
-+void fman_rtc_set_ext_trigger(struct rtc_regs *regs, int id, bool enable,
-+ bool use_pulse_as_input);
-+
-+struct fm_rtc_alarm_params {
-+ uint8_t alarm_id; /**< 0 or 1 */
-+ uint64_t alarm_time; /**< In nanoseconds, the time when the
-+ alarm should go off - must be a
-+ multiple of the RTC period */
-+ void (*f_alarm_callback)(void* app, uint8_t id); /**< This routine will
-+ be called when RTC reaches alarmTime */
-+ bool clear_on_expiration; /**< TRUE to turn off the alarm once
-+ expired.*/
-+};
-+
-+struct fm_rtc_periodic_pulse_params {
-+ uint8_t periodic_pulse_id; /**< 0 or 1 */
-+ uint64_t periodic_pulse_period; /**< In Nanoseconds. Must be a multiple
-+ of the RTC period */
-+ void (*f_periodic_pulse_callback)(void* app, uint8_t id); /**< This
-+ routine will be called every
-+ periodicPulsePeriod. */
-+};
-+
-+#endif /* __FSL_FMAN_RTC_H */
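The defconfig/init pattern matches the other blocks; a hedged sketch (not taken from the patch), assuming the maximum alarm/fiper/trigger counts from the defines above and leaving the frequency compensation and output clock divisor to the caller:

#include "fsl_fman_rtc.h"

static void rtc_bring_up(struct rtc_regs *regs, uint32_t freq_comp,
			 uint32_t out_clk_div)
{
	struct rtc_cfg cfg;

	fman_rtc_defconfig(&cfg);	/* DEFAULT_* values listed above */

	fman_rtc_init(&cfg, regs,
		      FMAN_RTC_MAX_NUM_OF_ALARMS,
		      FMAN_RTC_MAX_NUM_OF_PERIODIC_PULSES,
		      FMAN_RTC_MAX_NUM_OF_EXT_TRIGGERS,
		      TRUE /* init_freq_comp */,
		      freq_comp, out_clk_div);

	fman_rtc_enable(regs, TRUE /* reset_clock */);
}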
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_sp.h
-@@ -0,0 +1,138 @@
-+/*
-+ * Copyright 2013 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_SP_H
-+#define __FSL_FMAN_SP_H
-+
-+#include "common/general.h"
-+#include "fsl_fman.h"
-+
-+
-+struct fm_pcd_storage_profile_regs{
-+ uint32_t fm_sp_ebmpi[8];
-+ /*offset 0 - 0xc*/
-+ /**< Buffer Manager pool Information */
-+
-+ uint32_t fm_sp_acnt; /*offset 0x20*/
-+ uint32_t fm_sp_ebm; /*offset 0x24*/
-+ uint32_t fm_sp_da; /*offset 0x28*/
-+ uint32_t fm_sp_icp; /*offset 0x2c*/
-+ uint32_t fm_sp_mpd; /*offset 0x30*/
-+ uint32_t res1[2]; /*offset 0x34 - 0x38*/
-+ uint32_t fm_sp_spliodn; /*offset 0x3c*/
-+};
-+
-+/**************************************************************************//**
-+ @Description structure for defining internal context copying
-+*//***************************************************************************/
-+struct fman_sp_int_context_data_copy{
-+ uint16_t ext_buf_offset; /**< Offset in External buffer to which
-+ internal context is copied to (Rx)
-+ or taken from (Tx, Op). */
-+ uint8_t int_context_offset; /**< Offset within internal context to copy
-+ from (Rx) or to copy to (Tx, Op).*/
-+ uint16_t size; /**< Internal offset size to be copied */
-+};
-+
-+/**************************************************************************//**
-+ @Description struct for defining external buffer margins
-+*//***************************************************************************/
-+struct fman_sp_buf_margins{
-+ uint16_t start_margins; /**< Number of bytes to be left at the
-+ beginning of the external buffer (must be
-+ divisible by 16) */
-+ uint16_t end_margins; /**< number of bytes to be left at the end of
-+ the external buffer(must be divisible by 16)*/
-+};
-+
-+struct fm_storage_profile_params {
-+ struct fman_ext_pools fm_ext_pools;
-+ struct fman_backup_bm_pools backup_pools;
-+ struct fman_sp_int_context_data_copy *int_context;
-+ struct fman_sp_buf_margins *buf_margins;
-+ enum fman_dma_swap_option dma_swap_data;
-+ enum fman_dma_cache_option int_context_cache_attr;
-+ enum fman_dma_cache_option header_cache_attr;
-+ enum fman_dma_cache_option scatter_gather_cache_attr;
-+ bool dma_write_optimize;
-+ uint16_t liodn_offset;
-+ bool no_scather_gather;
-+ struct fman_buf_pool_depletion buf_pool_depletion;
-+};
-+
-+/**************************************************************************//**
-+ @Description Registers bit fields
-+*//***************************************************************************/
-+#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000
-+#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000
-+#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000
-+#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000
-+#define FMAN_SP_SG_DISABLE 0x80000000
-+
-+/* shifts */
-+#define FMAN_SP_EXT_BUF_POOL_ID_SHIFT 16
-+#define FMAN_SP_POOL_DEP_NUM_OF_POOLS_SHIFT 16
-+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
-+#define FMAN_SP_EXT_BUF_MARG_END_SHIFT 0
-+#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30
-+#define FMAN_SP_DMA_ATTR_IC_CACHE_SHIFT 28
-+#define FMAN_SP_DMA_ATTR_HDR_CACHE_SHIFT 26
-+#define FMAN_SP_DMA_ATTR_SG_CACHE_SHIFT 24
-+#define FMAN_SP_IC_TO_EXT_SHIFT 16
-+#define FMAN_SP_IC_FROM_INT_SHIFT 8
-+#define FMAN_SP_IC_SIZE_SHIFT 0
-+
-+/**************************************************************************//**
-+ @Description defaults
-+*//***************************************************************************/
-+#define DEFAULT_FMAN_SP_DMA_SWAP_DATA FMAN_DMA_NO_SWP
-+#define DEFAULT_FMAN_SP_DMA_INT_CONTEXT_CACHE_ATTR FMAN_DMA_NO_STASH
-+#define DEFAULT_FMAN_SP_DMA_HEADER_CACHE_ATTR FMAN_DMA_NO_STASH
-+#define DEFAULT_FMAN_SP_DMA_SCATTER_GATHER_CACHE_ATTR FMAN_DMA_NO_STASH
-+#define DEFAULT_FMAN_SP_DMA_WRITE_OPTIMIZE TRUE
-+#define DEFAULT_FMAN_SP_NO_SCATTER_GATHER FALSE
-+
-+void fman_vsp_defconfig(struct fm_storage_profile_params *cfg);
-+
-+void fman_vsp_init(struct fm_pcd_storage_profile_regs *regs,
-+ uint16_t index, struct fm_storage_profile_params *fm_vsp_params,
-+ int port_max_num_of_ext_pools, int bm_max_num_of_pools,
-+ int max_num_of_pfc_priorities);
-+
-+uint32_t fman_vsp_get_statistics(struct fm_pcd_storage_profile_regs *regs,
-+ uint16_t index);
-+
-+void fman_vsp_set_statistics(struct fm_pcd_storage_profile_regs *regs,
-+ uint16_t index, uint32_t value);
-+
-+
-+#endif /* __FSL_FMAN_SP_H */
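A minimal sketch of programming one storage profile with the defaults above (not taken from the patch); the pool and margin sub-structures come from fsl_fman.h, which is outside this hunk, so they are left at whatever fman_vsp_defconfig() provides:

#include "fsl_fman_sp.h"

static void vsp_program_profile(struct fm_pcd_storage_profile_regs *regs,
				uint16_t profile_index,
				int port_max_ext_pools, int bm_max_pools,
				int max_pfc_priorities)
{
	struct fm_storage_profile_params params;

	fman_vsp_defconfig(&params);	/* DEFAULT_FMAN_SP_* values above */

	fman_vsp_init(regs, profile_index, &params,
		      port_max_ext_pools, bm_max_pools, max_pfc_priorities);
}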
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/flib/fsl_fman_tgec.h
-@@ -0,0 +1,479 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __FSL_FMAN_TGEC_H
-+#define __FSL_FMAN_TGEC_H
-+
-+#include "common/general.h"
-+#include "fsl_enet.h"
-+
-+
-+/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
-+#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
-+
-+enum tgec_counters {
-+ E_TGEC_COUNTER_R64,
-+ E_TGEC_COUNTER_R127,
-+ E_TGEC_COUNTER_R255,
-+ E_TGEC_COUNTER_R511,
-+ E_TGEC_COUNTER_R1023,
-+ E_TGEC_COUNTER_R1518,
-+ E_TGEC_COUNTER_R1519X,
-+ E_TGEC_COUNTER_TRFRG,
-+ E_TGEC_COUNTER_TRJBR,
-+ E_TGEC_COUNTER_RDRP,
-+ E_TGEC_COUNTER_RALN,
-+ E_TGEC_COUNTER_TRUND,
-+ E_TGEC_COUNTER_TROVR,
-+ E_TGEC_COUNTER_RXPF,
-+ E_TGEC_COUNTER_TXPF,
-+ E_TGEC_COUNTER_ROCT,
-+ E_TGEC_COUNTER_RMCA,
-+ E_TGEC_COUNTER_RBCA,
-+ E_TGEC_COUNTER_RPKT,
-+ E_TGEC_COUNTER_RUCA,
-+ E_TGEC_COUNTER_RERR,
-+ E_TGEC_COUNTER_TOCT,
-+ E_TGEC_COUNTER_TMCA,
-+ E_TGEC_COUNTER_TBCA,
-+ E_TGEC_COUNTER_TUCA,
-+ E_TGEC_COUNTER_TERR
-+};
-+
-+/* Command and Configuration Register (COMMAND_CONFIG) */
-+#define CMD_CFG_EN_TIMESTAMP 0x00100000
-+#define CMD_CFG_TX_ADDR_INS_SEL 0x00080000
-+#define CMD_CFG_NO_LEN_CHK 0x00020000
-+#define CMD_CFG_SEND_IDLE 0x00010000
-+#define CMD_CFG_RX_ER_DISC 0x00004000
-+#define CMD_CFG_CMD_FRM_EN 0x00002000
-+#define CMD_CFG_STAT_CLR 0x00001000
-+#define CMD_CFG_LOOPBACK_EN 0x00000400
-+#define CMD_CFG_TX_ADDR_INS 0x00000200
-+#define CMD_CFG_PAUSE_IGNORE 0x00000100
-+#define CMD_CFG_PAUSE_FWD 0x00000080
-+#define CMD_CFG_PROMIS_EN 0x00000010
-+#define CMD_CFG_WAN_MODE 0x00000008
-+#define CMD_CFG_RX_EN 0x00000002
-+#define CMD_CFG_TX_EN 0x00000001
-+
-+/* Interrupt Mask Register (IMASK) */
-+#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000
-+#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000
-+#define TGEC_IMASK_REM_FAULT 0x00004000
-+#define TGEC_IMASK_LOC_FAULT 0x00002000
-+#define TGEC_IMASK_TX_ECC_ER 0x00001000
-+#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800
-+#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400
-+#define TGEC_IMASK_TX_ER 0x00000200
-+#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100
-+#define TGEC_IMASK_RX_ECC_ER 0x00000080
-+#define TGEC_IMASK_RX_JAB_FRM 0x00000040
-+#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020
-+#define TGEC_IMASK_RX_RUNT_FRM 0x00000010
-+#define TGEC_IMASK_RX_FRAG_FRM 0x00000008
-+#define TGEC_IMASK_RX_LEN_ER 0x00000004
-+#define TGEC_IMASK_RX_CRC_ER 0x00000002
-+#define TGEC_IMASK_RX_ALIGN_ER 0x00000001
-+
-+#define TGEC_EVENTS_MASK \
-+ ((uint32_t)(TGEC_IMASK_MDIO_SCAN_EVENT | \
-+ TGEC_IMASK_MDIO_CMD_CMPL | \
-+ TGEC_IMASK_REM_FAULT | \
-+ TGEC_IMASK_LOC_FAULT | \
-+ TGEC_IMASK_TX_ECC_ER | \
-+ TGEC_IMASK_TX_FIFO_UNFL | \
-+ TGEC_IMASK_TX_FIFO_OVFL | \
-+ TGEC_IMASK_TX_ER | \
-+ TGEC_IMASK_RX_FIFO_OVFL | \
-+ TGEC_IMASK_RX_ECC_ER | \
-+ TGEC_IMASK_RX_JAB_FRM | \
-+ TGEC_IMASK_RX_OVRSZ_FRM | \
-+ TGEC_IMASK_RX_RUNT_FRM | \
-+ TGEC_IMASK_RX_FRAG_FRM | \
-+ TGEC_IMASK_RX_LEN_ER | \
-+ TGEC_IMASK_RX_CRC_ER | \
-+ TGEC_IMASK_RX_ALIGN_ER))
-+
-+/* Hashtable Control Register (HASHTABLE_CTRL) */
-+#define TGEC_HASH_MCAST_SHIFT 23
-+#define TGEC_HASH_MCAST_EN 0x00000200
-+#define TGEC_HASH_ADR_MSK 0x000001ff
-+
-+#define DEFAULT_WAN_MODE_ENABLE FALSE
-+#define DEFAULT_PROMISCUOUS_MODE_ENABLE FALSE
-+#define DEFAULT_PAUSE_FORWARD_ENABLE FALSE
-+#define DEFAULT_PAUSE_IGNORE FALSE
-+#define DEFAULT_TX_ADDR_INS_ENABLE FALSE
-+#define DEFAULT_LOOPBACK_ENABLE FALSE
-+#define DEFAULT_CMD_FRAME_ENABLE FALSE
-+#define DEFAULT_RX_ERROR_DISCARD FALSE
-+#define DEFAULT_SEND_IDLE_ENABLE FALSE
-+#define DEFAULT_NO_LENGTH_CHECK_ENABLE TRUE
-+#define DEFAULT_LGTH_CHECK_NOSTDR FALSE
-+#define DEFAULT_TIME_STAMP_ENABLE FALSE
-+#define DEFAULT_TX_IPG_LENGTH 12
-+#define DEFAULT_MAX_FRAME_LENGTH 0x600
-+#define DEFAULT_PAUSE_QUANT 0xf000
-+
-+/*
-+ * 10G memory map
-+ */
-+struct tgec_regs {
-+ uint32_t tgec_id; /* 0x000 Controller ID */
-+ uint32_t reserved001[1]; /* 0x004 */
-+ uint32_t command_config; /* 0x008 Control and configuration */
-+ uint32_t mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */
-+ uint32_t mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */
-+ uint32_t maxfrm; /* 0x014 Maximum frame length */
-+ uint32_t pause_quant; /* 0x018 Pause quanta */
-+ uint32_t rx_fifo_sections; /* 0x01c */
-+ uint32_t tx_fifo_sections; /* 0x020 */
-+ uint32_t rx_fifo_almost_f_e; /* 0x024 */
-+ uint32_t tx_fifo_almost_f_e; /* 0x028 */
-+    uint32_t hashtable_ctrl;    /* 0x02c Hash table control */
-+ uint32_t mdio_cfg_status; /* 0x030 */
-+ uint32_t mdio_command; /* 0x034 */
-+ uint32_t mdio_data; /* 0x038 */
-+ uint32_t mdio_regaddr; /* 0x03c */
-+ uint32_t status; /* 0x040 */
-+ uint32_t tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */
-+ uint32_t mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */
-+ uint32_t mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */
-+ uint32_t rx_fifo_ptr_rd; /* 0x050 */
-+ uint32_t rx_fifo_ptr_wr; /* 0x054 */
-+ uint32_t tx_fifo_ptr_rd; /* 0x058 */
-+ uint32_t tx_fifo_ptr_wr; /* 0x05c */
-+ uint32_t imask; /* 0x060 Interrupt mask */
-+ uint32_t ievent; /* 0x064 Interrupt event */
-+ uint32_t udp_port; /* 0x068 Defines a UDP Port number */
-+ uint32_t type_1588v2; /* 0x06c Type field for 1588v2 */
-+ uint32_t reserved070[4]; /* 0x070 */
-+    /* 10GE Statistics Counters */
-+ uint32_t tfrm_u; /* 80 aFramesTransmittedOK */
-+ uint32_t tfrm_l; /* 84 aFramesTransmittedOK */
-+ uint32_t rfrm_u; /* 88 aFramesReceivedOK */
-+ uint32_t rfrm_l; /* 8c aFramesReceivedOK */
-+ uint32_t rfcs_u; /* 90 aFrameCheckSequenceErrors */
-+ uint32_t rfcs_l; /* 94 aFrameCheckSequenceErrors */
-+ uint32_t raln_u; /* 98 aAlignmentErrors */
-+ uint32_t raln_l; /* 9c aAlignmentErrors */
-+ uint32_t txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */
-+ uint32_t txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */
-+ uint32_t rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */
-+ uint32_t rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */
-+ uint32_t rlong_u; /* B0 aFrameTooLongErrors */
-+ uint32_t rlong_l; /* B4 aFrameTooLongErrors */
-+ uint32_t rflr_u; /* B8 aInRangeLengthErrors */
-+ uint32_t rflr_l; /* Bc aInRangeLengthErrors */
-+ uint32_t tvlan_u; /* C0 VLANTransmittedOK */
-+ uint32_t tvlan_l; /* C4 VLANTransmittedOK */
-+ uint32_t rvlan_u; /* C8 VLANReceivedOK */
-+ uint32_t rvlan_l; /* Cc VLANReceivedOK */
-+ uint32_t toct_u; /* D0 ifOutOctets */
-+ uint32_t toct_l; /* D4 ifOutOctets */
-+ uint32_t roct_u; /* D8 ifInOctets */
-+ uint32_t roct_l; /* Dc ifInOctets */
-+ uint32_t ruca_u; /* E0 ifInUcastPkts */
-+ uint32_t ruca_l; /* E4 ifInUcastPkts */
-+ uint32_t rmca_u; /* E8 ifInMulticastPkts */
-+ uint32_t rmca_l; /* Ec ifInMulticastPkts */
-+ uint32_t rbca_u; /* F0 ifInBroadcastPkts */
-+ uint32_t rbca_l; /* F4 ifInBroadcastPkts */
-+ uint32_t terr_u; /* F8 ifOutErrors */
-+ uint32_t terr_l; /* Fc ifOutErrors */
-+    uint32_t reserved100[2];    /* 100-108 */
-+ uint32_t tuca_u; /* 108 ifOutUcastPkts */
-+ uint32_t tuca_l; /* 10c ifOutUcastPkts */
-+ uint32_t tmca_u; /* 110 ifOutMulticastPkts */
-+ uint32_t tmca_l; /* 114 ifOutMulticastPkts */
-+ uint32_t tbca_u; /* 118 ifOutBroadcastPkts */
-+ uint32_t tbca_l; /* 11c ifOutBroadcastPkts */
-+ uint32_t rdrp_u; /* 120 etherStatsDropEvents */
-+ uint32_t rdrp_l; /* 124 etherStatsDropEvents */
-+ uint32_t reoct_u; /* 128 etherStatsOctets */
-+ uint32_t reoct_l; /* 12c etherStatsOctets */
-+ uint32_t rpkt_u; /* 130 etherStatsPkts */
-+ uint32_t rpkt_l; /* 134 etherStatsPkts */
-+ uint32_t trund_u; /* 138 etherStatsUndersizePkts */
-+ uint32_t trund_l; /* 13c etherStatsUndersizePkts */
-+ uint32_t r64_u; /* 140 etherStatsPkts64Octets */
-+ uint32_t r64_l; /* 144 etherStatsPkts64Octets */
-+ uint32_t r127_u; /* 148 etherStatsPkts65to127Octets */
-+ uint32_t r127_l; /* 14c etherStatsPkts65to127Octets */
-+ uint32_t r255_u; /* 150 etherStatsPkts128to255Octets */
-+ uint32_t r255_l; /* 154 etherStatsPkts128to255Octets */
-+ uint32_t r511_u; /* 158 etherStatsPkts256to511Octets */
-+ uint32_t r511_l; /* 15c etherStatsPkts256to511Octets */
-+ uint32_t r1023_u; /* 160 etherStatsPkts512to1023Octets */
-+ uint32_t r1023_l; /* 164 etherStatsPkts512to1023Octets */
-+ uint32_t r1518_u; /* 168 etherStatsPkts1024to1518Octets */
-+ uint32_t r1518_l; /* 16c etherStatsPkts1024to1518Octets */
-+ uint32_t r1519x_u; /* 170 etherStatsPkts1519toX */
-+ uint32_t r1519x_l; /* 174 etherStatsPkts1519toX */
-+ uint32_t trovr_u; /* 178 etherStatsOversizePkts */
-+ uint32_t trovr_l; /* 17c etherStatsOversizePkts */
-+ uint32_t trjbr_u; /* 180 etherStatsJabbers */
-+ uint32_t trjbr_l; /* 184 etherStatsJabbers */
-+ uint32_t trfrg_u; /* 188 etherStatsFragments */
-+ uint32_t trfrg_l; /* 18C etherStatsFragments */
-+ uint32_t rerr_u; /* 190 ifInErrors */
-+ uint32_t rerr_l; /* 194 ifInErrors */
-+};
-+
-+/**
-+ * struct tgec_cfg - TGEC configuration
-+ *
-+ * @rx_error_discard: Receive Erroneous Frame Discard Enable. When set to 1
-+ * any frame received with an error is discarded in the
-+ * Core and not forwarded to the Client interface.
-+ * When set to 0 (Reset value), erroneous Frames are
-+ * forwarded to the Client interface with ff_rx_err
-+ * asserted.
-+ * @pause_ignore: Ignore Pause Frame Quanta. If set to 1 received pause
-+ * frames are ignored by the MAC. When set to 0
-+ * (Reset value) the transmit process is stopped for the
-+ * amount of time specified in the pause quanta received
-+ * within a pause frame.
-+ * @pause_forward_enable:
-+ * Terminate / Forward Pause Frames. If set to 1 pause
-+ * frames are forwarded to the user application. When set
-+ * to 0 (Reset value) pause frames are terminated and
-+ * discarded within the MAC.
-+ * @no_length_check_enable:
-+ * Payload Length Check Disable. When set to 0
-+ * (Reset value), the Core checks the frame's payload
-+ * length with the Frame Length/Type field, when set to 1
-+ * the payload length check is disabled.
-+ * @cmd_frame_enable: Enables reception of all command frames. When set to 1
-+ * all Command Frames are accepted, when set to 0
-+ * (Reset Value) only Pause Frames are accepted and all
-+ * other Command Frames are rejected.
-+ * @send_idle_enable: Force Idle Generation. When set to 1, the MAC
-+ * permanently sends XGMII Idle sequences even when faults
-+ * are received.
-+ * @wan_mode_enable: WAN Mode Enable. Sets WAN mode (1) or LAN mode
-+ * (0, default) of operation.
-+ * @promiscuous_mode_enable:
-+ * Enables MAC promiscuous operation. When set to 1, all
-+ * frames are received without any MAC address filtering,
-+ * when set to 0 (Reset value) Unicast Frames with a
-+ * destination address not matching the Core MAC Address
-+ * (MAC Address programmed in Registers MAC_ADDR_0 and
-+ * MAC_ADDR_1 or the MAC address programmed in Registers
-+ * MAC_ADDR_2 and MAC_ADDR_3) are rejected.
-+ * @tx_addr_ins_enable: Set Source MAC Address on Transmit. If set to 1 the
-+ * MAC overwrites the source MAC address received from the
-+ * Client Interface with one of the MAC addresses. If set
-+ * to 0 (Reset value), the source MAC address from the
-+ * Client Interface is transmitted unmodified to the line.
-+ * @loopback_enable: PHY Interface Loopback. When set to 1, the signal
-+ * loop_ena is set to '1', when set to 0 (Reset value)
-+ * the signal loop_ena is set to 0.
-+ * @lgth_check_nostdr: The Core interprets the Length/Type field differently
-+ * depending on the value of this bit.
-+ * @time_stamp_enable: This bit selects between enabling and disabling the
-+ * IEEE 1588 functionality. 1: IEEE 1588 is enabled
-+ * 0: IEEE 1588 is disabled
-+ * @max_frame_length: Maximum supported received frame length.
-+ * The 10GEC MAC supports reception of any frame size up
-+ * to 16,352 bytes (0x3FE0). Typical settings are
-+ * 0x05EE (1,518 bytes) for standard frames.
-+ * Default setting is 0x0600 (1,536 bytes).
-+ * Received frames that exceed this stated maximum
-+ * are truncated.
-+ * @pause_quant: Pause quanta value used with transmitted pause frames.
-+ * Each quantum represents 512 bit-times.
-+ * @tx_ipg_length: Transmit Inter-Packet-Gap (IPG) value. A 6-bit value:
-+ * Depending on LAN or WAN mode of operation the value has
-+ * the following meaning: - LAN Mode: Number of octets in
-+ * steps of 4. Valid values are 8, 12, 16, ... 100. DIC is
-+ * fully supported (see 10.6.1 page 49) for any setting. A
-+ * default of 12 (reset value) must be set to conform to
-+ * IEEE802.3ae. Warning: When set to 8, PCS layers may not
-+ * be able to perform clock rate compensation. - WAN Mode:
-+ * Stretch factor. Valid values are 4..15. The stretch
-+ * factor is calculated as (value+1)*8. A default of 12
-+ * (reset value) must be set to conform to IEEE 802.3ae
-+ * (i.e. 13*8=104). A larger value shrinks the IPG
-+ * (increasing bandwidth).
-+ *
-+ * This structure contains basic TGEC configuration and must be passed to
-+ * fman_tgec_init() function. A default set of configuration values can be
-+ * obtained by calling fman_tgec_defconfig().
-+ */
-+struct tgec_cfg {
-+ bool rx_error_discard;
-+ bool pause_ignore;
-+ bool pause_forward_enable;
-+ bool no_length_check_enable;
-+ bool cmd_frame_enable;
-+ bool send_idle_enable;
-+ bool wan_mode_enable;
-+ bool promiscuous_mode_enable;
-+ bool tx_addr_ins_enable;
-+ bool loopback_enable;
-+ bool lgth_check_nostdr;
-+ bool time_stamp_enable;
-+ uint16_t max_frame_length;
-+ uint16_t pause_quant;
-+ uint32_t tx_ipg_length;
-+ bool skip_fman11_workaround;
-+};
-+
-+
-+void fman_tgec_defconfig(struct tgec_cfg *cfg);
-+
-+/**
-+ * fman_tgec_init() - Init tgec hardware block
-+ * @regs: Pointer to tgec register block
-+ * @cfg: tgec configuration data
-+ * @exception_mask: initial exception mask
-+ *
-+ * This function initializes the tgec controller and applies its
-+ * basic configuration.
-+ *
-+ * Returns: 0 if successful, an error code otherwise.
-+ */
-+
-+int fman_tgec_init(struct tgec_regs *regs, struct tgec_cfg *cfg,
-+ uint32_t exception_mask);
-+
-+void fman_tgec_enable(struct tgec_regs *regs, bool apply_rx, bool apply_tx);
-+
-+void fman_tgec_disable(struct tgec_regs *regs, bool apply_rx, bool apply_tx);
-+
-+uint32_t fman_tgec_get_revision(struct tgec_regs *regs);
-+
-+void fman_tgec_set_mac_address(struct tgec_regs *regs, uint8_t *macaddr);
-+
-+void fman_tgec_set_promiscuous(struct tgec_regs *regs, bool val);
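/*
 * Editor's note: illustrative sketch only, not part of the original patch.
 * A plausible bring-up order for the API above, assuming 'regs' is an
 * ioremapped pointer to the TGEC register block and that bool/TRUE come
 * from common/general.h (included by this header).
 */
static int my_tgec_bring_up(struct tgec_regs *regs, uint8_t *mac)
{
    struct tgec_cfg cfg;
    int err;

    fman_tgec_defconfig(&cfg);          /* start from the DEFAULT_* values */
    cfg.max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
    cfg.pause_quant = DEFAULT_PAUSE_QUANT;

    err = fman_tgec_init(regs, &cfg, TGEC_EVENTS_MASK);
    if (err)
        return err;

    fman_tgec_set_mac_address(regs, mac);
    fman_tgec_enable(regs, TRUE, TRUE); /* apply_rx, apply_tx */
    return 0;
}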
-+
-+/**
-+ * fman_tgec_reset_stat() - Completely resets all TGEC HW counters
-+ * @regs: Pointer to TGEC register block
-+ */
-+void fman_tgec_reset_stat(struct tgec_regs *regs);
-+
-+/**
-+ * fman_tgec_get_counter() - Reads TGEC HW counters
-+ * @regs: Pointer to TGEC register block
-+ * @reg_name: Counter name according to the appropriate enum
-+ *
-+ * Returns: Required counter value
-+ */
-+uint64_t fman_tgec_get_counter(struct tgec_regs *regs,
-+ enum tgec_counters reg_name);
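/*
 * Editor's note: illustrative sketch only, not part of the original patch.
 * Each statistic is returned as a 64-bit value, presumably assembled from
 * the _u/_l register pairs in struct tgec_regs; callers just pick a name
 * from enum tgec_counters.  pr_info() is the usual kernel logging helper.
 */
static void my_tgec_dump_rx_stats(struct tgec_regs *regs)
{
    uint64_t pkts = fman_tgec_get_counter(regs, E_TGEC_COUNTER_RPKT);
    uint64_t errs = fman_tgec_get_counter(regs, E_TGEC_COUNTER_RERR);

    pr_info("tgec: %llu packets received, %llu errors\n",
            (unsigned long long)pkts, (unsigned long long)errs);
}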
-+
-+/**
-+ * fman_tgec_set_hash_table() - Sets the Hashtable Control Register
-+ * @regs: Pointer to TGEC register block
-+ * @value: Value to be written in Hashtable Control Register
-+ */
-+void fman_tgec_set_hash_table(struct tgec_regs *regs, uint32_t value);
-+
-+/**
-+ * fman_tgec_set_tx_pause_frames() - Sets the Pause Quanta Register
-+ * @regs: Pointer to TGEC register block
-+ * @pause_time: Pause quanta value used with transmitted pause frames.
-+ * Each quantum represents 512 bit-times.
-+ */
-+void fman_tgec_set_tx_pause_frames(struct tgec_regs *regs, uint16_t pause_time);
-+
-+/**
-+ * fman_tgec_set_rx_ignore_pause_frames() - Set whether received pause frames are ignored
-+ * @regs: Pointer to TGEC register block
-+ * @en: Ignore/Respond to pause frame quanta
-+ *
-+ * Sets the value of PAUSE_IGNORE field in the COMMAND_CONFIG Register
-+ * 0 - MAC stops transmit process for the duration specified
-+ * in the Pause frame quanta of a received Pause frame.
-+ * 1 - MAC ignores received Pause frames.
-+ */
-+void fman_tgec_set_rx_ignore_pause_frames(struct tgec_regs *regs, bool en);
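/*
 * Editor's note: illustrative sketch only, not part of the original patch.
 * Symmetric flow control: advertise a pause time (in units of 512
 * bit-times) in transmitted pause frames and honour the ones we receive.
 * FALSE is assumed to come from common/general.h.
 */
static void my_tgec_enable_flow_control(struct tgec_regs *regs)
{
    fman_tgec_set_tx_pause_frames(regs, DEFAULT_PAUSE_QUANT);
    fman_tgec_set_rx_ignore_pause_frames(regs, FALSE);  /* respond to pause */
}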
-+
-+/**
-+ * fman_tgec_enable_1588_time_stamp() - change timestamp functionality
-+ * @regs: Pointer to TGEC register block
-+ * @en: enable/disable timestamp functionality
-+ *
-+ * Sets the value of EN_TIMESTAMP field in the COMMAND_CONFIG Register
-+ * IEEE 1588 timestamp functionality control:
-+ * 0 disabled, 1 enabled
-+ */
-+
-+void fman_tgec_enable_1588_time_stamp(struct tgec_regs *regs, bool en);
-+
-+uint32_t fman_tgec_get_event(struct tgec_regs *regs, uint32_t ev_mask);
-+
-+void fman_tgec_ack_event(struct tgec_regs *regs, uint32_t ev_mask);
-+
-+uint32_t fman_tgec_get_interrupt_mask(struct tgec_regs *regs);
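/*
 * Editor's note: illustrative sketch only, not part of the original patch.
 * A minimal event-handling routine: read the pending events against the
 * currently enabled mask, react to one of them and acknowledge everything
 * that was seen.  pr_warn() is the usual kernel logging helper.
 */
static void my_tgec_handle_events(struct tgec_regs *regs)
{
    uint32_t mask = fman_tgec_get_interrupt_mask(regs);
    uint32_t events = fman_tgec_get_event(regs, mask);

    if (events & TGEC_IMASK_RX_FIFO_OVFL)
        pr_warn("tgec: Rx FIFO overflow\n");

    fman_tgec_ack_event(regs, events);
}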
-+
-+/**
-+ * fman_tgec_add_addr_in_paddr() - Sets additional exact match MAC address
-+ * @regs: Pointer to TGEC register block
-+ * @addr_ptr: Pointer to 6-byte array containing the MAC address
-+ *
-+ * Sets the additional station MAC address
-+ */
-+void fman_tgec_add_addr_in_paddr(struct tgec_regs *regs, uint8_t *addr_ptr);
-+
-+void fman_tgec_clear_addr_in_paddr(struct tgec_regs *regs);
-+
-+void fman_tgec_enable_interrupt(struct tgec_regs *regs, uint32_t ev_mask);
-+
-+void fman_tgec_disable_interrupt(struct tgec_regs *regs, uint32_t ev_mask);
-+
-+void fman_tgec_reset_filter_table(struct tgec_regs *regs);
-+
-+void fman_tgec_set_hash_table_entry(struct tgec_regs *regs, uint32_t crc);
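/*
 * Editor's note: illustrative sketch only, not part of the original patch.
 * Typical multicast filter programming: clear the filter once at setup,
 * then add entries derived from a CRC of each multicast address.  Whether
 * the hardware expects exactly ether_crc() (linux/etherdevice.h), and how
 * TGEC_HASH_MCAST_SHIFT / TGEC_HASH_ADR_MSK are applied internally, are
 * assumptions of this sketch.
 */
static void my_tgec_add_mcast(struct tgec_regs *regs, const uint8_t *addr)
{
    uint32_t crc = ether_crc(6, addr);

    fman_tgec_set_hash_table_entry(regs, crc);
}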
-+
-+
-+/**
-+ * fman_tgec_get_max_frame_len() - Returns the maximum frame length value
-+ * @regs: Pointer to TGEC register block
-+ */
-+uint16_t fman_tgec_get_max_frame_len(struct tgec_regs *regs);
-+
-+/**
-+ * fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007() - Apply the Tx FIFO
-+ * corruption workaround for erratum 10GMAC-A007
-+ * @regs: Pointer to TGEC register block
-+ */
-+void fman_tgec_set_erratum_tx_fifo_corruption_10gmac_a007(struct tgec_regs
-+ *regs);
-+
-+
-+#endif /* __FSL_FMAN_TGEC_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/dpaa_integration_ext.h
-@@ -0,0 +1,291 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**
-+
-+ @File dpaa_integration_ext.h
-+
-+ @Description T4240 FM external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __DPAA_INTEGRATION_EXT_H
-+#define __DPAA_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+#define DPAA_VERSION 11
-+
-+/**************************************************************************//**
-+ @Description DPAA SW Portals Enumeration.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_DPAA_SWPORTAL0 = 0,
-+ e_DPAA_SWPORTAL1,
-+ e_DPAA_SWPORTAL2,
-+ e_DPAA_SWPORTAL3,
-+ e_DPAA_SWPORTAL4,
-+ e_DPAA_SWPORTAL5,
-+ e_DPAA_SWPORTAL6,
-+ e_DPAA_SWPORTAL7,
-+ e_DPAA_SWPORTAL8,
-+ e_DPAA_SWPORTAL9,
-+ e_DPAA_SWPORTAL10,
-+ e_DPAA_SWPORTAL11,
-+ e_DPAA_SWPORTAL12,
-+ e_DPAA_SWPORTAL13,
-+ e_DPAA_SWPORTAL14,
-+ e_DPAA_SWPORTAL15,
-+ e_DPAA_SWPORTAL16,
-+ e_DPAA_SWPORTAL17,
-+ e_DPAA_SWPORTAL18,
-+ e_DPAA_SWPORTAL19,
-+ e_DPAA_SWPORTAL20,
-+ e_DPAA_SWPORTAL21,
-+ e_DPAA_SWPORTAL22,
-+ e_DPAA_SWPORTAL23,
-+ e_DPAA_SWPORTAL24,
-+ e_DPAA_SWPORTAL_DUMMY_LAST
-+} e_DpaaSwPortal;
-+
-+/**************************************************************************//**
-+ @Description DPAA Direct Connect Portals Enumeration.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_DPAA_DCPORTAL0 = 0,
-+ e_DPAA_DCPORTAL1,
-+ e_DPAA_DCPORTAL2,
-+ e_DPAA_DCPORTAL_DUMMY_LAST
-+} e_DpaaDcPortal;
-+
-+#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
-+#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
-+
-+/*****************************************************************************
-+ QMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of channels, dedicated and pool */
-+#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
-+#define QM_MAX_NUM_OF_CGS 256 /**< Congestion groups number */
-+#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE)
-+ /**< FQIDs range - 24 bits */
-+
-+/**************************************************************************//**
-+ @Description Work Queue Channel assignments in QMan.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_QM_FQ_CHANNEL_SWPORTAL0 = 0x0, /**< Dedicated channels serviced by software portals 0 to 24 */
-+ e_QM_FQ_CHANNEL_SWPORTAL1,
-+ e_QM_FQ_CHANNEL_SWPORTAL2,
-+ e_QM_FQ_CHANNEL_SWPORTAL3,
-+ e_QM_FQ_CHANNEL_SWPORTAL4,
-+ e_QM_FQ_CHANNEL_SWPORTAL5,
-+ e_QM_FQ_CHANNEL_SWPORTAL6,
-+ e_QM_FQ_CHANNEL_SWPORTAL7,
-+ e_QM_FQ_CHANNEL_SWPORTAL8,
-+ e_QM_FQ_CHANNEL_SWPORTAL9,
-+ e_QM_FQ_CHANNEL_SWPORTAL10,
-+ e_QM_FQ_CHANNEL_SWPORTAL11,
-+ e_QM_FQ_CHANNEL_SWPORTAL12,
-+ e_QM_FQ_CHANNEL_SWPORTAL13,
-+ e_QM_FQ_CHANNEL_SWPORTAL14,
-+ e_QM_FQ_CHANNEL_SWPORTAL15,
-+ e_QM_FQ_CHANNEL_SWPORTAL16,
-+ e_QM_FQ_CHANNEL_SWPORTAL17,
-+ e_QM_FQ_CHANNEL_SWPORTAL18,
-+ e_QM_FQ_CHANNEL_SWPORTAL19,
-+ e_QM_FQ_CHANNEL_SWPORTAL20,
-+ e_QM_FQ_CHANNEL_SWPORTAL21,
-+ e_QM_FQ_CHANNEL_SWPORTAL22,
-+ e_QM_FQ_CHANNEL_SWPORTAL23,
-+ e_QM_FQ_CHANNEL_SWPORTAL24,
-+
-+ e_QM_FQ_CHANNEL_POOL1 = 0x401, /**< Pool channels that can be serviced by any of the software portals */
-+ e_QM_FQ_CHANNEL_POOL2,
-+ e_QM_FQ_CHANNEL_POOL3,
-+ e_QM_FQ_CHANNEL_POOL4,
-+ e_QM_FQ_CHANNEL_POOL5,
-+ e_QM_FQ_CHANNEL_POOL6,
-+ e_QM_FQ_CHANNEL_POOL7,
-+ e_QM_FQ_CHANNEL_POOL8,
-+ e_QM_FQ_CHANNEL_POOL9,
-+ e_QM_FQ_CHANNEL_POOL10,
-+ e_QM_FQ_CHANNEL_POOL11,
-+ e_QM_FQ_CHANNEL_POOL12,
-+ e_QM_FQ_CHANNEL_POOL13,
-+ e_QM_FQ_CHANNEL_POOL14,
-+ e_QM_FQ_CHANNEL_POOL15,
-+
-+ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x800, /**< Dedicated channels serviced by Direct Connect Portal 0:
-+ connected to FMan 0; assigned in incrementing order to
-+ each sub-portal (SP) in the portal */
-+ e_QM_FQ_CHANNEL_FMAN0_SP1,
-+ e_QM_FQ_CHANNEL_FMAN0_SP2,
-+ e_QM_FQ_CHANNEL_FMAN0_SP3,
-+ e_QM_FQ_CHANNEL_FMAN0_SP4,
-+ e_QM_FQ_CHANNEL_FMAN0_SP5,
-+ e_QM_FQ_CHANNEL_FMAN0_SP6,
-+ e_QM_FQ_CHANNEL_FMAN0_SP7,
-+ e_QM_FQ_CHANNEL_FMAN0_SP8,
-+ e_QM_FQ_CHANNEL_FMAN0_SP9,
-+ e_QM_FQ_CHANNEL_FMAN0_SP10,
-+ e_QM_FQ_CHANNEL_FMAN0_SP11,
-+ e_QM_FQ_CHANNEL_FMAN0_SP12,
-+ e_QM_FQ_CHANNEL_FMAN0_SP13,
-+ e_QM_FQ_CHANNEL_FMAN0_SP14,
-+ e_QM_FQ_CHANNEL_FMAN0_SP15,
-+
-+ e_QM_FQ_CHANNEL_RMAN_SP0 = 0x820, /**< Dedicated channels serviced by Direct Connect Portal 1: connected to RMan */
-+ e_QM_FQ_CHANNEL_RMAN_SP1,
-+
-+ e_QM_FQ_CHANNEL_CAAM = 0x840 /**< Dedicated channel serviced by Direct Connect Portal 2:
-+ connected to SEC */
-+} e_QmFQChannel;
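/*
 * Editor's note: illustrative sketch only, not part of the original patch.
 * The enum above documents that the dedicated FMan 0 sub-portal channels
 * start at 0x800 and are assigned in incrementing order, so a sub-portal
 * index maps to its channel arithmetically.  The bound comes from
 * FM_MAX_NUM_OF_SUB_PORTALS defined later in this header.
 */
static e_QmFQChannel my_fman0_sp_channel(unsigned int sp)
{
    if (sp >= FM_MAX_NUM_OF_SUB_PORTALS)
        return e_QM_FQ_CHANNEL_FMAN0_SP0;   /* arbitrary fallback for the sketch */
    return (e_QmFQChannel)(e_QM_FQ_CHANNEL_FMAN0_SP0 + sp);
}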
-+
-+/*****************************************************************************
-+ BMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define BM_MAX_NUM_OF_POOLS         64          /**< Number of buffer pools */
-+
-+/*****************************************************************************
-+ SEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define SEC_NUM_OF_DECOS 3
-+#define SEC_ALL_DECOS_MASK 0x00000003
-+
-+
-+/*****************************************************************************
-+ FM INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define INTG_MAX_NUM_OF_FM 2
-+/* Ports defines */
-+#define FM_MAX_NUM_OF_1G_MACS 6
-+#define FM_MAX_NUM_OF_10G_MACS 2
-+#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
-+#define FM_MAX_NUM_OF_OH_PORTS 6
-+
-+#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
-+
-+#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
-+
-+#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
-+#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
-+#define FM_MAX_NUM_OF_SUB_PORTALS 16
-+#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
-+
-+#define FM_VSP_MAX_NUM_OF_ENTRIES 64
-+#define FM_MAX_NUM_OF_PFC_PRIORITIES 8
-+
-+/* RAMs defines */
-+#define FM_MURAM_SIZE (384 * KILOBYTE)
-+#define FM_IRAM_SIZE(major, minor) (64 * KILOBYTE)
-+#define FM_NUM_OF_CTRL 4
-+
-+/* PCD defines */
-+#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
-+#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
-+#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
-+#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000600 /**< Number of bytes saved for patches */
-+#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
-+
-+/* RTC defines */
-+#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
-+#define FM_RTC_NUM_OF_PERIODIC_PULSES 3 /**< RTC number of periodic pulses */
-+#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
-+
-+/* QMI defines */
-+#define QMI_MAX_NUM_OF_TNUMS 64
-+#define QMI_DEF_TNUMS_THRESH 32
-+/* FPM defines */
-+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
-+
-+/* DMA defines */
-+#define DMA_THRESH_MAX_COMMQ 83
-+#define DMA_THRESH_MAX_BUF 127
-+
-+/* BMI defines */
-+#define BMI_MAX_NUM_OF_TASKS 128
-+#define BMI_MAX_NUM_OF_DMAS 84
-+
-+#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
-+#define PORT_MAX_WEIGHT 16
-+
-+#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
-+
-+/* Unique T4240 */
-+#define FM_OP_OPEN_DMA_MIN_LIMIT
-+#define FM_NO_RESTRICT_ON_ACCESS_RSRC
-+#define FM_NO_OP_OBSERVED_POOLS
-+#define FM_FRAME_END_PARAMS_FOR_OP
-+#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
-+#define FM_QMI_NO_SINGLE_ECC_EXCEPTION
-+
-+#define FM_NO_GUARANTEED_RESET_VALUES
-+
-+/* FM errata */
-+#define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
-+#define FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127
-+#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
-+#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
-+#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
-+#define FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273
-+
-+#define FM_BCB_ERRATA_BMI_SW001
-+#define FM_LEN_CHECK_ERRATA_FMAN_SW002
-+#define FM_AID_MODE_NO_TNUM_SW005 /* refer to pdm TKT068794 - only support of port_id on aid */
-+#define FM_ERROR_VSP_NO_MATCH_SW006 /* refer to pdm TKT174304 - no match between errorQ and VSP */
-+
-+/*****************************************************************************
-+ RMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define RM_MAX_NUM_OF_IB 4 /**< Number of inbound blocks */
-+#define RM_NUM_OF_IBCU              8           /**< Number of classification units in an inbound block */
-+
-+/* RMan erratas */
-+#define RM_ERRONEOUS_ACK_ERRATA_RMAN_A006756
-+
-+/*****************************************************************************
-+ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define NUM_OF_RX_SC 16
-+#define NUM_OF_TX_SC 16
-+
-+#define NUM_OF_SA_PER_RX_SC 2
-+#define NUM_OF_SA_PER_TX_SC 2
-+
-+#endif /* __DPAA_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_ext.h
-@@ -0,0 +1,71 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+
-+ @File part_ext.h
-+
-+ @Description Definitions for the part (integration) module.
-+*//***************************************************************************/
-+
-+#ifndef __PART_EXT_H
-+#define __PART_EXT_H
-+
-+#include "std_ext.h"
-+#include "part_integration_ext.h"
-+
-+#if !(defined(P1023) || \
-+ defined(P2041) || \
-+ defined(P3041) || \
-+ defined(P4080) || \
-+ defined(P5020) || \
-+ defined(P5040) || \
-+ defined(B4860) || \
-+ defined(T4240))
-+#error "unable to proceed without chip-definition"
-+#endif
-+
-+
-+/**************************************************************************//*
-+ @Description Part data structure - must be contained in any integration
-+ data structure.
-+*//***************************************************************************/
-+typedef struct t_Part
-+{
-+ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
-+ /**< Returns the address of the module's memory map base. */
-+ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
-+ /**< Returns the module's ID according to its memory map base. */
-+} t_Part;
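/*
 * Editor's note: illustrative sketch only, not part of the original patch.
 * A hypothetical integration fills the two callbacks so that generic code
 * can translate between module IDs and memory-map base addresses.  The
 * 'my_module_base' table is an assumed per-board array, not something
 * defined by this header.
 */
static uintptr_t my_module_base[e_MODULE_ID_DUMMY_LAST];

static uintptr_t my_get_module_base(t_Handle h_Part, e_ModuleId moduleId)
{
    (void)h_Part;
    return my_module_base[moduleId];
}

static e_ModuleId my_get_module_id(t_Handle h_Part, uintptr_t baseAddress)
{
    int i;

    (void)h_Part;
    for (i = 0; i < e_MODULE_ID_DUMMY_LAST; i++)
        if (my_module_base[i] == baseAddress)
            return (e_ModuleId)i;
    return e_MODULE_ID_DUMMY_LAST;
}

static const t_Part my_part = {
    .f_GetModuleBase     = my_get_module_base,
    .f_GetModuleIdByBase = my_get_module_id,
};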
-+
-+
-+#endif /* __PART_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3H/part_integration_ext.h
-@@ -0,0 +1,304 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**
-+
-+ @File part_integration_ext.h
-+
-+ @Description T4240 external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __PART_INTEGRATION_EXT_H
-+#define __PART_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+#include "ddr_std_ext.h"
-+#include "enet_ext.h"
-+#include "dpaa_integration_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group T4240_chip_id T4240 Application Programming Interface
-+
-+ @Description   T4240 chip functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define CORE_E6500
-+
-+#define INTG_MAX_NUM_OF_CORES 24
-+
-+
-+/**************************************************************************//**
-+ @Description Module types.
-+*//***************************************************************************/
-+typedef enum e_ModuleId
-+{
-+ e_MODULE_ID_DUART_1 = 0,
-+ e_MODULE_ID_DUART_2,
-+ e_MODULE_ID_DUART_3,
-+ e_MODULE_ID_DUART_4,
-+ e_MODULE_ID_LAW,
-+ e_MODULE_ID_IFC,
-+ e_MODULE_ID_PAMU,
-+ e_MODULE_ID_QM, /**< Queue manager module */
-+ e_MODULE_ID_BM, /**< Buffer manager module */
-+ e_MODULE_ID_QM_CE_PORTAL_0,
-+ e_MODULE_ID_QM_CI_PORTAL_0,
-+ e_MODULE_ID_QM_CE_PORTAL_1,
-+ e_MODULE_ID_QM_CI_PORTAL_1,
-+ e_MODULE_ID_QM_CE_PORTAL_2,
-+ e_MODULE_ID_QM_CI_PORTAL_2,
-+ e_MODULE_ID_QM_CE_PORTAL_3,
-+ e_MODULE_ID_QM_CI_PORTAL_3,
-+ e_MODULE_ID_QM_CE_PORTAL_4,
-+ e_MODULE_ID_QM_CI_PORTAL_4,
-+ e_MODULE_ID_QM_CE_PORTAL_5,
-+ e_MODULE_ID_QM_CI_PORTAL_5,
-+ e_MODULE_ID_QM_CE_PORTAL_6,
-+ e_MODULE_ID_QM_CI_PORTAL_6,
-+ e_MODULE_ID_QM_CE_PORTAL_7,
-+ e_MODULE_ID_QM_CI_PORTAL_7,
-+ e_MODULE_ID_QM_CE_PORTAL_8,
-+ e_MODULE_ID_QM_CI_PORTAL_8,
-+ e_MODULE_ID_QM_CE_PORTAL_9,
-+ e_MODULE_ID_QM_CI_PORTAL_9,
-+ e_MODULE_ID_BM_CE_PORTAL_0,
-+ e_MODULE_ID_BM_CI_PORTAL_0,
-+ e_MODULE_ID_BM_CE_PORTAL_1,
-+ e_MODULE_ID_BM_CI_PORTAL_1,
-+ e_MODULE_ID_BM_CE_PORTAL_2,
-+ e_MODULE_ID_BM_CI_PORTAL_2,
-+ e_MODULE_ID_BM_CE_PORTAL_3,
-+ e_MODULE_ID_BM_CI_PORTAL_3,
-+ e_MODULE_ID_BM_CE_PORTAL_4,
-+ e_MODULE_ID_BM_CI_PORTAL_4,
-+ e_MODULE_ID_BM_CE_PORTAL_5,
-+ e_MODULE_ID_BM_CI_PORTAL_5,
-+ e_MODULE_ID_BM_CE_PORTAL_6,
-+ e_MODULE_ID_BM_CI_PORTAL_6,
-+ e_MODULE_ID_BM_CE_PORTAL_7,
-+ e_MODULE_ID_BM_CI_PORTAL_7,
-+ e_MODULE_ID_BM_CE_PORTAL_8,
-+ e_MODULE_ID_BM_CI_PORTAL_8,
-+ e_MODULE_ID_BM_CE_PORTAL_9,
-+ e_MODULE_ID_BM_CI_PORTAL_9,
-+ e_MODULE_ID_FM, /**< Frame manager module */
-+ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
-+ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
-+ e_MODULE_ID_FM_BMI, /**< FM BMI block */
-+ e_MODULE_ID_FM_QMI, /**< FM QMI block */
-+ e_MODULE_ID_FM_PARSER, /**< FM parser block */
-+ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO5, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO6, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO7, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx2, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx3, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx4, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx5, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx6, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GRx1, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GRx2, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx2, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx3, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx4, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx5, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx6, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GTx1, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GTx2, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM_PLCR, /**< FM Policer */
-+ e_MODULE_ID_FM_KG, /**< FM Keygen */
-+ e_MODULE_ID_FM_DMA, /**< FM DMA */
-+ e_MODULE_ID_FM_FPM, /**< FM FPM */
-+ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
-+ e_MODULE_ID_FM_1GMDIO, /**< FM 1G MDIO MAC */
-+ e_MODULE_ID_FM_10GMDIO, /**< FM 10G MDIO */
-+ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
-+ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
-+ e_MODULE_ID_FM_1GMAC2, /**< FM 1G MAC #2 */
-+ e_MODULE_ID_FM_1GMAC3, /**< FM 1G MAC #3 */
-+ e_MODULE_ID_FM_1GMAC4, /**< FM 1G MAC #4 */
-+ e_MODULE_ID_FM_1GMAC5, /**< FM 1G MAC #5 */
-+ e_MODULE_ID_FM_1GMAC6, /**< FM 1G MAC #6 */
-+ e_MODULE_ID_FM_10GMAC1, /**< FM 10G MAC */
-+ e_MODULE_ID_FM_10GMAC2, /**< FM 10G MAC */
-+
-+ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
-+ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
-+ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
-+ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
-+ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
-+ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
-+ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
-+ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
-+ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
-+ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
-+ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
-+ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
-+
-+ e_MODULE_ID_PIC, /**< PIC */
-+ e_MODULE_ID_GPIO, /**< GPIO */
-+ e_MODULE_ID_SERDES, /**< SERDES */
-+ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
-+ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
-+
-+ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
-+
-+ e_MODULE_ID_DUMMY_LAST
-+} e_ModuleId;
-+
-+#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
-+
-+#if 0 /* using unified values */
-+/*****************************************************************************
-+ INTEGRATION-SPECIFIC MODULE CODES
-+******************************************************************************/
-+#define MODULE_UNKNOWN 0x00000000
-+#define MODULE_MEM 0x00010000
-+#define MODULE_MM 0x00020000
-+#define MODULE_CORE 0x00030000
-+#define MODULE_T4240 0x00040000
-+#define MODULE_T4240_PLATFORM 0x00050000
-+#define MODULE_PM 0x00060000
-+#define MODULE_MMU 0x00070000
-+#define MODULE_PIC 0x00080000
-+#define MODULE_CPC 0x00090000
-+#define MODULE_DUART 0x000a0000
-+#define MODULE_SERDES 0x000b0000
-+#define MODULE_PIO 0x000c0000
-+#define MODULE_QM 0x000d0000
-+#define MODULE_BM 0x000e0000
-+#define MODULE_SEC 0x000f0000
-+#define MODULE_LAW 0x00100000
-+#define MODULE_LBC 0x00110000
-+#define MODULE_PAMU 0x00120000
-+#define MODULE_FM 0x00130000
-+#define MODULE_FM_MURAM 0x00140000
-+#define MODULE_FM_PCD 0x00150000
-+#define MODULE_FM_RTC 0x00160000
-+#define MODULE_FM_MAC 0x00170000
-+#define MODULE_FM_PORT 0x00180000
-+#define MODULE_FM_SP 0x00190000
-+#define MODULE_DPA_PORT 0x001a0000
-+#define MODULE_MII 0x001b0000
-+#define MODULE_I2C 0x001c0000
-+#define MODULE_DMA 0x001d0000
-+#define MODULE_DDR 0x001e0000
-+#define MODULE_ESPI 0x001f0000
-+#define MODULE_DPAA_IPSEC 0x00200000
-+#endif /* using unified values */
-+
-+/*****************************************************************************
-+ PAMU INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define PAMU_NUM_OF_PARTITIONS 4
-+
-+/*****************************************************************************
-+ LAW INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define LAW_NUM_OF_WINDOWS 32
-+#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4 Kbytes */
-+#define LAW_MAX_WINDOW_SIZE 0x0000010000000000LL /**< 1 Tbytes for 40-bit address space */
-+
-+
-+/*****************************************************************************
-+ LBC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+/**************************************************************************//**
-+ @Group lbc_exception_grp LBC Exception Unit
-+
-+ @Description LBC Exception unit API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Anchor lbc_exbm
-+
-+ @Collection LBC Errors Bit Mask
-+
-+ These errors are reported through the exception callback.
-+ The values can be or'ed in any combination in the errors mask
-+ parameter of the errors report structure.
-+
-+ These errors can also be passed as a bit-mask to
-+ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
-+ for enabling or disabling error checking.
-+ @{
-+*//***************************************************************************/
-+#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
-+#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
-+#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
-+#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
-+
-+#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
-+ LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT)
-+ /**< All possible errors */
-+/* @} */
-+/** @} */ /* end of lbc_exception_grp group */
-+
-+#define LBC_INCORRECT_ERROR_REPORT_ERRATA
-+
-+#define LBC_NUM_OF_BANKS 8
-+#define LBC_MAX_CS_SIZE 0x0000000100000000LL /* Up to 4G memory block size */
-+#define LBC_PARITY_SUPPORT
-+#define LBC_ADDRESS_HOLD_TIME_CTRL
-+#define LBC_HIGH_CLK_DIVIDERS
-+#define LBC_FCM_AVAILABLE
-+
-+/*****************************************************************************
-+ GPIO INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define GPIO_PORT_OFFSET_0x1000
-+
-+#define GPIO_NUM_OF_PORTS 3 /**< Number of ports in GPIO module;
-+ Each port contains up to 32 I/O pins. */
-+
-+#define GPIO_VALID_PIN_MASKS \
-+ { /* Port A */ 0xFFFFFFFF, \
-+ /* Port B */ 0xFFFFFFFF, \
-+ /* Port C */ 0xFFFFFFFF }
-+
-+#define GPIO_VALID_INTR_MASKS \
-+ { /* Port A */ 0xFFFFFFFF, \
-+ /* Port B */ 0xFFFFFFFF, \
-+ /* Port C */ 0xFFFFFFFF }
-+
-+
-+
-+#endif /* __PART_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/dpaa_integration_ext.h
-@@ -0,0 +1,293 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**
-+
-+ @File dpaa_integration_ext.h
-+
-+ @Description   FMan V3L FM external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __DPAA_INTEGRATION_EXT_H
-+#define __DPAA_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+#define DPAA_VERSION 11
-+
-+/**************************************************************************//**
-+ @Description DPAA SW Portals Enumeration.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_DPAA_SWPORTAL0 = 0,
-+ e_DPAA_SWPORTAL1,
-+ e_DPAA_SWPORTAL2,
-+ e_DPAA_SWPORTAL3,
-+ e_DPAA_SWPORTAL4,
-+ e_DPAA_SWPORTAL5,
-+ e_DPAA_SWPORTAL6,
-+ e_DPAA_SWPORTAL7,
-+ e_DPAA_SWPORTAL8,
-+ e_DPAA_SWPORTAL9,
-+ e_DPAA_SWPORTAL10,
-+ e_DPAA_SWPORTAL11,
-+ e_DPAA_SWPORTAL12,
-+ e_DPAA_SWPORTAL13,
-+ e_DPAA_SWPORTAL14,
-+ e_DPAA_SWPORTAL15,
-+ e_DPAA_SWPORTAL16,
-+ e_DPAA_SWPORTAL17,
-+ e_DPAA_SWPORTAL18,
-+ e_DPAA_SWPORTAL19,
-+ e_DPAA_SWPORTAL20,
-+ e_DPAA_SWPORTAL21,
-+ e_DPAA_SWPORTAL22,
-+ e_DPAA_SWPORTAL23,
-+ e_DPAA_SWPORTAL24,
-+ e_DPAA_SWPORTAL_DUMMY_LAST
-+} e_DpaaSwPortal;
-+
-+/**************************************************************************//**
-+ @Description DPAA Direct Connect Portals Enumeration.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_DPAA_DCPORTAL0 = 0,
-+ e_DPAA_DCPORTAL1,
-+ e_DPAA_DCPORTAL2,
-+ e_DPAA_DCPORTAL_DUMMY_LAST
-+} e_DpaaDcPortal;
-+
-+#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
-+#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
-+
-+/*****************************************************************************
-+ QMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of channels, dedicated and pool */
-+#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
-+#define QM_MAX_NUM_OF_CGS 256 /**< Congestion groups number */
-+#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE)
-+ /**< FQIDs range - 24 bits */
-+
-+/**************************************************************************//**
-+ @Description Work Queue Channel assignments in QMan.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_QM_FQ_CHANNEL_SWPORTAL0 = 0x0, /**< Dedicated channels serviced by software portals 0 to 24 */
-+ e_QM_FQ_CHANNEL_SWPORTAL1,
-+ e_QM_FQ_CHANNEL_SWPORTAL2,
-+ e_QM_FQ_CHANNEL_SWPORTAL3,
-+ e_QM_FQ_CHANNEL_SWPORTAL4,
-+ e_QM_FQ_CHANNEL_SWPORTAL5,
-+ e_QM_FQ_CHANNEL_SWPORTAL6,
-+ e_QM_FQ_CHANNEL_SWPORTAL7,
-+ e_QM_FQ_CHANNEL_SWPORTAL8,
-+ e_QM_FQ_CHANNEL_SWPORTAL9,
-+ e_QM_FQ_CHANNEL_SWPORTAL10,
-+ e_QM_FQ_CHANNEL_SWPORTAL11,
-+ e_QM_FQ_CHANNEL_SWPORTAL12,
-+ e_QM_FQ_CHANNEL_SWPORTAL13,
-+ e_QM_FQ_CHANNEL_SWPORTAL14,
-+ e_QM_FQ_CHANNEL_SWPORTAL15,
-+ e_QM_FQ_CHANNEL_SWPORTAL16,
-+ e_QM_FQ_CHANNEL_SWPORTAL17,
-+ e_QM_FQ_CHANNEL_SWPORTAL18,
-+ e_QM_FQ_CHANNEL_SWPORTAL19,
-+ e_QM_FQ_CHANNEL_SWPORTAL20,
-+ e_QM_FQ_CHANNEL_SWPORTAL21,
-+ e_QM_FQ_CHANNEL_SWPORTAL22,
-+ e_QM_FQ_CHANNEL_SWPORTAL23,
-+ e_QM_FQ_CHANNEL_SWPORTAL24,
-+
-+ e_QM_FQ_CHANNEL_POOL1 = 0x401, /**< Pool channels that can be serviced by any of the software portals */
-+ e_QM_FQ_CHANNEL_POOL2,
-+ e_QM_FQ_CHANNEL_POOL3,
-+ e_QM_FQ_CHANNEL_POOL4,
-+ e_QM_FQ_CHANNEL_POOL5,
-+ e_QM_FQ_CHANNEL_POOL6,
-+ e_QM_FQ_CHANNEL_POOL7,
-+ e_QM_FQ_CHANNEL_POOL8,
-+ e_QM_FQ_CHANNEL_POOL9,
-+ e_QM_FQ_CHANNEL_POOL10,
-+ e_QM_FQ_CHANNEL_POOL11,
-+ e_QM_FQ_CHANNEL_POOL12,
-+ e_QM_FQ_CHANNEL_POOL13,
-+ e_QM_FQ_CHANNEL_POOL14,
-+ e_QM_FQ_CHANNEL_POOL15,
-+
-+ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x800, /**< Dedicated channels serviced by Direct Connect Portal 0:
-+ connected to FMan 0; assigned in incrementing order to
-+ each sub-portal (SP) in the portal */
-+ e_QM_FQ_CHANNEL_FMAN0_SP1,
-+ e_QM_FQ_CHANNEL_FMAN0_SP2,
-+ e_QM_FQ_CHANNEL_FMAN0_SP3,
-+ e_QM_FQ_CHANNEL_FMAN0_SP4,
-+ e_QM_FQ_CHANNEL_FMAN0_SP5,
-+ e_QM_FQ_CHANNEL_FMAN0_SP6,
-+ e_QM_FQ_CHANNEL_FMAN0_SP7,
-+ e_QM_FQ_CHANNEL_FMAN0_SP8,
-+ e_QM_FQ_CHANNEL_FMAN0_SP9,
-+ e_QM_FQ_CHANNEL_FMAN0_SP10,
-+ e_QM_FQ_CHANNEL_FMAN0_SP11,
-+ e_QM_FQ_CHANNEL_FMAN0_SP12,
-+ e_QM_FQ_CHANNEL_FMAN0_SP13,
-+ e_QM_FQ_CHANNEL_FMAN0_SP14,
-+ e_QM_FQ_CHANNEL_FMAN0_SP15,
-+
-+ e_QM_FQ_CHANNEL_RMAN_SP0 = 0x820, /**< Dedicated channels serviced by Direct Connect Portal 1: connected to RMan */
-+ e_QM_FQ_CHANNEL_RMAN_SP1,
-+
-+ e_QM_FQ_CHANNEL_CAAM = 0x840 /**< Dedicated channel serviced by Direct Connect Portal 2:
-+ connected to SEC */
-+} e_QmFQChannel;
-+
-+/*****************************************************************************
-+ BMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define BM_MAX_NUM_OF_POOLS         64          /**< Number of buffer pools */
-+
-+/*****************************************************************************
-+ SEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define SEC_NUM_OF_DECOS 3
-+#define SEC_ALL_DECOS_MASK 0x00000003
-+
-+
-+/*****************************************************************************
-+ FM INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define INTG_MAX_NUM_OF_FM 1
-+/* Ports defines */
-+#define FM_MAX_NUM_OF_1G_MACS 5
-+#define FM_MAX_NUM_OF_10G_MACS 1
-+#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
-+#define FM_MAX_NUM_OF_OH_PORTS 4
-+
-+#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
-+
-+#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
-+
-+#define FM_MAX_NUM_OF_MACSECS 1 /* Should be updated */
-+
-+#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
-+#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
-+#define FM_MAX_NUM_OF_SUB_PORTALS 16
-+#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
-+
-+#define FM_VSP_MAX_NUM_OF_ENTRIES 32
-+#define FM_MAX_NUM_OF_PFC_PRIORITIES 8
-+
-+/* RAMs defines */
-+#define FM_MURAM_SIZE (192 * KILOBYTE)
-+#define FM_IRAM_SIZE(major, minor) \
-+    (((major == 6) && (minor == 4)) ? (64 * KILOBYTE) : (32 * KILOBYTE))
-+#define FM_NUM_OF_CTRL 2
-+
-+/* PCD defines */
-+#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
-+#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
-+#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
-+#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000600 /**< Number of bytes saved for patches */
-+#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
-+
-+/* RTC defines */
-+#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
-+#define FM_RTC_NUM_OF_PERIODIC_PULSES 3 /**< RTC number of periodic pulses */
-+#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
-+
-+/* QMI defines */
-+#define QMI_MAX_NUM_OF_TNUMS 64
-+#define QMI_DEF_TNUMS_THRESH 32
-+/* FPM defines */
-+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
-+
-+/* DMA defines */
-+#define DMA_THRESH_MAX_COMMQ 83
-+#define DMA_THRESH_MAX_BUF 127
-+
-+/* BMI defines */
-+#define BMI_MAX_NUM_OF_TASKS 64
-+#define BMI_MAX_NUM_OF_DMAS 32
-+
-+#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
-+#define PORT_MAX_WEIGHT 16
-+
-+#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
-+
-+/* Unique T4240 */
-+#define FM_OP_OPEN_DMA_MIN_LIMIT
-+#define FM_NO_RESTRICT_ON_ACCESS_RSRC
-+#define FM_NO_OP_OBSERVED_POOLS
-+#define FM_FRAME_END_PARAMS_FOR_OP
-+#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
-+#define FM_QMI_NO_SINGLE_ECC_EXCEPTION
-+
-+#define FM_NO_GUARANTEED_RESET_VALUES
-+
-+/* FM errata */
-+#define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
-+#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
-+#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
-+#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
-+#define FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273
-+
-+#define FM_BCB_ERRATA_BMI_SW001
-+#define FM_LEN_CHECK_ERRATA_FMAN_SW002
-+#define FM_AID_MODE_NO_TNUM_SW005 /* refer to pdm TKT068794 - only support of port_id on aid */
-+#define FM_ERROR_VSP_NO_MATCH_SW006 /* refer to pdm TKT174304 - no match between errorQ and VSP */
-+
-+/*****************************************************************************
-+ RMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define RM_MAX_NUM_OF_IB 4 /**< Number of inbound blocks */
-+#define RM_NUM_OF_IBCU              8           /**< Number of classification units in an inbound block */
-+
-+/* RMan erratas */
-+#define RM_ERRONEOUS_ACK_ERRATA_RMAN_A006756
-+
-+/*****************************************************************************
-+ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define NUM_OF_RX_SC 16
-+#define NUM_OF_TX_SC 16
-+
-+#define NUM_OF_SA_PER_RX_SC 2
-+#define NUM_OF_SA_PER_TX_SC 2
-+
-+#endif /* __DPAA_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_ext.h
-@@ -0,0 +1,59 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+
-+ @File part_ext.h
-+
-+ @Description Definitions for the part (integration) module.
-+*//***************************************************************************/
-+
-+#ifndef __PART_EXT_H
-+#define __PART_EXT_H
-+
-+#include "std_ext.h"
-+#include "part_integration_ext.h"
-+
-+/**************************************************************************//*
-+ @Description Part data structure - must be contained in any integration
-+ data structure.
-+*//***************************************************************************/
-+typedef struct t_Part
-+{
-+ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
-+ /**< Returns the address of the module's memory map base. */
-+ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
-+ /**< Returns the module's ID according to its memory map base. */
-+} t_Part;
-+
-+
-+#endif /* __PART_EXT_H */
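
Illustrative aside (not taken from the patch): the t_Part structure above is simply a pair of callbacks that the integration layer fills in so callers can translate between e_ModuleId values and memory-map base addresses. A minimal, self-contained C sketch of that idiom is below; the reduced e_ModuleId enum, the t_Handle stand-in, and the fixed base address are placeholders, not values from the SDK headers.

    #include <stdint.h>
    #include <stdio.h>

    typedef void *t_Handle;                            /* stand-in for the SDK's t_Handle        */
    typedef enum { e_MODULE_ID_FM = 0 } e_ModuleId;    /* enum reduced to one entry for the sketch */

    typedef struct t_Part {
        uintptr_t  (*f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
        e_ModuleId (*f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
    } t_Part;

    /* hypothetical integration callback returning an arbitrary, made-up base address */
    static uintptr_t GetModuleBase(t_Handle h_Part, e_ModuleId moduleId)
    {
        (void)h_Part;
        return (moduleId == e_MODULE_ID_FM) ? (uintptr_t)0x1000000u : 0u;
    }

    int main(void)
    {
        t_Part part = { .f_GetModuleBase = GetModuleBase, .f_GetModuleIdByBase = NULL };
        printf("FM base: 0x%lx\n",
               (unsigned long)part.f_GetModuleBase(NULL, e_MODULE_ID_FM));
        return 0;
    }
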
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/FMANV3L/part_integration_ext.h
-@@ -0,0 +1,304 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**
-+
-+ @File part_integration_ext.h
-+
-+ @Description T4240 external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __PART_INTEGRATION_EXT_H
-+#define __PART_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+#include "ddr_std_ext.h"
-+#include "enet_ext.h"
-+#include "dpaa_integration_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group T4240_chip_id T4240 Application Programming Interface
-+
-+ @Description T4240 Chip functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define CORE_E6500
-+
-+#define INTG_MAX_NUM_OF_CORES 24
-+
-+
-+/**************************************************************************//**
-+ @Description Module types.
-+*//***************************************************************************/
-+typedef enum e_ModuleId
-+{
-+ e_MODULE_ID_DUART_1 = 0,
-+ e_MODULE_ID_DUART_2,
-+ e_MODULE_ID_DUART_3,
-+ e_MODULE_ID_DUART_4,
-+ e_MODULE_ID_LAW,
-+ e_MODULE_ID_IFC,
-+ e_MODULE_ID_PAMU,
-+ e_MODULE_ID_QM, /**< Queue manager module */
-+ e_MODULE_ID_BM, /**< Buffer manager module */
-+ e_MODULE_ID_QM_CE_PORTAL_0,
-+ e_MODULE_ID_QM_CI_PORTAL_0,
-+ e_MODULE_ID_QM_CE_PORTAL_1,
-+ e_MODULE_ID_QM_CI_PORTAL_1,
-+ e_MODULE_ID_QM_CE_PORTAL_2,
-+ e_MODULE_ID_QM_CI_PORTAL_2,
-+ e_MODULE_ID_QM_CE_PORTAL_3,
-+ e_MODULE_ID_QM_CI_PORTAL_3,
-+ e_MODULE_ID_QM_CE_PORTAL_4,
-+ e_MODULE_ID_QM_CI_PORTAL_4,
-+ e_MODULE_ID_QM_CE_PORTAL_5,
-+ e_MODULE_ID_QM_CI_PORTAL_5,
-+ e_MODULE_ID_QM_CE_PORTAL_6,
-+ e_MODULE_ID_QM_CI_PORTAL_6,
-+ e_MODULE_ID_QM_CE_PORTAL_7,
-+ e_MODULE_ID_QM_CI_PORTAL_7,
-+ e_MODULE_ID_QM_CE_PORTAL_8,
-+ e_MODULE_ID_QM_CI_PORTAL_8,
-+ e_MODULE_ID_QM_CE_PORTAL_9,
-+ e_MODULE_ID_QM_CI_PORTAL_9,
-+ e_MODULE_ID_BM_CE_PORTAL_0,
-+ e_MODULE_ID_BM_CI_PORTAL_0,
-+ e_MODULE_ID_BM_CE_PORTAL_1,
-+ e_MODULE_ID_BM_CI_PORTAL_1,
-+ e_MODULE_ID_BM_CE_PORTAL_2,
-+ e_MODULE_ID_BM_CI_PORTAL_2,
-+ e_MODULE_ID_BM_CE_PORTAL_3,
-+ e_MODULE_ID_BM_CI_PORTAL_3,
-+ e_MODULE_ID_BM_CE_PORTAL_4,
-+ e_MODULE_ID_BM_CI_PORTAL_4,
-+ e_MODULE_ID_BM_CE_PORTAL_5,
-+ e_MODULE_ID_BM_CI_PORTAL_5,
-+ e_MODULE_ID_BM_CE_PORTAL_6,
-+ e_MODULE_ID_BM_CI_PORTAL_6,
-+ e_MODULE_ID_BM_CE_PORTAL_7,
-+ e_MODULE_ID_BM_CI_PORTAL_7,
-+ e_MODULE_ID_BM_CE_PORTAL_8,
-+ e_MODULE_ID_BM_CI_PORTAL_8,
-+ e_MODULE_ID_BM_CE_PORTAL_9,
-+ e_MODULE_ID_BM_CI_PORTAL_9,
-+ e_MODULE_ID_FM, /**< Frame manager module */
-+ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
-+ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
-+ e_MODULE_ID_FM_BMI, /**< FM BMI block */
-+ e_MODULE_ID_FM_QMI, /**< FM QMI block */
-+ e_MODULE_ID_FM_PARSER, /**< FM parser block */
-+ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO5, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO6, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO7, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx2, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx3, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx4, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx5, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx6, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GRx1, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GRx2, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx2, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx3, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx4, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx5, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx6, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GTx1, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GTx2, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM_PLCR, /**< FM Policer */
-+ e_MODULE_ID_FM_KG, /**< FM Keygen */
-+ e_MODULE_ID_FM_DMA, /**< FM DMA */
-+ e_MODULE_ID_FM_FPM, /**< FM FPM */
-+ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
-+ e_MODULE_ID_FM_1GMDIO, /**< FM 1G MDIO MAC */
-+ e_MODULE_ID_FM_10GMDIO, /**< FM 10G MDIO */
-+ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
-+ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
-+ e_MODULE_ID_FM_1GMAC2, /**< FM 1G MAC #2 */
-+ e_MODULE_ID_FM_1GMAC3, /**< FM 1G MAC #3 */
-+ e_MODULE_ID_FM_1GMAC4, /**< FM 1G MAC #4 */
-+ e_MODULE_ID_FM_1GMAC5, /**< FM 1G MAC #5 */
-+ e_MODULE_ID_FM_1GMAC6, /**< FM 1G MAC #6 */
-+ e_MODULE_ID_FM_10GMAC1, /**< FM 10G MAC */
-+ e_MODULE_ID_FM_10GMAC2, /**< FM 10G MAC */
-+
-+ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
-+ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
-+ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
-+ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
-+ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
-+ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
-+ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
-+ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
-+ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
-+ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
-+ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
-+ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
-+
-+ e_MODULE_ID_PIC, /**< PIC */
-+ e_MODULE_ID_GPIO, /**< GPIO */
-+ e_MODULE_ID_SERDES, /**< SERDES */
-+ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
-+ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
-+
-+ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
-+
-+ e_MODULE_ID_DUMMY_LAST
-+} e_ModuleId;
-+
-+#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
-+
-+#if 0 /* using unified values */
-+/*****************************************************************************
-+ INTEGRATION-SPECIFIC MODULE CODES
-+******************************************************************************/
-+#define MODULE_UNKNOWN 0x00000000
-+#define MODULE_MEM 0x00010000
-+#define MODULE_MM 0x00020000
-+#define MODULE_CORE 0x00030000
-+#define MODULE_T4240 0x00040000
-+#define MODULE_T4240_PLATFORM 0x00050000
-+#define MODULE_PM 0x00060000
-+#define MODULE_MMU 0x00070000
-+#define MODULE_PIC 0x00080000
-+#define MODULE_CPC 0x00090000
-+#define MODULE_DUART 0x000a0000
-+#define MODULE_SERDES 0x000b0000
-+#define MODULE_PIO 0x000c0000
-+#define MODULE_QM 0x000d0000
-+#define MODULE_BM 0x000e0000
-+#define MODULE_SEC 0x000f0000
-+#define MODULE_LAW 0x00100000
-+#define MODULE_LBC 0x00110000
-+#define MODULE_PAMU 0x00120000
-+#define MODULE_FM 0x00130000
-+#define MODULE_FM_MURAM 0x00140000
-+#define MODULE_FM_PCD 0x00150000
-+#define MODULE_FM_RTC 0x00160000
-+#define MODULE_FM_MAC 0x00170000
-+#define MODULE_FM_PORT 0x00180000
-+#define MODULE_FM_SP 0x00190000
-+#define MODULE_DPA_PORT 0x001a0000
-+#define MODULE_MII 0x001b0000
-+#define MODULE_I2C 0x001c0000
-+#define MODULE_DMA 0x001d0000
-+#define MODULE_DDR 0x001e0000
-+#define MODULE_ESPI 0x001f0000
-+#define MODULE_DPAA_IPSEC 0x00200000
-+#endif /* using unified values */
-+
-+/*****************************************************************************
-+ PAMU INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define PAMU_NUM_OF_PARTITIONS 4
-+
-+/*****************************************************************************
-+ LAW INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define LAW_NUM_OF_WINDOWS 32
-+#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4 Kbytes */
-+#define LAW_MAX_WINDOW_SIZE 0x0000010000000000LL /**< 1 Tbyte for 40-bit address space */
-+
-+
-+/*****************************************************************************
-+ LBC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+/**************************************************************************//**
-+ @Group lbc_exception_grp LBC Exception Unit
-+
-+ @Description LBC Exception unit API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Anchor lbc_exbm
-+
-+ @Collection LBC Errors Bit Mask
-+
-+ These errors are reported through the exceptions callback.
-+ The values can be or'ed in any combination in the errors mask
-+ parameter of the errors report structure.
-+
-+ These errors can also be passed as a bit-mask to
-+ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
-+ for enabling or disabling error checking.
-+ @{
-+*//***************************************************************************/
-+#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
-+#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
-+#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
-+#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
-+
-+#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
-+ LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT)
-+ /**< All possible errors */
-+/* @} */
-+/** @} */ /* end of lbc_exception_grp group */
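
As an aside (not part of the patch): the LBC error definitions above are plain bit flags, so a caller builds the mask described in the comment by OR-ing the individual bits; the same mask format is what the comment says LBC_EnableErrorChecking()/LBC_DisableErrorChecking() expect (those functions are not called in the sketch, since their signatures are not shown here). The constants below are copied from the block above so the snippet compiles on its own.

    #include <stdint.h>
    #include <stdio.h>

    #define LBC_ERR_BUS_MONITOR   0x80000000  /* bus monitor error            */
    #define LBC_ERR_PARITY_ECC    0x20000000  /* parity error for GPCM/UPM    */
    #define LBC_ERR_WRITE_PROTECT 0x04000000  /* write protection error       */
    #define LBC_ERR_CHIP_SELECT   0x00080000  /* unrecognized chip select     */
    #define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
                         LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT)

    int main(void)
    {
        /* enable checking for bus-monitor and parity/ECC errors only */
        uint32_t mask = LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC;

        printf("mask=0x%08x, subset of LBC_ERR_ALL: %s\n",
               (unsigned)mask, ((mask & ~LBC_ERR_ALL) == 0) ? "yes" : "no");
        return 0;
    }
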
-+
-+#define LBC_INCORRECT_ERROR_REPORT_ERRATA
-+
-+#define LBC_NUM_OF_BANKS 8
-+#define LBC_MAX_CS_SIZE 0x0000000100000000LL /* Up to 4G memory block size */
-+#define LBC_PARITY_SUPPORT
-+#define LBC_ADDRESS_HOLD_TIME_CTRL
-+#define LBC_HIGH_CLK_DIVIDERS
-+#define LBC_FCM_AVAILABLE
-+
-+/*****************************************************************************
-+ GPIO INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define GPIO_PORT_OFFSET_0x1000
-+
-+#define GPIO_NUM_OF_PORTS 3 /**< Number of ports in GPIO module;
-+ Each port contains up to 32 I/O pins. */
-+
-+#define GPIO_VALID_PIN_MASKS \
-+ { /* Port A */ 0xFFFFFFFF, \
-+ /* Port B */ 0xFFFFFFFF, \
-+ /* Port C */ 0xFFFFFFFF }
-+
-+#define GPIO_VALID_INTR_MASKS \
-+ { /* Port A */ 0xFFFFFFFF, \
-+ /* Port B */ 0xFFFFFFFF, \
-+ /* Port C */ 0xFFFFFFFF }
-+
-+
-+
-+#endif /* __PART_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/dpaa_integration_ext.h
-@@ -0,0 +1,291 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**
-+
-+ @File dpaa_integration_ext.h
-+
-+ @Description LS1043 FM external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __DPAA_INTEGRATION_EXT_H
-+#define __DPAA_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+#define DPAA_VERSION 11
-+
-+/**************************************************************************//**
-+ @Description DPAA SW Portals Enumeration.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_DPAA_SWPORTAL0 = 0,
-+ e_DPAA_SWPORTAL1,
-+ e_DPAA_SWPORTAL2,
-+ e_DPAA_SWPORTAL3,
-+ e_DPAA_SWPORTAL4,
-+ e_DPAA_SWPORTAL5,
-+ e_DPAA_SWPORTAL6,
-+ e_DPAA_SWPORTAL7,
-+ e_DPAA_SWPORTAL8,
-+ e_DPAA_SWPORTAL9,
-+ e_DPAA_SWPORTAL10,
-+ e_DPAA_SWPORTAL11,
-+ e_DPAA_SWPORTAL12,
-+ e_DPAA_SWPORTAL13,
-+ e_DPAA_SWPORTAL14,
-+ e_DPAA_SWPORTAL15,
-+ e_DPAA_SWPORTAL16,
-+ e_DPAA_SWPORTAL17,
-+ e_DPAA_SWPORTAL18,
-+ e_DPAA_SWPORTAL19,
-+ e_DPAA_SWPORTAL20,
-+ e_DPAA_SWPORTAL21,
-+ e_DPAA_SWPORTAL22,
-+ e_DPAA_SWPORTAL23,
-+ e_DPAA_SWPORTAL24,
-+ e_DPAA_SWPORTAL_DUMMY_LAST
-+} e_DpaaSwPortal;
-+
-+/**************************************************************************//**
-+ @Description DPAA Direct Connect Portals Enumeration.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_DPAA_DCPORTAL0 = 0,
-+ e_DPAA_DCPORTAL1,
-+ e_DPAA_DCPORTAL2,
-+ e_DPAA_DCPORTAL_DUMMY_LAST
-+} e_DpaaDcPortal;
-+
-+#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
-+#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
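
Illustrative aside (not from the patch): the *_DUMMY_LAST sentinels above let the portal counts be derived from the enums themselves, so per-portal tables can be sized directly from DPAA_MAX_NUM_OF_SW_PORTALS. A reduced, self-contained sketch of that idiom follows; most enumerators are trimmed here, so the count printed is smaller than the 25 software portals of the full LS1043 enum.

    #include <stdio.h>

    typedef enum {
        e_DPAA_SWPORTAL0 = 0,
        e_DPAA_SWPORTAL1,
        /* ... portals 2..24 elided for brevity ... */
        e_DPAA_SWPORTAL_DUMMY_LAST            /* sentinel: one past the last portal */
    } e_DpaaSwPortal;

    #define DPAA_MAX_NUM_OF_SW_PORTALS  e_DPAA_SWPORTAL_DUMMY_LAST

    static int portal_in_use[DPAA_MAX_NUM_OF_SW_PORTALS];   /* sized by the sentinel */

    int main(void)
    {
        portal_in_use[e_DPAA_SWPORTAL1] = 1;
        printf("portals in this trimmed sketch: %d (25 in the full LS1043 enum)\n",
               (int)DPAA_MAX_NUM_OF_SW_PORTALS);
        return 0;
    }
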
-+
-+/*****************************************************************************
-+ QMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of channels, dedicated and pool */
-+#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
-+#define QM_MAX_NUM_OF_CGS 256 /**< Congestion groups number */
-+#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE)
-+ /**< FQIDs range - 24 bits */
-+
-+/**************************************************************************//**
-+ @Description Work Queue Channel assignments in QMan.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_QM_FQ_CHANNEL_SWPORTAL0 = 0x0, /**< Dedicated channels serviced by software portals 0 to 24 */
-+ e_QM_FQ_CHANNEL_SWPORTAL1,
-+ e_QM_FQ_CHANNEL_SWPORTAL2,
-+ e_QM_FQ_CHANNEL_SWPORTAL3,
-+ e_QM_FQ_CHANNEL_SWPORTAL4,
-+ e_QM_FQ_CHANNEL_SWPORTAL5,
-+ e_QM_FQ_CHANNEL_SWPORTAL6,
-+ e_QM_FQ_CHANNEL_SWPORTAL7,
-+ e_QM_FQ_CHANNEL_SWPORTAL8,
-+ e_QM_FQ_CHANNEL_SWPORTAL9,
-+ e_QM_FQ_CHANNEL_SWPORTAL10,
-+ e_QM_FQ_CHANNEL_SWPORTAL11,
-+ e_QM_FQ_CHANNEL_SWPORTAL12,
-+ e_QM_FQ_CHANNEL_SWPORTAL13,
-+ e_QM_FQ_CHANNEL_SWPORTAL14,
-+ e_QM_FQ_CHANNEL_SWPORTAL15,
-+ e_QM_FQ_CHANNEL_SWPORTAL16,
-+ e_QM_FQ_CHANNEL_SWPORTAL17,
-+ e_QM_FQ_CHANNEL_SWPORTAL18,
-+ e_QM_FQ_CHANNEL_SWPORTAL19,
-+ e_QM_FQ_CHANNEL_SWPORTAL20,
-+ e_QM_FQ_CHANNEL_SWPORTAL21,
-+ e_QM_FQ_CHANNEL_SWPORTAL22,
-+ e_QM_FQ_CHANNEL_SWPORTAL23,
-+ e_QM_FQ_CHANNEL_SWPORTAL24,
-+
-+ e_QM_FQ_CHANNEL_POOL1 = 0x401, /**< Pool channels that can be serviced by any of the software portals */
-+ e_QM_FQ_CHANNEL_POOL2,
-+ e_QM_FQ_CHANNEL_POOL3,
-+ e_QM_FQ_CHANNEL_POOL4,
-+ e_QM_FQ_CHANNEL_POOL5,
-+ e_QM_FQ_CHANNEL_POOL6,
-+ e_QM_FQ_CHANNEL_POOL7,
-+ e_QM_FQ_CHANNEL_POOL8,
-+ e_QM_FQ_CHANNEL_POOL9,
-+ e_QM_FQ_CHANNEL_POOL10,
-+ e_QM_FQ_CHANNEL_POOL11,
-+ e_QM_FQ_CHANNEL_POOL12,
-+ e_QM_FQ_CHANNEL_POOL13,
-+ e_QM_FQ_CHANNEL_POOL14,
-+ e_QM_FQ_CHANNEL_POOL15,
-+
-+ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x800, /**< Dedicated channels serviced by Direct Connect Portal 0:
-+ connected to FMan 0; assigned in incrementing order to
-+ each sub-portal (SP) in the portal */
-+ e_QM_FQ_CHANNEL_FMAN0_SP1,
-+ e_QM_FQ_CHANNEL_FMAN0_SP2,
-+ e_QM_FQ_CHANNEL_FMAN0_SP3,
-+ e_QM_FQ_CHANNEL_FMAN0_SP4,
-+ e_QM_FQ_CHANNEL_FMAN0_SP5,
-+ e_QM_FQ_CHANNEL_FMAN0_SP6,
-+ e_QM_FQ_CHANNEL_FMAN0_SP7,
-+ e_QM_FQ_CHANNEL_FMAN0_SP8,
-+ e_QM_FQ_CHANNEL_FMAN0_SP9,
-+ e_QM_FQ_CHANNEL_FMAN0_SP10,
-+ e_QM_FQ_CHANNEL_FMAN0_SP11,
-+ e_QM_FQ_CHANNEL_FMAN0_SP12,
-+ e_QM_FQ_CHANNEL_FMAN0_SP13,
-+ e_QM_FQ_CHANNEL_FMAN0_SP14,
-+ e_QM_FQ_CHANNEL_FMAN0_SP15,
-+
-+ e_QM_FQ_CHANNEL_RMAN_SP0 = 0x820, /**< Dedicated channels serviced by Direct Connect Portal 1: connected to RMan */
-+ e_QM_FQ_CHANNEL_RMAN_SP1,
-+
-+ e_QM_FQ_CHANNEL_CAAM = 0x840 /**< Dedicated channel serviced by Direct Connect Portal 2:
-+ connected to SEC */
-+} e_QmFQChannel;
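
Aside (not part of the patch): since the comment above states that the FMan0 dedicated channels are assigned in incrementing order per sub-portal, the channel for a given sub-portal index can be computed by simple offset arithmetic, assuming the index stays below FM_MAX_NUM_OF_SUB_PORTALS. A small sketch with the relevant values copied from the enum above:

    #include <stdio.h>

    /* values copied from the e_QmFQChannel enum above (trimmed) */
    enum {
        e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x800,
        e_QM_FQ_CHANNEL_CAAM      = 0x840
    };

    /* channel serviced by FMan0 sub-portal 'sp' (0-based) */
    static unsigned fman0_sp_channel(unsigned sp)
    {
        return e_QM_FQ_CHANNEL_FMAN0_SP0 + sp;   /* incrementing assignment per sub-portal */
    }

    int main(void)
    {
        printf("FMan0 SP5 -> channel 0x%x\n", fman0_sp_channel(5));      /* 0x805 */
        printf("CAAM channel 0x%x\n", (unsigned)e_QM_FQ_CHANNEL_CAAM);
        return 0;
    }
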
-+
-+/*****************************************************************************
-+ BMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define BM_MAX_NUM_OF_POOLS 64 /**< Number of buffers pools */
-+
-+/*****************************************************************************
-+ SEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define SEC_NUM_OF_DECOS 3
-+#define SEC_ALL_DECOS_MASK 0x00000003
-+
-+
-+/*****************************************************************************
-+ FM INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define INTG_MAX_NUM_OF_FM 2
-+
-+/* Ports defines */
-+#define FM_MAX_NUM_OF_1G_MACS 6
-+#define FM_MAX_NUM_OF_10G_MACS 2
-+#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
-+#define FM_MAX_NUM_OF_OH_PORTS 6
-+
-+#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
-+
-+#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
-+
-+#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
-+#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
-+#define FM_MAX_NUM_OF_SUB_PORTALS 16
-+#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
-+
-+#define FM_VSP_MAX_NUM_OF_ENTRIES 64
-+#define FM_MAX_NUM_OF_PFC_PRIORITIES 8
-+
-+/* RAMs defines */
-+#define FM_MURAM_SIZE (384 * KILOBYTE)
-+#define FM_IRAM_SIZE(major, minor) (64 * KILOBYTE)
-+#define FM_NUM_OF_CTRL 4
-+
-+/* PCD defines */
-+#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
-+#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
-+#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
-+#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000600 /**< Number of bytes saved for patches */
-+#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
-+
-+/* RTC defines */
-+#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
-+#define FM_RTC_NUM_OF_PERIODIC_PULSES 3 /**< RTC number of periodic pulses */
-+#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
-+
-+/* QMI defines */
-+#define QMI_MAX_NUM_OF_TNUMS 64
-+#define QMI_DEF_TNUMS_THRESH 32
-+/* FPM defines */
-+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
-+
-+/* DMA defines */
-+#define DMA_THRESH_MAX_COMMQ 83
-+#define DMA_THRESH_MAX_BUF 127
-+
-+/* BMI defines */
-+#define BMI_MAX_NUM_OF_TASKS 128
-+#define BMI_MAX_NUM_OF_DMAS 84
-+
-+#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
-+#define PORT_MAX_WEIGHT 16
-+
-+#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
-+
-+/* Unique T4240 */
-+#define FM_OP_OPEN_DMA_MIN_LIMIT
-+#define FM_NO_RESTRICT_ON_ACCESS_RSRC
-+#define FM_NO_OP_OBSERVED_POOLS
-+#define FM_FRAME_END_PARAMS_FOR_OP
-+#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
-+#define FM_QMI_NO_SINGLE_ECC_EXCEPTION
-+
-+#define FM_NO_GUARANTEED_RESET_VALUES
-+
-+/* FM errata */
-+#define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
-+#define FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127
-+#define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320
-+#define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675
-+#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981
-+
-+#define FM_BCB_ERRATA_BMI_SW001
-+#define FM_LEN_CHECK_ERRATA_FMAN_SW002
-+#define FM_AID_MODE_NO_TNUM_SW005 /* refer to pdm TKT068794 - only support of port_id on aid */
-+#define FM_ERROR_VSP_NO_MATCH_SW006 /* refer to pdm TKT174304 - no match between errorQ and VSP */
-+
-+/*****************************************************************************
-+ RMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define RM_MAX_NUM_OF_IB 4 /**< Number of inbound blocks */
-+#define RM_NUM_OF_IBCU 8 /**< Number of classification units in an inbound block */
-+
-+/* RMan erratas */
-+#define RM_ERRONEOUS_ACK_ERRATA_RMAN_A006756
-+
-+/*****************************************************************************
-+ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define NUM_OF_RX_SC 16
-+#define NUM_OF_TX_SC 16
-+
-+#define NUM_OF_SA_PER_RX_SC 2
-+#define NUM_OF_SA_PER_TX_SC 2
-+
-+#endif /* __DPAA_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_ext.h
-@@ -0,0 +1,64 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+
-+ @File part_ext.h
-+
-+ @Description Definitions for the part (integration) module.
-+*//***************************************************************************/
-+
-+#ifndef __PART_EXT_H
-+#define __PART_EXT_H
-+
-+#include "std_ext.h"
-+#include "part_integration_ext.h"
-+
-+#if !(defined(LS1043))
-+#error "unable to proceed without chip-definition"
-+#endif
-+
-+
-+/**************************************************************************//*
-+ @Description Part data structure - must be contained in any integration
-+ data structure.
-+*//***************************************************************************/
-+typedef struct t_Part
-+{
-+ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
-+ /**< Returns the address of the module's memory map base. */
-+ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
-+ /**< Returns the module's ID according to its memory map base. */
-+} t_Part;
-+
-+
-+#endif /* __PART_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/LS1043/part_integration_ext.h
-@@ -0,0 +1,185 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**
-+
-+ @File part_integration_ext.h
-+
-+ @Description LS1043 external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __PART_INTEGRATION_EXT_H
-+#define __PART_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+#include "ddr_std_ext.h"
-+#include "enet_ext.h"
-+#include "dpaa_integration_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group LS1043_chip_id LS1043 Application Programming Interface
-+
-+ @Description LS1043 Chip functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define INTG_MAX_NUM_OF_CORES 4
-+
-+/**************************************************************************//**
-+ @Description Module types.
-+*//***************************************************************************/
-+typedef enum e_ModuleId
-+{
-+ e_MODULE_ID_DUART_1 = 0,
-+ e_MODULE_ID_DUART_2,
-+ e_MODULE_ID_DUART_3,
-+ e_MODULE_ID_DUART_4,
-+ e_MODULE_ID_LAW,
-+ e_MODULE_ID_IFC,
-+ e_MODULE_ID_PAMU,
-+ e_MODULE_ID_QM, /**< Queue manager module */
-+ e_MODULE_ID_BM, /**< Buffer manager module */
-+ e_MODULE_ID_QM_CE_PORTAL_0,
-+ e_MODULE_ID_QM_CI_PORTAL_0,
-+ e_MODULE_ID_QM_CE_PORTAL_1,
-+ e_MODULE_ID_QM_CI_PORTAL_1,
-+ e_MODULE_ID_QM_CE_PORTAL_2,
-+ e_MODULE_ID_QM_CI_PORTAL_2,
-+ e_MODULE_ID_QM_CE_PORTAL_3,
-+ e_MODULE_ID_QM_CI_PORTAL_3,
-+ e_MODULE_ID_QM_CE_PORTAL_4,
-+ e_MODULE_ID_QM_CI_PORTAL_4,
-+ e_MODULE_ID_QM_CE_PORTAL_5,
-+ e_MODULE_ID_QM_CI_PORTAL_5,
-+ e_MODULE_ID_QM_CE_PORTAL_6,
-+ e_MODULE_ID_QM_CI_PORTAL_6,
-+ e_MODULE_ID_QM_CE_PORTAL_7,
-+ e_MODULE_ID_QM_CI_PORTAL_7,
-+ e_MODULE_ID_QM_CE_PORTAL_8,
-+ e_MODULE_ID_QM_CI_PORTAL_8,
-+ e_MODULE_ID_QM_CE_PORTAL_9,
-+ e_MODULE_ID_QM_CI_PORTAL_9,
-+ e_MODULE_ID_BM_CE_PORTAL_0,
-+ e_MODULE_ID_BM_CI_PORTAL_0,
-+ e_MODULE_ID_BM_CE_PORTAL_1,
-+ e_MODULE_ID_BM_CI_PORTAL_1,
-+ e_MODULE_ID_BM_CE_PORTAL_2,
-+ e_MODULE_ID_BM_CI_PORTAL_2,
-+ e_MODULE_ID_BM_CE_PORTAL_3,
-+ e_MODULE_ID_BM_CI_PORTAL_3,
-+ e_MODULE_ID_BM_CE_PORTAL_4,
-+ e_MODULE_ID_BM_CI_PORTAL_4,
-+ e_MODULE_ID_BM_CE_PORTAL_5,
-+ e_MODULE_ID_BM_CI_PORTAL_5,
-+ e_MODULE_ID_BM_CE_PORTAL_6,
-+ e_MODULE_ID_BM_CI_PORTAL_6,
-+ e_MODULE_ID_BM_CE_PORTAL_7,
-+ e_MODULE_ID_BM_CI_PORTAL_7,
-+ e_MODULE_ID_BM_CE_PORTAL_8,
-+ e_MODULE_ID_BM_CI_PORTAL_8,
-+ e_MODULE_ID_BM_CE_PORTAL_9,
-+ e_MODULE_ID_BM_CI_PORTAL_9,
-+ e_MODULE_ID_FM, /**< Frame manager module */
-+ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
-+ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
-+ e_MODULE_ID_FM_BMI, /**< FM BMI block */
-+ e_MODULE_ID_FM_QMI, /**< FM QMI block */
-+ e_MODULE_ID_FM_PARSER, /**< FM parser block */
-+ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO5, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO6, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO7, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx2, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx3, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx4, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx5, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx6, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GRx1, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GRx2, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx2, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx3, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx4, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx5, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx6, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GTx1, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM_PORT_10GTx2, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM_PLCR, /**< FM Policer */
-+ e_MODULE_ID_FM_KG, /**< FM Keygen */
-+ e_MODULE_ID_FM_DMA, /**< FM DMA */
-+ e_MODULE_ID_FM_FPM, /**< FM FPM */
-+ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
-+ e_MODULE_ID_FM_1GMDIO, /**< FM 1G MDIO MAC */
-+ e_MODULE_ID_FM_10GMDIO, /**< FM 10G MDIO */
-+ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
-+ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
-+ e_MODULE_ID_FM_1GMAC2, /**< FM 1G MAC #2 */
-+ e_MODULE_ID_FM_1GMAC3, /**< FM 1G MAC #3 */
-+ e_MODULE_ID_FM_1GMAC4, /**< FM 1G MAC #4 */
-+ e_MODULE_ID_FM_1GMAC5, /**< FM 1G MAC #5 */
-+ e_MODULE_ID_FM_1GMAC6, /**< FM 1G MAC #6 */
-+ e_MODULE_ID_FM_10GMAC1, /**< FM 10G MAC */
-+ e_MODULE_ID_FM_10GMAC2, /**< FM 10G MAC */
-+
-+ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
-+ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
-+ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
-+ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
-+ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
-+ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
-+ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
-+ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
-+ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
-+ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
-+ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
-+ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
-+
-+ e_MODULE_ID_PIC, /**< PIC */
-+ e_MODULE_ID_GPIO, /**< GPIO */
-+ e_MODULE_ID_SERDES, /**< SERDES */
-+ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
-+ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
-+
-+ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
-+
-+ e_MODULE_ID_DUMMY_LAST
-+} e_ModuleId;
-+
-+#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
-+
-+
-+#endif /* __PART_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/dpaa_integration_ext.h
-@@ -0,0 +1,213 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**
-+
-+ @File dpaa_integration_ext.h
-+
-+ @Description P1023 FM external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __DPAA_INTEGRATION_EXT_H
-+#define __DPAA_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+#define DPAA_VERSION 10
-+
-+typedef enum e_DpaaSwPortal {
-+ e_DPAA_SWPORTAL0 = 0,
-+ e_DPAA_SWPORTAL1,
-+ e_DPAA_SWPORTAL2,
-+ e_DPAA_SWPORTAL_DUMMY_LAST
-+} e_DpaaSwPortal;
-+
-+typedef enum {
-+ e_DPAA_DCPORTAL0 = 0,
-+ e_DPAA_DCPORTAL2,
-+ e_DPAA_DCPORTAL_DUMMY_LAST
-+} e_DpaaDcPortal;
-+
-+#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
-+#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
-+
-+/*****************************************************************************
-+ QMAN INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define QM_MAX_NUM_OF_POOL_CHANNELS 3
-+#define QM_MAX_NUM_OF_WQ 8
-+#define QM_MAX_NUM_OF_SWP_AS 2
-+#define QM_MAX_NUM_OF_CGS 64
-+#define QM_MAX_NUM_OF_FQIDS (16*MEGABYTE)
-+
-+typedef enum {
-+ e_QM_FQ_CHANNEL_SWPORTAL0 = 0,
-+ e_QM_FQ_CHANNEL_SWPORTAL1,
-+ e_QM_FQ_CHANNEL_SWPORTAL2,
-+
-+ e_QM_FQ_CHANNEL_POOL1 = 0x21,
-+ e_QM_FQ_CHANNEL_POOL2,
-+ e_QM_FQ_CHANNEL_POOL3,
-+
-+ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x40,
-+ e_QM_FQ_CHANNEL_FMAN0_SP1,
-+ e_QM_FQ_CHANNEL_FMAN0_SP2,
-+ e_QM_FQ_CHANNEL_FMAN0_SP3,
-+ e_QM_FQ_CHANNEL_FMAN0_SP4,
-+ e_QM_FQ_CHANNEL_FMAN0_SP5,
-+ e_QM_FQ_CHANNEL_FMAN0_SP6,
-+
-+
-+ e_QM_FQ_CHANNEL_CAAM = 0x80
-+} e_QmFQChannel;
-+
-+/*****************************************************************************
-+ BMAN INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define BM_MAX_NUM_OF_POOLS 8
-+
-+/*****************************************************************************
-+ SEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define SEC_NUM_OF_DECOS 2
-+#define SEC_ALL_DECOS_MASK 0x00000003
-+#define SEC_RNGB
-+#define SEC_NO_ESP_TRAILER_REMOVAL
-+
-+/*****************************************************************************
-+ FM INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define INTG_MAX_NUM_OF_FM 1
-+
-+/* Ports defines */
-+#define FM_MAX_NUM_OF_1G_MACS 2
-+#define FM_MAX_NUM_OF_10G_MACS 0
-+#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
-+#define FM_MAX_NUM_OF_OH_PORTS 5
-+
-+#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
-+
-+#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
-+
-+#define FM_MAX_NUM_OF_MACSECS 1
-+
-+#define FM_MACSEC_SUPPORT
-+
-+#define FM_LOW_END_RESTRICTION /* prevents the use of TX port 1 with OP port 0 */
-+
-+#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */
-+#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 2 /**< Number of Offline parsing port external BM pools per Rx port */
-+#define FM_PORT_NUM_OF_CONGESTION_GRPS 32 /**< Total number of congestion groups in QM */
-+#define FM_MAX_NUM_OF_SUB_PORTALS 7
-+
-+/* Rams defines */
-+#define FM_MURAM_SIZE (64*KILOBYTE)
-+#define FM_IRAM_SIZE(major, minor) (32 * KILOBYTE)
-+#define FM_NUM_OF_CTRL 2
-+
-+/* PCD defines */
-+#define FM_PCD_PLCR_NUM_ENTRIES 32 /**< Total number of policer profiles */
-+#define FM_PCD_KG_NUM_OF_SCHEMES 16 /**< Total number of KG schemes */
-+#define FM_PCD_MAX_NUM_OF_CLS_PLANS 128 /**< Number of classification plan entries. */
-+#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000240 /**< Number of bytes saved for patches */
-+#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
-+
-+/* RTC defines */
-+#define FM_RTC_NUM_OF_ALARMS 2
-+#define FM_RTC_NUM_OF_PERIODIC_PULSES 2
-+#define FM_RTC_NUM_OF_EXT_TRIGGERS 2
-+
-+/* QMI defines */
-+#define QMI_MAX_NUM_OF_TNUMS 15
-+
-+/* FPM defines */
-+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
-+
-+/* DMA defines */
-+#define DMA_THRESH_MAX_COMMQ 15
-+#define DMA_THRESH_MAX_BUF 7
-+
-+/* BMI defines */
-+#define BMI_MAX_NUM_OF_TASKS 64
-+#define BMI_MAX_NUM_OF_DMAS 16
-+#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
-+#define PORT_MAX_WEIGHT 4
-+
-+/*****************************************************************************
-+ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define NUM_OF_RX_SC 16
-+#define NUM_OF_TX_SC 16
-+
-+#define NUM_OF_SA_PER_RX_SC 2
-+#define NUM_OF_SA_PER_TX_SC 2
-+
-+/**************************************************************************//**
-+ @Description Enum for inter-module interrupts registration
-+*//***************************************************************************/
-+
-+/* 1023 unique features */
-+#define FM_QMI_NO_ECC_EXCEPTIONS
-+#define FM_CSI_CFED_LIMIT
-+#define FM_PEDANTIC_DMA
-+#define FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+#define FM_FIFO_ALLOCATION_ALG
-+#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
-+#define FM_HAS_TOTAL_DMAS
-+#define FM_KG_NO_IPPID_SUPPORT
-+#define FM_NO_GUARANTEED_RESET_VALUES
-+#define FM_MAC_RESET
-+
-+/* FM erratas */
-+#define FM_RX_PREAM_4_ERRATA_DTSEC_A001
-+#define FM_MAGIC_PACKET_UNRECOGNIZED_ERRATA_DTSEC2 /* No implementation, Out of LLD scope */
-+
-+#define FM_DEBUG_TRACE_FMAN_A004 /* No implementation, Out of LLD scope */
-+#define FM_INT_BUF_LEAK_FMAN_A005 /* No implementation, Out of LLD scope. App must avoid S/G */
-+
-+#define FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839
-+
-+/* #define FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
-+
-+/*
-+TKT056919 - axi12axi0 can hang if read request follows the single byte write on the very next cycle
-+TKT038900 - FM dma lockup occur due to AXI slave protocol violation
-+*/
-+#define FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
-+
-+
-+#endif /* __DPAA_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_ext.h
-@@ -0,0 +1,82 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+
-+ @File part_ext.h
-+
-+ @Description Definitions for the part (integration) module.
-+*//***************************************************************************/
-+
-+#ifndef __PART_EXT_H
-+#define __PART_EXT_H
-+
-+#include "std_ext.h"
-+#include "part_integration_ext.h"
-+
-+
-+#if !(defined(MPC8306) || \
-+ defined(MPC8309) || \
-+ defined(MPC834x) || \
-+ defined(MPC836x) || \
-+ defined(MPC832x) || \
-+ defined(MPC837x) || \
-+ defined(MPC8568) || \
-+ defined(MPC8569) || \
-+ defined(P1020) || \
-+ defined(P1021) || \
-+ defined(P1022) || \
-+ defined(P1023) || \
-+ defined(P2020) || \
-+ defined(P3041) || \
-+ defined(P4080) || \
-+ defined(P5020) || \
-+ defined(MSC814x))
-+#error "unable to proceed without chip-definition"
-+#endif
-+
-+
-+/**************************************************************************//*
-+ @Description Part data structure - must be contained in any integration
-+ data structure.
-+*//***************************************************************************/
-+typedef struct t_Part
-+{
-+ uint64_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
-+ /**< Returns the address of the module's memory map base. */
-+ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uint64_t baseAddress);
-+ /**< Returns the module's ID according to its memory map base. */
-+} t_Part;
-+
-+
-+#endif /* __PART_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P1023/part_integration_ext.h
-@@ -0,0 +1,635 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File part_integration_ext.h
-+
-+ @Description P1023 external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __PART_INTEGRATION_EXT_H
-+#define __PART_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+#include "dpaa_integration_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group 1023_chip_id P1023 Application Programming Interface
-+
-+ @Description P1023 Chip functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define INTG_MAX_NUM_OF_CORES 2
-+
-+
-+/**************************************************************************//**
-+ @Description Module types.
-+*//***************************************************************************/
-+typedef enum e_ModuleId
-+{
-+ e_MODULE_ID_LAW, /**< Local Access module */
-+ e_MODULE_ID_ECM, /**< e500 Coherency Module */
-+ e_MODULE_ID_DDR, /**< DDR memory controller */
-+ e_MODULE_ID_I2C_1, /**< I2C 1 */
-+ e_MODULE_ID_I2C_2, /**< I2C 2 */
-+ e_MODULE_ID_DUART_1, /**< DUART module 1 */
-+ e_MODULE_ID_DUART_2, /**< DUART module 2 */
-+ e_MODULE_ID_LBC, /**< Local bus memory controller module */
-+ e_MODULE_ID_PCIE_1, /**< PCI Express 1 controller module */
-+ e_MODULE_ID_PCIE_ATMU_1, /**< PCI 1 ATMU Window */
-+ e_MODULE_ID_PCIE_2, /**< PCI Express 2 controller module */
-+ e_MODULE_ID_PCIE_ATMU_2, /**< PCI 2 ATMU Window */
-+ e_MODULE_ID_PCIE_3, /**< PCI Express 3 controller module */
-+ e_MODULE_ID_PCIE_ATMU_3, /**< PCI 3 ATMU Window */
-+ e_MODULE_ID_MSI, /**< MSI registers */
-+ e_MODULE_ID_L2_SRAM, /**< L2/SRAM Memory-Mapped controller module */
-+ e_MODULE_ID_DMA_1, /**< DMA controller 1 */
-+ e_MODULE_ID_DMA_2, /**< DMA controller 2 */
-+ e_MODULE_ID_EPIC, /**< Programmable interrupt controller */
-+ e_MODULE_ID_ESPI, /**< ESPI module */
-+ e_MODULE_ID_GPIO, /**< General Purpose I/O */
-+ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
-+ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
-+ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
-+ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
-+ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
-+ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
-+ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
-+ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
-+ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
-+ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
-+ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
-+ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
-+ e_MODULE_ID_USB_DR_1, /**< USB 2.0 module 1 */
-+ e_MODULE_ID_USB_DR_2, /**< USB 2.0 module 2 */
-+ e_MODULE_ID_ETSEC_MII_MNG, /**< MII MNG registers */
-+ e_MODULE_ID_ETSEC_1, /**< ETSEC module 1 */
-+ e_MODULE_ID_ETSEC_2, /**< ETSEC module 2 */
-+ e_MODULE_ID_GUTS, /**< Serial DMA */
-+ e_MODULE_ID_PM, /**< Performance Monitor module */
-+ e_MODULE_ID_QM, /**< Queue manager module */
-+ e_MODULE_ID_BM, /**< Buffer manager module */
-+ e_MODULE_ID_QM_CE_PORTAL,
-+ e_MODULE_ID_QM_CI_PORTAL,
-+ e_MODULE_ID_BM_CE_PORTAL,
-+ e_MODULE_ID_BM_CI_PORTAL,
-+ e_MODULE_ID_FM, /**< Frame manager #1 module */
-+ e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */
-+ e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */
-+ e_MODULE_ID_FM_BMI, /**< FM BMI block */
-+ e_MODULE_ID_FM_QMI, /**< FM QMI block */
-+ e_MODULE_ID_FM_PRS, /**< FM parser block */
-+ e_MODULE_ID_FM_PORT_HO0, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO1, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM_PORT_1GRx0, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx0, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM_PLCR, /**< FM Policer */
-+ e_MODULE_ID_FM_KG, /**< FM Keygen */
-+ e_MODULE_ID_FM_DMA, /**< FM DMA */
-+ e_MODULE_ID_FM_FPM, /**< FM FPM */
-+ e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */
-+ e_MODULE_ID_FM_1GMDIO0, /**< FM 1G MDIO MAC 0*/
-+ e_MODULE_ID_FM_1GMDIO1, /**< FM 1G MDIO MAC 1*/
-+ e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
-+ e_MODULE_ID_FM_RISC0, /**< FM risc #0 */
-+ e_MODULE_ID_FM_RISC1, /**< FM risc #1 */
-+ e_MODULE_ID_FM_1GMAC0, /**< FM 1G MAC #0 */
-+ e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */
-+ e_MODULE_ID_FM_MACSEC, /**< FM MACSEC */
-+
-+ e_MODULE_ID_DUMMY_LAST
-+} e_ModuleId;
-+
-+#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
-+
-+
-+#define P1023_OFFSET_LAW 0x00000C08
-+#define P1023_OFFSET_ECM 0x00001000
-+#define P1023_OFFSET_DDR 0x00002000
-+#define P1023_OFFSET_I2C1 0x00003000
-+#define P1023_OFFSET_I2C2 0x00003100
-+#define P1023_OFFSET_DUART1 0x00004500
-+#define P1023_OFFSET_DUART2 0x00004600
-+#define P1023_OFFSET_LBC 0x00005000
-+#define P1023_OFFSET_ESPI 0x00007000
-+#define P1023_OFFSET_PCIE2 0x00009000
-+#define P1023_OFFSET_PCIE2_ATMU 0x00009C00
-+#define P1023_OFFSET_PCIE1 0x0000A000
-+#define P1023_OFFSET_PCIE1_ATMU 0x0000AC00
-+#define P1023_OFFSET_PCIE3 0x0000B000
-+#define P1023_OFFSET_PCIE3_ATMU 0x0000BC00
-+#define P1023_OFFSET_DMA2 0x0000C100
-+#define P1023_OFFSET_GPIO 0x0000F000
-+#define P1023_OFFSET_L2_SRAM 0x00020000
-+#define P1023_OFFSET_DMA1 0x00021100
-+#define P1023_OFFSET_USB1 0x00022000
-+#define P1023_OFFSET_SEC_GEN 0x00030000
-+#define P1023_OFFSET_SEC_JQ0 0x00031000
-+#define P1023_OFFSET_SEC_JQ1 0x00032000
-+#define P1023_OFFSET_SEC_JQ2 0x00033000
-+#define P1023_OFFSET_SEC_JQ3 0x00034000
-+#define P1023_OFFSET_SEC_RTIC 0x00036000
-+#define P1023_OFFSET_SEC_QI 0x00037000
-+#define P1023_OFFSET_SEC_DECO0_CCB0 0x00038000
-+#define P1023_OFFSET_SEC_DECO1_CCB1 0x00039000
-+#define P1023_OFFSET_SEC_DECO2_CCB2 0x0003a000
-+#define P1023_OFFSET_SEC_DECO3_CCB3 0x0003b000
-+#define P1023_OFFSET_SEC_DECO4_CCB4 0x0003c000
-+#define P1023_OFFSET_PIC 0x00040000
-+#define P1023_OFFSET_MSI 0x00041600
-+#define P1023_OFFSET_AXI 0x00081000
-+#define P1023_OFFSET_QM 0x00088000
-+#define P1023_OFFSET_BM 0x0008A000
-+#define P1022_OFFSET_PM 0x000E1000
-+
-+#define P1023_OFFSET_GUTIL 0x000E0000
-+#define P1023_OFFSET_PM 0x000E1000
-+#define P1023_OFFSET_DEBUG 0x000E2000
-+#define P1023_OFFSET_SERDES 0x000E3000
-+#define P1023_OFFSET_ROM 0x000F0000
-+#define P1023_OFFSET_FM 0x00100000
-+
-+#define P1023_OFFSET_FM_MURAM (P1023_OFFSET_FM + 0x00000000)
-+#define P1023_OFFSET_FM_BMI (P1023_OFFSET_FM + 0x00080000)
-+#define P1023_OFFSET_FM_QMI (P1023_OFFSET_FM + 0x00080400)
-+#define P1023_OFFSET_FM_PRS (P1023_OFFSET_FM + 0x00080800)
-+#define P1023_OFFSET_FM_PORT_HO0 (P1023_OFFSET_FM + 0x00081000)
-+#define P1023_OFFSET_FM_PORT_HO1 (P1023_OFFSET_FM + 0x00082000)
-+#define P1023_OFFSET_FM_PORT_HO2 (P1023_OFFSET_FM + 0x00083000)
-+#define P1023_OFFSET_FM_PORT_HO3 (P1023_OFFSET_FM + 0x00084000)
-+#define P1023_OFFSET_FM_PORT_HO4 (P1023_OFFSET_FM + 0x00085000)
-+#define P1023_OFFSET_FM_PORT_1GRX0 (P1023_OFFSET_FM + 0x00088000)
-+#define P1023_OFFSET_FM_PORT_1GRX1 (P1023_OFFSET_FM + 0x00089000)
-+#define P1023_OFFSET_FM_PORT_1GTX0 (P1023_OFFSET_FM + 0x000A8000)
-+#define P1023_OFFSET_FM_PORT_1GTX1 (P1023_OFFSET_FM + 0x000A9000)
-+#define P1023_OFFSET_FM_PLCR (P1023_OFFSET_FM + 0x000C0000)
-+#define P1023_OFFSET_FM_KG (P1023_OFFSET_FM + 0x000C1000)
-+#define P1023_OFFSET_FM_DMA (P1023_OFFSET_FM + 0x000C2000)
-+#define P1023_OFFSET_FM_FPM (P1023_OFFSET_FM + 0x000C3000)
-+#define P1023_OFFSET_FM_IRAM (P1023_OFFSET_FM + 0x000C4000)
-+#define P1023_OFFSET_FM_PRS_IRAM (P1023_OFFSET_FM + 0x000C7000)
-+#define P1023_OFFSET_FM_RISC0 (P1023_OFFSET_FM + 0x000D0000)
-+#define P1023_OFFSET_FM_RISC1 (P1023_OFFSET_FM + 0x000D0400)
-+#define P1023_OFFSET_FM_MACSEC (P1023_OFFSET_FM + 0x000D8000)
-+#define P1023_OFFSET_FM_1GMAC0 (P1023_OFFSET_FM + 0x000E0000)
-+#define P1023_OFFSET_FM_1GMDIO0 (P1023_OFFSET_FM + 0x000E1120)
-+#define P1023_OFFSET_FM_1GMAC1 (P1023_OFFSET_FM + 0x000E2000)
-+#define P1023_OFFSET_FM_1GMDIO1 (P1023_OFFSET_FM + 0x000E3000)
-+#define P1023_OFFSET_FM_RTC (P1023_OFFSET_FM + 0x000FE000)
-+
-+/* Offsets relative to QM or BM portals base */
-+#define P1023_OFFSET_PORTALS_CE_AREA 0x00000000 /* cache enabled area */
-+#define P1023_OFFSET_PORTALS_CI_AREA 0x00100000 /* cache inhibited area */
-+
-+#define P1023_OFFSET_PORTALS_CE(portal) (P1023_OFFSET_PORTALS_CE_AREA + 0x4000 * (portal))
-+#define P1023_OFFSET_PORTALS_CI(portal) (P1023_OFFSET_PORTALS_CI_AREA + 0x1000 * (portal))
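/*
 * Illustrative sketch (not part of the original header): the two macros above
 * stride 0x4000 bytes per portal in the cache-enabled area and 0x1000 bytes per
 * portal in the cache-inhibited area, relative to the QM/BM portals base.  The
 * helpers below only demonstrate the arithmetic; the portals base address itself
 * is board/setup specific and is an assumption of the example.
 */
static inline uintptr_t example_portal_ce_addr(uintptr_t portals_base, unsigned int portal)
{
    /* e.g. portal 2 -> portals_base + 0x8000 */
    return portals_base + P1023_OFFSET_PORTALS_CE(portal);
}

static inline uintptr_t example_portal_ci_addr(uintptr_t portals_base, unsigned int portal)
{
    /* e.g. portal 2 -> portals_base + 0x102000 */
    return portals_base + P1023_OFFSET_PORTALS_CI(portal);
}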
-+
-+/**************************************************************************//**
-+ @Description Transaction source ID (for memory controllers error reporting).
-+*//***************************************************************************/
-+typedef enum e_TransSrc
-+{
-+ e_TRANS_SRC_PCIE_2 = 0x01, /**< PCIe port 2 */
-+ e_TRANS_SRC_PCIE_1 = 0x02, /**< PCIe port 1 */
-+ e_TRANS_SRC_PCIE_3 = 0x03, /**< PCIe port 3 */
-+ e_TRANS_SRC_LBC = 0x04, /**< Enhanced local bus */
-+ e_TRANS_SRC_DPAA_SW_PORTALS = 0x0E, /**< DPAA software portals or SRAM */
-+ e_TRANS_SRC_DDR = 0x0F, /**< DDR controller */
-+ e_TRANS_SRC_CORE_INS_FETCH = 0x10, /**< Processor (instruction) */
-+ e_TRANS_SRC_CORE_DATA = 0x11, /**< Processor (data) */
-+ e_TRANS_SRC_DMA = 0x15 /**< DMA */
-+} e_TransSrc;
-+
-+/**************************************************************************//**
-+ @Description Local Access Window Target interface ID
-+*//***************************************************************************/
-+typedef enum e_P1023LawTargetId
-+{
-+ e_P1023_LAW_TARGET_PCIE_2 = 0x01, /**< PCI Express 2 target interface */
-+ e_P1023_LAW_TARGET_PCIE_1 = 0x02, /**< PCI Express 1 target interface */
-+ e_P1023_LAW_TARGET_PCIE_3 = 0x03, /**< PCI Express 3 target interface */
-+ e_P1023_LAW_TARGET_LBC = 0x04, /**< Local bus target interface */
-+ e_P1023_LAW_TARGET_QM_PORTALS = 0x0E, /**< Queue Manager Portals */
-+ e_P1023_LAW_TARGET_BM_PORTALS = 0x0E, /**< Buffer Manager Portals */
-+ e_P1023_LAW_TARGET_SRAM = 0x0E, /**< SRAM scratchpad */
-+ e_P1023_LAW_TARGET_DDR = 0x0F, /**< DDR target interface */
-+ e_P1023_LAW_TARGET_NONE = 0xFF /**< Invalid target interface */
-+} e_P1023LawTargetId;
-+
-+
-+/**************************************************************************//**
-+ @Group 1023_init_grp P1023 Initialization Unit
-+
-+ @Description P1023 initialization unit API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description Part ID and revision number
-+*//***************************************************************************/
-+typedef enum e_P1023DeviceName
-+{
-+ e_P1023_REV_INVALID = 0x00000000, /**< Invalid revision */
-+ e_SC1023_REV_1_0 = (int)0x80FC0010, /**< SC1023 rev 1.0 */
-+ e_SC1023_REV_1_1 = (int)0x80FC0011, /**< SC1023 rev 1.1 */
-+ e_P1023_REV_1_0 = (int)0x80FE0010, /**< P1023 rev 1.0 with security */
-+ e_P1023_REV_1_1 = (int)0x80FE0011, /**< P1023 rev 1.1 with security */
-+ e_P1017_REV_1_1 = (int)0x80FF0011, /**< P1017 rev 1.1 with security */
-+ e_P1023_REV_1_0_NO_SEC = (int)0x80F60010, /**< P1023 rev 1.0 without security */
-+ e_P1023_REV_1_1_NO_SEC = (int)0x80F60011, /**< P1023 rev 1.1 without security */
-+ e_P1017_REV_1_1_NO_SEC = (int)0x80F70011 /**< P1017 rev 1.1 without security */
-+} e_P1023DeviceName;
-+
-+/**************************************************************************//**
-+ @Description structure representing P1023 initialization parameters
-+*//***************************************************************************/
-+typedef struct t_P1023Params
-+{
-+ uintptr_t ccsrBaseAddress; /**< CCSR base address (virtual) */
-+ uintptr_t bmPortalsBaseAddress; /**< Portals base address (virtual) */
-+ uintptr_t qmPortalsBaseAddress; /**< Portals base address (virtual) */
-+} t_P1023Params;
-+
-+/**************************************************************************//**
-+ @Function P1023_ConfigAndInit
-+
-+ @Description General initialization of the chip registers.
-+
-+ @Param[in] p_P1023Params - A pointer to the structure of initialization parameters
-+
-+ @Return A handle to the P1023 data structure.
-+*//***************************************************************************/
-+t_Handle P1023_ConfigAndInit(t_P1023Params *p_P1023Params);
-+
-+/**************************************************************************//**
-+ @Function P1023_Free
-+
-+ @Description Free all resources.
-+
-+ @Param[in] h_P1023 - The handle of the initialized P1023 object.
-+
-+ @Return E_OK on success; Other value otherwise.
-+*//***************************************************************************/
-+t_Error P1023_Free(t_Handle h_P1023);
-+
-+/**************************************************************************//**
-+ @Function P1023_GetRevInfo
-+
-+ @Description This routine enables access to chip and revision information.
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+
-+ @Return Part ID and revision.
-+*//***************************************************************************/
-+e_P1023DeviceName P1023_GetRevInfo(uintptr_t gutilBase);
-+
-+/**************************************************************************//**
-+ @Function P1023_GetE500Factor
-+
-+ @Description Returns E500 core clock multiplication factor.
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+ @Param[in] coreId - Id of the requested core.
-+ @Param[out] p_E500MulFactor - Returns E500 to CCB multiplication factor.
-+ @Param[out] p_E500DivFactor - Returns E500 to CCB division factor.
-+
-+ @Return E_OK on success; Other value otherwise.
-+*//***************************************************************************/
-+t_Error P1023_GetE500Factor(uintptr_t gutilBase,
-+ uint32_t coreId,
-+ uint32_t *p_E500MulFactor,
-+ uint32_t *p_E500DivFactor);
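/*
 * Minimal usage sketch (not part of the original API documentation), assuming
 * the returned factors relate the core clock to the CCB clock as
 * core = ccb * mul / div; the CCB clock value is a caller-supplied assumption.
 */
static inline uint64_t example_core_clock_hz(uintptr_t gutilBase, uint32_t coreId,
                                             uint64_t ccbClockHz)
{
    uint32_t mul = 1, div = 1;

    if (P1023_GetE500Factor(gutilBase, coreId, &mul, &div) != E_OK)
        return 0; /* caller decides how to report the failure */

    return (ccbClockHz * mul) / div;
}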
-+
-+/**************************************************************************//**
-+ @Function P1023_GetFmFactor
-+
-+ @Description Returns the FM clock multiplication and division factors. (The value
-+ is returned as two integer parameters to avoid using a float parameter.)
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+ @Param[out] p_FmMulFactor - Returns the FM clock multiplication factor.
-+ @Param[out] p_FmDivFactor - Returns the FM clock division factor.
-+
-+ @Return E_OK on success; Other value otherwise.
-+*//***************************************************************************/
-+t_Error P1023_GetFmFactor(uintptr_t gutilBase, uint32_t *p_FmMulFactor, uint32_t *p_FmDivFactor);
-+
-+/**************************************************************************//**
-+ @Function P1023_GetCcbFactor
-+
-+ @Description Returns the system (CCB) clock multiplication factor.
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+
-+ @Return System multiplication factor.
-+*//***************************************************************************/
-+uint32_t P1023_GetCcbFactor(uintptr_t gutilBase);
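/*
 * Minimal sketch (not from the original documentation): the platform (CCB)
 * clock is typically the board input clock multiplied by the factor returned
 * above; the 66.67 MHz sysclk used here is only an illustrative assumption.
 */
static inline uint64_t example_ccb_clock_hz(uintptr_t gutilBase)
{
    uint64_t sysclkHz = 66666667ULL; /* board-specific input clock, assumed */

    return sysclkHz * P1023_GetCcbFactor(gutilBase);
}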
-+
-+#if 0
-+/**************************************************************************//**
-+ @Function P1023_GetDdrFactor
-+
-+ @Description Returns the multiplication factor of the input clock for the DDR clock.
-+ Note: assumes the ddr_in_clk is identical to the sys_in_clk.
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+ @Param p_DdrMulFactor - Returns the DDR input clock multiplication factor.
-+ @Param p_DdrDivFactor - Returns the DDR clock division factor.
-+
-+ @Return E_OK on success; Other value otherwise.
-+*//***************************************************************************/
-+t_Error P1023_GetDdrFactor( uintptr_t gutilBase,
-+ uint32_t *p_DdrMulFactor,
-+ uint32_t *p_DdrDivFactor);
-+
-+/**************************************************************************//**
-+ @Function P1023_GetDdrType
-+
-+ @Description Returns the DDR type (DDR1/DDR2/DDR3).
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+ @Param p_DdrType - (Out) returns DDR type DDR1/DDR2/DDR3.
-+
-+ @Return E_OK on success; Other value otherwise.
-+*//***************************************************************************/
-+t_Error P1023_GetDdrType(uintptr_t gutilBase, e_DdrType *p_DdrType);
-+#endif
-+
-+/** @} */ /* end of 1023_init_grp group */
-+/** @} */ /* end of 1023_grp group */
-+
-+#define CORE_E500V2
-+
-+#if 0 /* using unified values */
-+/*****************************************************************************
-+ INTEGRATION-SPECIFIC MODULE CODES
-+******************************************************************************/
-+#define MODULE_UNKNOWN 0x00000000
-+#define MODULE_MEM 0x00010000
-+#define MODULE_MM 0x00020000
-+#define MODULE_CORE 0x00030000
-+#define MODULE_P1023 0x00040000
-+#define MODULE_MII 0x00050000
-+#define MODULE_PM 0x00060000
-+#define MODULE_MMU 0x00070000
-+#define MODULE_PIC 0x00080000
-+#define MODULE_L2_CACHE 0x00090000
-+#define MODULE_DUART 0x000a0000
-+#define MODULE_SERDES 0x000b0000
-+#define MODULE_PIO 0x000c0000
-+#define MODULE_QM 0x000d0000
-+#define MODULE_BM 0x000e0000
-+#define MODULE_SEC 0x000f0000
-+#define MODULE_FM 0x00100000
-+#define MODULE_FM_MURAM 0x00110000
-+#define MODULE_FM_PCD 0x00120000
-+#define MODULE_FM_RTC 0x00130000
-+#define MODULE_FM_MAC 0x00140000
-+#define MODULE_FM_PORT 0x00150000
-+#define MODULE_FM_MACSEC 0x00160000
-+#define MODULE_FM_MACSEC_SECY 0x00170000
-+#define MODULE_FM_SP 0x00280000
-+#define MODULE_ECM 0x00190000
-+#define MODULE_DMA 0x001a0000
-+#define MODULE_DDR 0x001b0000
-+#define MODULE_LAW 0x001c0000
-+#define MODULE_LBC 0x001d0000
-+#define MODULE_I2C 0x001e0000
-+#define MODULE_ESPI 0x001f0000
-+#define MODULE_PCI 0x00200000
-+#define MODULE_DPA_PORT 0x00210000
-+#define MODULE_USB 0x00220000
-+#endif /* using unified values */
-+
-+/*****************************************************************************
-+ LBC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+/**************************************************************************//**
-+ @Group lbc_exception_grp LBC Exception Unit
-+
-+ @Description LBC Exception unit API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Anchor lbc_exbm
-+
-+ @Collection LBC Errors Bit Mask
-+
-+ These errors are reported through the exceptions callback.
-+ The values can be or'ed in any combination in the errors mask
-+ parameter of the errors report structure.
-+
-+ These errors can also be passed as a bit-mask to
-+ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
-+ for enabling or disabling error checking.
-+ @{
-+*//***************************************************************************/
-+#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
-+#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
-+#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
-+#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
-+
-+#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
-+ LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT)
-+ /**< All possible errors */
-+/* @} */
-+/** @} */ /* end of lbc_exception_grp group */
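/*
 * A minimal sketch of composing the error bits above into a mask, as described
 * for LBC_EnableErrorChecking()/LBC_DisableErrorChecking(); those prototypes are
 * not part of this header, so only the mask composition is shown here.
 */
static const uint32_t example_lbc_error_mask =
    LBC_ERR_BUS_MONITOR | LBC_ERR_WRITE_PROTECT; /* any OR-combination, or LBC_ERR_ALL */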
-+
-+#define LBC_NUM_OF_BANKS 2
-+#define LBC_MAX_CS_SIZE 0x0000000100000000LL
-+#define LBC_ATOMIC_OPERATION_SUPPORT
-+#define LBC_PARITY_SUPPORT
-+#define LBC_ADDRESS_SHIFT_SUPPORT
-+#define LBC_ADDRESS_HOLD_TIME_CTRL
-+#define LBC_HIGH_CLK_DIVIDERS
-+#define LBC_FCM_AVAILABLE
-+
-+
-+/*****************************************************************************
-+ LAW INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define LAW_ARCH_CCB
-+#define LAW_NUM_OF_WINDOWS 12
-+#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4KB */
-+#define LAW_MAX_WINDOW_SIZE 0x0000001000000000LL /**< 32GB */
-+
-+
-+/*****************************************************************************
-+ SPI INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define SPI_NUM_OF_CONTROLLERS 1
-+
-+/*****************************************************************************
-+ PCI/PCIe INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+
-+#define PCI_MAX_INBOUND_WINDOWS_NUM 4
-+#define PCI_MAX_OUTBOUND_WINDOWS_NUM 5
-+
-+/**************************************************************************//**
-+ @Description Target interface of an inbound window
-+*//***************************************************************************/
-+typedef enum e_PciTargetInterface
-+{
-+ e_PCI_TARGET_PCIE_2 = 0x1, /**< PCI Express target interface 2 */
-+ e_PCI_TARGET_PCIE_1 = 0x2, /**< PCI Express target interface 1 */
-+ e_PCI_TARGET_PCIE_3 = 0x3, /**< PCI Express target interface 3 */
-+ e_PCI_TARGET_LOCAL_MEMORY = 0xF /**< Local Memory (DDR SDRAM, Local Bus, SRAM) target interface */
-+
-+} e_PciTargetInterface;
-+
-+/*****************************************************************************
-+ DDR INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define DDR_NUM_OF_VALID_CS 2
-+
-+/*****************************************************************************
-+ SEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define SEC_ERRATA_STAT_REGS_UNUSABLE
-+
-+/*****************************************************************************
-+ DMA INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define DMA_NUM_OF_CONTROLLERS 2
-+
-+
-+
-+
-+/*****************************************************************************
-+ 1588 INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define PTP_V2
-+
-+/**************************************************************************//**
-+ @Function P1023_GetMuxControlReg
-+
-+ @Description Returns the value of PMUXCR (Alternate Function Signal Multiplex
-+ Control Register)
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+
-+ @Return Value of PMUXCR
-+*//***************************************************************************/
-+uint32_t P1023_GetMuxControlReg(uintptr_t gutilBase);
-+
-+/**************************************************************************//**
-+ @Function P1023_SetMuxControlReg
-+
-+ @Description Sets the value of PMUXCR (Alternate Function Signal Multiplex
-+ Control Register)
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+ @Param[in] val - the new value for PMUXCR.
-+
-+ @Return None
-+*//***************************************************************************/
-+void P1023_SetMuxControlReg(uintptr_t gutilBase, uint32_t val);
-+
-+/**************************************************************************//**
-+ @Function P1023_GetDeviceDisableStatusRegister
-+
-+ @Description Returns the value of DEVDISR (Device Disable Register)
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+
-+ @Return Value of DEVDISR
-+*//***************************************************************************/
-+uint32_t P1023_GetDeviceDisableStatusRegister(uintptr_t gutilBase);
-+
-+/**************************************************************************//**
-+ @Function P1023_GetPorDeviceStatusRegister
-+
-+ @Description Returns the value of POR Device Status Register
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+
-+ @Return POR Device Status Register
-+*//***************************************************************************/
-+uint32_t P1023_GetPorDeviceStatusRegister(uintptr_t gutilBase);
-+
-+/**************************************************************************//**
-+ @Function P1023_GetPorBootModeStatusRegister
-+
-+ @Description Returns the value of POR Boot Mode Status Register
-+
-+ @Param[in] gutilBase - Base address of P1023 GUTIL registers.
-+
-+ @Return POR Boot Mode Status Register value
-+*//***************************************************************************/
-+uint32_t P1023_GetPorBootModeStatusRegister(uintptr_t gutilBase);
-+
-+
-+#define PORDEVSR_SGMII1_DIS 0x10000000
-+#define PORDEVSR_SGMII2_DIS 0x08000000
-+#define PORDEVSR_ECP1 0x02000000
-+#define PORDEVSR_IO_SEL 0x00780000
-+#define PORDEVSR_IO_SEL_SHIFT 19
-+#define PORBMSR_HA 0x00070000
-+#define PORBMSR_HA_SHIFT 16
-+
-+#define DEVDISR_QM_BM 0x80000000
-+#define DEVDISR_FM 0x40000000
-+#define DEVDISR_PCIE1 0x20000000
-+#define DEVDISR_MAC_SEC 0x10000000
-+#define DEVDISR_ELBC 0x08000000
-+#define DEVDISR_PCIE2 0x04000000
-+#define DEVDISR_PCIE3 0x02000000
-+#define DEVDISR_CAAM 0x01000000
-+#define DEVDISR_USB0 0x00800000
-+#define DEVDISR_1588 0x00020000
-+#define DEVDISR_CORE0 0x00008000
-+#define DEVDISR_TB0 0x00004000
-+#define DEVDISR_CORE1 0x00002000
-+#define DEVDISR_TB1 0x00001000
-+#define DEVDISR_DMA1 0x00000400
-+#define DEVDISR_DMA2 0x00000200
-+#define DEVDISR_DDR 0x00000010
-+#define DEVDISR_TSEC1 0x00000080
-+#define DEVDISR_TSEC2 0x00000040
-+#define DEVDISR_SPI 0x00000008
-+#define DEVDISR_I2C 0x00000004
-+#define DEVDISR_DUART 0x00000002
-+
-+
-+#endif /* __PART_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h
-@@ -0,0 +1,276 @@
-+/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File dpaa_integration_ext.h
-+
-+ @Description P3040/P4080/P5020 FM external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __DPAA_INTEGRATION_EXT_H
-+#define __DPAA_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+#define DPAA_VERSION 10
-+
-+typedef enum {
-+ e_DPAA_SWPORTAL0 = 0,
-+ e_DPAA_SWPORTAL1,
-+ e_DPAA_SWPORTAL2,
-+ e_DPAA_SWPORTAL3,
-+ e_DPAA_SWPORTAL4,
-+ e_DPAA_SWPORTAL5,
-+ e_DPAA_SWPORTAL6,
-+ e_DPAA_SWPORTAL7,
-+ e_DPAA_SWPORTAL8,
-+ e_DPAA_SWPORTAL9,
-+ e_DPAA_SWPORTAL_DUMMY_LAST
-+} e_DpaaSwPortal;
-+
-+typedef enum {
-+ e_DPAA_DCPORTAL0 = 0,
-+ e_DPAA_DCPORTAL1,
-+ e_DPAA_DCPORTAL2,
-+ e_DPAA_DCPORTAL3,
-+ e_DPAA_DCPORTAL4,
-+ e_DPAA_DCPORTAL_DUMMY_LAST
-+} e_DpaaDcPortal;
-+
-+#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST
-+#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST
-+
-+/*****************************************************************************
-+ QMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define QM_MAX_NUM_OF_POOL_CHANNELS 15 /**< Total number of pool channels */
-+#define QM_MAX_NUM_OF_WQ 8 /**< Number of work queues per channel */
-+#define QM_MAX_NUM_OF_SWP_AS 4
-+#define QM_MAX_NUM_OF_CGS 256 /**< Number of congestion groups */
-+#define QM_MAX_NUM_OF_FQIDS (16 * MEGABYTE) /**< FQIDs range - 24 bits */
-+
-+/**************************************************************************//**
-+ @Description Work Queue Channel assignments in QMan.
-+*//***************************************************************************/
-+typedef enum
-+{
-+ e_QM_FQ_CHANNEL_SWPORTAL0 = 0, /**< Dedicated channels serviced by software portals 0 to 9 */
-+ e_QM_FQ_CHANNEL_SWPORTAL1,
-+ e_QM_FQ_CHANNEL_SWPORTAL2,
-+ e_QM_FQ_CHANNEL_SWPORTAL3,
-+ e_QM_FQ_CHANNEL_SWPORTAL4,
-+ e_QM_FQ_CHANNEL_SWPORTAL5,
-+ e_QM_FQ_CHANNEL_SWPORTAL6,
-+ e_QM_FQ_CHANNEL_SWPORTAL7,
-+ e_QM_FQ_CHANNEL_SWPORTAL8,
-+ e_QM_FQ_CHANNEL_SWPORTAL9,
-+
-+ e_QM_FQ_CHANNEL_POOL1 = 0x21, /**< Pool channels that can be serviced by any of the software portals */
-+ e_QM_FQ_CHANNEL_POOL2,
-+ e_QM_FQ_CHANNEL_POOL3,
-+ e_QM_FQ_CHANNEL_POOL4,
-+ e_QM_FQ_CHANNEL_POOL5,
-+ e_QM_FQ_CHANNEL_POOL6,
-+ e_QM_FQ_CHANNEL_POOL7,
-+ e_QM_FQ_CHANNEL_POOL8,
-+ e_QM_FQ_CHANNEL_POOL9,
-+ e_QM_FQ_CHANNEL_POOL10,
-+ e_QM_FQ_CHANNEL_POOL11,
-+ e_QM_FQ_CHANNEL_POOL12,
-+ e_QM_FQ_CHANNEL_POOL13,
-+ e_QM_FQ_CHANNEL_POOL14,
-+ e_QM_FQ_CHANNEL_POOL15,
-+
-+ e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x40, /**< Dedicated channels serviced by Direct Connect Portal 0:
-+ connected to FMan 0; assigned in incrementing order to
-+ each sub-portal (SP) in the portal */
-+ e_QM_FQ_CHANNEL_FMAN0_SP1,
-+ e_QM_FQ_CHANNEL_FMAN0_SP2,
-+ e_QM_FQ_CHANNEL_FMAN0_SP3,
-+ e_QM_FQ_CHANNEL_FMAN0_SP4,
-+ e_QM_FQ_CHANNEL_FMAN0_SP5,
-+ e_QM_FQ_CHANNEL_FMAN0_SP6,
-+ e_QM_FQ_CHANNEL_FMAN0_SP7,
-+ e_QM_FQ_CHANNEL_FMAN0_SP8,
-+ e_QM_FQ_CHANNEL_FMAN0_SP9,
-+ e_QM_FQ_CHANNEL_FMAN0_SP10,
-+ e_QM_FQ_CHANNEL_FMAN0_SP11,
-+/* The FMan 1 channels below exist only on devices with a second FMan (e.g. P4080); this is a difference between the P5020 and the P4080 */
-+ e_QM_FQ_CHANNEL_FMAN1_SP0 = 0x60,
-+ e_QM_FQ_CHANNEL_FMAN1_SP1,
-+ e_QM_FQ_CHANNEL_FMAN1_SP2,
-+ e_QM_FQ_CHANNEL_FMAN1_SP3,
-+ e_QM_FQ_CHANNEL_FMAN1_SP4,
-+ e_QM_FQ_CHANNEL_FMAN1_SP5,
-+ e_QM_FQ_CHANNEL_FMAN1_SP6,
-+ e_QM_FQ_CHANNEL_FMAN1_SP7,
-+ e_QM_FQ_CHANNEL_FMAN1_SP8,
-+ e_QM_FQ_CHANNEL_FMAN1_SP9,
-+ e_QM_FQ_CHANNEL_FMAN1_SP10,
-+ e_QM_FQ_CHANNEL_FMAN1_SP11,
-+
-+ e_QM_FQ_CHANNEL_CAAM = 0x80, /**< Dedicated channel serviced by Direct Connect Portal 2:
-+ connected to SEC 4.x */
-+
-+ e_QM_FQ_CHANNEL_PME = 0xA0, /**< Dedicated channel serviced by Direct Connect Portal 3:
-+ connected to PME */
-+ e_QM_FQ_CHANNEL_RAID = 0xC0 /**< Dedicated channel serviced by Direct Connect Portal 4:
-+ connected to RAID */
-+} e_QmFQChannel;
-+
-+/*****************************************************************************
-+ BMan INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define BM_MAX_NUM_OF_POOLS 64 /**< Number of buffers pools */
-+
-+
-+/*****************************************************************************
-+ FM INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define INTG_MAX_NUM_OF_FM 2
-+
-+/* Ports defines */
-+#define FM_MAX_NUM_OF_1G_MACS 5
-+#define FM_MAX_NUM_OF_10G_MACS 1
-+#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS + FM_MAX_NUM_OF_10G_MACS)
-+#define FM_MAX_NUM_OF_OH_PORTS 7
-+
-+#define FM_MAX_NUM_OF_1G_RX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_RX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS + FM_MAX_NUM_OF_1G_RX_PORTS)
-+
-+#define FM_MAX_NUM_OF_1G_TX_PORTS FM_MAX_NUM_OF_1G_MACS
-+#define FM_MAX_NUM_OF_10G_TX_PORTS FM_MAX_NUM_OF_10G_MACS
-+#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS + FM_MAX_NUM_OF_1G_TX_PORTS)
-+
-+#define FM_PORT_MAX_NUM_OF_EXT_POOLS 8 /**< Number of external BM pools per Rx port */
-+#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */
-+#define FM_MAX_NUM_OF_SUB_PORTALS 12
-+#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0
-+
-+/* Rams defines */
-+#define FM_MURAM_SIZE (160*KILOBYTE)
-+#define FM_IRAM_SIZE(major, minor) (64 * KILOBYTE)
-+#define FM_NUM_OF_CTRL 2
-+
-+/* PCD defines */
-+#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */
-+#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */
-+#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. */
-+#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000200 /**< Number of bytes saved for patches */
-+#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
-+
-+/* RTC defines */
-+#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */
-+#define FM_RTC_NUM_OF_PERIODIC_PULSES 2 /**< RTC number of periodic pulses */
-+#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */
-+
-+/* QMI defines */
-+#define QMI_MAX_NUM_OF_TNUMS 64
-+#define QMI_DEF_TNUMS_THRESH 48
-+
-+/* FPM defines */
-+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
-+
-+/* DMA defines */
-+#define DMA_THRESH_MAX_COMMQ 31
-+#define DMA_THRESH_MAX_BUF 127
-+
-+/* BMI defines */
-+#define BMI_MAX_NUM_OF_TASKS 128
-+#define BMI_MAX_NUM_OF_DMAS 32
-+#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE)
-+#define PORT_MAX_WEIGHT 16
-+
-+
-+#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE
-+
-+/* p4080-rev1 unique features */
-+#define QM_CGS_NO_FRAME_MODE
-+
-+/* p4080 unique features */
-+#define FM_NO_DISPATCH_RAM_ECC
-+#define FM_NO_WATCHDOG
-+#define FM_NO_TNUM_AGING
-+#define FM_KG_NO_BYPASS_FQID_GEN
-+#define FM_KG_NO_BYPASS_PLCR_PROFILE_GEN
-+#define FM_NO_BACKUP_POOLS
-+#define FM_NO_OP_OBSERVED_POOLS
-+#define FM_NO_ADVANCED_RATE_LIMITER
-+#define FM_NO_OP_OBSERVED_CGS
-+#define FM_HAS_TOTAL_DMAS
-+#define FM_KG_NO_IPPID_SUPPORT
-+#define FM_NO_GUARANTEED_RESET_VALUES
-+#define FM_MAC_RESET
-+
-+/* FM erratas */
-+#define FM_TX_ECC_FRMS_ERRATA_10GMAC_A004
-+#define FM_TX_SHORT_FRAME_BAD_TS_ERRATA_10GMAC_A006 /* No implementation, Out of LLD scope */
-+#define FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007
-+#define FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008
-+#define FM_TX_INVALID_ECC_ERRATA_10GMAC_A009 /* Out of LLD scope, user may disable ECC exceptions using FM_DisableRamsEcc */
-+#define FM_BAD_VLAN_DETECT_ERRATA_10GMAC_A010
-+
-+#define FM_RX_PREAM_4_ERRATA_DTSEC_A001
-+#define FM_GRS_ERRATA_DTSEC_A002
-+#define FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003
-+#define FM_GTS_ERRATA_DTSEC_A004
-+#define FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012
-+#define FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014
-+#define FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839
-+
-+#define FM_MAGIC_PACKET_UNRECOGNIZED_ERRATA_DTSEC2 /* No implementation, Out of LLD scope */
-+#define FM_TX_LOCKUP_ERRATA_DTSEC6
-+
-+#define FM_HC_DEF_FQID_ONLY_ERRATA_FMAN_A003 /* Implemented by ucode */
-+#define FM_DEBUG_TRACE_FMAN_A004 /* No implementation, Out of LLD scope */
-+
-+#define FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
-+
-+#define FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005
-+
-+#define FM_LEN_CHECK_ERRATA_FMAN_SW002
-+
-+#define FM_NO_CTXA_COPY_ERRATA_FMAN_SW001
-+#define FM_KG_ERASE_FLOW_ID_ERRATA_FMAN_SW004
-+
-+/*****************************************************************************
-+ FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define NUM_OF_RX_SC 16
-+#define NUM_OF_TX_SC 16
-+
-+#define NUM_OF_SA_PER_RX_SC 2
-+#define NUM_OF_SA_PER_TX_SC 2
-+
-+
-+#endif /* __DPAA_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_ext.h
-@@ -0,0 +1,83 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+
-+ @File part_ext.h
-+
-+ @Description Definitions for the part (integration) module.
-+*//***************************************************************************/
-+
-+#ifndef __PART_EXT_H
-+#define __PART_EXT_H
-+
-+#include "std_ext.h"
-+#include "part_integration_ext.h"
-+
-+
-+#if !(defined(MPC8306) || \
-+ defined(MPC8309) || \
-+ defined(MPC834x) || \
-+ defined(MPC836x) || \
-+ defined(MPC832x) || \
-+ defined(MPC837x) || \
-+ defined(MPC8568) || \
-+ defined(MPC8569) || \
-+ defined(P1020) || \
-+ defined(P1021) || \
-+ defined(P1022) || \
-+ defined(P1023) || \
-+ defined(P2020) || \
-+ defined(P2040) || \
-+ defined(P3041) || \
-+ defined(P4080) || \
-+ defined(SC4080) || \
-+ defined(P5020) || \
-+ defined(MSC814x))
-+#error "unable to proceed without chip-definition"
-+#endif /* !(defined(MPC834x) || ... */
-+
-+
-+/**************************************************************************//*
-+ @Description Part data structure - must be contained in any integration
-+ data structure.
-+*//***************************************************************************/
-+typedef struct t_Part
-+{
-+ uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId);
-+ /**< Returns the address of the module's memory map base. */
-+ e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress);
-+ /**< Returns the module's ID according to its memory map base. */
-+} t_Part;
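/*
 * Minimal sketch (not part of the original sources) of how the t_Part callbacks
 * are meant to be used once an integration layer has filled them in; the handle
 * and the concrete callback implementations are assumptions here.
 */
static inline uintptr_t example_module_base(t_Part *p_Part, t_Handle h_Part, e_ModuleId moduleId)
{
    return p_Part->f_GetModuleBase ? p_Part->f_GetModuleBase(h_Part, moduleId) : 0;
}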
-+
-+
-+#endif /* __PART_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/integrations/P3040_P4080_P5020/part_integration_ext.h
-@@ -0,0 +1,336 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File part_integration_ext.h
-+
-+ @Description P3040/P4080/P5020 external definitions and structures.
-+*//***************************************************************************/
-+#ifndef __PART_INTEGRATION_EXT_H
-+#define __PART_INTEGRATION_EXT_H
-+
-+#include "std_ext.h"
-+#include "dpaa_integration_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group P3040/P4080/P5020_chip_id P3040/P4080/P5020 Application Programming Interface
-+
-+ @Description P3040/P4080/P5020 chip functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define CORE_E500MC
-+
-+#define INTG_MAX_NUM_OF_CORES 1
-+
-+
-+/**************************************************************************//**
-+ @Description Module types.
-+*//***************************************************************************/
-+typedef enum e_ModuleId
-+{
-+ e_MODULE_ID_DUART_1 = 0,
-+ e_MODULE_ID_DUART_2,
-+ e_MODULE_ID_DUART_3,
-+ e_MODULE_ID_DUART_4,
-+ e_MODULE_ID_LAW,
-+ e_MODULE_ID_LBC,
-+ e_MODULE_ID_PAMU,
-+ e_MODULE_ID_QM, /**< Queue manager module */
-+ e_MODULE_ID_BM, /**< Buffer manager module */
-+ e_MODULE_ID_QM_CE_PORTAL_0,
-+ e_MODULE_ID_QM_CI_PORTAL_0,
-+ e_MODULE_ID_QM_CE_PORTAL_1,
-+ e_MODULE_ID_QM_CI_PORTAL_1,
-+ e_MODULE_ID_QM_CE_PORTAL_2,
-+ e_MODULE_ID_QM_CI_PORTAL_2,
-+ e_MODULE_ID_QM_CE_PORTAL_3,
-+ e_MODULE_ID_QM_CI_PORTAL_3,
-+ e_MODULE_ID_QM_CE_PORTAL_4,
-+ e_MODULE_ID_QM_CI_PORTAL_4,
-+ e_MODULE_ID_QM_CE_PORTAL_5,
-+ e_MODULE_ID_QM_CI_PORTAL_5,
-+ e_MODULE_ID_QM_CE_PORTAL_6,
-+ e_MODULE_ID_QM_CI_PORTAL_6,
-+ e_MODULE_ID_QM_CE_PORTAL_7,
-+ e_MODULE_ID_QM_CI_PORTAL_7,
-+ e_MODULE_ID_QM_CE_PORTAL_8,
-+ e_MODULE_ID_QM_CI_PORTAL_8,
-+ e_MODULE_ID_QM_CE_PORTAL_9,
-+ e_MODULE_ID_QM_CI_PORTAL_9,
-+ e_MODULE_ID_BM_CE_PORTAL_0,
-+ e_MODULE_ID_BM_CI_PORTAL_0,
-+ e_MODULE_ID_BM_CE_PORTAL_1,
-+ e_MODULE_ID_BM_CI_PORTAL_1,
-+ e_MODULE_ID_BM_CE_PORTAL_2,
-+ e_MODULE_ID_BM_CI_PORTAL_2,
-+ e_MODULE_ID_BM_CE_PORTAL_3,
-+ e_MODULE_ID_BM_CI_PORTAL_3,
-+ e_MODULE_ID_BM_CE_PORTAL_4,
-+ e_MODULE_ID_BM_CI_PORTAL_4,
-+ e_MODULE_ID_BM_CE_PORTAL_5,
-+ e_MODULE_ID_BM_CI_PORTAL_5,
-+ e_MODULE_ID_BM_CE_PORTAL_6,
-+ e_MODULE_ID_BM_CI_PORTAL_6,
-+ e_MODULE_ID_BM_CE_PORTAL_7,
-+ e_MODULE_ID_BM_CI_PORTAL_7,
-+ e_MODULE_ID_BM_CE_PORTAL_8,
-+ e_MODULE_ID_BM_CI_PORTAL_8,
-+ e_MODULE_ID_BM_CE_PORTAL_9,
-+ e_MODULE_ID_BM_CI_PORTAL_9,
-+ e_MODULE_ID_FM1, /**< Frame manager #1 module */
-+ e_MODULE_ID_FM1_RTC, /**< FM Real-Time-Clock */
-+ e_MODULE_ID_FM1_MURAM, /**< FM Multi-User-RAM */
-+ e_MODULE_ID_FM1_BMI, /**< FM BMI block */
-+ e_MODULE_ID_FM1_QMI, /**< FM QMI block */
-+ e_MODULE_ID_FM1_PRS, /**< FM parser block */
-+ e_MODULE_ID_FM1_PORT_HO0, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM1_PORT_HO1, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM1_PORT_HO2, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM1_PORT_HO3, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM1_PORT_HO4, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM1_PORT_HO5, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM1_PORT_HO6, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM1_PORT_1GRx0, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GRx1, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GRx2, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GRx3, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GRx4, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_10GRx0, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GTx0, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GTx1, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GTx2, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GTx3, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_1GTx4, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM1_PORT_10GTx0, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM1_PLCR, /**< FM Policer */
-+ e_MODULE_ID_FM1_KG, /**< FM Keygen */
-+ e_MODULE_ID_FM1_DMA, /**< FM DMA */
-+ e_MODULE_ID_FM1_FPM, /**< FM FPM */
-+ e_MODULE_ID_FM1_IRAM, /**< FM Instruction-RAM */
-+ e_MODULE_ID_FM1_1GMDIO0, /**< FM 1G MDIO MAC 0*/
-+ e_MODULE_ID_FM1_1GMDIO1, /**< FM 1G MDIO MAC 1*/
-+ e_MODULE_ID_FM1_1GMDIO2, /**< FM 1G MDIO MAC 2*/
-+ e_MODULE_ID_FM1_1GMDIO3, /**< FM 1G MDIO MAC 3*/
-+ e_MODULE_ID_FM1_10GMDIO, /**< FM 10G MDIO */
-+ e_MODULE_ID_FM1_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
-+ e_MODULE_ID_FM1_1GMAC0, /**< FM 1G MAC #0 */
-+ e_MODULE_ID_FM1_1GMAC1, /**< FM 1G MAC #1 */
-+ e_MODULE_ID_FM1_1GMAC2, /**< FM 1G MAC #2 */
-+ e_MODULE_ID_FM1_1GMAC3, /**< FM 1G MAC #3 */
-+ e_MODULE_ID_FM1_10GMAC0, /**< FM 10G MAC #0 */
-+
-+ e_MODULE_ID_FM2, /**< Frame manager #2 module */
-+ e_MODULE_ID_FM2_RTC, /**< FM Real-Time-Clock */
-+ e_MODULE_ID_FM2_MURAM, /**< FM Multi-User-RAM */
-+ e_MODULE_ID_FM2_BMI, /**< FM BMI block */
-+ e_MODULE_ID_FM2_QMI, /**< FM QMI block */
-+ e_MODULE_ID_FM2_PRS, /**< FM parser block */
-+ e_MODULE_ID_FM2_PORT_HO0, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM2_PORT_HO1, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM2_PORT_HO2, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM2_PORT_HO3, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM2_PORT_HO4, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM2_PORT_HO5, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM2_PORT_HO6, /**< FM Host-command/offline-parsing port block */
-+ e_MODULE_ID_FM2_PORT_1GRx0, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_1GRx1, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_1GRx2, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_1GRx3, /**< FM Rx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_10GRx0, /**< FM Rx 10G MAC port block */
-+ e_MODULE_ID_FM2_PORT_1GTx0, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_1GTx1, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_1GTx2, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_1GTx3, /**< FM Tx 1G MAC port block */
-+ e_MODULE_ID_FM2_PORT_10GTx0, /**< FM Tx 10G MAC port block */
-+ e_MODULE_ID_FM2_PLCR, /**< FM Policer */
-+ e_MODULE_ID_FM2_KG, /**< FM Keygen */
-+ e_MODULE_ID_FM2_DMA, /**< FM DMA */
-+ e_MODULE_ID_FM2_FPM, /**< FM FPM */
-+ e_MODULE_ID_FM2_IRAM, /**< FM Instruction-RAM */
-+ e_MODULE_ID_FM2_1GMDIO0, /**< FM 1G MDIO MAC 0*/
-+ e_MODULE_ID_FM2_1GMDIO1, /**< FM 1G MDIO MAC 1*/
-+ e_MODULE_ID_FM2_1GMDIO2, /**< FM 1G MDIO MAC 2*/
-+ e_MODULE_ID_FM2_1GMDIO3, /**< FM 1G MDIO MAC 3*/
-+ e_MODULE_ID_FM2_10GMDIO, /**< FM 10G MDIO */
-+ e_MODULE_ID_FM2_PRS_IRAM, /**< FM SW-parser Instruction-RAM */
-+ e_MODULE_ID_FM2_1GMAC0, /**< FM 1G MAC #0 */
-+ e_MODULE_ID_FM2_1GMAC1, /**< FM 1G MAC #1 */
-+ e_MODULE_ID_FM2_1GMAC2, /**< FM 1G MAC #2 */
-+ e_MODULE_ID_FM2_1GMAC3, /**< FM 1G MAC #3 */
-+ e_MODULE_ID_FM2_10GMAC0, /**< FM 10G MAC #0 */
-+
-+ e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */
-+ e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */
-+ e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */
-+ e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */
-+ e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */
-+ e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */
-+ e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */
-+ e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */
-+ e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */
-+ e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */
-+ e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */
-+ e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */
-+
-+ e_MODULE_ID_MPIC, /**< MPIC */
-+ e_MODULE_ID_GPIO, /**< GPIO */
-+ e_MODULE_ID_SERDES, /**< SERDES */
-+ e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */
-+ e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */
-+
-+ e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */
-+ e_MODULE_ID_SRIO_MU, /**< RapidIO messaging unit module */
-+
-+ e_MODULE_ID_DUMMY_LAST
-+} e_ModuleId;
-+
-+#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST
-+
-+#if 0 /* using unified values */
-+/*****************************************************************************
-+ INTEGRATION-SPECIFIC MODULE CODES
-+******************************************************************************/
-+#define MODULE_UNKNOWN 0x00000000
-+#define MODULE_MEM 0x00010000
-+#define MODULE_MM 0x00020000
-+#define MODULE_CORE 0x00030000
-+#define MODULE_CHIP 0x00040000
-+#define MODULE_PLTFRM 0x00050000
-+#define MODULE_PM 0x00060000
-+#define MODULE_MMU 0x00070000
-+#define MODULE_PIC 0x00080000
-+#define MODULE_CPC 0x00090000
-+#define MODULE_DUART 0x000a0000
-+#define MODULE_SERDES 0x000b0000
-+#define MODULE_PIO 0x000c0000
-+#define MODULE_QM 0x000d0000
-+#define MODULE_BM 0x000e0000
-+#define MODULE_SEC 0x000f0000
-+#define MODULE_LAW 0x00100000
-+#define MODULE_LBC 0x00110000
-+#define MODULE_PAMU 0x00120000
-+#define MODULE_FM 0x00130000
-+#define MODULE_FM_MURAM 0x00140000
-+#define MODULE_FM_PCD 0x00150000
-+#define MODULE_FM_RTC 0x00160000
-+#define MODULE_FM_MAC 0x00170000
-+#define MODULE_FM_PORT 0x00180000
-+#define MODULE_FM_SP 0x00190000
-+#define MODULE_DPA_PORT 0x001a0000
-+#define MODULE_MII 0x001b0000
-+#define MODULE_I2C 0x001c0000
-+#define MODULE_DMA 0x001d0000
-+#define MODULE_DDR 0x001e0000
-+#define MODULE_ESPI 0x001f0000
-+#define MODULE_DPAA_IPSEC 0x00200000
-+#endif /* using unified values */
-+
-+/*****************************************************************************
-+ PAMU INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define PAMU_NUM_OF_PARTITIONS 5
-+
-+#define PAMU_PICS_AVICS_ERRATA_PAMU3
-+
-+/*****************************************************************************
-+ LAW INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define LAW_NUM_OF_WINDOWS 32
-+#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4KB */
-+#define LAW_MAX_WINDOW_SIZE 0x0000002000000000LL /**< 64GB */
-+
-+
-+/*****************************************************************************
-+ LBC INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+/**************************************************************************//**
-+ @Group lbc_exception_grp LBC Exception Unit
-+
-+ @Description LBC Exception unit API functions, definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Anchor lbc_exbm
-+
-+ @Collection LBC Errors Bit Mask
-+
-+ These errors are reported through the exceptions callback.
-+ The values can be or'ed in any combination in the errors mask
-+ parameter of the errors report structure.
-+
-+ These errors can also be passed as a bit-mask to
-+ LBC_EnableErrorChecking() or LBC_DisableErrorChecking(),
-+ for enabling or disabling error checking.
-+ @{
-+*//***************************************************************************/
-+#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */
-+#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */
-+#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */
-+#define LBC_ERR_ATOMIC_WRITE 0x00800000 /**< Atomic write error */
-+#define LBC_ERR_ATOMIC_READ 0x00400000 /**< Atomic read error */
-+#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */
-+
-+#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \
-+ LBC_ERR_WRITE_PROTECT | LBC_ERR_ATOMIC_WRITE | \
-+ LBC_ERR_ATOMIC_READ | LBC_ERR_CHIP_SELECT)
-+ /**< All possible errors */
-+/* @} */
-+/** @} */ /* end of lbc_exception_grp group */
-+
-+#define LBC_INCORRECT_ERROR_REPORT_ERRATA
-+
-+#define LBC_NUM_OF_BANKS 8
-+#define LBC_MAX_CS_SIZE 0x0000000100000000LL
-+#define LBC_ATOMIC_OPERATION_SUPPORT
-+#define LBC_PARITY_SUPPORT
-+#define LBC_ADDRESS_HOLD_TIME_CTRL
-+#define LBC_HIGH_CLK_DIVIDERS
-+#define LBC_FCM_AVAILABLE
-+
-+/*****************************************************************************
-+ GPIO INTEGRATION-SPECIFIC DEFINITIONS
-+******************************************************************************/
-+#define GPIO_NUM_OF_PORTS 1 /**< Number of ports in the GPIO module;
-+ each port contains up to 32 I/O pins. */
-+
-+#define GPIO_VALID_PIN_MASKS \
-+ { /* Port A */ 0xFFFFFFFF }
-+
-+#define GPIO_VALID_INTR_MASKS \
-+ { /* Port A */ 0xFFFFFFFF }
-+
-+#endif /* __PART_INTEGRATION_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/math_ext.h
-@@ -0,0 +1,100 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __MATH_EXT_H
-+#define __MATH_EXT_H
-+
-+
-+#if defined(NCSW_LINUX) && defined(__KERNEL__)
-+#include <linux/math.h>
-+#include <linux/math64.h>
-+
-+#elif defined(__MWERKS__)
-+#define LOW(x) ( sizeof(x)==8 ? *(1+(int32_t*)&x) : (*(int32_t*)&x))
-+#define HIGH(x) (*(int32_t*)&x)
-+#define ULOW(x) ( sizeof(x)==8 ? *(1+(uint32_t*)&x) : (*(uint32_t*)&x))
-+#define UHIGH(x) (*(uint32_t*)&x)
-+
-+static const double big = 1.0e300;
-+
-+/* Software implementation of ceil() for the __MWERKS__ build */
-+static __inline__ double ceil(double x)
-+{
-+ int32_t i0,i1,j0; /*- cc 020130 -*/
-+ uint32_t i,j; /*- cc 020130 -*/
-+ i0 = HIGH(x);
-+ i1 = LOW(x);
-+ j0 = ((i0>>20)&0x7ff)-0x3ff;
-+ if(j0<20) {
-+ if(j0<0) { /* raise inexact if x != 0 */
-+ if(big+x>0.0) {/* return 0*sign(x) if |x|<1 */
-+ if(i0<0) {i0=0x80000000;i1=0;}
-+ else if((i0|i1)!=0) { i0=0x3ff00000;i1=0;}
-+ }
-+ } else {
-+ i = (uint32_t)(0x000fffff)>>j0;
-+ if(((i0&i)|i1)==0) return x; /* x is integral */
-+ if(big+x>0.0) { /* raise inexact flag */
-+ if(i0>0) i0 += (0x00100000)>>j0;
-+ i0 &= (~i); i1=0;
-+ }
-+ }
-+ } else if (j0>51) {
-+ if(j0==0x400) return x+x; /* inf or NaN */
-+ else return x; /* x is integral */
-+ } else {
-+ i = ((uint32_t)(0xffffffff))>>(j0-20); /*- cc 020130 -*/
-+ if((i1&i)==0) return x; /* x is integral */
-+ if(big+x>0.0) { /* raise inexact flag */
-+ if(i0>0) {
-+ if(j0==20) i0+=1;
-+ else {
-+ j = (uint32_t)(i1 + (1<<(52-j0)));
-+ if(j<i1) i0+=1; /* got a carry */
-+ i1 = (int32_t)j;
-+ }
-+ }
-+ i1 &= (~i);
-+ }
-+ }
-+ HIGH(x) = i0;
-+ LOW(x) = i1;
-+ return x;
-+}
-+
-+#else
-+#include <math.h>
-+#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
-+
-+
-+#endif /* __MATH_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h
-@@ -0,0 +1,435 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File ncsw_ext.h
-+
-+ @Description General NetCommSw Standard Definitions
-+*//***************************************************************************/
-+
-+#ifndef __NCSW_EXT_H
-+#define __NCSW_EXT_H
-+
-+
-+#include "memcpy_ext.h"
-+
-+#define WRITE_BLOCK IOMemSet32 /* include memcpy_ext.h */
-+#define COPY_BLOCK Mem2IOCpy32 /* include memcpy_ext.h */
-+
-+#define PTR_TO_UINT(_ptr) ((uintptr_t)(_ptr))
-+#define UINT_TO_PTR(_val) ((void*)(uintptr_t)(_val))
-+
-+#define PTR_MOVE(_ptr, _offset) (void*)((uint8_t*)(_ptr) + (_offset))
-+
-+
-+#define WRITE_UINT8_UINT24(arg, data08, data24) \
-+ WRITE_UINT32(arg,((uint32_t)(data08)<<24)|((uint32_t)(data24)&0x00FFFFFF))
-+#define WRITE_UINT24_UINT8(arg, data24, data08) \
-+ WRITE_UINT32(arg,((uint32_t)(data24)<< 8)|((uint32_t)(data08)&0x000000FF))
-+
-+/* Little-Endian access macros */
-+
-+#define WRITE_UINT16_LE(arg, data) \
-+ WRITE_UINT16((arg), SwapUint16(data))
-+
-+#define WRITE_UINT32_LE(arg, data) \
-+ WRITE_UINT32((arg), SwapUint32(data))
-+
-+#define WRITE_UINT64_LE(arg, data) \
-+ WRITE_UINT64((arg), SwapUint64(data))
-+
-+#define GET_UINT16_LE(arg) \
-+ SwapUint16(GET_UINT16(arg))
-+
-+#define GET_UINT32_LE(arg) \
-+ SwapUint32(GET_UINT32(arg))
-+
-+#define GET_UINT64_LE(arg) \
-+ SwapUint64(GET_UINT64(arg))
-+
-+/* Write and Read again macros */
-+#define WRITE_UINT_SYNC(size, arg, data) \
-+ do { \
-+ WRITE_UINT##size((arg), (data)); \
-+ CORE_MemoryBarrier(); \
-+ } while (0)
-+
-+#define WRITE_UINT8_SYNC(arg, data) WRITE_UINT_SYNC(8, (arg), (data))
-+
-+#define WRITE_UINT16_SYNC(arg, data) WRITE_UINT_SYNC(16, (arg), (data))
-+#define WRITE_UINT32_SYNC(arg, data) WRITE_UINT_SYNC(32, (arg), (data))
-+
-+#define MAKE_UINT64(high32, low32) (((uint64_t)(high32) << 32) | (low32))
-+
-+
-+/*----------------------*/
-+/* Miscellaneous macros */
-+/*----------------------*/
-+
-+#define UNUSED(_x) ((void)(_x))
-+
-+#define KILOBYTE 0x400UL /* 1024 */
-+#define MEGABYTE (KILOBYTE * KILOBYTE) /* 1024*1024 */
-+#define GIGABYTE ((uint64_t)(KILOBYTE * MEGABYTE)) /* 1024*1024*1024 */
-+#define TERABYTE ((uint64_t)(KILOBYTE * GIGABYTE)) /* 1024*1024*1024*1024 */
-+
-+#ifndef NO_IRQ
-+#define NO_IRQ (0)
-+#endif
-+#define NCSW_MASTER_ID (0)
-+
-+/* Macro for checking if a number is a power of 2 */
-+#define POWER_OF_2(n) (!((n) & ((n)-1)))
-+
-+/* Macro for calculating log of base 2 */
-+#define LOG2(num, log2Num) \
-+ do \
-+ { \
-+ uint64_t tmp = (num); \
-+ log2Num = 0; \
-+ while (tmp > 1) \
-+ { \
-+ log2Num++; \
-+ tmp >>= 1; \
-+ } \
-+ } while (0)
-+
-+#define NEXT_POWER_OF_2(_num, _nextPow) \
-+do \
-+{ \
-+ if (POWER_OF_2(_num)) \
-+ _nextPow = (_num); \
-+ else \
-+ { \
-+ uint64_t tmp = (_num); \
-+ _nextPow = 1; \
-+ while (tmp) \
-+ { \
-+ _nextPow <<= 1; \
-+ tmp >>= 1; \
-+ } \
-+ } \
-+} while (0)
-+
-+/* Ceiling division - not the fastest way, but safer in terms of overflow */
-+#define DIV_CEIL(x,y) (div64_u64((x),(y)) + (((div64_u64((x),(y))*(y)) == (x)) ? 0 : 1))
-+
-+/* Round up a number to be a multiple of a second number */
-+#define ROUND_UP(x,y) ((((x) + (y) - 1) / (y)) * (y))
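-+
-+/*
-+ * Editor's illustrative sketch - not part of the original header. A minimal
-+ * usage example of the arithmetic helpers above, assuming only the macros in
-+ * this file plus the kernel's div64_u64() used by DIV_CEIL:
-+ *
-+ *   uint32_t log2Num;
-+ *   uint64_t nextPow;
-+ *   LOG2(64, log2Num);                    // log2Num == 6
-+ *   NEXT_POWER_OF_2(100, nextPow);        // nextPow == 128
-+ *   uint64_t chunks = DIV_CEIL(1000, 48); // 21, rounded up
-+ *   uint64_t padded = ROUND_UP(100, 8);   // 104, next multiple of 8
-+ */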
-+
-+/* Timing macro for converting usec units to number of ticks. */
-+/* ((number of usec * clock_Hz) / 1,000,000) - since           */
-+/* clk is in MHz units, no division needed. */
-+#define USEC_TO_CLK(usec,clk) ((usec) * (clk))
-+#define CYCLES_TO_USEC(cycles,clk) ((cycles) / (clk))
-+
-+/* Timing macros for converting between nsec units and number of clocks. */
-+#define NSEC_TO_CLK(nsec,clk) DIV_CEIL(((nsec) * (clk)), 1000)
-+#define CYCLES_TO_NSEC(cycles,clk) (((cycles) * 1000) / (clk))
-+
-+/* Timing macros for converting between psec units and number of clocks. */
-+#define PSEC_TO_CLK(psec,clk) DIV_CEIL(((psec) * (clk)), 1000000)
-+#define CYCLES_TO_PSEC(cycles,clk) (((cycles) * 1000000) / (clk))
-+
-+/* Min, Max macros */
-+#define MIN(a,b) ((a) < (b) ? (a) : (b))
-+#define MAX(a,b) ((a) > (b) ? (a) : (b))
-+#define IN_RANGE(min,val,max) ((min)<=(val) && (val)<=(max))
-+
-+#define ABS(a) ((a<0)?(a*-1):a)
-+
-+#if !(defined(ARRAY_SIZE))
-+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-+#endif /* !defined(ARRAY_SIZE) */
-+
-+
-+/* possible alignments */
-+#define HALF_WORD_ALIGNMENT 2
-+#define WORD_ALIGNMENT 4
-+#define DOUBLE_WORD_ALIGNMENT 8
-+#define BURST_ALIGNMENT 32
-+
-+#define HALF_WORD_ALIGNED 0x00000001
-+#define WORD_ALIGNED 0x00000003
-+#define DOUBLE_WORD_ALIGNED 0x00000007
-+#define BURST_ALIGNED 0x0000001f
-+#ifndef IS_ALIGNED
-+#define IS_ALIGNED(n,align) (!((uint32_t)(n) & (align - 1)))
-+#endif /* IS_ALIGNED */
-+
-+
-+#define LAST_BUF 1
-+#define FIRST_BUF 2
-+#define SINGLE_BUF (LAST_BUF | FIRST_BUF)
-+#define MIDDLE_BUF 4
-+
-+#define ARRAY_END -1
-+
-+#define ILLEGAL_BASE (~0)
-+
-+#define BUF_POSITION(first, last) state[(!!(last))<<1 | !!(first)]
-+#define DECLARE_POSITION static uint8_t state[4] = { (uint8_t)MIDDLE_BUF, (uint8_t)FIRST_BUF, (uint8_t)LAST_BUF, (uint8_t)SINGLE_BUF };
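-+
-+/*
-+ * Editor's note - not part of the original header. BUF_POSITION() indexes the
-+ * local "state" table declared by DECLARE_POSITION with (last << 1 | first),
-+ * mapping the two flags onto the position constants above:
-+ *
-+ *   DECLARE_POSITION
-+ *   uint8_t pos;
-+ *   pos = BUF_POSITION(1, 0);   // FIRST_BUF
-+ *   pos = BUF_POSITION(0, 0);   // MIDDLE_BUF
-+ *   pos = BUF_POSITION(1, 1);   // SINGLE_BUF
-+ */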
-+
-+
-+/**************************************************************************//**
-+ @Description Timers operation mode
-+*//***************************************************************************/
-+typedef enum e_TimerMode
-+{
-+ e_TIMER_MODE_INVALID = 0,
-+ e_TIMER_MODE_FREE_RUN, /**< Free run - counter continues to increase
-+ after reaching the reference value. */
-+ e_TIMER_MODE_PERIODIC, /**< Periodic - counter restarts counting from 0
-+ after reaching the reference value. */
-+ e_TIMER_MODE_SINGLE /**< Single (one-shot) - counter stops counting
-+ after reaching the reference value. */
-+} e_TimerMode;
-+
-+
-+/**************************************************************************//**
-+ @Description Enumeration (bit flags) of communication modes (Transmit,
-+ receive or both).
-+*//***************************************************************************/
-+typedef enum e_CommMode
-+{
-+ e_COMM_MODE_NONE = 0, /**< No transmit/receive communication */
-+ e_COMM_MODE_RX = 1, /**< Only receive communication */
-+ e_COMM_MODE_TX = 2, /**< Only transmit communication */
-+ e_COMM_MODE_RX_AND_TX = 3 /**< Both transmit and receive communication */
-+} e_CommMode;
-+
-+/**************************************************************************//**
-+ @Description General Diagnostic Mode
-+*//***************************************************************************/
-+typedef enum e_DiagMode
-+{
-+ e_DIAG_MODE_NONE = 0, /**< Normal operation; no diagnostic mode */
-+ e_DIAG_MODE_CTRL_LOOPBACK, /**< Loopback in the controller */
-+ e_DIAG_MODE_CHIP_LOOPBACK, /**< Loopback in the chip but not in the
-+ controller; e.g. IO-pins, SerDes, etc. */
-+ e_DIAG_MODE_PHY_LOOPBACK, /**< Loopback in the external PHY */
-+ e_DIAG_MODE_EXT_LOOPBACK, /**< Loopback in the external line (beyond the PHY) */
-+ e_DIAG_MODE_CTRL_ECHO, /**< Echo incoming data by the controller */
-+ e_DIAG_MODE_PHY_ECHO /**< Echo incoming data by the PHY */
-+} e_DiagMode;
-+
-+/**************************************************************************//**
-+ @Description Possible RxStore callback responses.
-+*//***************************************************************************/
-+typedef enum e_RxStoreResponse
-+{
-+ e_RX_STORE_RESPONSE_PAUSE /**< Pause invoking callback with received data;
-+ in polling mode, start again invoking callback
-+ only next time user invokes the receive routine;
-+ in interrupt mode, start again invoking callback
-+ only next time a receive event triggers an interrupt;
-+ in all cases, received data that are pending are not
-+ lost, rather, their processing is temporarily deferred;
-+ in all cases, received data are processed in the order
-+ in which they were received. */
-+ , e_RX_STORE_RESPONSE_CONTINUE /**< Continue invoking callback with received data. */
-+} e_RxStoreResponse;
-+
-+
-+/**************************************************************************//**
-+ @Description General Handle
-+*//***************************************************************************/
-+typedef void * t_Handle; /**< handle, used as object's descriptor */
-+
-+/**************************************************************************//**
-+ @Description MUTEX type
-+*//***************************************************************************/
-+typedef uint32_t t_Mutex;
-+
-+/**************************************************************************//**
-+ @Description Error Code.
-+
-+ The high word of the error code is the code of the software
-+ module (driver). The low word is the error type (e_ErrorType).
-+ To get the values from the error code, use GET_ERROR_TYPE()
-+ and GET_ERROR_MODULE().
-+*//***************************************************************************/
-+typedef uint32_t t_Error;
-+
-+/**************************************************************************//**
-+ @Description General prototype of interrupt service routine (ISR).
-+
-+ @Param[in] handle - Optional handle of the module handling the interrupt.
-+
-+ @Return None
-+ *//***************************************************************************/
-+typedef void (t_Isr)(t_Handle handle);
-+
-+/**************************************************************************//**
-+ @Anchor mem_attr
-+
-+ @Collection Memory Attributes
-+
-+ Various attributes of memory partitions. These values may be
-+ or'ed together to create a mask of all memory attributes.
-+ @{
-+*//***************************************************************************/
-+#define MEMORY_ATTR_CACHEABLE 0x00000001
-+ /**< Memory is cacheable */
-+#define MEMORY_ATTR_QE_2ND_BUS_ACCESS 0x00000002
-+ /**< Memory can be accessed by QUICC Engine
-+ through its secondary bus interface */
-+
-+/* @} */
-+
-+
-+/**************************************************************************//**
-+ @Function t_GetBufFunction
-+
-+ @Description User callback function called by driver to get data buffer.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_BufferPool - A handle to buffer pool manager
-+ @Param[out] p_BufContextHandle - Returns the user's private context that
-+ should be associated with the buffer
-+
-+ @Return Pointer to data buffer, NULL if error
-+ *//***************************************************************************/
-+typedef uint8_t * (t_GetBufFunction)(t_Handle h_BufferPool,
-+ t_Handle *p_BufContextHandle);
-+
-+/**************************************************************************//**
-+ @Function t_PutBufFunction
-+
-+ @Description User callback function called by driver to return data buffer.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_BufferPool - A handle to buffer pool manager
-+ @Param[in] p_Buffer - A pointer to buffer to return
-+ @Param[in] h_BufContext - The user's private context associated with
-+ the returned buffer
-+
-+ @Return E_OK on success; Error code otherwise
-+ *//***************************************************************************/
-+typedef t_Error (t_PutBufFunction)(t_Handle h_BufferPool,
-+ uint8_t *p_Buffer,
-+ t_Handle h_BufContext);
-+
-+/**************************************************************************//**
-+ @Function t_PhysToVirt
-+
-+ @Description Translates a physical address to the matching virtual address.
-+
-+ @Param[in] addr - The physical address to translate.
-+
-+ @Return Virtual address.
-+*//***************************************************************************/
-+typedef void * t_PhysToVirt(physAddress_t addr);
-+
-+/**************************************************************************//**
-+ @Function t_VirtToPhys
-+
-+ @Description Translates a virtual address to the matching physical address.
-+
-+ @Param[in] addr - The virtual address to translate.
-+
-+ @Return Physical address.
-+*//***************************************************************************/
-+typedef physAddress_t t_VirtToPhys(void *addr);
-+
-+/**************************************************************************//**
-+ @Description Buffer Pool Information Structure.
-+*//***************************************************************************/
-+typedef struct t_BufferPoolInfo
-+{
-+ t_Handle h_BufferPool; /**< A handle to the buffer pool manager */
-+ t_GetBufFunction *f_GetBuf; /**< User callback to get a free buffer */
-+ t_PutBufFunction *f_PutBuf; /**< User callback to return a buffer */
-+ uint16_t bufferSize; /**< Buffer size (in bytes) */
-+
-+ t_PhysToVirt *f_PhysToVirt; /**< User callback to translate pool buffers
-+ physical addresses to virtual addresses */
-+ t_VirtToPhys *f_VirtToPhys; /**< User callback to translate pool buffers
-+ virtual addresses to physical addresses */
-+} t_BufferPoolInfo;
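-+
-+/*
-+ * Editor's illustrative sketch - not part of the original header. Filling a
-+ * t_BufferPoolInfo with user-supplied callbacks; MyGetBuf, MyPutBuf,
-+ * MyPhysToVirt, MyVirtToPhys and h_MyPool are hypothetical application-side
-+ * names matching the typedefs above.
-+ *
-+ *   t_BufferPoolInfo poolInfo = {
-+ *       .h_BufferPool = h_MyPool,
-+ *       .f_GetBuf     = MyGetBuf,
-+ *       .f_PutBuf     = MyPutBuf,
-+ *       .bufferSize   = 2048,
-+ *       .f_PhysToVirt = MyPhysToVirt,
-+ *       .f_VirtToPhys = MyVirtToPhys,
-+ *   };
-+ */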
-+
-+
-+/**************************************************************************//**
-+ @Description User callback function called by driver when transmit completed.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_App - Application's handle, as was provided to the
-+ driver by the user
-+ @Param[in] queueId - Transmit queue ID
-+ @Param[in] p_Data - Pointer to the data buffer
-+ @Param[in] h_BufContext - The user's private context associated with
-+ the given data buffer
-+ @Param[in] status - Transmit status and errors
-+ @Param[in] flags - Driver-dependent information
-+ *//***************************************************************************/
-+typedef void (t_TxConfFunction)(t_Handle h_App,
-+ uint32_t queueId,
-+ uint8_t *p_Data,
-+ t_Handle h_BufContext,
-+ uint16_t status,
-+ uint32_t flags);
-+
-+/**************************************************************************//**
-+ @Description User callback function called by driver with receive data.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_App - Application's handle, as was provided to the
-+ driver by the user
-+ @Param[in] queueId - Receive queue ID
-+ @Param[in] p_Data - Pointer to the buffer with received data
-+ @Param[in] h_BufContext - The user's private context associated with
-+ the given data buffer
-+ @Param[in] length - Length of received data
-+ @Param[in] status - Receive status and errors
-+ @Param[in] position - Position of buffer in frame
-+ @Param[in] flags - Driver-dependent information
-+
-+ @Retval e_RX_STORE_RESPONSE_CONTINUE - order the driver to continue Rx
-+ operation for all ready data.
-+ @Retval e_RX_STORE_RESPONSE_PAUSE - order the driver to stop Rx operation.
-+ *//***************************************************************************/
-+typedef e_RxStoreResponse (t_RxStoreFunction)(t_Handle h_App,
-+ uint32_t queueId,
-+ uint8_t *p_Data,
-+ t_Handle h_BufContext,
-+ uint32_t length,
-+ uint16_t status,
-+ uint8_t position,
-+ uint32_t flags);
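-+
-+/*
-+ * Editor's illustrative sketch - not part of the original header. A minimal
-+ * t_RxStoreFunction implementation; MyProcessFrame() and MyBacklogIsFull() are
-+ * hypothetical application helpers.
-+ *
-+ *   static e_RxStoreResponse MyRxStore(t_Handle h_App, uint32_t queueId,
-+ *                                      uint8_t *p_Data, t_Handle h_BufContext,
-+ *                                      uint32_t length, uint16_t status,
-+ *                                      uint8_t position, uint32_t flags)
-+ *   {
-+ *       MyProcessFrame(h_App, p_Data, length);
-+ *       return MyBacklogIsFull(h_App) ? e_RX_STORE_RESPONSE_PAUSE
-+ *                                     : e_RX_STORE_RESPONSE_CONTINUE;
-+ *   }
-+ */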
-+
-+
-+#endif /* __NCSW_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/net_ext.h
-@@ -0,0 +1,430 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File net_ext.h
-+
-+ @Description   This file contains common and general netcomm header definitions.
-+*//***************************************************************************/
-+#ifndef __NET_EXT_H
-+#define __NET_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+typedef uint8_t headerFieldPpp_t;
-+
-+#define NET_HEADER_FIELD_PPP_PID (1)
-+#define NET_HEADER_FIELD_PPP_COMPRESSED (NET_HEADER_FIELD_PPP_PID << 1)
-+#define NET_HEADER_FIELD_PPP_ALL_FIELDS ((NET_HEADER_FIELD_PPP_PID << 2) - 1)
-+
-+
-+typedef uint8_t headerFieldPppoe_t;
-+
-+#define NET_HEADER_FIELD_PPPoE_VER (1)
-+#define NET_HEADER_FIELD_PPPoE_TYPE (NET_HEADER_FIELD_PPPoE_VER << 1)
-+#define NET_HEADER_FIELD_PPPoE_CODE (NET_HEADER_FIELD_PPPoE_VER << 2)
-+#define NET_HEADER_FIELD_PPPoE_SID (NET_HEADER_FIELD_PPPoE_VER << 3)
-+#define NET_HEADER_FIELD_PPPoE_LEN (NET_HEADER_FIELD_PPPoE_VER << 4)
-+#define NET_HEADER_FIELD_PPPoE_SESSION (NET_HEADER_FIELD_PPPoE_VER << 5)
-+#define NET_HEADER_FIELD_PPPoE_PID (NET_HEADER_FIELD_PPPoE_VER << 6)
-+#define NET_HEADER_FIELD_PPPoE_ALL_FIELDS ((NET_HEADER_FIELD_PPPoE_VER << 7) - 1)
-+
-+#define NET_HEADER_FIELD_PPPMUX_PID (1)
-+#define NET_HEADER_FIELD_PPPMUX_CKSUM (NET_HEADER_FIELD_PPPMUX_PID << 1)
-+#define NET_HEADER_FIELD_PPPMUX_COMPRESSED (NET_HEADER_FIELD_PPPMUX_PID << 2)
-+#define NET_HEADER_FIELD_PPPMUX_ALL_FIELDS ((NET_HEADER_FIELD_PPPMUX_PID << 3) - 1)
-+
-+#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF (1)
-+#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_LXT (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 1)
-+#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_LEN (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 2)
-+#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_PID (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 3)
-+#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_USE_PID (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 4)
-+#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS ((NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 5) - 1)
-+
-+
-+typedef uint8_t headerFieldEth_t;
-+
-+#define NET_HEADER_FIELD_ETH_DA (1)
-+#define NET_HEADER_FIELD_ETH_SA (NET_HEADER_FIELD_ETH_DA << 1)
-+#define NET_HEADER_FIELD_ETH_LENGTH (NET_HEADER_FIELD_ETH_DA << 2)
-+#define NET_HEADER_FIELD_ETH_TYPE (NET_HEADER_FIELD_ETH_DA << 3)
-+#define NET_HEADER_FIELD_ETH_FINAL_CKSUM (NET_HEADER_FIELD_ETH_DA << 4)
-+#define NET_HEADER_FIELD_ETH_PADDING (NET_HEADER_FIELD_ETH_DA << 5)
-+#define NET_HEADER_FIELD_ETH_ALL_FIELDS ((NET_HEADER_FIELD_ETH_DA << 6) - 1)
-+
-+#define NET_HEADER_FIELD_ETH_ADDR_SIZE 6
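-+
-+/*
-+ * Editor's note - not part of the original header. Every NET_HEADER_FIELD_*
-+ * group in this file follows the same pattern: one bit per field, derived by
-+ * shifting the first field's value, so individual fields can be OR'ed into a
-+ * selection mask, e.g.:
-+ *
-+ *   headerFieldEth_t ethMask = NET_HEADER_FIELD_ETH_DA |
-+ *                              NET_HEADER_FIELD_ETH_SA |
-+ *                              NET_HEADER_FIELD_ETH_TYPE;
-+ */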
-+
-+typedef uint16_t headerFieldIp_t;
-+
-+#define NET_HEADER_FIELD_IP_VER (1)
-+#define NET_HEADER_FIELD_IP_DSCP (NET_HEADER_FIELD_IP_VER << 2)
-+#define NET_HEADER_FIELD_IP_ECN (NET_HEADER_FIELD_IP_VER << 3)
-+#define NET_HEADER_FIELD_IP_PROTO (NET_HEADER_FIELD_IP_VER << 4)
-+
-+#define NET_HEADER_FIELD_IP_PROTO_SIZE 1
-+
-+typedef uint16_t headerFieldIpv4_t;
-+
-+#define NET_HEADER_FIELD_IPv4_VER (1)
-+#define NET_HEADER_FIELD_IPv4_HDR_LEN (NET_HEADER_FIELD_IPv4_VER << 1)
-+#define NET_HEADER_FIELD_IPv4_TOS (NET_HEADER_FIELD_IPv4_VER << 2)
-+#define NET_HEADER_FIELD_IPv4_TOTAL_LEN (NET_HEADER_FIELD_IPv4_VER << 3)
-+#define NET_HEADER_FIELD_IPv4_ID (NET_HEADER_FIELD_IPv4_VER << 4)
-+#define NET_HEADER_FIELD_IPv4_FLAG_D (NET_HEADER_FIELD_IPv4_VER << 5)
-+#define NET_HEADER_FIELD_IPv4_FLAG_M (NET_HEADER_FIELD_IPv4_VER << 6)
-+#define NET_HEADER_FIELD_IPv4_OFFSET (NET_HEADER_FIELD_IPv4_VER << 7)
-+#define NET_HEADER_FIELD_IPv4_TTL (NET_HEADER_FIELD_IPv4_VER << 8)
-+#define NET_HEADER_FIELD_IPv4_PROTO (NET_HEADER_FIELD_IPv4_VER << 9)
-+#define NET_HEADER_FIELD_IPv4_CKSUM (NET_HEADER_FIELD_IPv4_VER << 10)
-+#define NET_HEADER_FIELD_IPv4_SRC_IP (NET_HEADER_FIELD_IPv4_VER << 11)
-+#define NET_HEADER_FIELD_IPv4_DST_IP (NET_HEADER_FIELD_IPv4_VER << 12)
-+#define NET_HEADER_FIELD_IPv4_OPTS (NET_HEADER_FIELD_IPv4_VER << 13)
-+#define NET_HEADER_FIELD_IPv4_OPTS_COUNT (NET_HEADER_FIELD_IPv4_VER << 14)
-+#define NET_HEADER_FIELD_IPv4_ALL_FIELDS ((NET_HEADER_FIELD_IPv4_VER << 15) - 1)
-+
-+#define NET_HEADER_FIELD_IPv4_ADDR_SIZE 4
-+#define NET_HEADER_FIELD_IPv4_PROTO_SIZE 1
-+
-+
-+typedef uint8_t headerFieldIpv6_t;
-+
-+#define NET_HEADER_FIELD_IPv6_VER (1)
-+#define NET_HEADER_FIELD_IPv6_TC (NET_HEADER_FIELD_IPv6_VER << 1)
-+#define NET_HEADER_FIELD_IPv6_SRC_IP (NET_HEADER_FIELD_IPv6_VER << 2)
-+#define NET_HEADER_FIELD_IPv6_DST_IP (NET_HEADER_FIELD_IPv6_VER << 3)
-+#define NET_HEADER_FIELD_IPv6_NEXT_HDR (NET_HEADER_FIELD_IPv6_VER << 4)
-+#define NET_HEADER_FIELD_IPv6_FL (NET_HEADER_FIELD_IPv6_VER << 5)
-+#define NET_HEADER_FIELD_IPv6_HOP_LIMIT (NET_HEADER_FIELD_IPv6_VER << 6)
-+#define NET_HEADER_FIELD_IPv6_ALL_FIELDS ((NET_HEADER_FIELD_IPv6_VER << 7) - 1)
-+
-+#define NET_HEADER_FIELD_IPv6_ADDR_SIZE 16
-+#define NET_HEADER_FIELD_IPv6_NEXT_HDR_SIZE 1
-+
-+#define NET_HEADER_FIELD_ICMP_TYPE (1)
-+#define NET_HEADER_FIELD_ICMP_CODE (NET_HEADER_FIELD_ICMP_TYPE << 1)
-+#define NET_HEADER_FIELD_ICMP_CKSUM (NET_HEADER_FIELD_ICMP_TYPE << 2)
-+#define NET_HEADER_FIELD_ICMP_ID (NET_HEADER_FIELD_ICMP_TYPE << 3)
-+#define NET_HEADER_FIELD_ICMP_SQ_NUM (NET_HEADER_FIELD_ICMP_TYPE << 4)
-+#define NET_HEADER_FIELD_ICMP_ALL_FIELDS ((NET_HEADER_FIELD_ICMP_TYPE << 5) - 1)
-+
-+#define NET_HEADER_FIELD_ICMP_CODE_SIZE 1
-+#define NET_HEADER_FIELD_ICMP_TYPE_SIZE 1
-+
-+#define NET_HEADER_FIELD_IGMP_VERSION (1)
-+#define NET_HEADER_FIELD_IGMP_TYPE (NET_HEADER_FIELD_IGMP_VERSION << 1)
-+#define NET_HEADER_FIELD_IGMP_CKSUM (NET_HEADER_FIELD_IGMP_VERSION << 2)
-+#define NET_HEADER_FIELD_IGMP_DATA (NET_HEADER_FIELD_IGMP_VERSION << 3)
-+#define NET_HEADER_FIELD_IGMP_ALL_FIELDS ((NET_HEADER_FIELD_IGMP_VERSION << 4) - 1)
-+
-+
-+typedef uint16_t headerFieldTcp_t;
-+
-+#define NET_HEADER_FIELD_TCP_PORT_SRC (1)
-+#define NET_HEADER_FIELD_TCP_PORT_DST (NET_HEADER_FIELD_TCP_PORT_SRC << 1)
-+#define NET_HEADER_FIELD_TCP_SEQ (NET_HEADER_FIELD_TCP_PORT_SRC << 2)
-+#define NET_HEADER_FIELD_TCP_ACK (NET_HEADER_FIELD_TCP_PORT_SRC << 3)
-+#define NET_HEADER_FIELD_TCP_OFFSET (NET_HEADER_FIELD_TCP_PORT_SRC << 4)
-+#define NET_HEADER_FIELD_TCP_FLAGS (NET_HEADER_FIELD_TCP_PORT_SRC << 5)
-+#define NET_HEADER_FIELD_TCP_WINDOW (NET_HEADER_FIELD_TCP_PORT_SRC << 6)
-+#define NET_HEADER_FIELD_TCP_CKSUM (NET_HEADER_FIELD_TCP_PORT_SRC << 7)
-+#define NET_HEADER_FIELD_TCP_URGPTR (NET_HEADER_FIELD_TCP_PORT_SRC << 8)
-+#define NET_HEADER_FIELD_TCP_OPTS (NET_HEADER_FIELD_TCP_PORT_SRC << 9)
-+#define NET_HEADER_FIELD_TCP_OPTS_COUNT (NET_HEADER_FIELD_TCP_PORT_SRC << 10)
-+#define NET_HEADER_FIELD_TCP_ALL_FIELDS ((NET_HEADER_FIELD_TCP_PORT_SRC << 11) - 1)
-+
-+#define NET_HEADER_FIELD_TCP_PORT_SIZE 2
-+
-+
-+typedef uint8_t headerFieldSctp_t;
-+
-+#define NET_HEADER_FIELD_SCTP_PORT_SRC (1)
-+#define NET_HEADER_FIELD_SCTP_PORT_DST (NET_HEADER_FIELD_SCTP_PORT_SRC << 1)
-+#define NET_HEADER_FIELD_SCTP_VER_TAG (NET_HEADER_FIELD_SCTP_PORT_SRC << 2)
-+#define NET_HEADER_FIELD_SCTP_CKSUM (NET_HEADER_FIELD_SCTP_PORT_SRC << 3)
-+#define NET_HEADER_FIELD_SCTP_ALL_FIELDS ((NET_HEADER_FIELD_SCTP_PORT_SRC << 4) - 1)
-+
-+#define NET_HEADER_FIELD_SCTP_PORT_SIZE 2
-+
-+typedef uint8_t headerFieldDccp_t;
-+
-+#define NET_HEADER_FIELD_DCCP_PORT_SRC (1)
-+#define NET_HEADER_FIELD_DCCP_PORT_DST (NET_HEADER_FIELD_DCCP_PORT_SRC << 1)
-+#define NET_HEADER_FIELD_DCCP_ALL_FIELDS ((NET_HEADER_FIELD_DCCP_PORT_SRC << 2) - 1)
-+
-+#define NET_HEADER_FIELD_DCCP_PORT_SIZE 2
-+
-+
-+typedef uint8_t headerFieldUdp_t;
-+
-+#define NET_HEADER_FIELD_UDP_PORT_SRC (1)
-+#define NET_HEADER_FIELD_UDP_PORT_DST (NET_HEADER_FIELD_UDP_PORT_SRC << 1)
-+#define NET_HEADER_FIELD_UDP_LEN (NET_HEADER_FIELD_UDP_PORT_SRC << 2)
-+#define NET_HEADER_FIELD_UDP_CKSUM (NET_HEADER_FIELD_UDP_PORT_SRC << 3)
-+#define NET_HEADER_FIELD_UDP_ALL_FIELDS ((NET_HEADER_FIELD_UDP_PORT_SRC << 4) - 1)
-+
-+#define NET_HEADER_FIELD_UDP_PORT_SIZE 2
-+
-+typedef uint8_t headerFieldUdpLite_t;
-+
-+#define NET_HEADER_FIELD_UDP_LITE_PORT_SRC (1)
-+#define NET_HEADER_FIELD_UDP_LITE_PORT_DST (NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 1)
-+#define NET_HEADER_FIELD_UDP_LITE_ALL_FIELDS ((NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 2) - 1)
-+
-+#define NET_HEADER_FIELD_UDP_LITE_PORT_SIZE 2
-+
-+typedef uint8_t headerFieldUdpEncapEsp_t;
-+
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC (1)
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 1)
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 2)
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 3)
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 4)
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 5)
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS ((NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 6) - 1)
-+
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SIZE 2
-+#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI_SIZE 4
-+
-+#define NET_HEADER_FIELD_IPHC_CID (1)
-+#define NET_HEADER_FIELD_IPHC_CID_TYPE (NET_HEADER_FIELD_IPHC_CID << 1)
-+#define NET_HEADER_FIELD_IPHC_HCINDEX (NET_HEADER_FIELD_IPHC_CID << 2)
-+#define NET_HEADER_FIELD_IPHC_GEN (NET_HEADER_FIELD_IPHC_CID << 3)
-+#define NET_HEADER_FIELD_IPHC_D_BIT (NET_HEADER_FIELD_IPHC_CID << 4)
-+#define NET_HEADER_FIELD_IPHC_ALL_FIELDS ((NET_HEADER_FIELD_IPHC_CID << 5) - 1)
-+
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE (1)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_FLAGS (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 1)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_LENGTH (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 2)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_TSN (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 3)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_ID (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 4)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_SQN (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 5)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_PAYLOAD_PID (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 6)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_UNORDERED (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 7)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_BEGGINING (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 8)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_END (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 9)
-+#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS ((NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
-+
-+#define NET_HEADER_FIELD_L2TPv2_TYPE_BIT (1)
-+#define NET_HEADER_FIELD_L2TPv2_LENGTH_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 1)
-+#define NET_HEADER_FIELD_L2TPv2_SEQUENCE_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 2)
-+#define NET_HEADER_FIELD_L2TPv2_OFFSET_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 3)
-+#define NET_HEADER_FIELD_L2TPv2_PRIORITY_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 4)
-+#define NET_HEADER_FIELD_L2TPv2_VERSION (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 5)
-+#define NET_HEADER_FIELD_L2TPv2_LEN (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 6)
-+#define NET_HEADER_FIELD_L2TPv2_TUNNEL_ID (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 7)
-+#define NET_HEADER_FIELD_L2TPv2_SESSION_ID (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 8)
-+#define NET_HEADER_FIELD_L2TPv2_NS (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 9)
-+#define NET_HEADER_FIELD_L2TPv2_NR (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 10)
-+#define NET_HEADER_FIELD_L2TPv2_OFFSET_SIZE (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 11)
-+#define NET_HEADER_FIELD_L2TPv2_FIRST_BYTE (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 12)
-+#define NET_HEADER_FIELD_L2TPv2_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 13) - 1)
-+
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT (1)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH_BIT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 1)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_SEQUENCE_BIT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 2)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_VERSION (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 3)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 4)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_CONTROL (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 5)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_SENT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 6)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_RECV (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 7)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_FIRST_BYTE (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 8)
-+#define NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 9) - 1)
-+
-+#define NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT (1)
-+#define NET_HEADER_FIELD_L2TPv3_SESS_VERSION (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 1)
-+#define NET_HEADER_FIELD_L2TPv3_SESS_ID (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 2)
-+#define NET_HEADER_FIELD_L2TPv3_SESS_COOKIE (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 3)
-+#define NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 4) - 1)
-+
-+
-+typedef uint8_t headerFieldVlan_t;
-+
-+#define NET_HEADER_FIELD_VLAN_VPRI (1)
-+#define NET_HEADER_FIELD_VLAN_CFI (NET_HEADER_FIELD_VLAN_VPRI << 1)
-+#define NET_HEADER_FIELD_VLAN_VID (NET_HEADER_FIELD_VLAN_VPRI << 2)
-+#define NET_HEADER_FIELD_VLAN_LENGTH (NET_HEADER_FIELD_VLAN_VPRI << 3)
-+#define NET_HEADER_FIELD_VLAN_TYPE (NET_HEADER_FIELD_VLAN_VPRI << 4)
-+#define NET_HEADER_FIELD_VLAN_ALL_FIELDS ((NET_HEADER_FIELD_VLAN_VPRI << 5) - 1)
-+
-+#define NET_HEADER_FIELD_VLAN_TCI (NET_HEADER_FIELD_VLAN_VPRI | \
-+ NET_HEADER_FIELD_VLAN_CFI | \
-+ NET_HEADER_FIELD_VLAN_VID)
-+
-+
-+typedef uint8_t headerFieldLlc_t;
-+
-+#define NET_HEADER_FIELD_LLC_DSAP (1)
-+#define NET_HEADER_FIELD_LLC_SSAP (NET_HEADER_FIELD_LLC_DSAP << 1)
-+#define NET_HEADER_FIELD_LLC_CTRL (NET_HEADER_FIELD_LLC_DSAP << 2)
-+#define NET_HEADER_FIELD_LLC_ALL_FIELDS ((NET_HEADER_FIELD_LLC_DSAP << 3) - 1)
-+
-+#define NET_HEADER_FIELD_NLPID_NLPID (1)
-+#define NET_HEADER_FIELD_NLPID_ALL_FIELDS ((NET_HEADER_FIELD_NLPID_NLPID << 1) - 1)
-+
-+
-+typedef uint8_t headerFieldSnap_t;
-+
-+#define NET_HEADER_FIELD_SNAP_OUI (1)
-+#define NET_HEADER_FIELD_SNAP_PID (NET_HEADER_FIELD_SNAP_OUI << 1)
-+#define NET_HEADER_FIELD_SNAP_ALL_FIELDS ((NET_HEADER_FIELD_SNAP_OUI << 2) - 1)
-+
-+
-+typedef uint8_t headerFieldLlcSnap_t;
-+
-+#define NET_HEADER_FIELD_LLC_SNAP_TYPE (1)
-+#define NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS ((NET_HEADER_FIELD_LLC_SNAP_TYPE << 1) - 1)
-+
-+#define NET_HEADER_FIELD_ARP_HTYPE (1)
-+#define NET_HEADER_FIELD_ARP_PTYPE (NET_HEADER_FIELD_ARP_HTYPE << 1)
-+#define NET_HEADER_FIELD_ARP_HLEN (NET_HEADER_FIELD_ARP_HTYPE << 2)
-+#define NET_HEADER_FIELD_ARP_PLEN (NET_HEADER_FIELD_ARP_HTYPE << 3)
-+#define NET_HEADER_FIELD_ARP_OPER (NET_HEADER_FIELD_ARP_HTYPE << 4)
-+#define NET_HEADER_FIELD_ARP_SHA (NET_HEADER_FIELD_ARP_HTYPE << 5)
-+#define NET_HEADER_FIELD_ARP_SPA (NET_HEADER_FIELD_ARP_HTYPE << 6)
-+#define NET_HEADER_FIELD_ARP_THA (NET_HEADER_FIELD_ARP_HTYPE << 7)
-+#define NET_HEADER_FIELD_ARP_TPA (NET_HEADER_FIELD_ARP_HTYPE << 8)
-+#define NET_HEADER_FIELD_ARP_ALL_FIELDS ((NET_HEADER_FIELD_ARP_HTYPE << 9) - 1)
-+
-+#define NET_HEADER_FIELD_RFC2684_LLC (1)
-+#define NET_HEADER_FIELD_RFC2684_NLPID (NET_HEADER_FIELD_RFC2684_LLC << 1)
-+#define NET_HEADER_FIELD_RFC2684_OUI (NET_HEADER_FIELD_RFC2684_LLC << 2)
-+#define NET_HEADER_FIELD_RFC2684_PID (NET_HEADER_FIELD_RFC2684_LLC << 3)
-+#define NET_HEADER_FIELD_RFC2684_VPN_OUI (NET_HEADER_FIELD_RFC2684_LLC << 4)
-+#define NET_HEADER_FIELD_RFC2684_VPN_IDX (NET_HEADER_FIELD_RFC2684_LLC << 5)
-+#define NET_HEADER_FIELD_RFC2684_ALL_FIELDS ((NET_HEADER_FIELD_RFC2684_LLC << 6) - 1)
-+
-+#define NET_HEADER_FIELD_USER_DEFINED_SRCPORT (1)
-+#define NET_HEADER_FIELD_USER_DEFINED_PCDID (NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 1)
-+#define NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS ((NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 2) - 1)
-+
-+#define NET_HEADER_FIELD_PAYLOAD_BUFFER (1)
-+#define NET_HEADER_FIELD_PAYLOAD_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 1)
-+#define NET_HEADER_FIELD_MAX_FRM_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 2)
-+#define NET_HEADER_FIELD_MIN_FRM_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 3)
-+#define NET_HEADER_FIELD_PAYLOAD_TYPE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 4)
-+#define NET_HEADER_FIELD_FRAME_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 5)
-+#define NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS ((NET_HEADER_FIELD_PAYLOAD_BUFFER << 6) - 1)
-+
-+
-+typedef uint8_t headerFieldGre_t;
-+
-+#define NET_HEADER_FIELD_GRE_TYPE (1)
-+#define NET_HEADER_FIELD_GRE_ALL_FIELDS ((NET_HEADER_FIELD_GRE_TYPE << 1) - 1)
-+
-+
-+typedef uint8_t headerFieldMinencap_t;
-+
-+#define NET_HEADER_FIELD_MINENCAP_SRC_IP (1)
-+#define NET_HEADER_FIELD_MINENCAP_DST_IP (NET_HEADER_FIELD_MINENCAP_SRC_IP << 1)
-+#define NET_HEADER_FIELD_MINENCAP_TYPE (NET_HEADER_FIELD_MINENCAP_SRC_IP << 2)
-+#define NET_HEADER_FIELD_MINENCAP_ALL_FIELDS ((NET_HEADER_FIELD_MINENCAP_SRC_IP << 3) - 1)
-+
-+
-+typedef uint8_t headerFieldIpsecAh_t;
-+
-+#define NET_HEADER_FIELD_IPSEC_AH_SPI (1)
-+#define NET_HEADER_FIELD_IPSEC_AH_NH (NET_HEADER_FIELD_IPSEC_AH_SPI << 1)
-+#define NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS ((NET_HEADER_FIELD_IPSEC_AH_SPI << 2) - 1)
-+
-+
-+typedef uint8_t headerFieldIpsecEsp_t;
-+
-+#define NET_HEADER_FIELD_IPSEC_ESP_SPI (1)
-+#define NET_HEADER_FIELD_IPSEC_ESP_SEQUENCE_NUM (NET_HEADER_FIELD_IPSEC_ESP_SPI << 1)
-+#define NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS ((NET_HEADER_FIELD_IPSEC_ESP_SPI << 2) - 1)
-+
-+#define NET_HEADER_FIELD_IPSEC_ESP_SPI_SIZE 4
-+
-+
-+typedef uint8_t headerFieldMpls_t;
-+
-+#define NET_HEADER_FIELD_MPLS_LABEL_STACK (1)
-+#define NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS ((NET_HEADER_FIELD_MPLS_LABEL_STACK << 1) - 1)
-+
-+
-+typedef uint8_t headerFieldMacsec_t;
-+
-+#define NET_HEADER_FIELD_MACSEC_SECTAG (1)
-+#define NET_HEADER_FIELD_MACSEC_ALL_FIELDS ((NET_HEADER_FIELD_MACSEC_SECTAG << 1) - 1)
-+
-+
-+typedef enum {
-+ HEADER_TYPE_NONE = 0,
-+ HEADER_TYPE_PAYLOAD,
-+ HEADER_TYPE_ETH,
-+ HEADER_TYPE_VLAN,
-+ HEADER_TYPE_IPv4,
-+ HEADER_TYPE_IPv6,
-+ HEADER_TYPE_IP,
-+ HEADER_TYPE_TCP,
-+ HEADER_TYPE_UDP,
-+ HEADER_TYPE_UDP_LITE,
-+ HEADER_TYPE_IPHC,
-+ HEADER_TYPE_SCTP,
-+ HEADER_TYPE_SCTP_CHUNK_DATA,
-+ HEADER_TYPE_PPPoE,
-+ HEADER_TYPE_PPP,
-+ HEADER_TYPE_PPPMUX,
-+ HEADER_TYPE_PPPMUX_SUBFRAME,
-+ HEADER_TYPE_L2TPv2,
-+ HEADER_TYPE_L2TPv3_CTRL,
-+ HEADER_TYPE_L2TPv3_SESS,
-+ HEADER_TYPE_LLC,
-+ HEADER_TYPE_LLC_SNAP,
-+ HEADER_TYPE_NLPID,
-+ HEADER_TYPE_SNAP,
-+ HEADER_TYPE_MPLS,
-+ HEADER_TYPE_IPSEC_AH,
-+ HEADER_TYPE_IPSEC_ESP,
-+ HEADER_TYPE_UDP_ENCAP_ESP, /* RFC 3948 */
-+ HEADER_TYPE_MACSEC,
-+ HEADER_TYPE_GRE,
-+ HEADER_TYPE_MINENCAP,
-+ HEADER_TYPE_DCCP,
-+ HEADER_TYPE_ICMP,
-+ HEADER_TYPE_IGMP,
-+ HEADER_TYPE_ARP,
-+ HEADER_TYPE_CAPWAP,
-+ HEADER_TYPE_CAPWAP_DTLS,
-+ HEADER_TYPE_RFC2684,
-+ HEADER_TYPE_USER_DEFINED_L2,
-+ HEADER_TYPE_USER_DEFINED_L3,
-+ HEADER_TYPE_USER_DEFINED_L4,
-+ HEADER_TYPE_USER_DEFINED_SHIM1,
-+ HEADER_TYPE_USER_DEFINED_SHIM2,
-+ MAX_HEADER_TYPE_COUNT
-+} e_NetHeaderType;
-+
-+
-+#endif /* __NET_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/std_ext.h
-@@ -0,0 +1,48 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File std_ext.h
-+
-+ @Description General Standard Definitions
-+*//***************************************************************************/
-+
-+#ifndef __STD_EXT_H
-+#define __STD_EXT_H
-+
-+
-+#include "types_ext.h"
-+#include "ncsw_ext.h"
-+
-+
-+#endif /* __STD_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/stdarg_ext.h
-@@ -0,0 +1,49 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __STDARG_EXT_H
-+#define __STDARG_EXT_H
-+
-+
-+#if defined(NCSW_LINUX) && defined(__KERNEL__)
-+#include <stdarg.h>
-+
-+#else
-+#include <stdarg.h>
-+
-+#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
-+
-+#include "std_ext.h"
-+
-+
-+#endif /* __STDARG_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/stdlib_ext.h
-@@ -0,0 +1,162 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+
-+#ifndef __STDLIB_EXT_H
-+#define __STDLIB_EXT_H
-+
-+
-+#if (defined(NCSW_LINUX)) && defined(__KERNEL__)
-+#include "stdarg_ext.h"
-+#include "std_ext.h"
-+
-+
-+/**
-+ * strtoul - convert a string to an uint32_t
-+ * @cp: The start of the string
-+ * @endp: A pointer to the end of the parsed string will be placed here
-+ * @base: The number base to use
-+ */
-+uint32_t strtoul(const char *cp,char **endp,uint32_t base);
-+
-+/**
-+ * strtol - convert a string to an int32_t
-+ * @cp: The start of the string
-+ * @endp: A pointer to the end of the parsed string will be placed here
-+ * @base: The number base to use
-+ */
-+long strtol(const char *cp,char **endp,uint32_t base);
-+
-+/**
-+ * strtoull - convert a string to an uint64_t
-+ * @cp: The start of the string
-+ * @endp: A pointer to the end of the parsed string will be placed here
-+ * @base: The number base to use
-+ */
-+uint64_t strtoull(const char *cp,char **endp,uint32_t base);
-+
-+/**
-+ * strtoll - convert a string to a long long
-+ * @cp: The start of the string
-+ * @endp: A pointer to the end of the parsed string will be placed here
-+ * @base: The number base to use
-+ */
-+long long strtoll(const char *cp,char **endp,uint32_t base);
-+
-+/**
-+ * atoi - convert a string to an int
-+ * @s: The start of the string
-+ */
-+int atoi(const char *s);
-+
-+/**
-+ * strnlen - Find the length of a length-limited string
-+ * @s: The string to be sized
-+ * @count: The maximum number of bytes to search
-+ */
-+size_t strnlen(const char * s, size_t count);
-+
-+/**
-+ * strlen - Find the length of a string
-+ * @s: The string to be sized
-+ */
-+size_t strlen(const char * s);
-+
-+/**
-+ * strtok - Split a string into tokens
-+ * @s: The string to be searched
-+ * @ct: The characters to search for
-+ *
-+ * WARNING: strtok is deprecated, use strsep instead.
-+ */
-+char * strtok(char * s,const char * ct);
-+
-+/**
-+ * strncpy - Copy a length-limited, %NUL-terminated string
-+ * @dest: Where to copy the string to
-+ * @src: Where to copy the string from
-+ * @count: The maximum number of bytes to copy
-+ *
-+ * Note that unlike userspace strncpy, this does not %NUL-pad the buffer.
-+ * However, the result is not %NUL-terminated if the source exceeds
-+ * @count bytes.
-+ */
-+char * strncpy(char * dest,const char *src,size_t count);
-+
-+/**
-+ * strcpy - Copy a %NUL terminated string
-+ * @dest: Where to copy the string to
-+ * @src: Where to copy the string from
-+ */
-+char * strcpy(char * dest,const char *src);
-+
-+/**
-+ * vsscanf - Unformat a buffer into a list of arguments
-+ * @buf: input buffer
-+ * @fmt: format of buffer
-+ * @args: arguments
-+ */
-+int vsscanf(const char * buf, const char * fmt, va_list args);
-+
-+/**
-+ * vsnprintf - Format a string and place it in a buffer
-+ * @buf: The buffer to place the result into
-+ * @size: The size of the buffer, including the trailing null space
-+ * @fmt: The format string to use
-+ * @args: Arguments for the format string
-+ *
-+ * Call this function if you are already dealing with a va_list.
-+ * You probably want snprintf instead.
-+ */
-+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-+
-+/**
-+ * vsprintf - Format a string and place it in a buffer
-+ * @buf: The buffer to place the result into
-+ * @fmt: The format string to use
-+ * @args: Arguments for the format string
-+ *
-+ * Call this function if you are already dealing with a va_list.
-+ * You probably want sprintf instead.
-+ */
-+int vsprintf(char *buf, const char *fmt, va_list args);
-+
-+#else
-+#include <stdlib.h>
-+#include <stdio.h>
-+#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
-+
-+#include "std_ext.h"
-+
-+
-+#endif /* __STDLIB_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/string_ext.h
-@@ -0,0 +1,56 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef __STRING_EXT_H
-+#define __STRING_EXT_H
-+
-+
-+#if defined(NCSW_LINUX) && defined(__KERNEL__)
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+extern char * strtok ( char * str, const char * delimiters );
-+
-+#elif defined(__KERNEL__)
-+#include "linux/types.h"
-+#include "linux/posix_types.h"
-+#include "linux/string.h"
-+
-+#else
-+#include <string.h>
-+
-+#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */
-+
-+#include "std_ext.h"
-+
-+
-+#endif /* __STRING_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/types_ext.h
-@@ -0,0 +1,62 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File types_ext.h
-+
-+ @Description General types Standard Definitions
-+*//***************************************************************************/
-+
-+#ifndef __TYPES_EXT_H
-+#define __TYPES_EXT_H
-+
-+#if defined(NCSW_LINUX)
-+#include "types_linux.h"
-+
-+#elif defined(NCSW_VXWORKS)
-+#include "types_vxworks.h"
-+
-+#elif defined(__GNUC__) && defined(__cplusplus)
-+#include "types_bb_gpp.h"
-+
-+#elif defined(__GNUC__)
-+#include "types_bb_gcc.h"
-+
-+#elif defined(__ghs__)
-+#include "types_ghs.h"
-+
-+#else
-+#include "types_dflt.h"
-+#endif /* defined(NCSW_LINUX) */
-+
-+#endif /* __TYPES_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/xx_common.h
-@@ -0,0 +1,56 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File          xx_common.h
-+
-+ @Description   Unified module codes shared across the XX (system interface) layer.
-+*//***************************************************************************/
-+
-+#ifndef __XX_COMMON_H
-+#define __XX_COMMON_H
-+
-+/*****************************************************************************
-+ * UNIFIED MODULE CODES
-+ *****************************************************************************/
-+#define MODULE_UNKNOWN 0x00000000
-+#define MODULE_FM 0x00010000
-+#define MODULE_FM_MURAM 0x00020000
-+#define MODULE_FM_PCD 0x00030000
-+#define MODULE_FM_RTC 0x00040000
-+#define MODULE_FM_MAC 0x00050000
-+#define MODULE_FM_PORT 0x00060000
-+#define MODULE_MM 0x00070000
-+#define MODULE_FM_SP 0x00080000
-+#define MODULE_FM_MACSEC 0x00090000
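-+
-+/*
-+ * Editor's note - not part of the original header. These module codes occupy
-+ * the high word of a t_Error (see ncsw_ext.h); the low word carries the error
-+ * type. A hypothetical composition, assuming an e_ErrorType value such as
-+ * E_INVALID_VALUE defined elsewhere in the SDK:
-+ *
-+ *   t_Error err = (t_Error)(MODULE_FM_PCD | E_INVALID_VALUE);
-+ */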
-+#endif /* __XX_COMMON_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/inc/xx_ext.h
-@@ -0,0 +1,791 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File xx_ext.h
-+
-+ @Description Prototypes, externals and typedefs for system-supplied
-+ (external) routines
-+*//***************************************************************************/
-+
-+#ifndef __XX_EXT_H
-+#define __XX_EXT_H
-+
-+#include "std_ext.h"
-+#include "xx_common.h"
-+#include "part_ext.h"
-+
-+
-+
-+/**************************************************************************//**
-+ @Group xx_id XX Interface (System call hooks)
-+
-+ @Description Prototypes, externals and typedefs for system-supplied
-+ (external) routines
-+
-+ @{
-+*//***************************************************************************/
-+
-+#ifdef DEBUG_XX_MALLOC
-+void * XX_MallocDebug(uint32_t size, char *fname, int line);
-+
-+void * XX_MallocSmartDebug(uint32_t size,
-+ int memPartitionId,
-+ uint32_t alignment,
-+ char *fname,
-+ int line);
-+
-+#define XX_Malloc(sz) \
-+ XX_MallocDebug((sz), __FILE__, __LINE__)
-+
-+#define XX_MallocSmart(sz, memt, al) \
-+ XX_MallocSmartDebug((sz), (memt), (al), __FILE__, __LINE__)
-+
-+#else /* not DEBUG_XX_MALLOC */
-+/**************************************************************************//**
-+ @Function XX_Malloc
-+
-+ @Description   Allocates a contiguous block of memory.
-+
-+ @Param[in] size - Number of bytes to allocate.
-+
-+ @Return The address of the newly allocated block on success, NULL on failure.
-+*//***************************************************************************/
-+void * XX_Malloc(uint32_t size);
-+
-+/**************************************************************************//**
-+ @Function XX_MallocSmart
-+
-+ @Description   Allocates a contiguous block of memory with the specified
-+                 alignment, from the specified memory partition.
-+
-+ @Param[in] size - Number of bytes to allocate.
-+ @Param[in] memPartitionId - Memory partition ID; The value zero must
-+ be mapped to the default heap partition.
-+ @Param[in] alignment - Required memory alignment (in bytes).
-+
-+ @Return The address of the newly allocated block on success, NULL on failure.
-+*//***************************************************************************/
-+void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment);
-+#endif /* not DEBUG_XX_MALLOC */
-+
-+/**************************************************************************//**
-+ @Function XX_FreeSmart
-+
-+ @Description   Frees the memory block pointed to by p_Memory.
-+                 Use only for memory allocated by XX_MallocSmart().
-+
-+ @Param[in] p_Memory - pointer to the memory block.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_FreeSmart(void *p_Memory);
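-+
-+/*
-+ * Editor's illustrative sketch - not part of the original header.
-+ * XX_MallocSmart() and XX_FreeSmart() are used as a pair; partition 0 is the
-+ * default heap, per the description above.
-+ *
-+ *   void *p_Buf = XX_MallocSmart(1024, 0, 64);   // 1 KB, 64-byte aligned
-+ *   if (p_Buf)
-+ *       XX_FreeSmart(p_Buf);
-+ */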
-+
-+/**************************************************************************//**
-+ @Function XX_Free
-+
-+ @Description   Frees the memory block pointed to by p_Memory.
-+
-+ @Param[in] p_Memory - pointer to the memory block.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_Free(void *p_Memory);
-+
-+/**************************************************************************//**
-+ @Function XX_Print
-+
-+ @Description   Prints a string.
-+
-+ @Param[in] str - string to print.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_Print(char *str, ...);
-+
-+/**************************************************************************//**
-+ @Function XX_SetIntr
-+
-+ @Description Set an interrupt service routine for a specific interrupt source.
-+
-+ @Param[in] irq - Interrupt ID (system-specific number).
-+ @Param[in] f_Isr - Callback routine that will be called when the interrupt occurs.
-+ @Param[in] handle - The argument for the user callback routine.
-+
-+ @Return E_OK on success; error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle);
-+
-+/**************************************************************************//**
-+ @Function XX_FreeIntr
-+
-+ @Description Free a specific interrupt and a specific callback routine.
-+
-+ @Param[in] irq - Interrupt ID (system-specific number).
-+
-+ @Return E_OK on success; error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_FreeIntr(int irq);
-+
-+/**************************************************************************//**
-+ @Function XX_EnableIntr
-+
-+ @Description Enable a specific interrupt.
-+
-+ @Param[in] irq - Interrupt ID (system-specific number).
-+
-+ @Return E_OK on success; error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_EnableIntr(int irq);
-+
-+/**************************************************************************//**
-+ @Function XX_DisableIntr
-+
-+ @Description Disable a specific interrupt.
-+
-+ @Param[in] irq - Interrupt ID (system-specific number).
-+
-+ @Return E_OK on success; error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_DisableIntr(int irq);
-+
-+/**************************************************************************//**
-+ @Function XX_DisableAllIntr
-+
-+ @Description Disable all interrupts by masking them at the CPU.
-+
-+ @Return A value that represents the interrupts state before the
-+ operation, and should be passed to the matching
-+ XX_RestoreAllIntr() call.
-+*//***************************************************************************/
-+uint32_t XX_DisableAllIntr(void);
-+
-+/**************************************************************************//**
-+ @Function XX_RestoreAllIntr
-+
-+ @Description Restore previous state of interrupts level at the CPU.
-+
-+ @Param[in] flags - A value that represents the interrupts state to restore,
-+ as returned by the matching call for XX_DisableAllIntr().
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_RestoreAllIntr(uint32_t flags);
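A short sketch of the save/restore pattern these two routines imply (illustrative only):

static void example_critical_section(void)
{
    uint32_t intrFlags = XX_DisableAllIntr();

    /* ... short critical section with all CPU interrupts masked ... */

    XX_RestoreAllIntr(intrFlags); /* must receive the value returned above */
}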
-+
-+
-+/**************************************************************************//**
-+ @Function XX_Exit
-+
-+ @Description Stops execution and reports status (where applicable).
-+
-+ @Param[in] status - exit status
-+*//***************************************************************************/
-+void XX_Exit(int status);
-+
-+
-+/*****************************************************************************/
-+/* Tasklet Service Routines */
-+/*****************************************************************************/
-+typedef t_Handle t_TaskletHandle;
-+
-+/**************************************************************************//**
-+ @Function XX_InitTasklet
-+
-+ @Description Create and initialize a tasklet object.
-+
-+ @Param[in] routine - A routine to be run as a tasklet.
-+ @Param[in] data - An argument to pass to the tasklet.
-+
-+ @Return Tasklet handle is returned on success. NULL is returned otherwise.
-+*//***************************************************************************/
-+t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data);
-+
-+/**************************************************************************//**
-+ @Function XX_FreeTasklet
-+
-+ @Description Free a tasklet object.
-+
-+ @Param[in] h_Tasklet - A handle to a tasklet to be freed.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_FreeTasklet (t_TaskletHandle h_Tasklet);
-+
-+/**************************************************************************//**
-+ @Function XX_ScheduleTask
-+
-+ @Description Schedule a tasklet object.
-+
-+ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
-+ @Param[in] immediate - Indicate whether to schedule this tasklet on
-+ the immediate queue or on the delayed one.
-+
-+ @Return 0 - on success. Error code - otherwise.
-+*//***************************************************************************/
-+int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate);
-+
-+/**************************************************************************//**
-+ @Function XX_FlushScheduledTasks
-+
-+ @Description Flushes all tasks in the scheduled tasks queue.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_FlushScheduledTasks(void);
-+
-+/**************************************************************************//**
-+ @Function XX_TaskletIsQueued
-+
-+ @Description Checks whether the tasklet is queued.
-+
-+ @Param[in] h_Tasklet - A handle to the tasklet to check.
-+
-+ @Return 1 - task is queued. 0 - otherwise.
-+*//***************************************************************************/
-+int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet);
-+
-+/**************************************************************************//**
-+ @Function XX_SetTaskletData
-+
-+ @Description Sets the data of a scheduled task. Used to change the data of
-+ an already scheduled task.
-+
-+ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
-+ @Param[in] data - Data to be set.
-+*//***************************************************************************/
-+void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data);
-+
-+/**************************************************************************//**
-+ @Function XX_GetTaskletData
-+
-+ @Description Gets the data of a scheduled task.
-+
-+ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
-+
-+ @Return Handle to the data of the task.
-+*//***************************************************************************/
-+t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet);
-+
-+/**************************************************************************//**
-+ @Function XX_BottomHalf
-+
-+ @Description Bottom half implementation, invoked by the interrupt handler.
-+
-+ This routine handles all bottom-half tasklets with interrupts
-+ enabled.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_BottomHalf(void);
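A minimal sketch of the tasklet life cycle using only the routines declared above; the handler and its argument are made up for illustration:

static void example_tasklet_handler(void *data)
{
    XX_Print("tasklet ran, data=%p\n", data);
}

static void example_tasklet_usage(void *ctx)
{
    t_TaskletHandle hTasklet = XX_InitTasklet(example_tasklet_handler, ctx);

    if (!hTasklet)
        return;

    XX_ScheduleTask(hTasklet, 1);  /* queue on the immediate queue */

    XX_FlushScheduledTasks();      /* make sure it has run before freeing */
    XX_FreeTasklet(hTasklet);
}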
-+
-+
-+/*****************************************************************************/
-+/* Spinlock Service Routines */
-+/*****************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function XX_InitSpinlock
-+
-+ @Description Creates a spinlock.
-+
-+ @Return Spinlock handle is returned on success; NULL otherwise.
-+*//***************************************************************************/
-+t_Handle XX_InitSpinlock(void);
-+
-+/**************************************************************************//**
-+ @Function XX_FreeSpinlock
-+
-+ @Description Frees the memory allocated for the spinlock creation.
-+
-+ @Param[in] h_Spinlock - A handle to a spinlock.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_FreeSpinlock(t_Handle h_Spinlock);
-+
-+/**************************************************************************//**
-+ @Function XX_LockSpinlock
-+
-+ @Description Locks a spinlock.
-+
-+ @Param[in] h_Spinlock - A handle to a spinlock.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_LockSpinlock(t_Handle h_Spinlock);
-+
-+/**************************************************************************//**
-+ @Function XX_UnlockSpinlock
-+
-+ @Description Unlocks a spinlock.
-+
-+ @Param[in] h_Spinlock - A handle to a spinlock.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_UnlockSpinlock(t_Handle h_Spinlock);
-+
-+/**************************************************************************//**
-+ @Function XX_LockIntrSpinlock
-+
-+ @Description Locks a spinlock (interrupt safe).
-+
-+ @Param[in] h_Spinlock - A handle to a spinlock.
-+
-+ @Return A value that represents the interrupts state before the
-+ operation, and should be passed to the matching
-+ XX_UnlockIntrSpinlock() call.
-+*//***************************************************************************/
-+uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock);
-+
-+/**************************************************************************//**
-+ @Function XX_UnlockIntrSpinlock
-+
-+ @Description Unlocks a spinlock (interrupt safe).
-+
-+ @Param[in] h_Spinlock - A handle to a spinlock.
-+ @Param[in] intrFlags - A value that represents the interrupts state to
-+ restore, as returned by the matching call for
-+ XX_LockIntrSpinlock().
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags);
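A sketch of interrupt-safe spinlock usage with the routines above (illustrative only):

static void example_spinlock_usage(void)
{
    t_Handle hLock = XX_InitSpinlock();
    uint32_t intrFlags;

    if (!hLock)
        return;

    intrFlags = XX_LockIntrSpinlock(hLock);
    /* ... touch data shared with an interrupt handler ... */
    XX_UnlockIntrSpinlock(hLock, intrFlags); /* restore the saved flags */

    XX_FreeSpinlock(hLock);
}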
-+
-+
-+/*****************************************************************************/
-+/* Timers Service Routines */
-+/*****************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function XX_CurrentTime
-+
-+ @Description Returns current system time.
-+
-+ @Return Current system time (in milliseconds).
-+*//***************************************************************************/
-+uint32_t XX_CurrentTime(void);
-+
-+/**************************************************************************//**
-+ @Function XX_CreateTimer
-+
-+ @Description Creates a timer.
-+
-+ @Return Timer handle is returned on success; NULL otherwise.
-+*//***************************************************************************/
-+t_Handle XX_CreateTimer(void);
-+
-+/**************************************************************************//**
-+ @Function XX_FreeTimer
-+
-+ @Description Frees the memory allocated for the timer creation.
-+
-+ @Param[in] h_Timer - A handle to a timer.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_FreeTimer(t_Handle h_Timer);
-+
-+/**************************************************************************//**
-+ @Function XX_StartTimer
-+
-+ @Description Starts a timer.
-+
-+ The user can select to start the timer as a periodic timer or as a
-+ one-shot timer. The user should provide a callback routine that
-+ will be called when the timer expires.
-+
-+ @Param[in] h_Timer - A handle to a timer.
-+ @Param[in] msecs - Timer expiration period (in milliseconds).
-+ @Param[in] periodic - TRUE for a periodic timer;
-+ FALSE for a one-shot timer.
-+ @Param[in] f_TimerExpired - A callback routine to be called when the
-+ timer expires.
-+ @Param[in] h_Arg - The argument to pass in the timer-expired
-+ callback routine.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_StartTimer(t_Handle h_Timer,
-+ uint32_t msecs,
-+ bool periodic,
-+ void (*f_TimerExpired)(t_Handle h_Arg),
-+ t_Handle h_Arg);
-+
-+/**************************************************************************//**
-+ @Function XX_StopTimer
-+
-+ @Description Stops a timer.
-+
-+ @Param[in] h_Timer - A handle to a timer.
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_StopTimer(t_Handle h_Timer);
-+
-+/**************************************************************************//**
-+ @Function XX_ModTimer
-+
-+ @Description Updates the expiration time of a timer.
-+
-+ This routine adds the given time to the current system time,
-+ and sets this value as the new expiration time of the timer.
-+
-+ @Param[in] h_Timer - A handle to a timer.
-+ @Param[in] msecs - The new interval until timer expiration
-+ (in milliseconds).
-+
-+ @Return None.
-+*//***************************************************************************/
-+void XX_ModTimer(t_Handle h_Timer, uint32_t msecs);
-+
-+/**************************************************************************//**
-+ @Function XX_Sleep
-+
-+ @Description Non-busy wait until the desired time (in milliseconds) has passed.
-+
-+ @Param[in] msecs - The requested sleep time (in milliseconds).
-+
-+ @Return Zero if the requested time has elapsed; otherwise, the value
-+ returned is the unslept amount (in milliseconds).
-+
-+ @Cautions This routine enables interrupts during its wait time.
-+*//***************************************************************************/
-+uint32_t XX_Sleep(uint32_t msecs);
-+
-+/**************************************************************************//**
-+ @Function XX_UDelay
-+
-+ @Description Busy-wait until the desired time (in microseconds) has passed.
-+
-+ @Param[in] usecs - The requested delay time (in microseconds).
-+
-+ @Return None.
-+
-+ @Cautions Calling this routine at interrupt time is strongly discouraged,
-+ because the system time may not be updated properly during
-+ the delay loop. The behavior of this routine at interrupt
-+ time is unpredictable.
-+*//***************************************************************************/
-+void XX_UDelay(uint32_t usecs);
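A sketch of a periodic timer built from the routines above; the 100 ms period and the callback are illustrative assumptions:

static void example_timer_expired(t_Handle h_Arg)
{
    XX_Print("timer expired, arg=%p, now=%u ms\n", h_Arg, XX_CurrentTime());
}

static void example_timer_usage(void *ctx)
{
    t_Handle hTimer = XX_CreateTimer();

    if (!hTimer)
        return;

    XX_StartTimer(hTimer, 100, TRUE, example_timer_expired, ctx); /* fire every 100 ms */
    XX_Sleep(1000);       /* let it run for roughly one second */
    XX_StopTimer(hTimer);
    XX_FreeTimer(hTimer);
}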
-+
-+
-+/*****************************************************************************/
-+/* Other Service Routines */
-+/*****************************************************************************/
-+
-+/**************************************************************************//**
-+ @Function XX_PhysToVirt
-+
-+ @Description Translates a physical address to the matching virtual address.
-+
-+ @Param[in] addr - The physical address to translate.
-+
-+ @Return Virtual address.
-+*//***************************************************************************/
-+void * XX_PhysToVirt(physAddress_t addr);
-+
-+/**************************************************************************//**
-+ @Function XX_VirtToPhys
-+
-+ @Description Translates a virtual address to the matching physical address.
-+
-+ @Param[in] addr - The virtual address to translate.
-+
-+ @Return Physical address.
-+*//***************************************************************************/
-+physAddress_t XX_VirtToPhys(void *addr);
-+
-+
-+/**************************************************************************//**
-+ @Group xx_ipc XX Inter-Partition-Communication API
-+
-+ @Description The following API is to be used when working with a
-+ multiple-partition configuration.
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define XX_IPC_MAX_ADDR_NAME_LENGTH 16 /**< Maximum length of an endpoint name string;
-+ The IPC service can use this constant to limit
-+ the storage space for IPC endpoint names. */
-+
-+
-+/**************************************************************************//**
-+ @Function t_IpcMsgCompletion
-+
-+ @Description Callback function used upon IPC non-blocking transaction completion
-+ to return the message buffer to the caller and to forward the reply if available.
-+
-+ This callback function may be attached by the source endpoint to any outgoing
-+ IPC message to indicate a non-blocking send (see also XX_IpcSendMessage() routine).
-+ Upon completion of an IPC transaction (consisting of a message and an optional reply),
-+ the IPC service invokes this callback routine to return the message buffer to the sender
-+ and to provide the received reply, if requested.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_Module - Abstract handle to the sending module - the same handle as was passed
-+ in the XX_IpcSendMessage() function; This handle is typically used to point
-+ to the internal data structure of the source endpoint.
-+ @Param[in] p_Msg - Pointer to original (sent) message buffer;
-+ The source endpoint can free (or reuse) this buffer when message
-+ completion callback is called.
-+ @Param[in] p_Reply - Pointer to (received) reply buffer;
-+ This pointer is the same as was provided by the source endpoint in
-+ XX_IpcSendMessage().
-+ @Param[in] replyLength - Length (in bytes) of actual data in the reply buffer.
-+ @Param[in] status - Completion status - E_OK or failure indication, e.g. IPC transaction completion
-+ timeout.
-+
-+ @Return None
-+ *//***************************************************************************/
-+typedef void (t_IpcMsgCompletion)(t_Handle h_Module,
-+ uint8_t *p_Msg,
-+ uint8_t *p_Reply,
-+ uint32_t replyLength,
-+ t_Error status);
-+
-+/**************************************************************************//**
-+ @Function t_IpcMsgHandler
-+
-+ @Description Callback function used as IPC message handler.
-+
-+ The IPC service invokes message handlers for each IPC message received.
-+ The actual function pointer should be registered by each destination endpoint
-+ via the XX_IpcRegisterMsgHandler() routine.
-+
-+ User provides this function. Driver invokes it.
-+
-+ @Param[in] h_Module - Abstract handle to the message handling module - the same handle as
-+ was passed in the XX_IpcRegisterMsgHandler() function; this handle is
-+ typically used to point to the internal data structure of the destination
-+ endpoint.
-+ @Param[in] p_Msg - Pointer to message buffer with data received from peer.
-+ @Param[in] msgLength - Length (in bytes) of message data.
-+ @Param[in] p_Reply - Pointer to reply buffer, to be filled by the message handler and then sent
-+ by the IPC service;
-+ The reply buffer is allocated by the IPC service with a size equal to the
-+ replyLength parameter provided in message handler registration (see
-+ XX_IpcRegisterMsgHandler() function);
-+ If replyLength was initially specified as zero during message handler registration,
-+ the IPC service may set this pointer to NULL and assume that a reply is not needed;
-+ The IPC service is also responsible for freeing the reply buffer after the
-+ reply has been sent or dismissed.
-+ @Param[in,out] p_ReplyLength - Pointer to reply length, which has a dual role in this function:
-+ [In] equals the replyLength parameter provided in message handler
-+ registration (see XX_IpcRegisterMsgHandler() function), and
-+ [Out] should be updated by message handler to the actual reply length; if
-+ this value is set to zero, the IPC service must assume that a reply should
-+ not be sent;
-+ Note: If p_Reply is not NULL, p_ReplyLength must not be NULL as well.
-+
-+ @Return E_OK on success; Error code otherwise.
-+ *//***************************************************************************/
-+typedef t_Error (t_IpcMsgHandler)(t_Handle h_Module,
-+ uint8_t *p_Msg,
-+ uint32_t msgLength,
-+ uint8_t *p_Reply,
-+ uint32_t *p_ReplyLength);
-+
-+/**************************************************************************//**
-+ @Function XX_IpcRegisterMsgHandler
-+
-+ @Description IPC mailbox registration.
-+
-+ This function is used for registering an IPC message handler in the IPC service.
-+ This function is called by each destination endpoint to indicate that it is ready
-+ to handle incoming messages. The IPC service invokes the message handler upon receiving
-+ a message addressed to the specified destination endpoint.
-+
-+ @Param[in] addr - The address name string associated with the destination endpoint;
-+ This address must be unique across the IPC service domain to ensure
-+ correct message routing.
-+ @Param[in] f_MsgHandler - Pointer to the message handler callback for processing incoming
-+ message; invoked by the IPC service upon receiving a message
-+ addressed to the destination endpoint specified by the addr
-+ parameter.
-+ @Param[in] h_Module - Abstract handle to the message handling module, passed unchanged
-+ to f_MsgHandler callback function.
-+ @Param[in] replyLength - The maximal data length (in bytes) of any reply that the specified message handler
-+ may generate; the IPC service provides the message handler with buffer
-+ for reply according to the length specified here (refer also to the description
-+ of #t_IpcMsgHandler callback function type);
-+ This size shall be zero if the message handler never generates replies.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH],
-+ t_IpcMsgHandler *f_MsgHandler,
-+ t_Handle h_Module,
-+ uint32_t replyLength);
-+
-+/**************************************************************************//**
-+ @Function XX_IpcUnregisterMsgHandler
-+
-+ @Description Release IPC mailbox routine.
-+
-+ This function is used for unregistering an IPC message handler from the IPC service.
-+ This function is called by each destination endpoint to indicate that it is no longer
-+ capable of handling incoming messages.
-+
-+ @Param[in] addr - The address name string associated with the destination endpoint;
-+ This address is the same as was used when the message handler was
-+ registered via XX_IpcRegisterMsgHandler().
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH]);
-+
-+/**************************************************************************//**
-+ @Function XX_IpcInitSession
-+
-+ @Description This function is used for creating an IPC session between the source endpoint
-+ and the destination endpoint.
-+
-+ The actual implementation and representation of a session is left for the IPC service.
-+ The function returns an abstract handle to the created session. This handle shall be used
-+ by the source endpoint in subsequent calls to XX_IpcSendMessage().
-+ The IPC service assumes that before this function is called, no messages are sent from
-+ the specified source endpoint to the specified destination endpoint.
-+
-+ The IPC service may use a connection-oriented approach or a connectionless approach (or both)
-+ as described below.
-+
-+ @par Connection-Oriented Approach
-+
-+ The IPC service may implement a session in a connection-oriented approach - when this function is called,
-+ the IPC service should take the necessary steps to bring up a source-to-destination channel for messages
-+ and a destination-to-source channel for replies. The returned handle should represent the internal
-+ representation of these channels.
-+
-+ @par Connectionless Approach
-+
-+ The IPC service may implement a session in a connectionless approach - when this function is called, the
-+ IPC service should not perform any particular steps, but it must store the pair of source and destination
-+ addresses in some session representation and return it as a handle. When XX_IpcSendMessage() shall be
-+ called, the IPC service may use this handle to provide the necessary identifiers for routing the messages
-+ through the connectionless medium.
-+
-+ @Param[in] destAddr - The address name string associated with the destination endpoint.
-+ @Param[in] srcAddr - The address name string associated with the source endpoint.
-+
-+ @Return Abstract handle to the initialized session, or NULL on error.
-+*//***************************************************************************/
-+t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH],
-+ char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH]);
-+
-+/**************************************************************************//**
-+ @Function XX_IpcFreeSession
-+
-+ @Description This function is used for terminating an existing IPC session between a source endpoint
-+ and a destination endpoint.
-+
-+ The IPC service assumes that after this function is called, no messages shall be sent from
-+ the associated source endpoint to the associated destination endpoint.
-+
-+ @Param[in] h_Session - Abstract handle to the IPC session - the same handle as was originally
-+ returned by the XX_IpcInitSession() function.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_IpcFreeSession(t_Handle h_Session);
-+
-+/**************************************************************************//**
-+ @Function XX_IpcSendMessage
-+
-+ @Description IPC message send routine.
-+
-+ This function may be used by a source endpoint to send an IPC message to a destination
-+ endpoint. The source endpoint cannot send a message to the destination endpoint without
-+ first initiating a session with that destination endpoint via XX_IpcInitSession() routine.
-+
-+ The source endpoint must provide the buffer pointer and length of the outgoing message.
-+ Optionally, it may also provide a buffer for an expected reply. In the latter case, the
-+ transaction is not considered complete by the IPC service until the reply has been received.
-+ If the source endpoint does not provide a reply buffer, the transaction is considered
-+ complete after the message has been sent. The source endpoint must keep the message (and
-+ optional reply) buffers valid until the transaction is complete.
-+
-+ @par Non-blocking mode
-+
-+ The source endpoint may request a non-blocking send by providing a non-NULL pointer to a message
-+ completion callback function (f_Completion). Upon completion of the IPC transaction (consisting of a
-+ message and an optional reply), the IPC service invokes this callback routine to return the message
-+ buffer to the sender and to provide the received reply, if requested.
-+
-+ @par Blocking mode
-+
-+ The source endpoint may request a blocking send by setting f_Completion to NULL. The function is
-+ expected to block until the IPC transaction is complete - either the reply has been received or (if no reply
-+ was requested) the message has been sent.
-+
-+ @Param[in] h_Session - Abstract handle to the IPC session - the same handle as was originally
-+ returned by the XX_IpcInitSession() function.
-+ @Param[in] p_Msg - Pointer to message buffer to send.
-+ @Param[in] msgLength - Length (in bytes) of actual data in the message buffer.
-+ @Param[in] p_Reply - Pointer to reply buffer - if this buffer is not NULL, the IPC service
-+ fills this buffer with the received reply data;
-+ In blocking mode, the reply data must be valid when the function returns;
-+ In non-blocking mode, the reply data is valid when f_Completion is called;
-+ If this pointer is NULL, no reply is expected.
-+ @Param[in,out] p_ReplyLength - Pointer to reply length, which has a dual role in this function:
-+ [In] specifies the maximal length (in bytes) of the reply buffer pointed by
-+ p_Reply, and
-+ [Out] in non-blocking mode this value is updated by the IPC service to the
-+ actual reply length (in bytes).
-+ @Param[in] f_Completion - Pointer to a completion callback to be used in non-blocking send mode;
-+ The completion callback is invoked by the IPC service upon
-+ completion of the IPC transaction (consisting of a message and an optional
-+ reply);
-+ If this pointer is NULL, the function is expected to block until the IPC
-+ transaction is complete.
-+ @Param[in] h_Arg - Abstract handle to the sending module; passed unchanged to the f_Completion
-+ callback function as the first argument.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error XX_IpcSendMessage(t_Handle h_Session,
-+ uint8_t *p_Msg,
-+ uint32_t msgLength,
-+ uint8_t *p_Reply,
-+ uint32_t *p_ReplyLength,
-+ t_IpcMsgCompletion *f_Completion,
-+ t_Handle h_Arg);
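A sketch of a blocking IPC round trip using the API above; the endpoint names, buffer sizes, and the E_INVALID_STATE code are illustrative assumptions:

static t_Error example_msg_handler(t_Handle h_Module, uint8_t *p_Msg,
                                   uint32_t msgLength, uint8_t *p_Reply,
                                   uint32_t *p_ReplyLength)
{
    /* Echo the first byte of the message back as a one-byte reply. */
    if (p_Reply && p_ReplyLength && *p_ReplyLength >= 1 && msgLength >= 1)
    {
        p_Reply[0] = p_Msg[0];
        *p_ReplyLength = 1;
    }
    return E_OK;
}

static t_Error example_ipc_usage(void)
{
    uint8_t msg[4] = { 1, 2, 3, 4 };
    uint8_t reply[16];
    uint32_t replyLength = sizeof(reply);
    t_Handle hSession;
    t_Error err;

    /* Destination side: handler may reply with up to 16 bytes. */
    err = XX_IpcRegisterMsgHandler("example-dst", example_msg_handler, NULL, 16);
    if (err != E_OK)
        return err;

    /* Source side: open a session and send one message; f_Completion is NULL,
     * so the call blocks until the reply has been received. */
    hSession = XX_IpcInitSession("example-dst", "example-src");
    if (!hSession)
        return E_INVALID_STATE; /* assumed error code for this sketch */

    err = XX_IpcSendMessage(hSession, msg, sizeof(msg),
                            reply, &replyLength, NULL, NULL);

    XX_IpcFreeSession(hSession);
    XX_IpcUnregisterMsgHandler("example-dst");
    return err;
}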
-+
-+
-+/** @} */ /* end of xx_ipc group */
-+/** @} */ /* end of xx_id group */
-+
-+
-+#endif /* __XX_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/ls1043_dflags.h
-@@ -0,0 +1,56 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __dflags_h
-+#define __dflags_h
-+
-+
-+#define NCSW_LINUX
-+
-+#define LS1043
-+
-+#define DEBUG_ERRORS 1
-+
-+#if defined(DEBUG)
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
-+
-+#define DEBUG_XX_MALLOC
-+#define DEBUG_MEM_LEAKS
-+
-+#else
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
-+#endif /* (DEBUG) */
-+
-+#define REPORT_EVENTS 1
-+#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
-+
-+#endif /* __dflags_h */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-@@ -0,0 +1,53 @@
-+#
-+# Makefile config for the Freescale NetcommSW
-+#
-+NET_DPA = $(srctree)/drivers/net
-+DRV_DPA = $(srctree)/drivers/net/ethernet/freescale/sdk_dpaa
-+FMAN = $(srctree)/drivers/net/ethernet/freescale/sdk_fman
-+
-+ifeq ("$(CONFIG_FMAN_P3040_P4080_P5020)", "y")
-+ccflags-y +=-include $(FMAN)/p3040_4080_5020_dflags.h
-+endif
-+ifeq ("$(CONFIG_FMAN_P1023)", "y")
-+ccflags-y +=-include $(FMAN)/p1023_dflags.h
-+endif
-+ifdef CONFIG_FMAN_V3H
-+ccflags-y +=-include $(FMAN)/fmanv3h_dflags.h
-+endif
-+ifdef CONFIG_FMAN_V3L
-+ccflags-y +=-include $(FMAN)/fmanv3l_dflags.h
-+endif
-+ifdef CONFIG_FMAN_ARM
-+ccflags-y +=-include $(FMAN)/ls1043_dflags.h
-+endif
-+
-+ccflags-y += -I$(DRV_DPA)/
-+ccflags-y += -I$(FMAN)/inc
-+ccflags-y += -I$(FMAN)/inc/cores
-+ccflags-y += -I$(FMAN)/inc/etc
-+ccflags-y += -I$(FMAN)/inc/Peripherals
-+ccflags-y += -I$(FMAN)/inc/flib
-+
-+ifeq ("$(CONFIG_FMAN_P3040_P4080_P5020)", "y")
-+ccflags-y += -I$(FMAN)/inc/integrations/P3040_P4080_P5020
-+endif
-+ifeq ("$(CONFIG_FMAN_P1023)", "y")
-+ccflags-y += -I$(FMAN)/inc/integrations/P1023
-+endif
-+ifdef CONFIG_FMAN_V3H
-+ccflags-y += -I$(FMAN)/inc/integrations/FMANV3H
-+endif
-+ifdef CONFIG_FMAN_V3L
-+ccflags-y += -I$(FMAN)/inc/integrations/FMANV3L
-+endif
-+ifdef CONFIG_FMAN_ARM
-+ccflags-y += -I$(FMAN)/inc/integrations/LS1043
-+endif
-+
-+ccflags-y += -I$(FMAN)/src/inc
-+ccflags-y += -I$(FMAN)/src/inc/system
-+ccflags-y += -I$(FMAN)/src/inc/wrapper
-+ccflags-y += -I$(FMAN)/src/inc/xx
-+ccflags-y += -I$(srctree)/include/uapi/linux/fmd
-+ccflags-y += -I$(srctree)/include/uapi/linux/fmd/Peripherals
-+ccflags-y += -I$(srctree)/include/uapi/linux/fmd/integrations
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/p1023_dflags.h
-@@ -0,0 +1,65 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __dflags_h
-+#define __dflags_h
-+
-+
-+#define NCSW_LINUX
-+#if 0
-+#define DEBUG
-+#endif
-+
-+#define P1023
-+#define NCSW_PPC_CORE
-+
-+#define DEBUG_ERRORS 1
-+
-+#if defined(DEBUG)
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
-+
-+#define DEBUG_XX_MALLOC
-+#define DEBUG_MEM_LEAKS
-+
-+#else
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
-+#endif /* (DEBUG) */
-+
-+#define REPORT_EVENTS 1
-+#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
-+
-+#ifdef CONFIG_P4080_SIM
-+#error "Do not define CONFIG_P4080_SIM..."
-+#endif
-+
-+
-+#endif /* __dflags_h */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/p3040_4080_5020_dflags.h
-@@ -0,0 +1,62 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __dflags_h
-+#define __dflags_h
-+
-+
-+#define NCSW_LINUX
-+
-+#define P4080
-+#define NCSW_PPC_CORE
-+
-+#define DEBUG_ERRORS 1
-+
-+#if defined(DEBUG)
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO
-+
-+#define DEBUG_XX_MALLOC
-+#define DEBUG_MEM_LEAKS
-+
-+#else
-+#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING
-+#endif /* (DEBUG) */
-+
-+#define REPORT_EVENTS 1
-+#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR
-+
-+#ifdef CONFIG_P4080_SIM
-+#define SIMULATOR
-+#endif /* CONFIG_P4080_SIM */
-+
-+
-+#endif /* __dflags_h */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/Makefile
-@@ -0,0 +1,11 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+#
-+obj-y += system/
-+obj-y += wrapper/
-+obj-y += xx/
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_ext.h
-@@ -0,0 +1,118 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __SYS_EXT_H
-+#define __SYS_EXT_H
-+
-+#include "std_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group sys_grp System Interfaces
-+
-+ @Description Linux system programming interfaces.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group sys_gen_grp System General Interface
-+
-+ @Description General definitions, structures and routines of the Linux
-+ system programming interface.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection Macros for Advanced Configuration Requests
-+ @{
-+*//***************************************************************************/
-+#define SYS_MAX_ADV_CONFIG_ARGS 4
-+ /**< Maximum number of arguments in
-+ an advanced configuration entry */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description System Object Advanced Configuration Entry
-+
-+ This structure represents a single request for an advanced
-+ configuration call on the initialized object. An array of such
-+ requests may be contained in the settings structure of the
-+ corresponding object.
-+
-+ The maximum number of arguments is limited to #SYS_MAX_ADV_CONFIG_ARGS.
-+*//***************************************************************************/
-+typedef struct t_SysObjectAdvConfigEntry
-+{
-+ void *p_Function; /**< Pointer to advanced configuration routine */
-+
-+ uintptr_t args[SYS_MAX_ADV_CONFIG_ARGS];
-+ /**< Array of arguments for the specified routine;
-+ All arguments should be cast to uintptr_t. */
-+} t_SysObjectAdvConfigEntry;
-+
-+
-+/** @} */ /* end of sys_gen_grp */
-+/** @} */ /* end of sys_grp */
-+
-+#define NCSW_PARAMS(_num, _params) ADV_CONFIG_PARAMS_##_num _params
-+
-+#define ADV_CONFIG_PARAMS_1(_type) \
-+ , (_type)p_Entry->args[0]
-+
-+#define SET_ADV_CONFIG_ARGS_1(_arg0) \
-+ p_Entry->args[0] = (uintptr_t )(_arg0); \
-+
-+#define ARGS(_num, _params) SET_ADV_CONFIG_ARGS_##_num _params
-+
-+#define ADD_ADV_CONFIG_START(_p_Entries, _maxEntries) \
-+ { \
-+ t_SysObjectAdvConfigEntry *p_Entry; \
-+ t_SysObjectAdvConfigEntry *p_Entrys = (_p_Entries); \
-+ int i=0, max = (_maxEntries); \
-+
-+#define ADD_ADV_CONFIG_END \
-+ }
-+
-+#define ADV_CONFIG_CHECK_START(_p_Entry) \
-+ { \
-+ t_SysObjectAdvConfigEntry *p_Entry = _p_Entry; \
-+ t_Error errCode; \
-+
-+#define ADV_CONFIG_CHECK(_handle, _func, _params) \
-+ if (p_Entry->p_Function == _func) \
-+ { \
-+ errCode = _func(_handle _params); \
-+ } else
-+
-+#endif /* __SYS_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/system/sys_io_ext.h
-@@ -0,0 +1,46 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __SYS_IO_EXT_H
-+#define __SYS_IO_EXT_H
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+
-+
-+t_Error SYS_RegisterIoMap (uint64_t virtAddr, uint64_t physAddr, uint32_t size);
-+t_Error SYS_UnregisterIoMap (uint64_t virtAddr);
-+uint64_t SYS_PhysToVirt (uint64_t addr);
-+uint64_t SYS_VirtToPhys (uint64_t addr);
-+
-+
-+#endif /* __SYS_IO_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/types_linux.h
-@@ -0,0 +1,208 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __TYPES_LINUX_H__
-+#define __TYPES_LINUX_H__
-+
-+#include <linux/version.h>
-+
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#include <config/modversions.h>
-+#endif /* MODVERSIONS */
-+
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <asm/io.h>
-+#include <linux/delay.h>
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
-+ #error "This kernel is probably not supported!!!"
-+#elif (!((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) || \
-+ (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) || \
-+ (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30))))
-+ #warning "This kernel is probably not supported!!! You may need to add some fixes."
-+#endif /* LINUX_VERSION_CODE */
-+
-+
-+typedef float float_t; /* Single precision floating point */
-+typedef double double_t; /* Double precision floating point */
-+
-+
-+#define _Packed
-+#define _PackedType __attribute__ ((packed))
-+
-+typedef phys_addr_t physAddress_t;
-+
-+#define UINT8_MAX 0xFF
-+#define UINT8_MIN 0
-+#define UINT16_MAX 0xFFFF
-+#define UINT16_MIN 0
-+#define UINT32_MAX 0xFFFFFFFF
-+#define UINT32_MIN 0
-+#define UINT64_MAX 0xFFFFFFFFFFFFFFFFLL
-+#define UINT64_MIN 0
-+#define INT8_MAX 0x7F
-+#define INT8_MIN 0x80
-+#define INT16_MAX 0x7FFF
-+#define INT16_MIN 0x8000
-+#define INT32_MAX 0x7FFFFFFF
-+#define INT32_MIN 0x80000000
-+#define INT64_MAX 0x7FFFFFFFFFFFFFFFLL
-+#define INT64_MIN 0x8000000000000000LL
-+
-+#define ON 1
-+#define OFF 0
-+
-+#define FALSE false
-+#define TRUE true
-+
-+
-+/************************/
-+/* memory access macros */
-+/************************/
-+#ifdef CONFIG_FMAN_ARM
-+#define in_be16(a) __be16_to_cpu(__raw_readw(a))
-+#define in_be32(a) __be32_to_cpu(__raw_readl(a))
-+#define out_be16(a, v) __raw_writew(__cpu_to_be16(v), a)
-+#define out_be32(a, v) __raw_writel(__cpu_to_be32(v), a)
-+#endif
-+
-+#define GET_UINT8(arg) *(volatile uint8_t *)(&(arg))
-+#define GET_UINT16(arg) in_be16(&(arg))//*(volatile uint16_t*)(&(arg))
-+#define GET_UINT32(arg) in_be32(&(arg))//*(volatile uint32_t*)(&(arg))
-+#define GET_UINT64(arg) *(volatile uint64_t*)(&(arg))
-+
-+#ifdef VERBOSE_WRITE
-+void XX_Print(char *str, ...);
-+#define WRITE_UINT8(arg, data) \
-+ do { XX_Print("ADDR: 0x%08x, VAL: 0x%02x\r\n", (uint32_t)&(arg), (data)); *(volatile uint8_t *)(&(arg)) = (data); } while (0)
-+#define WRITE_UINT16(arg, data) \
-+ do { XX_Print("ADDR: 0x%08x, VAL: 0x%04x\r\n", (uint32_t)&(arg), (data)); out_be16(&(arg), data); /* *(volatile uint16_t*)(&(arg)) = (data);*/ } while (0)
-+#define WRITE_UINT32(arg, data) \
-+ do { XX_Print("ADDR: 0x%08x, VAL: 0x%08x\r\n", (uint32_t)&(arg), (data)); out_be32(&(arg), data); /* *(volatile uint32_t*)(&(arg)) = (data);*/ } while (0)
-+#define WRITE_UINT64(arg, data) \
-+ do { XX_Print("ADDR: 0x%08x, VAL: 0x%016llx\r\n", (uint32_t)&(arg), (data)); *(volatile uint64_t*)(&(arg)) = (data); } while (0)
-+
-+#else /* not VERBOSE_WRITE */
-+#define WRITE_UINT8(arg, data) *(volatile uint8_t *)(&(arg)) = (data)
-+#define WRITE_UINT16(arg, data) out_be16(&(arg), data)//*(volatile uint16_t*)(&(arg)) = (data)
-+#define WRITE_UINT32(arg, data) out_be32(&(arg), data)//*(volatile unsigned int *)(&(arg)) = (data)
-+#define WRITE_UINT64(arg, data) *(volatile uint64_t*)(&(arg)) = (data)
-+#endif /* not VERBOSE_WRITE */
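A small sketch of how the accessor macros above are typically used on a memory-mapped, big-endian register block; the register layout is a made-up example:

struct example_regs {
    volatile uint32_t ctrl;
    volatile uint32_t status;
};

static void example_reg_access(struct example_regs *p_Regs)
{
    uint32_t status = GET_UINT32(p_Regs->status);   /* big-endian register read */

    if (status & 0x1)
        WRITE_UINT32(p_Regs->ctrl, 0x80000000);     /* big-endian register write */
}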
-+
-+
-+/*****************************************************************************/
-+/* General stuff */
-+/*****************************************************************************/
-+#ifdef ARRAY_SIZE
-+#undef ARRAY_SIZE
-+#endif /* ARRAY_SIZE */
-+
-+#ifdef MAJOR
-+#undef MAJOR
-+#endif /* MAJOR */
-+
-+#ifdef MINOR
-+#undef MINOR
-+#endif /* MINOR */
-+
-+#ifdef QE_SIZEOF_BD
-+#undef QE_SIZEOF_BD
-+#endif /* QE_SIZEOF_BD */
-+
-+#ifdef BD_BUFFER_CLEAR
-+#undef BD_BUFFER_CLEAR
-+#endif /* BD_BUFFER_CLEAR */
-+
-+#ifdef BD_BUFFER
-+#undef BD_BUFFER
-+#endif /* BD_BUFFER */
-+
-+#ifdef BD_STATUS_AND_LENGTH_SET
-+#undef BD_STATUS_AND_LENGTH_SET
-+#endif /* BD_STATUS_AND_LENGTH_SET */
-+
-+#ifdef BD_STATUS_AND_LENGTH
-+#undef BD_STATUS_AND_LENGTH
-+#endif /* BD_STATUS_AND_LENGTH */
-+
-+#ifdef BD_BUFFER_ARG
-+#undef BD_BUFFER_ARG
-+#endif /* BD_BUFFER_ARG */
-+
-+#ifdef BD_GET_NEXT
-+#undef BD_GET_NEXT
-+#endif /* BD_GET_NEXT */
-+
-+#ifdef QE_SDEBCR_BA_MASK
-+#undef QE_SDEBCR_BA_MASK
-+#endif /* QE_SDEBCR_BA_MASK */
-+
-+#ifdef BD_BUFFER_SET
-+#undef BD_BUFFER_SET
-+#endif /* BD_BUFFER_SET */
-+
-+#ifdef UPGCR_PROTOCOL
-+#undef UPGCR_PROTOCOL
-+#endif /* UPGCR_PROTOCOL */
-+
-+#ifdef UPGCR_TMS
-+#undef UPGCR_TMS
-+#endif /* UPGCR_TMS */
-+
-+#ifdef UPGCR_RMS
-+#undef UPGCR_RMS
-+#endif /* UPGCR_RMS */
-+
-+#ifdef UPGCR_ADDR
-+#undef UPGCR_ADDR
-+#endif /* UPGCR_ADDR */
-+
-+#ifdef UPGCR_DIAG
-+#undef UPGCR_DIAG
-+#endif /* UPGCR_DIAG */
-+
-+#ifdef NCSW_PARAMS
-+#undef NCSW_PARAMS
-+#endif /* NCSW_PARAMS */
-+
-+#ifdef NO_IRQ
-+#undef NO_IRQ
-+#endif /* NO_IRQ */
-+
-+#define PRINT_LINE XX_Print("%s:\n %s [%d]\n",__FILE__,__FUNCTION__,__LINE__);
-+
-+
-+#endif /* __TYPES_LINUX_H__ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/fsl_fman_test.h
-@@ -0,0 +1,84 @@
-+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fsl_fman_test.h
-+
-+ @Description
-+*//***************************************************************************/
-+
-+#ifndef __FSL_FMAN_TEST_H
-+#define __FSL_FMAN_TEST_H
-+
-+#include <linux/types.h>
-+#include <linux/smp.h> /* raw_smp_processor_id() */
-+
-+//#define FMT_K_DBG
-+//#define FMT_K_DBG_RUNTIME
-+
-+#define _fmt_prk(stage, format, arg...) \
-+ printk(stage "fmt (cpu:%u): " format, raw_smp_processor_id(), ##arg)
-+
-+#define _fmt_inf(format, arg...) _fmt_prk(KERN_INFO, format, ##arg)
-+#define _fmt_wrn(format, arg...) _fmt_prk(KERN_WARNING, format, ##arg)
-+#define _fmt_err(format, arg...) _fmt_prk(KERN_ERR, format, ##arg)
-+
-+/* There are two debugging macros: a generic one and one for runtime code.
-+ * This helps when the runtime functions are not targeted for debugging,
-+ * so all the unnecessary information is skipped.
-+ */
-+/* used for generic debugging */
-+#if defined(FMT_K_DBG)
-+ #define _fmt_dbg(format, arg...) \
-+ printk("fmt [%s:%u](cpu:%u) - " format, \
-+ __func__, __LINE__, raw_smp_processor_id(), ##arg)
-+#else
-+# define _fmt_dbg(arg...)
-+#endif
-+
-+/* used for debugging runtime functions */
-+#if defined(FMT_K_DBG_RUNTIME)
-+ #define _fmt_dbgr(format, arg...) \
-+ printk("fmt [%s:%u](cpu:%u) - " format, \
-+ __func__, __LINE__, raw_smp_processor_id(), ##arg)
-+#else
-+# define _fmt_dbgr(arg...)
-+#endif
-+
-+#define FMT_RX_ERR_Q 0xffffffff
-+#define FMT_RX_DFLT_Q 0xfffffffe
-+#define FMT_TX_ERR_Q 0xfffffffd
-+#define FMT_TX_CONF_Q 0xfffffffc
-+
-+#define FMAN_TEST_MAX_TX_FQS 8
-+
-+#endif /* __FSL_FMAN_TEST_H */
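A brief sketch of the logging helpers above; _fmt_dbg()/_fmt_dbgr() compile away unless FMT_K_DBG / FMT_K_DBG_RUNTIME are defined:

static inline void example_fmt_logging(int err)
{
    _fmt_inf("port initialized\n");
    _fmt_dbg("entering slow path\n");    /* emitted only with FMT_K_DBG */
    _fmt_dbgr("frame on fast path\n");   /* emitted only with FMT_K_DBG_RUNTIME */
    if (err)
        _fmt_err("operation failed: %d\n", err);
}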
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h
-@@ -0,0 +1,130 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_exp_sym.h
-+ @Description FMan exported routines
-+*/
-+
-+#ifndef __LNXWRP_EXP_SYM_H
-+#define __LNXWRP_EXP_SYM_H
-+
-+#include "fm_port_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_mac_ext.h"
-+
-+
-+/* FMAN Port exported routines */
-+EXPORT_SYMBOL(FM_PORT_Disable);
-+EXPORT_SYMBOL(FM_PORT_Enable);
-+EXPORT_SYMBOL(FM_PORT_SetPCD);
-+EXPORT_SYMBOL(FM_PORT_DeletePCD);
-+
-+/* Runtime PCD exported routines */
-+EXPORT_SYMBOL(FM_PCD_Enable);
-+EXPORT_SYMBOL(FM_PCD_Disable);
-+EXPORT_SYMBOL(FM_PCD_GetCounter);
-+EXPORT_SYMBOL(FM_PCD_PrsLoadSw);
-+EXPORT_SYMBOL(FM_PCD_KgSetDfltValue);
-+EXPORT_SYMBOL(FM_PCD_KgSetAdditionalDataAfterParsing);
-+EXPORT_SYMBOL(FM_PCD_SetException);
-+EXPORT_SYMBOL(FM_PCD_ModifyCounter);
-+EXPORT_SYMBOL(FM_PCD_SetPlcrStatistics);
-+EXPORT_SYMBOL(FM_PCD_SetPrsStatistics);
-+EXPORT_SYMBOL(FM_PCD_ForceIntr);
-+EXPORT_SYMBOL(FM_PCD_HcTxConf);
-+
-+EXPORT_SYMBOL(FM_PCD_NetEnvCharacteristicsSet);
-+EXPORT_SYMBOL(FM_PCD_NetEnvCharacteristicsDelete);
-+EXPORT_SYMBOL(FM_PCD_KgSchemeSet);
-+EXPORT_SYMBOL(FM_PCD_KgSchemeDelete);
-+EXPORT_SYMBOL(FM_PCD_KgSchemeGetCounter);
-+EXPORT_SYMBOL(FM_PCD_KgSchemeSetCounter);
-+EXPORT_SYMBOL(FM_PCD_CcRootBuild);
-+EXPORT_SYMBOL(FM_PCD_CcRootDelete);
-+EXPORT_SYMBOL(FM_PCD_MatchTableSet);
-+EXPORT_SYMBOL(FM_PCD_MatchTableDelete);
-+EXPORT_SYMBOL(FM_PCD_CcRootModifyNextEngine);
-+EXPORT_SYMBOL(FM_PCD_MatchTableModifyNextEngine);
-+EXPORT_SYMBOL(FM_PCD_MatchTableFindNModifyNextEngine);
-+EXPORT_SYMBOL(FM_PCD_MatchTableModifyMissNextEngine);
-+EXPORT_SYMBOL(FM_PCD_MatchTableRemoveKey);
-+EXPORT_SYMBOL(FM_PCD_MatchTableFindNRemoveKey);
-+EXPORT_SYMBOL(FM_PCD_MatchTableAddKey);
-+EXPORT_SYMBOL(FM_PCD_MatchTableModifyKeyAndNextEngine);
-+EXPORT_SYMBOL(FM_PCD_MatchTableFindNModifyKeyAndNextEngine);
-+EXPORT_SYMBOL(FM_PCD_MatchTableModifyKey);
-+EXPORT_SYMBOL(FM_PCD_MatchTableFindNModifyKey);
-+EXPORT_SYMBOL(FM_PCD_MatchTableGetIndexedHashBucket);
-+EXPORT_SYMBOL(FM_PCD_MatchTableGetNextEngine);
-+EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyCounter);
-+EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyStatistics);
-+EXPORT_SYMBOL(FM_PCD_MatchTableFindNGetKeyStatistics);
-+EXPORT_SYMBOL(FM_PCD_MatchTableGetMissStatistics);
-+EXPORT_SYMBOL(FM_PCD_HashTableGetMissStatistics);
-+EXPORT_SYMBOL(FM_PCD_HashTableSet);
-+EXPORT_SYMBOL(FM_PCD_HashTableDelete);
-+EXPORT_SYMBOL(FM_PCD_HashTableAddKey);
-+EXPORT_SYMBOL(FM_PCD_HashTableRemoveKey);
-+EXPORT_SYMBOL(FM_PCD_HashTableModifyNextEngine);
-+EXPORT_SYMBOL(FM_PCD_HashTableModifyMissNextEngine);
-+EXPORT_SYMBOL(FM_PCD_HashTableGetMissNextEngine);
-+EXPORT_SYMBOL(FM_PCD_HashTableFindNGetKeyStatistics);
-+EXPORT_SYMBOL(FM_PCD_PlcrProfileSet);
-+EXPORT_SYMBOL(FM_PCD_PlcrProfileDelete);
-+EXPORT_SYMBOL(FM_PCD_PlcrProfileGetCounter);
-+EXPORT_SYMBOL(FM_PCD_PlcrProfileSetCounter);
-+EXPORT_SYMBOL(FM_PCD_ManipNodeSet);
-+EXPORT_SYMBOL(FM_PCD_ManipNodeDelete);
-+EXPORT_SYMBOL(FM_PCD_ManipGetStatistics);
-+EXPORT_SYMBOL(FM_PCD_ManipNodeReplace);
-+#if (DPAA_VERSION >= 11)
-+EXPORT_SYMBOL(FM_PCD_FrmReplicSetGroup);
-+EXPORT_SYMBOL(FM_PCD_FrmReplicDeleteGroup);
-+EXPORT_SYMBOL(FM_PCD_FrmReplicAddMember);
-+EXPORT_SYMBOL(FM_PCD_FrmReplicRemoveMember);
-+#endif /* DPAA_VERSION >= 11 */
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+EXPORT_SYMBOL(FM_PCD_StatisticsSetNode);
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+EXPORT_SYMBOL(FM_PCD_SetAdvancedOffloadSupport);
-+
-+/* FMAN MAC exported routines */
-+EXPORT_SYMBOL(FM_MAC_GetStatistics);
-+
-+EXPORT_SYMBOL(FM_MAC_GetFrameSizeCounters);
-+
-+EXPORT_SYMBOL(FM_GetSpecialOperationCoding);
-+
-+#endif /* __LNXWRP_EXP_SYM_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h
-@@ -0,0 +1,163 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File lnxwrp_fm_ext.h
-+
-+ @Description TODO
-+*//***************************************************************************/
-+
-+#ifndef __LNXWRP_FM_EXT_H
-+#define __LNXWRP_FM_EXT_H
-+
-+#include "std_ext.h"
-+#include "sys_ext.h"
-+#include "fm_ext.h"
-+#include "fm_muram_ext.h"
-+#include "fm_pcd_ext.h"
-+#include "fm_port_ext.h"
-+#include "fm_mac_ext.h"
-+#include "fm_rtc_ext.h"
-+
-+
-+/**************************************************************************//**
-+ @Group FM_LnxKern_grp Frame Manager Linux wrapper API
-+
-+ @Description FM API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_LnxKern_init_grp Initialization Unit
-+
-+ @Description Initialization Unit
-+
-+ Initialization Flow:
-+ Initialization of the FM Module will be carried out by the Linux
-+ kernel according to the following sequence:
-+ a. Calling the initialization routine with no parameters.
-+ b. The driver will register to the Device-Tree.
-+ c. The Linux Device-Tree will initiate a call to the driver for
-+ initialization.
-+ d. The driver will read the appropriate information from the Device-Tree.
-+ e. [Optional] Calling the advanced initialization routines to change
-+ the driver's defaults.
-+ f. Initialization of the device will be carried out automatically upon first use.
-+
-+ @{
-+*//***************************************************************************/
-+
-+typedef struct t_WrpFmDevSettings
-+{
-+ t_FmParams param;
-+ t_SysObjectAdvConfigEntry *advConfig;
-+} t_WrpFmDevSettings;
-+
-+typedef struct t_WrpFmPcdDevSettings
-+{
-+ t_FmPcdParams param;
-+ t_SysObjectAdvConfigEntry *advConfig;
-+} t_WrpFmPcdDevSettings;
-+
-+typedef struct t_WrpFmPortDevSettings
-+{
-+ bool frag_enabled;
-+ t_FmPortParams param;
-+ t_SysObjectAdvConfigEntry *advConfig;
-+} t_WrpFmPortDevSettings;
-+
-+typedef struct t_WrpFmMacDevSettings
-+{
-+ t_FmMacParams param;
-+ t_SysObjectAdvConfigEntry *advConfig;
-+} t_WrpFmMacDevSettings;
-+
-+
-+/**************************************************************************//**
-+ @Function LNXWRP_FM_Init
-+
-+ @Description Initialize the FM linux wrapper.
-+
-+ @Return A handle (descriptor) of the newly created FM Linux wrapper
-+ structure.
-+*//***************************************************************************/
-+t_Handle LNXWRP_FM_Init(void);
-+
-+/**************************************************************************//**
-+ @Function LNXWRP_FM_Free
-+
-+ @Description Free the FM linux wrapper.
-+
-+ @Param[in] h_LnxWrpFm - A handle to the FM linux wrapper.
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+t_Error LNXWRP_FM_Free(t_Handle h_LnxWrpFm);
-+
-+/**************************************************************************//**
-+ @Function LNXWRP_FM_GetMacHandle
-+
-+ @Description Get the FM-MAC LLD handle from the FM linux wrapper.
-+
-+ @Param[in] h_LnxWrpFm - A handle to the FM linux wrapper.
-+ @Param[in] fmId - Index of the FM device to get the MAC handle from.
-+ @Param[in] macId - Index of the mac handle.
-+
-+ @Return A handle of the FM-MAC LLD object.
-+*//***************************************************************************/
-+t_Handle LNXWRP_FM_GetMacHandle(t_Handle h_LnxWrpFm, uint8_t fmId, uint8_t macId);
-+
-+#ifdef CONFIG_FSL_SDK_FMAN_TEST
-+t_Handle LNXWRP_FM_TEST_Init(void);
-+t_Error LNXWRP_FM_TEST_Free(t_Handle h_FmTestLnxWrp);
-+#endif /* CONFIG_FSL_SDK_FMAN_TEST */
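
As the initialization flow above describes, the wrapper is created with a parameterless init call and torn down with a matching free. A minimal sketch of that pairing, assuming a hypothetical module init/exit pair (in-tree, the platform-driver probe path performs these steps):

    static t_Handle h_fm_wrp;

    static int __init fm_wrp_example_init(void)
    {
            h_fm_wrp = LNXWRP_FM_Init();            /* step (a): no parameters */
            return h_fm_wrp ? 0 : -ENODEV;
    }

    static void __exit fm_wrp_example_exit(void)
    {
            if (LNXWRP_FM_Free(h_fm_wrp) != E_OK)
                    pr_warn("FM wrapper free failed\n");
    }
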
-+
-+/** @} */ /* end of FM_LnxKern_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group FM_LnxKern_ctrl_grp Control Unit
-+
-+ @Description Control Unit
-+
-+ TODO
-+ @{
-+*//***************************************************************************/
-+
-+#include "lnxwrp_fsl_fman.h"
-+
-+/** @} */ /* end of FM_LnxKern_ctrl_grp group */
-+/** @} */ /* end of FM_LnxKern_grp group */
-+
-+
-+#endif /* __LNXWRP_FM_EXT_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h
-@@ -0,0 +1,921 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File lnxwrp_fsl_fman.h
-+
-+ @Description Linux internal kernel API
-+*//***************************************************************************/
-+
-+#ifndef __LNXWRP_FSL_FMAN_H
-+#define __LNXWRP_FSL_FMAN_H
-+
-+#include <linux/types.h>
-+#include <linux/device.h> /* struct device */
-+#include <linux/fsl_qman.h> /* struct qman_fq */
-+#include "dpaa_integration_ext.h"
-+#include "fm_port_ext.h"
-+#include "fm_mac_ext.h"
-+#include "fm_macsec_ext.h"
-+#include "fm_rtc_ext.h"
-+
-+/**************************************************************************//**
-+ @Group FM_LnxKern_grp Frame Manager Linux wrapper API
-+
-+ @Description FM API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group FM_LnxKern_ctrl_grp Control Unit
-+
-+ @Description Control Unit
-+
-+ Internal Kernel Control Unit API
-+ @{
-+*//***************************************************************************/
-+
-+/*****************************************************************************/
-+/* Internal Linux kernel routines */
-+/*****************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description MACSEC Exceptions wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_exception {
-+ SINGLE_BIT_ECC = e_FM_MACSEC_EX_SINGLE_BIT_ECC,
-+ MULTI_BIT_ECC = e_FM_MACSEC_EX_MULTI_BIT_ECC
-+} fm_macsec_exception;
-+
-+/**************************************************************************//**
-+ @Description Unknown sci frame treatment wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_unknown_sci_frame_treatment {
-+ SCI_DISCARD_BOTH = e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_BOTH,
-+ SCI_DISCARD_UNCTRL_DELIVER_DISCARD_CTRL = \
-+ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED,
-+ SCI_DELIVER_UNCTRL_DISCARD_CTRL = \
-+ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED,
-+ SCI_DELIVER_DISCARD_UNCTRL_DELIVER_DISCARD_CTRL = \
-+ e_FM_MACSEC_UNKNOWN_SCI_FRAME_TREATMENT_DELIVER_OR_DISCARD_UNCONTROLLED_DELIVER_OR_DISCARD_CONTROLLED
-+} fm_macsec_unknown_sci_frame_treatment;
-+
-+/**************************************************************************//**
-+ @Description Untag frame treatment wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_untag_frame_treatment {
-+ UNTAG_DELIVER_UNCTRL_DISCARD_CTRL = \
-+ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DELIVER_UNCONTROLLED_DISCARD_CONTROLLED,
-+ UNTAG_DISCARD_BOTH = e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_BOTH,
-+ UNTAG_DISCARD_UNCTRL_DELIVER_CTRL_UNMODIFIED = \
-+ e_FM_MACSEC_UNTAG_FRAME_TREATMENT_DISCARD_UNCONTROLLED_DELIVER_CONTROLLED_UNMODIFIED
-+} fm_macsec_untag_frame_treatment;
-+
-+/**************************************************************************//**
-+@Description MACSEC SECY Cipher Suite wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_secy_cipher_suite {
-+ SECY_GCM_AES_128 = e_FM_MACSEC_SECY_GCM_AES_128, /**< GCM-AES-128 */
-+#if (DPAA_VERSION >= 11)
-+ SECY_GCM_AES_256 = e_FM_MACSEC_SECY_GCM_AES_256 /**< GCM-AES-256 */
-+#endif /* (DPAA_VERSION >= 11) */
-+} fm_macsec_secy_cipher_suite;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SECY Exceptions wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_secy_exception {
-+ SECY_EX_FRAME_DISCARDED = e_FM_MACSEC_SECY_EX_FRAME_DISCARDED
-+} fm_macsec_secy_exception;
-+
-+/**************************************************************************//**
-+ @Description MACSEC SECY Events wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_secy_event {
-+ SECY_EV_NEXT_PN = e_FM_MACSEC_SECY_EV_NEXT_PN
-+} fm_macsec_secy_event;
-+
-+/**************************************************************************//**
-+ @Description Valid frame behaviors wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_valid_frame_behavior {
-+ VALID_FRAME_BEHAVIOR_DISABLE = e_FM_MACSEC_VALID_FRAME_BEHAVIOR_DISABLE,
-+ VALID_FRAME_BEHAVIOR_CHECK = e_FM_MACSEC_VALID_FRAME_BEHAVIOR_CHECK,
-+ VALID_FRAME_BEHAVIOR_STRICT = e_FM_MACSEC_VALID_FRAME_BEHAVIOR_STRICT
-+} fm_macsec_valid_frame_behavior;
-+
-+/**************************************************************************//**
-+ @Description SCI insertion modes wrapper
-+*//***************************************************************************/
-+typedef enum fm_macsec_sci_insertion_mode {
-+ SCI_INSERTION_MODE_EXPLICIT_SECTAG = \
-+ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_SECTAG,
-+ SCI_INSERTION_MODE_EXPLICIT_MAC_SA = \
-+ e_FM_MACSEC_SCI_INSERTION_MODE_EXPLICIT_MAC_SA,
-+ SCI_INSERTION_MODE_IMPLICT_PTP = e_FM_MACSEC_SCI_INSERTION_MODE_IMPLICT_PTP
-+} fm_macsec_sci_insertion_mode;
-+
-+typedef macsecSAKey_t macsec_sa_key_t;
-+typedef macsecSCI_t macsec_sci_t;
-+typedef macsecAN_t macsec_an_t;
-+typedef t_Handle handle_t;
-+
-+/**************************************************************************//**
-+ @Function fm_macsec_secy_exception_callback wrapper
-+ @Description User callback routine for exceptions; called upon an
-+ exception, passing the exception identification.
-+ @Param[in] app_h A handle to an application layer object; This handle
-+ will be passed by the driver upon calling this callback.
-+ @Param[in] exception The exception.
-+*//***************************************************************************/
-+typedef void (fm_macsec_secy_exception_callback) (handle_t app_h,
-+ fm_macsec_secy_exception exception);
-+
-+/**************************************************************************//**
-+ @Function fm_macsec_secy_event_callback wrapper
-+ @Description User callback routine for events; called upon an
-+ event, passing the event identification.
-+ @Param[in] app_h A handle to an application layer object; This handle
-+ will be passed by the driver upon calling this callback.
-+ @Param[in] event The event.
-+*//***************************************************************************/
-+typedef void (fm_macsec_secy_event_callback) (handle_t app_h,
-+ fm_macsec_secy_event event);
-+
-+/**************************************************************************//**
-+ @Function fm_macsec_exception_callback wrapper
-+ @Description User callback routine for exceptions; called upon an
-+ exception, passing the exception identification.
-+ @Param[in] app_h A handle to an application layer object; This handle
-+ will be passed by the driver upon calling this callback.
-+ @Param[in] exception The exception.
-+*//***************************************************************************/
-+typedef void (fm_macsec_exception_callback) (handle_t app_h,
-+ fm_macsec_exception exception);
-+
-+/**************************************************************************//**
-+ @Description MACSEC SecY SC Params wrapper
-+*//***************************************************************************/
-+struct fm_macsec_secy_sc_params {
-+ macsec_sci_t sci;
-+ fm_macsec_secy_cipher_suite cipher_suite;
-+};
-+
-+/**************************************************************************//**
-+ @Description FM MACSEC SecY config input wrapper
-+*//***************************************************************************/
-+struct fm_macsec_secy_params {
-+ handle_t fm_macsec_h;
-+ struct fm_macsec_secy_sc_params tx_sc_params;
-+ uint32_t num_receive_channels;
-+ fm_macsec_secy_exception_callback *exception_f;
-+ fm_macsec_secy_event_callback *event_f;
-+ handle_t app_h;
-+};
-+
-+/**************************************************************************//**
-+ @Description FM MACSEC config input wrapper
-+*//***************************************************************************/
-+struct fm_macsec_params {
-+ handle_t fm_h;
-+ bool guest_mode;
-+
-+ union {
-+ struct {
-+ uint8_t fm_mac_id;
-+ } guest_params;
-+
-+ struct {
-+ uintptr_t base_addr;
-+ handle_t fm_mac_h;
-+ fm_macsec_exception_callback *exception_f;
-+ handle_t app_h;
-+ } non_guest_params;
-+ };
-+
-+};
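
A minimal sketch of filling the non-guest variant of this structure; the register base address is hypothetical, the handles would come from the wrapper in real use, and error handling is omitted:

    static void example_macsec_exception(handle_t app_h, fm_macsec_exception ex)
    {
            pr_warn("MACsec exception %d\n", ex);
    }

    static struct fm_macsec_params macsec_params = {
            .fm_h       = NULL,                     /* FM handle, e.g. from fm_get_handle() */
            .guest_mode = false,
            .non_guest_params = {
                    .base_addr   = 0x1000000,       /* hypothetical MACsec register base */
                    .fm_mac_h    = NULL,            /* LLD handle of the protected MAC */
                    .exception_f = example_macsec_exception,
                    .app_h       = NULL,
            },
    };
    /* struct fm_macsec_dev *msec = fm_macsec_config(&macsec_params); */
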
-+
-+/**************************************************************************//**
-+ @Description FM device opaque structure used for type checking
-+*//***************************************************************************/
-+struct fm;
-+
-+/**************************************************************************//**
-+ @Description FM MAC device opaque structure used for type checking
-+*//***************************************************************************/
-+struct fm_mac_dev;
-+
-+/**************************************************************************//**
-+ @Description FM MACSEC device opaque structure used for type checking
-+*//***************************************************************************/
-+struct fm_macsec_dev;
-+struct fm_macsec_secy_dev;
-+
-+/**************************************************************************//**
-+ @Description FM port device opaque structure used for type checking
-+*//***************************************************************************/
-+struct fm_port;
-+
-+typedef int (*alloc_pcd_fqids)(struct device *dev, uint32_t num,
-+ uint8_t alignment, uint32_t *base_fqid);
-+
-+typedef int (*free_pcd_fqids)(struct device *dev, uint32_t base_fqid);
-+
-+struct fm_port_pcd_param {
-+ alloc_pcd_fqids cba;
-+ free_pcd_fqids cbf;
-+ struct device *dev;
-+};
-+
-+/**************************************************************************//**
-+ @Description A structure of information about each of the external
-+ buffer pools used by the port.
-+*//***************************************************************************/
-+struct fm_port_pool_param {
-+ uint8_t id; /**< External buffer pool id */
-+ uint16_t size; /**< External buffer pool buffer size */
-+};
-+
-+/**************************************************************************//**
-+ @Description structure for additional port parameters
-+*//***************************************************************************/
-+struct fm_port_params {
-+ uint32_t errq; /**< Error Queue Id. */
-+ uint32_t defq; /**< For Tx and HC - Default Confirmation queue,
-+ 0 means no Tx conf for processed frames.
-+ For Rx and OP - default Rx queue. */
-+ uint8_t num_pools; /**< Number of pools use by this port */
-+ struct fm_port_pool_param pool_param[FM_PORT_MAX_NUM_OF_EXT_POOLS];
-+ /**< Parameters for each pool */
-+ uint16_t priv_data_size; /**< Area that the user may reserve for private
-+ use (e.g. to save the SKB) */
-+ bool parse_results; /**< Put the parser-results in the Rx/Tx buffer */
-+ bool hash_results; /**< Put the hash-results in the Rx/Tx buffer */
-+ bool time_stamp; /**< Put the time-stamp in the Rx/Tx buffer */
-+ bool frag_enable; /**< Fragmentation support, for OP only */
-+ uint16_t data_align; /**< value for selecting a data alignment (must be a power of 2);
-+ if write optimization is used, must be >= 16. */
-+ uint8_t manip_extra_space; /**< Maximum extra size needed (insertion-size minus removal-size);
-+ Note that this field impacts the size of the buffer-prefix
-+ (i.e. it pushes the data offset); */
-+};
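
A minimal sketch of filling this structure for an Rx port; the queue IDs and buffer-pool values are hypothetical and would normally come from the DPAA Ethernet driver's own resource allocation (see the lifecycle sketch further below for how it is passed to the port):

    static struct fm_port_params rx_params = {
            .errq           = 0x101,        /* hypothetical error FQID */
            .defq           = 0x100,        /* hypothetical default Rx FQID */
            .num_pools      = 1,
            .pool_param     = { { .id = 4, .size = 2048 } },  /* one buffer pool, 2 KiB buffers */
            .priv_data_size = 16,
            .parse_results  = true,
            .hash_results   = false,
            .time_stamp     = false,
    };
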
-+
-+/**************************************************************************//**
-+ @Function fm_bind
-+
-+ @Description Bind to a specific FM device.
-+
-+ @Param[in] fm_dev - the OF handle of the FM device.
-+
-+ @Return A handle of the FM device.
-+
-+ @Cautions Allowed only after the port was created.
-+*//***************************************************************************/
-+struct fm *fm_bind(struct device *fm_dev);
-+
-+/**************************************************************************//**
-+ @Function fm_unbind
-+
-+ @Description Un-bind from a specific FM device.
-+
-+ @Param[in] fm - A handle of the FM device.
-+
-+ @Cautions Allowed only after the port was created.
-+*//***************************************************************************/
-+void fm_unbind(struct fm *fm);
-+
-+void *fm_get_handle(struct fm *fm);
-+void *fm_get_rtc_handle(struct fm *fm);
-+struct resource *fm_get_mem_region(struct fm *fm);
-+
-+/**************************************************************************//**
-+ @Function fm_port_bind
-+
-+ @Description Bind to a specific FM-port device (may be Rx or Tx port).
-+
-+ @Param[in] fm_port_dev - the OF handle of the FM port device.
-+
-+ @Return A handle of the FM port device.
-+
-+ @Cautions Allowed only after the port was created.
-+*//***************************************************************************/
-+struct fm_port *fm_port_bind(struct device *fm_port_dev);
-+
-+/**************************************************************************//**
-+ @Function fm_port_unbind
-+
-+ @Description Un-bind from a specific FM-port device (may be Rx or Tx port).
-+
-+ @Param[in] port - A handle of the FM port device.
-+
-+ @Cautions Allowed only after the port was created.
-+*//***************************************************************************/
-+void fm_port_unbind(struct fm_port *port);
-+
-+/**************************************************************************//**
-+ @Function fm_set_rx_port_params
-+
-+ @Description Configure parameters for a specific Rx FM-port device.
-+
-+ @Param[in] port - A handle of the FM port device.
-+ @Param[in] params - Rx port parameters
-+
-+ @Cautions Allowed only after the port is bound.
-+*//***************************************************************************/
-+void fm_set_rx_port_params(struct fm_port *port,
-+ struct fm_port_params *params);
-+
-+/**************************************************************************//**
-+ @Function fm_port_pcd_bind
-+
-+ @Description Bind as a listener on a port PCD.
-+
-+ @Param[in] port - A handle of the FM port device.
-+ @Param[in] params - PCD port parameters
-+
-+ @Cautions Allowed only after the port is bound.
-+*//***************************************************************************/
-+void fm_port_pcd_bind (struct fm_port *port, struct fm_port_pcd_param *params);
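
A minimal sketch of the PCD binding above; the callbacks hand out PCD frame-queue IDs on behalf of the port, and the FQID range used here is hypothetical:

    static int example_alloc_pcd_fqids(struct device *dev, uint32_t num,
                                       uint8_t alignment, uint32_t *base_fqid)
    {
            /* hand out FQIDs from a hypothetical pre-reserved, aligned range */
            *base_fqid = 0x400;
            return 0;
    }

    static int example_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
    {
            return 0;       /* nothing to release in this sketch */
    }

    static struct fm_port_pcd_param pcd_param = {
            .cba = example_alloc_pcd_fqids,
            .cbf = example_free_pcd_fqids,
            .dev = NULL,    /* the net device's struct device in real use */
    };
    /* fm_port_pcd_bind(port, &pcd_param); */
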
-+
-+/**************************************************************************//**
-+ @Function fm_port_get_buff_layout_ext_params
-+
-+ @Description Get data_align and manip_extra_space from the device tree
-+ chosen node, if provided.
-+ This function only updates these two parameters.
-+ When this port has no such parameters in the device tree,
-+ the values are set to 0.
-+
-+ @Param[in] port - A handle of the FM port device.
-+ @Param[in,out] params - Port parameters to be updated.
-+
-+ @Cautions Allowed only after the port is bound.
-+*//***************************************************************************/
-+void fm_port_get_buff_layout_ext_params(struct fm_port *port, struct fm_port_params *params);
-+
-+/**************************************************************************//**
-+ @Function fm_get_tx_port_channel
-+
-+ @Description Get qman-channel number for this Tx port.
-+
-+ @Param[in] port - A handle of the FM port device.
-+
-+ @Return qman-channel number for this Tx port.
-+
-+ @Cautions Allowed only after the port is bound.
-+*//***************************************************************************/
-+uint16_t fm_get_tx_port_channel(struct fm_port *port);
-+
-+/**************************************************************************//**
-+ @Function fm_set_tx_port_params
-+
-+ @Description Configure parameters for a specific Tx FM-port device
-+
-+ @Param[in] port - A handle of the FM port device.
-+ @Param[in] params - Tx port parameters
-+
-+ @Cautions Allowed only after the port is bound.
-+*//***************************************************************************/
-+void fm_set_tx_port_params(struct fm_port *port, struct fm_port_params *params);
-+
-+
-+/**************************************************************************//**
-+ @Function fm_mac_set_handle
-+
-+ @Description Set mac handle
-+
-+ @Param[in] h_lnx_wrp_fm_dev - A handle of the LnxWrp FM device.
-+ @Param[in] h_fm_mac - A handle of the LnxWrp FM MAC device.
-+ @Param[in] mac_id - MAC id.
-+*//***************************************************************************/
-+void fm_mac_set_handle(t_Handle h_lnx_wrp_fm_dev, t_Handle h_fm_mac,
-+ int mac_id);
-+
-+/**************************************************************************//**
-+ @Function fm_port_enable
-+
-+ @Description Enable specific FM-port device (may be Rx or Tx port).
-+
-+ @Param[in] port - A handle of the FM port device.
-+
-+ @Cautions Allowed only after the port is initialized.
-+*//***************************************************************************/
-+int fm_port_enable(struct fm_port *port);
-+
-+/**************************************************************************//**
-+ @Function fm_port_disable
-+
-+ @Description Disable specific FM-port device (may be Rx or Tx port).
-+
-+ @Param[in] port - A handle of the FM port device.
-+
-+ @Cautions Allowed only after the port is initialized.
-+*//***************************************************************************/
-+int fm_port_disable(struct fm_port *port);
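
Taken together, the port API above follows a bind, configure, enable lifecycle. A minimal sketch, assuming fm_port_pdev is the platform device of an Rx port, rx_params is filled as shown earlier, and that NULL/non-zero returns indicate failure (error handling here is illustrative):

    static int example_bring_up_rx_port(struct platform_device *fm_port_pdev,
                                        struct fm_port_params *rx_params)
    {
            struct fm_port *port = fm_port_bind(&fm_port_pdev->dev);

            if (!port)
                    return -ENODEV;

            fm_set_rx_port_params(port, rx_params);
            if (fm_port_enable(port)) {
                    fm_port_unbind(port);
                    return -EIO;
            }
            /* ... later: fm_port_disable(port); fm_port_unbind(port); */
            return 0;
    }
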
-+
-+void *fm_port_get_handle(const struct fm_port *port);
-+
-+u64 *fm_port_get_buffer_time_stamp(const struct fm_port *port,
-+ const void *data);
-+
-+/**************************************************************************//**
-+ @Function fm_port_get_base_addr
-+
-+ @Description Get base address of this port. Useful for accessing
-+ port-specific registers (i.e., not common ones).
-+
-+ @Param[in] port - A handle of the FM port device.
-+
-+ @Param[out] base_addr - The port's base addr (virtual address).
-+*//***************************************************************************/
-+void fm_port_get_base_addr(const struct fm_port *port, uint64_t *base_addr);
-+
-+/**************************************************************************//**
-+ @Function fm_mutex_lock
-+
-+ @Description Lock function required before any FMD/LLD call.
-+*//***************************************************************************/
-+void fm_mutex_lock(void);
-+
-+/**************************************************************************//**
-+ @Function fm_mutex_unlock
-+
-+ @Description Unlock function required after any FMD/LLD call.
-+*//***************************************************************************/
-+void fm_mutex_unlock(void);
-+
-+/**************************************************************************//**
-+ @Function fm_get_max_frm
-+
-+ @Description Get the maximum frame size
-+*//***************************************************************************/
-+int fm_get_max_frm(void);
-+
-+/**************************************************************************//**
-+ @Function fm_get_rx_extra_headroom
-+
-+ @Description Get the extra headroom size
-+*//***************************************************************************/
-+int fm_get_rx_extra_headroom(void);
-+
-+/**************************************************************************//**
-+@Function fm_port_set_rate_limit
-+
-+@Description Configure Shaper parameter on FM-port device (Tx port).
-+
-+@Param[in] port - A handle of the FM port device.
-+@Param[in] max_burst_size - Value of maximum burst size allowed.
-+@Param[in] rate_limit - The required rate value.
-+
-+@Cautions Allowed only after the port is initialized.
-+*//***************************************************************************/
-+int fm_port_set_rate_limit(struct fm_port *port,
-+ uint16_t max_burst_size,
-+ uint32_t rate_limit);
-+/**************************************************************************//**
-+@Function fm_port_del_rate_limit
-+
-+@Description Delete Shaper configuration on FM-port device (Tx port).
-+
-+@Param[in] port - A handle of the FM port device.
-+
-+@Cautions Allowed only after the port is initialized.
-+*//***************************************************************************/
-+int fm_port_del_rate_limit(struct fm_port *port);
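
A minimal sketch of shaping a Tx port and later removing the shaper; the burst and rate values below are placeholders, since this header does not state their units:

    static void example_shape_tx_port(struct fm_port *tx_port)
    {
            if (fm_port_set_rate_limit(tx_port, 64, 100000))
                    pr_warn("failed to set Tx rate limit\n");

            /* ... later, remove the shaper ... */
            fm_port_del_rate_limit(tx_port);
    }
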
-+
-+struct auto_res_tables_sizes
-+{
-+ uint16_t max_num_of_arp_entries;
-+ uint16_t max_num_of_echo_ipv4_entries;
-+ uint16_t max_num_of_ndp_entries;
-+ uint16_t max_num_of_echo_ipv6_entries;
-+ uint16_t max_num_of_snmp_ipv4_entries;
-+ uint16_t max_num_of_snmp_ipv6_entries;
-+ uint16_t max_num_of_snmp_oid_entries;
-+ uint16_t max_num_of_snmp_char; /* total number of characters needed
-+ for the SNMP table */
-+ uint16_t max_num_of_ip_prot_filtering;
-+ uint16_t max_num_of_tcp_port_filtering;
-+ uint16_t max_num_of_udp_port_filtering;
-+};
-+/* ARP */
-+struct auto_res_arp_entry
-+{
-+ uint32_t ip_address;
-+ uint8_t mac[6];
-+ bool is_vlan;
-+ uint16_t vid;
-+};
-+struct auto_res_arp_info
-+{
-+ uint8_t table_size;
-+ struct auto_res_arp_entry *auto_res_table;
-+ bool enable_conflict_detection; /* when TRUE,
-+ Conflict Detection is performed and the host is woken up
-+ if needed */
-+};
-+
-+/* NDP */
-+struct auto_res_ndp_entry
-+{
-+ uint32_t ip_address[4];
-+ uint8_t mac[6];
-+ bool is_vlan;
-+ uint16_t vid;
-+};
-+struct auto_res_ndp_info
-+{
-+ uint32_t multicast_group;
-+ uint8_t table_size_assigned;
-+ struct auto_res_ndp_entry *auto_res_table_assigned; /* This list
-+ refers to solicitation IP addresses. Note that all IP addresses
-+ must be from the same multicast group; this is checked and the
-+ operation fails otherwise. */
-+ uint8_t table_size_tmp;
-+ struct auto_res_ndp_entry *auto_res_table_tmp; /* This list
-+ refers to temporary IP addresses. Note that all temporary IP
-+ addresses must be from the same multicast group; this is checked
-+ and the operation fails otherwise. */
-+
-+ bool enable_conflict_detection; /* when TRUE,
-+ Conflict Detection is performed and the host is woken up
-+ if needed */
-+};
-+
-+/* ICMP ECHO */
-+struct auto_res_echo_ipv4_info
-+{
-+ uint8_t table_size;
-+ struct auto_res_arp_entry *auto_res_table;
-+};
-+
-+struct auto_res_echo_ipv6_info
-+{
-+ uint8_t table_size;
-+ struct auto_res_ndp_entry *auto_res_table;
-+};
-+
-+/* SNMP */
-+struct auto_res_snmp_entry
-+{
-+ uint16_t oidSize;
-+ uint8_t *oidVal; /* only the oid string */
-+ uint16_t resSize;
-+ uint8_t *resVal; /* resVal will be the entire reply,
-+ i.e. "Type|Length|Value" */
-+};
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP IPv4 Addresses Table Entry
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+struct auto_res_snmp_ipv4addr_tbl_entry
-+{
-+ uint32_t ipv4addr; /*!< 32 bit IPv4 Address. */
-+ bool is_vlan;
-+ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+};
-+
-+/**************************************************************************//**
-+ @Description Deep Sleep Auto Response SNMP IPv6 Addresses Table Entry
-+ Refer to the FMan Controller spec for more details.
-+*//***************************************************************************/
-+struct auto_res_snmp_ipv6addr_tbl_entry
-+{
-+ uint32_t ipv6Addr[4]; /*!< 4 * 32 bit IPv6 Address. */
-+ bool isVlan;
-+ uint16_t vid; /*!< 12 bits VLAN ID. The 4 left-most bits should be cleared */
-+ /*!< This field should be 0x0000 for an entry with no VLAN tag or a null VLAN ID. */
-+};
-+
-+struct auto_res_snmp_info
-+{
-+ uint16_t control; /**< Control bits [0-15]. */
-+ uint16_t max_snmp_msg_length; /**< Maximal allowed SNMP message length. */
-+ uint16_t num_ipv4_addresses; /**< Number of entries in IPv4 addresses table. */
-+ uint16_t num_ipv6_addresses; /**< Number of entries in IPv6 addresses table. */
-+ struct auto_res_snmp_ipv4addr_tbl_entry *ipv4addr_tbl; /**< Pointer to IPv4 addresses table. */
-+ struct auto_res_snmp_ipv6addr_tbl_entry *ipv6addr_tbl; /**< Pointer to IPv6 addresses table. */
-+ char *community_read_write_string;
-+ char *community_read_only_string;
-+ struct auto_res_snmp_entry *oid_table;
-+ uint32_t oid_table_size;
-+ uint32_t *statistics;
-+};
-+
-+/* Filtering */
-+struct auto_res_port_filtering_entry
-+{
-+ uint16_t src_port;
-+ uint16_t dst_port;
-+ uint16_t src_port_mask;
-+ uint16_t dst_port_mask;
-+};
-+struct auto_res_filtering_info
-+{
-+ /* IP protocol filtering parameters */
-+ uint8_t ip_prot_table_size;
-+ uint8_t *ip_prot_table_ptr;
-+ bool ip_prot_pass_on_hit; /* when TRUE, a miss in the table
-+ causes the packet to be dropped, while a hit passes the packet
-+ to the UDP/TCP filters if needed, and otherwise to the
-+ classification tree. If the classification tree passes the
-+ packet to a queue, a wake interrupt is raised. When FALSE it is
-+ the other way around. */
-+ /* UDP port filtering parameters */
-+ uint8_t udp_ports_table_size;
-+ struct auto_res_port_filtering_entry *udp_ports_table_ptr;
-+ bool udp_port_pass_on_hit; /* when TRUE, a miss in the table
-+ causes the packet to be dropped, while a hit passes the packet
-+ to the classification tree. If the classification tree passes
-+ the packet to a queue, a wake interrupt is raised. When FALSE
-+ it is the other way around. */
-+ /* TCP port filtering parameters */
-+ uint16_t tcp_flags_mask;
-+ uint8_t tcp_ports_table_size;
-+ struct auto_res_port_filtering_entry *tcp_ports_table_ptr;
-+ bool tcp_port_pass_on_hit; /* when TRUE, a miss in the table
-+ causes the packet to be dropped, while a hit passes the packet
-+ to the classification tree. If the classification tree passes
-+ the packet to a queue, a wake interrupt is raised. When FALSE
-+ it is the other way around. */
-+};
-+
-+struct auto_res_port_params
-+{
-+ t_Handle h_FmPortTx;
-+ struct auto_res_arp_info *p_auto_res_arp_info;
-+ struct auto_res_echo_ipv4_info *p_auto_res_echo_ipv4_info;
-+ struct auto_res_ndp_info *p_auto_res_ndp_info;
-+ struct auto_res_echo_ipv6_info *p_auto_res_echo_ipv6_info;
-+ struct auto_res_snmp_info *p_auto_res_snmp_info;
-+ struct auto_res_filtering_info *p_auto_res_filtering_info;
-+};
-+
-+struct auto_res_port_stats
-+{
-+ uint32_t arp_ar_cnt;
-+ uint32_t echo_icmpv4_ar_cnt;
-+ uint32_t ndp_ar_cnt;
-+ uint32_t echo_icmpv6_ar_cnt;
-+};
-+
-+int fm_port_config_autores_for_deepsleep_support(struct fm_port *port,
-+ struct auto_res_tables_sizes *params);
-+
-+int fm_port_enter_autores_for_deepsleep(struct fm_port *port,
-+ struct auto_res_port_params *params);
-+
-+void fm_port_exit_auto_res_for_deep_sleep(struct fm_port *port_rx,
-+ struct fm_port *port_tx);
-+
-+bool fm_port_is_in_auto_res_mode(struct fm_port *port);
-+
-+struct auto_res_tables_sizes *fm_port_get_autores_maxsize(
-+ struct fm_port *port);
-+
-+int fm_port_get_autores_stats(struct fm_port *port, struct auto_res_port_stats
-+ *stats);
-+
-+int fm_port_resume(struct fm_port *port);
-+
-+int fm_port_suspend(struct fm_port *port);
-+
-+#ifdef CONFIG_FMAN_PFC
-+/**************************************************************************//**
-+@Function fm_port_set_pfc_priorities_mapping_to_qman_wq
-+
-+@Description Associate a QMan Work Queue with a PFC priority on this
-+ FM-port device (Tx port).
-+
-+@Param[in] port - A handle of the FM port device.
-+
-+@Param[in] prio - The PFC priority.
-+
-+@Param[in] wq - The Work Queue associated with the PFC priority.
-+
-+@Cautions Allowed only after the port is initialized.
-+*//***************************************************************************/
-+int fm_port_set_pfc_priorities_mapping_to_qman_wq(struct fm_port *port,
-+ uint8_t prio, uint8_t wq);
-+#endif
-+
-+/**************************************************************************//**
-+@Function fm_mac_set_exception
-+
-+@Description Set MAC exception state.
-+
-+@Param[in] fm_mac_dev - A handle of the FM MAC device.
-+@Param[in] exception - FM MAC exception type.
-+@Param[in] enable - new state.
-+
-+*//***************************************************************************/
-+int fm_mac_set_exception(struct fm_mac_dev *fm_mac_dev,
-+ e_FmMacExceptions exception, bool enable);
-+
-+int fm_mac_free(struct fm_mac_dev *fm_mac_dev);
-+
-+struct fm_mac_dev *fm_mac_config(t_FmMacParams *params);
-+
-+int fm_mac_config_max_frame_length(struct fm_mac_dev *fm_mac_dev,
-+ int len);
-+
-+int fm_mac_config_pad_and_crc(struct fm_mac_dev *fm_mac_dev, bool enable);
-+
-+int fm_mac_config_half_duplex(struct fm_mac_dev *fm_mac_dev, bool enable);
-+
-+int fm_mac_config_reset_on_init(struct fm_mac_dev *fm_mac_dev, bool enable);
-+
-+int fm_mac_init(struct fm_mac_dev *fm_mac_dev);
-+
-+int fm_mac_get_version(struct fm_mac_dev *fm_mac_dev, uint32_t *version);
-+
-+int fm_mac_enable(struct fm_mac_dev *fm_mac_dev);
-+
-+int fm_mac_disable(struct fm_mac_dev *fm_mac_dev);
-+
-+int fm_mac_resume(struct fm_mac_dev *fm_mac_dev);
-+
-+int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev,
-+ bool enable);
-+
-+int fm_mac_remove_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
-+ t_EnetAddr *mac_addr);
-+
-+int fm_mac_add_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
-+ t_EnetAddr *mac_addr);
-+
-+int fm_mac_modify_mac_addr(struct fm_mac_dev *fm_mac_dev,
-+ uint8_t *addr);
-+
-+int fm_mac_adjust_link(struct fm_mac_dev *fm_mac_dev,
-+ bool link, int speed, bool duplex);
-+
-+int fm_mac_enable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev);
-+
-+int fm_mac_disable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev);
-+
-+int fm_mac_set_rx_pause_frames(
-+ struct fm_mac_dev *fm_mac_dev, bool en);
-+
-+int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
-+ bool en);
-+
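The fm_mac_* calls above follow the same config, init, enable pattern as the rest of the wrapper. A minimal sketch, assuming mac_params has already been filled (its t_FmMacParams fields are not shown in this header) and that NULL/non-zero returns mean failure; error handling is illustrative:

    static struct fm_mac_dev *example_bring_up_mac(t_FmMacParams *mac_params)
    {
            struct fm_mac_dev *mac_dev = fm_mac_config(mac_params);

            if (!mac_dev)
                    return NULL;

            fm_mac_config_max_frame_length(mac_dev, 1522);  /* hypothetical max frame size */
            fm_mac_config_pad_and_crc(mac_dev, true);

            if (fm_mac_init(mac_dev) || fm_mac_enable(mac_dev)) {
                    fm_mac_free(mac_dev);
                    return NULL;
            }
            return mac_dev;
    }
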
-+int fm_rtc_enable(struct fm *fm_dev);
-+
-+int fm_rtc_disable(struct fm *fm_dev);
-+
-+int fm_rtc_get_cnt(struct fm *fm_dev, uint64_t *ts);
-+
-+int fm_rtc_set_cnt(struct fm *fm_dev, uint64_t ts);
-+
-+int fm_rtc_get_drift(struct fm *fm_dev, uint32_t *drift);
-+
-+int fm_rtc_set_drift(struct fm *fm_dev, uint32_t drift);
-+
-+int fm_rtc_set_alarm(struct fm *fm_dev, uint32_t id,
-+ uint64_t time);
-+
-+int fm_rtc_set_fiper(struct fm *fm_dev, uint32_t id,
-+ uint64_t fiper);
-+
-+int fm_mac_set_wol(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
-+ bool en);
-+
-+/**************************************************************************//**
-+@Function fm_macsec_set_exception
-+
-+@Description Set MACSEC exception state.
-+
-+@Param[in] fm_macsec_dev - A handle of the FM MACSEC device.
-+@Param[in] exception - FM MACSEC exception type.
-+@Param[in] enable - new state.
-+
-+*//***************************************************************************/
-+
-+int fm_macsec_set_exception(struct fm_macsec_dev *fm_macsec_dev,
-+ fm_macsec_exception exception, bool enable);
-+int fm_macsec_free(struct fm_macsec_dev *fm_macsec_dev);
-+struct fm_macsec_dev *fm_macsec_config(struct fm_macsec_params *fm_params);
-+int fm_macsec_init(struct fm_macsec_dev *fm_macsec_dev);
-+int fm_macsec_config_unknown_sci_frame_treatment(struct fm_macsec_dev
-+ *fm_macsec_dev,
-+ fm_macsec_unknown_sci_frame_treatment treat_mode);
-+int fm_macsec_config_invalid_tags_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
-+ bool deliver_uncontrolled);
-+int fm_macsec_config_kay_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
-+ bool discard_uncontrolled);
-+int fm_macsec_config_untag_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
-+ fm_macsec_untag_frame_treatment treat_mode);
-+int fm_macsec_config_pn_exhaustion_threshold(struct fm_macsec_dev *fm_macsec_dev,
-+ uint32_t pnExhThr);
-+int fm_macsec_config_keys_unreadable(struct fm_macsec_dev *fm_macsec_dev);
-+int fm_macsec_config_sectag_without_sci(struct fm_macsec_dev *fm_macsec_dev);
-+int fm_macsec_config_exception(struct fm_macsec_dev *fm_macsec_dev,
-+ fm_macsec_exception exception, bool enable);
-+int fm_macsec_get_revision(struct fm_macsec_dev *fm_macsec_dev,
-+ int *macsec_revision);
-+int fm_macsec_enable(struct fm_macsec_dev *fm_macsec_dev);
-+int fm_macsec_disable(struct fm_macsec_dev *fm_macsec_dev);
-+
-+
-+int fm_macsec_secy_config_exception(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_secy_exception exception,
-+ bool enable);
-+int fm_macsec_secy_free(struct fm_macsec_secy_dev *fm_macsec_secy_dev);
-+struct fm_macsec_secy_dev *fm_macsec_secy_config(struct fm_macsec_secy_params *secy_params);
-+int fm_macsec_secy_init(struct fm_macsec_secy_dev *fm_macsec_secy_dev);
-+int fm_macsec_secy_config_sci_insertion_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_sci_insertion_mode sci_insertion_mode);
-+int fm_macsec_secy_config_protect_frames(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ bool protect_frames);
-+int fm_macsec_secy_config_replay_window(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ bool replay_protect, uint32_t replay_window);
-+int fm_macsec_secy_config_validation_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_valid_frame_behavior validate_frames);
-+int fm_macsec_secy_config_confidentiality(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ bool confidentiality_enable,
-+ uint32_t confidentiality_offset);
-+int fm_macsec_secy_config_point_to_point(struct fm_macsec_secy_dev *fm_macsec_secy_dev);
-+int fm_macsec_secy_config_event(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_secy_event event,
-+ bool enable);
-+struct rx_sc_dev *fm_macsec_secy_create_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct fm_macsec_secy_sc_params *params);
-+int fm_macsec_secy_delete_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc);
-+int fm_macsec_secy_create_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc, macsec_an_t an,
-+ uint32_t lowest_pn, macsec_sa_key_t key);
-+int fm_macsec_secy_delete_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc, macsec_an_t an);
-+int fm_macsec_secy_rxsa_enable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an);
-+int fm_macsec_secy_rxsa_disable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an);
-+int fm_macsec_secy_rxsa_update_next_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an, uint32_t updt_next_pn);
-+int fm_macsec_secy_rxsa_update_lowest_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an, uint32_t updt_lowest_pn);
-+int fm_macsec_secy_rxsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an, macsec_sa_key_t key);
-+int fm_macsec_secy_create_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t an, macsec_sa_key_t key);
-+int fm_macsec_secy_delete_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t an);
-+int fm_macsec_secy_txsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t next_active_an,
-+ macsec_sa_key_t key);
-+int fm_macsec_secy_txsa_set_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t an);
-+int fm_macsec_secy_txsa_get_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t *p_an);
-+int fm_macsec_secy_get_rxsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc, uint32_t *sc_phys_id);
-+int fm_macsec_secy_get_txsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ uint32_t *sc_phys_id);
-+
-+/** @} */ /* end of FM_LnxKern_ctrl_grp group */
-+/** @} */ /* end of FM_LnxKern_grp group */
-+
-+/* default values for initializing PTP 1588 timer clock */
-+#define DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT 2 /* power of 2 for better performance */
-+#define DPA_PTP_NOMINAL_FREQ_PERIOD_NS (1 << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT) /* 4ns,250MHz */
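For reference, the shift of 2 gives a nominal period of 1 << 2 = 4 ns, and a 4 ns period corresponds to 1 / 4 ns = 250 MHz, which is where the "4ns,250MHz" note in the comment above comes from.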
-+
-+#endif /* __LNXWRP_FSL_FMAN_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/inc/xx/xx.h
-@@ -0,0 +1,50 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __XX_H
-+#define __XX_H
-+
-+#include "xx_ext.h"
-+
-+void * xx_Malloc(uint32_t n);
-+void xx_Free(void *p);
-+
-+void *xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t align);
-+void xx_FreeSmart(void *p);
-+
-+/* never used: */
-+#define GetDeviceName(irq) ((char *)NULL)
-+
-+int GetDeviceIrqNum(int irq);
-+
-+
-+#endif /* __XX_H */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/system/Makefile
-@@ -0,0 +1,10 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+#
-+
-+obj-y += sys_io.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/system/sys_io.c
-@@ -0,0 +1,171 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/version.h>
-+
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+#include <linux/modversions.h>
-+#else
-+#include <config/modversions.h>
-+#endif /* LINUX_VERSION_CODE */
-+#endif /* MODVERSIONS */
-+
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+
-+#include <asm/io.h>
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "string_ext.h"
-+#include "list_ext.h"
-+#include "sys_io_ext.h"
-+
-+
-+#define __ERR_MODULE__ MODULE_UNKNOWN
-+
-+
-+typedef struct {
-+ uint64_t virtAddr;
-+ uint64_t physAddr;
-+ uint32_t size;
-+ t_List node;
-+} t_IoMap;
-+#define IOMAP_OBJECT(ptr) LIST_OBJECT(ptr, t_IoMap, node)
-+
-+LIST(mapsList);
-+
-+
-+static void EnqueueIoMap(t_IoMap *p_IoMap)
-+{
-+ uint32_t intFlags;
-+
-+ intFlags = XX_DisableAllIntr();
-+ LIST_AddToTail(&p_IoMap->node, &mapsList);
-+ XX_RestoreAllIntr(intFlags);
-+}
-+
-+static t_IoMap * FindIoMapByVirtAddr(uint64_t addr)
-+{
-+ t_IoMap *p_IoMap;
-+ t_List *p_Pos;
-+
-+ LIST_FOR_EACH(p_Pos, &mapsList)
-+ {
-+ p_IoMap = IOMAP_OBJECT(p_Pos);
-+ if ((addr >= p_IoMap->virtAddr) && (addr < p_IoMap->virtAddr+p_IoMap->size))
-+ return p_IoMap;
-+ }
-+
-+ return NULL;
-+}
-+
-+static t_IoMap * FindIoMapByPhysAddr(uint64_t addr)
-+{
-+ t_IoMap *p_IoMap;
-+ t_List *p_Pos;
-+
-+ LIST_FOR_EACH(p_Pos, &mapsList)
-+ {
-+ p_IoMap = IOMAP_OBJECT(p_Pos);
-+ if ((addr >= p_IoMap->physAddr) && (addr < p_IoMap->physAddr+p_IoMap->size))
-+ return p_IoMap;
-+ }
-+
-+ return NULL;
-+}
-+
-+t_Error SYS_RegisterIoMap (uint64_t virtAddr, uint64_t physAddr, uint32_t size)
-+{
-+ t_IoMap *p_IoMap;
-+
-+ p_IoMap = (t_IoMap*)XX_Malloc(sizeof(t_IoMap));
-+ if (!p_IoMap)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IO map object!!!"));
-+ memset(p_IoMap, 0, sizeof(t_IoMap));
-+
-+ p_IoMap->virtAddr = virtAddr;
-+ p_IoMap->physAddr = physAddr;
-+ p_IoMap->size = size;
-+
-+ INIT_LIST(&p_IoMap->node);
-+ EnqueueIoMap(p_IoMap);
-+
-+ return E_OK;
-+}
-+
-+t_Error SYS_UnregisterIoMap (uint64_t virtAddr)
-+{
-+ t_IoMap *p_IoMap = FindIoMapByVirtAddr(virtAddr);
-+ if (!p_IoMap)
-+ RETURN_ERROR(MINOR, E_NO_DEVICE, ("IO map not found in list!!!"));
-+
-+ LIST_Del(&p_IoMap->node);
-+ XX_Free(p_IoMap);
-+
-+ return E_OK;
-+}
-+
-+uint64_t SYS_PhysToVirt(uint64_t addr)
-+{
-+ t_IoMap *p_IoMap = FindIoMapByPhysAddr(addr);
-+ if (p_IoMap)
-+ {
-+ /* This is an optimization: move the most recently used entry to the list head, like a cache */
-+ if (mapsList.p_Next != &p_IoMap->node)
-+ {
-+ uint32_t intFlags = XX_DisableAllIntr();
-+ LIST_DelAndInit(&p_IoMap->node);
-+ LIST_Add(&p_IoMap->node, &mapsList);
-+ XX_RestoreAllIntr(intFlags);
-+ }
-+ return (uint64_t)(addr - p_IoMap->physAddr + p_IoMap->virtAddr);
-+ }
-+ return PTR_TO_UINT(phys_to_virt((unsigned long)addr));
-+}
-+
-+uint64_t SYS_VirtToPhys(uint64_t addr)
-+{
-+ t_IoMap *p_IoMap;
-+
-+ if (addr == 0)
-+ return 0;
-+
-+ p_IoMap = FindIoMapByVirtAddr(addr);
-+ if (p_IoMap)
-+ return (uint64_t)(addr - p_IoMap->virtAddr + p_IoMap->physAddr);
-+ return (uint64_t)virt_to_phys(UINT_TO_PTR(addr));
-+}
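
A minimal sketch of how a caller might use the map registry above after ioremap(); the physical address and window size are hypothetical:

    static void example_register_window(void)
    {
            void __iomem *regs = ioremap(0xffe400000ULL, 0x1000);  /* hypothetical FMan window */

            if (!regs)
                    return;

            if (SYS_RegisterIoMap(PTR_TO_UINT(regs), 0xffe400000ULL, 0x1000) == E_OK) {
                    uint64_t virt = SYS_PhysToVirt(0xffe400100ULL);

                    /* translations round-trip through the registered window */
                    WARN_ON(SYS_VirtToPhys(virt) != 0xffe400100ULL);
                    SYS_UnregisterIoMap(PTR_TO_UINT(regs));
            }
            iounmap(regs);
    }
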
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/Makefile
-@@ -0,0 +1,19 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/inc
-+
-+ccflags-y += -I$(NCSW_FM_INC)
-+ccflags-y += -I$(NET_DPA)
-+
-+obj-y += fsl-ncsw-PFM.o
-+obj-$(CONFIG_FSL_SDK_FMAN_TEST) += fman_test.o
-+
-+fsl-ncsw-PFM-objs := lnxwrp_fm.o lnxwrp_fm_port.o lnxwrp_ioctls_fm.o \
-+ lnxwrp_sysfs.o lnxwrp_sysfs_fm.o lnxwrp_sysfs_fm_port.o
-+obj-$(CONFIG_COMPAT) += lnxwrp_ioctls_fm_compat.o
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/fman_test.c
-@@ -0,0 +1,1665 @@
-+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File fman_test.c
-+ @Authors Pistirica Sorin Andrei
-+ @Description FM Linux test environment
-+*/
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/cdev.h>
-+#include <linux/device.h>
-+#include <linux/io.h>
-+#include <linux/ioport.h>
-+#include <linux/of_platform.h>
-+#include <linux/ip.h>
-+#include <linux/compat.h>
-+#include <linux/uaccess.h>
-+#include <linux/errno.h>
-+#include <linux/netdevice.h>
-+#include <linux/spinlock.h>
-+#include <linux/types.h>
-+#include <linux/fsl_qman.h>
-+#include <linux/fsl_bman.h>
-+
-+/* private headers */
-+#include "fm_ext.h"
-+#include "lnxwrp_fsl_fman.h"
-+#include "fm_port_ext.h"
-+#if (DPAA_VERSION == 11)
-+#include "../../Peripherals/FM/MAC/memac.h"
-+#endif
-+#include "fm_test_ioctls.h"
-+#include "fsl_fman_test.h"
-+
-+#include "dpaa_eth.h"
-+#include "dpaa_eth_common.h"
-+
-+#define FMT_FRM_WATERMARK 0xdeadbeefdeadbeeaLL
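-+
-+/*
-+ * Editor's note (derived from the code below): frames injected by this test
-+ * module carry the 64-bit watermark above somewhere in their payload. The
-+ * dpaa rx/tx hooks scan every frame for it (see test_and_steal_frame()) to
-+ * decide whether the frame belongs to the test harness or should be left to
-+ * the regular dpaa_eth path.
-+ */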
-+
-+struct fmt_frame_s {
-+ ioc_fmt_buff_desc_t buff;
-+ struct list_head list;
-+};
-+
-+struct fmt_fqs_s {
-+ struct qman_fq fq_base;
-+ bool init;
-+ struct fmt_port_s *fmt_port_priv;
-+};
-+
-+struct fmt_port_pcd_s {
-+ int num_queues;
-+ struct fmt_fqs_s *fmt_pcd_fqs;
-+ uint32_t fqid_base;
-+};
-+
-+/* char dev structure: fm test port */
-+struct fmt_port_s {
-+ bool valid;
-+ uint8_t id;
-+ ioc_fmt_port_type port_type;
-+ ioc_diag_mode diag;
-+ bool compat_test_type;
-+
-+ /* fm ports */
-+ /* ! for oh ports p_tx_fm_port_dev == p_rx_fm_port_dev &&
-+ * p_tx_port == p_rx_port */
-+ /* t_LnxWrpFmPortDev */
-+ struct fm_port *p_tx_port;
-+ /* t_LnxWrpFmPortDev->h_Dev: t_FmPort */
-+ void *p_tx_fm_port_dev;
-+ /* t_LnxWrpFmPortDev */
-+ struct fm_port *p_rx_port;
-+ /* t_LnxWrpFmPortDev->h_Dev: t_FmPort */
-+ void *p_rx_fm_port_dev;
-+
-+ void *p_mac_dev;
-+ uint64_t fm_phys_base_addr;
-+
-+ /* read/write queue manipulation */
-+ spinlock_t rx_q_lock;
-+ struct list_head rx_q;
-+
-+ /* tx queues for injecting traffic */
-+ int num_of_tx_fqs;
-+ struct fmt_fqs_s p_tx_fqs[FMAN_TEST_MAX_TX_FQS];
-+
-+ /* pcd private queues manipulation */
-+ struct fmt_port_pcd_s fmt_port_pcd;
-+
-+ /* debugging stuff */
-+
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ atomic_t enqueue_to_qman_frm;
-+ atomic_t enqueue_to_rxq;
-+ atomic_t dequeue_from_rxq;
-+ atomic_t not_enqueue_to_rxq_wrong_frm;
-+#endif
-+
-+};
-+
-+/* The devices. */
-+struct fmt_s {
-+ int major;
-+ struct fmt_port_s ports[IOC_FMT_MAX_NUM_OF_PORTS];
-+ struct class *fmt_class;
-+};
-+
-+/* fm test structure */
-+static struct fmt_s fm_test;
-+
-+#if (DPAA_VERSION == 11)
-+struct mac_priv_s {
-+ t_Handle mac;
-+};
-+#endif
-+
-+#define DTSEC_BASE_ADDR 0x000e0000
-+#define DTSEC_MEM_RANGE 0x00002000
-+#define MAC_1G_MACCFG1 0x00000100
-+#define MAC_1G_LOOP_MASK 0x00000100
-+static int set_1gmac_loopback(
-+ struct fmt_port_s *fmt_port,
-+ bool en)
-+{
-+#if (DPAA_VERSION <= 10)
-+ uint32_t dtsec_idx = fmt_port->id; /* dtsec for which port */
-+ uint32_t dtsec_idx_off = dtsec_idx * DTSEC_MEM_RANGE;
-+ phys_addr_t maccfg1_hw;
-+ void *maccfg1_map;
-+ uint32_t maccfg1_val;
-+
-+ /* compute the maccfg1 register address */
-+ maccfg1_hw = fmt_port->fm_phys_base_addr +
-+ (phys_addr_t)(DTSEC_BASE_ADDR +
-+ dtsec_idx_off +
-+ MAC_1G_MACCFG1);
-+
-+ /* map register */
-+ maccfg1_map = ioremap(maccfg1_hw, sizeof(u32));
-+
-+ /* set register */
-+ maccfg1_val = in_be32(maccfg1_map);
-+ if (en)
-+ maccfg1_val |= MAC_1G_LOOP_MASK;
-+ else
-+ maccfg1_val &= ~MAC_1G_LOOP_MASK;
-+ out_be32(maccfg1_map, maccfg1_val);
-+
-+ /* unmap register */
-+ iounmap(maccfg1_map);
-+#else
-+ struct mac_device *mac_dev;
-+ struct mac_priv_s *priv;
-+ t_Memac *p_memac;
-+
-+ if (!fmt_port)
-+ return -EINVAL;
-+
-+ mac_dev = (struct mac_device *)fmt_port->p_mac_dev;
-+
-+ if (!mac_dev)
-+ return -EINVAL;
-+
-+ priv = macdev_priv(mac_dev);
-+
-+ if (!priv)
-+ return -EINVAL;
-+
-+ p_memac = priv->mac;
-+
-+ if (!p_memac)
-+ return -EINVAL;
-+
-+ memac_set_loopback(p_memac->p_MemMap, en);
-+#endif
-+ return 0;
-+}
-+
-+/* TODO: re-write this function */
-+static int set_10gmac_int_loopback(
-+ struct fmt_port_s *fmt_port,
-+ bool en)
-+{
-+#ifndef FM_10G_MAC_NO_CTRL_LOOPBACK
-+#define FM_10GMAC0_OFFSET 0x000f0000
-+#define FM_10GMAC_CMD_CONF_CTRL_OFFSET 0x8
-+#define CMD_CFG_LOOPBACK_EN 0x00000400
-+
-+ uint64_t base_addr, reg_addr;
-+ uint32_t tmp_val;
-+
-+ base_addr = fmt_port->fm_phys_base_addr + (FM_10GMAC0_OFFSET +
-+ ((fmt_port->id-FM_MAX_NUM_OF_1G_RX_PORTS)*0x2000));
-+
-+ base_addr = PTR_TO_UINT(ioremap(base_addr, 0x1000));
-+
-+ reg_addr = base_addr + FM_10GMAC_CMD_CONF_CTRL_OFFSET;
-+ tmp_val = GET_UINT32(*((uint32_t *)UINT_TO_PTR(reg_addr)));
-+ if (en)
-+ tmp_val |= CMD_CFG_LOOPBACK_EN;
-+ else
-+ tmp_val &= ~CMD_CFG_LOOPBACK_EN;
-+ WRITE_UINT32(*((uint32_t *)UINT_TO_PTR(reg_addr)), tmp_val);
-+
-+ iounmap(UINT_TO_PTR(base_addr));
-+
-+ return 0;
-+#else
-+ _fmt_err("TGEC does not support internal loopback.\n");
-+ return -EPERM;
-+#endif
-+}
-+
-+static int set_mac_int_loopback(struct fmt_port_s *fmt_port, bool en)
-+{
-+ int _err = 0;
-+
-+ switch (fmt_port->port_type) {
-+
-+ case e_IOC_FMT_PORT_T_RXTX:
-+ /* 1G port */
-+ if (fmt_port->id < FM_MAX_NUM_OF_1G_RX_PORTS)
-+ _err = set_1gmac_loopback(fmt_port, en);
-+ /* 10g port */
-+ else if ((fmt_port->id >= FM_MAX_NUM_OF_1G_RX_PORTS) &&
-+ (fmt_port->id < FM_MAX_NUM_OF_1G_RX_PORTS +
-+ FM_MAX_NUM_OF_10G_RX_PORTS)) {
-+
-+ _err = set_10gmac_int_loopback(fmt_port, en);
-+ } else
-+ _err = -EINVAL;
-+ break;
-+ /* op port does not have MAC (loopback mode) */
-+ case e_IOC_FMT_PORT_T_OP:
-+
-+ _err = 0;
-+ break;
-+ default:
-+
-+ _err = -EPERM;
-+ break;
-+ }
-+
-+ return _err;
-+}
-+
-+static void enqueue_fmt_frame(
-+ struct fmt_port_s *fmt_port,
-+ struct fmt_frame_s *p_fmt_frame)
-+{
-+ spinlock_t *rx_q_lock = NULL;
-+
-+ rx_q_lock = &fmt_port->rx_q_lock;
-+
-+ spin_lock(rx_q_lock);
-+ list_add_tail(&p_fmt_frame->list, &fmt_port->rx_q);
-+ spin_unlock(rx_q_lock);
-+
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ atomic_inc(&fmt_port->enqueue_to_rxq);
-+#endif
-+}
-+
-+static struct fmt_frame_s *dequeue_fmt_frame(
-+ struct fmt_port_s *fmt_port)
-+{
-+ struct fmt_frame_s *p_fmt_frame = NULL;
-+ spinlock_t *rx_q_lock = NULL;
-+
-+ rx_q_lock = &fmt_port->rx_q_lock;
-+
-+ spin_lock(rx_q_lock);
-+
-+#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)
-+
-+ if (!list_empty(&fmt_port->rx_q)) {
-+ p_fmt_frame = list_last_entry(&fmt_port->rx_q,
-+ struct fmt_frame_s,
-+ list);
-+ list_del(&p_fmt_frame->list);
-+
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ atomic_inc(&fmt_port->dequeue_from_rxq);
-+#endif
-+ }
-+
-+ spin_unlock(rx_q_lock);
-+
-+ return p_fmt_frame;
-+}
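-+
-+/*
-+ * Editor's note: enqueue_fmt_frame() adds at the tail of rx_q and
-+ * dequeue_fmt_frame() also takes the tail entry, so the most recently
-+ * enqueued frame is returned first (LIFO order), not oldest-first.
-+ */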
-+
-+/* eth-dev -to- fmt port association */
-+struct fmt_port_s *match_dpa_to_fmt_port(
-+ struct dpa_priv_s *dpa_priv) {
-+ struct mac_device *mac_dev = dpa_priv->mac_dev;
-+ struct fm_port *fm_port = (struct fm_port *) mac_dev;
-+ struct fmt_port_s *fmt_port = NULL;
-+ int i;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ /* find the FM-test-port object */
-+ for (i = 0; i < IOC_FMT_MAX_NUM_OF_PORTS; i++)
-+ if ((fm_test.ports[i].p_mac_dev &&
-+ mac_dev == fm_test.ports[i].p_mac_dev) ||
-+ fm_port == fm_test.ports[i].p_tx_port) {
-+
-+ fmt_port = &fm_test.ports[i];
-+ break;
-+ }
-+
-+ _fmt_dbgr("called\n");
-+ return fmt_port;
-+}
-+
-+void dump_frame(
-+ uint8_t *buffer,
-+ uint32_t size)
-+{
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ unsigned int i;
-+
-+ for (i = 0; i < size; i++) {
-+ if (i%16 == 0)
-+ printk(KERN_DEBUG "\n");
-+ printk(KERN_DEBUG "%2x ", *(buffer+i));
-+ }
-+#endif
-+ return;
-+}
-+
-+bool test_and_steal_frame(struct fmt_port_s *fmt_port,
-+ uint32_t fqid,
-+ uint8_t *buffer,
-+ uint32_t size)
-+{
-+ struct fmt_frame_s *p_fmt_frame = NULL;
-+ bool test_and_steal_frame_frame;
-+ uint32_t data_offset;
-+ uint32_t i;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ if (!fmt_port || !fmt_port->p_rx_fm_port_dev)
-+ return false;
-+
-+ /* check watermark */
-+ test_and_steal_frame_frame = false;
-+ for (i = 0; i < size; i++) {
-+ uint64_t temp = *((uint64_t *)(buffer + i));
-+
-+ if (temp == (uint64_t) FMT_FRM_WATERMARK) {
-+ _fmt_dbgr("watermark found!\n");
-+ test_and_steal_frame_frame = true;
-+ break;
-+ }
-+ }
-+
-+ if (!test_and_steal_frame_frame) {
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ atomic_inc(&fmt_port->not_enqueue_to_rxq_wrong_frm);
-+#endif
-+ _fmt_dbgr("NOT watermark found!\n");
-+ return false;
-+ }
-+
-+ /* do not enqueue the tx conf/err frames */
-+ if ((fqid == FMT_TX_CONF_Q) || (fqid == FMT_TX_ERR_Q))
-+ goto _test_and_steal_frame_return_true;
-+
-+ _fmt_dbgr("on port %d got FMUC frame\n", fmt_port->id);
-+ data_offset = FM_PORT_GetBufferDataOffset(
-+ fmt_port->p_rx_fm_port_dev);
-+
-+ p_fmt_frame = kmalloc(sizeof(struct fmt_frame_s), GFP_KERNEL);
-+
-+ /* dump frame... no more space left on device */
-+ if (p_fmt_frame == NULL) {
-+ _fmt_err("no space left on device!\n");
-+ goto _test_and_steal_frame_return_true;
-+ }
-+
-+ memset(p_fmt_frame, 0, sizeof(struct fmt_frame_s));
-+ p_fmt_frame->buff.p_data = kmalloc(size * sizeof(uint8_t), GFP_KERNEL);
-+
-+ /* No more space left on device*/
-+ if (p_fmt_frame->buff.p_data == NULL) {
-+ _fmt_err("no space left on device!\n");
-+ kfree(p_fmt_frame);
-+ goto _test_and_steal_frame_return_true;
-+ }
-+
-+ p_fmt_frame->buff.size = size-data_offset;
-+ p_fmt_frame->buff.qid = fqid;
-+
-+ memcpy(p_fmt_frame->buff.p_data,
-+ (uint8_t *)PTR_MOVE(buffer, data_offset),
-+ p_fmt_frame->buff.size);
-+
-+ memcpy(p_fmt_frame->buff.buff_context.fm_prs_res,
-+ FM_PORT_GetBufferPrsResult(fmt_port->p_rx_fm_port_dev,
-+ (char *)buffer),
-+ 32);
-+
-+ /* enqueue frame - this frame will go to us */
-+ enqueue_fmt_frame(fmt_port, p_fmt_frame);
-+
-+_test_and_steal_frame_return_true:
-+ return true;
-+}
-+
-+static int fmt_fq_release(const struct qm_fd *fd)
-+{
-+ struct dpa_bp *_dpa_bp;
-+ struct bm_buffer _bmb;
-+
-+ if (fd->format == qm_fd_contig) {
-+ _dpa_bp = dpa_bpid2pool(fd->bpid);
-+ BUG_ON(IS_ERR(_dpa_bp));
-+
-+ _bmb.hi = fd->addr_hi;
-+ _bmb.lo = fd->addr_lo;
-+
-+ while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
-+ cpu_relax();
-+
-+ } else {
-+ _fmt_err("frame not supported !\n");
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
-+/* sync it w/ dpaa_eth.c: DPA_BP_HEAD */
-+#define DPA_BP_HEADROOM (DPA_TX_PRIV_DATA_SIZE + \
-+ fm_get_rx_extra_headroom() + \
-+ DPA_PARSE_RESULTS_SIZE + \
-+ DPA_HASH_RESULTS_SIZE)
-+#define MAC_HEADER_LENGTH 14
-+#define L2_AND_HEADROOM_OFF ((DPA_BP_HEADROOM) + (MAC_HEADER_LENGTH))
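-+
-+/*
-+ * Editor's sketch of the Rx buffer layout assumed by the hooks below
-+ * (the individual sizes are defined elsewhere in the dpaa_eth driver,
-+ * MAC_HEADER_LENGTH is the 14-byte Ethernet header):
-+ *
-+ *   | tx priv | rx extra headroom | parse res | hash res | L2 (14B) | payload
-+ *   |<--------------------- DPA_BP_HEADROOM ------------>|
-+ *   |<-------------------- L2_AND_HEADROOM_OFF ---------------------->|
-+ *
-+ * fmt_rx_default_hook() walks skb->data back by L2_AND_HEADROOM_OFF to
-+ * recover the start of the hardware buffer.
-+ */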
-+
-+/* dpa ingress hooks definition */
-+enum dpaa_eth_hook_result fmt_rx_default_hook(
-+ struct sk_buff *skb,
-+ struct net_device *net_dev,
-+ u32 fqid)
-+{
-+ struct dpa_priv_s *dpa_priv = NULL;
-+ struct fmt_port_s *fmt_port = NULL;
-+ uint8_t *buffer;
-+ uint32_t buffer_len;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ dpa_priv = netdev_priv(net_dev);
-+ fmt_port = match_dpa_to_fmt_port(dpa_priv);
-+
-+ /* conversion from skb to fd:
-+ * the skb comes already processed for L3, so we need to go back
-+ * to the layer 2 offset */
-+ buffer = (uint8_t *)(skb->data - ((int)L2_AND_HEADROOM_OFF));
-+ buffer_len = skb->len + ((int)L2_AND_HEADROOM_OFF);
-+
-+ /* if it is not our frame, let dpa handle it */
-+ if (test_and_steal_frame(fmt_port,
-+ FMT_RX_DFLT_Q,
-+ buffer,
-+ buffer_len))
-+ goto _fmt_rx_default_hook_stolen;
-+
-+ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
-+ return DPAA_ETH_CONTINUE;
-+
-+_fmt_rx_default_hook_stolen:
-+ dev_kfree_skb(skb);
-+
-+ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
-+ return DPAA_ETH_STOLEN;
-+}
-+
-+enum dpaa_eth_hook_result fmt_rx_error_hook(
-+ struct net_device *net_dev,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ struct dpa_priv_s *dpa_priv = NULL;
-+ struct dpa_bp *dpa_bp = NULL;
-+ struct fmt_port_s *fmt_port = NULL;
-+ void *fd_virt_addr = NULL;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ dpa_priv = netdev_priv(net_dev);
-+ fmt_port = match_dpa_to_fmt_port(dpa_priv);
-+
-+ /* dpaa doesn't do this... we have to do it here */
-+ dpa_bp = dpa_bpid2pool(fd->bpid);
-+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
-+
-+ fd_virt_addr = phys_to_virt(addr);
-+ /* if it is not our frame, let dpa handle it */
-+ if (test_and_steal_frame(fmt_port,
-+ FMT_RX_ERR_Q,
-+ fd_virt_addr,
-+ fd->length20 + fd->offset)) {
-+ goto _fmt_rx_error_hook_stolen;
-+ }
-+
-+ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
-+ return DPAA_ETH_CONTINUE;
-+
-+_fmt_rx_error_hook_stolen:
-+ /* the frame data doesn't matter,
-+ * so, no mapping is needed */
-+ fmt_fq_release(fd);
-+
-+ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
-+ return DPAA_ETH_STOLEN;
-+}
-+
-+enum dpaa_eth_hook_result fmt_tx_confirm_hook(
-+ struct net_device *net_dev,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ struct dpa_priv_s *dpa_priv = NULL;
-+ struct fmt_port_s *fmt_port = NULL;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ void *fd_virt_addr = NULL;
-+ uint32_t fd_len = 0;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ dpa_priv = netdev_priv(net_dev);
-+ fmt_port = match_dpa_to_fmt_port(dpa_priv);
-+
-+ fd_virt_addr = phys_to_virt(addr);
-+ fd_len = fd->length20 + fd->offset;
-+
-+ if (fd_len > fm_get_max_frm()) {
-+ _fmt_err("tx confirm bad frame size: %u!\n", fd_len);
-+ goto _fmt_tx_confirm_hook_continue;
-+ }
-+
-+ if (test_and_steal_frame(fmt_port,
-+ FMT_TX_CONF_Q,
-+ fd_virt_addr,
-+ fd_len))
-+ goto _fmt_tx_confirm_hook_stolen;
-+
-+_fmt_tx_confirm_hook_continue:
-+ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
-+ return DPAA_ETH_CONTINUE;
-+
-+_fmt_tx_confirm_hook_stolen:
-+ kfree(fd_virt_addr);
-+
-+ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
-+ return DPAA_ETH_STOLEN;
-+}
-+
-+enum dpaa_eth_hook_result fmt_tx_confirm_error_hook(
-+ struct net_device *net_dev,
-+ const struct qm_fd *fd,
-+ u32 fqid)
-+{
-+ struct dpa_priv_s *dpa_priv = NULL;
-+ struct fmt_port_s *fmt_port = NULL;
-+ dma_addr_t addr = qm_fd_addr(fd);
-+ void *fd_virt_addr = NULL;
-+ uint32_t fd_len = 0;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ dpa_priv = netdev_priv(net_dev);
-+ fmt_port = match_dpa_to_fmt_port(dpa_priv);
-+
-+ fd_virt_addr = phys_to_virt(addr);
-+ fd_len = fd->length20 + fd->offset;
-+
-+ if (fd_len > fm_get_max_frm()) {
-+ _fmt_err("tx confirm err bad frame size: %u !\n", fd_len);
-+ goto _priv_ingress_tx_err_continue;
-+ }
-+
-+ if (test_and_steal_frame(fmt_port, FMT_TX_ERR_Q, fd_virt_addr, fd_len))
-+ goto _priv_ingress_tx_err_stolen;
-+
-+_priv_ingress_tx_err_continue:
-+ _fmt_dbgr("called:DPAA_ETH_CONTINUE.\n");
-+ return DPAA_ETH_CONTINUE;
-+
-+_priv_ingress_tx_err_stolen:
-+ kfree(fd_virt_addr);
-+
-+ _fmt_dbgr("called:DPAA_ETH_STOLEN.\n");
-+ return DPAA_ETH_STOLEN;
-+}
-+
-+/* egress callbacks definition */
-+enum qman_cb_dqrr_result fmt_egress_dqrr(
-+ struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dqrr)
-+{
-+ /* this callback should never be called */
-+ BUG();
-+ return qman_cb_dqrr_consume;
-+}
-+
-+static void fmt_egress_error_dqrr(
-+ struct qman_portal *p,
-+ struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ uint8_t *fd_virt_addr = NULL;
-+
-+ /* tx failure, on the ern callback - release buffer */
-+ fd_virt_addr = (uint8_t *)phys_to_virt(qm_fd_addr(&msg->ern.fd));
-+ kfree(fd_virt_addr);
-+
-+ return;
-+}
-+
-+static const struct qman_fq fmt_egress_fq = {
-+ .cb = { .dqrr = fmt_egress_dqrr,
-+ .ern = fmt_egress_error_dqrr,
-+ .fqs = NULL}
-+};
-+
-+int fmt_fq_alloc(
-+ struct fmt_fqs_s *fmt_fqs,
-+ const struct qman_fq *qman_fq,
-+ uint32_t fqid, uint32_t flags,
-+ uint16_t channel, uint8_t wq)
-+{
-+ int _errno = 0;
-+
-+ _fmt_dbg("calling...\n");
-+
-+ fmt_fqs->fq_base = *qman_fq;
-+
-+ if (fqid == 0) {
-+ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
-+ flags &= ~QMAN_FQ_FLAG_NO_MODIFY;
-+ } else
-+ flags &= ~QMAN_FQ_FLAG_DYNAMIC_FQID;
-+
-+ fmt_fqs->init = !(flags & QMAN_FQ_FLAG_NO_MODIFY);
-+
-+ _errno = qman_create_fq(fqid, flags, &fmt_fqs->fq_base);
-+ if (_errno < 0) {
-+ _fmt_err("frame queues create failed.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (fmt_fqs->init) {
-+ struct qm_mcc_initfq initfq;
-+
-+ initfq.we_mask = QM_INITFQ_WE_DESTWQ;
-+ initfq.fqd.dest.channel = channel;
-+ initfq.fqd.dest.wq = wq;
-+
-+ _errno = qman_init_fq(&fmt_fqs->fq_base,
-+ QMAN_INITFQ_FLAG_SCHED,
-+ &initfq);
-+ if (_errno < 0) {
-+ _fmt_err("frame queues init error.\n");
-+ qman_destroy_fq(&fmt_fqs->fq_base, 0);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ _fmt_dbg("called.\n");
-+ return 0;
-+}
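-+
-+/*
-+ * Editor's note: passing fqid == 0 to fmt_fq_alloc() requests a dynamically
-+ * allocated FQID (QMAN_FQ_FLAG_DYNAMIC_FQID); this is how the egress queues
-+ * in fmt_port_init() are created. The PCD queues instead pass explicit FQIDs
-+ * obtained from qman_alloc_fqid_range() in dpa_alloc_pcd_fqids().
-+ */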
-+
-+static int fmt_fq_free(struct fmt_fqs_s *fmt_fq)
-+{
-+ int _err = 0;
-+
-+ _fmt_dbg("calling...\n");
-+
-+ if (fmt_fq->init) {
-+ _err = qman_retire_fq(&fmt_fq->fq_base, NULL);
-+ if (unlikely(_err < 0))
-+ _fmt_err("qman_retire_fq(%u) = %d\n",
-+ qman_fq_fqid(&fmt_fq->fq_base), _err);
-+
-+ _err = qman_oos_fq(&fmt_fq->fq_base);
-+ if (unlikely(_err < 0))
-+ _fmt_err("qman_oos_fq(%u) = %d\n",
-+ qman_fq_fqid(&fmt_fq->fq_base), _err);
-+ }
-+
-+ qman_destroy_fq(&fmt_fq->fq_base, 0);
-+
-+ _fmt_dbg("called.\n");
-+ return _err;
-+}
-+
-+/* private pcd dqrr callbacks */
-+static enum qman_cb_dqrr_result fmt_pcd_dqrr(
-+ struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ struct dpa_bp *dpa_bp = NULL;
-+ dma_addr_t addr = qm_fd_addr(&dq->fd);
-+ uint8_t *fd_virt_addr = NULL;
-+ struct fmt_port_s *fmt_port;
-+ struct fmt_port_pcd_s *fmt_port_pcd;
-+ uint32_t relative_fqid = 0;
-+ uint32_t fd_len = 0;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ /* upcast - from pcd_alloc_fq */
-+ fmt_port = ((struct fmt_fqs_s *)fq)->fmt_port_priv;
-+ if (!fmt_port) {
-+ _fmt_err("wrong fmt port-to-fq match.\n");
-+ goto _fmt_pcd_dqrr_return;
-+ }
-+ fmt_port_pcd = &fmt_port->fmt_port_pcd;
-+
-+ relative_fqid = dq->fqid - fmt_port_pcd->fqid_base;
-+ _fmt_dbgr("pcd dqrr got frame on relative fq:%u@base:%u\n",
-+ relative_fqid, fmt_port_pcd->fqid_base);
-+
-+ fd_len = dq->fd.length20 + dq->fd.offset;
-+
-+ if (fd_len > fm_get_max_frm()) {
-+ _fmt_err("pcd dqrr wrong frame size: %u (%u:%u)!\n",
-+ fd_len, dq->fd.length20, dq->fd.offset);
-+ goto _fmt_pcd_dqrr_return;
-+ }
-+
-+ dpa_bp = dpa_bpid2pool(dq->fd.bpid);
-+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
-+
-+ fd_virt_addr = phys_to_virt(addr);
-+ if (!test_and_steal_frame(fmt_port, relative_fqid, fd_virt_addr,
-+ fd_len)) {
-+
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ atomic_inc(&fmt_port->not_enqueue_to_rxq_wrong_frm);
-+#endif
-+ _fmt_wrn("pcd dqrr unrecognized frame@fqid: %u,"
-+ " frame len: %u (dropped).\n",
-+ dq->fqid, dq->fd.length20);
-+ dump_frame(fd_virt_addr, fd_len);
-+ }
-+
-+_fmt_pcd_dqrr_return:
-+ /* no need to map again here */
-+ fmt_fq_release(&dq->fd);
-+
-+ _fmt_dbgr("called.\n");
-+ return qman_cb_dqrr_consume;
-+}
-+
-+static void fmt_pcd_err_dqrr(
-+ struct qman_portal *qm,
-+ struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ _fmt_err("this callback should never be called.\n");
-+ BUG();
-+ return;
-+}
-+
-+static void fmt_pcd_fqs_dqrr(
-+ struct qman_portal *qm,
-+ struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ _fmt_dbg("fq state(0x%x)@fqid(%u).\n", msg->fq.fqs, msg->fq.fqid);
-+ return;
-+}
-+
-+/* private pcd queue template */
-+static const struct qman_fq pcd_fq = {
-+ .cb = { .dqrr = fmt_pcd_dqrr,
-+ .ern = fmt_pcd_err_dqrr,
-+ .fqs = fmt_pcd_fqs_dqrr}
-+};
-+
-+/* defined as weak in dpaa driver. */
-+/* ! parameters come from IOCTL call - US */
-+int dpa_alloc_pcd_fqids(
-+ struct device *dev,
-+ uint32_t num, uint8_t alignment,
-+ uint32_t *base_fqid)
-+{
-+ int _err = 0, i;
-+ struct net_device *net_dev = NULL;
-+ struct dpa_priv_s *dpa_priv = NULL;
-+ struct fmt_port_pcd_s *fmt_port_pcd = NULL;
-+ struct fmt_fqs_s *fmt_fqs = NULL;
-+ struct fmt_port_s *fmt_port = NULL;
-+ int num_allocated = 0;
-+
-+ _fmt_dbg("calling...\n");
-+
-+ net_dev = (typeof(net_dev))dev_get_drvdata(dev);
-+ dpa_priv = (typeof(dpa_priv))netdev_priv(net_dev);
-+
-+ if (!netif_msg_probe(dpa_priv)) {
-+ _fmt_err("dpa not probed.\n");
-+ _err = -ENODEV;
-+ goto _pcd_alloc_fqs_err;
-+ }
-+
-+ fmt_port = match_dpa_to_fmt_port(dpa_priv);
-+ if (!fmt_port) {
-+ _fmt_err("fmt port not found.");
-+ _err = -EINVAL;
-+ goto _pcd_alloc_fqs_err;
-+ }
-+
-+ fmt_port_pcd = &fmt_port->fmt_port_pcd;
-+
-+ num_allocated = qman_alloc_fqid_range(base_fqid, num, alignment, 0);
-+
-+ if ((num_allocated <= 0) ||
-+ (num_allocated < num) ||
-+ (alignment && (*base_fqid) % alignment)) {
-+ *base_fqid = 0;
-+ _fmt_err("Failed to alloc pcd fqs range.\n");
-+ _err = -EINVAL;
-+ goto _pcd_alloc_fqs_err;
-+ }
-+
-+ _fmt_dbg("wanted %d fqs(align %d), got %d fqids@%u.\n",
-+ num, alignment, num_allocated, *base_fqid);
-+
-+ /* alloc pcd queues */
-+ fmt_port_pcd->fmt_pcd_fqs = kmalloc(num_allocated *
-+ sizeof(struct fmt_fqs_s),
-+ GFP_KERNEL);
-+ if (!fmt_port_pcd->fmt_pcd_fqs) {
-+ _err = -ENOMEM;
-+ goto _pcd_alloc_fqs_err;
-+ }
-+ fmt_port_pcd->num_queues = num_allocated;
-+ fmt_port_pcd->fqid_base = *base_fqid;
-+ fmt_fqs = fmt_port_pcd->fmt_pcd_fqs;
-+
-+ /* alloc the pcd queues */
-+ for (i = 0; i < num_allocated; i++, fmt_fqs++) {
-+ _err = fmt_fq_alloc(
-+ fmt_fqs,
-+ &pcd_fq,
-+ (*base_fqid) + i, QMAN_FQ_FLAG_NO_ENQUEUE,
-+ dpa_priv->channel, 7);
-+
-+ if (_err < 0)
-+ goto _pcd_alloc_fqs_err;
-+
-+ /* upcast to identify from where the frames came from */
-+ fmt_fqs->fmt_port_priv = fmt_port;
-+ }
-+
-+ _fmt_dbg("called.\n");
-+ return _err;
-+_pcd_alloc_fqs_err:
-+ if (num_allocated > 0)
-+ qman_release_fqid_range(*base_fqid, num_allocated);
-+ /* TODO: free fmt_pcd_fqs if there are any */
-+
-+ _fmt_dbg("called(_err:%d).\n", _err);
-+ return _err;
-+}
-+
-+/* defined as weak in dpaa driver. */
-+int dpa_free_pcd_fqids(
-+ struct device *dev,
-+ uint32_t base_fqid)
-+{
-+
-+ int _err = 0, i;
-+ struct net_device *net_dev = NULL;
-+ struct dpa_priv_s *dpa_priv = NULL;
-+ struct fmt_port_pcd_s *fmt_port_pcd = NULL;
-+ struct fmt_fqs_s *fmt_fqs = NULL;
-+ struct fmt_port_s *fmt_port = NULL;
-+ int num_allocated = 0;
-+
-+ _fmt_dbg("calling...\n");
-+
-+ net_dev = (typeof(net_dev))dev_get_drvdata(dev);
-+ dpa_priv = (typeof(dpa_priv))netdev_priv(net_dev);
-+
-+ if (!netif_msg_probe(dpa_priv)) {
-+ _fmt_err("dpa not probed.\n");
-+ _err = -ENODEV;
-+ goto _pcd_free_fqs_err;
-+ }
-+
-+ fmt_port = match_dpa_to_fmt_port(dpa_priv);
-+ if (!fmt_port) {
-+ _fmt_err("fmt port not found.");
-+ _err = -EINVAL;
-+ goto _pcd_free_fqs_err;
-+ }
-+
-+ fmt_port_pcd = &fmt_port->fmt_port_pcd;
-+ num_allocated = fmt_port_pcd->num_queues;
-+ fmt_fqs = fmt_port_pcd->fmt_pcd_fqs;
-+
-+ for (i = 0; i < num_allocated; i++, fmt_fqs++)
-+ fmt_fq_free(fmt_fqs);
-+
-+ qman_release_fqid_range(base_fqid, num_allocated);
-+
-+ kfree(fmt_port_pcd->fmt_pcd_fqs);
-+ memset(fmt_port_pcd, 0, sizeof(*fmt_port_pcd));
-+
-+ /* debugging stuff */
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ _fmt_dbg(" portid: %u.\n", fmt_port->id);
-+ _fmt_dbg(" frames enqueue to qman: %u.\n",
-+ atomic_read(&fmt_port->enqueue_to_qman_frm));
-+ _fmt_dbg(" frames enqueue to rxq: %u.\n",
-+ atomic_read(&fmt_port->enqueue_to_rxq));
-+ _fmt_dbg(" frames dequeue from rxq: %u.\n",
-+ atomic_read(&fmt_port->dequeue_from_rxq));
-+ _fmt_dbg(" frames not enqueue to rxq - wrong frm: %u.\n",
-+ atomic_read(&fmt_port->not_enqueue_to_rxq_wrong_frm));
-+ atomic_set(&fmt_port->enqueue_to_qman_frm, 0);
-+ atomic_set(&fmt_port->enqueue_to_rxq, 0);
-+ atomic_set(&fmt_port->dequeue_from_rxq, 0);
-+ atomic_set(&fmt_port->not_enqueue_to_rxq_wrong_frm, 0);
-+#endif
-+ return 0;
-+
-+_pcd_free_fqs_err:
-+ return _err;
-+}
-+
-+static int fmt_port_init(
-+ struct fmt_port_s *fmt_port,
-+ ioc_fmt_port_param_t *p_Params)
-+{
-+ struct device_node *fm_node, *fm_port_node;
-+ const uint32_t *uint32_prop;
-+ int _errno = 0, lenp = 0, i;
-+ static struct of_device_id fm_node_of_match[] = {
-+ { .compatible = "fsl,fman", },
-+ { /* end of list */ },
-+ };
-+
-+ _fmt_dbg("calling...\n");
-+
-+ /* init the send/receive to user space (US) list */
-+ INIT_LIST_HEAD(&fmt_port->rx_q);
-+
-+ /* check parameters */
-+ if (p_Params->num_tx_queues > FMAN_TEST_MAX_TX_FQS ||
-+ p_Params->fm_port_id > IOC_FMT_MAX_NUM_OF_PORTS) {
-+ _fmt_dbg("wrong test parameters.\n");
-+ return -EINVAL;
-+ }
-+
-+ /* set port parameters */
-+ fmt_port->num_of_tx_fqs = p_Params->num_tx_queues;
-+ fmt_port->id = p_Params->fm_port_id;
-+ fmt_port->port_type = p_Params->fm_port_type;
-+ fmt_port->diag = e_IOC_DIAG_MODE_NONE;
-+
-+ /* init debugging stuff */
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ atomic_set(&fmt_port->enqueue_to_qman_frm, 0);
-+ atomic_set(&fmt_port->enqueue_to_rxq, 0);
-+ atomic_set(&fmt_port->dequeue_from_rxq, 0);
-+ atomic_set(&fmt_port->not_enqueue_to_rxq_wrong_frm, 0);
-+#endif
-+
-+ /* TODO: This should be done at probe time not at runtime
-+ * very ugly function */
-+ /* fill fmt port properties from dts */
-+ for_each_matching_node(fm_node, fm_node_of_match) {
-+
-+ uint32_prop = (uint32_t *)of_get_property(fm_node,
-+ "cell-index", &lenp);
-+ if (unlikely(uint32_prop == NULL)) {
-+ _fmt_wrn("of_get_property(%s, cell-index) invalid",
-+ fm_node->full_name);
-+ return -EINVAL;
-+ }
-+ if (WARN_ON(lenp != sizeof(uint32_t))) {
-+ _fmt_wrn("of_get_property(%s, cell-index) invalid",
-+ fm_node->full_name);
-+ return -EINVAL;
-+ }
-+
-+ if (*uint32_prop == p_Params->fm_id) {
-+ struct resource res;
-+
-+ /* Get the FM address */
-+ _errno = of_address_to_resource(fm_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ _fmt_wrn("of_address_to_resource() = %d.\n", _errno);
-+ return -EINVAL;
-+ }
-+
-+ fmt_port->fm_phys_base_addr = res.start;
-+
-+ for_each_child_of_node(fm_node, fm_port_node) {
-+ struct platform_device *of_dev;
-+
-+ if (!of_device_is_available(fm_port_node))
-+ continue;
-+
-+ uint32_prop = (uint32_t *)of_get_property(
-+ fm_port_node,
-+ "cell-index",
-+ &lenp);
-+ if (uint32_prop == NULL)
-+ continue;
-+
-+ if (of_device_is_compatible(fm_port_node,
-+ "fsl,fman-port-oh") &&
-+ (fmt_port->port_type == e_IOC_FMT_PORT_T_OP)) {
-+
-+ if (*uint32_prop == fmt_port->id) {
-+ of_dev = of_find_device_by_node(fm_port_node);
-+ if (unlikely(of_dev == NULL)) {
-+ _fmt_wrn("fm id invalid\n");
-+ return -EINVAL;
-+ }
-+
-+ fmt_port->p_tx_port =
-+ fm_port_bind(&of_dev->dev);
-+ fmt_port->p_tx_fm_port_dev =
-+ (void *)fm_port_get_handle(
-+ fmt_port->p_tx_port);
-+ fmt_port->p_rx_port =
-+ fmt_port->p_tx_port;
-+ fmt_port->p_rx_fm_port_dev =
-+ fmt_port->p_tx_fm_port_dev;
-+ fmt_port->p_mac_dev = NULL;
-+ break;
-+ }
-+ } else if ((*uint32_prop == fmt_port->id) &&
-+ fmt_port->port_type == e_IOC_FMT_PORT_T_RXTX) {
-+
-+ of_dev = of_find_device_by_node(fm_port_node);
-+ if (unlikely(of_dev == NULL)) {
-+ _fmt_wrn("dtb fm id invalid value\n");
-+ return -EINVAL;
-+ }
-+
-+ if (of_device_is_compatible(fm_port_node,
-+ "fsl,fman-port-1g-tx")) {
-+ fmt_port->p_tx_port =
-+ fm_port_bind(&of_dev->dev);
-+ fmt_port->p_tx_fm_port_dev = (void *)
-+ fm_port_get_handle(
-+ fmt_port->p_tx_port);
-+ } else if (of_device_is_compatible(fm_port_node,
-+ "fsl,fman-port-1g-rx")) {
-+ fmt_port->p_rx_port =
-+ fm_port_bind(&of_dev->dev);
-+ fmt_port->p_rx_fm_port_dev = (void *)
-+ fm_port_get_handle(
-+ fmt_port->p_rx_port);
-+ } else if (of_device_is_compatible(fm_port_node,
-+ "fsl,fman-1g-mac") ||
-+ of_device_is_compatible(fm_port_node,
-+ "fsl,fman-memac"))
-+ fmt_port->p_mac_dev =
-+ (typeof(fmt_port->p_mac_dev))
-+ dev_get_drvdata(&of_dev->dev);
-+ else
-+ continue;
-+
-+ if (fmt_port->p_tx_fm_port_dev &&
-+ fmt_port->p_rx_fm_port_dev && fmt_port->p_mac_dev)
-+ break;
-+ } else if (((*uint32_prop + FM_MAX_NUM_OF_1G_RX_PORTS) ==
-+ fmt_port->id) &&
-+ fmt_port->port_type == e_IOC_FMT_PORT_T_RXTX) {
-+
-+ of_dev = of_find_device_by_node(fm_port_node);
-+ if (unlikely(of_dev == NULL)) {
-+ _fmt_wrn("dtb fm id invalid value\n");
-+ return -EINVAL;
-+ }
-+
-+ if (of_device_is_compatible(fm_port_node,
-+ "fsl,fman-port-10g-tx")) {
-+ fmt_port->p_tx_port =
-+ fm_port_bind(&of_dev->dev);
-+ fmt_port->p_tx_fm_port_dev = (void *)
-+ fm_port_get_handle(
-+ fmt_port->p_tx_port);
-+ } else if (of_device_is_compatible(fm_port_node,
-+ "fsl,fman-port-10g-rx")) {
-+ fmt_port->p_rx_port =
-+ fm_port_bind(&of_dev->dev);
-+ fmt_port->p_rx_fm_port_dev = (void *)
-+ fm_port_get_handle(
-+ fmt_port->p_rx_port);
-+ } else if (of_device_is_compatible(fm_port_node,
-+ "fsl,fman-10g-mac") ||
-+ of_device_is_compatible(fm_port_node,
-+ "fsl,fman-memac"))
-+ fmt_port->p_mac_dev =
-+ (typeof(fmt_port->p_mac_dev))
-+ dev_get_drvdata(&of_dev->dev);
-+ else
-+ continue;
-+
-+ if (fmt_port->p_tx_fm_port_dev &&
-+ fmt_port->p_rx_fm_port_dev && fmt_port->p_mac_dev)
-+ break;
-+ }
-+ } /* for_each_child */
-+ }
-+ } /* for each matching node */
-+
-+ if (fmt_port->p_tx_fm_port_dev == 0 ||
-+ fmt_port->p_rx_fm_port_dev == 0) {
-+
-+ _fmt_err("bad fm port pointers.\n");
-+ return -EINVAL;
-+ }
-+
-+ _fmt_dbg("alloc %u tx queues.\n", fmt_port->num_of_tx_fqs);
-+
-+ /* init fman test egress dynamic frame queues */
-+ for (i = 0; i < fmt_port->num_of_tx_fqs; i++) {
-+ int _errno;
-+ _errno = fmt_fq_alloc(
-+ &fmt_port->p_tx_fqs[i],
-+ &fmt_egress_fq,
-+ 0,
-+ QMAN_FQ_FLAG_TO_DCPORTAL,
-+ fm_get_tx_port_channel(fmt_port->p_tx_port),
-+ i);
-+
-+ if (_errno < 0) {
-+ _fmt_err("tx queues allocation failed.\n");
-+ /* TODO: memory leak here if 1 queue is allocated and
-+ * next queues are failing ... */
-+ return -EINVAL;
-+ }
-+ }
-+
-+ /* port is valid and ready to use. */
-+ fmt_port->valid = TRUE;
-+
-+ _fmt_dbg("called.\n");
-+ return 0;
-+}
-+
-+/* fm test chardev functions */
-+static int fmt_open(struct inode *inode, struct file *file)
-+{
-+ unsigned int minor = iminor(inode);
-+
-+ _fmt_dbg("calling...\n");
-+
-+ if (file->private_data != NULL)
-+ return 0;
-+
-+ /* The minor number represents the port number.
-+ * Set the port structure accordingly, so that all subsequent
-+ * operations are done on this port. */
-+ if ((minor >= DEV_FM_TEST_PORTS_MINOR_BASE) &&
-+ (minor < DEV_FM_TEST_MAX_MINORS))
-+ file->private_data = &fm_test.ports[minor];
-+ else
-+ return -ENXIO;
-+
-+ _fmt_dbg("called.\n");
-+ return 0;
-+}
-+
-+static int fmt_close(struct inode *inode, struct file *file)
-+{
-+ struct fmt_port_s *fmt_port = NULL;
-+ struct fmt_frame_s *fmt_frame = NULL;
-+
-+ int err = 0;
-+
-+ _fmt_dbg("calling...\n");
-+
-+ fmt_port = file->private_data;
-+ if (!fmt_port)
-+ return -ENODEV;
-+
-+ /* Close the current test port by invalidating it. */
-+ fmt_port->valid = FALSE;
-+
-+ /* clean the fmt port queue */
-+ while ((fmt_frame = dequeue_fmt_frame(fmt_port)) != NULL) {
-+ /* kfree(NULL) is a no-op, so free both unconditionally and avoid
-+ * leaking the frame descriptor when p_data is NULL */
-+ kfree(fmt_frame->buff.p_data);
-+ kfree(fmt_frame);
-+ }
-+
-+ /* !!! the qman queues are cleaned up from fm_ioctl...
-+ * - very ugly */
-+
-+ _fmt_dbg("called.\n");
-+ return err;
-+}
-+
-+static int fmt_ioctls(unsigned int minor,
-+ struct file *file,
-+ unsigned int cmd,
-+ unsigned long arg,
-+ bool compat)
-+{
-+ struct fmt_port_s *fmt_port = NULL;
-+
-+ _fmt_dbg("IOCTL minor:%u "
-+ " arg:0x%08lx ioctl cmd (0x%08x):(0x%02x:0x%02x).\n",
-+ minor, arg, cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
-+
-+ fmt_port = file->private_data;
-+ if (!fmt_port) {
-+ _fmt_err("invalid fmt port.\n");
-+ return -ENODEV;
-+ }
-+
-+ /* set test type properly */
-+ if (compat)
-+ fmt_port->compat_test_type = true;
-+ else
-+ fmt_port->compat_test_type = false;
-+
-+ switch (cmd) {
-+ case FMT_PORT_IOC_INIT:
-+ {
-+ ioc_fmt_port_param_t param;
-+
-+ if (fmt_port->valid) {
-+ _fmt_wrn("port is already initialized.\n");
-+ return -EFAULT;
-+ }
-+#if defined(CONFIG_COMPAT)
-+ if (compat) {
-+ if (copy_from_user(&param,
-+ (ioc_fmt_port_param_t *)compat_ptr(arg),
-+ sizeof(ioc_fmt_port_param_t)))
-+
-+ return -EFAULT;
-+ } else
-+#endif
-+ {
-+ if (copy_from_user(&param,
-+ (ioc_fmt_port_param_t *) arg,
-+ sizeof(ioc_fmt_port_param_t)))
-+
-+ return -EFAULT;
-+ }
-+
-+ return fmt_port_init(fmt_port, &param);
-+ }
-+
-+ case FMT_PORT_IOC_SET_DIAG_MODE:
-+ if (get_user(fmt_port->diag, (ioc_diag_mode *)arg))
-+ return -EFAULT;
-+
-+ if (fmt_port->diag == e_IOC_DIAG_MODE_CTRL_LOOPBACK)
-+ return set_mac_int_loopback(fmt_port, TRUE);
-+ else
-+ return set_mac_int_loopback(fmt_port, FALSE);
-+ break;
-+
-+ case FMT_PORT_IOC_SET_DPAECHO_MODE:
-+ case FMT_PORT_IOC_SET_IP_HEADER_MANIP:
-+ default:
-+ _fmt_wrn("ioctl unimplemented minor:%u@ioctl"
-+ " cmd:0x%08x (type:0x%02x, nr:0x%02x).\n",
-+ minor, cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
-+ return -EFAULT;
-+ }
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_COMPAT
-+static long fmt_compat_ioctl(
-+ struct file *file,
-+ unsigned int cmd,
-+ unsigned long arg)
-+{
-+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
-+
-+ _fmt_dbg("calling...\n");
-+ return fmt_ioctls(minor, file, cmd, arg, true);
-+}
-+#endif
-+
-+static long fmt_ioctl(
-+ struct file *file,
-+ unsigned int cmd,
-+ unsigned long arg)
-+{
-+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
-+ int res;
-+
-+ _fmt_dbg("calling...\n");
-+
-+ fm_mutex_lock();
-+ res = fmt_ioctls(minor, file, cmd, arg, false);
-+ fm_mutex_unlock();
-+
-+ _fmt_dbg("called.\n");
-+
-+ return res;
-+}
-+
-+#ifdef CONFIG_COMPAT
-+void copy_compat_test_frame_buffer(
-+ ioc_fmt_buff_desc_t *buff,
-+ ioc_fmt_compat_buff_desc_t *compat_buff)
-+{
-+ compat_buff->qid = buff->qid;
-+ compat_buff->p_data = ptr_to_compat(buff->p_data);
-+ compat_buff->size = buff->size;
-+ compat_buff->status = buff->status;
-+
-+ compat_buff->buff_context.p_user_priv =
-+ ptr_to_compat(buff->buff_context.p_user_priv);
-+ memcpy(compat_buff->buff_context.fm_prs_res,
-+ buff->buff_context.fm_prs_res,
-+ FM_PRS_MAX * sizeof(uint8_t));
-+ memcpy(compat_buff->buff_context.fm_time_stamp,
-+ buff->buff_context.fm_time_stamp,
-+ FM_TIME_STAMP_MAX * sizeof(uint8_t));
-+}
-+#endif
-+
-+ssize_t fmt_read(
-+ struct file *file,
-+ char __user *buf,
-+ size_t size,
-+ loff_t *ppos)
-+{
-+ struct fmt_port_s *fmt_port = NULL;
-+ struct fmt_frame_s *p_fmt_frame = NULL;
-+ ssize_t cnt = 0;
-+
-+ fmt_port = file->private_data;
-+ if (!fmt_port || !fmt_port->valid) {
-+ _fmt_err("fmt port not valid!\n");
-+ return -ENODEV;
-+ }
-+
-+ p_fmt_frame = dequeue_fmt_frame(fmt_port);
-+ if (p_fmt_frame == NULL)
-+ return 0;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+#ifdef CONFIG_COMPAT
-+ if (fmt_port->compat_test_type){
-+ cnt = sizeof(ioc_fmt_compat_buff_desc_t);
-+ }
-+ else
-+#endif
-+ {
-+ cnt = sizeof(ioc_fmt_buff_desc_t);
-+ }
-+
-+ if (size < cnt) {
-+ _fmt_err("illegal buffer-size!\n");
-+ cnt = 0;
-+ goto _fmt_read_return;
-+ }
-+
-+ /* Copy structure */
-+#ifdef CONFIG_COMPAT
-+ if (fmt_port->compat_test_type) {
-+ {
-+ ioc_fmt_compat_buff_desc_t compat_buff;
-+ copy_compat_test_frame_buffer(&p_fmt_frame->buff,
-+ &compat_buff);
-+
-+ if (copy_to_user(buf, &compat_buff, cnt)) {
-+ _fmt_err("copy_to_user failed!\n");
-+ goto _fmt_read_return;
-+ }
-+ }
-+
-+ ((ioc_fmt_compat_buff_desc_t *)buf)->p_data =
-+ ptr_to_compat(buf+sizeof(ioc_fmt_compat_buff_desc_t));
-+ cnt += MIN(p_fmt_frame->buff.size, size-cnt);
-+ } else
-+#endif
-+ {
-+ if (copy_to_user(buf, &p_fmt_frame->buff, cnt)) {
-+ _fmt_err("copy_to_user failed!\n");
-+ goto _fmt_read_return;
-+ }
-+
-+ ((ioc_fmt_buff_desc_t *)buf)->p_data =
-+ buf + sizeof(ioc_fmt_buff_desc_t);
-+ cnt += MIN(p_fmt_frame->buff.size, size-cnt);
-+ }
-+
-+ if (size < cnt) {
-+ _fmt_err("illegal buffer-size!\n");
-+ goto _fmt_read_return;
-+ }
-+
-+ /* copy frame */
-+#ifdef CONFIG_COMPAT
-+ if (fmt_port->compat_test_type) {
-+ if (copy_to_user(buf+sizeof(ioc_fmt_compat_buff_desc_t),
-+ p_fmt_frame->buff.p_data, cnt)) {
-+ _fmt_err("copy_to_user failed!\n");
-+ goto _fmt_read_return;
-+ }
-+ } else
-+#endif
-+ {
-+ if (copy_to_user(buf+sizeof(ioc_fmt_buff_desc_t),
-+ p_fmt_frame->buff.p_data, cnt)) {
-+ _fmt_err("copy_to_user failed!\n");
-+ goto _fmt_read_return;
-+ }
-+ }
-+
-+_fmt_read_return:
-+ kfree(p_fmt_frame->buff.p_data);
-+ kfree(p_fmt_frame);
-+
-+ _fmt_dbgr("called.\n");
-+ return cnt;
-+}
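-+
-+/*
-+ * Editor's note on the read() layout: the buffer descriptor is copied to the
-+ * start of the user buffer, its p_data field is patched to point just past
-+ * the descriptor, and the frame bytes are copied right after it, so user
-+ * space sees [ioc_fmt_buff_desc_t | frame data] in a single buffer.
-+ */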
-+
-+ssize_t fmt_write(
-+ struct file *file,
-+ const char __user *buf,
-+ size_t size,
-+ loff_t *ppos)
-+{
-+ struct fmt_port_s *fmt_port = NULL;
-+ ioc_fmt_buff_desc_t buff_desc;
-+#ifdef CONFIG_COMPAT
-+ ioc_fmt_compat_buff_desc_t buff_desc_compat;
-+#endif
-+ uint8_t *p_data = NULL;
-+ uint32_t data_offset;
-+ int _errno;
-+ t_DpaaFD fd;
-+
-+ _fmt_dbgr("calling...\n");
-+
-+ fmt_port = file->private_data;
-+ if (!fmt_port || !fmt_port->valid) {
-+ _fmt_err("fmt port not valid.\n");
-+ return -EINVAL;
-+ }
-+
-+ /* If Compat (32B UserSpace - 64B KernelSpace) */
-+#ifdef CONFIG_COMPAT
-+ if (fmt_port->compat_test_type) {
-+ if (size < sizeof(ioc_fmt_compat_buff_desc_t)) {
-+ _fmt_err("invalid buff_desc size.\n");
-+ return -EFAULT;
-+ }
-+
-+ if (copy_from_user(&buff_desc_compat, buf,
-+ sizeof(ioc_fmt_compat_buff_desc_t)))
-+ return -EFAULT;
-+
-+ buff_desc.qid = buff_desc_compat.qid;
-+ buff_desc.p_data = compat_ptr(buff_desc_compat.p_data);
-+ buff_desc.size = buff_desc_compat.size;
-+ buff_desc.status = buff_desc_compat.status;
-+
-+ buff_desc.buff_context.p_user_priv =
-+ compat_ptr(buff_desc_compat.buff_context.p_user_priv);
-+ memcpy(buff_desc.buff_context.fm_prs_res,
-+ buff_desc_compat.buff_context.fm_prs_res,
-+ FM_PRS_MAX * sizeof(uint8_t));
-+ memcpy(buff_desc.buff_context.fm_time_stamp,
-+ buff_desc_compat.buff_context.fm_time_stamp,
-+ FM_TIME_STAMP_MAX * sizeof(uint8_t));
-+ } else
-+#endif
-+ {
-+ if (size < sizeof(ioc_fmt_buff_desc_t)) {
-+ _fmt_err("invalid buff_desc size.\n");
-+ return -EFAULT;
-+ }
-+
-+ if (copy_from_user(&buff_desc, (ioc_fmt_buff_desc_t *)buf,
-+ sizeof(ioc_fmt_buff_desc_t)))
-+ return -EFAULT;
-+ }
-+
-+ data_offset = FM_PORT_GetBufferDataOffset(fmt_port->p_tx_fm_port_dev);
-+ p_data = kmalloc(buff_desc.size+data_offset, GFP_KERNEL);
-+ if (!p_data)
-+ return -ENOMEM;
-+
-+ /* If Compat (32UserSpace - 64KernelSpace) the buff_desc.p_data is ok */
-+ if (copy_from_user((uint8_t *)PTR_MOVE(p_data, data_offset),
-+ buff_desc.p_data,
-+ buff_desc.size)) {
-+ kfree(p_data);
-+ return -EFAULT;
-+ }
-+
-+ /* TODO: dma_map_single here (cannot access the bpool struct) */
-+
-+ /* prepare fd */
-+ memset(&fd, 0, sizeof(fd));
-+ DPAA_FD_SET_ADDR(&fd, p_data);
-+ DPAA_FD_SET_OFFSET(&fd, data_offset);
-+ DPAA_FD_SET_LENGTH(&fd, buff_desc.size);
-+
-+ _errno = qman_enqueue(&fmt_port->p_tx_fqs[buff_desc.qid].fq_base,
-+ (struct qm_fd *)&fd, 0);
-+ if (_errno) {
-+ buff_desc.status = (uint32_t)_errno;
-+ if (copy_to_user((ioc_fmt_buff_desc_t *)buf, &buff_desc,
-+ sizeof(ioc_fmt_buff_desc_t))) {
-+ kfree(p_data);
-+ return -EFAULT;
-+ }
-+ }
-+
-+ /* for debugging */
-+#if defined(FMT_K_DBG) || defined(FMT_K_DBG_RUNTIME)
-+ atomic_inc(&fmt_port->enqueue_to_qman_frm);
-+#endif
-+ _fmt_dbgr("called.\n");
-+ return buff_desc.size;
-+}
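-+
-+/*
-+ * Editor's note on the write() path: user space passes an ioc_fmt_buff_desc_t
-+ * whose p_data points at the frame bytes; the frame is copied into a fresh
-+ * kernel buffer at the port's data offset, wrapped in a frame descriptor and
-+ * enqueued on the tx frame queue selected by buff_desc.qid.
-+ */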
-+
-+/* fm test character device definition */
-+static const struct file_operations fmt_fops =
-+{
-+ .owner = THIS_MODULE,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = fmt_compat_ioctl,
-+#endif
-+ .unlocked_ioctl = fmt_ioctl,
-+ .open = fmt_open,
-+ .release = fmt_close,
-+ .read = fmt_read,
-+ .write = fmt_write,
-+};
-+
-+static int fmt_init(void)
-+{
-+ int id;
-+
-+ _fmt_dbg("calling...\n");
-+
-+ /* Register to the /dev for IOCTL API */
-+ /* Register dynamically a new major number for the character device: */
-+ fm_test.major = register_chrdev(0, DEV_FM_TEST_NAME, &fmt_fops);
-+ if (fm_test.major <= 0) {
-+ _fmt_wrn("Failed to allocate major number for device %s.\n",
-+ DEV_FM_TEST_NAME);
-+ return -ENODEV;
-+ }
-+
-+ /* Creating class for FMan_test */
-+ fm_test.fmt_class = class_create(THIS_MODULE, DEV_FM_TEST_NAME);
-+ if (IS_ERR(fm_test.fmt_class)) {
-+ unregister_chrdev(fm_test.major, DEV_FM_TEST_NAME);
-+ _fmt_wrn("Error creating %s class.\n", DEV_FM_TEST_NAME);
-+ return -ENODEV;
-+ }
-+
-+ for (id = 0; id < IOC_FMT_MAX_NUM_OF_PORTS; id++)
-+ /* device_create() returns an ERR_PTR() on failure, never NULL */
-+ if (IS_ERR(device_create(fm_test.fmt_class, NULL,
-+ MKDEV(fm_test.major,
-+ DEV_FM_TEST_PORTS_MINOR_BASE + id), NULL,
-+ DEV_FM_TEST_NAME "%d", id))) {
-+
-+ _fmt_err("Error creating %s device.\n",
-+ DEV_FM_TEST_NAME);
-+ return -ENODEV;
-+ }
-+
-+ return 0;
-+}
-+
-+static void fmt_free(void)
-+{
-+ int id;
-+
-+ for (id = 0; id < IOC_FMT_MAX_NUM_OF_PORTS; id++)
-+ device_destroy(fm_test.fmt_class, MKDEV(fm_test.major,
-+ DEV_FM_TEST_PORTS_MINOR_BASE + id));
-+ class_destroy(fm_test.fmt_class);
-+}
-+
-+static int __init __cold fmt_load(void)
-+{
-+ struct dpaa_eth_hooks_s priv_dpaa_eth_hooks;
-+
-+ /* set dpaa hooks for default queues */
-+ memset(&priv_dpaa_eth_hooks, 0, sizeof(priv_dpaa_eth_hooks));
-+ priv_dpaa_eth_hooks.rx_default = fmt_rx_default_hook;
-+ priv_dpaa_eth_hooks.rx_error = fmt_rx_error_hook;
-+ priv_dpaa_eth_hooks.tx_confirm = fmt_tx_confirm_hook;
-+ priv_dpaa_eth_hooks.tx_error = fmt_tx_confirm_error_hook;
-+
-+ fsl_dpaa_eth_set_hooks(&priv_dpaa_eth_hooks);
-+
-+ /* initialize the fman test environment */
-+ if (fmt_init() < 0) {
-+ _fmt_err("Failed to init FM-test module.\n");
-+ fmt_free();
-+ return -ENODEV;
-+ }
-+
-+ _fmt_inf("FSL FM test module loaded.\n");
-+
-+ return 0;
-+}
-+
-+static void __exit __cold fmt_unload(void)
-+{
-+ fmt_free();
-+ _fmt_inf("FSL FM test module unloaded.\n");
-+}
-+
-+module_init(fmt_load);
-+module_exit(fmt_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c
-@@ -0,0 +1,2910 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_fm.c
-+ @Author Shlomi Gridish
-+ @Description FM Linux wrapper functions.
-+*/
-+
-+#include <linux/version.h>
-+#include <linux/slab.h>
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#include <config/modversions.h>
-+#endif /* MODVERSIONS */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/cdev.h>
-+#include <linux/device.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/io.h>
-+#include <linux/ioport.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/clk.h>
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#ifndef CONFIG_FMAN_ARM
-+#include <sysdev/fsl_soc.h>
-+#include <linux/fsl/guts.h>
-+#include <linux/fsl/svr.h>
-+#endif
-+#include <linux/stat.h> /* For file access mask */
-+#include <linux/skbuff.h>
-+#include <linux/proc_fs.h>
-+
-+/* NetCommSw Headers --------------- */
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "sprint_ext.h"
-+#include "debug_ext.h"
-+#include "sys_io_ext.h"
-+
-+#include "fm_ioctls.h"
-+
-+#include "lnxwrp_fm.h"
-+#include "lnxwrp_resources.h"
-+#include "lnxwrp_sysfs_fm.h"
-+#include "lnxwrp_sysfs_fm_port.h"
-+#include "lnxwrp_exp_sym.h"
-+#include "fm_common.h"
-+#include "../../sdk_fman/Peripherals/FM/fm.h"
-+#define __ERR_MODULE__ MODULE_FM
-+
-+extern struct device_node *GetFmPortAdvArgsDevTreeNode (struct device_node *fm_node,
-+ e_FmPortType portType,
-+ uint8_t portId);
-+
-+#define PROC_PRINT(args...) offset += sprintf(buf+offset,args)
-+
-+#define ADD_ADV_CONFIG_NO_RET(_func, _param) \
-+ do { \
-+ if (i<max){ \
-+ p_Entry = &p_Entrys[i]; \
-+ p_Entry->p_Function = _func; \
-+ _param \
-+ i++; \
-+ } \
-+ else \
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,\
-+ ("Number of advanced-configuration entries exceeded"));\
-+ } while (0)
-+
-+/* Bootarg used to override the Kconfig FSL_FM_MAX_FRAME_SIZE value */
-+#define FSL_FM_MAX_FRM_BOOTARG "fsl_fm_max_frm"
-+
-+/* Bootarg used to override FSL_FM_RX_EXTRA_HEADROOM Kconfig value */
-+#define FSL_FM_RX_EXTRA_HEADROOM_BOOTARG "fsl_fm_rx_extra_headroom"
-+
-+/* Minimum and maximum value for the fsl_fm_rx_extra_headroom bootarg */
-+#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16
-+#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
-+
-+#define FSL_FM_PAUSE_TIME_ENABLE 0xf000
-+#define FSL_FM_PAUSE_TIME_DISABLE 0
-+#define FSL_FM_PAUSE_THRESH_DEFAULT 0
-+
-+/*
-+ * Max frame size, across all interfaces.
-+ * Configurable from Kconfig or bootargs, to avoid allocating
-+ * oversized (socket) buffers when not using jumbo frames.
-+ * Must be large enough to accommodate the network MTU, but small enough
-+ * to avoid wasting skb memory.
-+ *
-+ * Could be overridden once, at boot-time, via the
-+ * fm_set_max_frm() callback.
-+ */
-+int fsl_fm_max_frm = CONFIG_FSL_FM_MAX_FRAME_SIZE;
-+
-+/*
-+ * Extra headroom for Rx buffers.
-+ * FMan is instructed to allocate, on the Rx path, this amount of
-+ * space at the beginning of a data buffer, beside the DPA private
-+ * data area and the IC fields.
-+ * Does not impact Tx buffer layout.
-+ *
-+ * Configurable from Kconfig or bootargs. Zero by default, it's needed
-+ * on particular forwarding scenarios that add extra headers to the
-+ * forwarded frame.
-+ */
-+int fsl_fm_rx_extra_headroom = CONFIG_FSL_FM_RX_EXTRA_HEADROOM;
-+
-+#ifdef CONFIG_FMAN_PFC
-+static int fsl_fm_pfc_quanta[] = {
-+ CONFIG_FMAN_PFC_QUANTA_0,
-+ CONFIG_FMAN_PFC_QUANTA_1,
-+ CONFIG_FMAN_PFC_QUANTA_2,
-+ CONFIG_FMAN_PFC_QUANTA_3
-+};
-+#endif
-+
-+static t_LnxWrpFm lnxWrpFm;
-+
-+int fm_get_max_frm()
-+{
-+ return fsl_fm_max_frm;
-+}
-+EXPORT_SYMBOL(fm_get_max_frm);
-+
-+int fm_get_rx_extra_headroom()
-+{
-+ return ALIGN(fsl_fm_rx_extra_headroom, 16);
-+}
-+EXPORT_SYMBOL(fm_get_rx_extra_headroom);
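-+
-+/*
-+ * Editor's example (values are illustrative only): the extra headroom handed
-+ * to the dpaa driver is rounded up to a multiple of 16, so a Kconfig/bootarg
-+ * value of 16 is used as-is, while e.g. 100 becomes ALIGN(100, 16) = 112.
-+ */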
-+
-+static int __init fm_set_max_frm(char *str)
-+{
-+ int ret = 0;
-+
-+ ret = get_option(&str, &fsl_fm_max_frm);
-+ if (ret != 1) {
-+ /*
-+ * This will only work if CONFIG_EARLY_PRINTK is compiled in,
-+ * and something like "earlyprintk=serial,uart0,115200" is
-+ * specified in the bootargs
-+ */
-+ printk(KERN_WARNING "No suitable %s=<int> prop in bootargs; "
-+ "will use the default FSL_FM_MAX_FRAME_SIZE (%d) "
-+ "from Kconfig.\n", FSL_FM_MAX_FRM_BOOTARG,
-+ CONFIG_FSL_FM_MAX_FRAME_SIZE);
-+
-+ fsl_fm_max_frm = CONFIG_FSL_FM_MAX_FRAME_SIZE;
-+ return 1;
-+ }
-+
-+ /* Don't allow invalid bootargs; fallback to the Kconfig value */
-+ if (fsl_fm_max_frm < 64 || fsl_fm_max_frm > 9600) {
-+ printk(KERN_WARNING "Invalid %s=%d in bootargs, valid range is "
-+ "64-9600. Falling back to the FSL_FM_MAX_FRAME_SIZE (%d) "
-+ "from Kconfig.\n",
-+ FSL_FM_MAX_FRM_BOOTARG, fsl_fm_max_frm,
-+ CONFIG_FSL_FM_MAX_FRAME_SIZE);
-+
-+ fsl_fm_max_frm = CONFIG_FSL_FM_MAX_FRAME_SIZE;
-+ return 1;
-+ }
-+
-+ printk(KERN_INFO "Using fsl_fm_max_frm=%d from bootargs\n",
-+ fsl_fm_max_frm);
-+ return 0;
-+}
-+early_param(FSL_FM_MAX_FRM_BOOTARG, fm_set_max_frm);
-+
-+static int __init fm_set_rx_extra_headroom(char *str)
-+{
-+ int ret;
-+
-+ ret = get_option(&str, &fsl_fm_rx_extra_headroom);
-+
-+ if (ret != 1) {
-+ printk(KERN_WARNING "No suitable %s=<int> prop in bootargs; "
-+ "will use the default FSL_FM_RX_EXTRA_HEADROOM (%d) "
-+ "from Kconfig.\n", FSL_FM_RX_EXTRA_HEADROOM_BOOTARG,
-+ CONFIG_FSL_FM_RX_EXTRA_HEADROOM);
-+ fsl_fm_rx_extra_headroom = CONFIG_FSL_FM_RX_EXTRA_HEADROOM;
-+
-+ return 1;
-+ }
-+
-+ if (fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN ||
-+ fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX) {
-+ printk(KERN_WARNING "Invalid value for %s=%d prop in "
-+ "bootargs; will use the default "
-+ "FSL_FM_RX_EXTRA_HEADROOM (%d) from Kconfig.\n",
-+ FSL_FM_RX_EXTRA_HEADROOM_BOOTARG,
-+ fsl_fm_rx_extra_headroom,
-+ CONFIG_FSL_FM_RX_EXTRA_HEADROOM);
-+ fsl_fm_rx_extra_headroom = CONFIG_FSL_FM_RX_EXTRA_HEADROOM;
-+ }
-+
-+ printk(KERN_INFO "Using fsl_fm_rx_extra_headroom=%d from bootargs\n",
-+ fsl_fm_rx_extra_headroom);
-+
-+ return 0;
-+}
-+early_param(FSL_FM_RX_EXTRA_HEADROOM_BOOTARG, fm_set_rx_extra_headroom);
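-+
-+/*
-+ * Editor's example (illustrative bootargs, not taken from the original
-+ * sources): booting with "fsl_fm_max_frm=9600 fsl_fm_rx_extra_headroom=64"
-+ * selects jumbo-sized buffers and reserves 64 extra bytes in front of each
-+ * Rx buffer. Values outside 64..9600 and 16..384 respectively fall back to
-+ * the Kconfig defaults, as the two early_param handlers above enforce.
-+ */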
-+
-+static irqreturn_t fm_irq(int irq, void *_dev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)_dev;
-+#ifdef CONFIG_PM_SLEEP
-+ t_Fm *p_Fm = (t_Fm*)p_LnxWrpFmDev->h_Dev;
-+#endif
-+ if (!p_LnxWrpFmDev || !p_LnxWrpFmDev->h_Dev)
-+ return IRQ_NONE;
-+
-+#ifdef CONFIG_PM_SLEEP
-+ if (fman_get_normal_pending(p_Fm->p_FmFpmRegs) & INTR_EN_WAKEUP)
-+ {
-+ pm_wakeup_event(p_LnxWrpFmDev->dev, 200);
-+ }
-+#endif
-+ FM_EventIsr(p_LnxWrpFmDev->h_Dev);
-+ return IRQ_HANDLED;
-+}
-+
-+static irqreturn_t fm_err_irq(int irq, void *_dev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)_dev;
-+
-+ if (!p_LnxWrpFmDev || !p_LnxWrpFmDev->h_Dev)
-+ return IRQ_NONE;
-+
-+ if (FM_ErrorIsr(p_LnxWrpFmDev->h_Dev) == E_OK)
-+ return IRQ_HANDLED;
-+
-+ return IRQ_NONE;
-+}
-+
-+/* used to protect FMD/LLD from concurrent calls in functions fm_mutex_lock / fm_mutex_unlock */
-+static struct mutex lnxwrp_mutex;
-+
-+static t_LnxWrpFmDev * CreateFmDev(uint8_t id)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ int j;
-+
-+ p_LnxWrpFmDev = (t_LnxWrpFmDev *)XX_Malloc(sizeof(t_LnxWrpFmDev));
-+ if (!p_LnxWrpFmDev)
-+ {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ return NULL;
-+ }
-+
-+ memset(p_LnxWrpFmDev, 0, sizeof(t_LnxWrpFmDev));
-+ p_LnxWrpFmDev->fmDevSettings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
-+ memset(p_LnxWrpFmDev->fmDevSettings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
-+ p_LnxWrpFmDev->fmPcdDevSettings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
-+ memset(p_LnxWrpFmDev->fmPcdDevSettings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
-+ p_LnxWrpFmDev->hcPort.settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
-+ memset(p_LnxWrpFmDev->hcPort.settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
-+ for (j=0; j<FM_MAX_NUM_OF_RX_PORTS; j++)
-+ {
-+ p_LnxWrpFmDev->rxPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
-+ memset(p_LnxWrpFmDev->rxPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
-+ }
-+ for (j=0; j<FM_MAX_NUM_OF_TX_PORTS; j++)
-+ {
-+ p_LnxWrpFmDev->txPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
-+ memset(p_LnxWrpFmDev->txPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
-+ }
-+ for (j=0; j<FM_MAX_NUM_OF_OH_PORTS-1; j++)
-+ {
-+ p_LnxWrpFmDev->opPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry));
-+ memset(p_LnxWrpFmDev->opPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)));
-+ }
-+
-+ return p_LnxWrpFmDev;
-+}
-+
-+static void DestroyFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+ int j;
-+
-+ for (j=0; j<FM_MAX_NUM_OF_OH_PORTS-1; j++)
-+ if (p_LnxWrpFmDev->opPorts[j].settings.advConfig)
-+ XX_Free(p_LnxWrpFmDev->opPorts[j].settings.advConfig);
-+ for (j=0; j<FM_MAX_NUM_OF_TX_PORTS; j++)
-+ if (p_LnxWrpFmDev->txPorts[j].settings.advConfig)
-+ XX_Free(p_LnxWrpFmDev->txPorts[j].settings.advConfig);
-+ for (j=0; j<FM_MAX_NUM_OF_RX_PORTS; j++)
-+ if (p_LnxWrpFmDev->rxPorts[j].settings.advConfig)
-+ XX_Free(p_LnxWrpFmDev->rxPorts[j].settings.advConfig);
-+ if (p_LnxWrpFmDev->hcPort.settings.advConfig)
-+ XX_Free(p_LnxWrpFmDev->hcPort.settings.advConfig);
-+ if (p_LnxWrpFmDev->fmPcdDevSettings.advConfig)
-+ XX_Free(p_LnxWrpFmDev->fmPcdDevSettings.advConfig);
-+ if (p_LnxWrpFmDev->fmDevSettings.advConfig)
-+ XX_Free(p_LnxWrpFmDev->fmDevSettings.advConfig);
-+
-+ XX_Free(p_LnxWrpFmDev);
-+}
-+
-+static t_Error FillRestFmInfo(t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+#define FM_BMI_PPIDS_OFFSET 0x00080304
-+#define FM_DMA_PLR_OFFSET 0x000c2060
-+#define FM_FPM_IP_REV_1_OFFSET 0x000c30c4
-+#define DMA_HIGH_LIODN_MASK 0x0FFF0000
-+#define DMA_LOW_LIODN_MASK 0x00000FFF
-+#define DMA_LIODN_SHIFT 16
-+
-+typedef _Packed struct {
-+ uint32_t plr[32];
-+} _PackedType t_Plr;
-+
-+typedef _Packed struct {
-+ volatile uint32_t fmbm_ppid[63];
-+} _PackedType t_Ppids;
-+
-+ t_Plr *p_Plr;
-+ t_Ppids *p_Ppids;
-+ int i,j;
-+ uint32_t fmRev;
-+
-+ static const uint8_t phys1GRxPortId[] = {0x8,0x9,0xa,0xb,0xc,0xd,0xe,0xf};
-+ static const uint8_t phys10GRxPortId[] = {0x10,0x11};
-+#if (DPAA_VERSION >= 11)
-+ static const uint8_t physOhPortId[] = {/* 0x1, */0x2,0x3,0x4,0x5,0x6,0x7};
-+#else
-+ static const uint8_t physOhPortId[] = {0x1,0x2,0x3,0x4,0x5,0x6,0x7};
-+#endif
-+ static const uint8_t phys1GTxPortId[] = {0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f};
-+ static const uint8_t phys10GTxPortId[] = {0x30,0x31};
-+
-+ fmRev = (uint32_t)(*((volatile uint32_t *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_FPM_IP_REV_1_OFFSET)));
-+ fmRev &= 0xffff;
-+
-+ p_Plr = (t_Plr *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_DMA_PLR_OFFSET);
-+#ifdef MODULE
-+ for (i=0;i<FM_MAX_NUM_OF_PARTITIONS/2;i++)
-+ p_Plr->plr[i] = 0;
-+#endif /* MODULE */
-+
-+ for (i=0; i<FM_MAX_NUM_OF_PARTITIONS; i++)
-+ {
-+ uint16_t liodnBase = (uint16_t)((i%2) ?
-+ (p_Plr->plr[i/2] & DMA_LOW_LIODN_MASK) :
-+ ((p_Plr->plr[i/2] & DMA_HIGH_LIODN_MASK) >> DMA_LIODN_SHIFT));
-+#ifdef FM_PARTITION_ARRAY
-+ /* TODO: this was .liodnPerPartition[i] = liodnBase; is the index meaning the same? */
-+ p_LnxWrpFmDev->fmDevSettings.param.liodnBasePerPort[i] = liodnBase;
-+#endif /* FM_PARTITION_ARRAY */
-+
-+ if ((i >= phys1GRxPortId[0]) &&
-+ (i <= phys1GRxPortId[FM_MAX_NUM_OF_1G_RX_PORTS-1]))
-+ {
-+ for (j=0; j<ARRAY_SIZE(phys1GRxPortId); j++)
-+ if (phys1GRxPortId[j] == i)
-+ break;
-+ ASSERT_COND(j<ARRAY_SIZE(phys1GRxPortId));
-+ p_LnxWrpFmDev->rxPorts[j].settings.param.liodnBase = liodnBase;
-+ }
-+ else if (FM_MAX_NUM_OF_10G_RX_PORTS &&
-+ (i >= phys10GRxPortId[0]) &&
-+ (i <= phys10GRxPortId[FM_MAX_NUM_OF_10G_RX_PORTS-1]))
-+ {
-+ for (j=0; j<ARRAY_SIZE(phys10GRxPortId); j++)
-+ if (phys10GRxPortId[j] == i)
-+ break;
-+ ASSERT_COND(j<ARRAY_SIZE(phys10GRxPortId));
-+ p_LnxWrpFmDev->rxPorts[FM_MAX_NUM_OF_1G_RX_PORTS+j].settings.param.liodnBase = liodnBase;
-+ }
-+ else if ((i >= physOhPortId[0]) &&
-+ (i <= physOhPortId[FM_MAX_NUM_OF_OH_PORTS-1]))
-+ {
-+ for (j=0; j<ARRAY_SIZE(physOhPortId); j++)
-+ if (physOhPortId[j] == i)
-+ break;
-+ ASSERT_COND(j<ARRAY_SIZE(physOhPortId));
-+ if (j == 0)
-+ p_LnxWrpFmDev->hcPort.settings.param.liodnBase = liodnBase;
-+ else
-+ p_LnxWrpFmDev->opPorts[j - 1].settings.param.liodnBase = liodnBase;
-+ }
-+ else if ((i >= phys1GTxPortId[0]) &&
-+ (i <= phys1GTxPortId[FM_MAX_NUM_OF_1G_TX_PORTS-1]))
-+ {
-+ for (j=0; j<ARRAY_SIZE(phys1GTxPortId); j++)
-+ if (phys1GTxPortId[j] == i)
-+ break;
-+ ASSERT_COND(j<ARRAY_SIZE(phys1GTxPortId));
-+ p_LnxWrpFmDev->txPorts[j].settings.param.liodnBase = liodnBase;
-+ }
-+ else if (FM_MAX_NUM_OF_10G_TX_PORTS &&
-+ (i >= phys10GTxPortId[0]) &&
-+ (i <= phys10GTxPortId[FM_MAX_NUM_OF_10G_TX_PORTS-1]))
-+ {
-+ for (j=0; j<ARRAY_SIZE(phys10GTxPortId); j++)
-+ if (phys10GTxPortId[j] == i)
-+ break;
-+ ASSERT_COND(j<ARRAY_SIZE(phys10GTxPortId));
-+ p_LnxWrpFmDev->txPorts[FM_MAX_NUM_OF_1G_TX_PORTS+j].settings.param.liodnBase = liodnBase;
-+ }
-+ }
-+
-+ p_Ppids = (t_Ppids *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_BMI_PPIDS_OFFSET);
-+
-+ for (i=0; i<FM_MAX_NUM_OF_1G_RX_PORTS; i++)
-+ p_LnxWrpFmDev->rxPorts[i].settings.param.specificParams.rxParams.liodnOffset =
-+ p_Ppids->fmbm_ppid[phys1GRxPortId[i]-1];
-+
-+ for (i=0; i<FM_MAX_NUM_OF_10G_RX_PORTS; i++)
-+ p_LnxWrpFmDev->rxPorts[FM_MAX_NUM_OF_1G_RX_PORTS+i].settings.param.specificParams.rxParams.liodnOffset =
-+ p_Ppids->fmbm_ppid[phys10GRxPortId[i]-1];
-+
-+ return E_OK;
-+}
-+
-+/* Structure that defines QE firmware binary files.
-+ *
-+ * See Documentation/powerpc/qe_firmware.txt for a description of these
-+ * fields.
-+ */
-+struct qe_firmware {
-+ struct qe_header {
-+ __be32 length; /* Length of the entire structure, in bytes */
-+ u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */
-+ u8 version; /* Version of this layout. First ver is '1' */
-+ } header;
-+ u8 id[62]; /* Null-terminated identifier string */
-+ u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */
-+ u8 count; /* Number of microcode[] structures */
-+ struct {
-+ __be16 model; /* The SOC model */
-+ u8 major; /* The SOC revision major */
-+ u8 minor; /* The SOC revision minor */
-+ } __attribute__ ((packed)) soc;
-+ u8 padding[4]; /* Reserved, for alignment */
-+ __be64 extended_modes; /* Extended modes */
-+ __be32 vtraps[8]; /* Virtual trap addresses */
-+ u8 reserved[4]; /* Reserved, for future expansion */
-+ struct qe_microcode {
-+ u8 id[32]; /* Null-terminated identifier */
-+ __be32 traps[16]; /* Trap addresses, 0 == ignore */
-+ __be32 eccr; /* The value for the ECCR register */
-+ __be32 iram_offset; /* Offset into I-RAM for the code */
-+ __be32 count; /* Number of 32-bit words of the code */
-+ __be32 code_offset; /* Offset of the actual microcode */
-+ u8 major; /* The microcode version major */
-+ u8 minor; /* The microcode version minor */
-+ u8 revision; /* The microcode version revision */
-+ u8 padding; /* Reserved, for alignment */
-+ u8 reserved[4]; /* Reserved, for future expansion */
-+ } __attribute__ ((packed)) microcode[1];
-+ /* All microcode binaries should be located here */
-+ /* CRC32 should be located here, after the microcode binaries */
-+} __attribute__ ((packed));
-+
-+
-+/**
-+ * FindFmanMicrocode - find the Fman microcode
-+ *
-+ * This function returns a pointer to the QE Firmware blob that holds
-+ * the Fman microcode. We use the QE Firmware structure because Fman microcode
-+ * is similar to QE microcode, so there's no point in defining a new layout.
-+ *
-+ * Current versions of U-Boot embed the Fman firmware into the device tree,
-+ * so we check for that first. Each Fman node in the device tree contains a
-+ * node or a pointer to a node that holds the firmware. Technically, we should
-+ * be fetching the firmware node for the current Fman, but we don't have that
-+ * information any more, so we assume that there is only one firmware node in
-+ * the device tree, and that all FMans use the same firmware.
-+ */
-+static const struct qe_firmware *FindFmanMicrocode(void)
-+{
-+ static const struct qe_firmware *P4080_UCPatch;
-+ struct device_node *np;
-+
-+ if (P4080_UCPatch)
-+ return P4080_UCPatch;
-+
-+ /* The firmware should be inside the device tree. */
-+ np = of_find_compatible_node(NULL, NULL, "fsl,fman-firmware");
-+ if (np) {
-+ P4080_UCPatch = of_get_property(np, "fsl,firmware", NULL);
-+ of_node_put(np);
-+ if (P4080_UCPatch)
-+ return P4080_UCPatch;
-+ else
-+ REPORT_ERROR(WARNING, E_NOT_FOUND, ("firmware node is incomplete"));
-+ }
-+
-+ /* Returning NULL here forces the reuse of the IRAM content */
-+ return NULL;
-+}
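-+
-+/*
-+ * For reference, the lookup above assumes the boot loader has installed a
-+ * device tree node of roughly the following shape (the node name and its
-+ * location are board/boot-loader specific; only the compatible string and
-+ * the "fsl,firmware" property are relied upon here):
-+ *
-+ *	fman-firmware {
-+ *		compatible = "fsl,fman-firmware";
-+ *		fsl,firmware = <... raw qe_firmware blob ...>;
-+ *	};
-+ */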
-+#define SVR_SECURITY_MASK 0x00080000
-+#define SVR_PERSONALITY_MASK 0x0000FF00
-+#define SVR_VER_IGNORE_MASK (SVR_SECURITY_MASK | SVR_PERSONALITY_MASK)
-+#define SVR_B4860_REV1_VALUE 0x86800010
-+#define SVR_B4860_REV2_VALUE 0x86800020
-+#define SVR_T4240_VALUE 0x82400000
-+#define SVR_T4120_VALUE 0x82400100
-+#define SVR_T4160_VALUE 0x82410000
-+#define SVR_T4080_VALUE 0x82410200
-+#define SVR_T4_DEVICE_ID 0x82400000
-+#define SVR_DEVICE_ID_MASK 0xFFF00000
-+
-+#define OF_DEV_ID_NUM 2 /* one used, another one zeroed */
-+
-+/* searches for a subnode with the given name/compatible */
-+static bool HasFmPcdOfNode(struct device_node *fm_node,
-+ struct of_device_id *ids,
-+ const char *name,
-+ const char *compatible)
-+{
-+ struct device_node *dev_node;
-+ bool ret = false;
-+
-+ memset(ids, 0, OF_DEV_ID_NUM*sizeof(struct of_device_id));
-+ if (WARN_ON(strlen(name) >= sizeof(ids[0].name)))
-+ return false;
-+ strcpy(ids[0].name, name);
-+ if (WARN_ON(strlen(compatible) >= sizeof(ids[0].compatible)))
-+ return false;
-+ strcpy(ids[0].compatible, compatible);
-+ for_each_child_of_node(fm_node, dev_node)
-+ if (of_match_node(ids, dev_node) != NULL)
-+ ret = true;
-+ return ret;
-+}
-+
-+static t_LnxWrpFmDev * ReadFmDevTreeNode (struct platform_device *of_dev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ struct device_node *fm_node, *dev_node;
-+ struct of_device_id ids[OF_DEV_ID_NUM];
-+ struct resource res;
-+ struct clk *clk;
-+ u32 clk_rate;
-+ const uint32_t *uint32_prop;
-+ int _errno=0, lenp;
-+ uint32_t tmp_prop;
-+
-+ fm_node = of_node_get(of_dev->dev.of_node);
-+
-+ uint32_prop = (uint32_t *)of_get_property(fm_node, "cell-index", &lenp);
-+ if (unlikely(uint32_prop == NULL)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, cell-index) failed", fm_node->full_name));
-+ return NULL;
-+ }
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ return NULL;
-+
-+ if (tmp_prop > INTG_MAX_NUM_OF_FM) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!"));
-+ return NULL;
-+ }
-+ p_LnxWrpFmDev = CreateFmDev(tmp_prop);
-+ if (!p_LnxWrpFmDev) {
-+ REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG);
-+ return NULL;
-+ }
-+ p_LnxWrpFmDev->dev = &of_dev->dev;
-+ p_LnxWrpFmDev->id = tmp_prop;
-+
-+ /* Get the FM interrupt */
-+ p_LnxWrpFmDev->irq = of_irq_to_resource(fm_node, 0, NULL);
-+ if (unlikely(p_LnxWrpFmDev->irq == /*NO_IRQ*/0)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ));
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+
-+ /* Get the FM error interrupt */
-+ p_LnxWrpFmDev->err_irq = of_irq_to_resource(fm_node, 1, NULL);
-+
-+ if (unlikely(p_LnxWrpFmDev->err_irq == /*NO_IRQ*/0)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ));
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+
-+ /* Get the FM address */
-+ _errno = of_address_to_resource(fm_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+
-+
-+ p_LnxWrpFmDev->fmBaseAddr = 0;
-+ p_LnxWrpFmDev->fmPhysBaseAddr = res.start;
-+ p_LnxWrpFmDev->fmMemSize = res.end + 1 - res.start;
-+
-+ clk = of_clk_get(fm_node, 0);
-+ if (IS_ERR(clk)) {
-+ dev_err(&of_dev->dev, "%s: Failed to get FM clock structure\n",
-+ __func__);
-+ of_node_put(fm_node);
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+
-+ clk_rate = clk_get_rate(clk);
-+ if (!clk_rate) {
-+ dev_err(&of_dev->dev, "%s: Failed to determine FM clock rate\n",
-+ __func__);
-+ of_node_put(fm_node);
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+
-+ p_LnxWrpFmDev->fmDevSettings.param.fmClkFreq = DIV_ROUND_UP(clk_rate, 1000000); /* In MHz, rounded */
-+ /* Get the MURAM base address and size */
-+ memset(ids, 0, sizeof(ids));
-+ if (WARN_ON(strlen("muram") >= sizeof(ids[0].name)))
-+ return NULL;
-+ strcpy(ids[0].name, "muram");
-+ if (WARN_ON(strlen("fsl,fman-muram") >= sizeof(ids[0].compatible)))
-+ return NULL;
-+ strcpy(ids[0].compatible, "fsl,fman-muram");
-+ for_each_child_of_node(fm_node, dev_node) {
-+ if (likely(of_match_node(ids, dev_node) != NULL)) {
-+ _errno = of_address_to_resource(dev_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+
-+ p_LnxWrpFmDev->fmMuramBaseAddr = 0;
-+ p_LnxWrpFmDev->fmMuramPhysBaseAddr = res.start;
-+ p_LnxWrpFmDev->fmMuramMemSize = res.end + 1 - res.start;
-+
-+#ifndef CONFIG_FMAN_ARM
-+ {
-+ uint32_t svr;
-+ svr = mfspr(SPRN_SVR);
-+
-+ if ((svr & ~SVR_VER_IGNORE_MASK) >= SVR_B4860_REV2_VALUE)
-+ p_LnxWrpFmDev->fmMuramMemSize = 0x80000;
-+ }
-+#endif
-+ }
-+ }
-+
-+#if 0
-+ /* Get the RTC base address and size */
-+ memset(ids, 0, sizeof(ids));
-+ if (WARN_ON(strlen("ptp-timer") >= sizeof(ids[0].name)))
-+ return NULL;
-+ strcpy(ids[0].name, "ptp-timer");
-+ if (WARN_ON(strlen("fsl,fman-ptp-timer") >= sizeof(ids[0].compatible)))
-+ return NULL;
-+ strcpy(ids[0].compatible, "fsl,fman-ptp-timer");
-+ for_each_child_of_node(fm_node, dev_node) {
-+ if (likely(of_match_node(ids, dev_node) != NULL)) {
-+ _errno = of_address_to_resource(dev_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+
-+ p_LnxWrpFmDev->fmRtcBaseAddr = 0;
-+ p_LnxWrpFmDev->fmRtcPhysBaseAddr = res.start;
-+ p_LnxWrpFmDev->fmRtcMemSize = res.end + 1 - res.start;
-+ }
-+ }
-+#endif
-+
-+#if (DPAA_VERSION >= 11)
-+ /* Get the VSP base address */
-+ for_each_child_of_node(fm_node, dev_node) {
-+ if (of_device_is_compatible(dev_node, "fsl,fman-vsps")) {
-+ _errno = of_address_to_resource(dev_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno));
-+ DestroyFmDev(p_LnxWrpFmDev);
-+ return NULL;
-+ }
-+ p_LnxWrpFmDev->fmVspBaseAddr = 0;
-+ p_LnxWrpFmDev->fmVspPhysBaseAddr = res.start;
-+ p_LnxWrpFmDev->fmVspMemSize = res.end + 1 - res.start;
-+ }
-+ }
-+#endif
-+
-+ /* Get all PCD nodes */
-+ p_LnxWrpFmDev->prsActive = HasFmPcdOfNode(fm_node, ids, "parser", "fsl,fman-parser");
-+ p_LnxWrpFmDev->kgActive = HasFmPcdOfNode(fm_node, ids, "keygen", "fsl,fman-keygen");
-+ p_LnxWrpFmDev->ccActive = HasFmPcdOfNode(fm_node, ids, "cc", "fsl,fman-cc");
-+ p_LnxWrpFmDev->plcrActive = HasFmPcdOfNode(fm_node, ids, "policer", "fsl,fman-policer");
-+
-+ if (p_LnxWrpFmDev->prsActive || p_LnxWrpFmDev->kgActive ||
-+ p_LnxWrpFmDev->ccActive || p_LnxWrpFmDev->plcrActive)
-+ p_LnxWrpFmDev->pcdActive = TRUE;
-+
-+ if (p_LnxWrpFmDev->pcdActive)
-+ {
-+ const char *str_prop = (char *)of_get_property(fm_node, "fsl,default-pcd", &lenp);
-+ if (str_prop) {
-+ if (strncmp(str_prop, "3-tuple", strlen("3-tuple")) == 0)
-+ p_LnxWrpFmDev->defPcd = e_FM_PCD_3_TUPLE;
-+ }
-+ else
-+ p_LnxWrpFmDev->defPcd = e_NO_PCD;
-+ }
-+
-+ of_node_put(fm_node);
-+
-+ p_LnxWrpFmDev->hcCh =
-+ qman_affine_channel(cpumask_first(qman_affine_cpus()));
-+
-+ p_LnxWrpFmDev->active = TRUE;
-+
-+ return p_LnxWrpFmDev;
-+}
-+
-+struct device_node *GetFmAdvArgsDevTreeNode (uint8_t fmIndx)
-+{
-+ struct device_node *dev_node;
-+ const uint32_t *uint32_prop;
-+ int lenp;
-+ uint32_t tmp_prop;
-+
-+ for_each_compatible_node(dev_node, NULL, "fsl,fman-extended-args") {
-+ uint32_prop = (uint32_t *)of_get_property(dev_node, "cell-index", &lenp);
-+ if (unlikely(uint32_prop == NULL)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ dev_node->full_name));
-+ return NULL;
-+ }
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ return NULL;
-+ if (tmp_prop > INTG_MAX_NUM_OF_FM) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!"));
-+ return NULL;
-+ }
-+ if (fmIndx == tmp_prop)
-+ return dev_node;
-+ }
-+
-+ return NULL;
-+}
-+
-+static t_Error CheckNConfigFmAdvArgs (t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+ struct device_node *dev_node;
-+ t_Error err = E_INVALID_VALUE;
-+ const uint32_t *uint32_prop;
-+ const char *str_prop;
-+ int lenp;
-+ uint32_t tmp_prop;
-+
-+ dev_node = GetFmAdvArgsDevTreeNode(p_LnxWrpFmDev->id);
-+ if (!dev_node) /* no advanced parameters for FMan */
-+ return E_OK;
-+
-+ str_prop = (char *)of_get_property(dev_node, "dma-aid-mode", &lenp);
-+ if (str_prop) {
-+ if (strcmp(str_prop, "port") == 0)
-+ err = FM_ConfigDmaAidMode(p_LnxWrpFmDev->h_Dev, e_FM_DMA_AID_OUT_PORT_ID);
-+ else if (strcmp(str_prop, "tnum") == 0)
-+ err = FM_ConfigDmaAidMode(p_LnxWrpFmDev->h_Dev, e_FM_DMA_AID_OUT_TNUM);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ uint32_prop = (uint32_t *)of_get_property(dev_node,
-+ "total-fifo-size", &lenp);
-+ if (uint32_prop) {
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ if (FM_ConfigTotalFifoSize(p_LnxWrpFmDev->h_Dev,
-+ tmp_prop) != E_OK)
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+ }
-+
-+ uint32_prop = (uint32_t *)of_get_property(dev_node, "tnum-aging-period",
-+ &lenp);
-+ if (uint32_prop) {
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ err = FM_ConfigTnumAgingPeriod(p_LnxWrpFmDev->h_Dev,
-+ (uint16_t)tmp_prop/*tnumAgingPeriod*/);
-+
-+ if (err != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ of_node_put(dev_node);
-+
-+ return E_OK;
-+}
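-+
-+/*
-+ * The lookups above and in GetFmAdvArgsDevTreeNode() assume an optional node
-+ * of roughly the following shape (names and values below are illustrative
-+ * placeholders; only the properties that are actually parsed are shown):
-+ *
-+ *	fman0-extd-args {
-+ *		compatible = "fsl,fman-extended-args";
-+ *		cell-index = <0>;
-+ *		dma-aid-mode = "port";		(or "tnum")
-+ *		total-fifo-size = <0x...>;	(as expected by FM_ConfigTotalFifoSize)
-+ *		tnum-aging-period = <0x...>;	(as expected by FM_ConfigTnumAgingPeriod)
-+ *	};
-+ */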
-+
-+static void LnxwrpFmDevExceptionsCb(t_Handle h_App, e_FmExceptions exception)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)h_App;
-+
-+ ASSERT_COND(p_LnxWrpFmDev);
-+
-+ DBG(INFO, ("got fm exception %d", exception));
-+
-+ /* do nothing */
-+ UNUSED(exception);
-+}
-+
-+static void LnxwrpFmDevBusErrorCb(t_Handle h_App,
-+ e_FmPortType portType,
-+ uint8_t portId,
-+ uint64_t addr,
-+ uint8_t tnum,
-+ uint16_t liodn)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)h_App;
-+
-+ ASSERT_COND(p_LnxWrpFmDev);
-+
-+ /* do nothing */
-+ UNUSED(portType);UNUSED(portId);UNUSED(addr);UNUSED(tnum);UNUSED(liodn);
-+}
-+
-+static t_Error ConfigureFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+ struct resource *dev_res;
-+ int _errno;
-+
-+ if (!p_LnxWrpFmDev->active)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM not configured!!!"));
-+
-+#ifndef MODULE
-+ _errno = can_request_irq(p_LnxWrpFmDev->irq, 0);
-+ if (unlikely(_errno < 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno));
-+#endif
-+ _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, IRQF_SHARED, "fman", p_LnxWrpFmDev);
-+ if (unlikely(_errno < 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->irq, _errno));
-+
-+ enable_irq_wake(p_LnxWrpFmDev->irq);
-+
-+ if (p_LnxWrpFmDev->err_irq != 0) {
-+#ifndef MODULE
-+ _errno = can_request_irq(p_LnxWrpFmDev->err_irq, 0);
-+ if (unlikely(_errno < 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno));
-+#endif
-+ _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->err_irq, fm_err_irq, IRQF_SHARED, "fman-err", p_LnxWrpFmDev);
-+ if (unlikely(_errno < 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->err_irq, _errno));
-+
-+ enable_irq_wake(p_LnxWrpFmDev->err_irq);
-+ }
-+
-+ p_LnxWrpFmDev->res = devm_request_mem_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize, "fman");
-+ if (unlikely(p_LnxWrpFmDev->res == NULL))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_mem_region() failed"));
-+
-+ p_LnxWrpFmDev->fmBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize));
-+ if (unlikely(p_LnxWrpFmDev->fmBaseAddr == 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
-+
-+ if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmBaseAddr, (uint64_t)p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM memory map"));
-+
-+ dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize, "fman-muram");
-+ if (unlikely(dev_res == NULL))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed"));
-+
-+ p_LnxWrpFmDev->fmMuramBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize));
-+ if (unlikely(p_LnxWrpFmDev->fmMuramBaseAddr == 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
-+
-+ if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmMuramBaseAddr, (uint64_t)p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM MURAM memory map"));
-+
-+ if (p_LnxWrpFmDev->fmRtcPhysBaseAddr)
-+ {
-+ dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize, "fman-ptp-timer");
-+ if (unlikely(dev_res == NULL))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed"));
-+
-+ p_LnxWrpFmDev->fmRtcBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize));
-+ if (unlikely(p_LnxWrpFmDev->fmRtcBaseAddr == 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
-+
-+ if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmRtcBaseAddr, (uint64_t)p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC memory map"));
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_LnxWrpFmDev->fmVspPhysBaseAddr) {
-+ dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmVspPhysBaseAddr, p_LnxWrpFmDev->fmVspMemSize, "fman-vsp");
-+ if (unlikely(dev_res == NULL))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed"));
-+
-+ p_LnxWrpFmDev->fmVspBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmVspPhysBaseAddr, p_LnxWrpFmDev->fmVspMemSize));
-+ if (unlikely(p_LnxWrpFmDev->fmVspBaseAddr == 0))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed"));
-+ }
-+#endif
-+
-+ p_LnxWrpFmDev->fmDevSettings.param.baseAddr = p_LnxWrpFmDev->fmBaseAddr;
-+ p_LnxWrpFmDev->fmDevSettings.param.fmId = p_LnxWrpFmDev->id;
-+ p_LnxWrpFmDev->fmDevSettings.param.irq = NO_IRQ;
-+ p_LnxWrpFmDev->fmDevSettings.param.errIrq = NO_IRQ;
-+ p_LnxWrpFmDev->fmDevSettings.param.f_Exception = LnxwrpFmDevExceptionsCb;
-+ p_LnxWrpFmDev->fmDevSettings.param.f_BusError = LnxwrpFmDevBusErrorCb;
-+ p_LnxWrpFmDev->fmDevSettings.param.h_App = p_LnxWrpFmDev;
-+
-+ return FillRestFmInfo(p_LnxWrpFmDev);
-+}
-+
-+#ifndef CONFIG_FMAN_ARM
-+/*
-+ * Table of compatible strings used to match the device tree
-+ * guts node on QorIQ SoCs.
-+ * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
-+ * SoCs; older SoCs use the "fsl,qoriq-device-config-1.0"
-+ * string.
-+*/
-+static const struct of_device_id guts_device_ids[] = {
-+ { .compatible = "fsl,qoriq-device-config-1.0", },
-+ { .compatible = "fsl,qoriq-device-config-2.0", },
-+ {}
-+};
-+
-+static unsigned int get_rcwsr(int regnum)
-+{
-+ struct ccsr_guts __iomem *guts_regs = NULL;
-+ struct device_node *guts_node;
-+
-+ guts_node = of_find_matching_node(NULL, guts_device_ids);
-+ if (!guts_node) {
-+ pr_err("could not find GUTS node\n");
-+ return 0;
-+ }
-+ guts_regs = of_iomap(guts_node, 0);
-+ of_node_put(guts_node);
-+ if (!guts_regs) {
-+ pr_err("ioremap of GUTS node failed\n");
-+ return 0;
-+ }
-+
-+ return ioread32be(&guts_regs->rcwsr[regnum]);
-+}
-+
-+#define FMAN1_ALL_MACS_MASK 0xFCC00000
-+#define FMAN2_ALL_MACS_MASK 0x000FCC00
-+
-+/**
-+ * @Function ResetOnInitErrata_A007273
-+ *
-+ * @Description Workaround for Errata A-007273
-+ * This workaround is required to avoid a FMan hang during reset on initialization.
-+ * Enable all MACs in guts.devdisr2 register,
-+ * then perform a regular FMan reset and then restore MACs to their original state.
-+ *
-+ * @Param[in] h_Fm - FM module descriptor
-+ *
-+ * @Return None.
-+ */
-+void ResetOnInitErrata_A007273(t_Handle h_Fm)
-+{
-+ struct ccsr_guts __iomem *guts_regs = NULL;
-+ struct device_node *guts_node;
-+ u32 devdisr2, enableMacs;
-+
-+ /* Get guts registers */
-+ guts_node = of_find_matching_node(NULL, guts_device_ids);
-+ if (!guts_node) {
-+ pr_err("could not find GUTS node\n");
-+ return;
-+ }
-+ guts_regs = of_iomap(guts_node, 0);
-+ of_node_put(guts_node);
-+ if (!guts_regs) {
-+ pr_err("ioremap of GUTS node failed\n");
-+ return;
-+ }
-+
-+ /* Read current state */
-+ devdisr2 = ioread32be(&guts_regs->devdisr2);
-+
-+ if (FmGetId(h_Fm) == 0)
-+ enableMacs = devdisr2 & ~FMAN1_ALL_MACS_MASK;
-+ else
-+ enableMacs = devdisr2 & ~FMAN2_ALL_MACS_MASK;
-+
-+ /* Enable all MACs */
-+ iowrite32be(enableMacs, &guts_regs->devdisr2);
-+
-+ /* Perform standard FMan reset */
-+ FmReset(h_Fm);
-+
-+ /* Restore devdisr2 value */
-+ iowrite32be(devdisr2, &guts_regs->devdisr2);
-+
-+ iounmap(guts_regs);
-+}
-+#endif
-+
-+static t_Error InitFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+ const struct qe_firmware *fw;
-+
-+ if (!p_LnxWrpFmDev->active)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM not configured!!!"));
-+
-+ if ((p_LnxWrpFmDev->h_MuramDev = FM_MURAM_ConfigAndInit(p_LnxWrpFmDev->fmMuramBaseAddr, p_LnxWrpFmDev->fmMuramMemSize)) == NULL)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-MURAM!"));
-+
-+ /* Loading the fman-controller code */
-+ fw = FindFmanMicrocode();
-+
-+ if (!fw) {
-+ /* this forces the reuse of the current IRAM content */
-+ p_LnxWrpFmDev->fmDevSettings.param.firmware.size = 0;
-+ p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code = NULL;
-+ } else {
-+ p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code =
-+ (void *) fw + be32_to_cpu(fw->microcode[0].code_offset);
-+ p_LnxWrpFmDev->fmDevSettings.param.firmware.size =
-+ sizeof(u32) * be32_to_cpu(fw->microcode[0].count);
-+ DBG(INFO, ("Loading fman-controller code version %d.%d.%d",
-+ fw->microcode[0].major,
-+ fw->microcode[0].minor,
-+ fw->microcode[0].revision));
-+ }
-+
-+#ifdef CONFIG_FMAN_ARM
-+ { /* endianness adjustments: byteswap the ucode retrieved from the f/w blob */
-+ int i;
-+ int usz = p_LnxWrpFmDev->fmDevSettings.param.firmware.size;
-+ void * p_Code = p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code;
-+ u32 *dest = kzalloc(usz, GFP_KERNEL);
-+
-+ if (p_Code && dest)
-+ for(i=0; i < usz / 4; ++i)
-+ dest[i] = be32_to_cpu(((u32 *)p_Code)[i]);
-+
-+ p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code = dest;
-+ }
-+#endif
-+
-+ p_LnxWrpFmDev->fmDevSettings.param.h_FmMuram = p_LnxWrpFmDev->h_MuramDev;
-+
-+#if (DPAA_VERSION >= 11)
-+ if (p_LnxWrpFmDev->fmVspBaseAddr) {
-+ p_LnxWrpFmDev->fmDevSettings.param.vspBaseAddr = p_LnxWrpFmDev->fmVspBaseAddr;
-+ p_LnxWrpFmDev->fmDevSettings.param.partVSPBase = 0;
-+ p_LnxWrpFmDev->fmDevSettings.param.partNumOfVSPs = FM_VSP_MAX_NUM_OF_ENTRIES;
-+ }
-+#endif
-+
-+#ifdef CONFIG_FMAN_ARM
-+ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio = 1;
-+#else
-+ if(p_LnxWrpFmDev->fmDevSettings.param.fmId == 0)
-+ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio =
-+ !!(get_rcwsr(4) & 0x2); /* RCW[FM_MAC_RAT0] */
-+ else
-+ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio =
-+ !!(get_rcwsr(4) & 0x1); /* RCW[FM_MAC_RAT1] */
-+
-+ {
-+ /* T4 Devices ClkRatio is always 1 regardless of RCW[FM_MAC_RAT1] */
-+ uint32_t svr;
-+ svr = mfspr(SPRN_SVR);
-+
-+ if ((svr & SVR_DEVICE_ID_MASK) == SVR_T4_DEVICE_ID)
-+ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio = 1;
-+ }
-+#endif /* CONFIG_FMAN_ARM */
-+
-+ if ((p_LnxWrpFmDev->h_Dev = FM_Config(&p_LnxWrpFmDev->fmDevSettings.param)) == NULL)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM"));
-+
-+
-+ if (FM_ConfigResetOnInit(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
-+
-+#ifndef CONFIG_FMAN_ARM
-+#ifdef FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273
-+ if (FM_ConfigResetOnInitOverrideCallback(p_LnxWrpFmDev->h_Dev, ResetOnInitErrata_A007273) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
-+#endif /* FM_HANG_AT_RESET_MAC_CLK_DISABLED_ERRATA_FMAN_A007273 */
-+#endif /* CONFIG_FMAN_ARM */
-+
-+#ifdef CONFIG_FMAN_P1023
-+ if (FM_ConfigDmaAidOverride(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
-+#endif
-+
-+
-+ CheckNConfigFmAdvArgs(p_LnxWrpFmDev);
-+
-+ if (FM_Init(p_LnxWrpFmDev->h_Dev) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM"));
-+
-+ /* TODO: Why do we mask these interrupts? */
-+ if (p_LnxWrpFmDev->err_irq == 0) {
-+ FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_DMA_BUS_ERROR,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_DMA_READ_ECC,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_DMA_SYSTEM_WRITE_ECC,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_DMA_FM_WRITE_ECC,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_DMA_SINGLE_PORT_ECC, FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_FPM_STALL_ON_TASKS , FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_FPM_SINGLE_ECC, FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_FPM_DOUBLE_ECC,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_QMI_SINGLE_ECC, FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_QMI_DOUBLE_ECC,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_BMI_LIST_RAM_ECC,FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_BMI_STORAGE_PROFILE_ECC, FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_BMI_STATISTICS_RAM_ECC, FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_BMI_DISPATCH_RAM_ECC, FALSE);
-+ FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_IRAM_ECC,FALSE);
-+ /* TODO: FmDisableRamsEcc assert for ramsEccOwners.
-+ * FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_MURAM_ECC,FALSE);*/
-+ }
-+
-+ if (p_LnxWrpFmDev->fmRtcBaseAddr)
-+ {
-+ t_FmRtcParams fmRtcParam;
-+
-+ memset(&fmRtcParam, 0, sizeof(fmRtcParam));
-+ fmRtcParam.h_App = p_LnxWrpFmDev;
-+ fmRtcParam.h_Fm = p_LnxWrpFmDev->h_Dev;
-+ fmRtcParam.baseAddress = p_LnxWrpFmDev->fmRtcBaseAddr;
-+
-+ if(!(p_LnxWrpFmDev->h_RtcDev = FM_RTC_Config(&fmRtcParam)))
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-RTC"));
-+
-+ if (FM_RTC_ConfigPeriod(p_LnxWrpFmDev->h_RtcDev, DPA_PTP_NOMINAL_FREQ_PERIOD_NS) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC"));
-+
-+ if (FM_RTC_Init(p_LnxWrpFmDev->h_RtcDev) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC"));
-+ }
-+
-+ return E_OK;
-+}
-+
-+/* TODO: to be moved back here */
-+extern void FreeFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev);
-+
-+static void FreeFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+ if (!p_LnxWrpFmDev->active)
-+ return;
-+
-+ FreeFmPcdDev(p_LnxWrpFmDev);
-+
-+ if (p_LnxWrpFmDev->h_RtcDev)
-+ FM_RTC_Free(p_LnxWrpFmDev->h_RtcDev);
-+
-+ if (p_LnxWrpFmDev->h_Dev)
-+ FM_Free(p_LnxWrpFmDev->h_Dev);
-+
-+ if (p_LnxWrpFmDev->h_MuramDev)
-+ FM_MURAM_Free(p_LnxWrpFmDev->h_MuramDev);
-+
-+ if (p_LnxWrpFmDev->fmRtcBaseAddr)
-+ {
-+ SYS_UnregisterIoMap(p_LnxWrpFmDev->fmRtcBaseAddr);
-+ devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmRtcBaseAddr));
-+ __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize);
-+ }
-+ SYS_UnregisterIoMap(p_LnxWrpFmDev->fmMuramBaseAddr);
-+ devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmMuramBaseAddr));
-+ __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize);
-+ SYS_UnregisterIoMap(p_LnxWrpFmDev->fmBaseAddr);
-+ devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr));
-+ devm_release_mem_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize);
-+ if (p_LnxWrpFmDev->err_irq != 0) {
-+ devm_free_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->err_irq, p_LnxWrpFmDev);
-+ }
-+
-+ devm_free_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, p_LnxWrpFmDev);
-+}
-+
-+/* FMan character device file operations */
-+extern struct file_operations fm_fops;
-+
-+static int /*__devinit*/ fm_probe(struct platform_device *of_dev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+
-+ if ((p_LnxWrpFmDev = ReadFmDevTreeNode(of_dev)) == NULL)
-+ return -EIO;
-+ if (ConfigureFmDev(p_LnxWrpFmDev) != E_OK)
-+ return -EIO;
-+ if (InitFmDev(p_LnxWrpFmDev) != E_OK)
-+ return -EIO;
-+
-+ /* IOCTL ABI checking */
-+ LnxWrpPCDIOCTLEnumChecking();
-+ LnxWrpPCDIOCTLTypeChecking();
-+
-+ Sprint (p_LnxWrpFmDev->name, "%s%d", DEV_FM_NAME, p_LnxWrpFmDev->id);
-+
-+ /* Register to the /dev for IOCTL API */
-+ /* Register dynamically a new major number for the character device: */
-+ if ((p_LnxWrpFmDev->major = register_chrdev(0, p_LnxWrpFmDev->name, &fm_fops)) <= 0) {
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Failed to allocate a major number for device \"%s\"", p_LnxWrpFmDev->name));
-+ return -EIO;
-+ }
-+
-+ /* Creating classes for FM */
-+ DBG(TRACE ,("class_create fm_class"));
-+ p_LnxWrpFmDev->fm_class = class_create(THIS_MODULE, p_LnxWrpFmDev->name);
-+ if (IS_ERR(p_LnxWrpFmDev->fm_class)) {
-+ unregister_chrdev(p_LnxWrpFmDev->major, p_LnxWrpFmDev->name);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("class_create error fm_class"));
-+ return -EIO;
-+ }
-+
-+ device_create(p_LnxWrpFmDev->fm_class, NULL, MKDEV(p_LnxWrpFmDev->major, DEV_FM_MINOR_BASE), NULL,
-+ "fm%d", p_LnxWrpFmDev->id);
-+ device_create(p_LnxWrpFmDev->fm_class, NULL, MKDEV(p_LnxWrpFmDev->major, DEV_FM_PCD_MINOR_BASE), NULL,
-+ "fm%d-pcd", p_LnxWrpFmDev->id);
-+ dev_set_drvdata(p_LnxWrpFmDev->dev, p_LnxWrpFmDev);
-+
-+ /* create sysfs entries for stats and regs */
-+ if ( fm_sysfs_create(p_LnxWrpFmDev->dev) !=0 )
-+ {
-+ FreeFmDev(p_LnxWrpFmDev);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Unable to create sysfs entry - fm!!!"));
-+ return -EIO;
-+ }
-+
-+#ifdef CONFIG_PM
-+ device_set_wakeup_capable(p_LnxWrpFmDev->dev, true);
-+#endif
-+
-+ DBG(TRACE, ("FM%d probed", p_LnxWrpFmDev->id));
-+
-+ return 0;
-+}
-+
-+static int fm_remove(struct platform_device *of_dev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ struct device *dev;
-+
-+ dev = &of_dev->dev;
-+ p_LnxWrpFmDev = dev_get_drvdata(dev);
-+
-+ fm_sysfs_destroy(dev);
-+
-+ DBG(TRACE, ("destroy fm_class"));
-+ device_destroy(p_LnxWrpFmDev->fm_class, MKDEV(p_LnxWrpFmDev->major, DEV_FM_MINOR_BASE));
-+ device_destroy(p_LnxWrpFmDev->fm_class, MKDEV(p_LnxWrpFmDev->major, DEV_FM_PCD_MINOR_BASE));
-+ class_destroy(p_LnxWrpFmDev->fm_class);
-+
-+ /* Destroy chardev */
-+ unregister_chrdev(p_LnxWrpFmDev->major, p_LnxWrpFmDev->name);
-+
-+ FreeFmDev(p_LnxWrpFmDev);
-+
-+ DestroyFmDev(p_LnxWrpFmDev);
-+
-+ dev_set_drvdata(dev, NULL);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id fm_match[] = {
-+ {
-+ .compatible = "fsl,fman"
-+ },
-+ {}
-+};
-+#ifndef MODULE
-+MODULE_DEVICE_TABLE(of, fm_match);
-+#endif /* !MODULE */
-+
-+#if defined CONFIG_PM && (defined CONFIG_PPC || defined CONFIG_PPC64)
-+
-+#define SCFG_FMCLKDPSLPCR_ADDR 0xFFE0FC00C
-+#define SCFG_FMCLKDPSLPCR_DS_VAL 0x48402000
-+#define SCFG_FMCLKDPSLPCR_NORMAL_VAL 0x00402000
-+
-+struct device *g_fm_dev;
-+
-+static int fm_soc_suspend(struct device *dev)
-+{
-+ int err = 0;
-+ uint32_t *fmclk;
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = dev_get_drvdata(get_device(dev));
-+ g_fm_dev = dev;
-+ fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
-+ WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_DS_VAL);
-+ if (p_LnxWrpFmDev->h_DsarRxPort)
-+ {
-+#ifdef CONFIG_FSL_QORIQ_PM
-+ device_set_wakeup_enable(p_LnxWrpFmDev->dev, 1);
-+#endif
-+ err = FM_PORT_EnterDsarFinal(p_LnxWrpFmDev->h_DsarRxPort,
-+ p_LnxWrpFmDev->h_DsarTxPort);
-+ }
-+ return err;
-+}
-+
-+static int fm_soc_resume(struct device *dev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = dev_get_drvdata(get_device(dev));
-+ uint32_t *fmclk;
-+ fmclk = ioremap(SCFG_FMCLKDPSLPCR_ADDR, 4);
-+ WRITE_UINT32(*fmclk, SCFG_FMCLKDPSLPCR_NORMAL_VAL);
-+ if (p_LnxWrpFmDev->h_DsarRxPort)
-+ {
-+#ifdef CONFIG_FSL_QORIQ_PM
-+ device_set_wakeup_enable(p_LnxWrpFmDev->dev, 0);
-+#endif
-+ FM_PORT_ExitDsar(p_LnxWrpFmDev->h_DsarRxPort,
-+ p_LnxWrpFmDev->h_DsarTxPort);
-+ p_LnxWrpFmDev->h_DsarRxPort = 0;
-+ p_LnxWrpFmDev->h_DsarTxPort = 0;
-+ }
-+ return 0;
-+}
-+
-+static const struct dev_pm_ops fm_pm_ops = {
-+ .suspend = fm_soc_suspend,
-+ .resume = fm_soc_resume,
-+};
-+
-+#define FM_PM_OPS (&fm_pm_ops)
-+
-+#else /* CONFIG_PM && (CONFIG_PPC || CONFIG_PPC64) */
-+
-+#define FM_PM_OPS NULL
-+
-+#endif /* CONFIG_PM && (CONFIG_PPC || CONFIG_PPC64) */
-+
-+static struct platform_driver fm_driver = {
-+ .driver = {
-+ .name = "fsl-fman",
-+ .of_match_table = fm_match,
-+ .owner = THIS_MODULE,
-+ .pm = FM_PM_OPS,
-+ },
-+ .probe = fm_probe,
-+ .remove = fm_remove
-+};
-+
-+t_Handle LNXWRP_FM_Init(void)
-+{
-+ memset(&lnxWrpFm, 0, sizeof(lnxWrpFm));
-+ mutex_init(&lnxwrp_mutex);
-+
-+ /* Register to the DTB for basic FM API */
-+ platform_driver_register(&fm_driver);
-+
-+ return &lnxWrpFm;
-+}
-+
-+t_Error LNXWRP_FM_Free(t_Handle h_LnxWrpFm)
-+{
-+ platform_driver_unregister(&fm_driver);
-+ mutex_destroy(&lnxwrp_mutex);
-+
-+ return E_OK;
-+}
-+
-+
-+struct fm * fm_bind(struct device *fm_dev)
-+{
-+ return (struct fm *)(dev_get_drvdata(get_device(fm_dev)));
-+}
-+EXPORT_SYMBOL(fm_bind);
-+
-+void fm_unbind(struct fm *fm)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
-+
-+ put_device(p_LnxWrpFmDev->dev);
-+}
-+EXPORT_SYMBOL(fm_unbind);
-+
-+struct resource * fm_get_mem_region(struct fm *fm)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
-+
-+ return p_LnxWrpFmDev->res;
-+}
-+EXPORT_SYMBOL(fm_get_mem_region);
-+
-+void * fm_get_handle(struct fm *fm)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
-+
-+ return (void *)p_LnxWrpFmDev->h_Dev;
-+}
-+EXPORT_SYMBOL(fm_get_handle);
-+
-+void * fm_get_rtc_handle(struct fm *fm)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm;
-+
-+ return (void *)p_LnxWrpFmDev->h_RtcDev;
-+}
-+EXPORT_SYMBOL(fm_get_rtc_handle);
-+
-+struct fm_port * fm_port_bind (struct device *fm_port_dev)
-+{
-+ return (struct fm_port *)(dev_get_drvdata(get_device(fm_port_dev)));
-+}
-+EXPORT_SYMBOL(fm_port_bind);
-+
-+void fm_port_unbind(struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
-+
-+ put_device(p_LnxWrpFmPortDev->dev);
-+}
-+EXPORT_SYMBOL(fm_port_unbind);
-+
-+void *fm_port_get_handle(const struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
-+
-+ return (void *)p_LnxWrpFmPortDev->h_Dev;
-+}
-+EXPORT_SYMBOL(fm_port_get_handle);
-+
-+u64 *fm_port_get_buffer_time_stamp(const struct fm_port *port,
-+ const void *data)
-+{
-+ return FM_PORT_GetBufferTimeStamp(fm_port_get_handle(port),
-+ (void *)data);
-+}
-+EXPORT_SYMBOL(fm_port_get_buffer_time_stamp);
-+
-+void fm_port_get_base_addr(const struct fm_port *port, uint64_t *base_addr)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+
-+ *base_addr = p_LnxWrpFmPortDev->settings.param.baseAddr;
-+}
-+EXPORT_SYMBOL(fm_port_get_base_addr);
-+
-+void fm_port_pcd_bind (struct fm_port *port, struct fm_port_pcd_param *params)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
-+
-+ p_LnxWrpFmPortDev->pcd_owner_params.cba = params->cba;
-+ p_LnxWrpFmPortDev->pcd_owner_params.cbf = params->cbf;
-+ p_LnxWrpFmPortDev->pcd_owner_params.dev = params->dev;
-+}
-+EXPORT_SYMBOL(fm_port_pcd_bind);
-+
-+void fm_port_get_buff_layout_ext_params(struct fm_port *port, struct fm_port_params *params)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ struct device_node *fm_node, *port_node;
-+ const uint32_t *uint32_prop;
-+ int lenp;
-+
-+ params->data_align = 0;
-+ params->manip_extra_space = 0;
-+
-+ fm_node = GetFmAdvArgsDevTreeNode(((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev)->id);
-+ if (!fm_node) /* no advanced parameters for FMan */
-+ return;
-+
-+ port_node = GetFmPortAdvArgsDevTreeNode(fm_node,
-+ p_LnxWrpFmPortDev->settings.param.portType,
-+ p_LnxWrpFmPortDev->settings.param.portId);
-+ if (!port_node) /* no advanced parameters for FMan-Port */
-+ return;
-+
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "buffer-layout", &lenp);
-+ if (uint32_prop) {
-+ if (WARN_ON(lenp != sizeof(uint32_t)*2))
-+ return;
-+
-+ params->manip_extra_space = (uint8_t)be32_to_cpu(uint32_prop[0]);
-+ params->data_align = (uint16_t)be32_to_cpu(uint32_prop[1]);
-+ }
-+
-+ of_node_put(port_node);
-+ of_node_put(fm_node);
-+}
-+EXPORT_SYMBOL(fm_port_get_buff_layout_ext_params);
-+
-+uint16_t fm_get_tx_port_channel(struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
-+
-+ return p_LnxWrpFmPortDev->txCh;
-+}
-+EXPORT_SYMBOL(fm_get_tx_port_channel);
-+
-+int fm_port_enable (struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
-+ t_Error err = FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev);
-+
-+ return GET_ERROR_TYPE(err);
-+}
-+EXPORT_SYMBOL(fm_port_enable);
-+
-+int fm_port_disable(struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
-+ t_Error err = FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev);
-+
-+ return GET_ERROR_TYPE(err);
-+}
-+EXPORT_SYMBOL(fm_port_disable);
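-+
-+/*
-+ * A minimal usage sketch for the exported port wrappers, as seen from a
-+ * client driver (error handling omitted; "fm_port_dev" is assumed to be the
-+ * struct device of the FMan port the caller is interested in):
-+ *
-+ *	struct fm_port *port = fm_port_bind(fm_port_dev);
-+ *	void *h_port = fm_port_get_handle(port);
-+ *
-+ *	fm_port_enable(port);
-+ *	...
-+ *	fm_port_disable(port);
-+ *	fm_port_unbind(port);
-+ */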
-+
-+int fm_port_set_rate_limit(struct fm_port *port,
-+ uint16_t max_burst_size,
-+ uint32_t rate_limit)
-+{
-+ t_FmPortRateLimit param;
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ int err = 0;
-+
-+ param.maxBurstSize = max_burst_size;
-+ param.rateLimit = rate_limit;
-+ param.rateLimitDivider = 0;
-+
-+ err = FM_PORT_SetRateLimit(p_LnxWrpFmPortDev->h_Dev, &param);
-+ return err;
-+}
-+EXPORT_SYMBOL(fm_port_set_rate_limit);
-+
-+int fm_port_del_rate_limit(struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+
-+ FM_PORT_DeleteRateLimit(p_LnxWrpFmPortDev->h_Dev);
-+ return 0;
-+}
-+EXPORT_SYMBOL(fm_port_del_rate_limit);
-+
-+void FM_PORT_Dsar_DumpRegs(void);
-+int ar_showmem(struct file *file, const char __user *buffer,
-+ unsigned long count, void *data)
-+{
-+ FM_PORT_Dsar_DumpRegs();
-+ return 2;
-+}
-+
-+struct auto_res_tables_sizes *fm_port_get_autores_maxsize(
-+ struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ return &p_LnxWrpFmPortDev->dsar_table_sizes;
-+}
-+EXPORT_SYMBOL(fm_port_get_autores_maxsize);
-+
-+int fm_port_enter_autores_for_deepsleep(struct fm_port *port,
-+ struct auto_res_port_params *params)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ t_LnxWrpFmDev* p_LnxWrpFmDev = (t_LnxWrpFmDev*)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ p_LnxWrpFmDev->h_DsarRxPort = p_LnxWrpFmPortDev->h_Dev;
-+ p_LnxWrpFmDev->h_DsarTxPort = params->h_FmPortTx;
-+
-+ /*Register other under /proc/autoresponse */
-+ if (WARN_ON(sizeof(t_FmPortDsarParams) != sizeof(struct auto_res_port_params)))
-+ return -EFAULT;
-+
-+ FM_PORT_EnterDsar(p_LnxWrpFmPortDev->h_Dev, (t_FmPortDsarParams*)params);
-+ return 0;
-+}
-+EXPORT_SYMBOL(fm_port_enter_autores_for_deepsleep);
-+
-+void fm_port_exit_auto_res_for_deep_sleep(struct fm_port *port_rx,
-+ struct fm_port *port_tx)
-+{
-+}
-+EXPORT_SYMBOL(fm_port_exit_auto_res_for_deep_sleep);
-+
-+int fm_port_get_autores_stats(struct fm_port *port,
-+ struct auto_res_port_stats *stats)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ if (WARN_ON(sizeof(t_FmPortDsarStats) != sizeof(struct auto_res_port_stats)))
-+ return -EFAULT;
-+ return FM_PORT_GetDsarStats(p_LnxWrpFmPortDev->h_Dev, (t_FmPortDsarStats*)stats);
-+}
-+EXPORT_SYMBOL(fm_port_get_autores_stats);
-+
-+int fm_port_suspend(struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ if (!FM_PORT_IsInDsar(p_LnxWrpFmPortDev->h_Dev))
-+ return FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev);
-+ else
-+ return 0;
-+}
-+EXPORT_SYMBOL(fm_port_suspend);
-+
-+int fm_port_resume(struct fm_port *port)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ if (!FM_PORT_IsInDsar(p_LnxWrpFmPortDev->h_Dev))
-+ return FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev);
-+ else
-+ return 0;
-+}
-+EXPORT_SYMBOL(fm_port_resume);
-+
-+bool fm_port_is_in_auto_res_mode(struct fm_port *port)
-+{
-+ return FM_PORT_IsInDsar(port);
-+}
-+EXPORT_SYMBOL(fm_port_is_in_auto_res_mode);
-+
-+#ifdef CONFIG_FMAN_PFC
-+int fm_port_set_pfc_priorities_mapping_to_qman_wq(struct fm_port *port,
-+ uint8_t prio, uint8_t wq)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+ int err;
-+ int _errno;
-+
-+ err = FM_PORT_SetPfcPrioritiesMappingToQmanWQ(p_LnxWrpFmPortDev->h_Dev,
-+ prio, wq);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_PORT_SetPfcPrioritiesMappingToQmanWQ() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_port_set_pfc_priorities_mapping_to_qman_wq);
-+#endif
-+
-+int fm_mac_set_exception(struct fm_mac_dev *fm_mac_dev,
-+ e_FmMacExceptions exception, bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MAC_SetException(fm_mac_dev, exception, enable);
-+
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_SetException() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_set_exception);
-+
-+int fm_mac_free(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int err;
-+ int _error;
-+
-+ err = FM_MAC_Free(fm_mac_dev);
-+ _error = -GET_ERROR_TYPE(err);
-+
-+ if (unlikely(_error < 0))
-+ pr_err("FM_MAC_Free() = 0x%08x\n", err);
-+
-+ return _error;
-+}
-+EXPORT_SYMBOL(fm_mac_free);
-+
-+struct fm_mac_dev *fm_mac_config(t_FmMacParams *params)
-+{
-+ struct fm_mac_dev *fm_mac_dev;
-+
-+ fm_mac_dev = FM_MAC_Config(params);
-+ if (unlikely(fm_mac_dev == NULL))
-+ pr_err("FM_MAC_Config() failed\n");
-+
-+ return fm_mac_dev;
-+}
-+EXPORT_SYMBOL(fm_mac_config);
-+
-+int fm_mac_config_max_frame_length(struct fm_mac_dev *fm_mac_dev,
-+ int len)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MAC_ConfigMaxFrameLength(fm_mac_dev, len);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_ConfigMaxFrameLength() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_config_max_frame_length);
-+
-+int fm_mac_config_pad_and_crc(struct fm_mac_dev *fm_mac_dev, bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MAC_ConfigPadAndCrc(fm_mac_dev, enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_ConfigPadAndCrc() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_config_pad_and_crc);
-+
-+int fm_mac_config_half_duplex(struct fm_mac_dev *fm_mac_dev, bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MAC_ConfigHalfDuplex(fm_mac_dev, enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_ConfigHalfDuplex() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_config_half_duplex);
-+
-+int fm_mac_config_reset_on_init(struct fm_mac_dev *fm_mac_dev, bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MAC_ConfigResetOnInit(fm_mac_dev, enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_ConfigResetOnInit() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_config_reset_on_init);
-+
-+int fm_mac_init(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MAC_Init(fm_mac_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_Init() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_init);
-+
-+int fm_mac_get_version(struct fm_mac_dev *fm_mac_dev, uint32_t *version)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MAC_GetVesrion(fm_mac_dev, version);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_GetVesrion() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_get_version);
-+
-+int fm_mac_enable(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_Enable(fm_mac_dev, e_COMM_MODE_RX_AND_TX);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_Enable() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_enable);
-+
-+int fm_mac_disable(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_Disable(fm_mac_dev, e_COMM_MODE_RX_AND_TX);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_Disable() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_disable);
-+
-+int fm_mac_resume(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_Resume(fm_mac_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_Resume() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_resume);
-+
-+int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev,
-+ bool enable)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_SetPromiscuous(fm_mac_dev, enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_SetPromiscuous() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_set_promiscuous);
-+
-+int fm_mac_remove_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
-+ t_EnetAddr *mac_addr)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_RemoveHashMacAddr(fm_mac_dev, mac_addr);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0) {
-+ pr_err("FM_MAC_RemoveHashMacAddr() = 0x%08x\n", err);
-+ return _errno;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(fm_mac_remove_hash_mac_addr);
-+
-+int fm_mac_add_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
-+ t_EnetAddr *mac_addr)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_AddHashMacAddr(fm_mac_dev, mac_addr);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0) {
-+ pr_err("FM_MAC_AddHashMacAddr() = 0x%08x\n", err);
-+ return _errno;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(fm_mac_add_hash_mac_addr);
-+
-+int fm_mac_modify_mac_addr(struct fm_mac_dev *fm_mac_dev,
-+ uint8_t *addr)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_ModifyMacAddr(fm_mac_dev, (t_EnetAddr *)addr);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0)
-+ pr_err("FM_MAC_ModifyMacAddr() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_modify_mac_addr);
-+
-+int fm_mac_adjust_link(struct fm_mac_dev *fm_mac_dev,
-+ bool link, int speed, bool duplex)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ if (!link) {
-+#if (DPAA_VERSION < 11)
-+ FM_MAC_RestartAutoneg(fm_mac_dev);
-+#endif
-+ return 0;
-+ }
-+
-+ err = FM_MAC_AdjustLink(fm_mac_dev, speed, duplex);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_AdjustLink() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_adjust_link);
-+
-+int fm_mac_enable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_Enable1588TimeStamp(fm_mac_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_Enable1588TimeStamp() = 0x%08x\n", err);
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_enable_1588_time_stamp);
-+
-+int fm_mac_disable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_MAC_Disable1588TimeStamp(fm_mac_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MAC_Disable1588TimeStamp() = 0x%08x\n", err);
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_disable_1588_time_stamp);
-+
-+int fm_mac_set_rx_pause_frames(
-+ struct fm_mac_dev *fm_mac_dev, bool en)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ /* if rx pause is enabled, do NOT ignore pause frames */
-+ err = FM_MAC_SetRxIgnorePauseFrames(fm_mac_dev, !en);
-+
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0)
-+ pr_err("FM_MAC_SetRxIgnorePauseFrames() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_set_rx_pause_frames);
-+
-+#ifdef CONFIG_FMAN_PFC
-+int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
-+ bool en)
-+{
-+ int _errno, i;
-+ t_Error err;
-+
-+ if (en)
-+ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
-+ err = FM_MAC_SetTxPauseFrames(fm_mac_dev,
-+ i, fsl_fm_pfc_quanta[i],
-+ FSL_FM_PAUSE_THRESH_DEFAULT);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0) {
-+ pr_err("FM_MAC_SetTxPauseFrames() = 0x%08x\n", err);
-+ return _errno;
-+ }
-+ }
-+ else
-+ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
-+ err = FM_MAC_SetTxPauseFrames(fm_mac_dev,
-+ i, FSL_FM_PAUSE_TIME_DISABLE,
-+ FSL_FM_PAUSE_THRESH_DEFAULT);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0) {
-+ pr_err("FM_MAC_SetTxPauseFrames() = 0x%08x\n", err);
-+ return _errno;
-+ }
-+ }
-+
-+ return _errno;
-+}
-+#else
-+int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
-+ bool en)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ if (en)
-+ err = FM_MAC_SetTxAutoPauseFrames(fm_mac_dev,
-+ FSL_FM_PAUSE_TIME_ENABLE);
-+ else
-+ err = FM_MAC_SetTxAutoPauseFrames(fm_mac_dev,
-+ FSL_FM_PAUSE_TIME_DISABLE);
-+
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0)
-+ pr_err("FM_MAC_SetTxAutoPauseFrames() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+#endif
-+EXPORT_SYMBOL(fm_mac_set_tx_pause_frames);
-+
-+int fm_rtc_enable(struct fm *fm_dev)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_Enable(fm_get_rtc_handle(fm_dev), 0);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_Enable = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_enable);
-+
-+int fm_rtc_disable(struct fm *fm_dev)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_Disable(fm_get_rtc_handle(fm_dev));
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_Disable = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_disable);
-+
-+int fm_rtc_get_cnt(struct fm *fm_dev, uint64_t *ts)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_GetCurrentTime(fm_get_rtc_handle(fm_dev), ts);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_GetCurrentTime = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_get_cnt);
-+
-+int fm_rtc_set_cnt(struct fm *fm_dev, uint64_t ts)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_SetCurrentTime(fm_get_rtc_handle(fm_dev), ts);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_SetCurrentTime = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_set_cnt);
-+
-+int fm_rtc_get_drift(struct fm *fm_dev, uint32_t *drift)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_GetFreqCompensation(fm_get_rtc_handle(fm_dev),
-+ drift);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_GetFreqCompensation = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_get_drift);
-+
-+int fm_rtc_set_drift(struct fm *fm_dev, uint32_t drift)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_SetFreqCompensation(fm_get_rtc_handle(fm_dev),
-+ drift);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_SetFreqCompensation = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_set_drift);
-+
-+int fm_rtc_set_alarm(struct fm *fm_dev, uint32_t id,
-+ uint64_t time)
-+{
-+ t_FmRtcAlarmParams alarm;
-+ int _errno;
-+ t_Error err;
-+
-+ alarm.alarmId = id;
-+ alarm.alarmTime = time;
-+ alarm.f_AlarmCallback = NULL;
-+ err = FM_RTC_SetAlarm(fm_get_rtc_handle(fm_dev),
-+ &alarm);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_SetAlarm = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_set_alarm);
-+
-+int fm_rtc_set_fiper(struct fm *fm_dev, uint32_t id,
-+ uint64_t fiper)
-+{
-+ t_FmRtcPeriodicPulseParams pp;
-+ int _errno;
-+ t_Error err;
-+
-+ pp.periodicPulseId = id;
-+ pp.periodicPulsePeriod = fiper;
-+ pp.f_PeriodicPulseCallback = NULL;
-+ err = FM_RTC_SetPeriodicPulse(fm_get_rtc_handle(fm_dev), &pp);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_SetPeriodicPulse = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_set_fiper);
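-+
-+/*
-+ * A minimal sketch of how a PTP clock driver might drive these RTC wrappers
-+ * (error handling omitted; "dev" is assumed to be the FMan's struct device,
-+ * and "delta"/"drift" are caller-chosen values):
-+ *
-+ *	struct fm *fm_dev = fm_bind(dev);
-+ *	uint64_t ts;
-+ *
-+ *	fm_rtc_enable(fm_dev);
-+ *	fm_rtc_get_cnt(fm_dev, &ts);
-+ *	fm_rtc_set_alarm(fm_dev, 0, ts + delta);
-+ *	fm_rtc_set_drift(fm_dev, drift);
-+ *	...
-+ *	fm_rtc_disable(fm_dev);
-+ *	fm_unbind(fm_dev);
-+ */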
-+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+int fm_rtc_enable_interrupt(struct fm *fm_dev, uint32_t events)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_EnableInterrupt(fm_get_rtc_handle(fm_dev),
-+ events);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_EnableInterrupt = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_enable_interrupt);
-+
-+int fm_rtc_disable_interrupt(struct fm *fm_dev, uint32_t events)
-+{
-+ int _errno;
-+ t_Error err;
-+
-+ err = FM_RTC_DisableInterrupt(fm_get_rtc_handle(fm_dev),
-+ events);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_RTC_DisableInterrupt = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_rtc_disable_interrupt);
-+#endif
-+
-+int fm_mac_set_wol(struct fm_port *port, struct fm_mac_dev *fm_mac_dev, bool en)
-+{
-+ int _errno;
-+ t_Error err;
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
-+
-+ /* Do not set WoL on AR ports */
-+ if (FM_PORT_IsInDsar(p_LnxWrpFmPortDev->h_Dev)) {
-+ printk(KERN_WARNING "Port is AutoResponse enabled! WoL will not be set on this port!\n");
-+ return 0;
-+ }
-+
-+ err = FM_MAC_SetWakeOnLan(fm_mac_dev, en);
-+
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (_errno < 0)
-+ pr_err("FM_MAC_SetWakeOnLan() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_mac_set_wol);
-+
-+void fm_mutex_lock(void)
-+{
-+ mutex_lock(&lnxwrp_mutex);
-+}
-+EXPORT_SYMBOL(fm_mutex_lock);
-+
-+void fm_mutex_unlock(void)
-+{
-+ mutex_unlock(&lnxwrp_mutex);
-+}
-+EXPORT_SYMBOL(fm_mutex_unlock);
-+
-+/* MACsec wrapper functions */
-+struct fm_macsec_dev *fm_macsec_config(struct fm_macsec_params *fm_params)
-+{
-+ struct fm_macsec_dev *fm_macsec_dev;
-+
-+ fm_macsec_dev = FM_MACSEC_Config((t_FmMacsecParams *)fm_params);
-+ if (unlikely(fm_macsec_dev == NULL))
-+ pr_err("FM_MACSEC_Config() failed\n");
-+
-+ return fm_macsec_dev;
-+}
-+EXPORT_SYMBOL(fm_macsec_config);
-+
-+int fm_macsec_init(struct fm_macsec_dev *fm_macsec_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_Init(fm_macsec_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_Init() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_init);
-+
-+int fm_macsec_free(struct fm_macsec_dev *fm_macsec_dev)
-+{
-+ int err;
-+ int _error;
-+
-+ err = FM_MACSEC_Free(fm_macsec_dev);
-+ _error = -GET_ERROR_TYPE(err);
-+
-+ if (unlikely(_error < 0))
-+ pr_err("FM_MACSEC_Free() = 0x%08x\n", err);
-+
-+ return _error;
-+}
-+EXPORT_SYMBOL(fm_macsec_free);
-+
-+int fm_macsec_config_unknown_sci_frame_treatment(struct fm_macsec_dev
-+ *fm_macsec_dev,
-+ fm_macsec_unknown_sci_frame_treatment treat_mode)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigUnknownSciFrameTreatment(fm_macsec_dev,
-+ treat_mode);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigUnknownSciFrameTreatmen() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_unknown_sci_frame_treatment);
-+
-+int fm_macsec_config_invalid_tags_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
-+ bool deliver_uncontrolled)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigInvalidTagsFrameTreatment(fm_macsec_dev,
-+ deliver_uncontrolled);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigInvalidTagsFrameTreatment() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_invalid_tags_frame_treatment);
-+
-+int fm_macsec_config_kay_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
-+ bool discard_uncontrolled)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment(fm_macsec_dev,
-+ discard_uncontrolled);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigEncryptWithNoChangedTextFrameTreatment() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_kay_frame_treatment);
-+
-+int fm_macsec_config_untag_frame_treatment(struct fm_macsec_dev *fm_macsec_dev,
-+ fm_macsec_untag_frame_treatment treat_mode)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigUntagFrameTreatment(fm_macsec_dev, treat_mode);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigUntagFrameTreatment() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_untag_frame_treatment);
-+
-+int fm_macsec_config_pn_exhaustion_threshold(struct fm_macsec_dev *fm_macsec_dev,
-+ uint32_t pn_exh_thr)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigPnExhaustionThreshold(fm_macsec_dev, pn_exh_thr);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigPnExhaustionThreshold() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_pn_exhaustion_threshold);
-+
-+int fm_macsec_config_keys_unreadable(struct fm_macsec_dev *fm_macsec_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigKeysUnreadable(fm_macsec_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigKeysUnreadable() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_keys_unreadable);
-+
-+int fm_macsec_config_sectag_without_sci(struct fm_macsec_dev *fm_macsec_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigSectagWithoutSCI(fm_macsec_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigSectagWithoutSCI() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_sectag_without_sci);
-+
-+int fm_macsec_config_exception(struct fm_macsec_dev *fm_macsec_dev,
-+ fm_macsec_exception exception, bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_ConfigException(fm_macsec_dev, exception, enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_ConfigException() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_config_exception);
-+
-+int fm_macsec_get_revision(struct fm_macsec_dev *fm_macsec_dev,
-+ int *macsec_revision)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_GetRevision(fm_macsec_dev, macsec_revision);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_GetRevision() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_get_revision);
-+
-+int fm_macsec_enable(struct fm_macsec_dev *fm_macsec_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_Enable(fm_macsec_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_Enable() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_enable);
-+
-+int fm_macsec_disable(struct fm_macsec_dev *fm_macsec_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_Disable(fm_macsec_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_Disable() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_disable);
-+
-+int fm_macsec_set_exception(struct fm_macsec_dev *fm_macsec_dev,
-+ fm_macsec_exception exception, bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SetException(fm_macsec_dev, exception, enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SetException() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_set_exception);
-+
-+/* Macsec SECY wrapper API */
-+struct fm_macsec_secy_dev *fm_macsec_secy_config(struct fm_macsec_secy_params *secy_params)
-+{
-+ struct fm_macsec_secy_dev *fm_macsec_secy;
-+
-+ fm_macsec_secy = FM_MACSEC_SECY_Config((t_FmMacsecSecYParams *)secy_params);
-+ if (unlikely(fm_macsec_secy == NULL))
-+ pr_err("FM_MACSEC_SECY_Config() failed\n");
-+
-+ return fm_macsec_secy;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config);
-+
-+int fm_macsec_secy_init(struct fm_macsec_secy_dev *fm_macsec_secy_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_Init(fm_macsec_secy_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_Init() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_init);
-+
-+int fm_macsec_secy_free(struct fm_macsec_secy_dev *fm_macsec_secy_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_Free(fm_macsec_secy_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_Free() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_free);
-+
-+int fm_macsec_secy_config_sci_insertion_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_sci_insertion_mode sci_insertion_mode)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigSciInsertionMode(fm_macsec_secy_dev,
-+ sci_insertion_mode);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigSciInsertionMode() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_sci_insertion_mode);
-+
-+int fm_macsec_secy_config_protect_frames(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ bool protect_frames)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigProtectFrames(fm_macsec_secy_dev,
-+ protect_frames);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigProtectFrames() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_protect_frames);
-+
-+int fm_macsec_secy_config_replay_window(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ bool replay_protect, uint32_t replay_window)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigReplayWindow(fm_macsec_secy_dev,
-+ replay_protect, replay_window);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigReplayWindow() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_replay_window);
-+
-+int fm_macsec_secy_config_validation_mode(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_valid_frame_behavior validate_frames)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigValidationMode(fm_macsec_secy_dev,
-+ validate_frames);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigValidationMode() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_validation_mode);
-+
-+int fm_macsec_secy_config_confidentiality(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ bool confidentiality_enable,
-+ uint32_t confidentiality_offset)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigConfidentiality(fm_macsec_secy_dev,
-+ confidentiality_enable,
-+ confidentiality_offset);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigConfidentiality() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_confidentiality);
-+
-+int fm_macsec_secy_config_point_to_point(struct fm_macsec_secy_dev *fm_macsec_secy_dev)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigPointToPoint(fm_macsec_secy_dev);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigPointToPoint() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_point_to_point);
-+
-+int fm_macsec_secy_config_exception(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_secy_exception exception,
-+ bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigException(fm_macsec_secy_dev, exception,
-+ enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigException() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_exception);
-+
-+int fm_macsec_secy_config_event(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ fm_macsec_secy_event event,
-+ bool enable)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_ConfigEvent(fm_macsec_secy_dev, event, enable);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_ConfigEvent() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_config_event);
-+
-+struct rx_sc_dev *fm_macsec_secy_create_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct fm_macsec_secy_sc_params *params)
-+{
-+ struct rx_sc_dev *rx_sc_dev;
-+
-+ rx_sc_dev = FM_MACSEC_SECY_CreateRxSc(fm_macsec_secy_dev, (t_FmMacsecSecYSCParams *)params);
-+ if (unlikely(rx_sc_dev == NULL))
-+ pr_err("FM_MACSEC_SECY_CreateRxSc() failed\n");
-+
-+ return rx_sc_dev;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_create_rxsc);
-+
-+int fm_macsec_secy_delete_rxsc(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_DeleteRxSc(fm_macsec_secy_dev, sc);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_DeleteRxSc() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_delete_rxsc);
-+
-+int fm_macsec_secy_create_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc, macsec_an_t an,
-+ uint32_t lowest_pn, macsec_sa_key_t key)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_CreateRxSa(fm_macsec_secy_dev, sc, an,
-+ lowest_pn, key);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_CreateRxSa() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_create_rx_sa);
-+
-+int fm_macsec_secy_delete_rx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc, macsec_an_t an)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_DeleteRxSa(fm_macsec_secy_dev, sc, an);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_DeleteRxSa() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_delete_rx_sa);
-+
-+int fm_macsec_secy_rxsa_enable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_RxSaEnableReceive(fm_macsec_secy_dev, sc, an);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_RxSaEnableReceive() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_rxsa_enable_receive);
-+
-+int fm_macsec_secy_rxsa_disable_receive(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_RxSaDisableReceive(fm_macsec_secy_dev, sc, an);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_RxSaDisableReceive() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_rxsa_disable_receive);
-+
-+int fm_macsec_secy_rxsa_update_next_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an, uint32_t updt_next_pn)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_RxSaUpdateNextPn(fm_macsec_secy_dev, sc, an,
-+ updt_next_pn);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_RxSaUpdateNextPn() = 0x%08x\n", err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_rxsa_update_next_pn);
-+
-+int fm_macsec_secy_rxsa_update_lowest_pn(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an, uint32_t updt_lowest_pn)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_RxSaUpdateLowestPn(fm_macsec_secy_dev, sc, an,
-+ updt_lowest_pn);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_RxSaUpdateLowestPn() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_rxsa_update_lowest_pn);
-+
-+int fm_macsec_secy_rxsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc,
-+ macsec_an_t an, macsec_sa_key_t key)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_RxSaModifyKey(fm_macsec_secy_dev, sc, an, key);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_RxSaModifyKey() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_rxsa_modify_key);
-+
-+int fm_macsec_secy_create_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t an, macsec_sa_key_t key)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_CreateTxSa(fm_macsec_secy_dev, an, key);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_CreateTxSa() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_create_tx_sa);
-+
-+int fm_macsec_secy_delete_tx_sa(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t an)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_DeleteTxSa(fm_macsec_secy_dev, an);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_DeleteTxSa() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_delete_tx_sa);
-+
-+int fm_macsec_secy_txsa_modify_key(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t next_active_an,
-+ macsec_sa_key_t key)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_TxSaModifyKey(fm_macsec_secy_dev, next_active_an,
-+ key);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_TxSaModifyKey() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_txsa_modify_key);
-+
-+int fm_macsec_secy_txsa_set_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t an)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_TxSaSetActive(fm_macsec_secy_dev, an);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_TxSaSetActive() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_txsa_set_active);
-+
-+int fm_macsec_secy_txsa_get_active(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ macsec_an_t *p_an)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_TxSaGetActive(fm_macsec_secy_dev, p_an);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_TxSaGetActive() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_txsa_get_active);
-+
-+int fm_macsec_secy_get_rxsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ struct rx_sc_dev *sc, uint32_t *sc_phys_id)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_GetRxScPhysId(fm_macsec_secy_dev, sc, sc_phys_id);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_GetRxScPhysId() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_get_rxsc_phys_id);
-+
-+int fm_macsec_secy_get_txsc_phys_id(struct fm_macsec_secy_dev *fm_macsec_secy_dev,
-+ uint32_t *sc_phys_id)
-+{
-+ int err;
-+ int _errno;
-+
-+ err = FM_MACSEC_SECY_GetTxScPhysId(fm_macsec_secy_dev, sc_phys_id);
-+ _errno = -GET_ERROR_TYPE(err);
-+ if (unlikely(_errno < 0))
-+ pr_err("FM_MACSEC_SECY_GetTxScPhysId() = 0x%08x\n",
-+ err);
-+
-+ return _errno;
-+}
-+EXPORT_SYMBOL(fm_macsec_secy_get_txsc_phys_id);
-+
-+static t_Handle h_FmLnxWrp;
-+
-+static int __init __cold fm_load (void)
-+{
-+ if ((h_FmLnxWrp = LNXWRP_FM_Init()) == NULL)
-+ {
-+ printk(KERN_ERR "Failed to init FM wrapper!\n");
-+ return -ENODEV;
-+ }
-+
-+ printk(KERN_CRIT "Freescale FM module, FMD API version %d.%d.%d\n",
-+ FMD_API_VERSION_MAJOR,
-+ FMD_API_VERSION_MINOR,
-+ FMD_API_VERSION_RESPIN);
-+ return 0;
-+}
-+
-+static void __exit __cold fm_unload (void)
-+{
-+ if (h_FmLnxWrp)
-+ LNXWRP_FM_Free(h_FmLnxWrp);
-+}
-+
-+module_init(fm_load);
-+module_exit(fm_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.h
-@@ -0,0 +1,294 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_fm.h
-+
-+ @Author Shlomi Gridish
-+
-+ @Description FM Linux wrapper functions.
-+
-+*/
-+
-+#ifndef __LNXWRP_FM_H__
-+#define __LNXWRP_FM_H__
-+
-+#include <linux/fsl_qman.h> /* struct qman_fq */
-+
-+#include "std_ext.h"
-+#include "error_ext.h"
-+#include "list_ext.h"
-+
-+#include "lnxwrp_fm_ext.h"
-+
-+#define FM_MAX_NUM_OF_ADV_SETTINGS 10
-+
-+#define LNXWRP_FM_NUM_OF_SHARED_PROFILES 16
-+
-+#if defined(CONFIG_FMAN_DISABLE_OH_TO_REUSE_RESOURCES)
-+#define FM_10G_OPENDMA_MIN_TRESHOLD 8 /* 10g minimum threshold if only HC is enabled and no OH port enabled */
-+#define FM_OPENDMA_RX_TX_RAPORT 2 /* RX = 2*TX */
-+#else
-+#define FM_10G_OPENDMA_MIN_TRESHOLD 7 /* 10g minimum threshold if 7 OH ports are enabled */
-+#define FM_OPENDMA_RX_TX_RAPORT 1 /* RX = TX */
-+#endif
-+#define FM_DEFAULT_TX10G_OPENDMA 8 /* default TX 10g open dmas */
-+#define FM_DEFAULT_RX10G_OPENDMA 8 /* default RX 10g open dmas */
-+
-+#define FRAG_MANIP_SPACE 128
-+#define FRAG_DATA_ALIGN 64
-+
-+#ifndef CONFIG_FSL_FM_MAX_FRAME_SIZE
-+#define CONFIG_FSL_FM_MAX_FRAME_SIZE 0
-+#endif
-+
-+#ifndef CONFIG_FSL_FM_RX_EXTRA_HEADROOM
-+#define CONFIG_FSL_FM_RX_EXTRA_HEADROOM 16
-+#endif
-+
-+typedef enum {
-+ e_NO_PCD = 0,
-+ e_FM_PCD_3_TUPLE
-+} e_LnxWrpFmPortPcdDefUseCase;
-+
-+
-+typedef struct t_FmTestFq {
-+ struct qman_fq fq_base;
-+ t_Handle h_Arg;
-+} t_FmTestFq;
-+
-+typedef struct {
-+ uint8_t id; /* sw port id, see SW_PORT_ID_TO_HW_PORT_ID() in fm_common.h */
-+ int minor;
-+ char name[20];
-+ bool active;
-+ uint64_t phys_baseAddr;
-+ uint64_t baseAddr; /* Port's *virtual* address */
-+ uint32_t memSize;
-+ t_WrpFmPortDevSettings settings;
-+ t_FmExtPools opExtPools;
-+ uint8_t totalNumOfSchemes;
-+ uint8_t schemesBase;
-+ uint8_t numOfSchemesUsed;
-+ uint32_t pcdBaseQ;
-+ uint16_t pcdNumOfQs;
-+ struct fm_port_pcd_param pcd_owner_params;
-+ e_LnxWrpFmPortPcdDefUseCase defPcd;
-+ t_Handle h_DefNetEnv;
-+ t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES];
-+ t_FmBufferPrefixContent buffPrefixContent;
-+ t_Handle h_Dev;
-+ t_Handle h_DfltVsp;
-+ t_Handle h_LnxWrpFmDev;
-+ uint16_t txCh;
-+ struct device *dev;
-+ struct device_attribute *dev_attr_stats;
-+ struct device_attribute *dev_attr_regs;
-+ struct device_attribute *dev_attr_bmi_regs;
-+ struct device_attribute *dev_attr_qmi_regs;
-+#if (DPAA_VERSION >= 11)
-+ struct device_attribute *dev_attr_ipv4_opt;
-+#endif
-+ struct device_attribute *dev_attr_dsar_regs;
-+ struct device_attribute *dev_attr_dsar_mem;
-+ struct auto_res_tables_sizes dsar_table_sizes;
-+} t_LnxWrpFmPortDev;
-+
-+typedef struct {
-+ uint8_t id;
-+ bool active;
-+ uint64_t baseAddr;
-+ uint32_t memSize;
-+ t_WrpFmMacDevSettings settings;
-+ t_Handle h_Dev;
-+ t_Handle h_LnxWrpFmDev;
-+} t_LnxWrpFmMacDev;
-+
-+/* Information about all active ports of an FMan.
-+ * Note: some ports may be disabled by U-Boot and thus will not be available. */
-+struct fm_active_ports {
-+ uint32_t num_oh_ports;
-+ uint32_t num_tx_ports;
-+ uint32_t num_rx_ports;
-+ uint32_t num_tx25_ports;
-+ uint32_t num_rx25_ports;
-+ uint32_t num_tx10_ports;
-+ uint32_t num_rx10_ports;
-+};
-+
-+/* FMan resources precalculated at FM probe time,
-+ * based on the available FMan ports. */
-+struct fm_resource_settings {
-+ /* buffers - fifo sizes */
-+ uint32_t tx1g_num_buffers;
-+ uint32_t rx1g_num_buffers;
-+ uint32_t tx2g5_num_buffers; /* Not supported yet by LLD */
-+ uint32_t rx2g5_num_buffers; /* Not supported yet by LLD */
-+ uint32_t tx10g_num_buffers;
-+ uint32_t rx10g_num_buffers;
-+ uint32_t oh_num_buffers;
-+ uint32_t shared_ext_buffers;
-+
-+ /* open DMAs */
-+ uint32_t tx_1g_dmas;
-+ uint32_t rx_1g_dmas;
-+ uint32_t tx_2g5_dmas; /* Not supported yet by LLD */
-+ uint32_t rx_2g5_dmas; /* Not supported yet by LLD */
-+ uint32_t tx_10g_dmas;
-+ uint32_t rx_10g_dmas;
-+ uint32_t oh_dmas;
-+ uint32_t shared_ext_open_dma;
-+
-+ /* Tnums */
-+ uint32_t tx_1g_tnums;
-+ uint32_t rx_1g_tnums;
-+ uint32_t tx_2g5_tnums; /* Not supported yet by LLD */
-+ uint32_t rx_2g5_tnums; /* Not supported yet by LLD */
-+ uint32_t tx_10g_tnums;
-+ uint32_t rx_10g_tnums;
-+ uint32_t oh_tnums;
-+ uint32_t shared_ext_tnums;
-+};
-+
-+typedef struct {
-+ uint8_t id;
-+ char name[10];
-+ bool active;
-+ bool pcdActive;
-+ bool prsActive;
-+ bool kgActive;
-+ bool ccActive;
-+ bool plcrActive;
-+ e_LnxWrpFmPortPcdDefUseCase defPcd;
-+ uint32_t usedSchemes;
-+ uint8_t totalNumOfSharedSchemes;
-+ uint8_t sharedSchemesBase;
-+ uint8_t numOfSchemesUsed;
-+ uint8_t defNetEnvId;
-+ uint64_t fmPhysBaseAddr;
-+ uint64_t fmBaseAddr;
-+ uint32_t fmMemSize;
-+ uint64_t fmMuramPhysBaseAddr;
-+ uint64_t fmMuramBaseAddr;
-+ uint32_t fmMuramMemSize;
-+ uint64_t fmRtcPhysBaseAddr;
-+ uint64_t fmRtcBaseAddr;
-+ uint32_t fmRtcMemSize;
-+ uint64_t fmVspPhysBaseAddr;
-+ uint64_t fmVspBaseAddr;
-+ uint32_t fmVspMemSize;
-+ int irq;
-+ int err_irq;
-+ t_WrpFmDevSettings fmDevSettings;
-+ t_WrpFmPcdDevSettings fmPcdDevSettings;
-+ t_Handle h_Dev;
-+ uint16_t hcCh;
-+
-+ t_Handle h_MuramDev;
-+ t_Handle h_PcdDev;
-+ t_Handle h_RtcDev;
-+
-+ t_Handle h_DsarRxPort;
-+ t_Handle h_DsarTxPort;
-+
-+ t_LnxWrpFmPortDev hcPort;
-+ t_LnxWrpFmPortDev opPorts[FM_MAX_NUM_OF_OH_PORTS-1];
-+ t_LnxWrpFmPortDev rxPorts[FM_MAX_NUM_OF_RX_PORTS];
-+ t_LnxWrpFmPortDev txPorts[FM_MAX_NUM_OF_TX_PORTS];
-+ t_LnxWrpFmMacDev macs[FM_MAX_NUM_OF_MACS];
-+ struct fm_active_ports fm_active_ports_info;
-+ struct fm_resource_settings fm_resource_settings_info;
-+
-+ struct device *dev;
-+ struct resource *res;
-+ int major;
-+ struct class *fm_class;
-+ struct device_attribute *dev_attr_stats;
-+ struct device_attribute *dev_attr_regs;
-+ struct device_attribute *dev_attr_risc_load;
-+
-+ struct device_attribute *dev_pcd_attr_stats;
-+ struct device_attribute *dev_plcr_attr_regs;
-+ struct device_attribute *dev_prs_attr_regs;
-+ struct device_attribute *dev_fm_fpm_attr_regs;
-+ struct device_attribute *dev_fm_kg_attr_regs;
-+ struct device_attribute *dev_fm_kg_pe_attr_regs;
-+ struct device_attribute *dev_attr_muram_free_size;
-+ struct device_attribute *dev_attr_fm_ctrl_code_ver;
-+
-+
-+ struct qman_fq *hc_tx_conf_fq, *hc_tx_err_fq, *hc_tx_fq;
-+} t_LnxWrpFmDev;
-+
-+typedef struct {
-+ t_LnxWrpFmDev *p_FmDevs[INTG_MAX_NUM_OF_FM];
-+} t_LnxWrpFm;
-+#define LNXWRP_FM_OBJECT(ptr) LIST_OBJECT(ptr, t_LnxWrpFm, fms[((t_LnxWrpFmDev *)ptr)->id])
-+
-+
-+t_Error LnxwrpFmIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat);
-+t_Error LnxwrpFmPortIOCTL(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev, unsigned int cmd, unsigned long arg, bool compat);
-+
-+
-+#if 0
-+static __inline__ t_Error AllocSchemesForPort(t_LnxWrpFmDev *p_LnxWrpFmDev, uint8_t numSchemes, uint8_t *p_BaseSchemeNum)
-+{
-+ uint32_t schemeMask;
-+ uint8_t i;
-+
-+ if (!numSchemes)
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ schemeMask = 0x80000000;
-+ *p_BaseSchemeNum = 0xff;
-+
-+ for (i=0; schemeMask && numSchemes; schemeMask>>=1, i++)
-+ if ((p_LnxWrpFmDev->usedSchemes & schemeMask) == 0)
-+ {
-+ p_LnxWrpFmDev->usedSchemes |= schemeMask;
-+ numSchemes--;
-+ if (*p_BaseSchemeNum==0xff)
-+ *p_BaseSchemeNum = i;
-+ }
-+ else if (*p_BaseSchemeNum!=0xff)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Fragmentation on schemes array!!!"));
-+
-+ if (numSchemes)
-+ RETURN_ERROR(MINOR, E_FULL, ("schemes!!!"));
-+ return E_OK;
-+}
-+#endif
-+
-+void LnxWrpPCDIOCTLTypeChecking(void);
-+void LnxWrpPCDIOCTLEnumChecking(void);
-+
-+#endif /* __LNXWRP_FM_H__ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c
-@@ -0,0 +1,1512 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_fm_port.c
-+
-+ @Description FMD wrapper - FMan port functions.
-+
-+*/
-+
-+#include <linux/version.h>
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#include <config/modversions.h>
-+#endif /* MODVERSIONS */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_address.h>
-+#include <linux/cdev.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+#ifndef CONFIG_FMAN_ARM
-+#include <linux/fsl/svr.h>
-+#endif
-+#include <linux/io.h>
-+
-+#include "sprint_ext.h"
-+#include "fm_common.h"
-+#include "lnxwrp_fsl_fman.h"
-+#include "fm_port_ext.h"
-+#if (DPAA_VERSION >= 11)
-+#include "fm_vsp_ext.h"
-+#endif /* DPAA_VERSION >= 11 */
-+#include "fm_ioctls.h"
-+#include "lnxwrp_resources.h"
-+#include "lnxwrp_sysfs_fm_port.h"
-+
-+#define __ERR_MODULE__ MODULE_FM
-+
-+extern struct device_node *GetFmAdvArgsDevTreeNode (uint8_t fmIndx);
-+
-+/* TODO: duplicated, see lnxwrp_fm.c */
-+#define ADD_ADV_CONFIG_NO_RET(_func, _param)\
-+do {\
-+ if (i < max) {\
-+ p_Entry = &p_Entrys[i];\
-+ p_Entry->p_Function = _func;\
-+ _param\
-+ i++;\
-+ } else {\
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,\
-+ ("Number of advanced-configuration entries exceeded"));\
-+ } \
-+} while (0)
-+
-+#ifndef CONFIG_FMAN_ARM
-+#define IS_T1023_T1024 (SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1024 || \
-+ SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1023)
-+#endif
-+
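-+/* Number of host-command frames enqueued and still awaiting Tx confirmation;
-+ * incremented in QmEnqueueCB() and decremented in qm_tx_conf_dqrr_cb(),
-+ * both under 'lock'. */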
-+static volatile int hcFrmRcv/* = 0 */;
-+static spinlock_t lock;
-+
-+static enum qman_cb_dqrr_result qm_tx_conf_dqrr_cb(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry
-+ *dq)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = ((t_FmTestFq *) fq)->h_Arg;
-+ unsigned long flags;
-+
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+{
-+ /* extract the HC frame address */
-+ uint32_t *hcf_va = XX_PhysToVirt(qm_fd_addr((struct qm_fd *)&dq->fd));
-+ int hcf_l = ((struct qm_fd *)&dq->fd)->length20;
-+ int i;
-+
-+ /* 32b byteswap of all data in the HC Frame */
-+ for(i = 0; i < hcf_l / 4; ++i)
-+ hcf_va[i] =
-+ ___constant_swab32(hcf_va[i]);
-+}
-+#endif
-+ FM_PCD_HcTxConf(p_LnxWrpFmDev->h_PcdDev, (t_DpaaFD *)&dq->fd);
-+ spin_lock_irqsave(&lock, flags);
-+ hcFrmRcv--;
-+ spin_unlock_irqrestore(&lock, flags);
-+
-+ return qman_cb_dqrr_consume;
-+}
-+
-+static enum qman_cb_dqrr_result qm_tx_dqrr_cb(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
-+ __func__);
-+ return qman_cb_dqrr_consume;
-+}
-+
-+static void qm_err_cb(struct qman_portal *portal,
-+ struct qman_fq *fq, const struct qm_mr_entry *msg)
-+{
-+ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
-+ __func__);
-+}
-+
-+static struct qman_fq *FqAlloc(t_LnxWrpFmDev * p_LnxWrpFmDev,
-+ uint32_t fqid,
-+ uint32_t flags, uint16_t channel, uint8_t wq)
-+{
-+ int _errno;
-+ struct qman_fq *fq = NULL;
-+ t_FmTestFq *p_FmtFq;
-+ struct qm_mcc_initfq initfq;
-+
-+ p_FmtFq = (t_FmTestFq *) XX_Malloc(sizeof(t_FmTestFq));
-+ if (!p_FmtFq) {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
-+ return NULL;
-+ }
-+
-+ p_FmtFq->fq_base.cb.dqrr = ((flags & QMAN_FQ_FLAG_NO_ENQUEUE)
-+ ? qm_tx_conf_dqrr_cb
-+ : qm_tx_dqrr_cb);
-+ p_FmtFq->fq_base.cb.ern = qm_err_cb;
-+ /* p_FmtFq->fq_base.cb.fqs = qm_err_cb; */
-+ /* qm_err_cb wrongly called when the FQ is parked */
-+ p_FmtFq->fq_base.cb.fqs = NULL;
-+ p_FmtFq->h_Arg = (t_Handle) p_LnxWrpFmDev;
-+ if (fqid == 0) {
-+ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
-+ flags &= ~QMAN_FQ_FLAG_NO_MODIFY;
-+ } else {
-+ flags &= ~QMAN_FQ_FLAG_DYNAMIC_FQID;
-+ }
-+
-+ if (qman_create_fq(fqid, flags, &p_FmtFq->fq_base)) {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj - qman_new_fq!!!"));
-+ XX_Free(p_FmtFq);
-+ return NULL;
-+ }
-+ fq = &p_FmtFq->fq_base;
-+
-+ if (!(flags & QMAN_FQ_FLAG_NO_MODIFY)) {
-+ initfq.we_mask = QM_INITFQ_WE_DESTWQ;
-+ initfq.fqd.dest.channel = channel;
-+ initfq.fqd.dest.wq = wq;
-+
-+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
-+ if (unlikely(_errno < 0)) {
-+ REPORT_ERROR(MAJOR, E_NO_MEMORY,
-+ ("FQ obj - qman_init_fq!!!"));
-+ qman_destroy_fq(fq, 0);
-+ XX_Free(p_FmtFq);
-+ return NULL;
-+ }
-+ }
-+
-+ DBG(TRACE,
-+ ("fqid %d, flags 0x%08x, channel %d, wq %d", qman_fq_fqid(fq),
-+ flags, channel, wq));
-+
-+ return fq;
-+}
-+
-+static void FqFree(struct qman_fq *fq)
-+{
-+ int _errno;
-+
-+ _errno = qman_retire_fq(fq, NULL);
-+ if (unlikely(_errno < 0))
-+ printk(KERN_WARNING "qman_retire_fq(%u) = %d\n", qman_fq_fqid(fq), _errno);
-+
-+ _errno = qman_oos_fq(fq);
-+ if (unlikely(_errno < 0))
-+ printk(KERN_WARNING "qman_oos_fq(%u) = %d\n", qman_fq_fqid(fq), _errno);
-+
-+ qman_destroy_fq(fq, 0);
-+ XX_Free((t_FmTestFq *) fq);
-+}
-+
-+static t_Error QmEnqueueCB(t_Handle h_Arg, void *p_Fd)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_Arg;
-+ int _errno, timeout = 1000000;
-+ unsigned long flags;
-+
-+ ASSERT_COND(p_LnxWrpFmDev);
-+
-+ spin_lock_irqsave(&lock, flags);
-+ hcFrmRcv++;
-+ spin_unlock_irqrestore(&lock, flags);
-+
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+{
-+ /* extract the HC frame address */
-+ uint32_t *hcf_va = XX_PhysToVirt(qm_fd_addr((struct qm_fd *) p_Fd));
-+ int hcf_l = ((struct qm_fd *)p_Fd)->length20;
-+ int i;
-+
-+ /* 32b byteswap of all data in the HC Frame */
-+ for(i = 0; i < hcf_l / 4; ++i)
-+ hcf_va[i] =
-+ ___constant_swab32(hcf_va[i]);
-+}
-+#endif
-+
-+ _errno = qman_enqueue(p_LnxWrpFmDev->hc_tx_fq, (struct qm_fd *) p_Fd,
-+ 0);
-+ if (_errno)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE,
-+ ("qman_enqueue() failed"));
-+
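-+ /* Busy-wait (roughly one second of udelay(1) iterations) for
-+ * qm_tx_conf_dqrr_cb() to confirm the host-command frame and bring
-+ * hcFrmRcv back down. */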
-+ while (hcFrmRcv && --timeout) {
-+ udelay(1);
-+ cpu_relax();
-+ }
-+ if (timeout == 0) {
-+ dump_stack();
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED,
-+ ("timeout waiting for Tx confirmation"));
-+ return E_WRITE_FAILED;
-+ }
-+
-+ return E_OK;
-+}
-+
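-+/* Parse an FMan port device-tree node: resolve the parent FMan device, map
-+ * the node's compatible string and cell-index to the matching
-+ * t_LnxWrpFmPortDev slot (host-command/OP/Rx/Tx, 1G/10G) and record its
-+ * register window and, for Tx/OH ports, its QMan channel. */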
-+static t_LnxWrpFmPortDev *ReadFmPortDevTreeNode(struct platform_device
-+ *of_dev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+ struct device_node *fm_node, *port_node;
-+ struct resource res;
-+ const uint32_t *uint32_prop;
-+ int _errno = 0, lenp;
-+ uint32_t tmp_prop;
-+
-+#ifdef CONFIG_FMAN_P1023
-+ static unsigned char have_oh_port/* = 0 */;
-+#endif
-+
-+ port_node = of_node_get(of_dev->dev.of_node);
-+
-+ /* Get the FM node */
-+ fm_node = of_get_parent(port_node);
-+ if (unlikely(fm_node == NULL)) {
-+ REPORT_ERROR(MAJOR, E_NO_DEVICE,
-+ ("of_get_parent() = %d", _errno));
-+ return NULL;
-+ }
-+
-+ p_LnxWrpFmDev =
-+ dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
-+ of_node_put(fm_node);
-+
-+ /* if fm_probe() failed, no point in going further with port probing */
-+ if (p_LnxWrpFmDev == NULL)
-+ return NULL;
-+
-+ uint32_prop =
-+ (uint32_t *) of_get_property(port_node, "cell-index", &lenp);
-+ if (unlikely(uint32_prop == NULL)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ port_node->full_name));
-+ return NULL;
-+ }
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ return NULL;
-+ if (of_device_is_compatible(port_node, "fsl,fman-port-oh") ||
-+ of_device_is_compatible(port_node, "fsl,fman-v2-port-oh") ||
-+ of_device_is_compatible(port_node, "fsl,fman-v3-port-oh")) {
-+#ifndef CONFIG_FMAN_ARM
-+#ifdef CONFIG_FMAN_P3040_P4080_P5020
-+ /* On PPC FMan v2, OH ports start from cell-index 0x1 */
-+ tmp_prop -= 0x1;
-+#else
-+ /* On PPC FMan v3 (Low and High), OH ports start from
-+ * cell-index 0x2
-+ */
-+ tmp_prop -= 0x2;
-+#endif // CONFIG_FMAN_P3040_P4080_P5020
-+#endif // CONFIG_FMAN_ARM
-+
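-+ /* Worked example (illustrative arithmetic only): on a PPC FMan v3
-+ * SoC, an OH node with cell-index 0x2 ends up with tmp_prop == 0 and
-+ * is claimed below as the host-command port, while cell-index 0x3
-+ * gives tmp_prop == 1, i.e. opPorts[0]. */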
-+ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_OH_PORTS)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ port_node->full_name));
-+ return NULL;
-+ }
-+
-+#ifdef CONFIG_FMAN_P1023
-+ /* Beware, this can be done when there is only
-+ one FMan to be initialized */
-+ if (!have_oh_port) {
-+ have_oh_port = 1; /* first OP/HC port
-+ is used for host command */
-+#else
-+ /* Here it is hardcoded the use of the OH port 1
-+ (with cell-index 0) */
-+ if (tmp_prop == 0) {
-+#endif
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort;
-+ p_LnxWrpFmPortDev->id = 0;
-+ /*
-+ p_LnxWrpFmPortDev->id = *uint32_prop-1;
-+ p_LnxWrpFmPortDev->id = *uint32_prop;
-+ */
-+ p_LnxWrpFmPortDev->settings.param.portType =
-+ e_FM_PORT_TYPE_OH_HOST_COMMAND;
-+ } else {
-+ p_LnxWrpFmPortDev =
-+ &p_LnxWrpFmDev->opPorts[tmp_prop - 1];
-+ p_LnxWrpFmPortDev->id = tmp_prop - 1;
-+ p_LnxWrpFmPortDev->settings.param.portType =
-+ e_FM_PORT_TYPE_OH_OFFLINE_PARSING;
-+ }
-+ p_LnxWrpFmPortDev->settings.param.portId = tmp_prop;
-+
-+ uint32_prop =
-+ (uint32_t *) of_get_property(port_node,
-+ "fsl,qman-channel-id",
-+ &lenp);
-+ if (uint32_prop == NULL) {
-+ /*
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("missing fsl,qman-channel-id"));
-+ */
-+ XX_Print("FM warning: missing fsl,qman-channel-id"
-+ " for OH port.\n");
-+ return NULL;
-+ }
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ return NULL;
-+ p_LnxWrpFmPortDev->txCh = tmp_prop;
-+
-+ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.
-+ qmChannel = p_LnxWrpFmPortDev->txCh;
-+ } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-tx")) {
-+ tmp_prop -= 0x28;
-+ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_1G_TX_PORTS)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ port_node->full_name));
-+ return NULL;
-+ }
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[tmp_prop];
-+
-+ p_LnxWrpFmPortDev->id = tmp_prop;
-+ p_LnxWrpFmPortDev->settings.param.portId =
-+ p_LnxWrpFmPortDev->id;
-+ p_LnxWrpFmPortDev->settings.param.portType = e_FM_PORT_TYPE_TX;
-+
-+ uint32_prop = (uint32_t *) of_get_property(port_node,
-+ "fsl,qman-channel-id", &lenp);
-+ if (uint32_prop == NULL) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("missing fsl,qman-channel-id"));
-+ return NULL;
-+ }
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ return NULL;
-+ p_LnxWrpFmPortDev->txCh = tmp_prop;
-+ p_LnxWrpFmPortDev->
-+ settings.param.specificParams.nonRxParams.qmChannel =
-+ p_LnxWrpFmPortDev->txCh;
-+ } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-tx")) {
-+#ifndef CONFIG_FMAN_ARM
-+ /* On T102x, the 10G TX port IDs start from 0x28 */
-+ if (IS_T1023_T1024)
-+ tmp_prop -= 0x28;
-+ else
-+#endif
-+ tmp_prop -= 0x30;
-+
-+ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_10G_TX_PORTS)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ port_node->full_name));
-+ return NULL;
-+ }
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[tmp_prop +
-+ FM_MAX_NUM_OF_1G_TX_PORTS];
-+#ifndef CONFIG_FMAN_ARM
-+ if (IS_T1023_T1024)
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[tmp_prop];
-+#endif
-+
-+ p_LnxWrpFmPortDev->id = tmp_prop;
-+ p_LnxWrpFmPortDev->settings.param.portId =
-+ p_LnxWrpFmPortDev->id;
-+ p_LnxWrpFmPortDev->settings.param.portType =
-+ e_FM_PORT_TYPE_TX_10G;
-+ uint32_prop = (uint32_t *) of_get_property(port_node,
-+ "fsl,qman-channel-id", &lenp);
-+ if (uint32_prop == NULL) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("missing fsl,qman-channel-id"));
-+ return NULL;
-+ }
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ return NULL;
-+ p_LnxWrpFmPortDev->txCh = tmp_prop;
-+ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.
-+ qmChannel = p_LnxWrpFmPortDev->txCh;
-+ } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-rx")) {
-+ tmp_prop -= 0x08;
-+ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_1G_RX_PORTS)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ port_node->full_name));
-+ return NULL;
-+ }
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[tmp_prop];
-+
-+ p_LnxWrpFmPortDev->id = tmp_prop;
-+ p_LnxWrpFmPortDev->settings.param.portId =
-+ p_LnxWrpFmPortDev->id;
-+ p_LnxWrpFmPortDev->settings.param.portType = e_FM_PORT_TYPE_RX;
-+ if (p_LnxWrpFmDev->pcdActive)
-+ p_LnxWrpFmPortDev->defPcd = p_LnxWrpFmDev->defPcd;
-+ } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-rx")) {
-+#ifndef CONFIG_FMAN_ARM
-+ /* On T102x, the 10G RX port IDs start from 0x08 */
-+ if (IS_T1023_T1024)
-+ tmp_prop -= 0x8;
-+ else
-+#endif
-+ tmp_prop -= 0x10;
-+
-+ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_10G_RX_PORTS)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ port_node->full_name));
-+ return NULL;
-+ }
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[tmp_prop +
-+ FM_MAX_NUM_OF_1G_RX_PORTS];
-+
-+#ifndef CONFIG_FMAN_ARM
-+ if (IS_T1023_T1024)
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[tmp_prop];
-+#endif
-+
-+ p_LnxWrpFmPortDev->id = tmp_prop;
-+ p_LnxWrpFmPortDev->settings.param.portId =
-+ p_LnxWrpFmPortDev->id;
-+ p_LnxWrpFmPortDev->settings.param.portType =
-+ e_FM_PORT_TYPE_RX_10G;
-+ if (p_LnxWrpFmDev->pcdActive)
-+ p_LnxWrpFmPortDev->defPcd = p_LnxWrpFmDev->defPcd;
-+ } else {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal port type"));
-+ return NULL;
-+ }
-+
-+ _errno = of_address_to_resource(port_node, 0, &res);
-+ if (unlikely(_errno < 0)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_address_to_resource() = %d", _errno));
-+ return NULL;
-+ }
-+
-+ p_LnxWrpFmPortDev->dev = &of_dev->dev;
-+ p_LnxWrpFmPortDev->baseAddr = 0;
-+ p_LnxWrpFmPortDev->phys_baseAddr = res.start;
-+ p_LnxWrpFmPortDev->memSize = res.end + 1 - res.start;
-+ p_LnxWrpFmPortDev->settings.param.h_Fm = p_LnxWrpFmDev->h_Dev;
-+ p_LnxWrpFmPortDev->h_LnxWrpFmDev = (t_Handle) p_LnxWrpFmDev;
-+
-+ of_node_put(port_node);
-+
-+ p_LnxWrpFmPortDev->active = TRUE;
-+
-+#if defined(CONFIG_FMAN_DISABLE_OH_TO_REUSE_RESOURCES)
-+ /* In performance mode no OH port is available. */
-+ if (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ p_LnxWrpFmPortDev->active = FALSE;
-+#endif
-+
-+ return p_LnxWrpFmPortDev;
-+}
-+
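-+/* Find the "...-extended-args" child of the FMan advanced-arguments node
-+ * whose compatible string matches this port type and whose cell-index
-+ * matches portId. */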
-+struct device_node * GetFmPortAdvArgsDevTreeNode (struct device_node *fm_node,
-+ e_FmPortType portType,
-+ uint8_t portId)
-+{
-+ struct device_node *port_node;
-+ const uint32_t *uint32_prop;
-+ int lenp;
-+ char *portTypeString;
-+ uint32_t tmp_prop;
-+
-+ switch(portType) {
-+ case e_FM_PORT_TYPE_OH_OFFLINE_PARSING:
-+ portTypeString = "fsl,fman-port-op-extended-args";
-+ break;
-+ case e_FM_PORT_TYPE_TX:
-+ portTypeString = "fsl,fman-port-1g-tx-extended-args";
-+ break;
-+ case e_FM_PORT_TYPE_TX_10G:
-+ portTypeString = "fsl,fman-port-10g-tx-extended-args";
-+ break;
-+ case e_FM_PORT_TYPE_RX:
-+ portTypeString = "fsl,fman-port-1g-rx-extended-args";
-+ break;
-+ case e_FM_PORT_TYPE_RX_10G:
-+ portTypeString = "fsl,fman-port-10g-rx-extended-args";
-+ break;
-+ default:
-+ return NULL;
-+ }
-+
-+ for_each_child_of_node(fm_node, port_node) {
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "cell-index", &lenp);
-+ if (unlikely(uint32_prop == NULL)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
-+ ("of_get_property(%s, cell-index) failed",
-+ port_node->full_name));
-+ return NULL;
-+ }
-+ tmp_prop = be32_to_cpu(*uint32_prop);
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ return NULL;
-+ if ((portId == tmp_prop) &&
-+ (of_device_is_compatible(port_node, portTypeString))) {
-+ return port_node;
-+ }
-+ }
-+
-+ return NULL;
-+}
-+
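-+/* Apply optional device-tree "extended-args" settings (PFC pool depletion,
-+ * num-tnums, num-dmas, fifo-size, errors-to-discard, DSAR table sizes) to
-+ * the port before FM_PORT_Init(). */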
-+static t_Error CheckNConfigFmPortAdvArgs (t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
-+{
-+ struct device_node *fm_node, *port_node;
-+ t_Error err;
-+ t_FmPortRsrc portRsrc;
-+ const uint32_t *uint32_prop;
-+ /*const char *str_prop;*/
-+ int lenp;
-+#ifdef CONFIG_FMAN_PFC
-+ uint8_t i, id, num_pools;
-+ t_FmBufPoolDepletion poolDepletion;
-+
-+ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX ||
-+ p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX_10G) {
-+ memset(&poolDepletion, 0, sizeof(t_FmBufPoolDepletion));
-+ poolDepletion.singlePoolModeEnable = true;
-+ num_pools = p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
-+ extBufPools.numOfPoolsUsed;
-+ for (i = 0; i < num_pools; i++) {
-+ id = p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
-+ extBufPools.extBufPool[i].id;
-+ poolDepletion.poolsToConsiderForSingleMode[id] = true;
-+ }
-+
-+ for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++)
-+ poolDepletion.pfcPrioritiesEn[i] = true;
-+
-+ err = FM_PORT_ConfigPoolDepletion(p_LnxWrpFmPortDev->h_Dev,
-+ &poolDepletion);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, ("FM_PORT_ConfigPoolDepletion() failed"));
-+ }
-+#endif
-+
-+ fm_node = GetFmAdvArgsDevTreeNode(((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev)->id);
-+ if (!fm_node) /* no advanced parameters for FMan */
-+ return E_OK;
-+
-+ port_node = GetFmPortAdvArgsDevTreeNode(fm_node,
-+ p_LnxWrpFmPortDev->settings.param.portType,
-+ p_LnxWrpFmPortDev->settings.param.portId);
-+ if (!port_node) /* no advanced parameters for FMan-Port */
-+ return E_OK;
-+
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "num-tnums", &lenp);
-+ if (uint32_prop) {
-+ if (WARN_ON(lenp != sizeof(uint32_t)*2))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ portRsrc.num = be32_to_cpu(uint32_prop[0]);
-+ portRsrc.extra = be32_to_cpu(uint32_prop[1]);
-+
-+ if ((err = FM_PORT_ConfigNumOfTasks(p_LnxWrpFmPortDev->h_Dev,
-+ &portRsrc)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "num-dmas", &lenp);
-+ if (uint32_prop) {
-+ if (WARN_ON(lenp != sizeof(uint32_t)*2))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ portRsrc.num = be32_to_cpu(uint32_prop[0]);
-+ portRsrc.extra = be32_to_cpu(uint32_prop[1]);
-+
-+ if ((err = FM_PORT_ConfigNumOfOpenDmas(p_LnxWrpFmPortDev->h_Dev,
-+ &portRsrc)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "fifo-size", &lenp);
-+ if (uint32_prop) {
-+ if (WARN_ON(lenp != sizeof(uint32_t)*2))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ portRsrc.num = be32_to_cpu(uint32_prop[0]);
-+ portRsrc.extra = be32_to_cpu(uint32_prop[1]);
-+
-+ if ((err = FM_PORT_ConfigSizeOfFifo(p_LnxWrpFmPortDev->h_Dev,
-+ &portRsrc)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "errors-to-discard", &lenp);
-+ if (uint32_prop) {
-+ if (WARN_ON(lenp != sizeof(uint32_t)))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+ if ((err = FM_PORT_ConfigErrorsToDiscard(p_LnxWrpFmPortDev->h_Dev,
-+ be32_to_cpu(uint32_prop[0]))) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "ar-tables-sizes",
-+ &lenp);
-+ if (uint32_prop) {
-+
-+ if (WARN_ON(lenp != sizeof(uint32_t)*8))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+ if (WARN_ON((p_LnxWrpFmPortDev->settings.param.portType !=
-+ e_FM_PORT_TYPE_RX) &&
-+ (p_LnxWrpFmPortDev->settings.param.portType !=
-+ e_FM_PORT_TYPE_RX_10G)))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE,
-+ ("Auto Response is an Rx port attribute."));
-+
-+ memset(&p_LnxWrpFmPortDev->dsar_table_sizes, 0, sizeof(struct auto_res_tables_sizes));
-+
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_arp_entries =
-+ (uint16_t)be32_to_cpu(uint32_prop[0]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_echo_ipv4_entries =
-+ (uint16_t)be32_to_cpu(uint32_prop[1]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_ndp_entries =
-+ (uint16_t)be32_to_cpu(uint32_prop[2]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_echo_ipv6_entries =
-+ (uint16_t)be32_to_cpu(uint32_prop[3]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_ipv4_entries =
-+ (uint16_t)be32_to_cpu(uint32_prop[4]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_ipv6_entries =
-+ (uint16_t)be32_to_cpu(uint32_prop[5]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_oid_entries =
-+ (uint16_t)be32_to_cpu(uint32_prop[6]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_snmp_char =
-+ (uint16_t)be32_to_cpu(uint32_prop[7]);
-+
-+ uint32_prop = (uint32_t *)of_get_property(port_node,
-+ "ar-filters-sizes", &lenp);
-+ if (uint32_prop) {
-+ if (WARN_ON(lenp != sizeof(uint32_t)*3))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_ip_prot_filtering =
-+ (uint16_t)be32_to_cpu(uint32_prop[0]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_tcp_port_filtering =
-+ (uint16_t)be32_to_cpu(uint32_prop[1]);
-+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_udp_port_filtering =
-+ (uint16_t)be32_to_cpu(uint32_prop[2]);
-+ }
-+
-+ if ((err = FM_PORT_ConfigDsarSupport(p_LnxWrpFmPortDev->h_Dev,
-+ (t_FmPortDsarTablesSizes*)&p_LnxWrpFmPortDev->dsar_table_sizes)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+
-+ of_node_put(port_node);
-+ of_node_put(fm_node);
-+
-+ return E_OK;
-+}
-+
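-+/* Post-init step: if the device tree provides a "vsp-window" property,
-+ * allocate the port's VSP window and configure/init the default virtual
-+ * storage profile used by the Linux Ethernet driver (DPAA >= 1.1 only). */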
-+static t_Error CheckNSetFmPortAdvArgs (t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
-+{
-+ struct device_node *fm_node, *port_node;
-+ t_Error err;
-+ const uint32_t *uint32_prop;
-+ /*const char *str_prop;*/
-+ int lenp;
-+
-+ fm_node = GetFmAdvArgsDevTreeNode(((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev)->id);
-+ if (!fm_node) /* no advanced parameters for FMan */
-+ return E_OK;
-+
-+ port_node = GetFmPortAdvArgsDevTreeNode(fm_node,
-+ p_LnxWrpFmPortDev->settings.param.portType,
-+ p_LnxWrpFmPortDev->settings.param.portId);
-+ if (!port_node) /* no advanced parameters for FMan-Port */
-+ return E_OK;
-+
-+#if (DPAA_VERSION >= 11)
-+ uint32_prop = (uint32_t *)of_get_property(port_node, "vsp-window", &lenp);
-+ if (uint32_prop) {
-+ t_FmPortVSPAllocParams portVSPAllocParams;
-+ t_FmVspParams fmVspParams;
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ uint8_t portId;
-+
-+ p_LnxWrpFmDev = ((t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev);
-+
-+ if (WARN_ON(lenp != sizeof(uint32_t)*2))
-+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
-+ if ((p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_TX) ||
-+ (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_TX_10G) ||
-+ ((p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
-+ p_LnxWrpFmPortDev->settings.frag_enabled))
-+ return E_OK;
-+
-+ memset(&portVSPAllocParams, 0, sizeof(portVSPAllocParams));
-+ memset(&fmVspParams, 0, sizeof(fmVspParams));
-+
-+ portVSPAllocParams.numOfProfiles = (uint8_t)be32_to_cpu(uint32_prop[0]);
-+ portVSPAllocParams.dfltRelativeId = (uint8_t)be32_to_cpu(uint32_prop[1]);
-+ fmVspParams.h_Fm = p_LnxWrpFmDev->h_Dev;
-+
-+ fmVspParams.portParams.portType = p_LnxWrpFmPortDev->settings.param.portType;
-+ fmVspParams.portParams.portId = p_LnxWrpFmPortDev->settings.param.portId;
-+ fmVspParams.relativeProfileId = portVSPAllocParams.dfltRelativeId;
-+
-+ if (p_LnxWrpFmPortDev->settings.param.portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
-+ {
-+ portId = fmVspParams.portParams.portId;
-+ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX_10G){
-+#ifndef CONFIG_FMAN_ARM
-+ if (!(IS_T1023_T1024))
-+#endif
-+ portId += FM_MAX_NUM_OF_1G_RX_PORTS;
-+ }
-+ portVSPAllocParams.h_FmTxPort =
-+ p_LnxWrpFmDev->txPorts[portId].h_Dev;
-+ fmVspParams.liodnOffset =
-+ p_LnxWrpFmDev->rxPorts[portId].settings.param.specificParams.rxParams.liodnOffset;
-+ memcpy(&fmVspParams.extBufPools,
-+ &p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.extBufPools,
-+ sizeof(t_FmExtPools));
-+ }
-+ else
-+ {
-+ memcpy(&fmVspParams.extBufPools,
-+ &p_LnxWrpFmPortDev->opExtPools,
-+ sizeof(t_FmExtPools));
-+ }
-+
-+ if ((err = FM_PORT_VSPAlloc(p_LnxWrpFmPortDev->h_Dev,
-+ &portVSPAllocParams)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ /* We're initializing only the default VSP that is used by the Linux Ethernet driver */
-+ if ((p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
-+ !p_LnxWrpFmPortDev->opExtPools.numOfPoolsUsed)
-+ return E_OK;
-+
-+ p_LnxWrpFmPortDev->h_DfltVsp = FM_VSP_Config(&fmVspParams);
-+ if (!p_LnxWrpFmPortDev->h_DfltVsp)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("default-VSP for port!"));
-+
-+ if ((err = FM_VSP_ConfigBufferPrefixContent(p_LnxWrpFmPortDev->h_DfltVsp,
-+ &p_LnxWrpFmPortDev->buffPrefixContent)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+
-+ if ((err = FM_VSP_Init(p_LnxWrpFmPortDev->h_DfltVsp)) != E_OK)
-+ RETURN_ERROR(MINOR, err, NO_MSG);
-+ }
-+#else
-+UNUSED(err); UNUSED(uint32_prop); UNUSED(lenp);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ of_node_put(port_node);
-+ of_node_put(fm_node);
-+
-+ return E_OK;
-+}
-+
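-+/* Reserve the port's register region within the FMan resource and ioremap
-+ * it, storing the resulting virtual base address in the port settings. */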
-+static t_Error ConfigureFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev =
-+ (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ struct resource *dev_res;
-+
-+ if (!p_LnxWrpFmPortDev->active)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("FM port not configured!!!"));
-+
-+ dev_res =
-+ __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res,
-+ p_LnxWrpFmPortDev->phys_baseAddr,
-+ p_LnxWrpFmPortDev->memSize,
-+ "fman-port-hc");
-+ if (unlikely(dev_res == NULL))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
-+ ("__devm_request_region() failed"));
-+ p_LnxWrpFmPortDev->baseAddr =
-+ PTR_TO_UINT(devm_ioremap
-+ (p_LnxWrpFmDev->dev,
-+ p_LnxWrpFmPortDev->phys_baseAddr,
-+ p_LnxWrpFmPortDev->memSize));
-+ if (unlikely(p_LnxWrpFmPortDev->baseAddr == 0))
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE,
-+ ("devm_ioremap() failed"));
-+
-+ p_LnxWrpFmPortDev->settings.param.baseAddr =
-+ p_LnxWrpFmPortDev->baseAddr;
-+
-+ return E_OK;
-+}
-+
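-+/* FM_PORT_Config() the port, replay any queued advanced-configuration
-+ * entries, apply device-tree extended args, then FM_PORT_Init() it; on
-+ * Rx/OH ports some frame errors are configured to be discarded first. */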
-+static t_Error InitFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
-+{
-+#define MY_ADV_CONFIG_CHECK_END \
-+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,\
-+ ("Advanced configuration routine"));\
-+ if (errCode != E_OK)\
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);\
-+ }
-+
-+ int i = 0;
-+
-+ if (!p_LnxWrpFmPortDev->active || p_LnxWrpFmPortDev->h_Dev)
-+ return E_INVALID_STATE;
-+
-+ p_LnxWrpFmPortDev->h_Dev =
-+ FM_PORT_Config(&p_LnxWrpFmPortDev->settings.param);
-+ if (p_LnxWrpFmPortDev->h_Dev == NULL)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-port"));
-+
-+#ifndef FM_QMI_NO_DEQ_OPTIONS_SUPPORT
-+ if ((p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_TX_10G)
-+ || (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_TX)) {
-+ t_Error errCode = E_OK;
-+ errCode =
-+ FM_PORT_ConfigDeqHighPriority(p_LnxWrpFmPortDev->h_Dev,
-+ TRUE);
-+ if (errCode != E_OK)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+ errCode =
-+ FM_PORT_ConfigDeqPrefetchOption(p_LnxWrpFmPortDev->h_Dev,
-+ e_FM_PORT_DEQ_FULL_PREFETCH);
-+ if (errCode
-+ != E_OK)
-+ RETURN_ERROR(MAJOR, errCode, NO_MSG);
-+ }
-+#endif /* !FM_QMI_NO_DEQ_OPTIONS_SUPPORT */
-+
-+#ifndef CONFIG_FMAN_ARM
-+#ifdef FM_BCB_ERRATA_BMI_SW001
-+/* Configure BCB workaround on Rx ports, only for B4860 rev1 */
-+#define SVR_SECURITY_MASK 0x00080000
-+#define SVR_PERSONALITY_MASK 0x0000FF00
-+#define SVR_VER_IGNORE_MASK (SVR_SECURITY_MASK | SVR_PERSONALITY_MASK)
-+#define SVR_B4860_REV1_VALUE 0x86800010
-+
-+ if ((p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_RX_10G) ||
-+ (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_RX)) {
-+ unsigned int svr;
-+
-+ svr = mfspr(SPRN_SVR);
-+
-+ if ((svr & ~SVR_VER_IGNORE_MASK) == SVR_B4860_REV1_VALUE)
-+ FM_PORT_ConfigBCBWorkaround(p_LnxWrpFmPortDev->h_Dev);
-+ }
-+#endif /* FM_BCB_ERRATA_BMI_SW001 */
-+#endif /* CONFIG_FMAN_ARM */
-+/* Call the driver's advanced configuration routines, if requested:
-+ Compare the function pointer of each entry to the available routines,
-+ and invoke the matching routine with proper casting of arguments. */
-+ while (p_LnxWrpFmPortDev->settings.advConfig[i].p_Function
-+ && (i < FM_MAX_NUM_OF_ADV_SETTINGS)) {
-+
-+/* TODO: Change this MACRO */
-+ ADV_CONFIG_CHECK_START(
-+ &(p_LnxWrpFmPortDev->settings.advConfig[i]))
-+
-+ ADV_CONFIG_CHECK(p_LnxWrpFmPortDev->h_Dev,
-+ FM_PORT_ConfigBufferPrefixContent,
-+ NCSW_PARAMS(1,
-+ (t_FmBufferPrefixContent *)))
-+
-+ if ((p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
-+ (p_LnxWrpFmPortDev->settings.frag_enabled == TRUE)) {
-+
-+ ADV_CONFIG_CHECK(p_LnxWrpFmPortDev->h_Dev,
-+ FM_PORT_ConfigExtBufPools,
-+ NCSW_PARAMS(1, (t_FmExtPools *)))
-+
-+ /* this define contains an else */
-+ MY_ADV_CONFIG_CHECK_END
-+ }
-+
-+ /* Advance to next advanced configuration entry */
-+ i++;
-+ }
-+
-+
-+ if ((p_LnxWrpFmPortDev->settings.param.portType != e_FM_PORT_TYPE_TX) &&
-+ (p_LnxWrpFmPortDev->settings.param.portType != e_FM_PORT_TYPE_TX_10G)) {
-+ if (FM_PORT_ConfigErrorsToDiscard(p_LnxWrpFmPortDev->h_Dev, (FM_PORT_FRM_ERR_IPRE |
-+ FM_PORT_FRM_ERR_IPR_NCSP |
-+ FM_PORT_FRM_ERR_CLS_DISCARD)) !=E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+ }
-+
-+ if (CheckNConfigFmPortAdvArgs(p_LnxWrpFmPortDev) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (FM_PORT_Init(p_LnxWrpFmPortDev->h_Dev) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+ if (CheckNSetFmPortAdvArgs(p_LnxWrpFmPortDev) != E_OK)
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+
-+/* FMan Fifo sizes behind the scene:
-+ * Using the following formulae (*), under a set of simplifying assumptions (.):
-+ * . all ports are configured in Normal Mode (rather than Independent Mode)
-+ * . the DPAA Eth driver allocates buffers of size:
-+ * . MAXFRM + NET_IP_ALIGN + DPA_PRIV_DATA_SIZE + DPA_PARSE_RESULTS_SIZE
-+ * + DPA_HASH_RESULTS_SIZE, i.e.:
-+ * MAXFRM + 2 + 16 + sizeof(t_FmPrsResult) + 16, i.e.:
-+ * MAXFRM + 66
-+ * . excessive buffer pools not accounted for
-+ *
-+ * * for Rx ports on P4080:
-+ * . IFSZ = ceil(max(FMBM_EBMPI[PBS]) / 256) * 256 + 7 * 256
-+ * . no internal frame offset (FMBM_RIM[FOF] == 0) - otherwise,
-+ * add up to 256 to the above
-+ *
-+ * * for Rx ports on P1023:
-+ * . IFSZ = ceil(second_largest(FMBM_EBMPI[PBS] / 256)) * 256 + 7 * 256,
-+ * if at least 2 bpools are configured
-+ * . IFSZ = 8 * 256, if only a single bpool is configured
-+ *
-+ * * for Tx ports:
-+ * . IFSZ = ceil(frame_size / 256) * 256 + 3 * 256
-+ * + FMBM_TFP[DPDE] * 256, i.e.:
-+ * IFSZ = ceil(MAXFRM / 256) * 256 + 3 x 256 + FMBM_TFP[DPDE] * 256
-+ *
-+ * * for OH ports on P4080:
-+ * . IFSZ = ceil(frame_size / 256) * 256 + 1 * 256 + FMBM_PP[MXT] * 256
-+ * * for OH ports on P1023:
-+ * . IFSZ = ceil(frame_size / 256) * 256 + 3 * 256 + FMBM_TFP[DPDE] * 256
-+ * * for both P4080 and P1023:
-+ * . (conservative decisions, assuming that BMI must bring the entire
-+ * frame, not only the frame header)
-+ * . no internal frame offset (FMBM_OIM[FOF] == 0) - otherwise,
-+ * add up to 256 to the above
-+ *
-+ * . for P4080/P5020/P3041/P2040, DPDE is:
-+ * > 0 or 1, for 1Gb ports, HW default: 0
-+ * > 2..7 (recommended: 3..7) for 10Gb ports, HW default: 3
-+ * . for P1023, DPDE should be 1
-+ *
-+ * . for P1023, MXT is in range (0..31)
-+ * . for P4080, MXT is in range (0..63)
-+ *
-+ */
-+#if 0
-+ if ((p_LnxWrpFmPortDev->defPcd != e_NO_PCD) &&
-+ (InitFmPort3TupleDefPcd(p_LnxWrpFmPortDev) != E_OK))
-+ RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
-+#endif
-+ return E_OK;
-+}
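
[Editor's note] For concreteness, the Tx formula in the FIFO-size comment above can be worked through with assumed values (MAXFRM = 1522, DPDE = 3, the stated 10G hardware default). This is an illustrative sketch, not part of the driver:

    /* Hypothetical helper, not part of the driver: Tx internal FIFO size
     * per the formula above, with sizes in bytes and DPDE as configured
     * in FMBM_TFP.
     */
    static unsigned int tx_ifsz(unsigned int maxfrm, unsigned int dpde)
    {
            /* IFSZ = ceil(MAXFRM / 256) * 256 + 3 * 256 + DPDE * 256 */
            return ((maxfrm + 255) / 256) * 256 + 3 * 256 + dpde * 256;
    }

    /* tx_ifsz(1522, 3):
     *   ceil(1522 / 256) * 256 = 6 * 256 = 1536
     *   1536 + 768 + 768       = 3072 bytes
     */
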
-+
-+void fm_set_rx_port_params(struct fm_port *port,
-+ struct fm_port_params *params)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) port;
-+ int i;
-+
-+ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.errFqid =
-+ params->errq;
-+ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.dfltFqid =
-+ params->defq;
-+ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.extBufPools.
-+ numOfPoolsUsed = params->num_pools;
-+ for (i = 0; i < params->num_pools; i++) {
-+ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
-+ extBufPools.extBufPool[i].id =
-+ params->pool_param[i].id;
-+ p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.
-+ extBufPools.extBufPool[i].size =
-+ params->pool_param[i].size;
-+ }
-+
-+ p_LnxWrpFmPortDev->buffPrefixContent.privDataSize =
-+ params->priv_data_size;
-+ p_LnxWrpFmPortDev->buffPrefixContent.passPrsResult =
-+ params->parse_results;
-+ p_LnxWrpFmPortDev->buffPrefixContent.passHashResult =
-+ params->hash_results;
-+ p_LnxWrpFmPortDev->buffPrefixContent.passTimeStamp =
-+ params->time_stamp;
-+ p_LnxWrpFmPortDev->buffPrefixContent.dataAlign =
-+ params->data_align;
-+ p_LnxWrpFmPortDev->buffPrefixContent.manipExtraSpace =
-+ params->manip_extra_space;
-+
-+ ADD_ADV_CONFIG_START(p_LnxWrpFmPortDev->settings.advConfig,
-+ FM_MAX_NUM_OF_ADV_SETTINGS)
-+
-+ ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigBufferPrefixContent,
-+ ARGS(1,
-+ (&p_LnxWrpFmPortDev->
-+ buffPrefixContent)));
-+
-+ ADD_ADV_CONFIG_END InitFmPortDev(p_LnxWrpFmPortDev);
-+}
-+EXPORT_SYMBOL(fm_set_rx_port_params);
-+
-+/* this function is called from oh_probe as well, thus it contains oh port
-+ * specific parameters (make sure everything is checked) */
-+void fm_set_tx_port_params(struct fm_port *port,
-+ struct fm_port_params *params)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) port;
-+
-+ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.errFqid =
-+ params->errq;
-+ p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.
-+ dfltFqid = params->defq;
-+
-+ p_LnxWrpFmPortDev->buffPrefixContent.privDataSize =
-+ params->priv_data_size;
-+ p_LnxWrpFmPortDev->buffPrefixContent.passPrsResult =
-+ params->parse_results;
-+ p_LnxWrpFmPortDev->buffPrefixContent.passHashResult =
-+ params->hash_results;
-+ p_LnxWrpFmPortDev->buffPrefixContent.passTimeStamp =
-+ params->time_stamp;
-+ p_LnxWrpFmPortDev->settings.frag_enabled =
-+ params->frag_enable;
-+ p_LnxWrpFmPortDev->buffPrefixContent.dataAlign =
-+ params->data_align;
-+ p_LnxWrpFmPortDev->buffPrefixContent.manipExtraSpace =
-+ params->manip_extra_space;
-+
-+ ADD_ADV_CONFIG_START(p_LnxWrpFmPortDev->settings.advConfig,
-+ FM_MAX_NUM_OF_ADV_SETTINGS)
-+
-+ ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigBufferPrefixContent,
-+ ARGS(1,
-+ (&p_LnxWrpFmPortDev->
-+ buffPrefixContent)));
-+
-+ /* oh port specific parameter (for fragmentation only) */
-+ if ((p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_OH_OFFLINE_PARSING) &&
-+ params->num_pools) {
-+ int i;
-+
-+ p_LnxWrpFmPortDev->opExtPools.numOfPoolsUsed = params->num_pools;
-+ for (i = 0; i < params->num_pools; i++) {
-+ p_LnxWrpFmPortDev->opExtPools.extBufPool[i].id = params->pool_param[i].id;
-+ p_LnxWrpFmPortDev->opExtPools.extBufPool[i].size = params->pool_param[i].size;
-+ }
-+
-+ if (p_LnxWrpFmPortDev->settings.frag_enabled)
-+ ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigExtBufPools,
-+ ARGS(1, (&p_LnxWrpFmPortDev->opExtPools)));
-+ }
-+
-+ ADD_ADV_CONFIG_END InitFmPortDev(p_LnxWrpFmPortDev);
-+}
-+EXPORT_SYMBOL(fm_set_tx_port_params);
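
[Editor's note] fm_set_rx_port_params() and fm_set_tx_port_params() do not apply their settings immediately; they append FM_PORT_Config* calls to settings.advConfig through the ADD_ADV_CONFIG_START / ADD_ADV_CONFIG_NO_RET macros, and InitFmPortDev() replays that list (via ADV_CONFIG_CHECK) before FM_PORT_Init(). The following is a simplified, standalone sketch of that deferred-call pattern; the structure and names are illustrative, not the SDK's actual macro expansion:

    /* Illustrative only: each entry records a configuration routine and
     * its argument; setters queue entries, and the init path replays them
     * once the port handle exists.
     */
    struct adv_config_entry {
            int (*fn)(void *port_handle, void *arg);  /* config routine */
            void *arg;                                /* its argument   */
    };

    #define ADV_CONFIG_MAX 8

    static struct adv_config_entry adv_config[ADV_CONFIG_MAX];
    static int adv_config_count;

    /* queued by the fm_set_*_port_params() analogue */
    static void adv_config_add(int (*fn)(void *, void *), void *arg)
    {
            if (adv_config_count < ADV_CONFIG_MAX) {
                    adv_config[adv_config_count].fn = fn;
                    adv_config[adv_config_count].arg = arg;
                    adv_config_count++;
            }
    }

    /* replayed by the InitFmPortDev() analogue, before the final init */
    static int adv_config_replay(void *port_handle)
    {
            int i, err;

            for (i = 0; i < adv_config_count; i++) {
                    err = adv_config[i].fn(port_handle, adv_config[i].arg);
                    if (err)
                            return err;   /* stop on the first failure */
            }
            return 0;
    }
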
-+
-+void fm_mac_set_handle(t_Handle h_lnx_wrp_fm_dev,
-+ t_Handle h_fm_mac,
-+ int mac_id)
-+{
-+ t_LnxWrpFmDev *p_lnx_wrp_fm_dev = (t_LnxWrpFmDev *)h_lnx_wrp_fm_dev;
-+
-+ p_lnx_wrp_fm_dev->macs[mac_id].h_Dev = h_fm_mac;
-+ p_lnx_wrp_fm_dev->macs[mac_id].h_LnxWrpFmDev = h_lnx_wrp_fm_dev;
-+}
-+EXPORT_SYMBOL(fm_mac_set_handle);
-+
-+static void LnxwrpFmPcdDevExceptionsCb(t_Handle h_App,
-+ e_FmPcdExceptions exception)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_App;
-+
-+ ASSERT_COND(p_LnxWrpFmDev);
-+
-+ DBG(INFO, ("got fm-pcd exception %d", exception));
-+
-+ /* do nothing */
-+ UNUSED(exception);
-+}
-+
-+static void LnxwrpFmPcdDevIndexedExceptionsCb(t_Handle h_App,
-+ e_FmPcdExceptions exception,
-+ uint16_t index)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_App;
-+
-+ ASSERT_COND(p_LnxWrpFmDev);
-+
-+ DBG(INFO,
-+ ("got fm-pcd-indexed exception %d, indx %d", exception, index));
-+
-+ /* do nothing */
-+ UNUSED(exception);
-+ UNUSED(index);
-+}
-+
-+static t_Error InitFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+ spin_lock_init(&lock);
-+
-+ if (p_LnxWrpFmDev->pcdActive) {
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort;
-+ t_FmPcdParams fmPcdParams;
-+ t_Error err;
-+
-+ memset(&fmPcdParams, 0, sizeof(fmPcdParams));
-+ fmPcdParams.h_Fm = p_LnxWrpFmDev->h_Dev;
-+ fmPcdParams.prsSupport = p_LnxWrpFmDev->prsActive;
-+ fmPcdParams.kgSupport = p_LnxWrpFmDev->kgActive;
-+ fmPcdParams.plcrSupport = p_LnxWrpFmDev->plcrActive;
-+ fmPcdParams.ccSupport = p_LnxWrpFmDev->ccActive;
-+ fmPcdParams.numOfSchemes = FM_PCD_KG_NUM_OF_SCHEMES;
-+
-+#ifndef CONFIG_GUEST_PARTITION
-+ fmPcdParams.f_Exception = LnxwrpFmPcdDevExceptionsCb;
-+ if (fmPcdParams.kgSupport)
-+ fmPcdParams.f_ExceptionId =
-+ LnxwrpFmPcdDevIndexedExceptionsCb;
-+ fmPcdParams.h_App = p_LnxWrpFmDev;
-+#endif /* !CONFIG_GUEST_PARTITION */
-+
-+#ifdef CONFIG_MULTI_PARTITION_SUPPORT
-+ fmPcdParams.numOfSchemes = 0;
-+ fmPcdParams.numOfClsPlanEntries = 0;
-+ fmPcdParams.partitionId = 0;
-+#endif /* CONFIG_MULTI_PARTITION_SUPPORT */
-+ fmPcdParams.useHostCommand = TRUE;
-+
-+ p_LnxWrpFmDev->hc_tx_fq =
-+ FqAlloc(p_LnxWrpFmDev,
-+ 0,
-+ QMAN_FQ_FLAG_TO_DCPORTAL,
-+ p_LnxWrpFmPortDev->txCh, 0);
-+ if (!p_LnxWrpFmDev->hc_tx_fq)
-+ RETURN_ERROR(MAJOR, E_NULL_POINTER,
-+ ("Frame queue allocation failed..."));
-+
-+ p_LnxWrpFmDev->hc_tx_conf_fq =
-+ FqAlloc(p_LnxWrpFmDev,
-+ 0,
-+ QMAN_FQ_FLAG_NO_ENQUEUE,
-+ p_LnxWrpFmDev->hcCh, 1);
-+ if (!p_LnxWrpFmDev->hc_tx_conf_fq)
-+ RETURN_ERROR(MAJOR, E_NULL_POINTER,
-+ ("Frame queue allocation failed..."));
-+
-+ p_LnxWrpFmDev->hc_tx_err_fq =
-+ FqAlloc(p_LnxWrpFmDev,
-+ 0,
-+ QMAN_FQ_FLAG_NO_ENQUEUE,
-+ p_LnxWrpFmDev->hcCh, 2);
-+ if (!p_LnxWrpFmDev->hc_tx_err_fq)
-+ RETURN_ERROR(MAJOR, E_NULL_POINTER,
-+ ("Frame queue allocation failed..."));
-+
-+ fmPcdParams.hc.portBaseAddr = p_LnxWrpFmPortDev->baseAddr;
-+ fmPcdParams.hc.portId =
-+ p_LnxWrpFmPortDev->settings.param.portId;
-+ fmPcdParams.hc.liodnBase =
-+ p_LnxWrpFmPortDev->settings.param.liodnBase;
-+ fmPcdParams.hc.errFqid =
-+ qman_fq_fqid(p_LnxWrpFmDev->hc_tx_err_fq);
-+ fmPcdParams.hc.confFqid =
-+ qman_fq_fqid(p_LnxWrpFmDev->hc_tx_conf_fq);
-+ fmPcdParams.hc.qmChannel = p_LnxWrpFmPortDev->txCh;
-+ fmPcdParams.hc.f_QmEnqueue = QmEnqueueCB;
-+ fmPcdParams.hc.h_QmArg = (t_Handle) p_LnxWrpFmDev;
-+
-+ p_LnxWrpFmDev->h_PcdDev = FM_PCD_Config(&fmPcdParams);
-+ if (!p_LnxWrpFmDev->h_PcdDev)
-+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM PCD!"));
-+
-+ err =
-+ FM_PCD_ConfigPlcrNumOfSharedProfiles(p_LnxWrpFmDev->h_PcdDev,
-+ LNXWRP_FM_NUM_OF_SHARED_PROFILES);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ err = FM_PCD_Init(p_LnxWrpFmDev->h_PcdDev);
-+ if (err != E_OK)
-+ RETURN_ERROR(MAJOR, err, NO_MSG);
-+
-+ if (p_LnxWrpFmDev->err_irq == 0) {
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC,
-+ FALSE);
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW,
-+ FALSE);
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR,
-+ FALSE);
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC,
-+ FALSE);
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC,
-+ FALSE);
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE,
-+ FALSE);
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE,
-+ FALSE);
-+ FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev,
-+ e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC,
-+ FALSE);
-+ }
-+ }
-+
-+ return E_OK;
-+}
-+
-+void FreeFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
-+{
-+
-+ if (p_LnxWrpFmDev->h_PcdDev)
-+ FM_PCD_Free(p_LnxWrpFmDev->h_PcdDev);
-+
-+ if (p_LnxWrpFmDev->hc_tx_err_fq)
-+ FqFree(p_LnxWrpFmDev->hc_tx_err_fq);
-+
-+ if (p_LnxWrpFmDev->hc_tx_conf_fq)
-+ FqFree(p_LnxWrpFmDev->hc_tx_conf_fq);
-+
-+ if (p_LnxWrpFmDev->hc_tx_fq)
-+ FqFree(p_LnxWrpFmDev->hc_tx_fq);
-+}
-+
-+static void FreeFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev =
-+ (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+
-+ if (!p_LnxWrpFmPortDev->active)
-+ return;
-+
-+ if (p_LnxWrpFmPortDev->h_Dev)
-+ FM_PORT_Free(p_LnxWrpFmPortDev->h_Dev);
-+
-+ devm_iounmap(p_LnxWrpFmDev->dev,
-+ UINT_TO_PTR(p_LnxWrpFmPortDev->baseAddr));
-+ __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res,
-+ p_LnxWrpFmPortDev->phys_baseAddr,
-+ p_LnxWrpFmPortDev->memSize);
-+}
-+
-+static int /*__devinit*/ fm_port_probe(struct platform_device *of_dev)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ struct device *dev;
-+
-+ dev = &of_dev->dev;
-+
-+ p_LnxWrpFmPortDev = ReadFmPortDevTreeNode(of_dev);
-+ if (p_LnxWrpFmPortDev == NULL)
-+ return -EIO;
-+ /* Port can be inactive, thus will not be probed:
-+ - in performance mode, OH ports are disabled
-+ ...
-+ */
-+ if (!p_LnxWrpFmPortDev->active)
-+ return 0;
-+
-+ if (ConfigureFmPortDev(p_LnxWrpFmPortDev) != E_OK)
-+ return -EIO;
-+
-+ dev_set_drvdata(dev, p_LnxWrpFmPortDev);
-+
-+ if (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_OH_HOST_COMMAND)
-+ InitFmPcdDev((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev);
-+
-+ p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+
-+ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d",
-+ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id + DEV_FM_RX_PORTS_MINOR_BASE;
-+ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_RX_10G) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d",
-+ p_LnxWrpFmDev->name,
-+ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_RX_PORTS);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_RX_PORTS +
-+ DEV_FM_RX_PORTS_MINOR_BASE;
-+#ifndef CONFIG_FMAN_ARM
-+ if (IS_T1023_T1024) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d",
-+ p_LnxWrpFmDev->name,
-+ p_LnxWrpFmPortDev->id);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id +
-+ DEV_FM_RX_PORTS_MINOR_BASE;
-+ }
-+#endif
-+ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_TX) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d",
-+ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id + DEV_FM_TX_PORTS_MINOR_BASE;
-+ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_TX_10G) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d",
-+ p_LnxWrpFmDev->name,
-+ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_TX_PORTS);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_TX_PORTS +
-+ DEV_FM_TX_PORTS_MINOR_BASE;
-+#ifndef CONFIG_FMAN_ARM
-+ if (IS_T1023_T1024) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d",
-+ p_LnxWrpFmDev->name,
-+ p_LnxWrpFmPortDev->id);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id +
-+ DEV_FM_TX_PORTS_MINOR_BASE;
-+ }
-+#endif
-+ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_OH_HOST_COMMAND) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-oh%d",
-+ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id + DEV_FM_OH_PORTS_MINOR_BASE;
-+ } else if (p_LnxWrpFmPortDev->settings.param.portType ==
-+ e_FM_PORT_TYPE_OH_OFFLINE_PARSING) {
-+ Sprint(p_LnxWrpFmPortDev->name, "%s-port-oh%d",
-+ p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id + 1);
-+ p_LnxWrpFmPortDev->minor =
-+ p_LnxWrpFmPortDev->id + 1 +
-+ DEV_FM_OH_PORTS_MINOR_BASE;
-+ }
-+
-+ device_create(p_LnxWrpFmDev->fm_class, NULL,
-+ MKDEV(p_LnxWrpFmDev->major, p_LnxWrpFmPortDev->minor),
-+ NULL, p_LnxWrpFmPortDev->name);
-+
-+ /* create sysfs entries for stats and regs */
-+
-+ if (fm_port_sysfs_create(dev) != 0) {
-+ FreeFmPortDev(p_LnxWrpFmPortDev);
-+ REPORT_ERROR(MAJOR, E_INVALID_STATE,
-+ ("Unable to create sys entry - fm port!!!"));
-+ return -EIO;
-+ }
-+
-+#ifdef FM_TX_INVALID_ECC_ERRATA_10GMAC_A009
-+ FM_DisableRamsEcc(p_LnxWrpFmDev->h_Dev);
-+#endif /* FM_TX_INVALID_ECC_ERRATA_10GMAC_A009 */
-+
-+ DBG(TRACE, ("%s probed", p_LnxWrpFmPortDev->name));
-+
-+ return 0;
-+}
-+
-+static int fm_port_remove(struct platform_device *of_dev)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ struct device *dev;
-+
-+ dev = &of_dev->dev;
-+ p_LnxWrpFmPortDev = dev_get_drvdata(dev);
-+
-+ fm_port_sysfs_destroy(dev);
-+
-+ p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ device_destroy(p_LnxWrpFmDev->fm_class,
-+ MKDEV(p_LnxWrpFmDev->major, p_LnxWrpFmPortDev->minor));
-+
-+ FreeFmPortDev(p_LnxWrpFmPortDev);
-+
-+ dev_set_drvdata(dev, NULL);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id fm_port_match[] = {
-+ {
-+ .compatible = "fsl,fman-port-oh"},
-+ {
-+ .compatible = "fsl,fman-v2-port-oh"},
-+ {
-+ .compatible = "fsl,fman-v3-port-oh"},
-+ {
-+ .compatible = "fsl,fman-port-1g-rx"},
-+ {
-+ .compatible = "fsl,fman-port-10g-rx"},
-+ {
-+ .compatible = "fsl,fman-port-1g-tx"},
-+ {
-+ .compatible = "fsl,fman-port-10g-tx"},
-+ {}
-+};
-+
-+#ifndef MODULE
-+MODULE_DEVICE_TABLE(of, fm_port_match);
-+#endif /* !MODULE */
-+
-+static struct platform_driver fm_port_driver = {
-+
-+ .driver = {
-+ .name = "fsl-fman-port",
-+ .of_match_table = fm_port_match,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = fm_port_probe,
-+ .remove = fm_port_remove
-+};
-+
-+
-+t_Error LNXWRP_FM_Port_Init(void)
-+{
-+ /* Register to the DTB for basic FM port API */
-+ if (platform_driver_register(&fm_port_driver))
-+ return E_NO_DEVICE;
-+
-+ return E_OK;
-+}
-+
-+void LNXWRP_FM_Port_Free(void)
-+{
-+ platform_driver_unregister(&fm_port_driver);
-+}
-+
-+static int __init __cold fm_port_load(void)
-+{
-+ if (LNXWRP_FM_Port_Init() != E_OK) {
-+ printk(KERN_CRIT "Failed to init FM Ports wrapper!\n");
-+ return -ENODEV;
-+ }
-+
-+ printk(KERN_CRIT "Freescale FM Ports module\n");
-+
-+ return 0;
-+}
-+
-+static void __exit __cold fm_port_unload(void)
-+{
-+ LNXWRP_FM_Port_Free();
-+}
-+
-+module_init(fm_port_load);
-+module_exit(fm_port_unload);
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c
-@@ -0,0 +1,4854 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_ioctls_fm.c
-+ @Author Shlomi Gridish
-+ @Description FM Linux wrapper functions.
-+*/
-+
-+/* Linux Headers ------------------- */
-+#include <linux/version.h>
-+
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#include <config/modversions.h>
-+#endif /* MODVERSIONS */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/fs.h>
-+#include <linux/cdev.h>
-+#include <linux/device.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/io.h>
-+#include <linux/ioport.h>
-+#include <linux/of_platform.h>
-+#include <linux/uaccess.h>
-+#include <asm/errno.h>
-+#ifndef CONFIG_FMAN_ARM
-+#include <sysdev/fsl_soc.h>
-+#include <linux/fsl/svr.h>
-+#endif
-+
-+#if defined(CONFIG_COMPAT)
-+#include <linux/compat.h>
-+#endif
-+
-+#include "part_ext.h"
-+#include "fm_ioctls.h"
-+#include "fm_pcd_ioctls.h"
-+#include "fm_port_ioctls.h"
-+#include "fm_vsp_ext.h"
-+
-+#ifndef CONFIG_FMAN_ARM
-+#define IS_T1023_T1024 (SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1024 || \
-+ SVR_SOC_VER(mfspr(SPRN_SVR)) == SVR_T1023)
-+#endif
-+
-+#define __ERR_MODULE__ MODULE_FM
-+
-+#if defined(CONFIG_COMPAT)
-+#include "lnxwrp_ioctls_fm_compat.h"
-+#endif
-+
-+#include "lnxwrp_fm.h"
-+
-+#define CMP_IOC_DEFINE(def) (IOC_##def != def)
-+
-+/* fm_pcd_ioctls.h === fm_pcd_ext.h assertions */
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_PRIVATE_HDRS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_PRS_NUM_OF_HDRS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_GENERIC_REGS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_EXTRACT_MASKS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_DEFAULT_GROUPS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_PRS_NUM_OF_LABELS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_SW_PRS_SIZE)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if DPAA_VERSION >= 11
-+#if CMP_IOC_DEFINE(FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_TREES)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_GROUPS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_UNITS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_KEYS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_SIZE_OF_KEY)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(FM_PCD_LAST_KEY_INDEX)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+/* net_ioctls.h === net_ext.h assertions */
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPP_PID)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPP_COMPRESSED)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPoE_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPMUX_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ETH_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPv4_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPv6_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ICMP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IGMP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_TCP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SCTP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_DCCP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_UDP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPHC_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv2_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_VLAN_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_LLC_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_NLPID_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SNAP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS)
-+#warning Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ARP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_RFC2684_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_GRE_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MINENCAP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MACSEC_ALL_FIELDS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
-+/* fm_ioctls.h === fm_ext.h assertions */
-+#if CMP_IOC_DEFINE(FM_MAX_NUM_OF_VALID_PORTS)
-+#error Error: please synchronize IOC_ defines!
-+#endif
-+
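
[Editor's note] The block of checks above hinges on CMP_IOC_DEFINE() expanding, through token pasting, into an integer comparison the preprocessor can evaluate inside #if: if the exported IOC_ copy of a constant drifts from the internal definition, the build fails. A minimal standalone illustration of the same idiom, using made-up constant names:

    /* Hypothetical example of the CMP_IOC_DEFINE idiom: keep the ioctl-API
     * copy of a numeric constant in sync with the internal definition at
     * build time.  IOC_##def pastes the IOC_ prefix onto the name.
     */
    #define FOO_MAX_ITEMS      16   /* internal definition             */
    #define IOC_FOO_MAX_ITEMS  16   /* copy exported via the ioctl API */

    #define CMP_IOC_DEFINE(def) (IOC_##def != def)

    #if CMP_IOC_DEFINE(FOO_MAX_ITEMS)
    #error Error: please synchronize IOC_ defines!
    #endif
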
-+void LnxWrpPCDIOCTLTypeChecking(void)
-+{
-+ /* fm_ext.h == fm_ioctls.h */
-+ ASSERT_COND(sizeof(ioc_fm_port_bandwidth_params) == sizeof(t_FmPortsBandwidthParams));
-+ ASSERT_COND(sizeof(ioc_fm_revision_info_t) == sizeof(t_FmRevisionInfo));
-+
-+ /* fm_pcd_ext.h == fm_pcd_ioctls.h */
-+ /*ioc_fm_pcd_counters_params_t : NOT USED */
-+ /*ioc_fm_pcd_exception_params_t : private */
-+#if (DPAA_VERSION >= 11)
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_capwap_params_t) == sizeof(t_FmPcdManipFragCapwapParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_capwap_params_t) == sizeof(t_FmPcdManipReassemCapwapParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t) == sizeof(t_FmPcdManipHdrInsrtByHdrParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_ip_params_t) == sizeof(t_FmPcdManipHdrInsrtIpParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_t) == sizeof(t_FmPcdManipHdrInsrt));
-+ ASSERT_COND(sizeof(ioc_fm_manip_hdr_info_t) == sizeof(t_FmManipHdrInfo));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t) == sizeof(t_FmPcdManipHdrRmvByHdrParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_special_offload_capwap_params_t) == sizeof(t_FmPcdManipSpecialOffloadCapwapParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_capwap_stats_t) == sizeof(t_FmPcdManipFragCapwapStats));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_capwap_stats_t) == sizeof(t_FmPcdManipReassemCapwapStats));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_params_t) == sizeof(t_FmPcdManipFragParams));
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ ASSERT_COND(sizeof(ioc_fm_pcd_prs_label_params_t) == sizeof(t_FmPcdPrsLabelParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_prs_sw_params_t) == sizeof(t_FmPcdPrsSwParams));
-+ /*ioc_fm_pcd_kg_dflt_value_params_t : private */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_hdr_protocol_opt_u) == sizeof(u_FmPcdHdrProtocolOpt));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_fields_u) == sizeof(t_FmPcdFields));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_from_hdr_t) == sizeof(t_FmPcdFromHdr));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_from_field_t) == sizeof(t_FmPcdFromField));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_distinction_unit_t) == sizeof(t_FmPcdDistinctionUnit));
-+
-+#if defined(CONFIG_ARM64)
-+ /* different alignment */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_net_env_params_t) == sizeof(t_FmPcdNetEnvParams) + sizeof(void *) + 4);
-+#else
-+#if !defined(CONFIG_COMPAT)
-+ /* different alignment */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_net_env_params_t) == sizeof(t_FmPcdNetEnvParams) + sizeof(void *));
-+#endif
-+#endif
-+ ASSERT_COND(sizeof(ioc_fm_pcd_extract_entry_t) == sizeof(t_FmPcdExtractEntry));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_extract_mask_t) == sizeof(t_FmPcdKgExtractMask));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_extract_dflt_t) == sizeof(t_FmPcdKgExtractDflt));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t) == sizeof(t_FmPcdKgKeyExtractAndHashParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_extracted_or_params_t) == sizeof(t_FmPcdKgExtractedOrParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_scheme_counter_t) == sizeof(t_FmPcdKgSchemeCounter));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_plcr_profile_t) == sizeof(t_FmPcdKgPlcrProfile));
-+#if (DPAA_VERSION >= 11)
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_storage_profile_t) == sizeof(t_FmPcdKgStorageProfile));
-+#endif
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_cc_t) == sizeof(t_FmPcdKgCc));
-+#if !defined(CONFIG_COMPAT)
-+ /* different alignment */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_scheme_params_t) == sizeof(t_FmPcdKgSchemeParams) + sizeof(void *));
-+#endif
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_cc_params_t) == sizeof(t_FmPcdCcNextCcParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_plcr_params_t) == sizeof(t_FmPcdCcNextPlcrParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_enqueue_params_t) == sizeof(t_FmPcdCcNextEnqueueParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_kg_params_t) == sizeof(t_FmPcdCcNextKgParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_engine_params_t) == sizeof(t_FmPcdCcNextEngineParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_key_params_t) == sizeof(t_FmPcdCcKeyParams));
-+ ASSERT_COND(sizeof(ioc_keys_params_t) == sizeof(t_KeysParams));
-+#if !defined(CONFIG_COMPAT)
-+ /* different alignment */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_node_params_t) == sizeof(t_FmPcdCcNodeParams) + sizeof(void *));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_hash_table_params_t) == sizeof(t_FmPcdHashTableParams) + sizeof(void *));
-+#endif
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_grp_params_t) == sizeof(t_FmPcdCcGrpParams));
-+#if !defined(CONFIG_COMPAT)
-+ /* different alignment */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_cc_tree_params_t) == sizeof(t_FmPcdCcTreeParams) + sizeof(void *));
-+#endif
-+ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_byte_rate_mode_param_t) == sizeof(t_FmPcdPlcrByteRateModeParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t) == sizeof(t_FmPcdPlcrNonPassthroughAlgParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_next_engine_params_u) == sizeof(u_FmPcdPlcrNextEngineParams));
-+ /*ioc_fm_pcd_port_params_t : private */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_plcr_profile_params_t) == sizeof(t_FmPcdPlcrProfileParams) + sizeof(void *));
-+ /*ioc_fm_pcd_cc_tree_modify_next_engine_params_t : private */
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+#error TODO: unsupported feature
-+/*
-+ ASSERT_COND(sizeof(TODO) == sizeof(t_FmPcdManipHdrInsrtByTemplateParams));
-+ ASSERT_COND(sizeof(TODO) == sizeof(t_CapwapFragmentationParams));
-+ ASSERT_COND(sizeof(TODO) == sizeof(t_CapwapReassemblyParams));
-+*/
-+#endif
-+
-+ /*ioc_fm_pcd_cc_node_modify_next_engine_params_t : private */
-+ /*ioc_fm_pcd_cc_node_remove_key_params_t : private */
-+ /*ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t : private */
-+ /*ioc_fm_pcd_cc_node_modify_key_params_t : private */
-+ /*ioc_fm_manip_hdr_info_t : private */
-+ /*ioc_fm_pcd_hash_table_set_t : private */
-+
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_ip_params_t) == sizeof(t_FmPcdManipFragIpParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_ip_params_t) == sizeof(t_FmPcdManipReassemIpParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_special_offload_ipsec_params_t) == sizeof(t_FmPcdManipSpecialOffloadIPSecParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_special_offload_params_t) == sizeof(t_FmPcdManipSpecialOffloadParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_rmv_generic_params_t) == sizeof(t_FmPcdManipHdrRmvGenericParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_generic_params_t) == sizeof(t_FmPcdManipHdrInsrtGenericParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_insrt_params_t) == sizeof(t_FmPcdManipHdrInsrtParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_rmv_params_t) == sizeof(t_FmPcdManipHdrRmvParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_hdr_params_t) == sizeof(t_FmPcdManipHdrParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_params_t) == sizeof(t_FmPcdManipFragParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_params_t) == sizeof(t_FmPcdManipReassemParams));
-+#if !defined(CONFIG_COMPAT)
-+ /* different alignment */
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_params_t) == sizeof(t_FmPcdManipParams) + sizeof(void *));
-+#endif
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_ip_stats_t) == sizeof(t_FmPcdManipReassemIpStats));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_ip_stats_t) == sizeof(t_FmPcdManipFragIpStats));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_reassem_stats_t) == sizeof(t_FmPcdManipReassemStats));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_frag_stats_t) == sizeof(t_FmPcdManipFragStats));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_manip_stats_t) == sizeof(t_FmPcdManipStats));
-+#if DPAA_VERSION >= 11
-+ ASSERT_COND(sizeof(ioc_fm_pcd_frm_replic_group_params_t) == sizeof(t_FmPcdFrmReplicGroupParams) + sizeof(void *));
-+#endif
-+
-+ /* fm_port_ext.h == fm_port_ioctls.h */
-+ ASSERT_COND(sizeof(ioc_fm_port_rate_limit_t) == sizeof(t_FmPortRateLimit));
-+ ASSERT_COND(sizeof(ioc_fm_port_pcd_params_t) == sizeof(t_FmPortPcdParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_kg_scheme_select_t) == sizeof(t_FmPcdKgSchemeSelect));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_port_schemes_params_t) == sizeof(t_FmPcdPortSchemesParams));
-+ ASSERT_COND(sizeof(ioc_fm_pcd_prs_start_t) == sizeof(t_FmPcdPrsStart));
-+
-+ return;
-+}
-+
-+#define ASSERT_IOC_NET_ENUM(def) ASSERT_COND((unsigned long)e_IOC_NET_##def == (unsigned long)def)
-+
-+void LnxWrpPCDIOCTLEnumChecking(void)
-+{
-+ /* net_ext.h == net_ioctls.h : sampling checks */
-+ ASSERT_IOC_NET_ENUM(HEADER_TYPE_MACSEC);
-+ ASSERT_IOC_NET_ENUM(HEADER_TYPE_PPP);
-+ ASSERT_IOC_NET_ENUM(MAX_HEADER_TYPE_COUNT);
-+
-+ /* fm_ext.h == fm_ioctls.h */
-+ ASSERT_COND((unsigned long)e_IOC_FM_PORT_TYPE_DUMMY == (unsigned long)e_FM_PORT_TYPE_DUMMY);
-+ ASSERT_COND((unsigned long)e_IOC_EX_MURAM_ECC == (unsigned long)e_FM_EX_MURAM_ECC);
-+ ASSERT_COND((unsigned long)e_IOC_FM_COUNTERS_DEQ_CONFIRM == (unsigned long)e_FM_COUNTERS_DEQ_CONFIRM);
-+
-+ /* fm_pcd_ext.h == fm_pcd_ioctls.h */
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES == (unsigned long)e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS_EXCEPTION_SINGLE_ECC == (unsigned long)e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS == (unsigned long)e_FM_PCD_PRS);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_EXTRACT_FULL_FIELD == (unsigned long)e_FM_PCD_EXTRACT_FULL_FIELD);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_EXTRACT_FROM_FLOW_ID == (unsigned long)e_FM_PCD_EXTRACT_FROM_FLOW_ID);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO == (unsigned long)e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_DFLT_ILLEGAL == (unsigned long)e_FM_PCD_KG_DFLT_ILLEGAL);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_GENERIC_NOT_FROM_DATA == (unsigned long)e_FM_PCD_KG_GENERIC_NOT_FROM_DATA);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_HDR_INDEX_LAST == (unsigned long)e_FM_PCD_HDR_INDEX_LAST);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_SHARED == (unsigned long)e_FM_PCD_PLCR_SHARED);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_RFC_4115 == (unsigned long)e_FM_PCD_PLCR_RFC_4115);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_COLOR_AWARE == (unsigned long)e_FM_PCD_PLCR_COLOR_AWARE);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_OVERRIDE == (unsigned long)e_FM_PCD_PLCR_OVERRIDE);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_FULL_FRM_LEN == (unsigned long)e_FM_PCD_PLCR_FULL_FRM_LEN);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN == (unsigned long)e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_PACKET_MODE == (unsigned long)e_FM_PCD_PLCR_PACKET_MODE);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_DROP_FRAME == (unsigned long)e_FM_PCD_DROP_FRAME);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER == (unsigned long)e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP == (unsigned long)e_FM_PCD_ACTION_INDEXED_LOOKUP);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR);
-+#if !defined(FM_CAPWAP_SUPPORT)
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_INSRT_GENERIC == (unsigned long)e_FM_PCD_MANIP_INSRT_GENERIC);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_RMV_GENERIC == (unsigned long)e_FM_PCD_MANIP_RMV_GENERIC);
-+#else
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_INSRT_BY_TEMPLATE == (unsigned long)e_FM_PCD_MANIP_INSRT_BY_TEMPLATE);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_RMV_BY_HDR == (unsigned long)e_FM_PCD_MANIP_RMV_BY_HDR);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_RMV_BY_HDR_FROM_START == (unsigned long)e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START);
-+#endif
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG == (unsigned long)e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH == (unsigned long)e_FM_PCD_MANIP_EIGHT_WAYS_HASH);
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_STATS_PER_FLOWID == (unsigned long)e_FM_PCD_STATS_PER_FLOWID);
-+#endif
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD == (unsigned long)e_FM_PCD_MANIP_SPECIAL_OFFLOAD);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_CC_STATS_MODE_FRAME == (unsigned long)e_FM_PCD_CC_STATS_MODE_FRAME);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG == (unsigned long)e_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC == (unsigned long)e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC);
-+
-+ /* fm_port_ext.h == fm_port_ioctls.h */
-+#if !defined(FM_CAPWAP_SUPPORT)
-+ ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR);
-+#else
-+ ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR);
-+#endif
-+ ASSERT_COND((unsigned long)e_IOC_FM_PORT_COUNTERS_DEQ_CONFIRM == (unsigned long)e_FM_PORT_COUNTERS_DEQ_CONFIRM);
-+ ASSERT_COND((unsigned long)e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 == (unsigned long)e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8);
-+
-+ return;
-+}
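
[Editor's note] LnxWrpPCDIOCTLTypeChecking() and LnxWrpPCDIOCTLEnumChecking() verify at run time, via ASSERT_COND, that the ioctl-facing structures and enums mirror the internal ones. The same invariants can also be expressed at build time with C11 _Static_assert; this is only an aside with made-up types, not how the SDK does it:

    /* Standalone sketch (made-up enum/struct): the "two copies must match"
     * invariant checked at compile time rather than via ASSERT_COND.
     */
    enum foo_mode      { FOO_MODE_A = 0, FOO_MODE_B = 1 };
    enum ioc_foo_mode  { IOC_FOO_MODE_A = 0, IOC_FOO_MODE_B = 1 };

    struct foo_params     { unsigned int mode; unsigned int count; };
    struct ioc_foo_params { unsigned int mode; unsigned int count; };

    _Static_assert(IOC_FOO_MODE_B == FOO_MODE_B,
                   "please synchronize IOC_ enums!");
    _Static_assert(sizeof(struct ioc_foo_params) == sizeof(struct foo_params),
                   "please synchronize IOC_ structs!");
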
-+
-+static t_Error LnxwrpFmPcdIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat)
-+{
-+ t_Error err = E_OK;
-+
-+/*
-+Status: PCD API to fmlib (file: drivers/net/dpa/NetCommSw/inc/Peripherals/fm_pcd_ext.h):
-+
-+ FM_PCD_PrsLoadSw
-+ FM_PCD_SetAdvancedOffloadSupport
-+ FM_PCD_Enable
-+ FM_PCD_Disable
-+ FM_PCD_ForceIntr
-+ FM_PCD_SetException
-+ FM_PCD_KgSetAdditionalDataAfterParsing
-+ FM_PCD_KgSetDfltValue
-+ FM_PCD_NetEnvCharacteristicsSet
-+ FM_PCD_NetEnvCharacteristicsDelete
-+ FM_PCD_KgSchemeSet
-+ FM_PCD_KgSchemeDelete
-+ FM_PCD_MatchTableSet
-+ FM_PCD_MatchTableDelete
-+ FM_PCD_CcRootBuild
-+ FM_PCD_CcRootDelete
-+ FM_PCD_PlcrProfileSet
-+ FM_PCD_PlcrProfileDelete
-+ FM_PCD_CcRootModifyNextEngine
-+ FM_PCD_MatchTableModifyNextEngine
-+ FM_PCD_MatchTableModifyMissNextEngine
-+ FM_PCD_MatchTableRemoveKey
-+ FM_PCD_MatchTableAddKey
-+ FM_PCD_MatchTableModifyKeyAndNextEngine
-+ FM_PCD_HashTableSet
-+ FM_PCD_HashTableDelete
-+ FM_PCD_HashTableAddKey
-+ FM_PCD_HashTableRemoveKey
-+ FM_PCD_MatchTableModifyKey
-+ FM_PCD_ManipNodeReplace
-+ FM_PCD_ManipNodeSet
-+ FM_PCD_ManipNodeDelete
-+
-+Status: not exported, should be thru sysfs
-+ FM_PCD_KgSchemeGetCounter
-+ FM_PCD_KgSchemeSetCounter
-+ FM_PCD_PlcrProfileGetCounter
-+ FM_PCD_PlcrProfileSetCounter
-+
-+Status: not exported
-+ FM_PCD_MatchTableFindNRemoveKey
-+ FM_PCD_MatchTableFindNModifyNextEngine
-+ FM_PCD_MatchTableFindNModifyKeyAndNextEngine
-+ FM_PCD_MatchTableFindNModifyKey
-+ FM_PCD_MatchTableGetIndexedHashBucket
-+ FM_PCD_MatchTableGetNextEngine
-+ FM_PCD_MatchTableGetKeyCounter
-+
-+Status: not exported, would be nice to have
-+ FM_PCD_HashTableModifyNextEngine
-+ FM_PCD_HashTableModifyMissNextEngine
-+ FM_PCD_HashTableGetMissNextEngine
-+ FM_PCD_ManipGetStatistics
-+
-+Status: not exported
-+#if DPAA_VERSION >= 11
-+
-+ FM_VSP_GetStatistics -- it's not available yet
-+#endif
-+
-+Status: feature not supported
-+#ifdef FM_CAPWAP_SUPPORT
-+#error unsupported feature
-+ FM_PCD_StatisticsSetNode
-+#endif
-+
-+ */
-+ _fm_ioctl_dbg("cmd:0x%08x(type:0x%02x, nr:%u).\n",
-+ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd) - 20);
-+
-+ switch (cmd)
-+ {
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_PRS_LOAD_SW_COMPAT:
-+#endif
-+ case FM_PCD_IOC_PRS_LOAD_SW:
-+ {
-+ ioc_fm_pcd_prs_sw_params_t *param;
-+ uint8_t *p_code;
-+
-+ param = (ioc_fm_pcd_prs_sw_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_prs_sw_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_prs_sw_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_prs_sw_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_prs_sw_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_prs_sw_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_prs_sw_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_prs_sw_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_prs_sw_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_fm_pcd_prs_sw(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_prs_sw_params_t *)arg,
-+ sizeof(ioc_fm_pcd_prs_sw_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (!param->p_code || !param->size)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ p_code = (uint8_t *) XX_Malloc(param->size);
-+ if (!p_code)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(p_code, 0, param->size);
-+ if (copy_from_user(p_code, param->p_code, param->size))
-+ {
-+ XX_Free(p_code);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->p_code = p_code;
-+
-+ err = FM_PCD_PrsLoadSw(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdPrsSwParams*)param);
-+
-+ XX_Free(p_code);
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT:
-+ err = FM_PCD_SetAdvancedOffloadSupport(p_LnxWrpFmDev->h_PcdDev);
-+ break;
-+
-+ case FM_PCD_IOC_ENABLE:
-+ err = FM_PCD_Enable(p_LnxWrpFmDev->h_PcdDev);
-+ break;
-+
-+ case FM_PCD_IOC_DISABLE:
-+ err = FM_PCD_Disable(p_LnxWrpFmDev->h_PcdDev);
-+ break;
-+
-+ case FM_PCD_IOC_FORCE_INTR:
-+ {
-+ int exception;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (get_user(exception, (int *) compat_ptr(arg)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ else
-+#endif
-+ {
-+ if (get_user(exception, (int *)arg))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_ForceIntr(p_LnxWrpFmDev->h_PcdDev, (e_FmPcdExceptions)exception);
-+ break;
-+ }
-+
-+ case FM_PCD_IOC_SET_EXCEPTION:
-+ {
-+ ioc_fm_pcd_exception_params_t *param;
-+
-+ param = (ioc_fm_pcd_exception_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_exception_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_exception_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_exception_params_t *)compat_ptr(arg),
-+ sizeof(ioc_fm_pcd_exception_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_exception_params_t *)arg,
-+ sizeof(ioc_fm_pcd_exception_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, param->exception, param->enable);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_PCD_IOC_KG_SET_ADDITIONAL_DATA_AFTER_PARSING:
-+ {
-+ uint8_t payloadOffset;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (get_user(payloadOffset, (uint8_t*) compat_ptr(arg)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ else
-+#endif
-+ {
-+ if (get_user(payloadOffset, (uint8_t*) arg))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_KgSetAdditionalDataAfterParsing(p_LnxWrpFmDev->h_PcdDev, payloadOffset);
-+ break;
-+ }
-+
-+ case FM_PCD_IOC_KG_SET_DFLT_VALUE:
-+ {
-+ ioc_fm_pcd_kg_dflt_value_params_t *param;
-+
-+ param = (ioc_fm_pcd_kg_dflt_value_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_kg_dflt_value_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_kg_dflt_value_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_kg_dflt_value_params_t *)compat_ptr(arg),
-+ sizeof(ioc_fm_pcd_kg_dflt_value_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_kg_dflt_value_params_t *)arg,
-+ sizeof(ioc_fm_pcd_kg_dflt_value_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PCD_KgSetDfltValue(p_LnxWrpFmDev->h_PcdDev, param->valueId, param->value);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET_COMPAT:
-+#endif
-+ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET:
-+ {
-+ ioc_fm_pcd_net_env_params_t *param;
-+
-+ param = (ioc_fm_pcd_net_env_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_net_env_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_net_env_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_net_env_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_net_env_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_net_env_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_net_env_params_t));
-+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_net_env_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_net_env_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_net_env(compat_param, param, COMPAT_US_TO_K);
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_net_env_params_t *) arg,
-+ sizeof(ioc_fm_pcd_net_env_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ param->id = FM_PCD_NetEnvCharacteristicsSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdNetEnvParams*)param);
-+
-+ if (!param->id)
-+ {
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /* Since the LLD has no errno-style error reporting,
-+ we're left here with no other option than to report
-+ a generic E_INVALID_VALUE */
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_net_env_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_net_env_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_net_env_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_net_env_params_t));
-+ compat_copy_fm_pcd_net_env(compat_param, param, COMPAT_K_TO_US);
-+
-+ if (copy_to_user((ioc_compat_fm_pcd_net_env_params_t *) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_net_env_params_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_net_env_params_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_net_env_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0 , sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_NetEnvCharacteristicsDelete(id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_KG_SCHEME_SET_COMPAT:
-+#endif
-+ case FM_PCD_IOC_KG_SCHEME_SET:
-+ {
-+ ioc_fm_pcd_kg_scheme_params_t *param;
-+
-+ param = (ioc_fm_pcd_kg_scheme_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_kg_scheme_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param = NULL;
-+
-+ compat_param = (ioc_compat_fm_pcd_kg_scheme_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
-+
-+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_kg_scheme_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_kg_scheme(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_params_t *)arg,
-+ sizeof(ioc_fm_pcd_kg_scheme_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ param->id = FM_PCD_KgSchemeSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdKgSchemeParams*)param);
-+
-+ if (!param->id)
-+ {
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /* Since the LLD has no errno-style error reporting,
-+ we're left here with no other option than to report
-+ a generic E_INVALID_VALUE */
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_kg_scheme_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_params_t));
-+ compat_copy_fm_pcd_kg_scheme(compat_param, param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_kg_scheme_params_t *)compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_params_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_kg_scheme_params_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_kg_scheme_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_KG_SCHEME_GET_CNTR_COMPAT:
-+#endif
-+ case FM_PCD_IOC_KG_SCHEME_GET_CNTR:
-+ {
-+ ioc_fm_pcd_kg_scheme_spc_t *param;
-+
-+ param = (ioc_fm_pcd_kg_scheme_spc_t *) XX_Malloc(sizeof(ioc_fm_pcd_kg_scheme_spc_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_spc_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param = NULL;
-+
-+ compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
-+
-+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_kg_scheme_spc_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_spc_t *)arg,
-+ sizeof(ioc_fm_pcd_kg_scheme_spc_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ param->val = FM_PCD_KgSchemeGetCounter((t_Handle)param->id);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
-+ compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_kg_scheme_spc_t *)compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_kg_scheme_spc_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_kg_scheme_spc_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_KG_SCHEME_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0 , sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_KgSchemeDelete(id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_SET_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_SET:
-+ {
-+ ioc_fm_pcd_cc_node_params_t *param;
-+ uint8_t *keys;
-+ uint8_t *masks;
-+ int i,k;
-+
-+ param = (ioc_fm_pcd_cc_node_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_node_params_t) +
-+ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_params_t) +
-+ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
-+
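-+        /* param is allocated with room for two staging areas right after the
-+         * struct: 'keys' and then 'masks', each sized for
-+         * IOC_FM_PCD_MAX_NUM_OF_KEYS keys of IOC_FM_PCD_MAX_SIZE_OF_KEY bytes,
-+         * so the per-key user-space pointers can be rebased to kernel memory
-+         * further down. */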
-+ keys = (uint8_t *) (param + 1);
-+ masks = keys + IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_node_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
-+ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
-+ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
-+
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_node_params_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_node_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_node(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_node_params_t *)arg, sizeof(ioc_fm_pcd_cc_node_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ ASSERT_COND(param->keys_params.num_of_keys <= IOC_FM_PCD_MAX_NUM_OF_KEYS);
-+ ASSERT_COND(param->keys_params.key_size <= IOC_FM_PCD_MAX_SIZE_OF_KEY);
-+
-+ /* support for indexed lookup */
-+ if( !(param->extract_cc_params.type == e_IOC_FM_PCD_EXTRACT_NON_HDR &&
-+ param->extract_cc_params.extract_params.extract_non_hdr.src == e_IOC_FM_PCD_EXTRACT_FROM_HASH &&
-+ param->extract_cc_params.extract_params.extract_non_hdr.action == e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP))
-+ {
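-+            /* Not an indexed lookup: every key/mask pointer still refers to
-+             * user memory, so copy each one into the keys/masks staging areas
-+             * and repoint the params at the kernel copies. */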
-+ for (i=0, k=0;
-+ i < param->keys_params.num_of_keys;
-+ i++, k += IOC_FM_PCD_MAX_SIZE_OF_KEY)
-+ {
-+ if (param->keys_params.key_params[i].p_key &&
-+ param->keys_params.key_size)
-+ {
-+ if (copy_from_user(&keys[k],
-+ param->keys_params.key_params[i].p_key,
-+ param->keys_params.key_size))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->keys_params.key_params[i].p_key = &keys[k];
-+ }
-+
-+ if (param->keys_params.key_params[i].p_mask)
-+ {
-+ if (copy_from_user(&masks[k],
-+ param->keys_params.key_params[i].p_mask,
-+ param->keys_params.key_size))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->keys_params.key_params[i].p_mask = &masks[k];
-+ }
-+ }
-+ }
-+
-+ param->id = FM_PCD_MatchTableSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdCcNodeParams*)param);
-+
-+ if (!param->id) {
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /* Since the LLD has no errno-style error reporting,
-+ we're left here with no other option than to report
-+ a generic E_INVALID_VALUE */
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_params_t *compat_param;
-+ compat_param = (ioc_compat_fm_pcd_cc_node_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
-+ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_params_t) +
-+ 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY);
-+ compat_copy_fm_pcd_cc_node(compat_param, param, COMPAT_K_TO_US);
-+
-+ if (copy_to_user((ioc_compat_fm_pcd_cc_node_params_t *)compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_cc_node_params_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_cc_node_params_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_cc_node_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+        memset(&id, 0, sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_MatchTableDelete(id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_CC_ROOT_BUILD_COMPAT:
-+#endif
-+ case FM_PCD_IOC_CC_ROOT_BUILD:
-+ {
-+ ioc_fm_pcd_cc_tree_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_tree_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_cc_tree_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_tree_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tree_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tree_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_tree_params_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_tree_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_tree(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_tree_params_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_tree_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ param->id = FM_PCD_CcRootBuild(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdCcTreeParams*)param);
-+
-+ if (!param->id) {
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /* Since the LLD has no errno-style error reporting,
-+ we're left here with no other option than to report
-+ a generic E_INVALID_VALUE */
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tree_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tree_params_t *) XX_Malloc(sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_params_t));
-+
-+ compat_copy_fm_pcd_cc_tree(compat_param, param, COMPAT_K_TO_US);
-+
-+ if (copy_to_user((ioc_compat_fm_pcd_cc_tree_params_t *)compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_cc_tree_params_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_cc_tree_params_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_cc_tree_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_CC_ROOT_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_CC_ROOT_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+        memset(&id, 0, sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_CcRootDelete(id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_PLCR_PROFILE_SET_COMPAT:
-+#endif
-+ case FM_PCD_IOC_PLCR_PROFILE_SET:
-+ {
-+ ioc_fm_pcd_plcr_profile_params_t *param;
-+
-+ param = (ioc_fm_pcd_plcr_profile_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_plcr_profile_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_plcr_profile_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_plcr_profile_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
-+            if (copy_from_user(compat_param,
-+                    (ioc_compat_fm_pcd_plcr_profile_params_t *)compat_ptr(arg),
-+                    sizeof(ioc_compat_fm_pcd_plcr_profile_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_plcr_profile(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_plcr_profile_params_t *)arg,
-+ sizeof(ioc_fm_pcd_plcr_profile_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (!param->modify &&
-+ (((t_FmPcdPlcrProfileParams*)param)->id.newParams.profileType != e_FM_PCD_PLCR_SHARED))
-+ {
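-+            /* For a new, non-shared profile the caller passes an
-+             * ioc_fm_pcd_port_params_t through h_FmPort; resolve it to the
-+             * kernel-side port handle before handing the params to the LLD. */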
-+ t_Handle h_Port;
-+ ioc_fm_pcd_port_params_t *port_params;
-+
-+ port_params = (ioc_fm_pcd_port_params_t*) XX_Malloc(sizeof(ioc_fm_pcd_port_params_t));
-+ if (!port_params)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(port_params, 0, sizeof(ioc_fm_pcd_port_params_t));
-+ if (copy_from_user(port_params, (ioc_fm_pcd_port_params_t*)((t_FmPcdPlcrProfileParams*)param)->id.newParams.h_FmPort,
-+ sizeof(ioc_fm_pcd_port_params_t)))
-+ {
-+ XX_Free(port_params);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ switch(port_params->port_type)
-+ {
-+ case (e_IOC_FM_PORT_TYPE_RX):
-+ if (port_params->port_id < FM_MAX_NUM_OF_1G_RX_PORTS) {
-+ h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id].h_Dev;
-+ break;
-+ }
-+ goto invalid_port_id;
-+
-+ case (e_IOC_FM_PORT_TYPE_RX_10G):
-+ if (port_params->port_id < FM_MAX_NUM_OF_10G_RX_PORTS) {
-+#ifndef CONFIG_FMAN_ARM
-+ if (IS_T1023_T1024) {
-+ h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id].h_Dev;
-+ } else {
-+#else
-+ {
-+#endif
-+ h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id + FM_MAX_NUM_OF_1G_RX_PORTS].h_Dev;
-+ }
-+ break;
-+ }
-+ goto invalid_port_id;
-+
-+ case (e_IOC_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ if (port_params->port_id && port_params->port_id < FM_MAX_NUM_OF_OH_PORTS) {
-+ h_Port = p_LnxWrpFmDev->opPorts[port_params->port_id - 1].h_Dev;
-+ break;
-+ }
-+ goto invalid_port_id;
-+
-+ default:
-+invalid_port_id:
-+ XX_Free(port_params);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG);
-+ }
-+
-+ ((t_FmPcdPlcrProfileParams*)param)->id.newParams.h_FmPort = h_Port;
-+ XX_Free(port_params);
-+ }
-+
-+ param->id = FM_PCD_PlcrProfileSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdPlcrProfileParams*)param);
-+
-+ if (!param->id) {
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /* Since the LLD has no errno-style error reporting,
-+ we're left here with no other option than to report
-+ a generic E_INVALID_VALUE */
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_plcr_profile_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_plcr_profile_params_t));
-+ compat_copy_fm_pcd_plcr_profile(compat_param, param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_plcr_profile_params_t *) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_plcr_profile_params_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_plcr_profile_params_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_plcr_profile_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_PLCR_PROFILE_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_PLCR_PROFILE_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+        memset(&id, 0, sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_PlcrProfileDelete(id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE:
-+ {
-+ ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_tree_modify_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t));
-+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_fm_pcd_cc_tree_modify_next_engine(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_tree_modify_next_engine_params_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PCD_CcRootModifyNextEngine(param->id,
-+ param->grp_indx,
-+ param->indx,
-+ (t_FmPcdCcNextEngineParams*)(&param->cc_next_engine_params));
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE:
-+ {
-+ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
-+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_node_modify_next_engine(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_next_engine_params_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PCD_MatchTableModifyNextEngine(param->id,
-+ param->key_indx,
-+ (t_FmPcdCcNextEngineParams*)(&param->cc_next_engine_params));
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE:
-+ {
-+ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t));
-+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_node_modify_next_engine(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) arg,
-+ sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PCD_MatchTableModifyMissNextEngine(param->id,
-+ (t_FmPcdCcNextEngineParams*)(&param->cc_next_engine_params));
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY:
-+ {
-+ ioc_fm_pcd_cc_node_remove_key_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_node_remove_key_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_node_remove_key_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_remove_key_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_remove_key_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_node_remove_key_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_node_remove_key_params_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->id = compat_ptr(compat_param->id);
-+ param->key_indx = compat_param->key_indx;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_node_remove_key_params_t *) arg,
-+ sizeof(ioc_fm_pcd_cc_node_remove_key_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PCD_MatchTableRemoveKey(param->id, param->key_indx);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_ADD_KEY_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_ADD_KEY:
-+ {
-+ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (param->key_size)
-+ {
-+ int size = 0;
-+
-+ if (param->key_params.p_key) size += param->key_size;
-+ if (param->key_params.p_mask) size += param->key_size;
-+
-+ if (size)
-+ {
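-+                /* Stage key and mask in a single kernel buffer: the key (when
-+                 * supplied) at the start, the mask key_size bytes further in.
-+                 * Freeing p_key after the LLD call releases the combined
-+                 * buffer when a key was supplied. */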
-+ uint8_t *p_tmp;
-+
-+ p_tmp = (uint8_t*) XX_Malloc(size);
-+ if (!p_tmp)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD key/mask"));
-+ }
-+
-+ if (param->key_params.p_key)
-+ {
-+ if (copy_from_user(p_tmp, param->key_params.p_key, param->key_size))
-+ {
-+ XX_Free(p_tmp);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->key_params.p_key = p_tmp;
-+ }
-+
-+ if (param->key_params.p_mask)
-+ {
-+ p_tmp += param->key_size;
-+ if (copy_from_user(p_tmp, param->key_params.p_mask, param->key_size))
-+ {
-+ XX_Free(p_tmp - param->key_size);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->key_params.p_mask = p_tmp;
-+ }
-+ }
-+ }
-+
-+ err = FM_PCD_MatchTableAddKey(
-+ param->id,
-+ param->key_indx,
-+ param->key_size,
-+ (t_FmPcdCcKeyParams*)&param->key_params);
-+
-+ if (param->key_params.p_key)
-+ XX_Free(param->key_params.p_key);
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE:
-+ {
-+ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PCD_MatchTableModifyKeyAndNextEngine(param->id,
-+ param->key_indx,
-+ param->key_size,
-+ (t_FmPcdCcKeyParams*)(&param->key_params));
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT:
-+ {
-+ ioc_fm_pcd_cc_tbl_get_stats_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_tbl_get_stats_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t)))
-+ {
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+
-+ err = FM_PCD_MatchTableGetKeyStatistics((t_Handle) param.id,
-+ param.key_index,
-+ (t_FmPcdCcKeyStatistics *) &param.statistics);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_stats_t*) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t))){
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
-+ &param,
-+ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+
-+ break;
-+ }
-+
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT:
-+ {
-+ ioc_fm_pcd_cc_tbl_get_stats_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_tbl_get_stats_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t)))
-+ {
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+
-+ err = FM_PCD_MatchTableGetMissStatistics((t_Handle) param.id,
-+ (t_FmPcdCcKeyStatistics *) &param.statistics);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_stats_t*) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t))){
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
-+ &param,
-+ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+
-+ break;
-+ }
-+
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT:
-+#endif
-+ case FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT:
-+ {
-+ ioc_fm_pcd_cc_tbl_get_stats_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_cc_tbl_get_stats_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t)))
-+ {
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+
-+ err = FM_PCD_HashTableGetMissStatistics((t_Handle) param.id,
-+ (t_FmPcdCcKeyStatistics *) &param.statistics);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_stats_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t));
-+ compat_copy_fm_pcd_cc_tbl_get_stats(compat_param, &param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_stats_t*) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_stats_t))){
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_cc_tbl_get_stats_t *)arg,
-+ &param,
-+ sizeof(ioc_fm_pcd_cc_tbl_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_HASH_TABLE_SET_COMPAT:
-+#endif
-+ case FM_PCD_IOC_HASH_TABLE_SET:
-+ {
-+ ioc_fm_pcd_hash_table_params_t *param;
-+
-+ param = (ioc_fm_pcd_hash_table_params_t*) XX_Malloc(
-+ sizeof(ioc_fm_pcd_hash_table_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_hash_table_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_hash_table_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_hash_table_params_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_hash_table_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_hash_table_params_t*)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_hash_table_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_hash_table(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_hash_table_params_t *)arg,
-+ sizeof(ioc_fm_pcd_hash_table_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ param->id = FM_PCD_HashTableSet(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdHashTableParams *) param);
-+
-+ if (!param->id)
-+ {
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /* Since the LLD has no errno-style error reporting,
-+ we're left here with no other option than to report
-+ a generic E_INVALID_VALUE */
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_hash_table_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_hash_table_params_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_hash_table_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_params_t));
-+ compat_copy_fm_pcd_hash_table(compat_param, param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_hash_table_params_t*) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_hash_table_params_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_hash_table_params_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_hash_table_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_HASH_TABLE_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_HASH_TABLE_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0, sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ id.obj = compat_pcd_id2ptr(compat_id.obj);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_HashTableDelete(id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_HASH_TABLE_ADD_KEY_COMPAT:
-+#endif
-+ case FM_PCD_IOC_HASH_TABLE_ADD_KEY:
-+ {
-+ ioc_fm_pcd_hash_table_add_key_params_t *param = NULL;
-+
-+ param = (ioc_fm_pcd_hash_table_add_key_params_t*) XX_Malloc(
-+ sizeof(ioc_fm_pcd_hash_table_add_key_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_hash_table_add_key_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_hash_table_add_key_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_hash_table_add_key_params_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_hash_table_add_key_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_add_key_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_hash_table_add_key_params_t*) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_hash_table_add_key_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ if (compat_param->key_size)
-+ {
-+ param->p_hash_tbl = compat_pcd_id2ptr(compat_param->p_hash_tbl);
-+ param->key_size = compat_param->key_size;
-+
-+ compat_copy_fm_pcd_cc_key(&compat_param->key_params, &param->key_params, COMPAT_US_TO_K);
-+ }
-+ else
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ break;
-+ }
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_hash_table_add_key_params_t*) arg,
-+ sizeof(ioc_fm_pcd_hash_table_add_key_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (param->key_size)
-+ {
-+ int size = 0;
-+
-+ if (param->key_params.p_key) size += param->key_size;
-+ if (param->key_params.p_mask) size += param->key_size;
-+
-+ if (size)
-+ {
-+ uint8_t *p_tmp;
-+
-+ p_tmp = (uint8_t*) XX_Malloc(size);
-+ if (!p_tmp)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD key/mask"));
-+ }
-+
-+ if (param->key_params.p_key)
-+ {
-+ if (copy_from_user(p_tmp, param->key_params.p_key, param->key_size))
-+ {
-+ XX_Free(p_tmp);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->key_params.p_key = p_tmp;
-+ }
-+
-+ if (param->key_params.p_mask)
-+ {
-+ p_tmp += param->key_size;
-+ if (copy_from_user(p_tmp, param->key_params.p_mask, param->key_size))
-+ {
-+ XX_Free(p_tmp - param->key_size);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->key_params.p_mask = p_tmp;
-+ }
-+ }
-+ }
-+
-+ err = FM_PCD_HashTableAddKey(
-+ param->p_hash_tbl,
-+ param->key_size,
-+ (t_FmPcdCcKeyParams*)&param->key_params);
-+
-+ if (param->key_params.p_key)
-+ XX_Free(param->key_params.p_key);
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_HASH_TABLE_REMOVE_KEY_COMPAT:
-+#endif
-+ case FM_PCD_IOC_HASH_TABLE_REMOVE_KEY:
-+ {
-+ ioc_fm_pcd_hash_table_remove_key_params_t *param = NULL;
-+
-+ param = (ioc_fm_pcd_hash_table_remove_key_params_t*) XX_Malloc(
-+ sizeof(ioc_fm_pcd_hash_table_remove_key_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_hash_table_remove_key_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_hash_table_remove_key_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_hash_table_remove_key_params_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_hash_table_remove_key_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_hash_table_remove_key_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_hash_table_remove_key_params_t*) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_hash_table_remove_key_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->p_hash_tbl = compat_pcd_id2ptr(compat_param->p_hash_tbl);
-+ param->key_size = compat_param->key_size;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_hash_table_remove_key_params_t*)arg,
-+ sizeof(ioc_fm_pcd_hash_table_remove_key_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (param->key_size)
-+ {
-+ uint8_t *p_key;
-+
-+ p_key = (uint8_t*) XX_Malloc(param->key_size);
-+ if (!p_key)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ if (param->p_key && copy_from_user(p_key, param->p_key, param->key_size))
-+ {
-+ XX_Free(p_key);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ param->p_key = p_key;
-+ }
-+
-+ err = FM_PCD_HashTableRemoveKey(
-+ param->p_hash_tbl,
-+ param->key_size,
-+ param->p_key);
-+
-+ if (param->p_key)
-+ XX_Free(param->p_key);
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY:
-+ {
-+ ioc_fm_pcd_cc_node_modify_key_params_t *param;
-+
-+ param = (ioc_fm_pcd_cc_node_modify_key_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_cc_node_modify_key_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_cc_node_modify_key_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t));
-+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_key_params_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_cc_node_modify_key(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_params_t *)arg,
-+ sizeof(ioc_fm_pcd_cc_node_modify_key_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (param->key_size)
-+ {
-+ int size = 0;
-+
-+ if (param->p_key) size += param->key_size;
-+ if (param->p_mask) size += param->key_size;
-+
-+ if (size)
-+ {
-+ uint8_t *p_tmp;
-+
-+ p_tmp = (uint8_t*) XX_Malloc(size);
-+ if (!p_tmp)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD key/mask"));
-+ }
-+
-+ if (param->p_key)
-+ {
-+ if (copy_from_user(p_tmp, param->p_key, param->key_size))
-+ {
-+ XX_Free(p_tmp);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->p_key = p_tmp;
-+ }
-+
-+ if (param->p_mask)
-+ {
-+ p_tmp += param->key_size;
-+ if (copy_from_user(p_tmp, param->p_mask, param->key_size))
-+ {
-+ XX_Free(p_tmp - param->key_size);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->p_mask = p_tmp;
-+ }
-+ }
-+ }
-+
-+ err = FM_PCD_MatchTableModifyKey(param->id,
-+ param->key_indx,
-+ param->key_size,
-+ param->p_key,
-+ param->p_mask);
-+
-+ if (param->p_key)
-+ XX_Free(param->p_key);
-+ else if (param->p_mask)
-+ XX_Free(param->p_mask);
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MANIP_NODE_SET_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MANIP_NODE_SET:
-+ {
-+ ioc_fm_pcd_manip_params_t *param;
-+ uint8_t *p_data = NULL;
-+ uint8_t size;
-+
-+ param = (ioc_fm_pcd_manip_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_manip_params_t));
-+
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_manip_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_manip_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_manip_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_manip_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_manip_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_manip_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_fm_pcd_manip_set_node(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_manip_params_t *)arg,
-+ sizeof(ioc_fm_pcd_manip_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (param->type == e_IOC_FM_PCD_MANIP_HDR)
-+ {
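-+            /* Header-manipulation params may reference an insert payload in
-+             * user space (u.hdr.insrt_params.u.generic.p_data); copy it into
-+             * a kernel buffer and repoint p_data before calling the LLD. */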
-+ size = param->u.hdr.insrt_params.u.generic.size;
-+ p_data = (uint8_t *) XX_Malloc(size);
-+ if (!p_data )
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, NO_MSG);
-+ }
-+
-+ if (param->u.hdr.insrt_params.u.generic.p_data &&
-+ copy_from_user(p_data,
-+ param->u.hdr.insrt_params.u.generic.p_data, size))
-+ {
-+ XX_Free(p_data);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ param->u.hdr.insrt_params.u.generic.p_data = p_data;
-+ }
-+
-+ if (param->id)
-+ {
-+ /* Security Hole: the user can pass any piece of garbage
-+ in 'param->id', and that will go straight through to the LLD,
-+ no checks being done by the wrapper! */
-+ err = FM_PCD_ManipNodeReplace(
-+ (t_Handle) param->id,
-+ (t_FmPcdManipParams*) param);
-+ if (err)
-+ {
-+ if (p_data)
-+ XX_Free(p_data);
-+ XX_Free(param);
-+ break;
-+ }
-+ }
-+ else
-+ {
-+ param->id = FM_PCD_ManipNodeSet(
-+ p_LnxWrpFmDev->h_PcdDev,
-+ (t_FmPcdManipParams*) param);
-+ if (!param->id)
-+ {
-+ if (p_data)
-+ XX_Free(p_data);
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /* Since the LLD has no errno-style error reporting,
-+ we're left here with no other option than to report
-+ a generic E_INVALID_VALUE */
-+ break;
-+ }
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_manip_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_manip_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_manip_params_t));
-+ if (!compat_param)
-+ {
-+ if (p_data)
-+ XX_Free(p_data);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_params_t));
-+
-+ compat_fm_pcd_manip_set_node(compat_param, param, COMPAT_K_TO_US);
-+
-+ if (copy_to_user((ioc_compat_fm_pcd_manip_params_t *) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_manip_params_t)))
-+ err = E_READ_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_pcd_manip_params_t *)arg,
-+ param, sizeof(ioc_fm_pcd_manip_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ if (p_data)
-+ XX_Free(p_data);
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MANIP_NODE_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MANIP_NODE_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0, sizeof(ioc_fm_obj_t));
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_ManipNodeDelete(id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_MANIP_GET_STATS_COMPAT:
-+#endif
-+ case FM_PCD_IOC_MANIP_GET_STATS:
-+ {
-+ ioc_fm_pcd_manip_get_stats_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_manip_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_manip_get_stats_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_manip_get_stats_t *)compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_manip_get_stats_t)))
-+ {
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_manip_get_stats(compat_param, &param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&param, (ioc_fm_pcd_manip_get_stats_t *)arg,
-+ sizeof(ioc_fm_pcd_manip_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PCD_ManipGetStatistics((t_Handle) param.id,
-+ (t_FmPcdManipStats*) &param.stats);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_manip_get_stats_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_manip_get_stats_t*) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
-+ if (!compat_param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_manip_get_stats_t));
-+ compat_copy_fm_pcd_manip_get_stats(compat_param, &param, COMPAT_K_TO_US);
-+ if (copy_to_user((ioc_compat_fm_pcd_manip_get_stats_t*) compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_manip_get_stats_t))){
-+ XX_Free(compat_param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ if (copy_to_user((ioc_fm_pcd_manip_get_stats_t *)arg,
-+ &param,
-+ sizeof(ioc_fm_pcd_manip_get_stats_t)))
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_FRM_REPLIC_GROUP_SET_COMPAT:
-+#endif
-+ case FM_PCD_IOC_FRM_REPLIC_GROUP_SET:
-+ {
-+ ioc_fm_pcd_frm_replic_group_params_t *param;
-+
-+ param = (ioc_fm_pcd_frm_replic_group_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_frm_replic_group_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_frm_replic_group_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_frm_replic_group_params_t
-+ *compat_param;
-+
-+ compat_param =
-+ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
-+ XX_Malloc(sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY,
-+ ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
-+ compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t))) {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_frm_replic_group_params(compat_param,
-+ param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param,
-+ (ioc_fm_pcd_frm_replic_group_params_t *)arg,
-+ sizeof(ioc_fm_pcd_frm_replic_group_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ param->id = FM_PCD_FrmReplicSetGroup(p_LnxWrpFmDev->h_PcdDev,
-+ (t_FmPcdFrmReplicGroupParams*)param);
-+
-+ if (!param->id) {
-+ XX_Free(param);
-+ err = E_INVALID_VALUE;
-+ /*
-+ * Since the LLD has no errno-style error reporting,
-+ * we're left here with no other option than to report
-+ * a generic E_INVALID_VALUE
-+ */
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_frm_replic_group_params_t
-+ *compat_param;
-+
-+ compat_param =
-+ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
-+ XX_Malloc(sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY,
-+ ("IOCTL FM PCD"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t));
-+ compat_copy_fm_pcd_frm_replic_group_params(compat_param,
-+ param, COMPAT_K_TO_US);
-+ if (copy_to_user(
-+ (ioc_compat_fm_pcd_frm_replic_group_params_t *)
-+ compat_ptr(arg),
-+ compat_param,
-+ sizeof(ioc_compat_fm_pcd_frm_replic_group_params_t)))
-+ err = E_WRITE_FAILED;
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user(
-+ (ioc_fm_pcd_frm_replic_group_params_t *)arg,
-+ param,
-+ sizeof(ioc_fm_pcd_frm_replic_group_params_t)))
-+ err = E_WRITE_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+ break;
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0, sizeof(ioc_fm_obj_t));
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id,
-+ (ioc_compat_fm_obj_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_obj_t)))
-+ break;
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg,
-+ sizeof(ioc_fm_obj_t)))
-+ break;
-+ }
-+
-+ return FM_PCD_FrmReplicDeleteGroup(id.obj);
-+ }
-+ break;
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD_COMPAT:
-+#endif
-+ case FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD:
-+ {
-+ ioc_fm_pcd_frm_replic_member_params_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_frm_replic_member_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_pcd_frm_replic_member_params(&compat_param, &param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ if (copy_from_user(&param, (void *)arg, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ return FM_PCD_FrmReplicAddMember(param.member.h_replic_group,
-+ param.member.member_index,
-+ (t_FmPcdCcNextEngineParams*)&param.next_engine_params);
-+ }
-+ break;
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE:
-+ {
-+ ioc_fm_pcd_frm_replic_member_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_frm_replic_member_t compat_param;
-+
-+ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_pcd_frm_replic_member(&compat_param, &param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ if (copy_from_user(&param, (void *)arg, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ return FM_PCD_FrmReplicRemoveMember(param.h_replic_group, param.member_index);
-+ }
-+ break;
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_VSP_CONFIG_COMPAT:
-+#endif
-+ case FM_IOC_VSP_CONFIG:
-+ {
-+ ioc_fm_vsp_params_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_vsp_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_vsp_params(&compat_param, &param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ if (copy_from_user(&param, (void *)arg, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
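-+        /* Fill in the values only the wrapper knows before calling the LLD:
-+         * the LIODN offset recorded for this RX port and the FM handle. */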
-+ {
-+ uint8_t portId = param.port_params.port_id;
-+ param.liodn_offset =
-+ p_LnxWrpFmDev->rxPorts[portId].settings.param.specificParams.rxParams.liodnOffset;
-+ }
-+ param.p_fm = p_LnxWrpFmDev->h_Dev;
-+ param.id = FM_VSP_Config((t_FmVspParams *)&param);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_vsp_params_t compat_param;
-+
-+ memset(&compat_param, 0, sizeof(compat_param));
-+ compat_copy_fm_vsp_params(&compat_param, &param, COMPAT_K_TO_US);
-+
-+ if (copy_to_user(compat_ptr(arg), &compat_param, sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ else
-+#endif
-+ if (copy_to_user((void *)arg, &param, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_VSP_INIT_COMPAT:
-+#endif
-+ case FM_IOC_VSP_INIT:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0, sizeof(ioc_fm_obj_t));
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id,
-+ (ioc_compat_fm_obj_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_obj_t)))
-+ break;
-+ id.obj = compat_pcd_id2ptr(compat_id.obj);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg,
-+ sizeof(ioc_fm_obj_t)))
-+ break;
-+ }
-+
-+ return FM_VSP_Init(id.obj);
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_VSP_FREE_COMPAT:
-+#endif
-+ case FM_IOC_VSP_FREE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0, sizeof(ioc_fm_obj_t));
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id,
-+ (ioc_compat_fm_obj_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_obj_t)))
-+ break;
-+ compat_obj_delete(&compat_id, &id);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg,
-+ sizeof(ioc_fm_obj_t)))
-+ break;
-+ }
-+
-+ return FM_VSP_Free(id.obj);
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_VSP_CONFIG_POOL_DEPLETION_COMPAT:
-+#endif
-+ case FM_IOC_VSP_CONFIG_POOL_DEPLETION:
-+ {
-+ ioc_fm_buf_pool_depletion_params_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_buf_pool_depletion_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_buf_pool_depletion_params(&compat_param, &param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ if (copy_from_user(&param, (void *)arg, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (FM_VSP_ConfigPoolDepletion(param.p_fm_vsp,
-+ (t_FmBufPoolDepletion *)&param.fm_buf_pool_depletion))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT_COMPAT:
-+#endif
-+ case FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT:
-+ {
-+ ioc_fm_buffer_prefix_content_params_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_buffer_prefix_content_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_buffer_prefix_content_params(&compat_param, &param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ if (copy_from_user(&param, (void *)arg, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (FM_VSP_ConfigBufferPrefixContent(param.p_fm_vsp,
-+ (t_FmBufferPrefixContent *)&param.fm_buffer_prefix_content))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_VSP_CONFIG_NO_SG_COMPAT:
-+#endif
-+ case FM_IOC_VSP_CONFIG_NO_SG:
-+ {
-+ ioc_fm_vsp_config_no_sg_params_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_vsp_config_no_sg_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_vsp_config_no_sg_params(&compat_param, &param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ if (copy_from_user(&param, (void *)arg, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (FM_VSP_ConfigNoScatherGather(param.p_fm_vsp, param.no_sg))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_VSP_GET_BUFFER_PRS_RESULT_COMPAT:
-+#endif
-+ case FM_IOC_VSP_GET_BUFFER_PRS_RESULT:
-+ {
-+ ioc_fm_vsp_prs_result_params_t param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_vsp_prs_result_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param, compat_ptr(arg), sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_vsp_prs_result_params(&compat_param, &param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ if (copy_from_user(&param, (void *)arg, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ /* this call just adds the parse results offset to p_data */
-+ param.p_data = FM_VSP_GetBufferPrsResult(param.p_fm_vsp, param.p_data);
-+
-+ if (!param.p_data)
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_vsp_prs_result_params_t compat_param;
-+
-+ memset(&compat_param, 0, sizeof(compat_param));
-+ compat_copy_fm_vsp_prs_result_params(&compat_param, &param, COMPAT_K_TO_US);
-+
-+ if (copy_to_user(compat_ptr(arg), &compat_param, sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ else
-+#endif
-+ if (copy_to_user((void *)arg, &param, sizeof(param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+#warning "feature not supported!"
-+#if defined(CONFIG_COMPAT)
-+ case FM_PCD_IOC_STATISTICS_SET_NODE_COMPAT:
-+#endif
-+ case FM_PCD_IOC_STATISTICS_SET_NODE:
-+ {
-+/* ioc_fm_pcd_stats_params_t param;
-+ ...
-+ param->id = FM_PCD_StatisticsSetNode(p_LnxWrpFmDev->h_PcdDev,
-+ (t_FmPcdStatsParams *)&param);
-+*/
-+ err = E_NOT_SUPPORTED;
-+ break;
-+ }
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("invalid ioctl: cmd:0x%08x(type:0x%02x, nr: %d.\n",
-+ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)));
-+ }
-+
-+ if (err)
-+ RETURN_ERROR(MINOR, err, ("IOCTL FM PCD"));
-+
-+ return E_OK;
-+}
-+
-+void FM_Get_Api_Version(ioc_fm_api_version_t *p_version)
-+{
-+ p_version->version.major = FMD_API_VERSION_MAJOR;
-+ p_version->version.minor = FMD_API_VERSION_MINOR;
-+ p_version->version.respin = FMD_API_VERSION_RESPIN;
-+ p_version->version.reserved = 0;
-+}
-+
-+t_Error LnxwrpFmIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat)
-+{
-+ t_Error err = E_OK;
-+
-+ switch (cmd)
-+ {
-+ case FM_IOC_SET_PORTS_BANDWIDTH:
-+ {
-+ ioc_fm_port_bandwidth_params *param;
-+
-+ param = (ioc_fm_port_bandwidth_params*) XX_Malloc(sizeof(ioc_fm_port_bandwidth_params));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_port_bandwidth_params));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_bandwidth_params*)compat_ptr(arg), sizeof(ioc_fm_port_bandwidth_params)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_bandwidth_params*)arg, sizeof(ioc_fm_port_bandwidth_params)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_SetPortsBandwidth(p_LnxWrpFmDev->h_Dev, (t_FmPortsBandwidthParams*) param);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_IOC_GET_REVISION:
-+ {
-+ ioc_fm_revision_info_t *param;
-+
-+ param = (ioc_fm_revision_info_t *) XX_Malloc(sizeof(ioc_fm_revision_info_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ FM_GetRevision(p_LnxWrpFmDev->h_Dev, (t_FmRevisionInfo*)param);
-+ /* This one never returns anything other than E_OK */
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_to_user((ioc_fm_revision_info_t *)compat_ptr(arg),
-+ param,
-+ sizeof(ioc_fm_revision_info_t))){
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_revision_info_t *)arg,
-+ param,
-+ sizeof(ioc_fm_revision_info_t))){
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
-+ }
-+ }
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_IOC_SET_COUNTER:
-+ {
-+ ioc_fm_counters_params_t *param;
-+
-+ param = (ioc_fm_counters_params_t *) XX_Malloc(sizeof(ioc_fm_counters_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_counters_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_counters_params_t *)compat_ptr(arg), sizeof(ioc_fm_counters_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_counters_params_t *)arg, sizeof(ioc_fm_counters_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_ModifyCounter(p_LnxWrpFmDev->h_Dev, param->cnt, param->val);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_IOC_GET_COUNTER:
-+ {
-+ ioc_fm_counters_params_t *param;
-+
-+ param = (ioc_fm_counters_params_t *) XX_Malloc(sizeof(ioc_fm_counters_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
-+
-+ memset(param, 0, sizeof(ioc_fm_counters_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_counters_params_t *)compat_ptr(arg), sizeof(ioc_fm_counters_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_counters_params_t *)arg, sizeof(ioc_fm_counters_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ param->val = FM_GetCounter(p_LnxWrpFmDev->h_Dev, param->cnt);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_to_user((ioc_fm_counters_params_t *)compat_ptr(arg), param, sizeof(ioc_fm_counters_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_counters_params_t *)arg, param, sizeof(ioc_fm_counters_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_IOC_FORCE_INTR:
-+ {
-+ ioc_fm_exceptions param;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (get_user(param, (ioc_fm_exceptions*) compat_ptr(arg)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ else
-+#endif
-+ {
-+ if (get_user(param, (ioc_fm_exceptions*)arg))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_ForceIntr(p_LnxWrpFmDev->h_Dev, (e_FmExceptions)param);
-+ break;
-+ }
-+
-+ case FM_IOC_GET_API_VERSION:
-+ {
-+ ioc_fm_api_version_t version;
-+
-+ FM_Get_Api_Version(&version);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_to_user(
-+ (ioc_fm_api_version_t *)compat_ptr(arg),
-+ &version, sizeof(version)))
-+ err = E_READ_FAILED;
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_api_version_t *)arg,
-+ &version, sizeof(version)))
-+ err = E_READ_FAILED;
-+ }
-+ }
-+ break;
-+
-+ case FM_IOC_CTRL_MON_START:
-+ {
-+ FM_CtrlMonStart(p_LnxWrpFmDev->h_Dev);
-+ }
-+ break;
-+
-+ case FM_IOC_CTRL_MON_STOP:
-+ {
-+ FM_CtrlMonStop(p_LnxWrpFmDev->h_Dev);
-+ }
-+ break;
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_IOC_CTRL_MON_GET_COUNTERS_COMPAT:
-+#endif
-+ case FM_IOC_CTRL_MON_GET_COUNTERS:
-+ {
-+ ioc_fm_ctrl_mon_counters_params_t param;
-+ t_FmCtrlMon mon;
-+
-+#if defined(CONFIG_COMPAT)
-+ ioc_compat_fm_ctrl_mon_counters_params_t compat_param;
-+
-+ if (compat)
-+ {
-+ if (copy_from_user(&compat_param, (void *)compat_ptr(arg),
-+ sizeof(compat_param)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ param.fm_ctrl_index = compat_param.fm_ctrl_index;
-+ param.p_mon = (fm_ctrl_mon_t *)compat_ptr(compat_param.p_mon);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&param, (void *)arg, sizeof(ioc_fm_ctrl_mon_counters_params_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ if (FM_CtrlMonGetCounters(p_LnxWrpFmDev->h_Dev, param.fm_ctrl_index, &mon))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (copy_to_user(param.p_mon, &mon, sizeof(t_FmCtrlMon)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ break;
-+
-+ default:
-+ return LnxwrpFmPcdIOCTL(p_LnxWrpFmDev, cmd, arg, compat);
-+ }
-+
-+ if (err)
-+ RETURN_ERROR(MINOR, E_INVALID_OPERATION, ("IOCTL FM"));
-+
-+ return E_OK;
-+}
-+
-+t_Error LnxwrpFmPortIOCTL(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev, unsigned int cmd, unsigned long arg, bool compat)
-+{
-+ t_Error err = E_OK;
-+
-+ _fm_ioctl_dbg("cmd:0x%08x(type:0x%02x, nr:%u).\n",
-+ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd) - 70);
-+
-+ switch (cmd)
-+ {
-+ case FM_PORT_IOC_DISABLE:
-+ FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev);
-+ /* deliberately ignoring error codes here */
-+ return E_OK;
-+
-+ case FM_PORT_IOC_ENABLE:
-+ FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev);
-+ /* deliberately ignoring error codes here */
-+ return E_OK;
-+
-+ case FM_PORT_IOC_SET_ERRORS_ROUTE:
-+ {
-+ ioc_fm_port_frame_err_select_t errs;
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (get_user(errs, (ioc_fm_port_frame_err_select_t*)compat_ptr(arg)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ else
-+#endif
-+ {
-+ if (get_user(errs, (ioc_fm_port_frame_err_select_t*)arg))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PORT_SetErrorsRoute(p_LnxWrpFmPortDev->h_Dev, (fmPortFrameErrSelect_t)errs);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_SET_RATE_LIMIT:
-+ {
-+ ioc_fm_port_rate_limit_t *param;
-+
-+ param = (ioc_fm_port_rate_limit_t *) XX_Malloc(sizeof(ioc_fm_port_rate_limit_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0, sizeof(ioc_fm_port_rate_limit_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_rate_limit_t *)compat_ptr(arg), sizeof(ioc_fm_port_rate_limit_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_rate_limit_t *)arg, sizeof(ioc_fm_port_rate_limit_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PORT_SetRateLimit(p_LnxWrpFmPortDev->h_Dev, (t_FmPortRateLimit *)param);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_REMOVE_RATE_LIMIT:
-+ FM_PORT_DeleteRateLimit(p_LnxWrpFmPortDev->h_Dev);
-+ /* deliberately ignoring error codes here */
-+ return E_OK;
-+
-+ case FM_PORT_IOC_ALLOC_PCD_FQIDS:
-+ {
-+ ioc_fm_port_pcd_fqids_params_t *param;
-+
-+ if (!p_LnxWrpFmPortDev->pcd_owner_params.cba)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("No one to listen on this PCD!!!"));
-+
-+ param = (ioc_fm_port_pcd_fqids_params_t *) XX_Malloc(sizeof(ioc_fm_port_pcd_fqids_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0, sizeof(ioc_fm_port_pcd_fqids_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_pcd_fqids_params_t *)compat_ptr(arg),
-+ sizeof(ioc_fm_port_pcd_fqids_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_pcd_fqids_params_t *)arg,
-+ sizeof(ioc_fm_port_pcd_fqids_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (p_LnxWrpFmPortDev->pcd_owner_params.cba(p_LnxWrpFmPortDev->pcd_owner_params.dev,
-+ param->num_fqids,
-+ param->alignment,
-+ &param->base_fqid))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("can't allocate fqids for PCD!!!"));
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_to_user((ioc_fm_port_pcd_fqids_params_t *)compat_ptr(arg),
-+ param, sizeof(ioc_fm_port_pcd_fqids_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_to_user((ioc_fm_port_pcd_fqids_params_t *)arg,
-+ param, sizeof(ioc_fm_port_pcd_fqids_params_t)))
-+ err = E_READ_FAILED;
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_FREE_PCD_FQIDS:
-+ {
-+ uint32_t base_fqid;
-+
-+ if (!p_LnxWrpFmPortDev->pcd_owner_params.cbf)
-+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("No one to listen on this PCD!!!"));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (get_user(base_fqid, (uint32_t*) compat_ptr(arg)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ else
-+#endif
-+ {
-+ if (get_user(base_fqid, (uint32_t*)arg))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ if (p_LnxWrpFmPortDev->pcd_owner_params.cbf(p_LnxWrpFmPortDev->pcd_owner_params.dev, base_fqid))
-+ err = E_WRITE_FAILED;
-+
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PORT_IOC_SET_PCD_COMPAT:
-+#endif
-+ case FM_PORT_IOC_SET_PCD:
-+ {
-+ ioc_fm_port_pcd_params_t *port_pcd_params;
-+ ioc_fm_port_pcd_prs_params_t *port_pcd_prs_params;
-+ ioc_fm_port_pcd_cc_params_t *port_pcd_cc_params;
-+ ioc_fm_port_pcd_kg_params_t *port_pcd_kg_params;
-+ ioc_fm_port_pcd_plcr_params_t *port_pcd_plcr_params;
-+
-+ port_pcd_params = (ioc_fm_port_pcd_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_port_pcd_params_t) +
-+ sizeof(ioc_fm_port_pcd_prs_params_t) +
-+ sizeof(ioc_fm_port_pcd_cc_params_t) +
-+ sizeof(ioc_fm_port_pcd_kg_params_t) +
-+ sizeof(ioc_fm_port_pcd_plcr_params_t));
-+ if (!port_pcd_params)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(port_pcd_params, 0,
-+ sizeof(ioc_fm_port_pcd_params_t) +
-+ sizeof(ioc_fm_port_pcd_prs_params_t) +
-+ sizeof(ioc_fm_port_pcd_cc_params_t) +
-+ sizeof(ioc_fm_port_pcd_kg_params_t) +
-+ sizeof(ioc_fm_port_pcd_plcr_params_t));
-+
-+ port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (port_pcd_params + 1);
-+ port_pcd_cc_params = (ioc_fm_port_pcd_cc_params_t *) (port_pcd_prs_params + 1);
-+ port_pcd_kg_params = (ioc_fm_port_pcd_kg_params_t *) (port_pcd_cc_params + 1);
-+ port_pcd_plcr_params = (ioc_fm_port_pcd_plcr_params_t *) (port_pcd_kg_params + 1);
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_port_pcd_params_t *compat_port_pcd_params;
-+ ioc_fm_port_pcd_prs_params_t *same_port_pcd_prs_params;
-+ ioc_compat_fm_port_pcd_cc_params_t *compat_port_pcd_cc_params;
-+ ioc_compat_fm_port_pcd_kg_params_t *compat_port_pcd_kg_params;
-+ ioc_compat_fm_port_pcd_plcr_params_t *compat_port_pcd_plcr_params;
-+
-+ compat_port_pcd_params = (ioc_compat_fm_port_pcd_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_port_pcd_params_t) +
-+ sizeof(ioc_fm_port_pcd_prs_params_t) +
-+ sizeof(ioc_compat_fm_port_pcd_cc_params_t) +
-+ sizeof(ioc_compat_fm_port_pcd_kg_params_t) +
-+ sizeof(ioc_compat_fm_port_pcd_plcr_params_t));
-+ if (!compat_port_pcd_params)
-+ {
-+ XX_Free(port_pcd_params);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+ }
-+
-+ memset(compat_port_pcd_params, 0,
-+ sizeof(ioc_compat_fm_port_pcd_params_t) +
-+ sizeof(ioc_fm_port_pcd_prs_params_t) +
-+ sizeof(ioc_compat_fm_port_pcd_cc_params_t) +
-+ sizeof(ioc_compat_fm_port_pcd_kg_params_t) +
-+ sizeof(ioc_compat_fm_port_pcd_plcr_params_t));
-+ same_port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (compat_port_pcd_params + 1);
-+ compat_port_pcd_cc_params = (ioc_compat_fm_port_pcd_cc_params_t *) (same_port_pcd_prs_params + 1);
-+ compat_port_pcd_kg_params = (ioc_compat_fm_port_pcd_kg_params_t *) (compat_port_pcd_cc_params + 1);
-+ compat_port_pcd_plcr_params = (ioc_compat_fm_port_pcd_plcr_params_t *) (compat_port_pcd_kg_params + 1);
-+
-+ if (copy_from_user(compat_port_pcd_params,
-+ (ioc_compat_fm_port_pcd_params_t*) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_port_pcd_params_t)))
-+ err = E_WRITE_FAILED;
-+
-+ while (!err) /* pseudo-while */
-+ {
-+ /* set pointers from where to copy from: */
-+ port_pcd_params->p_prs_params = compat_ptr(compat_port_pcd_params->p_prs_params); /* same structure */
-+ port_pcd_params->p_cc_params = compat_ptr(compat_port_pcd_params->p_cc_params);
-+ port_pcd_params->p_kg_params = compat_ptr(compat_port_pcd_params->p_kg_params);
-+ port_pcd_params->p_plcr_params = compat_ptr(compat_port_pcd_params->p_plcr_params);
-+ port_pcd_params->p_ip_reassembly_manip = compat_ptr(compat_port_pcd_params->p_ip_reassembly_manip);
-+#if (DPAA_VERSION >= 11)
-+ port_pcd_params->p_capwap_reassembly_manip = compat_ptr(compat_port_pcd_params->p_capwap_reassembly_manip);
-+#endif
-+ /* the prs member is the same; there is no compat structure, so a plain memcpy is enough */
-+ if (port_pcd_params->p_prs_params)
-+ {
-+ if (copy_from_user(same_port_pcd_prs_params,
-+ port_pcd_params->p_prs_params,
-+ sizeof(ioc_fm_port_pcd_prs_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ memcpy(port_pcd_prs_params, same_port_pcd_prs_params, sizeof(ioc_fm_port_pcd_prs_params_t));
-+ port_pcd_params->p_prs_params = port_pcd_prs_params;
-+ }
-+
-+ if (port_pcd_params->p_cc_params)
-+ {
-+ if (copy_from_user(compat_port_pcd_cc_params,
-+ port_pcd_params->p_cc_params,
-+ sizeof(ioc_compat_fm_port_pcd_cc_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ port_pcd_params->p_cc_params = port_pcd_cc_params;
-+ }
-+
-+ if (port_pcd_params->p_kg_params)
-+ {
-+ if (copy_from_user(compat_port_pcd_kg_params,
-+ port_pcd_params->p_kg_params,
-+ sizeof(ioc_compat_fm_port_pcd_kg_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ port_pcd_params->p_kg_params = port_pcd_kg_params;
-+ }
-+
-+ if (port_pcd_params->p_plcr_params)
-+ {
-+ if (copy_from_user(compat_port_pcd_plcr_params,
-+ port_pcd_params->p_plcr_params,
-+ sizeof(ioc_compat_fm_port_pcd_plcr_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ port_pcd_params->p_plcr_params = port_pcd_plcr_params;
-+ }
-+
-+ break; /* pseudo-while: always run once! */
-+ }
-+
-+ if (!err)
-+ compat_copy_fm_port_pcd(compat_port_pcd_params, port_pcd_params, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_port_pcd_params);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(port_pcd_params,
-+ (ioc_fm_port_pcd_params_t*) arg,
-+ sizeof(ioc_fm_port_pcd_params_t)))
-+ err = E_WRITE_FAILED;
-+
-+ while (!err) /* pseudo-while */
-+ {
-+ if (port_pcd_params->p_prs_params)
-+ {
-+ if (copy_from_user(port_pcd_prs_params,
-+ port_pcd_params->p_prs_params,
-+ sizeof(ioc_fm_port_pcd_prs_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ port_pcd_params->p_prs_params = port_pcd_prs_params;
-+ }
-+
-+ if (port_pcd_params->p_cc_params)
-+ {
-+ if (copy_from_user(port_pcd_cc_params,
-+ port_pcd_params->p_cc_params,
-+ sizeof(ioc_fm_port_pcd_cc_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ port_pcd_params->p_cc_params = port_pcd_cc_params;
-+ }
-+
-+ if (port_pcd_params->p_kg_params)
-+ {
-+ if (copy_from_user(port_pcd_kg_params,
-+ port_pcd_params->p_kg_params,
-+ sizeof(ioc_fm_port_pcd_kg_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ port_pcd_params->p_kg_params = port_pcd_kg_params;
-+ }
-+
-+ if (port_pcd_params->p_plcr_params)
-+ {
-+ if (copy_from_user(port_pcd_plcr_params,
-+ port_pcd_params->p_plcr_params,
-+ sizeof(ioc_fm_port_pcd_plcr_params_t)))
-+ {
-+ err = E_WRITE_FAILED;
-+ break; /* from pseudo-while */
-+ }
-+
-+ port_pcd_params->p_plcr_params = port_pcd_plcr_params;
-+ }
-+
-+ break; /* pseudo-while: always run once! */
-+ }
-+ }
-+
-+ if (!err)
-+ err = FM_PORT_SetPCD(p_LnxWrpFmPortDev->h_Dev, (t_FmPortPcdParams*) port_pcd_params);
-+
-+ XX_Free(port_pcd_params);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_DELETE_PCD:
-+ err = FM_PORT_DeletePCD(p_LnxWrpFmPortDev->h_Dev);
-+ break;
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME_COMPAT:
-+#endif
-+ case FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME:
-+ {
-+ ioc_fm_pcd_kg_scheme_select_t *param;
-+
-+ param = (ioc_fm_pcd_kg_scheme_select_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_kg_scheme_select_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_select_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_kg_scheme_select_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_pcd_kg_scheme_select_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_select_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_select_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_pcd_kg_scheme_select_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_kg_scheme_select_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_kg_scheme_select(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_select_t *)arg,
-+ sizeof(ioc_fm_pcd_kg_scheme_select_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PORT_PcdKgModifyInitialScheme(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdKgSchemeSelect *)param);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE_COMPAT:
-+#endif
-+ case FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0 , sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ id.obj = compat_ptr(compat_id.obj);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PORT_PcdPlcrModifyInitialProfile(p_LnxWrpFmPortDev->h_Dev, id.obj);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PORT_IOC_PCD_KG_BIND_SCHEMES_COMPAT:
-+#endif
-+ case FM_PORT_IOC_PCD_KG_BIND_SCHEMES:
-+ {
-+ ioc_fm_pcd_port_schemes_params_t *param;
-+
-+ param = (ioc_fm_pcd_port_schemes_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_port_schemes_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0 , sizeof(ioc_fm_pcd_port_schemes_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_port_schemes_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param,
-+ (ioc_compat_fm_pcd_port_schemes_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_port_schemes_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_kg_schemes_params(&compat_param, param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_port_schemes_params_t *) arg,
-+ sizeof(ioc_fm_pcd_port_schemes_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PORT_PcdKgBindSchemes(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdPortSchemesParams *)param);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES_COMPAT:
-+#endif
-+ case FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES:
-+ {
-+ ioc_fm_pcd_port_schemes_params_t *param;
-+
-+ param = (ioc_fm_pcd_port_schemes_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_pcd_port_schemes_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0 , sizeof(ioc_fm_pcd_port_schemes_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_pcd_port_schemes_params_t compat_param;
-+
-+ if (copy_from_user(&compat_param,
-+ (ioc_compat_fm_pcd_port_schemes_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_pcd_port_schemes_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_pcd_kg_schemes_params(&compat_param, param, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_pcd_port_schemes_params_t *) arg,
-+ sizeof(ioc_fm_pcd_port_schemes_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = FM_PORT_PcdKgUnbindSchemes(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdPortSchemesParams *)param);
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_PCD_PLCR_ALLOC_PROFILES:
-+ {
-+ uint16_t num;
-+ if (get_user(num, (uint16_t*) arg))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ err = FM_PORT_PcdPlcrAllocProfiles(p_LnxWrpFmPortDev->h_Dev, num);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_PCD_PLCR_FREE_PROFILES:
-+ err = FM_PORT_PcdPlcrFreeProfiles(p_LnxWrpFmPortDev->h_Dev);
-+ break;
-+
-+ case FM_PORT_IOC_DETACH_PCD:
-+ err = FM_PORT_DetachPCD(p_LnxWrpFmPortDev->h_Dev);
-+ break;
-+
-+ case FM_PORT_IOC_ATTACH_PCD:
-+ err = FM_PORT_AttachPCD(p_LnxWrpFmPortDev->h_Dev);
-+ break;
-+
-+#if defined(CONFIG_COMPAT)
-+ case FM_PORT_IOC_PCD_CC_MODIFY_TREE_COMPAT:
-+#endif
-+ case FM_PORT_IOC_PCD_CC_MODIFY_TREE:
-+ {
-+ ioc_fm_obj_t id;
-+
-+ memset(&id, 0 , sizeof(ioc_fm_obj_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_obj_t compat_id;
-+
-+ if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ compat_copy_fm_port_pcd_modify_tree(&compat_id, &id, COMPAT_US_TO_K);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ err = FM_PORT_PcdCcModifyTree(p_LnxWrpFmPortDev->h_Dev, id.obj);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_ADD_CONGESTION_GRPS:
-+ case FM_PORT_IOC_REMOVE_CONGESTION_GRPS:
-+ {
-+ ioc_fm_port_congestion_groups_t *param;
-+
-+ param = (ioc_fm_port_congestion_groups_t*) XX_Malloc(sizeof(ioc_fm_port_congestion_groups_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0, sizeof(ioc_fm_port_congestion_groups_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (t_FmPortCongestionGrps*) compat_ptr(arg),
-+ sizeof(t_FmPortCongestionGrps)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif /* CONFIG_COMPAT */
-+ {
-+ if (copy_from_user(param, (t_FmPortCongestionGrps*) arg,
-+ sizeof(t_FmPortCongestionGrps)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ err = (cmd == FM_PORT_IOC_ADD_CONGESTION_GRPS)
-+ ? FM_PORT_AddCongestionGrps(p_LnxWrpFmPortDev->h_Dev, (t_FmPortCongestionGrps*) param)
-+ : FM_PORT_RemoveCongestionGrps(p_LnxWrpFmPortDev->h_Dev, (t_FmPortCongestionGrps*) param)
-+ ;
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR:
-+ case FM_PORT_IOC_REMOVE_RX_HASH_MAC_ADDR:
-+ {
-+ ioc_fm_port_mac_addr_params_t *param;
-+
-+ param = (ioc_fm_port_mac_addr_params_t*) XX_Malloc(
-+ sizeof(ioc_fm_port_mac_addr_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0, sizeof(ioc_fm_port_mac_addr_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_mac_addr_params_t*) compat_ptr(arg),
-+ sizeof(ioc_fm_port_mac_addr_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+ else
-+#endif /* CONFIG_COMPAT */
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_mac_addr_params_t*) arg,
-+ sizeof(ioc_fm_port_mac_addr_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ if (p_LnxWrpFmPortDev->pcd_owner_params.dev)
-+ {
-+ int id = -1;
-+
-+ switch(p_LnxWrpFmPortDev->settings.param.portType)
-+ {
-+ case e_FM_PORT_TYPE_RX:
-+ case e_FM_PORT_TYPE_TX:
-+ id = p_LnxWrpFmPortDev->id;
-+ break;
-+ case e_FM_PORT_TYPE_RX_10G:
-+ case e_FM_PORT_TYPE_TX_10G:
-+ id = p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_MACS;
-+ break;
-+ default:
-+ err = E_NOT_AVAILABLE;
-+ REPORT_ERROR(MINOR, err, ("Attempt to add/remove hash MAC addr. to/from MAC-less port!"));
-+ }
-+ if (id >= 0)
-+ {
-+ t_LnxWrpFmDev *fm = (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ t_Handle mac_handle = fm->macs[id].h_Dev;
-+
-+ err = (cmd == FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR)
-+ ? FM_MAC_AddHashMacAddr(mac_handle, (t_EnetAddr*) param)
-+ : FM_MAC_RemoveHashMacAddr(mac_handle, (t_EnetAddr*) param);
-+ }
-+ }
-+ else
-+ {
-+ err = E_NOT_AVAILABLE;
-+ REPORT_ERROR(MINOR, err, ("Port not initialized or other error!?!?"));
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_SET_TX_PAUSE_FRAMES:
-+ {
-+ t_LnxWrpFmDev *p_LnxWrpFmDev =
-+ (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ ioc_fm_port_tx_pause_frames_params_t param;
-+ int mac_id = p_LnxWrpFmPortDev->id;
-+
-+ if(&p_LnxWrpFmDev->txPorts[mac_id] != p_LnxWrpFmPortDev)
-+ mac_id += FM_MAX_NUM_OF_1G_MACS; /* 10G port */
-+
-+ if (copy_from_user(&param, (ioc_fm_port_tx_pause_frames_params_t *)arg,
-+ sizeof(ioc_fm_port_tx_pause_frames_params_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (p_LnxWrpFmDev && p_LnxWrpFmDev->macs[mac_id].h_Dev)
-+ {
-+ FM_MAC_SetTxPauseFrames(p_LnxWrpFmDev->macs[mac_id].h_Dev,
-+ param.priority,
-+ param.pause_time,
-+ param.thresh_time);
-+ }
-+ else
-+ {
-+ err = E_NOT_AVAILABLE;
-+ REPORT_ERROR(MINOR, err, ("Port not initialized or other error!"));
-+ }
-+
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_CONFIG_BUFFER_PREFIX_CONTENT:
-+ {
-+ ioc_fm_buffer_prefix_content_t *param;
-+
-+ param = (ioc_fm_buffer_prefix_content_t*) XX_Malloc(sizeof(ioc_fm_buffer_prefix_content_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0, sizeof(ioc_fm_buffer_prefix_content_t));
-+
-+ if (copy_from_user(param, (ioc_fm_buffer_prefix_content_t*) arg,
-+ sizeof(ioc_fm_buffer_prefix_content_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ if (FM_PORT_ConfigBufferPrefixContent(p_LnxWrpFmPortDev->h_Dev,
-+ (t_FmBufferPrefixContent *)param))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+
-+#if (DPAA_VERSION >= 11)
-+#if defined(CONFIG_COMPAT)
-+ case FM_PORT_IOC_VSP_ALLOC_COMPAT:
-+#endif
-+ case FM_PORT_IOC_VSP_ALLOC:
-+ {
-+ ioc_fm_port_vsp_alloc_params_t *param;
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ t_LnxWrpFmPortDev *p_LnxWrpFmTxPortDev;
-+
-+ param = (ioc_fm_port_vsp_alloc_params_t *) XX_Malloc(
-+ sizeof(ioc_fm_port_vsp_alloc_params_t));
-+ if (!param)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+
-+ memset(param, 0, sizeof(ioc_fm_port_vsp_alloc_params_t));
-+
-+#if defined(CONFIG_COMPAT)
-+ if (compat)
-+ {
-+ ioc_compat_fm_port_vsp_alloc_params_t *compat_param;
-+
-+ compat_param = (ioc_compat_fm_port_vsp_alloc_params_t *) XX_Malloc(
-+ sizeof(ioc_compat_fm_port_vsp_alloc_params_t));
-+ if (!compat_param)
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT"));
-+ }
-+
-+ memset(compat_param, 0, sizeof(ioc_compat_fm_port_vsp_alloc_params_t));
-+ if (copy_from_user(compat_param,
-+ (ioc_compat_fm_port_vsp_alloc_params_t *) compat_ptr(arg),
-+ sizeof(ioc_compat_fm_port_vsp_alloc_params_t)))
-+ {
-+ XX_Free(compat_param);
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ compat_copy_fm_port_vsp_alloc_params(compat_param, param, COMPAT_US_TO_K);
-+
-+ XX_Free(compat_param);
-+ }
-+ else
-+#endif
-+ {
-+ if (copy_from_user(param, (ioc_fm_port_vsp_alloc_params_t *)arg,
-+ sizeof(ioc_fm_port_vsp_alloc_params_t)))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+ }
-+
-+ /* Userspace may not have the Tx port t_handle when issuing the IOCTL */
-+ if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX ||
-+ p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX_10G)
-+ {
-+ /* Determine the Tx port t_Handle from the Rx port id */
-+ p_LnxWrpFmDev = p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ p_LnxWrpFmTxPortDev = &p_LnxWrpFmDev->txPorts[p_LnxWrpFmPortDev->id];
-+ param->p_fm_tx_port = p_LnxWrpFmTxPortDev->h_Dev;
-+ }
-+
-+ if (FM_PORT_VSPAlloc(p_LnxWrpFmPortDev->h_Dev, (t_FmPortVSPAllocParams *)param))
-+ {
-+ XX_Free(param);
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+ }
-+
-+ XX_Free(param);
-+ break;
-+ }
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+ case FM_PORT_IOC_GET_MAC_STATISTICS:
-+ {
-+ t_LnxWrpFmDev *p_LnxWrpFmDev =
-+ (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ ioc_fm_port_mac_statistics_t param;
-+ int mac_id = p_LnxWrpFmPortDev->id;
-+
-+ if (!p_LnxWrpFmDev)
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
-+
-+ if (&p_LnxWrpFmDev->txPorts[mac_id] != p_LnxWrpFmPortDev &&
-+ &p_LnxWrpFmDev->rxPorts[mac_id] != p_LnxWrpFmPortDev)
-+ mac_id += FM_MAX_NUM_OF_1G_MACS; /* 10G port */
-+
-+ if (!p_LnxWrpFmDev->macs[mac_id].h_Dev)
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
-+
-+ if (FM_MAC_GetStatistics(p_LnxWrpFmDev->macs[mac_id].h_Dev,
-+ (t_FmMacStatistics *)&param))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (copy_to_user((ioc_fm_port_mac_statistics_t *)arg, &param,
-+ sizeof(ioc_fm_port_mac_statistics_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_GET_MAC_FRAME_SIZE_COUNTERS:
-+ {
-+ t_LnxWrpFmDev *p_LnxWrpFmDev =
-+ (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ ioc_fm_port_mac_frame_size_counters_t param;
-+ t_FmMacFrameSizeCounters frameSizeCounters;
-+ int mac_id = p_LnxWrpFmPortDev->id;
-+
-+ if (!p_LnxWrpFmDev)
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
-+
-+ if (&p_LnxWrpFmDev->txPorts[mac_id] != p_LnxWrpFmPortDev &&
-+ &p_LnxWrpFmDev->rxPorts[mac_id] != p_LnxWrpFmPortDev)
-+ mac_id += FM_MAX_NUM_OF_1G_MACS; /* 10G port */
-+
-+ if (!p_LnxWrpFmDev->macs[mac_id].h_Dev)
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
-+
-+ if (copy_from_user(&param, (ioc_fm_port_mac_frame_size_counters_t *)arg,
-+ sizeof(ioc_fm_port_mac_frame_size_counters_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (FM_MAC_GetFrameSizeCounters(p_LnxWrpFmDev->macs[mac_id].h_Dev,
-+ &frameSizeCounters, param.type))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ param.count_pkts_64 = frameSizeCounters.count_pkts_64;
-+ param.count_pkts_65_to_127 = frameSizeCounters.count_pkts_65_to_127;
-+ param.count_pkts_128_to_255 = frameSizeCounters.count_pkts_128_to_255;
-+ param.count_pkts_256_to_511 = frameSizeCounters.count_pkts_256_to_511;
-+ param.count_pkts_512_to_1023 = frameSizeCounters.count_pkts_512_to_1023;
-+ param.count_pkts_1024_to_1518 = frameSizeCounters.count_pkts_1024_to_1518;
-+ param.count_pkts_1519_to_1522 = frameSizeCounters.count_pkts_1519_to_1522;
-+
-+ if (copy_to_user((ioc_fm_port_mac_frame_size_counters_t *)arg, &param,
-+ sizeof(ioc_fm_port_mac_frame_size_counters_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+
-+ case FM_PORT_IOC_GET_BMI_COUNTERS:
-+ {
-+ t_LnxWrpFmDev *p_LnxWrpFmDev =
-+ (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ ioc_fm_port_bmi_stats_t param;
-+
-+ if (!p_LnxWrpFmDev)
-+ RETURN_ERROR(MINOR, E_NOT_AVAILABLE, ("Port not initialized or other error!"));
-+
-+ if (FM_PORT_GetBmiCounters(p_LnxWrpFmPortDev->h_Dev,
-+ (t_FmPortBmiStats *)&param))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ if (copy_to_user((ioc_fm_port_bmi_stats_t *)arg, &param,
-+ sizeof(ioc_fm_port_bmi_stats_t)))
-+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
-+
-+ break;
-+ }
-+
-+ default:
-+ RETURN_ERROR(MINOR, E_INVALID_SELECTION,
-+ ("invalid ioctl: cmd:0x%08x(type:0x%02x, nr:0x%02x.\n",
-+ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)));
-+ }
-+
-+ if (err)
-+ RETURN_ERROR(MINOR, E_INVALID_OPERATION, ("IOCTL FM PORT"));
-+
-+ return E_OK;
-+}
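The FM_PORT_IOC_SET_PCD case above chains several optional copy_from_user() calls inside a run-once "pseudo-while" loop, so any failed copy can break out to a single shared cleanup path instead of repeating XX_Free() at every exit. The following is a minimal standalone sketch of that control-flow pattern only; the names (demo_copy, demo_set) are hypothetical and are not part of the driver's API.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for copy_from_user(): returns 0 on success */
static int demo_copy(void *dst, const void *src, size_t len)
{
    if (!src)
        return -1;
    memcpy(dst, src, len);
    return 0;
}

static int demo_set(const void *u_main, const void *u_opt)
{
    char *buf = calloc(1, 64);
    int err = 0;

    if (!buf)
        return -ENOMEM;

    if (demo_copy(buf, u_main, 32))
        err = -EFAULT;

    while (!err) {      /* pseudo-while: the body runs at most once */
        if (u_opt && demo_copy(buf + 32, u_opt, 32)) {
            err = -EFAULT;
            break;      /* from pseudo-while */
        }
        /* ...further optional sub-structures would be copied here... */
        break;          /* always leave after one pass */
    }

    /* single cleanup path, mirroring the XX_Free() calls in the handler */
    free(buf);
    return err;
}

int main(void)
{
    char m[32] = "main block", o[32] = "optional block";

    return demo_set(m, o) ? 1 : 0;
}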
-+
-+/*****************************************************************************/
-+/* API routines for the FM Linux Device */
-+/*****************************************************************************/
-+
-+static int fm_open(struct inode *inode, struct file *file)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = NULL;
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = NULL;
-+ unsigned int major = imajor(inode);
-+ unsigned int minor = iminor(inode);
-+ struct device_node *fm_node;
-+ static struct of_device_id fm_node_of_match[] = {
-+ { .compatible = "fsl,fman", },
-+ { /* end of list */ },
-+ };
-+
-+ DBG(TRACE, ("Opening minor - %d - ", minor));
-+
-+ if (file->private_data != NULL)
-+ return 0;
-+
-+ /* Get all the FM nodes */
-+ for_each_matching_node(fm_node, fm_node_of_match) {
-+ struct platform_device *of_dev;
-+
-+ of_dev = of_find_device_by_node(fm_node);
-+ if (unlikely(of_dev == NULL)) {
-+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!"));
-+ return -ENXIO;
-+ }
-+
-+ p_LnxWrpFmDev = (t_LnxWrpFmDev *)fm_bind(&of_dev->dev);
-+ if (p_LnxWrpFmDev->major == major)
-+ break;
-+ fm_unbind((struct fm *)p_LnxWrpFmDev);
-+ p_LnxWrpFmDev = NULL;
-+ }
-+
-+ if (!p_LnxWrpFmDev)
-+ return -ENODEV;
-+
-+ if (minor == DEV_FM_MINOR_BASE)
-+ file->private_data = p_LnxWrpFmDev;
-+ else if (minor == DEV_FM_PCD_MINOR_BASE)
-+ file->private_data = p_LnxWrpFmDev;
-+ else {
-+ if (minor == DEV_FM_OH_PORTS_MINOR_BASE)
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort;
-+ else if ((minor > DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE))
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->opPorts[minor-DEV_FM_OH_PORTS_MINOR_BASE-1];
-+ else if ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE))
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[minor-DEV_FM_RX_PORTS_MINOR_BASE];
-+ else if ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS))
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[minor-DEV_FM_TX_PORTS_MINOR_BASE];
-+ else
-+ return -EINVAL;
-+
-+ /* if trying to open a port, check that it is initialized */
-+ if (!p_LnxWrpFmPortDev->h_Dev)
-+ return -ENODEV;
-+
-+ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)fm_port_bind(p_LnxWrpFmPortDev->dev);
-+ file->private_data = p_LnxWrpFmPortDev;
-+ fm_unbind((struct fm *)p_LnxWrpFmDev);
-+ }
-+
-+ if (file->private_data == NULL)
-+ return -ENXIO;
-+
-+ return 0;
-+}
-+
-+static int fm_close(struct inode *inode, struct file *file)
-+{
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+ unsigned int minor = iminor(inode);
-+ int err = 0;
-+
-+ DBG(TRACE, ("Closing minor - %d - ", minor));
-+
-+ if ((minor == DEV_FM_MINOR_BASE) ||
-+ (minor == DEV_FM_PCD_MINOR_BASE))
-+ {
-+ p_LnxWrpFmDev = (t_LnxWrpFmDev*)file->private_data;
-+ if (!p_LnxWrpFmDev)
-+ return -ENODEV;
-+ fm_unbind((struct fm *)p_LnxWrpFmDev);
-+ }
-+ else if (((minor >= DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE)) ||
-+ ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE)) ||
-+ ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS)))
-+ {
-+ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)file->private_data;
-+ if (!p_LnxWrpFmPortDev)
-+ return -ENODEV;
-+ fm_port_unbind((struct fm_port *)p_LnxWrpFmPortDev);
-+ }
-+
-+ return err;
-+}
-+
-+static int fm_ioctls(unsigned int minor, struct file *file, unsigned int cmd, unsigned long arg, bool compat)
-+{
-+ DBG(TRACE, ("IOCTL minor - %u, cmd - 0x%08x, arg - 0x%08lx \n", minor, cmd, arg));
-+
-+ if ((minor == DEV_FM_MINOR_BASE) ||
-+ (minor == DEV_FM_PCD_MINOR_BASE))
-+ {
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = ((t_LnxWrpFmDev*)file->private_data);
-+ if (!p_LnxWrpFmDev)
-+ return -ENODEV;
-+ if (LnxwrpFmIOCTL(p_LnxWrpFmDev, cmd, arg, compat))
-+ return -EFAULT;
-+ }
-+ else if (((minor >= DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE)) ||
-+ ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE)) ||
-+ ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS)))
-+ {
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = ((t_LnxWrpFmPortDev*)file->private_data);
-+ if (!p_LnxWrpFmPortDev)
-+ return -ENODEV;
-+ if (LnxwrpFmPortIOCTL(p_LnxWrpFmPortDev, cmd, arg, compat))
-+ return -EFAULT;
-+ }
-+ else
-+ {
-+ REPORT_ERROR(MINOR, E_INVALID_VALUE, ("minor"));
-+ return -ENODEV;
-+ }
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_COMPAT
-+static long fm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
-+ long res;
-+
-+ fm_mutex_lock();
-+ res = fm_ioctls(minor, file, cmd, arg, true);
-+ fm_mutex_unlock();
-+
-+ return res;
-+}
-+#endif
-+
-+static long fm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
-+ long res;
-+
-+ fm_mutex_lock();
-+ res = fm_ioctls(minor, file, cmd, arg, false);
-+ fm_mutex_unlock();
-+
-+ return res;
-+}
-+
-+/* Globals for FM character device */
-+struct file_operations fm_fops =
-+{
-+ .owner = THIS_MODULE,
-+ .unlocked_ioctl = fm_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = fm_compat_ioctl,
-+#endif
-+ .open = fm_open,
-+ .release = fm_close,
-+};
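The fm_fops table above is what exposes these handlers through the FM character device: 64-bit callers enter via fm_ioctl(), while 32-bit callers on a 64-bit kernel go through fm_compat_ioctl(), which passes compat=true so the wrappers translate the 32-bit structure layouts. Below is a minimal userspace sketch driving one of the simpler commands; it assumes the SDK's fm_ioctls.h is available to userspace and that a device node such as /dev/fm0 exists for the FM minor (the node name is an assumption of this example, not taken from the patch).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "fm_ioctls.h"   /* assumed to provide FM_IOC_GET_API_VERSION, ioc_fm_api_version_t */

int main(void)
{
    ioc_fm_api_version_t ver;
    int fd = open("/dev/fm0", O_RDWR);   /* device node name is an assumption */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* dispatched by minor number to LnxwrpFmIOCTL(), FM_IOC_GET_API_VERSION case */
    if (ioctl(fd, FM_IOC_GET_API_VERSION, &ver) < 0) {
        perror("ioctl");
        close(fd);
        return 1;
    }

    printf("FMD API version %u.%u.%u\n",
           (unsigned)ver.version.major,
           (unsigned)ver.version.minor,
           (unsigned)ver.version.respin);
    close(fd);
    return 0;
}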
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
-@@ -0,0 +1,1297 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_ioctls_fm_compat.c
-+
-+ @Description FM PCD compat functions
-+
-+*/
-+
-+#if !defined(CONFIG_COMPAT)
-+#error "missing COMPAT layer..."
-+#endif
-+
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/cdev.h>
-+#include <linux/device.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/io.h>
-+#include <linux/ioport.h>
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#ifndef CONFIG_FMAN_ARM
-+#include <sysdev/fsl_soc.h>
-+#endif
-+
-+#include "part_ext.h"
-+#include "fm_ioctls.h"
-+#include "fm_pcd_ioctls.h"
-+#include "fm_port_ioctls.h"
-+#include "lnxwrp_ioctls_fm_compat.h"
-+
-+#if defined(FM_COMPAT_DBG)
-+static void hex_dump(void * p_addr, unsigned int size)
-+{
-+ int i;
-+
-+ for(i=0; i<size; i+=16)
-+ {
-+ printk("%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", p_addr + i,
-+ *(unsigned int *)(p_addr + i),
-+ *(unsigned int *)(p_addr + i + 4),
-+ *(unsigned int *)(p_addr + i + 8),
-+ *(unsigned int *)(p_addr + i +12)
-+ );
-+ }
-+}
-+#endif
-+
-+/* mapping kernel pointers w/ UserSpace id's { */
-+struct map_node {
-+ void *ptr;
-+ u8 node_type;
-+};
-+
-+static struct map_node compat_ptr2id_array[COMPAT_PTR2ID_ARRAY_MAX] = {{NULL},{FM_MAP_TYPE_UNSPEC}};
-+
-+void compat_del_ptr2id(void *p, enum fm_map_node_type node_type)
-+{
-+ compat_uptr_t k;
-+
-+ _fm_cpt_dbg(COMPAT_GENERIC, "delete (%p)\n", p);
-+
-+ for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++)
-+ if(compat_ptr2id_array[k].ptr == p){
-+ compat_ptr2id_array[k].ptr = NULL;
-+ compat_ptr2id_array[k].node_type = FM_MAP_TYPE_UNSPEC;
-+ }
-+}
-+EXPORT_SYMBOL(compat_del_ptr2id);
-+
-+compat_uptr_t compat_add_ptr2id(void *p, enum fm_map_node_type node_type)
-+{
-+ compat_uptr_t k;
-+
-+ _fm_cpt_dbg(COMPAT_GENERIC, " (%p) do ->\n", p);
-+
-+ if(!p)
-+ return 0;
-+
-+ for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++)
-+ if(compat_ptr2id_array[k].ptr == NULL)
-+ {
-+ compat_ptr2id_array[k].ptr = p;
-+ compat_ptr2id_array[k].node_type = node_type;
-+ _fm_cpt_dbg(COMPAT_GENERIC, "0x%08x \n", k | COMPAT_PTR2ID_WATERMARK);
-+ return k | COMPAT_PTR2ID_WATERMARK;
-+ }
-+
-+ printk(KERN_WARNING "FMan map list full! No more PCD space on kernel!\n");
-+ return 0;
-+}
-+EXPORT_SYMBOL(compat_add_ptr2id);
-+
-+compat_uptr_t compat_get_ptr2id(void *p, enum fm_map_node_type node_type)
-+{
-+ compat_uptr_t k;
-+
-+ _fm_cpt_dbg(COMPAT_GENERIC, " (%p) get -> \n", p);
-+
-+ for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++)
-+ if(compat_ptr2id_array[k].ptr == p &&
-+ compat_ptr2id_array[k].node_type == node_type) {
-+
-+ _fm_cpt_dbg(COMPAT_GENERIC, "0x%08x\n", k | COMPAT_PTR2ID_WATERMARK);
-+ return k | COMPAT_PTR2ID_WATERMARK;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(compat_get_ptr2id);
-+
-+void *compat_get_id2ptr(compat_uptr_t comp, enum fm_map_node_type node_type)
-+{
-+
-+ _fm_cpt_dbg(COMPAT_GENERIC, " (0x%08x) get -> \n", comp);
-+
-+ if((COMPAT_PTR2ID_WM_MASK & comp) != COMPAT_PTR2ID_WATERMARK) {
-+ _fm_cpt_dbg(COMPAT_GENERIC, "Error, invalid watermark (0x%08x)!\n\n", comp);
-+ dump_stack();
-+ return compat_ptr(comp);
-+ }
-+
-+ comp &= ~COMPAT_PTR2ID_WM_MASK;
-+
-+ if(((0 < comp) && (comp < COMPAT_PTR2ID_ARRAY_MAX) && (compat_ptr2id_array[comp].ptr != NULL)
-+ && compat_ptr2id_array[comp].node_type == node_type)) {
-+ _fm_cpt_dbg(COMPAT_GENERIC, "%p\n", compat_ptr2id_array[comp].ptr);
-+ return compat_ptr2id_array[comp].ptr;
-+ }
-+ return NULL;
-+}
-+EXPORT_SYMBOL(compat_get_id2ptr);
-+/* } mapping kernel pointers w/ UserSpace id's */
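The table above exists because a 64-bit kernel pointer cannot be handed back to a 32-bit process as a compat_uptr_t: each PCD object pointer is parked in compat_ptr2id_array and userspace only ever sees a small index tagged with COMPAT_PTR2ID_WATERMARK, which compat_get_id2ptr() later validates and translates back. A simplified, self-contained sketch of that idea follows; the names (demo_add, demo_get, DEMO_WATERMARK) are hypothetical, and the real driver additionally records a node type per slot.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX        64
#define DEMO_WATERMARK  0xface0000u   /* hypothetical; plays the role of COMPAT_PTR2ID_WATERMARK */
#define DEMO_WM_MASK    0xffff0000u

static void *demo_table[DEMO_MAX];    /* slot 0 stays free so that id 0 can mean "none" */

/* park a kernel-side pointer and hand back a small, watermarked 32-bit id */
static uint32_t demo_add(void *p)
{
    uint32_t k;

    if (!p)
        return 0;
    for (k = 1; k < DEMO_MAX; k++)
        if (!demo_table[k]) {
            demo_table[k] = p;
            return k | DEMO_WATERMARK;
        }
    return 0;                         /* table full */
}

/* translate an id back to the pointer; reject anything without the watermark */
static void *demo_get(uint32_t id)
{
    if ((id & DEMO_WM_MASK) != DEMO_WATERMARK)
        return NULL;
    id &= ~DEMO_WM_MASK;
    return (id > 0 && id < DEMO_MAX) ? demo_table[id] : NULL;
}

int main(void)
{
    int obj = 42;
    uint32_t id = demo_add(&obj);

    printf("id=0x%08x maps back to %p (object at %p)\n",
           (unsigned)id, demo_get(id), (void *)&obj);
    return 0;
}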
-+
-+void compat_obj_delete(
-+ ioc_compat_fm_obj_t *compat_id,
-+ ioc_fm_obj_t *id)
-+{
-+ id->obj = compat_pcd_id2ptr(compat_id->obj);
-+ compat_del_ptr2id(id->obj, FM_MAP_TYPE_PCD_NODE);
-+}
-+
-+static inline void compat_copy_fm_pcd_plcr_next_engine(
-+ ioc_compat_fm_pcd_plcr_next_engine_params_u *compat_param,
-+ ioc_fm_pcd_plcr_next_engine_params_u *param,
-+ ioc_fm_pcd_engine next_engine,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ switch (next_engine)
-+ {
-+ case e_IOC_FM_PCD_PLCR:
-+ if (compat == COMPAT_US_TO_K)
-+ param->p_profile = compat_pcd_id2ptr(compat_param->p_profile);
-+ else
-+ compat_param->p_profile = compat_pcd_ptr2id(param->p_profile);
-+ break;
-+ case e_IOC_FM_PCD_KG:
-+ if (compat == COMPAT_US_TO_K)
-+ param->p_direct_scheme = compat_pcd_id2ptr(compat_param->p_direct_scheme);
-+ else
-+ compat_param->p_direct_scheme = compat_pcd_ptr2id(param->p_direct_scheme);
-+ break;
-+ default:
-+ if (compat == COMPAT_US_TO_K)
-+ param->action = compat_param->action;
-+ else
-+ compat_param->action = param->action;
-+ break;
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_pcd_plcr_profile(
-+ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param,
-+ ioc_fm_pcd_plcr_profile_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->modify = compat_param->modify;
-+
-+ /* profile_select */
-+ if (!compat_param->modify)
-+ {
-+ param->profile_select.new_params.profile_type =
-+ compat_param->profile_select.new_params.profile_type;
-+ param->profile_select.new_params.p_fm_port =
-+ compat_ptr(compat_param->profile_select.new_params.p_fm_port);
-+ param->profile_select.new_params.relative_profile_id =
-+ compat_param->profile_select.new_params.relative_profile_id;
-+ }
-+ else
-+ param->profile_select.p_profile =
-+ compat_pcd_id2ptr(compat_param->profile_select.p_profile);
-+
-+ param->alg_selection = compat_param->alg_selection;
-+ param->color_mode = compat_param->color_mode;
-+
-+ /* both members of the union have the same size, so memcpy works */
-+ memcpy(&param->color, &compat_param->color, sizeof(param->color));
-+
-+ memcpy(&param->non_passthrough_alg_param,
-+ &compat_param->non_passthrough_alg_param,
-+ sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t));
-+
-+ param->next_engine_on_green = compat_param->next_engine_on_green;
-+ param->next_engine_on_yellow = compat_param->next_engine_on_yellow;
-+ param->next_engine_on_red = compat_param->next_engine_on_red;
-+
-+ param->trap_profile_on_flow_A = compat_param->trap_profile_on_flow_A;
-+ param->trap_profile_on_flow_B = compat_param->trap_profile_on_flow_B;
-+ param->trap_profile_on_flow_C = compat_param->trap_profile_on_flow_C;
-+ }
-+ else
-+ {
-+ compat_param->modify = param->modify;
-+
-+ /* profile_select */
-+ if (!param->modify)
-+ {
-+ compat_param->profile_select.new_params.profile_type =
-+ param->profile_select.new_params.profile_type;
-+ compat_param->profile_select.new_params.p_fm_port =
-+ ptr_to_compat(param->profile_select.new_params.p_fm_port);
-+ compat_param->profile_select.new_params.relative_profile_id =
-+ param->profile_select.new_params.relative_profile_id;
-+ }
-+ else
-+ compat_param->profile_select.p_profile =
-+ compat_pcd_ptr2id(param->profile_select.p_profile);
-+
-+ compat_param->alg_selection = param->alg_selection;
-+ compat_param->color_mode = param->color_mode;
-+
-+ /* both members of the union have the same size, so memcpy works */
-+ memcpy(&compat_param->color, &param->color, sizeof(compat_param->color));
-+
-+ memcpy(&compat_param->non_passthrough_alg_param,
-+ &param->non_passthrough_alg_param,
-+ sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t));
-+
-+ compat_param->next_engine_on_green = param->next_engine_on_green;
-+ compat_param->next_engine_on_yellow = param->next_engine_on_yellow;
-+ compat_param->next_engine_on_red = param->next_engine_on_red;
-+
-+ compat_param->trap_profile_on_flow_A = param->trap_profile_on_flow_A;
-+ compat_param->trap_profile_on_flow_B = param->trap_profile_on_flow_B;
-+ compat_param->trap_profile_on_flow_C = param->trap_profile_on_flow_C;
-+
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+
-+ compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_green,
-+ &param->params_on_green, param->next_engine_on_green, compat);
-+
-+ compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_yellow,
-+ &param->params_on_yellow, param->next_engine_on_yellow, compat);
-+
-+ compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_red,
-+ &param->params_on_red, param->next_engine_on_red, compat);
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+static inline void compat_copy_fm_pcd_cc_next_kg(
-+ ioc_compat_fm_pcd_cc_next_kg_params_t *compat_param,
-+ ioc_fm_pcd_cc_next_kg_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->new_fqid = compat_param->new_fqid;
-+ param->override_fqid = compat_param->override_fqid;
-+#if DPAA_VERSION >= 11
-+ param->new_relative_storage_profile_id = compat_param->new_relative_storage_profile_id;
-+#endif
-+ param->p_direct_scheme = compat_pcd_id2ptr(compat_param->p_direct_scheme);
-+ }
-+ else
-+ {
-+ compat_param->new_fqid = param->new_fqid;
-+ compat_param->override_fqid = param->override_fqid;
-+#if DPAA_VERSION >= 11
-+ compat_param->new_relative_storage_profile_id = param->new_relative_storage_profile_id;
-+#endif
-+ compat_param->p_direct_scheme = compat_pcd_ptr2id(param->p_direct_scheme);
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+static inline void compat_copy_fm_pcd_cc_next_cc(
-+ ioc_compat_fm_pcd_cc_next_cc_params_t *compat_param,
-+ ioc_fm_pcd_cc_next_cc_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ param->cc_node_id = compat_pcd_id2ptr(compat_param->cc_node_id);
-+ else
-+ compat_param->cc_node_id = compat_pcd_ptr2id(param->cc_node_id);
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+static inline void compat_copy_fm_pcd_cc_next_engine(
-+ ioc_compat_fm_pcd_cc_next_engine_params_t *compat_param,
-+ ioc_fm_pcd_cc_next_engine_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->next_engine = compat_param->next_engine;
-+ if (param->next_engine != e_IOC_FM_PCD_INVALID )
-+ _fm_cpt_dbg(compat, " param->next_engine = %i \n", param->next_engine);
-+
-+ switch (param->next_engine)
-+ {
-+#if DPAA_VERSION >= 11
-+ case e_IOC_FM_PCD_FR:
-+ param->params.fr_params.frm_replic_id = compat_pcd_id2ptr(compat_param->params.fr_params.frm_replic_id);
-+ break;
-+#endif /* DPAA_VERSION >= 11 */
-+ case e_IOC_FM_PCD_CC:
-+ param->manip_id = compat_pcd_id2ptr(compat_param->manip_id);
-+ compat_copy_fm_pcd_cc_next_cc(&compat_param->params.cc_params, &param->params.cc_params, compat);
-+ break;
-+ case e_IOC_FM_PCD_KG:
-+ param->manip_id = compat_pcd_id2ptr(compat_param->manip_id);
-+ compat_copy_fm_pcd_cc_next_kg(&compat_param->params.kg_params, &param->params.kg_params, compat);
-+ break;
-+ case e_IOC_FM_PCD_DONE:
-+ case e_IOC_FM_PCD_PLCR:
-+ param->manip_id = compat_pcd_id2ptr(compat_param->manip_id);
-+ default:
-+ memcpy(&param->params, &compat_param->params, sizeof(param->params));
-+ }
-+ param->statistics_en = compat_param->statistics_en;
-+ }
-+ else
-+ {
-+ compat_param->next_engine = param->next_engine;
-+
-+ switch (compat_param->next_engine)
-+ {
-+#if DPAA_VERSION >= 11
-+ case e_IOC_FM_PCD_FR:
-+ compat_param->params.fr_params.frm_replic_id = compat_pcd_ptr2id(param->params.fr_params.frm_replic_id);
-+ break;
-+#endif /* DPAA_VERSION >= 11 */
-+ case e_IOC_FM_PCD_CC:
-+ compat_param->manip_id = compat_pcd_ptr2id(param->manip_id);
-+ compat_copy_fm_pcd_cc_next_cc(&compat_param->params.cc_params, &param->params.cc_params, compat);
-+ break;
-+ case e_IOC_FM_PCD_KG:
-+ compat_param->manip_id = compat_pcd_ptr2id(param->manip_id);
-+ compat_copy_fm_pcd_cc_next_kg(&compat_param->params.kg_params, &param->params.kg_params, compat);
-+ break;
-+ case e_IOC_FM_PCD_DONE:
-+ case e_IOC_FM_PCD_PLCR:
-+ compat_param->manip_id = compat_pcd_ptr2id(param->manip_id);
-+ default:
-+ memcpy(&compat_param->params, &param->params, sizeof(compat_param->params));
-+ }
-+ compat_param->statistics_en = param->statistics_en;
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_pcd_cc_key(
-+ ioc_compat_fm_pcd_cc_key_params_t *compat_param,
-+ ioc_fm_pcd_cc_key_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->p_key = compat_ptr(compat_param->p_key);
-+ param->p_mask = compat_ptr(compat_param->p_mask);
-+ }
-+ else
-+ {
-+ compat_param->p_key = ptr_to_compat(param->p_key);
-+ compat_param->p_mask = ptr_to_compat(param->p_mask);
-+ }
-+
-+ compat_copy_fm_pcd_cc_next_engine(
-+ &compat_param->cc_next_engine_params,
-+ &param->cc_next_engine_params,
-+ compat);
-+}
-+
-+void compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(
-+ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ param->key_indx = compat_param->key_indx;
-+ param->key_size = compat_param->key_size;
-+ compat_copy_fm_pcd_cc_key(
-+ &compat_param->key_params,
-+ &param->key_params,
-+ compat);
-+ }
-+ else
-+ {
-+ compat_param->id = compat_pcd_ptr2id(param->id);
-+ compat_param->key_indx = param->key_indx;
-+ compat_param->key_size = param->key_size;
-+ compat_copy_fm_pcd_cc_key(
-+ &compat_param->key_params,
-+ &param->key_params,
-+ compat);
-+ }
-+}
-+
-+void compat_copy_fm_pcd_cc_node_modify_next_engine(
-+ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ param->key_indx = compat_param->key_indx;
-+ param->key_size = compat_param->key_size;
-+ }
-+ else
-+ {
-+ compat_param->id = compat_pcd_ptr2id(param->id);
-+ compat_param->key_indx = param->key_indx;
-+ compat_param->key_size = param->key_size;
-+ }
-+
-+ compat_copy_fm_pcd_cc_next_engine(
-+ &compat_param->cc_next_engine_params,
-+ &param->cc_next_engine_params,
-+ compat);
-+}
-+
-+void compat_fm_pcd_cc_tree_modify_next_engine(
-+ ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param,
-+ ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ param->grp_indx = compat_param->grp_indx;
-+ param->indx = compat_param->indx;
-+ }
-+ else
-+ {
-+ compat_param->id = compat_pcd_ptr2id(param->id);
-+ compat_param->grp_indx = param->grp_indx;
-+ compat_param->indx = param->indx;
-+ }
-+
-+ compat_copy_fm_pcd_cc_next_engine(
-+ &compat_param->cc_next_engine_params,
-+ &param->cc_next_engine_params,
-+ compat);
-+}
-+
-+void compat_copy_fm_pcd_hash_table(
-+ ioc_compat_fm_pcd_hash_table_params_t *compat_param,
-+ ioc_fm_pcd_hash_table_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->max_num_of_keys = compat_param->max_num_of_keys;
-+ param->statistics_mode = compat_param->statistics_mode;
-+ param->kg_hash_shift = compat_param->kg_hash_shift;
-+ param->hash_res_mask = compat_param->hash_res_mask;
-+ param->hash_shift = compat_param->hash_shift;
-+ param->match_key_size = compat_param->match_key_size;
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ }
-+ else
-+ {
-+ compat_param->max_num_of_keys = param->max_num_of_keys;
-+ compat_param->statistics_mode = param->statistics_mode;
-+ compat_param->kg_hash_shift = param->kg_hash_shift;
-+ compat_param->hash_res_mask = param->hash_res_mask;
-+ compat_param->hash_shift = param->hash_shift;
-+ compat_param->match_key_size = param->match_key_size;
-+
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+
-+ compat_copy_fm_pcd_cc_next_engine(
-+ &compat_param->cc_next_engine_params_for_miss,
-+ &param->cc_next_engine_params_for_miss,
-+ compat);
-+}
-+
-+void compat_copy_fm_pcd_cc_grp(
-+ ioc_compat_fm_pcd_cc_grp_params_t *compat_param,
-+ ioc_fm_pcd_cc_grp_params_t *param,
-+ uint8_t compat)
-+{
-+ int k;
-+
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->num_of_distinction_units = compat_param->num_of_distinction_units;
-+ memcpy(param->unit_ids, compat_param->unit_ids, IOC_FM_PCD_MAX_NUM_OF_CC_UNITS);
-+ }
-+ else
-+ {
-+ compat_param->num_of_distinction_units = param->num_of_distinction_units;
-+ memcpy(compat_param->unit_ids, param->unit_ids, IOC_FM_PCD_MAX_NUM_OF_CC_UNITS);
-+ }
-+
-+ for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP; k++)
-+ compat_copy_fm_pcd_cc_next_engine(
-+ &compat_param->next_engine_per_entries_in_grp[k],
-+ &param->next_engine_per_entries_in_grp[k],
-+ compat);
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_pcd_cc_tree(
-+ ioc_compat_fm_pcd_cc_tree_params_t *compat_param,
-+ ioc_fm_pcd_cc_tree_params_t *param,
-+ uint8_t compat)
-+{
-+ int k;
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->net_env_id = compat_pcd_id2ptr(compat_param->net_env_id);
-+ param->num_of_groups = compat_param->num_of_groups;
-+ }
-+ else
-+ {
-+ compat_param->net_env_id = compat_pcd_ptr2id(param->net_env_id);
-+ compat_param->num_of_groups = param->num_of_groups;
-+
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+
-+ for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS; k++)
-+ compat_copy_fm_pcd_cc_grp(
-+ &compat_param->fm_pcd_cc_group_params[k],
-+ &param->fm_pcd_cc_group_params[k],
-+ compat);
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_fm_pcd_prs_sw(
-+ ioc_compat_fm_pcd_prs_sw_params_t *compat_param,
-+ ioc_fm_pcd_prs_sw_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->override = compat_param->override;
-+ param->size = compat_param->size;
-+ param->base = compat_param->base;
-+ param->p_code = compat_ptr(compat_param->p_code);
-+ memcpy(param->sw_prs_data_params,compat_param->sw_prs_data_params,IOC_FM_PCD_PRS_NUM_OF_HDRS*sizeof(uint32_t));
-+ param->num_of_labels = compat_param->num_of_labels;
-+ memcpy(param->labels_table,compat_param->labels_table,IOC_FM_PCD_PRS_NUM_OF_LABELS*sizeof(ioc_fm_pcd_prs_label_params_t));
-+ }
-+}
-+
-+void compat_copy_fm_pcd_kg_scheme(
-+ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param,
-+ ioc_fm_pcd_kg_scheme_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg(compat," {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->modify = compat_param->modify;
-+
-+ /* scm_id */
-+ if (compat_param->modify)
-+ {
-+ param->scm_id.scheme_id = compat_pcd_id2ptr(compat_param->scm_id.scheme_id);
-+ _fm_cpt_dbg(compat," param->scm_id.scheme_id = %p \n", param->scm_id.scheme_id);
-+ }
-+ else
-+ param->scm_id.relative_scheme_id = compat_param->scm_id.relative_scheme_id;
-+
-+ param->always_direct = compat_param->always_direct;
-+ /* net_env_params */
-+ param->net_env_params.net_env_id = compat_pcd_id2ptr(compat_param->net_env_params.net_env_id);
-+ param->net_env_params.num_of_distinction_units = compat_param->net_env_params.num_of_distinction_units;
-+ memcpy(param->net_env_params.unit_ids,
-+ compat_param->net_env_params.unit_ids,
-+ IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+
-+ param->use_hash = compat_param->use_hash;
-+ memcpy(&param->key_extract_and_hash_params,
-+ &compat_param->key_extract_and_hash_params,
-+ sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t));
-+ param->bypass_fqid_generation = compat_param->bypass_fqid_generation;
-+ param->base_fqid = compat_param->base_fqid;
-+#if DPAA_VERSION >= 11
-+ param->override_storage_profile =
-+ compat_param->override_storage_profile;
-+ param->storage_profile = compat_param->storage_profile;
-+#endif
-+ param->num_of_used_extracted_ors = compat_param->num_of_used_extracted_ors;
-+ memcpy(param->extracted_ors,
-+ compat_param->extracted_ors,
-+ IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS * sizeof(ioc_fm_pcd_kg_extracted_or_params_t));
-+ param->next_engine = compat_param->next_engine;
-+
-+ /* kg_next_engine_params */
-+ if (param->next_engine == e_IOC_FM_PCD_CC)
-+ {
-+ param->kg_next_engine_params.cc.tree_id = compat_pcd_id2ptr(compat_param->kg_next_engine_params.cc.tree_id);
-+ param->kg_next_engine_params.cc.grp_id = compat_param->kg_next_engine_params.cc.grp_id;
-+ param->kg_next_engine_params.cc.plcr_next = compat_param->kg_next_engine_params.cc.plcr_next;
-+ param->kg_next_engine_params.cc.bypass_plcr_profile_generation
-+ = compat_param->kg_next_engine_params.cc.bypass_plcr_profile_generation;
-+ memcpy(&param->kg_next_engine_params.cc.plcr_profile,
-+ &compat_param->kg_next_engine_params.cc.plcr_profile,
-+ sizeof(ioc_fm_pcd_kg_plcr_profile_t));
-+ }
-+ else
-+ memcpy(&param->kg_next_engine_params,
-+ &compat_param->kg_next_engine_params,
-+ sizeof(param->kg_next_engine_params));
-+
-+ memcpy(&param->scheme_counter,
-+ &compat_param->scheme_counter,
-+ sizeof(ioc_fm_pcd_kg_scheme_counter_t));
-+ }
-+ else
-+ {
-+ compat_param->modify = param->modify;
-+
-+ /* scm_id */
-+ if (param->modify)
-+ compat_param->scm_id.scheme_id = compat_pcd_ptr2id(param->scm_id.scheme_id);
-+ else
-+ compat_param->scm_id.relative_scheme_id = param->scm_id.relative_scheme_id;
-+
-+ compat_param->always_direct = param->always_direct;
-+
-+ /* net_env_params */
-+ compat_param->net_env_params.net_env_id = compat_pcd_ptr2id(param->net_env_params.net_env_id);
-+ compat_param->net_env_params.num_of_distinction_units = param->net_env_params.num_of_distinction_units;
-+ memcpy(compat_param->net_env_params.unit_ids, param->net_env_params.unit_ids, IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+
-+ compat_param->use_hash = param->use_hash;
-+ memcpy(&compat_param->key_extract_and_hash_params, &param->key_extract_and_hash_params, sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t));
-+ compat_param->bypass_fqid_generation = param->bypass_fqid_generation;
-+ compat_param->base_fqid = param->base_fqid;
-+#if DPAA_VERSION >= 11
-+ compat_param->override_storage_profile =
-+ param->override_storage_profile;
-+ compat_param->storage_profile = param->storage_profile;
-+#endif
-+ compat_param->num_of_used_extracted_ors = param->num_of_used_extracted_ors;
-+ memcpy(compat_param->extracted_ors, param->extracted_ors, IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS * sizeof(ioc_fm_pcd_kg_extracted_or_params_t));
-+ compat_param->next_engine = param->next_engine;
-+
-+ /* kg_next_engine_params */
-+ if (compat_param->next_engine == e_IOC_FM_PCD_CC)
-+ {
-+ compat_param->kg_next_engine_params.cc.tree_id = compat_pcd_ptr2id(param->kg_next_engine_params.cc.tree_id);
-+ compat_param->kg_next_engine_params.cc.grp_id = param->kg_next_engine_params.cc.grp_id;
-+ compat_param->kg_next_engine_params.cc.plcr_next = param->kg_next_engine_params.cc.plcr_next;
-+ compat_param->kg_next_engine_params.cc.bypass_plcr_profile_generation
-+ = param->kg_next_engine_params.cc.bypass_plcr_profile_generation;
-+ memcpy(&compat_param->kg_next_engine_params.cc.plcr_profile,
-+ &param->kg_next_engine_params.cc.plcr_profile,
-+ sizeof(ioc_fm_pcd_kg_plcr_profile_t));
-+ }
-+ else
-+ memcpy(&compat_param->kg_next_engine_params, &param->kg_next_engine_params, sizeof(compat_param->kg_next_engine_params));
-+
-+ memcpy(&compat_param->scheme_counter, &param->scheme_counter, sizeof(ioc_fm_pcd_kg_scheme_counter_t));
-+
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+
-+ _fm_cpt_dbg(compat," ...->}\n");
-+}
-+
-+void compat_copy_fm_pcd_kg_scheme_spc(
-+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param,
-+ ioc_fm_pcd_kg_scheme_spc_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ param->val = compat_param->val;
-+ } else {
-+ compat_param->id = compat_pcd_ptr2id(param->id);
-+ compat_param->val = param->val;
-+ }
-+}
-+
-+
-+void compat_copy_fm_pcd_kg_scheme_select(
-+ ioc_compat_fm_pcd_kg_scheme_select_t *compat_param,
-+ ioc_fm_pcd_kg_scheme_select_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->direct = compat_param->direct;
-+ if (param->direct)
-+ param->scheme_id = compat_pcd_id2ptr(compat_param->scheme_id);
-+ }
-+}
-+
-+void compat_copy_fm_pcd_kg_schemes_params(
-+ ioc_compat_fm_pcd_port_schemes_params_t *compat_param,
-+ ioc_fm_pcd_port_schemes_params_t *param,
-+ uint8_t compat)
-+{
-+ int k;
-+
-+ if (compat == COMPAT_US_TO_K) {
-+ param->num_of_schemes = compat_param->num_of_schemes;
-+ for(k=0; k < compat_param->num_of_schemes; k++)
-+ param->scheme_ids[k] = compat_pcd_id2ptr(compat_param->scheme_ids[k]);
-+ }
-+}
-+
-+void compat_copy_fm_port_pcd_cc(
-+ ioc_compat_fm_port_pcd_cc_params_t *compat_cc_params ,
-+ ioc_fm_port_pcd_cc_params_t *p_cc_params,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K){
-+ p_cc_params->cc_tree_id = compat_pcd_id2ptr(compat_cc_params->cc_tree_id);
-+ }
-+}
-+
-+void compat_copy_fm_port_pcd_kg(
-+ ioc_compat_fm_port_pcd_kg_params_t *compat_param,
-+ ioc_fm_port_pcd_kg_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K){
-+ uint8_t k;
-+
-+ param->num_of_schemes = compat_param->num_of_schemes;
-+ for(k=0; k<compat_param->num_of_schemes; k++)
-+ param->scheme_ids[k] = compat_pcd_id2ptr(compat_param->scheme_ids[k]);
-+
-+ param->direct_scheme = compat_param->direct_scheme;
-+ if (param->direct_scheme)
-+ param->direct_scheme_id = compat_pcd_id2ptr(compat_param->direct_scheme_id);
-+ }
-+}
-+
-+void compat_copy_fm_port_pcd(
-+ ioc_compat_fm_port_pcd_params_t *compat_param,
-+ ioc_fm_port_pcd_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ ioc_fm_port_pcd_prs_params_t *same_port_pcd_prs_params;
-+ ioc_compat_fm_port_pcd_cc_params_t *compat_port_pcd_cc_params;
-+ ioc_compat_fm_port_pcd_kg_params_t *compat_port_pcd_kg_params;
-+ ioc_compat_fm_port_pcd_plcr_params_t *compat_port_pcd_plcr_params;
-+
-+ same_port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (compat_param + 1);
-+ compat_port_pcd_cc_params = (ioc_compat_fm_port_pcd_cc_params_t *) (same_port_pcd_prs_params + 1);
-+ compat_port_pcd_kg_params = (ioc_compat_fm_port_pcd_kg_params_t *) (compat_port_pcd_cc_params + 1);
-+ compat_port_pcd_plcr_params = (ioc_compat_fm_port_pcd_plcr_params_t *) (compat_port_pcd_kg_params + 1);
-+
-+ _fm_cpt_dbg(compat,"\n param->p_prs_params=%p \n", param->p_prs_params);
-+ _fm_cpt_dbg(compat," param->p_cc_params=%p \n", param->p_cc_params);
-+ _fm_cpt_dbg(compat," param->p_kg_params=%p \n", param->p_kg_params);
-+ _fm_cpt_dbg(compat," param->p_plcr_params=%p \n", param->p_plcr_params);
-+ _fm_cpt_dbg(compat," param->p_ip_reassembly_manip=%p \n", param->p_ip_reassembly_manip);
-+#if (DPAA_VERSION >= 11)
-+ _fm_cpt_dbg(compat," param->p_capwap_reassembly_manip=%p \n", param->p_capwap_reassembly_manip);
-+#endif
-+ param->pcd_support = compat_param->pcd_support;
-+ param->net_env_id = compat_pcd_id2ptr(compat_param->net_env_id);
-+
-+ if (param->p_cc_params)
-+ compat_copy_fm_port_pcd_cc(compat_port_pcd_cc_params, param->p_cc_params, COMPAT_US_TO_K);
-+ if (param->p_kg_params)
-+ compat_copy_fm_port_pcd_kg(compat_port_pcd_kg_params, param->p_kg_params, COMPAT_US_TO_K);
-+ if (param->p_plcr_params)
-+ param->p_plcr_params->plcr_profile_id = compat_pcd_id2ptr(compat_port_pcd_plcr_params->plcr_profile_id);
-+ param->p_ip_reassembly_manip = compat_pcd_id2ptr(compat_param->p_ip_reassembly_manip);
-+#if (DPAA_VERSION >= 11)
-+ param->p_capwap_reassembly_manip = compat_pcd_id2ptr(compat_param->p_capwap_reassembly_manip);
-+#endif
-+ }
-+}
-+
-+void compat_copy_fm_port_pcd_modify_tree(
-+ ioc_compat_fm_obj_t *compat_id,
-+ ioc_fm_obj_t *id,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ id->obj = compat_pcd_id2ptr(compat_id->obj);
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+void compat_copy_fm_port_vsp_alloc_params(
-+ ioc_compat_fm_port_vsp_alloc_params_t *compat_param,
-+ ioc_fm_port_vsp_alloc_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ _fm_cpt_dbg(compat," param->p_fm_tx_port=%p \n", param->p_fm_tx_port);
-+
-+ param->dflt_relative_id = compat_param->dflt_relative_id;
-+ param->num_of_profiles = compat_param->num_of_profiles;
-+ param->p_fm_tx_port = compat_pcd_id2ptr(compat_param->p_fm_tx_port);
-+ }
-+}
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+void compat_copy_fm_pcd_cc_tbl_get_stats(
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param,
-+ ioc_fm_pcd_cc_tbl_get_stats_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ param->key_index = compat_param->key_index;
-+ memcpy(&param->statistics, &compat_param->statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t));
-+ } else {
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ compat_param->key_index = param->key_index;
-+ memcpy(&compat_param->statistics, &param->statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t));
-+ }
-+}
-+
-+
-+void compat_copy_fm_pcd_net_env(
-+ ioc_compat_fm_pcd_net_env_params_t *compat_param,
-+ ioc_fm_pcd_net_env_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->num_of_distinction_units = compat_param->num_of_distinction_units;
-+ memcpy(param->units, compat_param->units, sizeof(ioc_fm_pcd_distinction_unit_t)*IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+ param->id = NULL; /* to avoid passing garbage to the kernel */
-+ }
-+ else
-+ {
-+ compat_param->num_of_distinction_units = param->num_of_distinction_units;
-+ memcpy(compat_param->units, param->units, sizeof(ioc_fm_pcd_distinction_unit_t)*IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS);
-+
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+}
-+
-+void compat_copy_fm_pcd_cc_node_modify_key(
-+ ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_modify_key_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->key_indx = compat_param->key_indx;
-+ param->key_size = compat_param->key_size;
-+ param->p_key = (uint8_t *)compat_ptr(compat_param->p_key);
-+ _fm_cpt_dbg(compat," param->p_key = %p \n", param->p_key);
-+ param->p_mask = (uint8_t *)compat_ptr(compat_param->p_mask);
-+ _fm_cpt_dbg(compat," param->p_mask = %p\n", param->p_mask);
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ _fm_cpt_dbg(compat," param->id = %p \n", param->id);
-+ }
-+ else
-+ {
-+ compat_param->key_indx = param->key_indx;
-+ compat_param->key_size = param->key_size;
-+ compat_param->p_key = ptr_to_compat((void *)param->p_key);
-+ compat_param->p_mask = ptr_to_compat((void *)param->p_mask);
-+
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+}
-+
-+void compat_copy_keys(
-+ ioc_compat_keys_params_t *compat_param,
-+ ioc_keys_params_t *param,
-+ uint8_t compat)
-+{
-+ int k = 0;
-+
-+ _fm_cpt_dbg(compat," {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K) {
-+ param->max_num_of_keys = compat_param->max_num_of_keys;
-+ param->mask_support = compat_param->mask_support;
-+ param->statistics_mode = compat_param->statistics_mode;
-+ param->num_of_keys = compat_param->num_of_keys;
-+ param->key_size = compat_param->key_size;
-+#if (DPAA_VERSION >= 11)
-+ memcpy(&param->frame_length_ranges,
-+ &compat_param->frame_length_ranges,
-+ sizeof(param->frame_length_ranges[0]) *
-+ IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR);
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+ else {
-+ compat_param->max_num_of_keys = param->max_num_of_keys;
-+ compat_param->mask_support = param->mask_support;
-+ compat_param->statistics_mode = param->statistics_mode;
-+ compat_param->num_of_keys = param->num_of_keys;
-+ compat_param->key_size = param->key_size;
-+#if (DPAA_VERSION >= 11)
-+ memcpy(&compat_param->frame_length_ranges,
-+ &param->frame_length_ranges,
-+ sizeof(compat_param->frame_length_ranges[0]) *
-+ IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR);
-+#endif /* (DPAA_VERSION >= 11) */
-+ }
-+
-+ for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_KEYS; k++)
-+ compat_copy_fm_pcd_cc_key(
-+ &compat_param->key_params[k],
-+ &param->key_params[k],
-+ compat);
-+
-+ compat_copy_fm_pcd_cc_next_engine(
-+ &compat_param->cc_next_engine_params_for_miss,
-+ &param->cc_next_engine_params_for_miss,
-+ compat);
-+
-+ _fm_cpt_dbg(compat," ...->}\n");
-+}
-+
-+void compat_copy_fm_pcd_cc_node(
-+ ioc_compat_fm_pcd_cc_node_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg(compat," {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ memcpy(&param->extract_cc_params, &compat_param->extract_cc_params, sizeof(ioc_fm_pcd_extract_entry_t));
-+
-+ else
-+ {
-+ compat_copy_keys(&compat_param->keys_params, &param->keys_params, compat);
-+
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ _fm_cpt_dbg(compat," param->id = %p \n", param->id);
-+ }
-+
-+ compat_copy_keys(&compat_param->keys_params, &param->keys_params, compat);
-+
-+ _fm_cpt_dbg(compat," ...->}\n");
-+}
-+
-+void compat_fm_pcd_manip_set_node(
-+ ioc_compat_fm_pcd_manip_params_t *compat_param,
-+ ioc_fm_pcd_manip_params_t *param,
-+ uint8_t compat)
-+{
-+ if (compat == COMPAT_US_TO_K) {
-+ param->type = compat_param->type;
-+ switch (param->type) {
-+ case e_IOC_FM_PCD_MANIP_HDR:
-+ param->u.hdr.rmv = compat_param->u.hdr.rmv;
-+ memcpy(&param->u.hdr.rmv_params,
-+ &compat_param->u.hdr.rmv_params,
-+ sizeof(param->u.hdr.rmv_params));
-+
-+ param->u.hdr.insrt = compat_param->u.hdr.insrt;
-+ param->u.hdr.insrt_params.type =
-+ compat_param->u.hdr.insrt_params.type;
-+ switch (compat_param->u.hdr.insrt_params.type)
-+ {
-+ case e_IOC_FM_PCD_MANIP_INSRT_GENERIC:
-+ param->u.hdr.insrt_params.u.generic.offset =
-+ compat_param->u.hdr.insrt_params.u.generic.offset;
-+ param->u.hdr.insrt_params.u.generic.size =
-+ compat_param->u.hdr.insrt_params.u.generic.size;
-+ param->u.hdr.insrt_params.u.generic.replace =
-+ compat_param->u.hdr.insrt_params.u.generic.replace;
-+ param->u.hdr.insrt_params.u.generic.p_data =
-+ compat_ptr(compat_param->u.hdr.insrt_params.u.generic.p_data);
-+ break;
-+ case e_IOC_FM_PCD_MANIP_INSRT_BY_HDR:
-+ param->u.hdr.insrt_params.u.by_hdr.type =
-+ compat_param->u.hdr.insrt_params.u.by_hdr.type;
-+ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.specific_l2 =
-+ compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.specific_l2;
-+ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.update =
-+ compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.update;
-+ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.size =
-+ compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.size;
-+ param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.p_data =
-+ compat_ptr(compat_param->u.hdr.insrt_params.u.by_hdr.u.specific_l2_params.p_data);
-+ break;
-+ default:
-+ _fm_cpt_err("Unsupported type: %d", compat_param->u.hdr.insrt_params.type);
-+ }
-+
-+ param->u.hdr.field_update = compat_param->u.hdr.field_update;
-+ memcpy(&param->u.hdr.field_update_params,
-+ &compat_param->u.hdr.field_update_params,
-+ sizeof(param->u.hdr.field_update_params));
-+
-+ param->u.hdr.custom = compat_param->u.hdr.custom;
-+ memcpy(&param->u.hdr.custom_params,
-+ &compat_param->u.hdr.custom_params,
-+ sizeof(param->u.hdr.custom_params));
-+
-+ param->u.hdr.dont_parse_after_manip =
-+ compat_param->u.hdr.dont_parse_after_manip;
-+ break;
-+ case e_IOC_FM_PCD_MANIP_REASSEM:
-+ memcpy(&param->u.reassem, &compat_param->u.reassem, sizeof(param->u.reassem));
-+ break;
-+ case e_IOC_FM_PCD_MANIP_FRAG:
-+ memcpy(&param->u.frag, &compat_param->u.frag, sizeof(param->u.frag));
-+ break;
-+ case e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD:
-+ memcpy(&param->u.special_offload,
-+ &compat_param->u.special_offload,
-+ sizeof(param->u.special_offload));
-+ break;
-+ }
-+
-+ param->p_next_manip = compat_pcd_id2ptr(compat_param->p_next_manip);
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ }
-+ else {
-+ compat_param->type = param->type;
-+ memcpy(&compat_param->u, &param->u, sizeof(compat_param->u));
-+
-+ if (param->type == e_IOC_FM_PCD_MANIP_HDR &&
-+ param->u.hdr.insrt_params.type == e_IOC_FM_PCD_MANIP_INSRT_GENERIC)
-+ compat_param->u.hdr.insrt_params.u.generic.p_data =
-+ ptr_to_compat(param->u.hdr.insrt_params.u.generic.p_data);
-+
-+ compat_param->p_next_manip = compat_pcd_ptr2id(param->id);
-+ /* ... the id obtained here should be one that was already registered by an
-+ earlier invocation of the compat_add_ptr2id() call below: */
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+}
-+
-+void compat_copy_fm_pcd_manip_get_stats(
-+ ioc_compat_fm_pcd_manip_get_stats_t *compat_param,
-+ ioc_fm_pcd_manip_get_stats_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ memcpy(&param->stats, &compat_param->stats,
-+ sizeof(ioc_fm_pcd_manip_stats_t));
-+ }
-+ else
-+ {
-+ compat_param->id = compat_add_ptr2id(param->id,
-+ FM_MAP_TYPE_PCD_NODE);
-+ memcpy(&compat_param->stats, &param->stats,
-+ sizeof(ioc_fm_pcd_manip_stats_t));
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+void compat_copy_fm_pcd_frm_replic_group_params(
-+ ioc_compat_fm_pcd_frm_replic_group_params_t *compat_param,
-+ ioc_fm_pcd_frm_replic_group_params_t *param,
-+ uint8_t compat)
-+{
-+ int k;
-+
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->max_num_of_entries = compat_param->max_num_of_entries;
-+ param->num_of_entries = compat_param->num_of_entries;
-+ param->id = compat_pcd_id2ptr(compat_param->id);
-+ }
-+ else
-+ {
-+ compat_param->max_num_of_entries = param->max_num_of_entries;
-+ compat_param->num_of_entries = param->num_of_entries;
-+ compat_param->id = compat_add_ptr2id(param->id,
-+ FM_MAP_TYPE_PCD_NODE);
-+ }
-+
-+ for (k=0; k < IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES; k++)
-+ compat_copy_fm_pcd_cc_next_engine(
-+ &compat_param->next_engine_params[k],
-+ &param->next_engine_params[k],
-+ compat);
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_pcd_frm_replic_member(
-+ ioc_compat_fm_pcd_frm_replic_member_t *compat_param,
-+ ioc_fm_pcd_frm_replic_member_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->h_replic_group = compat_pcd_id2ptr(compat_param->h_replic_group);
-+ param->member_index = compat_param->member_index;
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_pcd_frm_replic_member_params(
-+ ioc_compat_fm_pcd_frm_replic_member_params_t *compat_param,
-+ ioc_fm_pcd_frm_replic_member_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ compat_copy_fm_pcd_frm_replic_member(&compat_param->member,
-+ &param->member, compat);
-+
-+ compat_copy_fm_pcd_cc_next_engine(&compat_param->next_engine_params,
-+ &param->next_engine_params, compat);
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_vsp_params(
-+ ioc_compat_fm_vsp_params_t *compat_param,
-+ ioc_fm_vsp_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ memcpy(&param->ext_buf_pools, &compat_param->ext_buf_pools, sizeof(ioc_fm_ext_pools));
-+ param->liodn_offset = compat_param->liodn_offset;
-+ param->port_params.port_id = compat_param->port_params.port_id;
-+ param->port_params.port_type = compat_param->port_params.port_type;
-+ param->relative_profile_id = compat_param->relative_profile_id;
-+ }
-+ else
-+ {
-+ memcpy(&compat_param->ext_buf_pools, &param->ext_buf_pools, sizeof(ioc_fm_ext_pools));
-+ compat_param->liodn_offset = param->liodn_offset;
-+ compat_param->port_params.port_id = param->port_params.port_id;
-+ compat_param->port_params.port_type = param->port_params.port_type;
-+ compat_param->relative_profile_id = param->relative_profile_id;
-+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_buf_pool_depletion_params(
-+ ioc_compat_fm_buf_pool_depletion_params_t *compat_param,
-+ ioc_fm_buf_pool_depletion_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
-+ memcpy(&param->fm_buf_pool_depletion,
-+ &compat_param->fm_buf_pool_depletion,
-+ sizeof(ioc_fm_buf_pool_depletion_t));
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_buffer_prefix_content_params(
-+ ioc_compat_fm_buffer_prefix_content_params_t *compat_param,
-+ ioc_fm_buffer_prefix_content_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
-+ memcpy(&param->fm_buffer_prefix_content,
-+ &compat_param->fm_buffer_prefix_content,
-+ sizeof(ioc_fm_buffer_prefix_content_t));
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_vsp_config_no_sg_params(
-+ ioc_compat_fm_vsp_config_no_sg_params_t *compat_param,
-+ ioc_fm_vsp_config_no_sg_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
-+ param->no_sg = compat_param->no_sg;
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+
-+void compat_copy_fm_vsp_prs_result_params(
-+ ioc_compat_fm_vsp_prs_result_params_t *compat_param,
-+ ioc_fm_vsp_prs_result_params_t *param,
-+ uint8_t compat)
-+{
-+ _fm_cpt_dbg (compat, " {->...\n");
-+
-+ if (compat == COMPAT_US_TO_K)
-+ {
-+ param->p_fm_vsp = compat_pcd_id2ptr(compat_param->p_fm_vsp);
-+ /* p_data is a user-space pointer that needs to remain unmodified */
-+ param->p_data = (void *)(unsigned long long)compat_param->p_data;
-+ }
-+ else
-+ {
-+ compat_param->p_fm_vsp = compat_pcd_ptr2id(param->p_fm_vsp);
-+ /* p_data is a user-space pointer that needs to remain unmodified */
-+ compat_param->p_data = (compat_uptr_t)((unsigned long long)param->p_data & 0xFFFFFFFF);
-+ }
-+
-+ _fm_cpt_dbg (compat, " ...->}\n");
-+}
-+#endif /* (DPAA_VERSION >= 11) */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
-@@ -0,0 +1,755 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_ioctls_fm_compat.h
-+
-+ @Description FM PCD compat structures definition.
-+
-+*/
-+
-+#ifndef __FM_COMPAT_IOCTLS_H
-+#define __FM_COMPAT_IOCTLS_H
-+
-+#include <linux/compat.h>
-+
-+#define COMPAT_K_TO_US 0 /* copy from Kernel to User */
-+#define COMPAT_US_TO_K 1 /* copy from User to Kernel */
-+#define COMPAT_GENERIC 2
-+
-+#define COMPAT_COPY_K2US(dest, src, type) compat_copy_##type(src, dest, 0)
-+#define COMPAT_COPY_US2K(dest, src, type) compat_copy_##type(dest, src, 1)
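The last argument of every compat_copy_* helper declared in this header selects the copy direction using the constants above; the compat structure is always the first argument and the native structure the second. A minimal sketch of the calling pattern (illustrative only: the function name is hypothetical, the snippet assumes the FMan wrapper and kernel headers, and error handling is trimmed):

static long example_compat_get_scheme_spc(void __user *uarg)
{
	ioc_compat_fm_pcd_kg_scheme_spc_t compat_spc;
	ioc_fm_pcd_kg_scheme_spc_t spc;

	/* Sketch only: fetch the 32-bit layout from user space */
	if (copy_from_user(&compat_spc, uarg, sizeof(compat_spc)))
		return -EFAULT;

	/* 32-bit user-space layout -> native kernel layout */
	compat_copy_fm_pcd_kg_scheme_spc(&compat_spc, &spc, COMPAT_US_TO_K);

	/* ... the driver would operate on 'spc' here ... */

	/* native kernel layout -> 32-bit user-space layout */
	compat_copy_fm_pcd_kg_scheme_spc(&compat_spc, &spc, COMPAT_K_TO_US);

	return copy_to_user(uarg, &compat_spc, sizeof(compat_spc)) ? -EFAULT : 0;
}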
-+
-+/* mapping kernel pointers w/ UserSpace id's { */
-+/* Because compat_ptr(ptr_to_compat(X)) != X, pointers cannot simply be exchanged
-+   back and forth between user space and kernel space: compat_ptr() is only a cast,
-+   so the original kernel pointers would be broken. Kernel pointers are therefore
-+   mapped to user-space ids instead. */
-+#define COMPAT_PTR2ID_ARRAY_MAX (512+1) /* first location is not used */
-+#define COMPAT_PTR2ID_WATERMARK 0xface0000
-+#define COMPAT_PTR2ID_WM_MASK 0xffff0000
-+
-+/* define it for debug trace */
-+/*#define FM_COMPAT_DBG*/
-+
-+#define _fm_cpt_prk(stage, format, arg...) \
-+ printk(stage "fm_cpt (cpu:%u): " format, raw_smp_processor_id(), ##arg)
-+
-+#define _fm_cpt_inf(format, arg...) _fm_cpt_prk(KERN_INFO, format, ##arg)
-+#define _fm_cpt_wrn(format, arg...) _fm_cpt_prk(KERN_WARNING, format, ##arg)
-+#define _fm_cpt_err(format, arg...) _fm_cpt_prk(KERN_ERR, format, ##arg)
-+
-+/* used for compat IOCTL debugging */
-+#if defined(FM_COMPAT_DBG)
-+ #define _fm_cpt_dbg(from, format, arg...) \
-+ do{ \
-+ if (from == COMPAT_US_TO_K) \
-+ printk("fm_cpt to KS [%s:%u](cpu:%u) - " format, \
-+ __func__, __LINE__, raw_smp_processor_id(), ##arg); \
-+ else if (from == COMPAT_K_TO_US) \
-+ printk("fm_cpt to US [%s:%u](cpu:%u) - " format, \
-+ __func__, __LINE__, raw_smp_processor_id(), ##arg); \
-+ else \
-+ printk("fm_cpt [%s:%u](cpu:%u) - " format, \
-+ __func__, __LINE__, raw_smp_processor_id(), ##arg); \
-+ }while(0)
-+#else
-+# define _fm_cpt_dbg(arg...)
-+#endif
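When FM_COMPAT_DBG is defined, a trace call such as the one below (the pattern used throughout lnxwrp_ioctls_fm_compat.c) prints the copy direction together with the calling function, source line and CPU; the values shown in the comment are illustrative:

/* Example trace, assuming compat == COMPAT_US_TO_K; the output has the form:
 *   fm_cpt to KS [compat_copy_fm_pcd_cc_node_modify_key:<line>](cpu:<n>) -  param->id = <address>
 */
_fm_cpt_dbg(compat, " param->id = %p \n", param->id);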
-+
-+/*TODO: per FMan module:
-+ *
-+ * Parser: FM_MAP_TYPE_PARSER_NODE,
-+ * Kg: FM_MAP_TYPE_KG_NODE,
-+ * Policer: FM_MAP_TYPE_POLICER_NODE
-+ * Manip: FM_MAP_TYPE_MANIP_NODE
-+ **/
-+enum fm_map_node_type {
-+ FM_MAP_TYPE_UNSPEC = 0,
-+ FM_MAP_TYPE_PCD_NODE,
-+
-+ /* add types here, update the policy */
-+
-+ __FM_MAP_TYPE_AFTER_LAST,
-+ FM_MAP_TYPE_MAX = __FM_MAP_TYPE_AFTER_LAST - 1
-+};
-+
-+void compat_del_ptr2id(void *p, enum fm_map_node_type);
-+compat_uptr_t compat_add_ptr2id(void *p, enum fm_map_node_type);
-+compat_uptr_t compat_get_ptr2id(void *p, enum fm_map_node_type);
-+void *compat_get_id2ptr(compat_uptr_t comp, enum fm_map_node_type);
-+
-+static inline compat_uptr_t compat_pcd_ptr2id(void *ptr) {
-+ return (ptr)? compat_get_ptr2id(ptr, FM_MAP_TYPE_PCD_NODE)
-+ : (compat_uptr_t) 0;
-+}
-+
-+static inline void *compat_pcd_id2ptr(compat_uptr_t id) {
-+ return (id) ? compat_get_id2ptr(id, FM_MAP_TYPE_PCD_NODE)
-+ : NULL;
-+}
-+
-+/* other similar inlines may be added as new nodes are added
-+ to enum fm_map_node_type above... */
-+/* } mapping kernel pointers w/ UserSpace id's */
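The defines and helpers above only describe the shape of the mapping (a small table of slots whose indices are handed to user space tagged with a 16-bit watermark); the actual implementation lives in lnxwrp_ioctls_fm_compat.c. A much-simplified illustration of the idea, with hypothetical names and without the locking or per-node-type handling of the real code:

/* Illustrative sketch only -- not the driver's implementation. */
static void *example_map[COMPAT_PTR2ID_ARRAY_MAX]; /* slot 0 is never used */

static compat_uptr_t example_add_ptr2id(void *p)
{
	int i;

	for (i = 1; i < COMPAT_PTR2ID_ARRAY_MAX; i++)
		if (!example_map[i]) {
			example_map[i] = p;
			/* tag the slot index so stray ids are easy to reject */
			return COMPAT_PTR2ID_WATERMARK | i;
		}
	return 0; /* table full */
}

static void *example_get_id2ptr(compat_uptr_t id)
{
	unsigned int idx = id & ~COMPAT_PTR2ID_WM_MASK;

	if ((id & COMPAT_PTR2ID_WM_MASK) != COMPAT_PTR2ID_WATERMARK ||
	    idx >= COMPAT_PTR2ID_ARRAY_MAX)
		return NULL;
	return example_map[idx];
}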
-+
-+/* pcd compat structures { */
-+typedef struct ioc_compat_fm_pcd_cc_node_remove_key_params_t {
-+ compat_uptr_t id;
-+ uint16_t key_indx;
-+} ioc_compat_fm_pcd_cc_node_remove_key_params_t;
-+
-+typedef union ioc_compat_fm_pcd_plcr_next_engine_params_u {
-+ ioc_fm_pcd_done_action action;
-+ compat_uptr_t p_profile;
-+ compat_uptr_t p_direct_scheme;
-+} ioc_compat_fm_pcd_plcr_next_engine_params_u;
-+
-+typedef struct ioc_compat_fm_pcd_plcr_profile_params_t {
-+ bool modify;
-+ union {
-+ struct {
-+ ioc_fm_pcd_profile_type_selection profile_type;
-+ compat_uptr_t p_fm_port;
-+ uint16_t relative_profile_id;
-+ } new_params;
-+ compat_uptr_t p_profile;
-+ } profile_select;
-+ ioc_fm_pcd_plcr_algorithm_selection alg_selection;
-+ ioc_fm_pcd_plcr_color_mode color_mode;
-+
-+ union {
-+ ioc_fm_pcd_plcr_color dflt_color;
-+ ioc_fm_pcd_plcr_color override;
-+ } color;
-+
-+ ioc_fm_pcd_plcr_non_passthrough_alg_param_t non_passthrough_alg_param;
-+
-+ ioc_fm_pcd_engine next_engine_on_green;
-+ ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_green;
-+
-+ ioc_fm_pcd_engine next_engine_on_yellow;
-+ ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_yellow;
-+
-+ ioc_fm_pcd_engine next_engine_on_red;
-+ ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_red;
-+
-+ bool trap_profile_on_flow_A;
-+ bool trap_profile_on_flow_B;
-+ bool trap_profile_on_flow_C;
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_plcr_profile_params_t;
-+
-+typedef struct ioc_compat_fm_obj_t {
-+ compat_uptr_t obj;
-+} ioc_compat_fm_obj_t;
-+
-+typedef struct ioc_compat_fm_pcd_kg_scheme_select_t {
-+ bool direct;
-+ compat_uptr_t scheme_id;
-+} ioc_compat_fm_pcd_kg_scheme_select_t;
-+
-+typedef struct ioc_compat_fm_pcd_port_schemes_params_t {
-+ uint8_t num_of_schemes;
-+ compat_uptr_t scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES];
-+} ioc_compat_fm_pcd_port_schemes_params_t;
-+
-+#if (DPAA_VERSION >= 11)
-+typedef struct ioc_compat_fm_port_vsp_alloc_params_t {
-+ uint8_t num_of_profiles; /**< Number of Virtual Storage Profiles */
-+ uint8_t dflt_relative_id; /**< The default Virtual-Storage-Profile-id dedicated to a Rx/OP port.
-+ The same default Virtual-Storage-Profile-id will be used for the coupled Tx port
-+ if the relevant function is called for the Rx port */
-+ compat_uptr_t p_fm_tx_port; /**< Handle to coupled Tx Port; not relevant for OP port. */
-+}ioc_compat_fm_port_vsp_alloc_params_t;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+typedef struct ioc_compat_fm_pcd_net_env_params_t {
-+ uint8_t num_of_distinction_units;
-+ ioc_fm_pcd_distinction_unit_t units[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /* same structure*/
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_net_env_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_prs_sw_params_t {
-+ bool override;
-+ uint32_t size;
-+ uint16_t base;
-+ compat_uptr_t p_code;
-+ uint32_t sw_prs_data_params[IOC_FM_PCD_PRS_NUM_OF_HDRS];
-+ uint8_t num_of_labels;
-+ ioc_fm_pcd_prs_label_params_t labels_table[IOC_FM_PCD_PRS_NUM_OF_LABELS];
-+} ioc_compat_fm_pcd_prs_sw_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_next_kg_params_t {
-+ bool override_fqid;
-+ uint32_t new_fqid;
-+#if DPAA_VERSION >= 11
-+ uint8_t new_relative_storage_profile_id;
-+#endif
-+ compat_uptr_t p_direct_scheme;
-+} ioc_compat_fm_pcd_cc_next_kg_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_next_cc_params_t {
-+ compat_uptr_t cc_node_id;
-+} ioc_compat_fm_pcd_cc_next_cc_params_t;
-+
-+#if DPAA_VERSION >= 11
-+typedef struct ioc_compat_fm_pcd_cc_next_fr_params_t {
-+ compat_uptr_t frm_replic_id;
-+} ioc_compat_fm_pcd_cc_next_fr_params_t;
-+#endif /* DPAA_VERSION >= 11 */
-+
-+typedef struct ioc_compat_fm_pcd_cc_next_engine_params_t {
-+ ioc_fm_pcd_engine next_engine;
-+ union {
-+ ioc_compat_fm_pcd_cc_next_cc_params_t cc_params; /**< compat structure*/
-+ ioc_fm_pcd_cc_next_plcr_params_t plcr_params; /**< same structure*/
-+ ioc_fm_pcd_cc_next_enqueue_params_t enqueue_params; /**< same structure*/
-+ ioc_compat_fm_pcd_cc_next_kg_params_t kg_params; /**< compat structure*/
-+#if DPAA_VERSION >= 11
-+ ioc_compat_fm_pcd_cc_next_fr_params_t fr_params; /**< compat structure*/
-+#endif /* DPAA_VERSION >= 11 */
-+ } params;
-+ compat_uptr_t manip_id;
-+ bool statistics_en;
-+} ioc_compat_fm_pcd_cc_next_engine_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_grp_params_t {
-+ uint8_t num_of_distinction_units;
-+ uint8_t unit_ids [IOC_FM_PCD_MAX_NUM_OF_CC_UNITS];
-+ ioc_compat_fm_pcd_cc_next_engine_params_t next_engine_per_entries_in_grp[IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP];
-+} ioc_compat_fm_pcd_cc_grp_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_tree_params_t {
-+ compat_uptr_t net_env_id;
-+ uint8_t num_of_groups;
-+ ioc_compat_fm_pcd_cc_grp_params_t fm_pcd_cc_group_params [IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS];
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_cc_tree_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t {
-+ compat_uptr_t id;
-+ uint8_t grp_indx;
-+ uint8_t indx;
-+ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
-+} ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_key_params_t {
-+ compat_uptr_t p_key;
-+ compat_uptr_t p_mask;
-+ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params; /**< compat structure*/
-+} ioc_compat_fm_pcd_cc_key_params_t;
-+
-+typedef struct ioc_compat_keys_params_t {
-+ uint16_t max_num_of_keys;
-+ bool mask_support;
-+ ioc_fm_pcd_cc_stats_mode statistics_mode;
-+#if (DPAA_VERSION >= 11)
-+ uint16_t frame_length_ranges[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
-+#endif /* (DPAA_VERSION >= 11) */
-+ uint16_t num_of_keys;
-+ uint8_t key_size;
-+ ioc_compat_fm_pcd_cc_key_params_t key_params[IOC_FM_PCD_MAX_NUM_OF_KEYS]; /**< compat structure*/
-+ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss; /**< compat structure*/
-+} ioc_compat_keys_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_node_params_t {
-+ ioc_fm_pcd_extract_entry_t extract_cc_params; /**< same structure*/
-+ ioc_compat_keys_params_t keys_params; /**< compat structure*/
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_cc_node_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a hash table
-+*//***************************************************************************/
-+typedef struct ioc_compat_fm_pcd_hash_table_params_t {
-+ uint16_t max_num_of_keys;
-+ ioc_fm_pcd_cc_stats_mode statistics_mode;
-+ uint8_t kg_hash_shift;
-+ uint16_t hash_res_mask;
-+ uint8_t hash_shift;
-+ uint8_t match_key_size;
-+ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss;
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_hash_table_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_hash_table_add_key_params_t {
-+ compat_uptr_t p_hash_tbl;
-+ uint8_t key_size;
-+ ioc_compat_fm_pcd_cc_key_params_t key_params;
-+} ioc_compat_fm_pcd_hash_table_add_key_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_node_modify_key_params_t {
-+ compat_uptr_t id;
-+ uint16_t key_indx;
-+ uint8_t key_size;
-+ compat_uptr_t p_key;
-+ compat_uptr_t p_mask;
-+} ioc_compat_fm_pcd_cc_node_modify_key_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_hash_table_remove_key_params_t {
-+ compat_uptr_t p_hash_tbl;
-+ uint8_t key_size;
-+ compat_uptr_t p_key;
-+} ioc_compat_fm_pcd_hash_table_remove_key_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t {
-+ compat_uptr_t id;
-+ uint16_t key_indx;
-+ uint8_t key_size;
-+ ioc_compat_fm_pcd_cc_key_params_t key_params;
-+} ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t;
-+
-+typedef struct ioc_compat_fm_port_pcd_plcr_params_t {
-+ compat_uptr_t plcr_profile_id;
-+} ioc_compat_fm_port_pcd_plcr_params_t;
-+
-+typedef struct ioc_compat_fm_port_pcd_cc_params_t {
-+ compat_uptr_t cc_tree_id;
-+} ioc_compat_fm_port_pcd_cc_params_t;
-+
-+typedef struct ioc_compat_fm_port_pcd_kg_params_t {
-+ uint8_t num_of_schemes;
-+ compat_uptr_t scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES];
-+ bool direct_scheme;
-+ compat_uptr_t direct_scheme_id;
-+} ioc_compat_fm_port_pcd_kg_params_t;
-+
-+typedef struct ioc_compat_fm_port_pcd_params_t {
-+ ioc_fm_port_pcd_support pcd_support;
-+ compat_uptr_t net_env_id;
-+ compat_uptr_t p_prs_params;
-+ compat_uptr_t p_cc_params;
-+ compat_uptr_t p_kg_params;
-+ compat_uptr_t p_plcr_params;
-+ compat_uptr_t p_ip_reassembly_manip;
-+#if DPAA_VERSION >= 11
-+ compat_uptr_t p_capwap_reassembly_manip;
-+#endif
-+} ioc_compat_fm_port_pcd_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_kg_cc_t {
-+ compat_uptr_t tree_id;
-+ uint8_t grp_id;
-+ bool plcr_next;
-+ bool bypass_plcr_profile_generation;
-+ ioc_fm_pcd_kg_plcr_profile_t plcr_profile;
-+} ioc_compat_fm_pcd_kg_cc_t;
-+
-+typedef struct ioc_compat_fm_pcd_kg_scheme_params_t {
-+ bool modify;
-+ union {
-+ uint8_t relative_scheme_id;
-+ compat_uptr_t scheme_id;
-+ } scm_id;
-+ bool always_direct;
-+ struct {
-+ compat_uptr_t net_env_id;
-+ uint8_t num_of_distinction_units;
-+ uint8_t unit_ids[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
-+ } net_env_params;
-+ bool use_hash;
-+ ioc_fm_pcd_kg_key_extract_and_hash_params_t key_extract_and_hash_params;
-+ bool bypass_fqid_generation;
-+ uint32_t base_fqid;
-+ uint8_t num_of_used_extracted_ors;
-+ ioc_fm_pcd_kg_extracted_or_params_t extracted_ors[IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS];
-+#if DPAA_VERSION >= 11
-+ bool override_storage_profile;
-+ ioc_fm_pcd_kg_storage_profile_t storage_profile;
-+#endif /* DPAA_VERSION >= 11 */
-+ ioc_fm_pcd_engine next_engine;
-+ union{
-+ ioc_fm_pcd_done_action done_action;
-+ ioc_fm_pcd_kg_plcr_profile_t plcr_profile;
-+ ioc_compat_fm_pcd_kg_cc_t cc;
-+ } kg_next_engine_params;
-+ ioc_fm_pcd_kg_scheme_counter_t scheme_counter;
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_kg_scheme_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t {
-+ compat_uptr_t id;
-+ uint16_t key_indx;
-+ uint8_t key_size;
-+ ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
-+} ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_generic_params_t {
-+ uint8_t offset;
-+ uint8_t size;
-+ bool replace;
-+ compat_uptr_t p_data;
-+} ioc_compat_fm_pcd_manip_hdr_insrt_generic_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_specific_l2_params_t {
-+ ioc_fm_pcd_manip_hdr_insrt_specific_l2 specific_l2;
-+ bool update;
-+ uint8_t size;
-+ compat_uptr_t p_data;
-+} ioc_compat_fm_pcd_manip_hdr_insrt_specific_l2_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_t {
-+ uint8_t size; /**< size of inserted section */
-+ compat_uptr_t p_data; /**< data to be inserted */
-+} ioc_compat_fm_pcd_manip_hdr_insrt_t;
-+
-+#if (DPAA_VERSION >= 11)
-+typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_ip_params_t {
-+ bool calc_l4_checksum; /**< Calculate L4 checksum. */
-+ ioc_fm_pcd_manip_hdr_qos_mapping_mode mapping_mode; /**< TODO */
-+ uint8_t last_pid_offset; /**< the offset of the last Protocol within
-+ the inserted header */
-+ uint16_t id; /**< 16 bit New IP ID */
-+ bool dont_frag_overwrite;
-+ /**< IPv4 only. DF is overwritten with the hash-result next-to-last byte.
-+ * This byte is configured to be overwritten when RPD is set. */
-+ uint8_t last_dst_offset;
-+ /**< IPv6 only. If a routing extension exists, the user should set the offset of the destination address
-+ * in order to calculate the UDP checksum pseudo-header;
-+ * otherwise set it to '0'. */
-+ ioc_compat_fm_pcd_manip_hdr_insrt_t insrt; /**< size and data to be inserted. */
-+} ioc_compat_fm_pcd_manip_hdr_insrt_ip_params_t;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_by_hdr_params_t {
-+ ioc_fm_pcd_manip_hdr_insrt_by_hdr_type type;
-+ union {
-+ ioc_compat_fm_pcd_manip_hdr_insrt_specific_l2_params_t specific_l2_params;
-+#if (DPAA_VERSION >= 11)
-+ ioc_compat_fm_pcd_manip_hdr_insrt_ip_params_t ip_params;
-+ ioc_compat_fm_pcd_manip_hdr_insrt_t insrt;
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} ioc_compat_fm_pcd_manip_hdr_insrt_by_hdr_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_hdr_insrt_params_t {
-+ ioc_fm_pcd_manip_hdr_insrt_type type;
-+ union {
-+ ioc_compat_fm_pcd_manip_hdr_insrt_by_hdr_params_t by_hdr;
-+ ioc_compat_fm_pcd_manip_hdr_insrt_generic_params_t generic;
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+#error "FM_CAPWAP_SUPPORT feature not supported!"
-+ ioc_fm_pcd_manip_hdr_insrt_by_template_params_t by_template;
-+#endif /* FM_CAPWAP_SUPPORT */
-+ } u;
-+} ioc_compat_fm_pcd_manip_hdr_insrt_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_hdr_params_t {
-+ bool rmv;
-+ ioc_fm_pcd_manip_hdr_rmv_params_t rmv_params;
-+ bool insrt;
-+ ioc_compat_fm_pcd_manip_hdr_insrt_params_t insrt_params;
-+ bool field_update;
-+ ioc_fm_pcd_manip_hdr_field_update_params_t field_update_params;
-+ bool custom;
-+ ioc_fm_pcd_manip_hdr_custom_params_t custom_params;
-+ bool dont_parse_after_manip;
-+} ioc_compat_fm_pcd_manip_hdr_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_special_offload_params_t {
-+ bool decryption;
-+ bool ecn_copy;
-+ bool dscp_copy;
-+ bool variable_ip_hdr_len;
-+ bool variable_ip_version;
-+ uint8_t outer_ip_hdr_len;
-+ uint16_t arw_size;
-+ compat_uptr_t arw_addr;
-+} ioc_compat_fm_pcd_manip_special_offload_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_params_t {
-+ ioc_fm_pcd_manip_type type;
-+ union {
-+ ioc_compat_fm_pcd_manip_hdr_params_t hdr;
-+ ioc_fm_pcd_manip_reassem_params_t reassem;
-+ ioc_fm_pcd_manip_frag_params_t frag;
-+ ioc_compat_fm_pcd_manip_special_offload_params_t special_offload;
-+ } u;
-+ compat_uptr_t p_next_manip;
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+#error "FM_CAPWAP_SUPPORT feature not supported!"
-+ bool frag_or_reasm;
-+ ioc_fm_pcd_manip_frag_or_reasm_params_t frag_or_reasm_params;
-+#endif /* FM_CAPWAP_SUPPORT */
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_manip_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_manip_get_stats_t {
-+ compat_uptr_t id;
-+ ioc_fm_pcd_manip_stats_t stats;
-+} ioc_compat_fm_pcd_manip_get_stats_t;
-+
-+#if (DPAA_VERSION >= 11)
-+typedef struct ioc_compat_fm_pcd_frm_replic_group_params_t {
-+ uint8_t max_num_of_entries;
-+ uint8_t num_of_entries;
-+ ioc_compat_fm_pcd_cc_next_engine_params_t
-+ next_engine_params[IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES];
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_frm_replic_group_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_frm_replic_member_t {
-+ compat_uptr_t h_replic_group;
-+ uint16_t member_index;
-+} ioc_compat_fm_pcd_frm_replic_member_t;
-+
-+typedef struct ioc_compat_fm_pcd_frm_replic_member_params_t {
-+ ioc_compat_fm_pcd_frm_replic_member_t member;
-+ ioc_compat_fm_pcd_cc_next_engine_params_t next_engine_params;
-+} ioc_compat_fm_pcd_frm_replic_member_params_t;
-+
-+typedef struct ioc_compat_fm_vsp_params_t {
-+ compat_uptr_t p_fm; /**< A handle to the FM object this VSP related to */
-+ ioc_fm_ext_pools ext_buf_pools; /**< Which external buffer pools are used
-+ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes.
-+ parameter associated with Rx / OP port */
-+ uint16_t liodn_offset; /**< VSP's LIODN offset */
-+ struct {
-+ ioc_fm_port_type port_type; /**< Port type */
-+ uint8_t port_id; /**< Port Id - relative to type */
-+ } port_params;
-+ uint8_t relative_profile_id; /**< VSP Id - relative to VSP's range
-+ defined in relevant FM object */
-+ compat_uptr_t id; /**< return value */
-+} ioc_compat_fm_vsp_params_t;
-+
-+typedef struct ioc_compat_fm_buf_pool_depletion_params_t {
-+ compat_uptr_t p_fm_vsp;
-+ ioc_fm_buf_pool_depletion_t fm_buf_pool_depletion;
-+} ioc_compat_fm_buf_pool_depletion_params_t;
-+
-+typedef struct ioc_compat_fm_buffer_prefix_content_params_t {
-+ compat_uptr_t p_fm_vsp;
-+ ioc_fm_buffer_prefix_content_t fm_buffer_prefix_content;
-+} ioc_compat_fm_buffer_prefix_content_params_t;
-+
-+typedef struct ioc_compat_fm_vsp_config_no_sg_params_t {
-+ compat_uptr_t p_fm_vsp;
-+ bool no_sg;
-+} ioc_compat_fm_vsp_config_no_sg_params_t;
-+
-+typedef struct ioc_compat_fm_vsp_prs_result_params_t {
-+ compat_uptr_t p_fm_vsp;
-+ compat_uptr_t p_data;
-+} ioc_compat_fm_vsp_prs_result_params_t;
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+typedef struct ioc_compat_fm_pcd_kg_scheme_spc_t {
-+ uint32_t val;
-+ compat_uptr_t id;
-+} ioc_compat_fm_pcd_kg_scheme_spc_t;
-+
-+typedef struct ioc_compat_fm_ctrl_mon_counters_params_t {
-+ uint8_t fm_ctrl_index;
-+ compat_uptr_t p_mon;
-+} ioc_compat_fm_ctrl_mon_counters_params_t;
-+
-+typedef struct ioc_compat_fm_pcd_cc_tbl_get_stats_t {
-+ compat_uptr_t id;
-+ uint16_t key_index;
-+ ioc_fm_pcd_cc_key_statistics_t statistics;
-+} ioc_compat_fm_pcd_cc_tbl_get_stats_t;
-+
-+
-+/* } pcd compat structures */
-+
-+void compat_obj_delete(
-+ ioc_compat_fm_obj_t *compat_id,
-+ ioc_fm_obj_t *id);
-+
-+/* pcd compat functions { */
-+void compat_copy_fm_pcd_plcr_profile(
-+ ioc_compat_fm_pcd_plcr_profile_params_t *compat_param,
-+ ioc_fm_pcd_plcr_profile_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_key(
-+ ioc_compat_fm_pcd_cc_key_params_t *compat_param,
-+ ioc_fm_pcd_cc_key_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(
-+ ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_node_modify_next_engine(
-+ ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_modify_next_engine_params_t *param,
-+ uint8_t compat);
-+
-+void compat_fm_pcd_cc_tree_modify_next_engine(
-+ ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param,
-+ ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_hash_table(
-+ ioc_compat_fm_pcd_hash_table_params_t *compat_param,
-+ ioc_fm_pcd_hash_table_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_grp(
-+ ioc_compat_fm_pcd_cc_grp_params_t *compat_param,
-+ ioc_fm_pcd_cc_grp_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_tree(
-+ ioc_compat_fm_pcd_cc_tree_params_t *compat_param,
-+ ioc_fm_pcd_cc_tree_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_tbl_get_stats(
-+ ioc_compat_fm_pcd_cc_tbl_get_stats_t *compat_param,
-+ ioc_fm_pcd_cc_tbl_get_stats_t *param,
-+ uint8_t compat);
-+
-+void compat_fm_pcd_prs_sw(
-+ ioc_compat_fm_pcd_prs_sw_params_t *compat_param,
-+ ioc_fm_pcd_prs_sw_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_kg_scheme(
-+ ioc_compat_fm_pcd_kg_scheme_params_t *compat_param,
-+ ioc_fm_pcd_kg_scheme_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_kg_scheme_select(
-+ ioc_compat_fm_pcd_kg_scheme_select_t *compat_param,
-+ ioc_fm_pcd_kg_scheme_select_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_kg_schemes_params(
-+ ioc_compat_fm_pcd_port_schemes_params_t *compat_param,
-+ ioc_fm_pcd_port_schemes_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_port_pcd_kg(
-+ ioc_compat_fm_port_pcd_kg_params_t *compat_param,
-+ ioc_fm_port_pcd_kg_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_port_pcd(
-+ ioc_compat_fm_port_pcd_params_t *compat_param,
-+ ioc_fm_port_pcd_params_t *param,
-+ uint8_t compat);
-+
-+#if (DPAA_VERSION >= 11)
-+void compat_copy_fm_port_vsp_alloc_params(
-+ ioc_compat_fm_port_vsp_alloc_params_t *compat_param,
-+ ioc_fm_port_vsp_alloc_params_t *param,
-+ uint8_t compat);
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+void compat_copy_fm_pcd_net_env(
-+ ioc_compat_fm_pcd_net_env_params_t *compat_param,
-+ ioc_fm_pcd_net_env_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_node_modify_key(
-+ ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_modify_key_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_keys(
-+ ioc_compat_keys_params_t *compat_param,
-+ ioc_keys_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_cc_node(
-+ ioc_compat_fm_pcd_cc_node_params_t *compat_param,
-+ ioc_fm_pcd_cc_node_params_t *param,
-+ uint8_t compat);
-+
-+void compat_fm_pcd_manip_set_node(
-+ ioc_compat_fm_pcd_manip_params_t *compat_param,
-+ ioc_fm_pcd_manip_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_manip_get_stats(
-+ ioc_compat_fm_pcd_manip_get_stats_t *compat_param,
-+ ioc_fm_pcd_manip_get_stats_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_port_pcd_modify_tree(
-+ ioc_compat_fm_obj_t *compat_id,
-+ ioc_fm_obj_t *id,
-+ uint8_t compat);
-+
-+#if (DPAA_VERSION >= 11)
-+void compat_copy_fm_pcd_frm_replic_group_params(
-+ ioc_compat_fm_pcd_frm_replic_group_params_t *compat_param,
-+ ioc_fm_pcd_frm_replic_group_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_frm_replic_member(
-+ ioc_compat_fm_pcd_frm_replic_member_t *compat_param,
-+ ioc_fm_pcd_frm_replic_member_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_pcd_frm_replic_member_params(
-+ ioc_compat_fm_pcd_frm_replic_member_params_t *compat_param,
-+ ioc_fm_pcd_frm_replic_member_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_vsp_params(
-+ ioc_compat_fm_vsp_params_t *compat_param,
-+ ioc_fm_vsp_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_buf_pool_depletion_params(
-+ ioc_compat_fm_buf_pool_depletion_params_t *compat_param,
-+ ioc_fm_buf_pool_depletion_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_buffer_prefix_content_params(
-+ ioc_compat_fm_buffer_prefix_content_params_t *compat_param,
-+ ioc_fm_buffer_prefix_content_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_vsp_config_no_sg_params(
-+ ioc_compat_fm_vsp_config_no_sg_params_t *compat_param,
-+ ioc_fm_vsp_config_no_sg_params_t *param,
-+ uint8_t compat);
-+
-+void compat_copy_fm_vsp_prs_result_params(
-+ ioc_compat_fm_vsp_prs_result_params_t *compat_param,
-+ ioc_fm_vsp_prs_result_params_t *param,
-+ uint8_t compat);
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+void compat_copy_fm_pcd_kg_scheme_spc(
-+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param,
-+ ioc_fm_pcd_kg_scheme_spc_t *param,
-+ uint8_t compat);
-+
-+/* } pcd compat functions */
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources.h
-@@ -0,0 +1,121 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_resources.h
-+
-+ @Description FMD wrapper resource allocation functions.
-+
-+*/
-+
-+#ifndef LNXWRP_RESOURCES_H_
-+#define LNXWRP_RESOURCES_H_
-+
-+#if !defined(FMAN_RESOURCES_UNIT_TEST)
-+#include "lnxwrp_fm.h"
-+#else
-+#include "lnxwrp_resources_ut.h"
-+#endif
-+
-+#define ROUND(X) ((2*(X)+1)/2)
-+#define CEIL(X) ((X)+1)
-+/* #define ROUND_DIV(X, Y) (((X)+(Y)/2)/(Y)) */
-+#define ROUND_DIV(X, Y) ((2*(X)+(Y))/(2*(Y)))
-+#define CEIL_DIV(X, Y) (((X)+(Y)-1)/(Y))
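A tiny standalone check (not part of the patch; names taken from the macros above) makes the behaviour of the two division helpers explicit: ROUND_DIV rounds to the nearest integer with halves rounding up, while CEIL_DIV always rounds up.

    #include <assert.h>

    #define ROUND_DIV(X, Y) ((2*(X)+(Y))/(2*(Y)))
    #define CEIL_DIV(X, Y) (((X)+(Y)-1)/(Y))

    int main(void)
    {
    	assert(ROUND_DIV(7, 2) == 4);	/* 3.5 rounds up to 4 */
    	assert(ROUND_DIV(5, 2) == 3);	/* 2.5 rounds up to 3 */
    	assert(ROUND_DIV(9, 4) == 2);	/* 2.25 rounds down to 2 */
    	assert(CEIL_DIV(7, 2) == 4);	/* always rounds up */
    	assert(CEIL_DIV(6, 2) == 3);	/* exact division unchanged */
    	return 0;
    }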
-+
-+/* used for resource calculus */
-+#define DPDE_1G 2 /* DQDP 1g - from LLD:
-+ DEFAULT_PORT_txFifoDeqPipelineDepth_1G */
-+#define DPDE_10G 8 /* DQDP 10g - from LLD:
-+ DEFAULT_PORT_txFifoDeqPipelineDepth_10G */
-+
-+int fm_set_active_fman_ports(struct platform_device *of_dev,
-+ t_LnxWrpFmDev *p_LnxWrpFmDev);
-+
-+/* Calculate the fifosize based on MURAM allocation, number of ports, dpde
-+ * value and s/g software support (! Kernel does not support s/g).
-+ *
-+ * Algorithm summary:
-+ * - Calculate the minimum fifosize required for every type of port
-+ * (TX,RX for 1G, 2.5G and 10G).
-+ * - Set TX the minimum fifosize required.
-+ * - Distribute the remaining buffers (after all TX were set) to RX ports
-+ * based on:
-+ * 1G RX = Remaining_buffers * 1/(1+2.5+10)
-+ * 2.5G RX = Remaining_buffers * 2.5/(1+2.5+10)
-+ * 10G RX = Remaining_buffers * 10/(1+2.5+10)
-+ * - if the RX is smaller than the minimum required, then set the minimum
-+ * required
-+ * - In the end, distribute the leftovers if there are any (due to
-+ * imprecise arithmetic). On over-allocation, cut some buffers from all RX
-+ * ports without going below the minimum required threshold; if the
-+ * threshold would have to be crossed in order to cut the over-allocation,
-+ * then this configuration cannot be set - KERN_ALERT.
-+*/
-+int fm_precalculate_fifosizes(t_LnxWrpFmDev *p_LnxWrpFmDev,
-+ int muram_fifo_size);
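To make the weighted split described above concrete, here is a minimal userspace sketch (hypothetical names, not the driver's actual code): after the TX minimums are reserved, the remaining buffers are divided among the RX port classes in the 1 : 2.5 : 10 ratio and clamped to a per-port minimum; any rounding leftovers would then be redistributed as the comment describes.

    #include <stdio.h>

    /* Hypothetical illustration of the 1 : 2.5 : 10 weighted RX split;
     * weights are scaled by 10 to stay in integer arithmetic. */
    static void split_rx_buffers(int remaining, int n1g, int n2g5, int n10g,
    			     int min_rx, int *rx1g, int *rx2g5, int *rx10g)
    {
    	int total_w = 10 * n1g + 25 * n2g5 + 100 * n10g;

    	if (!total_w)
    		return;
    	*rx1g  = remaining * 10  / total_w;	/* per 1G RX port */
    	*rx2g5 = remaining * 25  / total_w;	/* per 2.5G RX port */
    	*rx10g = remaining * 100 / total_w;	/* per 10G RX port */

    	/* Never drop below the per-port minimum. */
    	if (*rx1g < min_rx)  *rx1g  = min_rx;
    	if (*rx2g5 < min_rx) *rx2g5 = min_rx;
    	if (*rx10g < min_rx) *rx10g = min_rx;
    }

    int main(void)
    {
    	int rx1g = 0, rx2g5 = 0, rx10g = 0;

    	/* e.g. 1000 buffers left after TX, with 4x1G and 1x10G RX ports */
    	split_rx_buffers(1000, 4, 0, 1, 8, &rx1g, &rx2g5, &rx10g);
    	printf("per-port buffers: 1G=%d 2.5G=%d 10G=%d\n", rx1g, rx2g5, rx10g);
    	return 0;
    }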
-+
-+#if !defined(FMAN_RESOURCES_UNIT_TEST)
-+int fm_config_precalculate_fifosize(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev);
-+#endif
-+
-+/* Compute FMan open DMA based on total number of open DMAs and
-+ * number of available fman ports.
-+ *
-+ * By default, 10g ports are set from the input parameters. The other
-+ * ports try to keep the proportion rx = 2*tx open DMAs, or their
-+ * thresholds.
-+ *
-+ * If there are leftovers, those are set as shared.
-+ *
-+ * If the computation overflows, open DMAs are decremented for all ports
-+ * without crossing the thresholds. If the thresholds are reached and
-+ * there is still an overflow, an error is returned.
-+*/
-+int fm_precalculate_open_dma(t_LnxWrpFmDev *p_LnxWrpFmDev,
-+ int max_fm_open_dma,
-+ int default_tx_10g_dmas,
-+ int default_rx_10g_dmas,
-+ int min_tx_10g_treshold, int min_rx_10g_treshold);
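The overflow handling described above (trim open DMAs evenly, but never below a port's threshold) can be sketched as follows; the names are hypothetical and only illustrate the clamped round-robin decrement:

    /* Hypothetical sketch: remove one open DMA per port, round-robin, until
     * the overflow is absorbed, but never go below a port's minimum
     * threshold. Returns 0 on success, -1 if the thresholds are reached and
     * the budget is still exceeded (the driver reports this as an error). */
    static int trim_open_dmas(int *dmas, const int *min, int n_ports, int overflow)
    {
    	int progress = 1;

    	while (overflow > 0 && progress) {
    		int i;

    		progress = 0;
    		for (i = 0; i < n_ports && overflow > 0; i++) {
    			if (dmas[i] > min[i]) {
    				dmas[i]--;
    				overflow--;
    				progress = 1;
    			}
    		}
    	}
    	return overflow ? -1 : 0;
    }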
-+
-+#if !defined(FMAN_RESOURCES_UNIT_TEST)
-+int fm_config_precalculate_open_dma(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev);
-+#endif
-+
-+/* Compute FMan TNUMs based on the available TNUMs and the number of ports.
-+ * Set defaults (minimum thresholds) and then distribute the leftovers. */
-+int fm_precalculate_tnums(t_LnxWrpFmDev *p_LnxWrpFmDev, int max_fm_tnums);
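A comparable sketch for the TNUM pass (again with hypothetical names): each port first gets the minimum, then the leftovers are handed out one at a time across the ports.

    /* Hypothetical sketch: seed every port with the minimum TNUM count and
     * spread any leftover TNUMs one by one across the ports. */
    static void spread_tnums(int *tnums, int n_ports, int min_tnums, int total)
    {
    	int i, left = total - n_ports * min_tnums;

    	if (n_ports <= 0)
    		return;
    	for (i = 0; i < n_ports; i++)
    		tnums[i] = min_tnums;
    	if (left < 0)
    		left = 0;	/* not enough TNUMs: everyone stays at the minimum */
    	for (i = 0; left > 0; i = (i + 1) % n_ports, left--)
    		tnums[i]++;
    }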
-+
-+#if !defined(FMAN_RESOURCES_UNIT_TEST)
-+int fm_config_precalculate_tnums(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev);
-+#endif
-+
-+#endif /* LNXWRP_RESOURCES_H_ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.c
-@@ -0,0 +1,191 @@
-+/* Copyright (c) 2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "lnxwrp_resources.h"
-+#include "lnxwrp_resources_ut.h"
-+
-+#define KILOBYTE 0x400 /* 1024 */
-+
-+typedef enum e_board_type {
-+ e_p3041,
-+ e_p4080,
-+ e_p5020,
-+ e_p1023
-+} e_board_type;
-+
-+uint8_t board_type;
-+uint32_t muram_size = 0;
-+uint32_t dmas_num = 0;
-+uint32_t task_num = 0;
-+uint32_t frame_size = 0;
-+uint32_t oh_num = 0;
-+uint32_t num_ports_1g = 0;
-+uint32_t num_ports_10g = 0;
-+uint32_t num_ports_2g5 = 0;
-+uint32_t fsl_fman_phy_maxfrm = 0;
-+uint32_t dpa_rx_extra_headroom = 0;
-+
-+void show_help(void){
-+ printf(" help: \n");
-+	printf(" -b <board_type> -f <max_frame_size(mtu)> -o <num_oh_ports> -g1"
-+		" <num_1g_ports> -g10 <num_10g_ports> -g25 <num_2g5_ports>\n");
-+	printf(" Maximum num of DMAs available: P3/P4/P5:32 , P1023:16 \n");
-+	printf(" Maximum num of TNUMs available: P3/P4/P5:128, P1023:32 \n");
-+ printf(" Muram size: P3/P4/P5:160K, P1023:64K \n");
-+ printf(" Number of ports:\n");
-+ printf(" P3/P5: 5p 1g, 1p 10g, 7p oh \n");
-+ printf(" P4 : 4p 1g, 1p 10g, 7p oh \n");
-+ printf(" P1 : 2p 1g, 0p 10g, 4p oh \n");
-+ printf(" MTU: Default:1522, Jumbo:9600 \n");
-+}
-+
-+int fm_set_param(t_LnxWrpFmDev *p_LnxWrpFmDev) {
-+ struct fm_active_ports *fm_active_ports_info = NULL;
-+ fm_active_ports_info = &p_LnxWrpFmDev->fm_active_ports_info;
-+
-+ switch(board_type){
-+ case e_p3041:
-+ case e_p5020:
-+ muram_size = 160*KILOBYTE;
-+ dmas_num = 32;
-+ task_num = 128;
-+ if ((num_ports_1g+num_ports_2g5) > 5 || num_ports_10g > 1 || oh_num > 7)
-+ goto err_fm_set_param;
-+ break;
-+ case e_p4080:
-+ muram_size = 160*KILOBYTE;
-+ dmas_num = 32;
-+ task_num = 128;
-+ if ((num_ports_1g+num_ports_2g5) > 4 || num_ports_10g > 1 || oh_num > 7)
-+ goto err_fm_set_param;
-+ break;
-+ case e_p1023:
-+ muram_size = 64*KILOBYTE;
-+ dmas_num = 16;
-+ task_num = 128;
-+ if ((num_ports_1g+num_ports_2g5) > 2 || oh_num > 4)
-+ goto err_fm_set_param;
-+ break;
-+ default:
-+ goto err_fm_set_param;
-+ break;
-+ }
-+
-+ p_LnxWrpFmDev->id = 0;
-+ fsl_fman_phy_maxfrm = frame_size;
-+ dpa_rx_extra_headroom = 0; /* ATTENTION: can be != 0 */
-+ fm_active_ports_info->num_oh_ports = oh_num;
-+ fm_active_ports_info->num_tx_ports = num_ports_1g;
-+ fm_active_ports_info->num_rx_ports = num_ports_1g;
-+ fm_active_ports_info->num_tx25_ports = num_ports_2g5;
-+ fm_active_ports_info->num_rx25_ports = num_ports_2g5;
-+ fm_active_ports_info->num_tx10_ports = num_ports_10g;
-+ fm_active_ports_info->num_rx10_ports = num_ports_10g;
-+
-+ return 0;
-+
-+err_fm_set_param:
-+	printf(" ERR: Too many ports!!! \n");
-+ return -1;
-+}
-+
-+int main (int argc, char *argv[]){
-+ t_LnxWrpFmDev LnxWrpFmDev;
-+ t_LnxWrpFmDev *p_LnxWrpFmDev = &LnxWrpFmDev;
-+ int tokens_cnt = 1;
-+
-+ char *token = NULL;
-+
-+ while(tokens_cnt < argc)
-+ {
-+ token = argv[tokens_cnt++];
-+ if (strcmp(token, "-b") == 0){
-+ if(strcmp(argv[tokens_cnt],"p3") == 0)
-+ board_type = e_p3041;
-+ else if(strcmp(argv[tokens_cnt],"p4") == 0)
-+ board_type = e_p4080;
-+ else if(strcmp(argv[tokens_cnt],"p5") == 0)
-+ board_type = e_p5020;
-+ else if(strcmp(argv[tokens_cnt],"p1") == 0)
-+ board_type = e_p1023;
-+ else
-+ show_help();
-+ tokens_cnt++;
-+ }
-+ else if(strcmp(token, "-d") == 0){
-+ dmas_num = atoi(argv[tokens_cnt++]);
-+ }
-+ else if(strcmp(token, "-t") == 0)
-+ task_num = atoi(argv[tokens_cnt++]);
-+ else if(strcmp(token, "-f") == 0)
-+ frame_size = atoi(argv[tokens_cnt++]);
-+ else if(strcmp(token, "-o") == 0)
-+ oh_num = atoi(argv[tokens_cnt++]);
-+ else if(strcmp(token, "-g1") == 0)
-+ num_ports_1g = atoi(argv[tokens_cnt++]);
-+ else if(strcmp(token, "-g10") == 0)
-+ num_ports_10g = atoi(argv[tokens_cnt++]);
-+ else if(strcmp(token, "-g25") == 0)
-+ num_ports_2g5 = atoi(argv[tokens_cnt++]);
-+ else {
-+ show_help();
-+ return -1;
-+ }
-+ }
-+
-+ if(fm_set_param(p_LnxWrpFmDev) < 0){
-+ show_help();
-+ return -1;
-+ }
-+
-+ if(fm_precalculate_fifosizes(
-+ p_LnxWrpFmDev,
-+ 128*KILOBYTE)
-+ != 0)
-+ return -1;
-+ if(fm_precalculate_open_dma(
-+ p_LnxWrpFmDev,
-+ dmas_num, /* max open dmas:dpaa_integration_ext.h */
-+ FM_DEFAULT_TX10G_OPENDMA, /* default TX 10g open dmas */
-+ FM_DEFAULT_RX10G_OPENDMA, /* default RX 10g open dmas */
-+		FM_10G_OPENDMA_MIN_TRESHOLD,/* TX 10g minimum threshold */
-+		FM_10G_OPENDMA_MIN_TRESHOLD)/* RX 10g minimum threshold */
-+ != 0)
-+ return -1;
-+ if(fm_precalculate_tnums(
-+ p_LnxWrpFmDev,
-+ task_num) /* max TNUMS: dpa integration file. */
-+ != 0)
-+ return -1;
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.h
-@@ -0,0 +1,144 @@
-+/* Copyright (c) 2012 Freescale Semiconductor, Inc
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef FM_RESS_TEST_H_
-+#define FM_RESS_TEST_H_
-+
-+#include <stdint.h>
-+#include <stdbool.h>
-+#include <stdio.h>
-+#include <assert.h>
-+#include <string.h>
-+#include <stdlib.h>
-+
-+#define _Packed
-+#define _PackedType __attribute__ ((packed))
-+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-+#define KERN_ALERT ""
-+#define KERN_INFO ""
-+#define ASSERT_COND assert
-+#define printk printf
-+#define NET_IP_ALIGN 0
-+#define FM_FIFO_ALLOCATION_OLD_ALG
-+
-+#if defined(CONFIG_FMAN_DISABLE_OH_AND_DISTRIBUTE_RESOURCES)
-+#define FM_10G_OPENDMA_MIN_TRESHOLD 8 /* 10g minimum threshold if only HC is enabled and no OH port is enabled */
-+#define FM_OPENDMA_RX_TX_RAPORT 2 /* RX = 2*TX */
-+#else
-+#define FM_10G_OPENDMA_MIN_TRESHOLD 7 /* 10g minimum threshold if 7 OH ports are enabled */
-+#define FM_OPENDMA_RX_TX_RAPORT 1 /* RX = TX */
-+#endif
-+#define FM_DEFAULT_TX10G_OPENDMA 8 /* default TX 10g open dmas */
-+#define FM_DEFAULT_RX10G_OPENDMA 8 /* default RX 10g open dmas */
-+
-+/* Information about all active ports for an FMan.
-+ * Note: some ports may be disabled by u-boot, thus they will not be available. */
-+struct fm_active_ports {
-+ uint32_t num_oh_ports;
-+ uint32_t num_tx_ports;
-+ uint32_t num_rx_ports;
-+ uint32_t num_tx25_ports;
-+ uint32_t num_rx25_ports;
-+ uint32_t num_tx10_ports;
-+ uint32_t num_rx10_ports;
-+};
-+
-+/* FMan resources precalculated at FM probe time, based
-+ * on the available FMan ports. */
-+struct fm_resource_settings {
-+ /* buffers - fifo sizes */
-+ uint32_t tx1g_num_buffers;
-+ uint32_t rx1g_num_buffers;
-+ uint32_t tx2g5_num_buffers; /* Not supported yet by LLD */
-+ uint32_t rx2g5_num_buffers; /* Not supported yet by LLD */
-+ uint32_t tx10g_num_buffers;
-+ uint32_t rx10g_num_buffers;
-+ uint32_t oh_num_buffers;
-+ uint32_t shared_ext_buffers;
-+
-+
-+ /* open DMAs */
-+ uint32_t tx_1g_dmas;
-+ uint32_t rx_1g_dmas;
-+ uint32_t tx_2g5_dmas; /* Not supported yet by LLD */
-+ uint32_t rx_2g5_dmas; /* Not supported yet by LLD */
-+ uint32_t tx_10g_dmas;
-+ uint32_t rx_10g_dmas;
-+ uint32_t oh_dmas;
-+ uint32_t shared_ext_open_dma;
-+
-+ /* Tnums */
-+ uint32_t tx_1g_tnums;
-+ uint32_t rx_1g_tnums;
-+ uint32_t tx_2g5_tnums; /* Not supported yet by LLD */
-+ uint32_t rx_2g5_tnums; /* Not supported yet by LLD */
-+ uint32_t tx_10g_tnums;
-+ uint32_t rx_10g_tnums;
-+ uint32_t oh_tnums;
-+ uint32_t shared_ext_tnums;
-+};
-+
-+typedef struct {
-+ uint8_t id;
-+ struct fm_active_ports fm_active_ports_info;
-+ struct fm_resource_settings fm_resource_settings_info;
-+} t_LnxWrpFmDev;
-+
-+typedef struct {
-+ uint8_t id;
-+} t_LnxWrpFmPortDev;
-+
-+typedef _Packed struct t_FmPrsResult {
-+ volatile uint8_t lpid; /**< Logical port id */
-+ volatile uint8_t shimr; /**< Shim header result */
-+ volatile uint16_t l2r; /**< Layer 2 result */
-+ volatile uint16_t l3r; /**< Layer 3 result */
-+ volatile uint8_t l4r; /**< Layer 4 result */
-+ volatile uint8_t cplan; /**< Classification plan id */
-+ volatile uint16_t nxthdr; /**< Next Header */
-+ volatile uint16_t cksum; /**< Checksum */
-+ volatile uint32_t lcv; /**< LCV */
-+ volatile uint8_t shim_off[3]; /**< Shim offset */
-+ volatile uint8_t eth_off; /**< ETH offset */
-+ volatile uint8_t llc_snap_off; /**< LLC_SNAP offset */
-+ volatile uint8_t vlan_off[2]; /**< VLAN offset */
-+ volatile uint8_t etype_off; /**< ETYPE offset */
-+ volatile uint8_t pppoe_off; /**< PPP offset */
-+ volatile uint8_t mpls_off[2]; /**< MPLS offset */
-+ volatile uint8_t ip_off[2]; /**< IP offset */
-+ volatile uint8_t gre_off; /**< GRE offset */
-+ volatile uint8_t l4_off; /**< Layer 4 offset */
-+ volatile uint8_t nxthdr_off; /**< Parser end point */
-+} _PackedType t_FmPrsResult;
-+
-+#endif
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_resources_ut.make
-@@ -0,0 +1,28 @@
-+CC=gcc
-+
-+LNXWRP_RESS_UT=lnxwrp_resources_ut
-+OBJ=lnxwrp_resources
-+
-+INC_PATH=
-+LIB_PATH=
-+
-+INC=$(addprefix -I,$(INC_PATH))
-+LIB=$(addprefix -L,$(LIB_PATH))
-+
-+CFLAGS= -gdwarf-2 -g -O0 -Wall
-+XFLAGS= -DFMAN_RESOURCES_UNIT_TEST
-+
-+all: $(LNXWRP_RESS_UT)
-+
-+$(LNXWRP_RESS_UT):$(addsuffix .o,$(OBJ)) $(LNXWRP_RESS_UT).o
-+ $(CC) -o $(LNXWRP_RESS_UT) $(LNXWRP_RESS_UT).o $(addsuffix .o,$(OBJ))
-+
-+%.o: %.c
-+ @(echo " (CC) $@")
-+ @($(CC) $(INC) $(CFLAGS) $(XFLAGS) -o $(@) -c $<)
-+
-+.PHONY: clean
-+
-+clean:
-+ rm -f *.o
-+ rm -f $(LNXWRP_RESS_UT)
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c
-@@ -0,0 +1,60 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_sysfs.c
-+
-+ @Description FM wrapper sysfs related functions.
-+
-+*/
-+
-+#include <linux/types.h>
-+#include "lnxwrp_sysfs.h"
-+
-+uint8_t fm_find_statistic_counter_by_name(const char *attr_name,
-+ const struct sysfs_stats_t *sysfs_stats,
-+ uint8_t *offset)
-+{
-+ int i = 0;
-+
-+ while (sysfs_stats[i].stat_name != NULL) {
-+ if (strcmp(sysfs_stats[i].stat_name, attr_name) == 0) {
-+ if (offset != NULL)
-+ *offset = i;
-+ return sysfs_stats[i].stat_counter;
-+ }
-+
-+ i++;
-+ }
-+ WARN(1, "FMD: Should never get here!");
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h
-@@ -0,0 +1,60 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef LNXWRP_SYSFS_H_
-+#define LNXWRP_SYSFS_H_
-+
-+/* Linux Headers ------------------- */
-+#include <linux/version.h>
-+
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#include <config/modversions.h>
-+#endif /* MODVERSIONS */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/device.h>
-+#include <linux/sysfs.h>
-+
-+struct sysfs_stats_t {
-+ const char *stat_name;
-+ uint8_t stat_counter;
-+};
-+
-+uint8_t fm_find_statistic_counter_by_name(const char *attr_name,
-+ const struct sysfs_stats_t *sysfs_stats,
-+ uint8_t *offset);
-+
-+#endif /* LNXWRP_SYSFS_H_ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c
-@@ -0,0 +1,1855 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "lnxwrp_sysfs.h"
-+#include "lnxwrp_sysfs_fm.h"
-+#include "lnxwrp_fm.h"
-+
-+#include "../../sdk_fman/Peripherals/FM/inc/fm_common.h"
-+#include "../../sdk_fman/Peripherals/FM/Pcd/fm_pcd.h"
-+#include "../../sdk_fman/Peripherals/FM/Pcd/fm_kg.h"
-+#include "../../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h"
-+
-+#if defined(__ERR_MODULE__)
-+#undef __ERR_MODULE__
-+#endif
-+
-+#include "../../sdk_fman/Peripherals/FM/fm.h"
-+#include <linux/delay.h>
-+
-+
-+static int fm_get_counter(void *h_fm, e_FmCounters cnt_e, uint32_t *cnt_val);
-+
-+enum fm_dma_match_stats {
-+ FM_DMA_COUNTERS_CMQ_NOT_EMPTY,
-+ FM_DMA_COUNTERS_BUS_ERROR,
-+ FM_DMA_COUNTERS_READ_BUF_ECC_ERROR,
-+ FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR,
-+ FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR
-+};
-+
-+static const struct sysfs_stats_t fm_sysfs_stats[] = {
-+ /* FM statistics */
-+ {
-+ .stat_name = "enq_total_frame",
-+ .stat_counter = e_FM_COUNTERS_ENQ_TOTAL_FRAME,
-+ },
-+ {
-+ .stat_name = "deq_total_frame",
-+ .stat_counter = e_FM_COUNTERS_DEQ_TOTAL_FRAME,
-+ },
-+ {
-+ .stat_name = "deq_0",
-+ .stat_counter = e_FM_COUNTERS_DEQ_0,
-+ },
-+ {
-+ .stat_name = "deq_1",
-+ .stat_counter = e_FM_COUNTERS_DEQ_1,
-+ },
-+ {
-+ .stat_name = "deq_2",
-+ .stat_counter = e_FM_COUNTERS_DEQ_2,
-+ },
-+ {
-+ .stat_name = "deq_3",
-+ .stat_counter = e_FM_COUNTERS_DEQ_3,
-+ },
-+ {
-+ .stat_name = "deq_from_default",
-+ .stat_counter = e_FM_COUNTERS_DEQ_FROM_DEFAULT,
-+ },
-+ {
-+ .stat_name = "deq_from_context",
-+ .stat_counter = e_FM_COUNTERS_DEQ_FROM_CONTEXT,
-+ },
-+ {
-+ .stat_name = "deq_from_fd",
-+ .stat_counter = e_FM_COUNTERS_DEQ_FROM_FD,
-+ },
-+ {
-+ .stat_name = "deq_confirm",
-+ .stat_counter = e_FM_COUNTERS_DEQ_CONFIRM,
-+ },
-+ /* FM:DMA statistics */
-+ {
-+ .stat_name = "cmq_not_empty",
-+ .stat_counter = FM_DMA_COUNTERS_CMQ_NOT_EMPTY,
-+ },
-+ {
-+ .stat_name = "bus_error",
-+ .stat_counter = FM_DMA_COUNTERS_BUS_ERROR,
-+ },
-+ {
-+ .stat_name = "read_buf_ecc_error",
-+ .stat_counter = FM_DMA_COUNTERS_READ_BUF_ECC_ERROR,
-+ },
-+ {
-+ .stat_name = "write_buf_ecc_sys_error",
-+ .stat_counter = FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR,
-+ },
-+ {
-+ .stat_name = "write_buf_ecc_fm_error",
-+ .stat_counter = FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR,
-+ },
-+ /* FM:PCD statistics */
-+ {
-+ .stat_name = "pcd_kg_total",
-+ .stat_counter = e_FM_PCD_KG_COUNTERS_TOTAL,
-+ },
-+ {
-+ .stat_name = "pcd_plcr_yellow",
-+ .stat_counter = e_FM_PCD_PLCR_COUNTERS_YELLOW,
-+ },
-+ {
-+ .stat_name = "pcd_plcr_red",
-+ .stat_counter = e_FM_PCD_PLCR_COUNTERS_RED,
-+ },
-+ {
-+ .stat_name = "pcd_plcr_recolored_to_red",
-+ .stat_counter = e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED,
-+ },
-+ {
-+ .stat_name = "pcd_plcr_recolored_to_yellow",
-+ .stat_counter = e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW,
-+ },
-+ {
-+ .stat_name = "pcd_plcr_total",
-+ .stat_counter = e_FM_PCD_PLCR_COUNTERS_TOTAL,
-+ },
-+ {
-+ .stat_name = "pcd_plcr_length_mismatch",
-+ .stat_counter = e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH,
-+ },
-+ {
-+ .stat_name = "pcd_prs_parse_dispatch",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH,
-+ },
-+ {
-+ .stat_name = "pcd_prs_l2_parse_result_returned",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED,
-+ },
-+ {
-+ .stat_name = "pcd_prs_l3_parse_result_returned",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED,
-+ },
-+ {
-+ .stat_name = "pcd_prs_l4_parse_result_returned",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED,
-+ },
-+ {
-+ .stat_name = "pcd_prs_shim_parse_result_returned",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED,
-+ },
-+ {
-+ .stat_name = "pcd_prs_l2_parse_result_returned_with_err",
-+ .stat_counter =
-+ e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR,
-+ },
-+ {
-+ .stat_name = "pcd_prs_l3_parse_result_returned_with_err",
-+ .stat_counter =
-+ e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR,
-+ },
-+ {
-+ .stat_name = "pcd_prs_l4_parse_result_returned_with_err",
-+ .stat_counter =
-+ e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR,
-+ },
-+ {
-+ .stat_name = "pcd_prs_shim_parse_result_returned_with_err",
-+ .stat_counter =
-+ e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR,
-+ },
-+ {
-+ .stat_name = "pcd_prs_soft_prs_cycles",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES,
-+ },
-+ {
-+ .stat_name = "pcd_prs_soft_prs_stall_cycles",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES,
-+ },
-+ {
-+ .stat_name = "pcd_prs_hard_prs_cycle_incl_stall_cycles",
-+ .stat_counter =
-+ e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES,
-+ },
-+ {
-+ .stat_name = "pcd_prs_muram_read_cycles",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES,
-+ },
-+ {
-+ .stat_name = "pcd_prs_muram_read_stall_cycles",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES,
-+ },
-+ {
-+ .stat_name = "pcd_prs_muram_write_cycles",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES,
-+ },
-+ {
-+ .stat_name = "pcd_prs_muram_write_stall_cycles",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES,
-+ },
-+ {
-+ .stat_name = "pcd_prs_fpm_command_stall_cycles",
-+ .stat_counter = e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES,
-+ },
-+ {}
-+};
-+
-+
-+static ssize_t show_fm_risc_load(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+ unsigned long flags;
-+	int m = 0;
-+	int err = 0;
-+	unsigned n = 0;
-+	t_FmCtrlMon util;
-+	uint8_t i = 0;
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
-+ return -EIO;
-+
-+ local_irq_save(flags);
-+
-+ /* Calculate risc load */
-+ FM_CtrlMonStart(p_wrp_fm_dev->h_Dev);
-+ msleep(1000);
-+ FM_CtrlMonStop(p_wrp_fm_dev->h_Dev);
-+
-+ for (i = 0; i < FM_NUM_OF_CTRL; i++) {
-+ err |= FM_CtrlMonGetCounters(p_wrp_fm_dev->h_Dev, i, &util);
-+		m = snprintf(&buf[n], PAGE_SIZE - n,
-+			"\tRisc%u: util-%u%%, efficiency-%u%%\n",
-+			i, util.percentCnt[0], util.percentCnt[1]);
-+		n = m + n;
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+}
-+
-+/* Fm stats and regs dumps via sysfs */
-+static ssize_t show_fm_dma_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+ t_FmDmaStatus dma_status;
-+ unsigned long flags = 0;
-+ unsigned n = 0;
-+ uint8_t counter_value = 0, counter = 0;
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
-+ return -EIO;
-+
-+ counter = fm_find_statistic_counter_by_name(
-+ attr->attr.name,
-+ fm_sysfs_stats, NULL);
-+
-+ local_irq_save(flags);
-+
-+ memset(&dma_status, 0, sizeof(dma_status));
-+ FM_GetDmaStatus(p_wrp_fm_dev->h_Dev, &dma_status);
-+
-+ switch (counter) {
-+ case FM_DMA_COUNTERS_CMQ_NOT_EMPTY:
-+ counter_value = dma_status.cmqNotEmpty;
-+ break;
-+ case FM_DMA_COUNTERS_BUS_ERROR:
-+ counter_value = dma_status.busError;
-+ break;
-+ case FM_DMA_COUNTERS_READ_BUF_ECC_ERROR:
-+ counter_value = dma_status.readBufEccError;
-+ break;
-+ case FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR:
-+ counter_value = dma_status.writeBufEccSysError;
-+ break;
-+ case FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR:
-+ counter_value = dma_status.writeBufEccFmError;
-+ break;
-+ default:
-+ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
-+ __func__);
-+ break;
-+ };
-+
-+ n = snprintf(buf, PAGE_SIZE, "\tFM %u counter: %c\n",
-+ p_wrp_fm_dev->id, counter_value ? 'T' : 'F');
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+ unsigned long flags = 0;
-+ unsigned n = 0, cnt_e = 0;
-+ uint32_t cnt_val;
-+ int err;
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
-+ return -EIO;
-+
-+ cnt_e = fm_find_statistic_counter_by_name(
-+ attr->attr.name,
-+ fm_sysfs_stats, NULL);
-+
-+ err = fm_get_counter(p_wrp_fm_dev->h_Dev,
-+ (e_FmCounters) cnt_e, &cnt_val);
-+
-+ if (err)
-+ return err;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "\tFM %d counter: %d\n",
-+ p_wrp_fm_dev->id, cnt_val);
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_muram_free_sz(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+ unsigned long flags = 0;
-+ unsigned n = 0;
-+ uint64_t muram_free_size = 0;
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
-+ return -EIO;
-+
-+ muram_free_size = FM_MURAM_GetFreeMemSize(p_wrp_fm_dev->h_MuramDev);
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "\tFM %d muram_free_size: %lld\n",
-+ p_wrp_fm_dev->id, muram_free_size);
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_ctrl_code_ver(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+ unsigned long flags = 0;
-+ unsigned n = 0;
-+ t_FmCtrlCodeRevisionInfo rv_info;
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
-+ return -EIO;
-+
-+ FM_GetFmanCtrlCodeRevision((t_Fm *)p_wrp_fm_dev->h_Dev, &rv_info);
-+
-+ local_irq_save(flags);
-+
-+ FM_DMP_LN(buf, n, "- FM %d ctrl code pkg info:\n", p_wrp_fm_dev->id);
-+ FM_DMP_LN(buf, n, "Package rev: %d\n", rv_info.packageRev);
-+ FM_DMP_LN(buf, n, "major rev: %d\n", rv_info.majorRev);
-+ FM_DMP_LN(buf, n, "minor rev: %d\n", rv_info.minorRev);
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_pcd_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+ unsigned long flags = 0;
-+ unsigned n = 0, counter = 0;
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev ||
-+ !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+
-+ counter = fm_find_statistic_counter_by_name(
-+ attr->attr.name,
-+ fm_sysfs_stats, NULL);
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "\tFM %d counter: %d\n",
-+ p_wrp_fm_dev->id,
-+ FM_PCD_GetCounter(p_wrp_fm_dev->h_PcdDev,
-+ (e_FmPcdCounters) counter));
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_tnum_dbg(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ if (!p_wrp_fm_dev->active)
-+ return -EIO;
-+ else {
-+ int tn_s;
-+
-+ if (!sscanf(attr->attr.name, "tnum_dbg_%d", &tn_s))
-+ return -EINVAL;
-+
-+ n = fm_dump_tnum_dbg(p_wrp_fm_dev->h_Dev,
-+ tn_s, tn_s + 15, buf, n);
-+ }
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_cls_plan(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "\n FM-KG classification plan dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+ else {
-+ int cpn;
-+
-+ if (!sscanf(attr->attr.name, "cls_plan_%d", &cpn))
-+ return -EINVAL;
-+
-+ n = fm_dump_cls_plan(p_wrp_fm_dev->h_PcdDev, cpn, buf, n);
-+ }
-+ local_irq_restore(flags);
-+#else
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_profiles(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "FM policer profile dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+ else {
-+ int pn;
-+
-+ if (!sscanf(attr->attr.name, "profile_%d", &pn))
-+ return -EINVAL;
-+
-+ n = fm_profile_dump_regs(p_wrp_fm_dev->h_PcdDev, pn, buf, n);
-+ }
-+ local_irq_restore(flags);
-+#else
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_schemes(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "FM-KG driver schemes dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+ else {
-+ int sn;
-+
-+ if (!sscanf(attr->attr.name, "scheme_%d", &sn))
-+ return -EINVAL;
-+
-+ n = fm_dump_scheme(p_wrp_fm_dev->h_PcdDev, sn, buf, n);
-+ }
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+/* FM */
-+static DEVICE_ATTR(enq_total_frame, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_total_frame, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(fm_risc_load_val, S_IRUGO, show_fm_risc_load, NULL);
-+static DEVICE_ATTR(deq_0, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_1, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_2, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_3, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_from_default, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_from_context, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_from_fd, S_IRUGO, show_fm_stats, NULL);
-+static DEVICE_ATTR(deq_confirm, S_IRUGO, show_fm_stats, NULL);
-+/* FM:DMA */
-+static DEVICE_ATTR(cmq_not_empty, S_IRUGO, show_fm_dma_stats, NULL);
-+static DEVICE_ATTR(bus_error, S_IRUGO, show_fm_dma_stats, NULL);
-+static DEVICE_ATTR(read_buf_ecc_error, S_IRUGO, show_fm_dma_stats, NULL);
-+static DEVICE_ATTR(write_buf_ecc_sys_error, S_IRUGO, show_fm_dma_stats, NULL);
-+static DEVICE_ATTR(write_buf_ecc_fm_error, S_IRUGO, show_fm_dma_stats, NULL);
-+/* FM:PCD */
-+static DEVICE_ATTR(pcd_kg_total, S_IRUGO, show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_plcr_yellow, S_IRUGO, show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_plcr_red, S_IRUGO, show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_plcr_recolored_to_red, S_IRUGO, show_fm_pcd_stats,
-+ NULL);
-+static DEVICE_ATTR(pcd_plcr_recolored_to_yellow, S_IRUGO, show_fm_pcd_stats,
-+ NULL);
-+static DEVICE_ATTR(pcd_plcr_total, S_IRUGO, show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_plcr_length_mismatch, S_IRUGO, show_fm_pcd_stats,
-+ NULL);
-+static DEVICE_ATTR(pcd_prs_parse_dispatch, S_IRUGO, show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_l2_parse_result_returned, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_l3_parse_result_returned, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_l4_parse_result_returned, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_shim_parse_result_returned, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_l2_parse_result_returned_with_err, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_l3_parse_result_returned_with_err, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_l4_parse_result_returned_with_err, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_shim_parse_result_returned_with_err, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_soft_prs_cycles, S_IRUGO, show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_soft_prs_stall_cycles, S_IRUGO, show_fm_pcd_stats,
-+ NULL);
-+static DEVICE_ATTR(pcd_prs_hard_prs_cycle_incl_stall_cycles, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_muram_read_cycles, S_IRUGO, show_fm_pcd_stats,
-+ NULL);
-+static DEVICE_ATTR(pcd_prs_muram_read_stall_cycles, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_muram_write_cycles, S_IRUGO, show_fm_pcd_stats,
-+ NULL);
-+static DEVICE_ATTR(pcd_prs_muram_write_stall_cycles, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+static DEVICE_ATTR(pcd_prs_fpm_command_stall_cycles, S_IRUGO,
-+ show_fm_pcd_stats, NULL);
-+
-+static DEVICE_ATTR(tnum_dbg_0, S_IRUGO, show_fm_tnum_dbg, NULL);
-+static DEVICE_ATTR(tnum_dbg_16, S_IRUGO, show_fm_tnum_dbg, NULL);
-+static DEVICE_ATTR(tnum_dbg_32, S_IRUGO, show_fm_tnum_dbg, NULL);
-+static DEVICE_ATTR(tnum_dbg_48, S_IRUGO, show_fm_tnum_dbg, NULL);
-+static DEVICE_ATTR(tnum_dbg_64, S_IRUGO, show_fm_tnum_dbg, NULL);
-+static DEVICE_ATTR(tnum_dbg_80, S_IRUGO, show_fm_tnum_dbg, NULL);
-+static DEVICE_ATTR(tnum_dbg_96, S_IRUGO, show_fm_tnum_dbg, NULL);
-+static DEVICE_ATTR(tnum_dbg_112, S_IRUGO, show_fm_tnum_dbg, NULL);
-+
-+static DEVICE_ATTR(cls_plan_0, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_1, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_2, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_3, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_4, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_5, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_6, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_7, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_8, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_9, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_10, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_11, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_12, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_13, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_14, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_15, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_16, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_17, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_18, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_19, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_20, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_21, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_22, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_23, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_24, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_25, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_26, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_27, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_28, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_29, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_30, S_IRUGO, show_fm_cls_plan, NULL);
-+static DEVICE_ATTR(cls_plan_31, S_IRUGO, show_fm_cls_plan, NULL);
-+
-+static DEVICE_ATTR(profile_0, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_1, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_2, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_3, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_4, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_5, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_6, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_7, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_8, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_9, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_10, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_11, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_12, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_13, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_14, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_15, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_16, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_17, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_18, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_19, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_20, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_21, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_22, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_23, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_24, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_25, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_26, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_27, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_28, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_29, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_30, S_IRUGO, show_fm_profiles, NULL);
-+static DEVICE_ATTR(profile_31, S_IRUGO, show_fm_profiles, NULL);
-+
-+static DEVICE_ATTR(scheme_0, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_1, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_2, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_3, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_4, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_5, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_6, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_7, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_8, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_9, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_10, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_11, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_12, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_13, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_14, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_15, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_16, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_17, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_18, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_19, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_20, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_21, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_22, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_23, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_24, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_25, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_26, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_27, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_28, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_29, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_30, S_IRUGO, show_fm_schemes, NULL);
-+static DEVICE_ATTR(scheme_31, S_IRUGO, show_fm_schemes, NULL);
-+
-+
-+static struct attribute *fm_dev_stats_attributes[] = {
-+ &dev_attr_enq_total_frame.attr,
-+ &dev_attr_deq_total_frame.attr,
-+ &dev_attr_deq_0.attr,
-+ &dev_attr_deq_1.attr,
-+ &dev_attr_deq_2.attr,
-+ &dev_attr_deq_3.attr,
-+ &dev_attr_deq_from_default.attr,
-+ &dev_attr_deq_from_context.attr,
-+ &dev_attr_deq_from_fd.attr,
-+ &dev_attr_deq_confirm.attr,
-+ &dev_attr_cmq_not_empty.attr,
-+ &dev_attr_bus_error.attr,
-+ &dev_attr_read_buf_ecc_error.attr,
-+ &dev_attr_write_buf_ecc_sys_error.attr,
-+ &dev_attr_write_buf_ecc_fm_error.attr,
-+ &dev_attr_pcd_kg_total.attr,
-+ &dev_attr_pcd_plcr_yellow.attr,
-+ &dev_attr_pcd_plcr_red.attr,
-+ &dev_attr_pcd_plcr_recolored_to_red.attr,
-+ &dev_attr_pcd_plcr_recolored_to_yellow.attr,
-+ &dev_attr_pcd_plcr_total.attr,
-+ &dev_attr_pcd_plcr_length_mismatch.attr,
-+ &dev_attr_pcd_prs_parse_dispatch.attr,
-+ &dev_attr_pcd_prs_l2_parse_result_returned.attr,
-+ &dev_attr_pcd_prs_l3_parse_result_returned.attr,
-+ &dev_attr_pcd_prs_l4_parse_result_returned.attr,
-+ &dev_attr_pcd_prs_shim_parse_result_returned.attr,
-+ &dev_attr_pcd_prs_l2_parse_result_returned_with_err.attr,
-+ &dev_attr_pcd_prs_l3_parse_result_returned_with_err.attr,
-+ &dev_attr_pcd_prs_l4_parse_result_returned_with_err.attr,
-+ &dev_attr_pcd_prs_shim_parse_result_returned_with_err.attr,
-+ &dev_attr_pcd_prs_soft_prs_cycles.attr,
-+ &dev_attr_pcd_prs_soft_prs_stall_cycles.attr,
-+ &dev_attr_pcd_prs_hard_prs_cycle_incl_stall_cycles.attr,
-+ &dev_attr_pcd_prs_muram_read_cycles.attr,
-+ &dev_attr_pcd_prs_muram_read_stall_cycles.attr,
-+ &dev_attr_pcd_prs_muram_write_cycles.attr,
-+ &dev_attr_pcd_prs_muram_write_stall_cycles.attr,
-+ &dev_attr_pcd_prs_fpm_command_stall_cycles.attr,
-+ NULL
-+};
-+
-+static struct attribute *fm_dev_tnums_dbg_attributes[] = {
-+ &dev_attr_tnum_dbg_0.attr,
-+ &dev_attr_tnum_dbg_16.attr,
-+ &dev_attr_tnum_dbg_32.attr,
-+ &dev_attr_tnum_dbg_48.attr,
-+ &dev_attr_tnum_dbg_64.attr,
-+ &dev_attr_tnum_dbg_80.attr,
-+ &dev_attr_tnum_dbg_96.attr,
-+ &dev_attr_tnum_dbg_112.attr,
-+ NULL
-+};
-+
-+static struct attribute *fm_dev_cls_plans_attributes[] = {
-+ &dev_attr_cls_plan_0.attr,
-+ &dev_attr_cls_plan_1.attr,
-+ &dev_attr_cls_plan_2.attr,
-+ &dev_attr_cls_plan_3.attr,
-+ &dev_attr_cls_plan_4.attr,
-+ &dev_attr_cls_plan_5.attr,
-+ &dev_attr_cls_plan_6.attr,
-+ &dev_attr_cls_plan_7.attr,
-+ &dev_attr_cls_plan_8.attr,
-+ &dev_attr_cls_plan_9.attr,
-+ &dev_attr_cls_plan_10.attr,
-+ &dev_attr_cls_plan_11.attr,
-+ &dev_attr_cls_plan_12.attr,
-+ &dev_attr_cls_plan_13.attr,
-+ &dev_attr_cls_plan_14.attr,
-+ &dev_attr_cls_plan_15.attr,
-+ &dev_attr_cls_plan_16.attr,
-+ &dev_attr_cls_plan_17.attr,
-+ &dev_attr_cls_plan_18.attr,
-+ &dev_attr_cls_plan_19.attr,
-+ &dev_attr_cls_plan_20.attr,
-+ &dev_attr_cls_plan_21.attr,
-+ &dev_attr_cls_plan_22.attr,
-+ &dev_attr_cls_plan_23.attr,
-+ &dev_attr_cls_plan_24.attr,
-+ &dev_attr_cls_plan_25.attr,
-+ &dev_attr_cls_plan_26.attr,
-+ &dev_attr_cls_plan_27.attr,
-+ &dev_attr_cls_plan_28.attr,
-+ &dev_attr_cls_plan_29.attr,
-+ &dev_attr_cls_plan_30.attr,
-+ &dev_attr_cls_plan_31.attr,
-+ NULL
-+};
-+
-+static struct attribute *fm_dev_profiles_attributes[] = {
-+ &dev_attr_profile_0.attr,
-+ &dev_attr_profile_1.attr,
-+ &dev_attr_profile_2.attr,
-+ &dev_attr_profile_3.attr,
-+ &dev_attr_profile_4.attr,
-+ &dev_attr_profile_5.attr,
-+ &dev_attr_profile_6.attr,
-+ &dev_attr_profile_7.attr,
-+ &dev_attr_profile_8.attr,
-+ &dev_attr_profile_9.attr,
-+ &dev_attr_profile_10.attr,
-+ &dev_attr_profile_11.attr,
-+ &dev_attr_profile_12.attr,
-+ &dev_attr_profile_13.attr,
-+ &dev_attr_profile_14.attr,
-+ &dev_attr_profile_15.attr,
-+ &dev_attr_profile_16.attr,
-+ &dev_attr_profile_17.attr,
-+ &dev_attr_profile_18.attr,
-+ &dev_attr_profile_19.attr,
-+ &dev_attr_profile_20.attr,
-+ &dev_attr_profile_21.attr,
-+ &dev_attr_profile_22.attr,
-+ &dev_attr_profile_23.attr,
-+ &dev_attr_profile_24.attr,
-+ &dev_attr_profile_25.attr,
-+ &dev_attr_profile_26.attr,
-+ &dev_attr_profile_27.attr,
-+ &dev_attr_profile_28.attr,
-+ &dev_attr_profile_29.attr,
-+ &dev_attr_profile_30.attr,
-+ &dev_attr_profile_31.attr,
-+ NULL
-+};
-+
-+static struct attribute *fm_dev_schemes_attributes[] = {
-+ &dev_attr_scheme_0.attr,
-+ &dev_attr_scheme_1.attr,
-+ &dev_attr_scheme_2.attr,
-+ &dev_attr_scheme_3.attr,
-+ &dev_attr_scheme_4.attr,
-+ &dev_attr_scheme_5.attr,
-+ &dev_attr_scheme_6.attr,
-+ &dev_attr_scheme_7.attr,
-+ &dev_attr_scheme_8.attr,
-+ &dev_attr_scheme_9.attr,
-+ &dev_attr_scheme_10.attr,
-+ &dev_attr_scheme_11.attr,
-+ &dev_attr_scheme_12.attr,
-+ &dev_attr_scheme_13.attr,
-+ &dev_attr_scheme_14.attr,
-+ &dev_attr_scheme_15.attr,
-+ &dev_attr_scheme_16.attr,
-+ &dev_attr_scheme_17.attr,
-+ &dev_attr_scheme_18.attr,
-+ &dev_attr_scheme_19.attr,
-+ &dev_attr_scheme_20.attr,
-+ &dev_attr_scheme_21.attr,
-+ &dev_attr_scheme_22.attr,
-+ &dev_attr_scheme_23.attr,
-+ &dev_attr_scheme_24.attr,
-+ &dev_attr_scheme_25.attr,
-+ &dev_attr_scheme_26.attr,
-+ &dev_attr_scheme_27.attr,
-+ &dev_attr_scheme_28.attr,
-+ &dev_attr_scheme_29.attr,
-+ &dev_attr_scheme_30.attr,
-+ &dev_attr_scheme_31.attr,
-+ NULL
-+};
-+
-+static const struct attribute_group fm_dev_stats_attr_grp = {
-+ .name = "statistics",
-+ .attrs = fm_dev_stats_attributes
-+};
-+
-+static const struct attribute_group fm_dev_tnums_dbg_attr_grp = {
-+ .name = "tnums_dbg",
-+ .attrs = fm_dev_tnums_dbg_attributes
-+};
-+
-+static const struct attribute_group fm_dev_cls_plans_attr_grp = {
-+ .name = "cls_plans",
-+ .attrs = fm_dev_cls_plans_attributes
-+};
-+
-+static const struct attribute_group fm_dev_schemes_attr_grp = {
-+ .name = "schemes",
-+ .attrs = fm_dev_schemes_attributes
-+};
-+
-+static const struct attribute_group fm_dev_profiles_attr_grp = {
-+ .name = "profiles",
-+ .attrs = fm_dev_profiles_attributes
-+};
-+
-+static ssize_t show_fm_regs(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "FM driver registers dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
-+ return -EIO;
-+ else
-+ n = fm_dump_regs(p_wrp_fm_dev->h_Dev, buf, n);
-+
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_kg_pe_regs(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE,
-+ "\n FM-KG Port Partition Config registers dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+ else
-+ n = fm_kg_pe_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
-+
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static ssize_t show_fm_kg_regs(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "FM-KG registers dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+ else
-+ n = fm_kg_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
-+
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+
-+static ssize_t show_fm_fpm_regs(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+
-+ n = snprintf(buf, PAGE_SIZE, "FM-FPM registers dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_Dev)
-+ return -EIO;
-+ else
-+ n = fm_fpm_dump_regs(p_wrp_fm_dev->h_Dev, buf, n);
-+
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static ssize_t show_prs_regs(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE, "FM Policer registers dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+ else
-+ n = fm_prs_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
-+
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static ssize_t show_plcr_regs(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return -EINVAL;
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE, "FM Policer registers dump.\n");
-+
-+ if (!p_wrp_fm_dev->active || !p_wrp_fm_dev->h_PcdDev)
-+ return -EIO;
-+ else
-+ n = fm_plcr_dump_regs(p_wrp_fm_dev->h_PcdDev, buf, n);
-+
-+ local_irq_restore(flags);
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+#endif /* (defined(DEBUG_ERRORS) && ... */
-+
-+ return n;
-+}
-+
-+static DEVICE_ATTR(fm_regs, S_IRUGO, show_fm_regs, NULL);
-+static DEVICE_ATTR(fm_fpm_regs, S_IRUGO, show_fm_fpm_regs, NULL);
-+static DEVICE_ATTR(fm_kg_regs, S_IRUGO, show_fm_kg_regs, NULL);
-+static DEVICE_ATTR(fm_kg_pe_regs, S_IRUGO, show_fm_kg_pe_regs, NULL);
-+static DEVICE_ATTR(fm_plcr_regs, S_IRUGO, show_plcr_regs, NULL);
-+static DEVICE_ATTR(fm_prs_regs, S_IRUGO, show_prs_regs, NULL);
-+static DEVICE_ATTR(fm_muram_free_size, S_IRUGO, show_fm_muram_free_sz, NULL);
-+static DEVICE_ATTR(fm_ctrl_code_ver, S_IRUGO, show_fm_ctrl_code_ver, NULL);
-+
-+int fm_sysfs_create(struct device *dev)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+
-+ if (dev == NULL)
-+ return -EIO;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+
-+ /* store to remove them when module is disabled */
-+ p_wrp_fm_dev->dev_attr_regs = &dev_attr_fm_regs;
-+ p_wrp_fm_dev->dev_attr_risc_load = &dev_attr_fm_risc_load_val;
-+ p_wrp_fm_dev->dev_fm_fpm_attr_regs = &dev_attr_fm_fpm_regs;
-+ p_wrp_fm_dev->dev_fm_kg_attr_regs = &dev_attr_fm_kg_regs;
-+ p_wrp_fm_dev->dev_fm_kg_pe_attr_regs = &dev_attr_fm_kg_pe_regs;
-+ p_wrp_fm_dev->dev_plcr_attr_regs = &dev_attr_fm_plcr_regs;
-+ p_wrp_fm_dev->dev_prs_attr_regs = &dev_attr_fm_prs_regs;
-+ p_wrp_fm_dev->dev_attr_muram_free_size = &dev_attr_fm_muram_free_size;
-+ p_wrp_fm_dev->dev_attr_fm_ctrl_code_ver = &dev_attr_fm_ctrl_code_ver;
-+
-+ /* Create sysfs statistics group for FM module */
-+ if (sysfs_create_group(&dev->kobj, &fm_dev_stats_attr_grp) != 0)
-+ return -EIO;
-+
-+ if (sysfs_create_group(&dev->kobj, &fm_dev_schemes_attr_grp) != 0)
-+ return -EIO;
-+
-+ if (sysfs_create_group(&dev->kobj, &fm_dev_profiles_attr_grp) != 0)
-+ return -EIO;
-+
-+ if (sysfs_create_group(&dev->kobj, &fm_dev_tnums_dbg_attr_grp) != 0)
-+ return -EIO;
-+
-+ if (sysfs_create_group(&dev->kobj, &fm_dev_cls_plans_attr_grp) != 0)
-+ return -EIO;
-+
-+ /* Registers dump entry - in future will be moved to debugfs */
-+ if (device_create_file(dev, &dev_attr_fm_regs) != 0)
-+ return -EIO;
-+
-+ if (device_create_file(dev, &dev_attr_fm_risc_load_val) != 0)
-+ return -EIO;
-+
-+ if (device_create_file(dev, &dev_attr_fm_fpm_regs) != 0)
-+ return -EIO;
-+
-+ if (device_create_file(dev, &dev_attr_fm_kg_regs) != 0)
-+ return -EIO;
-+
-+ if (device_create_file(dev, &dev_attr_fm_kg_pe_regs) != 0)
-+ return -EIO;
-+
-+ if (device_create_file(dev, &dev_attr_fm_plcr_regs) != 0)
-+ return -EIO;
-+
-+ if (device_create_file(dev, &dev_attr_fm_prs_regs) != 0)
-+ return -EIO;
-+
-+ /* muram free size */
-+ if (device_create_file(dev, &dev_attr_fm_muram_free_size) != 0)
-+ return -EIO;
-+
-+ /* fm ctrl code version */
-+ if (device_create_file(dev, &dev_attr_fm_ctrl_code_ver) != 0)
-+ return -EIO;
-+
-+ return 0;
-+}
-+
-+void fm_sysfs_destroy(struct device *dev)
-+{
-+ t_LnxWrpFmDev *p_wrp_fm_dev = NULL;
-+
-+ if (WARN_ON(dev == NULL))
-+ return;
-+
-+ p_wrp_fm_dev = (t_LnxWrpFmDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_wrp_fm_dev == NULL))
-+ return;
-+
-+ sysfs_remove_group(&dev->kobj, &fm_dev_stats_attr_grp);
-+ sysfs_remove_group(&dev->kobj, &fm_dev_schemes_attr_grp);
-+ sysfs_remove_group(&dev->kobj, &fm_dev_profiles_attr_grp);
-+ sysfs_remove_group(&dev->kobj, &fm_dev_cls_plans_attr_grp);
-+ sysfs_remove_group(&dev->kobj, &fm_dev_tnums_dbg_attr_grp);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_attr_regs);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_fm_fpm_attr_regs);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_fm_kg_attr_regs);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_fm_kg_pe_attr_regs);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_plcr_attr_regs);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_prs_attr_regs);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_attr_muram_free_size);
-+ device_remove_file(dev, p_wrp_fm_dev->dev_attr_fm_ctrl_code_ver);
-+}
-+
-+int fm_dump_regs(void *h_fm, char *buf, int nn)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_fm;
-+ uint8_t i = 0;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ FM_DMP_TITLE(buf, n, p_Fm->p_FmDmaRegs, "FM-DMA Regs");
-+
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmsr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmemsr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmmr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmhy);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmsetr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtah);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtal);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmtcid);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmra);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmrd);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmwcr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmebcr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmDmaRegs, fmdmdcr);
-+
-+ FM_DMP_TITLE(buf, n, &p_Fm->p_FmDmaRegs->fmdmplr, "fmdmplr");
-+
-+ for (i = 0; i < FM_MAX_NUM_OF_HW_PORT_IDS / 2 ; ++i)
-+ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmDmaRegs->fmdmplr[i]);
-+
-+ FM_DMP_TITLE(buf, n, p_Fm->p_FmBmiRegs, "FM-BMI COMMON Regs");
-+ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_init);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_cfg1);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_cfg2);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_ievr);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmBmiRegs, fmbm_ier);
-+
-+ FM_DMP_TITLE(buf, n, &p_Fm->p_FmBmiRegs->fmbm_arb, "fmbm_arb");
-+ for (i = 0; i < 8 ; ++i)
-+ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmBmiRegs->fmbm_arb[i]);
-+
-+ FM_DMP_TITLE(buf, n, p_Fm->p_FmQmiRegs, "FM-QMI COMMON Regs");
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_gc);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_eie);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_eien);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_eif);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_ie);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_ien);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_if);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_gs);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmQmiRegs, fmqm_etfc);
-+
-+ return n;
-+}
-+
-+int fm_dump_tnum_dbg(void *h_fm, int tn_s, int tn_e, char *buf, int nn)
-+{
-+ t_Fm *p_Fm = (t_Fm *)h_fm;
-+ uint8_t i, j = 0;
-+ int n = nn;
-+
-+ FM_DMP_TITLE(buf, n, NULL, "Tnums and Tnum dbg regs %d - %d",
-+ tn_s, tn_e);
-+
-+ iowrite32be(tn_s << 24, &p_Fm->p_FmFpmRegs->fmfp_dra);
-+
-+ mb();
-+
-+ for (j = tn_s; j <= tn_e; j++) {
-+ FM_DMP_LN(buf, n, "> fmfp_ts[%d]\n", j);
-+ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmFpmRegs->fmfp_ts[j]);
-+ FM_DMP_V32(buf, n, p_Fm->p_FmFpmRegs, fmfp_dra);
-+ FM_DMP_LN(buf, n, "> fmfp_drd[0-3]\n");
-+
-+ for (i = 0; i < 4 ; ++i)
-+ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmFpmRegs->fmfp_drd[i]);
-+
-+ FM_DMP_LN(buf, n, "\n");
-+
-+ }
-+
-+ return n;
-+}
-+
-+int fm_dump_cls_plan(void *h_fm_pcd, int cpn, char *buf, int nn)
-+{
-+ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
-+ int i = 0;
-+ uint32_t tmp;
-+ unsigned long i_flg;
-+ int n = nn;
-+ u_FmPcdKgIndirectAccessRegs *idac;
-+ spinlock_t *p_lk;
-+
-+ p_lk = (spinlock_t *)p_pcd->p_FmPcdKg->h_HwSpinlock;
-+ idac = p_pcd->p_FmPcdKg->p_IndirectAccessRegs;
-+
-+ spin_lock_irqsave(p_lk, i_flg);
-+
-+ /* Read ClsPlan Block Action Regs */
-+ tmp = (uint32_t)(FM_KG_KGAR_GO |
-+ FM_KG_KGAR_READ |
-+ FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
-+ DUMMY_PORT_ID |
-+ ((uint32_t)cpn << FM_PCD_KG_KGAR_NUM_SHIFT) |
-+ FM_PCD_KG_KGAR_WSEL_MASK);
-+
-+ if (fman_kg_write_ar_wait(p_pcd->p_FmPcdKg->p_FmPcdKgRegs, tmp)) {
-+ FM_DMP_LN(buf, nn, "Keygen scheme access violation");
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+ return nn;
-+ }
-+ FM_DMP_TITLE(buf, n, &idac->clsPlanRegs,
-+ "ClsPlan %d Indirect Access Regs", cpn);
-+
-+ for (i = 0; i < 8; i++)
-+ FM_DMP_MEM_32(buf, n, &idac->clsPlanRegs.kgcpe[i]);
-+
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+
-+ return n;
-+}
-+
-+int fm_profile_dump_regs(void *h_fm_pcd, int ppn, char *buf, int nn)
-+{
-+ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
-+ t_FmPcdPlcrProfileRegs *p_prof_regs;
-+ t_FmPcdPlcrRegs *p_plcr_regs;
-+ t_FmPcdPlcr *p_plcr;
-+ uint32_t tmp;
-+ unsigned long i_flg;
-+ int n = nn;
-+ int toc = 10;
-+ spinlock_t *p_lk;
-+
-+ p_plcr = p_pcd->p_FmPcdPlcr;
-+ p_prof_regs = &p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs;
-+ p_plcr_regs = p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
-+
-+ p_lk = (spinlock_t *)((t_FmPcdPlcr *)p_plcr)->h_HwSpinlock;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_plcr_regs, "FM-PCD policer-profile regs");
-+
-+ tmp = (uint32_t)(FM_PCD_PLCR_PAR_GO |
-+ FM_PCD_PLCR_PAR_R |
-+ ((uint32_t)ppn << FM_PCD_PLCR_PAR_PNUM_SHIFT) |
-+ FM_PCD_PLCR_PAR_PWSEL_MASK);
-+
-+ spin_lock_irqsave(p_lk, i_flg);
-+
-+ iowrite32be(tmp, &p_plcr_regs->fmpl_par);
-+
-+ mb();
-+
-+	/* wait for the profile regs to be present */
-+ do {
-+ --toc;
-+ udelay(10);
-+ if (!toc) {
-+ /* looks like PLCR_PAR_GO refuses to clear */
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+ FM_DMP_LN(buf, n, "Profile regs not accessible -");
-+ FM_DMP_LN(buf, n, " check profile init process\n");
-+ return n;
-+ }
-+ } while ((ioread32be(&p_plcr_regs->fmpl_par) & FM_PCD_PLCR_PAR_GO));
-+
-+ FM_DMP_TITLE(buf, n, p_prof_regs, "Profile %d regs", ppn);
-+
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pemode);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pegnia);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_peynia);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pernia);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pecir);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pecbs);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pepepir_eir);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pepbs_ebs);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pelts);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pects);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pepts_ets);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_pegpc);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_peypc);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_perpc);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_perypc);
-+ FM_DMP_V32(buf, n, p_prof_regs, fmpl_perrpc);
-+
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+
-+ return n;
-+}
-+
-+int fm_dump_scheme(void *h_fm_pcd, int scnum, char *buf, int nn)
-+{
-+ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
-+ uint32_t tmp_ar;
-+ unsigned long i_flg;
-+ int i, n = nn;
-+ spinlock_t *p_lk;
-+ u_FmPcdKgIndirectAccessRegs *idac;
-+
-+ idac = p_pcd->p_FmPcdKg->p_IndirectAccessRegs;
-+ p_lk = (spinlock_t *)p_pcd->p_FmPcdKg->h_HwSpinlock;
-+
-+ spin_lock_irqsave(p_lk, i_flg);
-+
-+ tmp_ar = FmPcdKgBuildReadSchemeActionReg((uint8_t)scnum);
-+ if (fman_kg_write_ar_wait(p_pcd->p_FmPcdKg->p_FmPcdKgRegs, tmp_ar)) {
-+ FM_DMP_LN(buf, nn,
-+ "Keygen scheme access violation or no such scheme");
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+ return nn;
-+ }
-+
-+ FM_DMP_TITLE(buf, n, &idac->schemeRegs,
-+ "Scheme %d Indirect Access Regs", scnum);
-+
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_mode);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ekfc);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ekdv);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_bmch);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_bmcl);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_fqb);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_hc);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ppc);
-+
-+ FM_DMP_TITLE(buf, n, &idac->schemeRegs.kgse_gec, "kgse_gec");
-+
-+ for (i = 0; i < FM_KG_NUM_OF_GENERIC_REGS; i++)
-+ FM_DMP_MEM_32(buf, n, &idac->schemeRegs.kgse_gec[i]);
-+
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_spc);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_dv0);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_dv1);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_ccbs);
-+ FM_DMP_V32(buf, n, &idac->schemeRegs, kgse_mv);
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+
-+ return n;
-+}
-+
-+int fm_kg_pe_dump_regs(void *h_fm_pcd, char *buf, int nn)
-+{
-+ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
-+ int i = 0;
-+ uint8_t prt_id = 0;
-+ uint32_t tmp_ar;
-+ unsigned long i_flg;
-+ int n = nn;
-+ u_FmPcdKgIndirectAccessRegs *idac;
-+ t_FmPcdKg *p_kg;
-+ spinlock_t *p_lk;
-+
-+ p_kg = p_pcd->p_FmPcdKg;
-+ idac = p_pcd->p_FmPcdKg->p_IndirectAccessRegs;
-+ p_lk = (spinlock_t *)p_kg->h_HwSpinlock;
-+
-+ spin_lock_irqsave(p_lk, i_flg);
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++) {
-+ SW_PORT_INDX_TO_HW_PORT_ID(prt_id, i);
-+
-+ tmp_ar = FmPcdKgBuildReadPortSchemeBindActionReg(prt_id);
-+
-+ if (fman_kg_write_ar_wait(p_kg->p_FmPcdKgRegs, tmp_ar)) {
-+ FM_DMP_LN(buf, nn, "Keygen scheme access violation");
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+ return nn;
-+ }
-+ FM_DMP_TITLE(buf, n, &idac->portRegs, "Port %d regs", prt_id);
-+ FM_DMP_V32(buf, n, &idac->portRegs, fmkg_pe_sp);
-+ FM_DMP_V32(buf, n, &idac->portRegs, fmkg_pe_cpp);
-+ }
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ spin_unlock_irqrestore(p_lk, i_flg);
-+
-+ return n;
-+}
-+
-+int fm_kg_dump_regs(void *h_fm_pcd, char *buf, int nn)
-+{
-+ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_TITLE(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs,
-+ "FmPcdKgRegs Regs");
-+
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gcr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_eer);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_eeer);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_seer);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_seeer);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gsr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_tpc);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_serc);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_fdor);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gdv0r);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_gdv1r);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_feer);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdKg->p_FmPcdKgRegs, fmkg_ar);
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ return n;
-+}
-+
-+
-+int fm_fpm_dump_regs(void *h_fm, char *buf, int nn)
-+{
-+ t_Fm *p_fm = (t_Fm *)h_fm;
-+ uint8_t i;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ FM_DMP_TITLE(buf, n, p_fm->p_FmFpmRegs, "FM-FPM Regs");
-+
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tnc);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_prc);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_brkc);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_mxd);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_dist1);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_dist2);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_epi);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_rie);
-+
-+ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_fcev, "fmfp_fcev");
-+ for (i = 0; i < 4; ++i)
-+ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_fcev[i]);
-+
-+ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_cee, "fmfp_cee");
-+ for (i = 0; i < 4; ++i)
-+ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_cee[i]);
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsc1);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsc2);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsp);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_tsf);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_rcr);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_extc);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_ext1);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_ext2);
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_ip_rev_1);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_ip_rev_2);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_rstc);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_cld);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fm_npi);
-+ FM_DMP_V32(buf, n, p_fm->p_FmFpmRegs, fmfp_ee);
-+
-+ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_cev, "fmfp_cev");
-+ for (i = 0; i < 4; ++i)
-+ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_cev[i]);
-+
-+ FM_DMP_TITLE(buf, n, &p_fm->p_FmFpmRegs->fmfp_ps, "fmfp_ps");
-+ for (i = 0; i < 64; ++i)
-+ FM_DMP_MEM_32(buf, n, &p_fm->p_FmFpmRegs->fmfp_ps[i]);
-+
-+ return n;
-+}
-+
-+int fm_prs_dump_regs(void *h_fm_pcd, char *buf, int nn)
-+{
-+ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ FM_DMP_TITLE(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs,
-+ "FM-PCD parser regs");
-+
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_rpclim);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_rpimac);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, pmeec);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_pevr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_pever);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_perr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_perer);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_ppsc);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_pds);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l2rrs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l3rrs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l4rrs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_srrs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l2rres);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l3rres);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_l4rres);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_srres);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_spcs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_spscs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_hxscs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mrcs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mwcs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mrscs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_mwscs);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPrs->p_FmPcdPrsRegs, fmpr_fcscs);
-+
-+ return n;
-+}
-+
-+int fm_plcr_dump_regs(void *h_fm_pcd, char *buf, int nn)
-+{
-+ t_FmPcd *p_pcd = (t_FmPcd *)h_fm_pcd;
-+ int i = 0;
-+ int n = nn;
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ FM_DMP_TITLE(buf, n,
-+ p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,
-+ "FM policer regs");
-+
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_gcr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_gsr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_evr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_ier);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_ifr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_eevr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_eier);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_eifr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_rpcnt);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_ypcnt);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_rrpcnt);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_rypcnt);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_tpcnt);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_flmcnt);
-+
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_serc);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_upcr);
-+ FM_DMP_V32(buf, n, p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, fmpl_dpmr);
-+
-+ FM_DMP_TITLE(buf, n,
-+ &p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr,
-+ "fmpl_pmr");
-+
-+ for (i = 0; i < 63; ++i)
-+ FM_DMP_MEM_32(buf, n,
-+ &p_pcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr[i]);
-+
-+ return n;
-+}
-+
-+int fm_get_counter(void *h_fm, e_FmCounters cnt_e, uint32_t *cnt_val)
-+{
-+ t_Fm *p_fm = (t_Fm *)h_fm;
-+
-+ /* When applicable (when there is an "enable counters" bit),
-+ check that counters are enabled */
-+
-+ switch (cnt_e) {
-+ case (e_FM_COUNTERS_DEQ_1):
-+ case (e_FM_COUNTERS_DEQ_2):
-+ case (e_FM_COUNTERS_DEQ_3):
-+ if (p_fm->p_FmStateStruct->revInfo.majorRev >= 6)
-+ return -EINVAL; /* counter not available */
-+
-+ case (e_FM_COUNTERS_ENQ_TOTAL_FRAME):
-+ case (e_FM_COUNTERS_DEQ_TOTAL_FRAME):
-+ case (e_FM_COUNTERS_DEQ_0):
-+ case (e_FM_COUNTERS_DEQ_FROM_DEFAULT):
-+ case (e_FM_COUNTERS_DEQ_FROM_CONTEXT):
-+ case (e_FM_COUNTERS_DEQ_FROM_FD):
-+ case (e_FM_COUNTERS_DEQ_CONFIRM):
-+ if (!(ioread32be(&p_fm->p_FmQmiRegs->fmqm_gc) &
-+ QMI_CFG_EN_COUNTERS))
-+ return -EINVAL; /* Requested counter not available */
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ switch (cnt_e) {
-+ case (e_FM_COUNTERS_ENQ_TOTAL_FRAME):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_etfc);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_TOTAL_FRAME):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dtfc);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_0):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc0);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_1):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc1);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_2):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc2);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_3):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dc3);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_FROM_DEFAULT):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dfdc);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_FROM_CONTEXT):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dfcc);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_FROM_FD):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dffc);
-+ return 0;
-+ case (e_FM_COUNTERS_DEQ_CONFIRM):
-+ *cnt_val = ioread32be(&p_fm->p_FmQmiRegs->fmqm_dcc);
-+ return 0;
-+ }
-+ /* should never get here */
-+ return -EINVAL; /* counter not available */
-+}
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h
-@@ -0,0 +1,136 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef LNXWRP_SYSFS_FM_H_
-+#define LNXWRP_SYSFS_FM_H_
-+
-+#include "lnxwrp_sysfs.h"
-+
-+int fm_sysfs_create(struct device *dev);
-+void fm_sysfs_destroy(struct device *dev);
-+int fm_dump_regs(void *h_dev, char *buf, int nn);
-+int fm_fpm_dump_regs(void *h_dev, char *buf, int nn);
-+int fm_kg_dump_regs(void *h_pcd, char *buf, int nn);
-+int fm_kg_pe_dump_regs(void *h_pcd, char *buf, int nn);
-+int fm_dump_scheme(void *h_pcd, int scnum, char *buf, int nn);
-+int fm_dump_tnum_dbg(void *h_fm, int tn_s, int tn_e, char *buf, int nn);
-+int fm_dump_cls_plan(void *h_pcd, int cpn, char *buf, int nn);
-+int fm_plcr_dump_regs(void *h_pcd, char *buf, int nn);
-+int fm_prs_dump_regs(void *h_pcd, char *buf, int nn);
-+int fm_profile_dump_regs(void *h_pcd, int ppnum, char *buf, int nn);
-+
-+#define FM_DMP_PGSZ_ERR { \
-+ snprintf(&buf[PAGE_SIZE - 80], 70, \
-+ "\n Err: current sysfs buffer reached PAGE_SIZE\n");\
-+ n = PAGE_SIZE - 2; \
-+ }
-+
-+#define FM_DMP_LN(buf, n, ...) \
-+ do { \
-+ int k, m = n; \
-+ m += k = snprintf(&buf[m], PAGE_SIZE - m, __VA_ARGS__); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ n = m; \
-+ } while (0)
-+
-+#define FM_DMP_TITLE(buf, n, addr, ...) \
-+ do { \
-+ int k, m = n; \
-+ m += k = snprintf(&buf[m], PAGE_SIZE - m, "\n"); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ m += k = snprintf(&buf[m], PAGE_SIZE - m, __VA_ARGS__); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ if (addr) { \
-+ phys_addr_t pa; \
-+ pa = virt_to_phys(addr); \
-+ m += k = \
-+ snprintf(&buf[m], PAGE_SIZE - m, " (0x%lX)", \
-+ (long unsigned int)(pa)); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ } \
-+ m += k = snprintf(&buf[m], PAGE_SIZE - m, \
-+ "\n----------------------------------------\n\n"); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ n = m; \
-+ } while (0)
-+
-+#define FM_DMP_SUBTITLE(buf, n, ...) \
-+ do { \
-+ int k, m = n; \
-+ m += k = snprintf(&buf[m], PAGE_SIZE - m, "------- "); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ m += k = snprintf(&buf[m], PAGE_SIZE - m, __VA_ARGS__); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ m += k = snprintf(&buf[m], PAGE_SIZE - m, "\n"); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ n = m; \
-+ } while (0)
-+
-+#define FM_DMP_MEM_32(buf, n, addr) \
-+ { \
-+ uint32_t val; \
-+ phys_addr_t pa; \
-+ int k, m = n; \
-+ pa = virt_to_phys(addr); \
-+ val = ioread32be((addr)); \
-+ do { \
-+ m += k = snprintf(&buf[m], \
-+ PAGE_SIZE - m, "0x%010llX: 0x%08x\n", \
-+ pa, val); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ n += k; \
-+ } while (0) ;\
-+ }
-+
-+#define FM_DMP_V32(buf, n, st, phrase) \
-+ do { \
-+ int k, m = n; \
-+ phys_addr_t pa = virt_to_phys(&((st)->phrase)); \
-+ k = snprintf(&buf[m], PAGE_SIZE - m, \
-+ "0x%010llX: 0x%08x%8s\t%s\n", (unsigned long long) pa, \
-+ ioread32be((uint32_t *)&((st)->phrase)), "", #phrase); \
-+ if (k < 0 || m > PAGE_SIZE - 90) \
-+ FM_DMP_PGSZ_ERR \
-+ n += k; \
-+ } while (0)
-+
-+#endif /* LNXWRP_SYSFS_FM_H_ */
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c
-@@ -0,0 +1,1268 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "lnxwrp_sysfs.h"
-+#include "lnxwrp_fm.h"
-+#include "debug_ext.h"
-+#include "lnxwrp_sysfs_fm_port.h"
-+#include "lnxwrp_sysfs_fm.h"
-+
-+#include "../../sdk_fman/Peripherals/FM/Port/fm_port.h"
-+#include "../../sdk_fman/Peripherals/FM/Port/fm_port_dsar.h"
-+
-+#if defined(__ERR_MODULE__)
-+#undef __ERR_MODULE__
-+#endif
-+
-+#include "../../sdk_fman/Peripherals/FM/fm.h"
-+
-+static const struct sysfs_stats_t portSysfsStats[] = {
-+ /* RX/TX/OH common statistics */
-+ {
-+ .stat_name = "port_frame",
-+ .stat_counter = e_FM_PORT_COUNTERS_FRAME,
-+ },
-+ {
-+ .stat_name = "port_discard_frame",
-+ .stat_counter = e_FM_PORT_COUNTERS_DISCARD_FRAME,
-+ },
-+ {
-+ .stat_name = "port_dealloc_buf",
-+ .stat_counter = e_FM_PORT_COUNTERS_DEALLOC_BUF,
-+ },
-+ {
-+ .stat_name = "port_enq_total",
-+ .stat_counter = e_FM_PORT_COUNTERS_ENQ_TOTAL,
-+ },
-+ /* TX/OH */
-+ {
-+ .stat_name = "port_length_err",
-+ .stat_counter = e_FM_PORT_COUNTERS_LENGTH_ERR,
-+ },
-+ {
-+ .stat_name = "port_unsupprted_format",
-+ .stat_counter = e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT,
-+ },
-+ {
-+ .stat_name = "port_deq_total",
-+ .stat_counter = e_FM_PORT_COUNTERS_DEQ_TOTAL,
-+ },
-+ {
-+ .stat_name = "port_deq_from_default",
-+ .stat_counter = e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT,
-+ },
-+ {
-+ .stat_name = "port_deq_confirm",
-+ .stat_counter = e_FM_PORT_COUNTERS_DEQ_CONFIRM,
-+ },
-+ /* RX/OH */
-+ {
-+ .stat_name = "port_rx_bad_frame",
-+ .stat_counter = e_FM_PORT_COUNTERS_RX_BAD_FRAME,
-+ },
-+ {
-+ .stat_name = "port_rx_large_frame",
-+ .stat_counter = e_FM_PORT_COUNTERS_RX_LARGE_FRAME,
-+ },
-+ {
-+ .stat_name = "port_rx_out_of_buffers_discard",
-+ .stat_counter = e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD,
-+ },
-+ {
-+ .stat_name = "port_rx_filter_frame",
-+ .stat_counter = e_FM_PORT_COUNTERS_RX_FILTER_FRAME,
-+ },
-+ /* TODO: Particular statistics for OH ports */
-+ {}
-+};
-+
-+static ssize_t show_fm_port_stats(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+ t_LnxWrpFmDev *p_LnxWrpFmDev;
-+ unsigned long flags;
-+ int n = 0;
-+ uint8_t counter = 0;
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_LnxWrpFmPortDev == NULL))
-+ return -EINVAL;
-+
-+ p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev;
-+ if (WARN_ON(p_LnxWrpFmDev == NULL))
-+ return -EINVAL;
-+
-+ if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_Dev)
-+ return -EIO;
-+
-+ if (!p_LnxWrpFmPortDev->h_Dev) {
-+ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
-+ return n;
-+ }
-+
-+ counter = fm_find_statistic_counter_by_name(
-+ attr->attr.name,
-+ portSysfsStats, NULL);
-+
-+ if (counter == e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR) {
-+ uint32_t fmRev = 0;
-+ fmRev = 0xffff &
-+ ioread32(UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr +
-+ 0x000c30c4));
-+
-+ if (fmRev == 0x0100) {
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "counter not available for revision 1\n");
-+ local_irq_restore(flags);
-+ }
-+ return n;
-+ }
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE, "\t%s counter: %u\n",
-+ p_LnxWrpFmPortDev->name,
-+ FM_PORT_GetCounter(p_LnxWrpFmPortDev->h_Dev,
-+ (e_FmPortCounters) counter));
-+ local_irq_restore(flags);
-+
-+ return n;
-+}
-+
-+/* FM PORT RX/TX/OH statistics */
-+static DEVICE_ATTR(port_frame, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_discard_frame, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_dealloc_buf, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_enq_total, S_IRUGO, show_fm_port_stats, NULL);
-+/* FM PORT TX/OH statistics */
-+static DEVICE_ATTR(port_length_err, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_unsupprted_format, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_deq_total, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_deq_from_default, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_deq_confirm, S_IRUGO, show_fm_port_stats, NULL);
-+/* FM PORT RX/OH statistics */
-+static DEVICE_ATTR(port_rx_bad_frame, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_rx_large_frame, S_IRUGO, show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_rx_out_of_buffers_discard, S_IRUGO,
-+ show_fm_port_stats, NULL);
-+static DEVICE_ATTR(port_rx_filter_frame, S_IRUGO, show_fm_port_stats, NULL);
-+
-+/* FM PORT TX statistics */
-+static struct attribute *fm_tx_port_dev_stats_attributes[] = {
-+ &dev_attr_port_frame.attr,
-+ &dev_attr_port_discard_frame.attr,
-+ &dev_attr_port_dealloc_buf.attr,
-+ &dev_attr_port_enq_total.attr,
-+ &dev_attr_port_length_err.attr,
-+ &dev_attr_port_unsupprted_format.attr,
-+ &dev_attr_port_deq_total.attr,
-+ &dev_attr_port_deq_from_default.attr,
-+ &dev_attr_port_deq_confirm.attr,
-+ NULL
-+};
-+
-+static const struct attribute_group fm_tx_port_dev_stats_attr_grp = {
-+ .name = "statistics",
-+ .attrs = fm_tx_port_dev_stats_attributes
-+};
-+
-+/* FM PORT RX statistics */
-+static struct attribute *fm_rx_port_dev_stats_attributes[] = {
-+ &dev_attr_port_frame.attr,
-+ &dev_attr_port_discard_frame.attr,
-+ &dev_attr_port_dealloc_buf.attr,
-+ &dev_attr_port_enq_total.attr,
-+ &dev_attr_port_rx_bad_frame.attr,
-+ &dev_attr_port_rx_large_frame.attr,
-+ &dev_attr_port_rx_out_of_buffers_discard.attr,
-+ &dev_attr_port_rx_filter_frame.attr,
-+ NULL
-+};
-+
-+static const struct attribute_group fm_rx_port_dev_stats_attr_grp = {
-+ .name = "statistics",
-+ .attrs = fm_rx_port_dev_stats_attributes
-+};
-+
-+/* TODO: add particular OH ports statistics */
-+static struct attribute *fm_oh_port_dev_stats_attributes[] = {
-+ &dev_attr_port_frame.attr,
-+ &dev_attr_port_discard_frame.attr,
-+ &dev_attr_port_dealloc_buf.attr,
-+ &dev_attr_port_enq_total.attr,
-+ /*TX*/ &dev_attr_port_length_err.attr,
-+ &dev_attr_port_unsupprted_format.attr,
-+ &dev_attr_port_deq_total.attr,
-+ &dev_attr_port_deq_from_default.attr,
-+ &dev_attr_port_deq_confirm.attr,
-+ /* &dev_attr_port_rx_bad_frame.attr, */
-+ /* &dev_attr_port_rx_large_frame.attr, */
-+ &dev_attr_port_rx_out_of_buffers_discard.attr,
-+ /*&dev_attr_port_rx_filter_frame.attr, */
-+ NULL
-+};
-+
-+static const struct attribute_group fm_oh_port_dev_stats_attr_grp = {
-+ .name = "statistics",
-+ .attrs = fm_oh_port_dev_stats_attributes
-+};
-+
-+static ssize_t show_fm_port_regs(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+#endif
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_LnxWrpFmPortDev =
-+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+
-+
-+ local_irq_save(flags);
-+
-+ if (!p_LnxWrpFmPortDev->h_Dev) {
-+ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
-+ return n;
-+ } else {
-+ n = snprintf(buf, PAGE_SIZE,
-+ "FM port driver registers dump.\n");
-+ n = fm_port_dump_regs(p_LnxWrpFmPortDev->h_Dev, buf, n);
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+ return n;
-+#endif
-+}
-+static int fm_port_dsar_dump_mem(void *h_dev, char *buf, int nn)
-+{
-+ t_FmPort *p_FmPort;
-+ t_Fm *p_Fm;
-+ uint8_t hardwarePortId;
-+ uint32_t *param_page;
-+ t_ArCommonDesc *ArCommonDescPtr;
-+ uint32_t *mem;
-+ int i, n = nn;
-+
-+ p_FmPort = (t_FmPort *)h_dev;
-+ hardwarePortId = p_FmPort->hardwarePortId;
-+ p_Fm = (t_Fm *)p_FmPort->h_Fm;
-+
-+ if (!FM_PORT_IsInDsar(p_FmPort))
-+ {
-+ FM_DMP_LN(buf, n, "port %u is not a DSAR port\n",
-+ hardwarePortId);
-+ return n;
-+ }
-+ FM_DMP_LN(buf, n, "port %u DSAR mem\n", hardwarePortId);
-+ FM_DMP_LN(buf, n, "========================\n");
-+
-+ /* do I need request_mem_region here? */
-+ param_page = ioremap(p_FmPort->fmMuramPhysBaseAddr + ioread32be(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr), 4);
-+ ArCommonDescPtr = (t_ArCommonDesc*)(ioremap(p_FmPort->fmMuramPhysBaseAddr + ioread32be(param_page), 300*4)); /* this should be changed*/
-+ mem = (uint32_t*)ArCommonDescPtr;
-+ for (i = 0; i < 300; i+=4)
-+ FM_DMP_LN(buf, n, "%08x: %08x %08x %08x %08x\n", i*4, mem[i], mem[i + 1], mem[i + 2], mem[i + 3]);
-+ iounmap(ArCommonDescPtr);
-+ iounmap(param_page);
-+ return n;
-+}
-+
-+static int fm_port_dsar_dump_regs(void *h_dev, char *buf, int nn)
-+{
-+ t_FmPort *p_FmPort;
-+ t_Fm *p_Fm;
-+ uint8_t hardwarePortId;
-+ uint32_t *param_page;
-+ t_ArCommonDesc *ArCommonDescPtr;
-+ int i, n = nn;
-+
-+ p_FmPort = (t_FmPort *)h_dev;
-+ hardwarePortId = p_FmPort->hardwarePortId;
-+ p_Fm = (t_Fm *)p_FmPort->h_Fm;
-+
-+ if (!FM_PORT_IsInDsar(p_FmPort))
-+ {
-+ FM_DMP_LN(buf, n, "port %u is not a DSAR port\n",
-+ hardwarePortId);
-+ return n;
-+ }
-+ FM_DMP_LN(buf, n, "port %u DSAR information\n", hardwarePortId);
-+ FM_DMP_LN(buf, n, "========================\n");
-+
-+ /* do I need request_mem_region here? */
-+ param_page = ioremap(p_FmPort->fmMuramPhysBaseAddr + ioread32be(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rgpr), 4);
-+ ArCommonDescPtr = (t_ArCommonDesc*)(ioremap(p_FmPort->fmMuramPhysBaseAddr + ioread32be(param_page), sizeof(t_ArCommonDesc))); /* this should be changed*/
-+ FM_DMP_LN(buf, n, "Tx port: 0x%x\n", ArCommonDescPtr->arTxPort);
-+ FM_DMP_LN(buf, n, "Active HPNIA: 0x%08x\n", ArCommonDescPtr->activeHPNIA);
-+ FM_DMP_LN(buf, n, "Snmp port: 0x%x\n", ArCommonDescPtr->snmpPort);
-+ FM_DMP_LN(buf, n, "MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", ArCommonDescPtr->macStationAddr[0],
-+ ArCommonDescPtr->macStationAddr[1], ArCommonDescPtr->macStationAddr[2],
-+ ArCommonDescPtr->macStationAddr[3], ArCommonDescPtr->macStationAddr[4],
-+ ArCommonDescPtr->macStationAddr[5]);
-+ FM_DMP_LN(buf, n, "filterControl: 0x%02x\n", ArCommonDescPtr->filterControl);
-+ FM_DMP_LN(buf, n, "tcpControlPass: 0x%04x\n", ArCommonDescPtr->tcpControlPass);
-+ FM_DMP_LN(buf, n, "ipProtocolTblSize: 0x%x\n", ArCommonDescPtr->ipProtocolTblSize);
-+ FM_DMP_LN(buf, n, "udpPortTblSize: 0x%x\n", ArCommonDescPtr->udpPortTblSize);
-+ FM_DMP_LN(buf, n, "tcpPortTblSize: 0x%x\n", ArCommonDescPtr->tcpPortTblSize);
-+ if (ArCommonDescPtr->p_ArStats)
-+ {
-+ t_ArStatistics *arStatistics = (t_ArStatistics*)
-+ ioremap(ioread32be(&ArCommonDescPtr->p_ArStats) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof (t_ArStatistics));
-+ FM_DMP_LN(buf, n, "\nDSAR statistics\n");
-+ FM_DMP_LN(buf, n, "DSAR_Discarded: 0x%x\n", arStatistics->dsarDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_Err_Discarded: 0x%x\n", arStatistics->dsarErrDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_Frag_Discarded: 0x%x\n", arStatistics->dsarFragDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_Tunnel_Discarded: 0x%x\n", arStatistics->dsarTunnelDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_ARP_Discarded: 0x%x\n", arStatistics->dsarArpDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_IP_Discarded: 0x%x\n", arStatistics->dsarIpDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_TCP_Discarded: 0x%x\n", arStatistics->dsarTcpDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_UDP_Discarded: 0x%x\n", arStatistics->dsarUdpDiscarded);
-+ FM_DMP_LN(buf, n, "DSAR_ICMPv6_Checksum_Err: 0x%x\n", arStatistics->dsarIcmpV6ChecksumErr);
-+ FM_DMP_LN(buf, n, "DSAR_ICMPv6_Other_Type: 0x%x\n", arStatistics->dsarIcmpV6OtherType);
-+ FM_DMP_LN(buf, n, "DSAR_ICMPv4_Other_Type: 0x%x\n", arStatistics->dsarIcmpV4OtherType);
-+
-+ iounmap(arStatistics);
-+ }
-+ if (ArCommonDescPtr->p_ArpDescriptor)
-+ {
-+ t_DsarArpDescriptor* ArpDescriptor = (t_DsarArpDescriptor*)
-+ ioremap(ioread32be(&ArCommonDescPtr->p_ArpDescriptor) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof (t_DsarArpDescriptor));
-+ FM_DMP_LN(buf, n, "\nARP\n");
-+ FM_DMP_LN(buf, n, "===\n");
-+ FM_DMP_LN(buf, n, "control bits 0x%04x\n", ArpDescriptor->control);
-+ if (ArpDescriptor->numOfBindings)
-+ {
-+ char ip_str[100];
-+ t_DsarArpBindingEntry* bindings = ioremap(
-+ ioread32be(&ArpDescriptor->p_Bindings) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ ArpDescriptor->numOfBindings *
-+ sizeof(t_DsarArpBindingEntry));
-+ uint8_t* ip_addr = (uint8_t*)&bindings->ipv4Addr;
-+ FM_DMP_LN(buf, n, " ip vlan id\n");
-+ for (i = 0; i < ArpDescriptor->numOfBindings; i++)
-+ {
-+ n += snprintf(ip_str, 100, "%d.%d.%d.%d",
-+ ip_addr[0], ip_addr[1],
-+ ip_addr[2], ip_addr[3]);
-+ FM_DMP_LN(buf, n, "%-15s 0x%x\n",
-+ ip_str, bindings->vlanId);
-+ }
-+ iounmap(bindings);
-+ }
-+ if (ArpDescriptor->p_Statistics)
-+ {
-+ t_DsarArpStatistics* arpStats = ioremap(
-+ ioread32be(&ArpDescriptor->p_Statistics) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof(t_DsarArpStatistics));
-+ FM_DMP_LN(buf, n, "statistics\n");
-+ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", arpStats->invalCnt);
-+ FM_DMP_LN(buf, n, "ECHO_CNT: 0x%x\n", arpStats->echoCnt);
-+ FM_DMP_LN(buf, n, "CD_CNT: 0x%x\n", arpStats->cdCnt);
-+ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", arpStats->arCnt);
-+ FM_DMP_LN(buf, n, "RATM_CNT: 0x%x\n", arpStats->ratmCnt);
-+ FM_DMP_LN(buf, n, "UKOP_CNT: 0x%x\n", arpStats->ukopCnt);
-+ FM_DMP_LN(buf, n, "NMTP_CNT: 0x%x\n", arpStats->nmtpCnt);
-+ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", arpStats->nmVlanCnt);
-+ iounmap(arpStats);
-+ }
-+
-+ iounmap(ArpDescriptor);
-+ }
-+ if (ArCommonDescPtr->p_IcmpV4Descriptor)
-+ {
-+ t_DsarIcmpV4Descriptor* ICMPV4Descriptor =
-+ (t_DsarIcmpV4Descriptor*)ioremap(ioread32be(
-+ &ArCommonDescPtr->p_IcmpV4Descriptor) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof (t_DsarIcmpV4Descriptor));
-+ FM_DMP_LN(buf, n, "\nEcho ICMPv4\n");
-+ FM_DMP_LN(buf, n, "===========\n");
-+ FM_DMP_LN(buf, n, "control bits 0x%04x\n", ICMPV4Descriptor->control);
-+ if (ICMPV4Descriptor->numOfBindings)
-+ {
-+ char ip_str[100];
-+ t_DsarArpBindingEntry* bindings = ioremap(
-+ ioread32be(&ICMPV4Descriptor->p_Bindings) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ ICMPV4Descriptor->numOfBindings *
-+ sizeof(t_DsarArpBindingEntry));
-+ uint8_t* ip_addr = (uint8_t*)&bindings->ipv4Addr;
-+ FM_DMP_LN(buf, n, " ip vlan id\n");
-+ for (i = 0; i < ICMPV4Descriptor->numOfBindings; i++)
-+ {
-+ n += snprintf(ip_str, 100, "%d.%d.%d.%d",
-+ ip_addr[0], ip_addr[1],
-+ ip_addr[2], ip_addr[3]);
-+ FM_DMP_LN(buf, n, "%-15s 0x%x\n",
-+ ip_str, bindings->vlanId);
-+ }
-+ iounmap(bindings);
-+ }
-+ if (ICMPV4Descriptor->p_Statistics)
-+ {
-+ t_DsarIcmpV4Statistics* icmpv4Stats = ioremap(
-+ ioread32be(&ICMPV4Descriptor->p_Statistics) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof(t_DsarIcmpV4Statistics));
-+ FM_DMP_LN(buf, n, "statistics\n");
-+ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", icmpv4Stats->invalCnt);
-+ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", icmpv4Stats->nmVlanCnt);
-+ FM_DMP_LN(buf, n, "NMIP_CNT: 0x%x\n", icmpv4Stats->nmIpCnt);
-+ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", icmpv4Stats->arCnt);
-+ FM_DMP_LN(buf, n, "CSERR_CNT: 0x%x\n", icmpv4Stats->cserrCnt);
-+ iounmap(icmpv4Stats);
-+ }
-+ iounmap(ICMPV4Descriptor);
-+ }
-+ if (ArCommonDescPtr->p_NdDescriptor)
-+ {
-+ t_DsarNdDescriptor *NDDescriptor =
-+ (t_DsarNdDescriptor*)ioremap(ioread32be(
-+ &ArCommonDescPtr->p_NdDescriptor) + p_FmPort->
-+ fmMuramPhysBaseAddr, sizeof (t_DsarNdDescriptor));
-+ FM_DMP_LN(buf, n, "\nNDP\n");
-+ FM_DMP_LN(buf, n, "===\n");
-+ FM_DMP_LN(buf, n, "control bits 0x%04x\n", NDDescriptor->control);
-+ FM_DMP_LN(buf, n, "solicited address 0x%08x\n", NDDescriptor->solicitedAddr);
-+ if (NDDescriptor->numOfBindings)
-+ {
-+ char ip_str[100];
-+ t_DsarIcmpV6BindingEntry* bindings = ioremap(
-+ ioread32be(&NDDescriptor->p_Bindings) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ NDDescriptor->numOfBindings *
-+ sizeof(t_DsarIcmpV6BindingEntry));
-+ uint16_t* ip_addr = (uint16_t*)&bindings->ipv6Addr;
-+ FM_DMP_LN(buf, n, " ip vlan id\n");
-+ for (i = 0; i < NDDescriptor->numOfBindings; i++)
-+ {
-+ n += snprintf(ip_str, 100,
-+ "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
-+ ip_addr[0], ip_addr[1], ip_addr[2], ip_addr[3],
-+ ip_addr[4], ip_addr[5], ip_addr[6], ip_addr[7]);
-+ FM_DMP_LN(buf, n, "%s 0x%x\n", ip_str, bindings->vlanId);
-+ }
-+ iounmap(bindings);
-+ }
-+ if (NDDescriptor->p_Statistics)
-+ {
-+ t_NdStatistics* ndStats = ioremap(
-+ ioread32be(&NDDescriptor->p_Statistics) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof(t_NdStatistics));
-+ FM_DMP_LN(buf, n, "statistics\n");
-+ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", ndStats->invalCnt);
-+ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", ndStats->nmVlanCnt);
-+ FM_DMP_LN(buf, n, "NMIP_CNT: 0x%x\n", ndStats->nmIpCnt);
-+ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", ndStats->arCnt);
-+ FM_DMP_LN(buf, n, "USADVERT_CNT: 0x%x\n", ndStats->usadvertCnt);
-+ FM_DMP_LN(buf, n, "NMMCAST_CNT: 0x%x\n", ndStats->nmmcastCnt);
-+ FM_DMP_LN(buf, n, "NSLLA_CNT: 0x%x\n", ndStats->nsllaCnt);
-+ iounmap(ndStats);
-+ }
-+ iounmap(NDDescriptor);
-+ }
-+ if (ArCommonDescPtr->p_IcmpV6Descriptor)
-+ {
-+ t_DsarIcmpV6Descriptor *ICMPV6Descriptor =
-+ (t_DsarIcmpV6Descriptor*)ioremap(ioread32be(
-+ &ArCommonDescPtr->p_IcmpV6Descriptor) + p_FmPort->
-+ fmMuramPhysBaseAddr, sizeof (t_DsarIcmpV6Descriptor));
-+ FM_DMP_LN(buf, n, "\nEcho ICMPv6\n");
-+ FM_DMP_LN(buf, n, "===========\n");
-+ FM_DMP_LN(buf, n, "control bits 0x%04x\n", ICMPV6Descriptor->control);
-+ if (ICMPV6Descriptor->numOfBindings)
-+ {
-+ char ip_str[100];
-+ t_DsarIcmpV6BindingEntry* bindings = ioremap(
-+ ioread32be(&ICMPV6Descriptor->p_Bindings) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ ICMPV6Descriptor->numOfBindings *
-+ sizeof(t_DsarIcmpV6BindingEntry));
-+ uint16_t* ip_addr = (uint16_t*)&bindings->ipv6Addr;
-+ FM_DMP_LN(buf, n, " ip vlan id\n");
-+ for (i = 0; i < ICMPV6Descriptor->numOfBindings; i++)
-+ {
-+ n += snprintf(ip_str, 100,
-+ "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
-+ ip_addr[0], ip_addr[1], ip_addr[2], ip_addr[3],
-+ ip_addr[4], ip_addr[5], ip_addr[6], ip_addr[7]);
-+ FM_DMP_LN(buf, n, "%s 0x%x\n", ip_str, bindings->vlanId);
-+ }
-+ iounmap(bindings);
-+ }
-+ if (ICMPV6Descriptor->p_Statistics)
-+ {
-+ t_DsarIcmpV6Statistics* icmpv6Stats = ioremap(
-+ ioread32be(&ICMPV6Descriptor->p_Statistics) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof(t_DsarIcmpV6Statistics));
-+ FM_DMP_LN(buf, n, "statistics\n");
-+ FM_DMP_LN(buf, n, "INVAL_CNT: 0x%x\n", icmpv6Stats->invalCnt);
-+ FM_DMP_LN(buf, n, "NMVLAN_CNT: 0x%x\n", icmpv6Stats->nmVlanCnt);
-+ FM_DMP_LN(buf, n, "NMIP_CNT: 0x%x\n", icmpv6Stats->nmIpCnt);
-+ FM_DMP_LN(buf, n, "AR_CNT: 0x%x\n", icmpv6Stats->arCnt);
-+ iounmap(icmpv6Stats);
-+ }
-+ iounmap(ICMPV6Descriptor);
-+ }
-+ if (ArCommonDescPtr->p_SnmpDescriptor)
-+ {
-+ t_DsarSnmpDescriptor *SnmpDescriptor =
-+ (t_DsarSnmpDescriptor*)ioremap(ioread32be(
-+ &ArCommonDescPtr->p_SnmpDescriptor) + p_FmPort->
-+ fmMuramPhysBaseAddr, sizeof (t_DsarSnmpDescriptor));
-+ FM_DMP_LN(buf, n, "\nSNMP\n");
-+ FM_DMP_LN(buf, n, "===========\n");
-+ FM_DMP_LN(buf, n, "control bits 0x%04x\n", SnmpDescriptor->control);
-+ FM_DMP_LN(buf, n, "max message length 0x%04x\n", SnmpDescriptor->maxSnmpMsgLength);
-+ if (SnmpDescriptor->numOfIpv4Addresses)
-+ {
-+ char ip_str[100];
-+ t_DsarSnmpIpv4AddrTblEntry* addrs = ioremap(
-+ ioread32be(&SnmpDescriptor->p_Ipv4AddrTbl) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ SnmpDescriptor->numOfIpv4Addresses *
-+ sizeof(t_DsarSnmpIpv4AddrTblEntry));
-+ uint8_t* ip_addr = (uint8_t*)&addrs->ipv4Addr;
-+ FM_DMP_LN(buf, n, " ip vlan id\n");
-+ for (i = 0; i < SnmpDescriptor->numOfIpv4Addresses; i++)
-+ {
-+ n += snprintf(ip_str, 100, "%d.%d.%d.%d",
-+ ip_addr[0], ip_addr[1],
-+ ip_addr[2], ip_addr[3]);
-+ FM_DMP_LN(buf, n, "%-15s 0x%x\n", ip_str, addrs->vlanId);
-+ }
-+ iounmap(addrs);
-+ }
-+ if (SnmpDescriptor->p_Statistics)
-+ {
-+ t_DsarSnmpStatistics* snmpStats = ioremap(
-+ ioread32be(&SnmpDescriptor->p_Statistics) +
-+ p_FmPort->fmMuramPhysBaseAddr,
-+ sizeof(t_DsarSnmpStatistics));
-+ FM_DMP_LN(buf, n, "statistics\n");
-+ FM_DMP_LN(buf, n, "snmpErrCnt: 0x%x\n", snmpStats->snmpErrCnt);
-+ FM_DMP_LN(buf, n, "snmpCommunityErrCnt: 0x%x\n", snmpStats->snmpCommunityErrCnt);
-+ FM_DMP_LN(buf, n, "snmpTotalDiscardCnt: 0x%x\n", snmpStats->snmpTotalDiscardCnt);
-+ FM_DMP_LN(buf, n, "snmpGetReqCnt: 0x%x\n", snmpStats->snmpGetReqCnt);
-+ FM_DMP_LN(buf, n, "snmpGetNextReqCnt: 0x%x\n", snmpStats->snmpGetNextReqCnt);
-+ iounmap(snmpStats);
-+ }
-+ iounmap(SnmpDescriptor);
-+ }
-+ iounmap(ArCommonDescPtr);
-+ iounmap(param_page);
-+ return n;
-+}
-+
-+static ssize_t show_fm_port_dsar_mem(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+#endif
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_LnxWrpFmPortDev =
-+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+
-+ local_irq_save(flags);
-+
-+ if (!p_LnxWrpFmPortDev->h_Dev) {
-+ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
-+ return n;
-+ } else {
-+ n = snprintf(buf, PAGE_SIZE,
-+ "FM port driver registers dump.\n");
-+ n = fm_port_dsar_dump_mem(p_LnxWrpFmPortDev->h_Dev, buf, n);
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+ return n;
-+#endif
-+}
-+
-+static ssize_t show_fm_port_dsar_regs(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+#endif
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_LnxWrpFmPortDev =
-+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+
-+ local_irq_save(flags);
-+
-+ if (!p_LnxWrpFmPortDev->h_Dev) {
-+ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
-+ return n;
-+ } else {
-+ n = snprintf(buf, PAGE_SIZE,
-+ "FM port driver registers dump.\n");
-+ n = fm_port_dsar_dump_regs(p_LnxWrpFmPortDev->h_Dev, buf, n);
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+ return n;
-+#endif
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+static ssize_t show_fm_port_ipv4_options(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_LnxWrpFmPortDev =
-+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+
-+ local_irq_save(flags);
-+
-+ if (!p_LnxWrpFmPortDev->h_Dev) {
-+ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
-+ return n;
-+ } else if (((t_FmPort *)p_LnxWrpFmPortDev->h_Dev)->p_ParamsPage
-+ == NULL) {
-+ n = snprintf(buf, PAGE_SIZE,
-+ "\tPort: FMan-controller params page not set\n");
-+ return n;
-+ } else {
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Counter for fragmented pkt with IP header options\n");
-+ n = fm_port_dump_ipv4_opt(p_LnxWrpFmPortDev->h_Dev, buf, n);
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+ return n;
-+#endif
-+}
-+
-+#endif
-+
-+static ssize_t show_fm_port_bmi_regs(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_LnxWrpFmPortDev =
-+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+
-+ local_irq_save(flags);
-+
-+ if (!p_LnxWrpFmPortDev->h_Dev) {
-+ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
-+ return n;
-+ } else {
-+ n = snprintf(buf, PAGE_SIZE,
-+ "FM port driver registers dump.\n");
-+ n = fm_port_dump_regs_bmi(p_LnxWrpFmPortDev->h_Dev, buf, n);
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+ return n;
-+#endif
-+}
-+
-+static ssize_t show_fm_port_qmi_regs(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ unsigned long flags;
-+ unsigned n = 0;
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+#endif
-+
-+ if (attr == NULL || buf == NULL || dev == NULL)
-+ return -EINVAL;
-+
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ p_LnxWrpFmPortDev =
-+ (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+
-+ local_irq_save(flags);
-+
-+ if (!p_LnxWrpFmPortDev->h_Dev) {
-+ n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n");
-+ return n;
-+ } else {
-+ n = snprintf(buf, PAGE_SIZE,
-+ "FM port driver registers dump.\n");
-+ n = fm_port_dump_regs_qmi(p_LnxWrpFmPortDev->h_Dev, buf, n);
-+ }
-+
-+ local_irq_restore(flags);
-+
-+ return n;
-+#else
-+
-+ local_irq_save(flags);
-+ n = snprintf(buf, PAGE_SIZE,
-+ "Debug level is too low to dump registers!!!\n");
-+ local_irq_restore(flags);
-+
-+ return n;
-+#endif
-+}
-+
-+static DEVICE_ATTR(fm_port_regs, S_IRUGO | S_IRUSR, show_fm_port_regs, NULL);
-+static DEVICE_ATTR(fm_port_qmi_regs, S_IRUGO | S_IRUSR, show_fm_port_qmi_regs, NULL);
-+static DEVICE_ATTR(fm_port_bmi_regs, S_IRUGO | S_IRUSR, show_fm_port_bmi_regs, NULL);
-+#if (DPAA_VERSION >= 11)
-+static DEVICE_ATTR(fm_port_ipv4_opt, S_IRUGO | S_IRUSR, show_fm_port_ipv4_options, NULL);
-+#endif
-+static DEVICE_ATTR(fm_port_dsar_regs, S_IRUGO | S_IRUSR, show_fm_port_dsar_regs, NULL);
-+static DEVICE_ATTR(fm_port_dsar_mem, S_IRUGO | S_IRUSR, show_fm_port_dsar_mem, NULL);
-+
-+int fm_port_sysfs_create(struct device *dev)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev;
-+
-+ if (dev == NULL)
-+ return -EINVAL;
-+
-+ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_LnxWrpFmPortDev == NULL))
-+ return -EINVAL;
-+
-+	/* store them so they can be removed when the module is disabled */
-+ p_LnxWrpFmPortDev->dev_attr_regs = &dev_attr_fm_port_regs;
-+ p_LnxWrpFmPortDev->dev_attr_qmi_regs = &dev_attr_fm_port_qmi_regs;
-+ p_LnxWrpFmPortDev->dev_attr_bmi_regs = &dev_attr_fm_port_bmi_regs;
-+#if (DPAA_VERSION >= 11)
-+ p_LnxWrpFmPortDev->dev_attr_ipv4_opt = &dev_attr_fm_port_ipv4_opt;
-+#endif
-+ p_LnxWrpFmPortDev->dev_attr_dsar_regs = &dev_attr_fm_port_dsar_regs;
-+ p_LnxWrpFmPortDev->dev_attr_dsar_mem = &dev_attr_fm_port_dsar_mem;
-+	/* Register dump entries - to be moved to debugfs in the future */
-+ if (device_create_file(dev, &dev_attr_fm_port_regs) != 0)
-+ return -EIO;
-+ if (device_create_file(dev, &dev_attr_fm_port_qmi_regs) != 0)
-+ return -EIO;
-+ if (device_create_file(dev, &dev_attr_fm_port_bmi_regs) != 0)
-+ return -EIO;
-+#if (DPAA_VERSION >= 11)
-+ if (device_create_file(dev, &dev_attr_fm_port_ipv4_opt) != 0)
-+ return -EIO;
-+#endif
-+ if (device_create_file(dev, &dev_attr_fm_port_dsar_regs) != 0)
-+ return -EIO;
-+ if (device_create_file(dev, &dev_attr_fm_port_dsar_mem) != 0)
-+ return -EIO;
-+
-+ /* FM Ports statistics */
-+ switch (p_LnxWrpFmPortDev->settings.param.portType) {
-+ case e_FM_PORT_TYPE_TX:
-+ case e_FM_PORT_TYPE_TX_10G:
-+ if (sysfs_create_group
-+ (&dev->kobj, &fm_tx_port_dev_stats_attr_grp) != 0)
-+ return -EIO;
-+ break;
-+ case e_FM_PORT_TYPE_RX:
-+ case e_FM_PORT_TYPE_RX_10G:
-+ if (sysfs_create_group
-+ (&dev->kobj, &fm_rx_port_dev_stats_attr_grp) != 0)
-+ return -EIO;
-+ break;
-+ case e_FM_PORT_TYPE_DUMMY:
-+ case e_FM_PORT_TYPE_OH_OFFLINE_PARSING:
-+ if (sysfs_create_group
-+ (&dev->kobj, &fm_oh_port_dev_stats_attr_grp) != 0)
-+ return -EIO;
-+ break;
-+ default:
-+ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
-+ __func__);
-+ return -EINVAL;
-+ break;
-+ };
-+
-+ return 0;
-+}
-+
-+void fm_port_sysfs_destroy(struct device *dev)
-+{
-+ t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = NULL;
-+
-+ /* this function has never been tested !!! */
-+
-+ if (WARN_ON(dev == NULL))
-+ return;
-+
-+ p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev);
-+ if (WARN_ON(p_LnxWrpFmPortDev == NULL))
-+ return;
-+
-+	/* Is the name attribute also freed by these two functions? */
-+ switch (p_LnxWrpFmPortDev->settings.param.portType) {
-+ case e_FM_PORT_TYPE_TX:
-+ case e_FM_PORT_TYPE_TX_10G:
-+ sysfs_remove_group(&dev->kobj, &fm_tx_port_dev_stats_attr_grp);
-+ break;
-+ case e_FM_PORT_TYPE_RX:
-+ case e_FM_PORT_TYPE_RX_10G:
-+ sysfs_remove_group(&dev->kobj, &fm_rx_port_dev_stats_attr_grp);
-+ break;
-+ case e_FM_PORT_TYPE_DUMMY:
-+ case e_FM_PORT_TYPE_OH_OFFLINE_PARSING:
-+ sysfs_remove_group(&dev->kobj, &fm_oh_port_dev_stats_attr_grp);
-+ break;
-+ default:
-+ WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__,
-+ __func__);
-+ break;
-+ };
-+
-+ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_regs);
-+ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_qmi_regs);
-+ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_bmi_regs);
-+#if (DPAA_VERSION >= 11)
-+ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_ipv4_opt);
-+#endif
-+ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_dsar_regs);
-+ device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_dsar_mem);
-+}
-+
-+
-+int fm_port_dump_regs(void *h_dev, char *buf, int nn)
-+{
-+ t_FmPort *p_FmPort;
-+ t_Fm *p_Fm;
-+ uint8_t hardwarePortId;
-+ int n = nn;
-+
-+ p_FmPort = (t_FmPort *)h_dev;
-+ hardwarePortId = p_FmPort->hardwarePortId;
-+ p_Fm = (t_Fm *)p_FmPort->h_Fm;
-+
-+ FM_DMP_TITLE(buf, n, &p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId - 1],
-+ "fmbm_pp for port %u", hardwarePortId);
-+ FM_DMP_MEM_32(buf, n,
-+ &p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId - 1]);
-+
-+ FM_DMP_TITLE(buf, n, &p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId - 1],
-+ "fmbm_pfs for port %u", hardwarePortId);
-+ FM_DMP_MEM_32(buf, n,
-+ &p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId - 1]);
-+
-+ FM_DMP_TITLE(buf, n,
-+ &p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId - 1],
-+ "fmbm_spliodn for port %u", hardwarePortId);
-+ FM_DMP_MEM_32(buf, n,
-+ &p_Fm->p_FmBmiRegs->fmbm_spliodn[hardwarePortId - 1]);
-+
-+ FM_DMP_TITLE(buf, n, &p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId],
-+		     "fmfp_ps for port %u", hardwarePortId);
-+ FM_DMP_MEM_32(buf, n, &p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId]);
-+
-+ FM_DMP_TITLE(buf, n, &p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId / 2],
-+		     "fmdmplr for port %u", hardwarePortId);
-+ FM_DMP_MEM_32(buf, n,
-+ &p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId / 2]);
-+ return n;
-+}
-+
-+#if (DPAA_VERSION >= 11)
-+
-+int fm_port_dump_ipv4_opt(void *h_dev, char *buf, int nn)
-+{
-+ t_FmPort *p_FmPort;
-+ int n = nn;
-+
-+ p_FmPort = (t_FmPort *)h_dev;
-+
-+ FM_DMP_V32(buf, n, p_FmPort->p_ParamsPage, ipfOptionsCounter);
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ return n;
-+}
-+#endif
-+
-+int fm_port_dump_regs_bmi(void *h_dev, char *buf, int nn)
-+{
-+ t_FmPort *p_FmPort;
-+ u_FmPortBmiRegs *p_bmi;
-+
-+ char arr[20];
-+ uint8_t flag;
-+ int i = 0;
-+ int n = nn;
-+
-+ p_FmPort = (t_FmPort *)h_dev;
-+ p_bmi = p_FmPort->p_FmPortBmiRegs;
-+
-+ memset(arr, 0, sizeof(arr));
-+ switch (p_FmPort->portType) {
-+ case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
-+ strcpy(arr, "OFFLINE-PARSING");
-+ flag = 0;
-+ break;
-+ case (e_FM_PORT_TYPE_OH_HOST_COMMAND):
-+ strcpy(arr, "HOST-COMMAND");
-+ flag = 0;
-+ break;
-+ case (e_FM_PORT_TYPE_RX):
-+ strcpy(arr, "RX");
-+ flag = 1;
-+ break;
-+ case (e_FM_PORT_TYPE_RX_10G):
-+ strcpy(arr, "RX-10G");
-+ flag = 1;
-+ break;
-+ case (e_FM_PORT_TYPE_TX):
-+ strcpy(arr, "TX");
-+ flag = 2;
-+ break;
-+ case (e_FM_PORT_TYPE_TX_10G):
-+ strcpy(arr, "TX-10G");
-+ flag = 2;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ FM_DMP_TITLE(buf, n, NULL,
-+ "FMan-Port (%s #%d) registers:",
-+ arr, p_FmPort->portId);
-+
-+ FM_DMP_TITLE(buf, n, p_bmi, "Bmi Port Regs");
-+
-+ switch (flag) {
-+ case (0):
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ocfg);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ost);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oda);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oicp);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofdne);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofne);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofca);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofpne);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opso);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opp);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_occb);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oim);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofp);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofed);
-+
-+ FM_DMP_TITLE(buf, n,
-+ &(p_bmi->ohPortBmiRegs.fmbm_oprai), "fmbm_oprai");
-+ for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->ohPortBmiRegs.fmbm_oprai[i]));
-+ }
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofqid);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oefqid);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofsdm);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofsem);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofene);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_orlmts);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_orlmt);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ocmne);
-+ {
-+#ifndef FM_NO_OP_OBSERVED_POOLS
-+ if (p_FmPort->fmRevInfo.majorRev == 4) {
-+ FM_DMP_TITLE(buf, n,
-+ &p_bmi->ohPortBmiRegs.fmbm_oebmpi,
-+ "fmbm_oebmpi");
-+
-+ for (i = 0; i < FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->ohPortBmiRegs.fmbm_oebmpi[i]));
-+ }
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ocgm);
-+ }
-+#endif /* !FM_NO_OP_OBSERVED_POOLS */
-+ }
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ostc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofrc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofdc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofledc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofufdc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_offc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofwdc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofldec);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_opcp);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_occn);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_otuc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_oduc);
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ofuc);
-+ FM_DMP_TITLE(buf, n, &(p_bmi->ohPortBmiRegs.fmbm_odcfg),
-+ "fmbm_odcfg");
-+ for (i = 0; i < 3; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->ohPortBmiRegs.fmbm_odcfg[i]));
-+ }
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ FM_DMP_V32(buf, n, &p_bmi->ohPortBmiRegs, fmbm_ogpr);
-+ break;
-+ case (1):
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rcfg);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rst);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rda);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfp);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_reth);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfed);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_ricp);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rebm);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfne);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfca);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfpne);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpso);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpp);
-+ FM_DMP_TITLE(buf, n, &(p_bmi->rxPortBmiRegs.fmbm_rprai),
-+ "fmbm_rprai");
-+ for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->rxPortBmiRegs.fmbm_rprai[i]));
-+ }
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfqid);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_refqid);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfsdm);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfsem);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfene);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rcmne);
-+ FM_DMP_TITLE(buf, n, &p_bmi->rxPortBmiRegs.fmbm_ebmpi,
-+ "fmbm_ebmpi");
-+ for (i = 0; i < FM_PORT_MAX_NUM_OF_EXT_POOLS; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->rxPortBmiRegs.fmbm_ebmpi[i]));
-+ }
-+ FM_DMP_TITLE(buf, n, &p_bmi->rxPortBmiRegs.fmbm_acnt,
-+ "fmbm_acnt");
-+ for (i = 0; i < FM_PORT_MAX_NUM_OF_EXT_POOLS; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->rxPortBmiRegs.fmbm_acnt[i]));
-+ }
-+ FM_DMP_TITLE(buf, n, &p_bmi->rxPortBmiRegs.fmbm_rcgm,
-+ "fmbm_rcgm");
-+ for (i = 0; i < FM_PORT_NUM_OF_CONGESTION_GRPS / 32; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->rxPortBmiRegs.fmbm_rcgm[i]));
-+ }
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rmpd);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rstc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfrc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfbc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rlfc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rffc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfcd);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfldec);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rodc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpcp);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rccn);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rtuc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rrquc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rduc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rfuc);
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rpac);
-+ FM_DMP_TITLE(buf, n, &(p_bmi->rxPortBmiRegs.fmbm_rdcfg),
-+ "fmbm_rdcfg");
-+ for (i = 0; i < 3; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->rxPortBmiRegs.fmbm_rdcfg[i]));
-+ }
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->rxPortBmiRegs, fmbm_rgpr);
-+ break;
-+ case (2):
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tcfg);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tst);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tda);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfp);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfed);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_ticp);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfdne);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfca);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tcfqid);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfeqid);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfene);
-+#if (DPAA_VERSION >= 11)
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfne);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tcmne);
-+#endif /* (DPAA_VERSION >= 11) */
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_trlmts);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_trlmt);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tstc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfrc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfdc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfledc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfufdc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tpc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tpcp);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tccn);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_ttuc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_ttcquc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tduc);
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tfuc);
-+ FM_DMP_TITLE(buf, n, &(p_bmi->txPortBmiRegs.fmbm_tdcfg),
-+ "fmbm_tdcfg");
-+ for (i = 0; i < 3 ; ++i) {
-+ FM_DMP_MEM_32(buf, n,
-+ &(p_bmi->txPortBmiRegs.fmbm_tdcfg[i]));
-+ }
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+ FM_DMP_V32(buf, n, &p_bmi->txPortBmiRegs, fmbm_tgpr);
-+ break;
-+ }
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ return n;
-+}
-+
-+int fm_port_dump_regs_qmi(void *h_dev, char *buf, int nn)
-+{
-+ t_FmPort *p_FmPort;
-+ int n = nn;
-+
-+ p_FmPort = (t_FmPort *)h_dev;
-+
-+ FM_DMP_TITLE(buf, n, p_FmPort->p_FmPortQmiRegs, "Qmi Port Regs");
-+
-+ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnc);
-+ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pns);
-+ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnts);
-+ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnen);
-+ FM_DMP_V32(buf, n, p_FmPort->p_FmPortQmiRegs, fmqm_pnetfc);
-+ FM_DMP_V32(buf, n,
-+ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndn);
-+ FM_DMP_V32(buf, n,
-+ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndc);
-+ FM_DMP_V32(buf, n,
-+ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndtfc);
-+ FM_DMP_V32(buf, n,
-+ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndfdc);
-+ FM_DMP_V32(buf, n,
-+ &p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs, fmqm_pndcc);
-+
-+ FM_DMP_SUBTITLE(buf, n, "\n");
-+
-+ return n;
-+}
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h
-@@ -0,0 +1,56 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/*
-+ @File lnxwrp_sysfs_fm_port.h
-+
-+ @Description FM port sysfs functions.
-+
-+*/
-+
-+#ifndef LNXWRP_SYSFS_FM_PORT_H_
-+#define LNXWRP_SYSFS_FM_PORT_H_
-+
-+#include "lnxwrp_sysfs.h"
-+
-+int fm_port_sysfs_create(struct device *dev);
-+void fm_port_sysfs_destroy(struct device *dev);
-+
-+int fm_port_dump_regs(void *h_dev, char *buf, int n);
-+int fm_port_dump_regs_bmi(void *h_dev, char *buf, int n);
-+int fm_port_dump_regs_qmi(void *h_dev, char *buf, int n);
-+
-+#if (DPAA_VERSION >= 11)
-+int fm_port_dump_ipv4_opt(void *h_dev, char *buf, int n);
-+#endif
-+
-+#endif /* LNXWRP_SYSFS_FM_PORT_H_ */
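For orientation, a minimal, hypothetical sketch of how a port probe/remove path might wire up the entry points declared above (the example_* function names are invented for illustration; it assumes dev_set_drvdata() has already stored the port's t_LnxWrpFmPortDev, as the wrapper code above expects):

#include <linux/device.h>
#include "lnxwrp_sysfs_fm_port.h"

/* Hypothetical probe step: expose the register-dump attributes once
 * drvdata points at the wrapper's t_LnxWrpFmPortDev. */
static int example_fm_port_probe_sysfs(struct device *dev)
{
	int err = fm_port_sysfs_create(dev);

	if (err)
		dev_err(dev, "fm_port sysfs creation failed: %d\n", err);
	return err;
}

/* Hypothetical remove step: tear the attributes down again. */
static void example_fm_port_remove_sysfs(struct device *dev)
{
	fm_port_sysfs_destroy(dev);
}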
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/Makefile
-@@ -0,0 +1,18 @@
-+#
-+# Makefile for the Freescale Ethernet controllers
-+#
-+ccflags-y += -DVERSION=\"\"
-+#
-+#Include netcomm SW specific definitions
-+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
-+
-+obj-y += fsl-ncsw-xx.o
-+
-+ifneq ($(CONFIG_FMAN_ARM),y)
-+fsl-ncsw-xx-objs := xx_linux.o \
-+ module_strings.o
-+else
-+fsl-ncsw-xx-objs := xx_arm_linux.o \
-+ module_strings.o
-+endif
-+
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/module_strings.c
-@@ -0,0 +1,46 @@
-+/*
-+ * Copyright 2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/* Module names for debug messages */
-+const char *moduleStrings[] =
-+{
-+ "", /* MODULE_UNKNOWN */
-+ "FM", /* MODULE_FM */
-+ "FM-MURAM", /* MODULE_FM_MURAM */
-+ "FM-PCD", /* MODULE_FM_PCD */
-+ "FM-RTC", /* MODULE_FM_RTC */
-+ "FM-MAC", /* MODULE_FM_MAC */
-+ "FM-Port", /* MODULE_FM_PORT */
-+ "MM", /* MODULE_MM */
-+ "FM-SP", /* MODULE_FM_SP */
-+ "FM-MACSEC" /* MODULE_FM_MACSEC */
-+};
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_arm_linux.c
-@@ -0,0 +1,905 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File xx_arm_linux.c
-+
-+ @Description XX routines implementation for Linux.
-+*//***************************************************************************/
-+#include <linux/version.h>
-+
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#include <config/modversions.h>
-+#endif /* MODVERSIONS */
-+
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/ptrace.h>
-+#include <linux/errno.h>
-+#include <linux/ioport.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/fs.h>
-+#include <linux/vmalloc.h>
-+#include <linux/init.h>
-+#include <linux/timer.h>
-+#include <linux/spinlock.h>
-+#include <linux/delay.h>
-+#include <linux/proc_fs.h>
-+#include <linux/smp.h>
-+#include <linux/of.h>
-+#include <linux/irqdomain.h>
-+
-+#include <linux/workqueue.h>
-+
-+#ifdef BIGPHYSAREA_ENABLE
-+#include <linux/bigphysarea.h>
-+#endif /* BIGPHYSAREA_ENABLE */
-+
-+//#include <sysdev/fsl_soc.h>
-+#include <asm/pgtable.h>
-+#include <asm/irq.h>
-+#include <asm/bitops.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/string.h>
-+#include <asm/byteorder.h>
-+#include <asm/page.h>
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "list_ext.h"
-+#include "mm_ext.h"
-+#include "sys_io_ext.h"
-+#include "xx.h"
-+
-+
-+#define __ERR_MODULE__ MODULE_UNKNOWN
-+
-+#ifdef BIGPHYSAREA_ENABLE
-+#define MAX_ALLOCATION_SIZE 128 * 1024 /* Maximum size allocated with kmalloc is 128K */
-+
-+
-+/* TODO: large allocations => use big phys area */
-+/******************************************************************************
-+ * routine: get_nr_pages
-+ *
-+ * description:
-+ * calculates the number of memory pages for a given size (in bytes)
-+ *
-+ * arguments:
-+ * size - the number of bytes
-+ *
-+ * return code:
-+ * The number of pages
-+ *
-+ *****************************************************************************/
-+static __inline__ uint32_t get_nr_pages (uint32_t size)
-+{
-+    return (uint32_t)((size >> PAGE_SHIFT) + (size & (PAGE_SIZE - 1) ? 1 : 0));
-+}
-+
-+static bool in_big_phys_area (uint32_t addr)
-+{
-+ uint32_t base, size;
-+
-+ bigphysarea_get_details (&base, &size);
-+ return ((addr >= base) && (addr < base + size));
-+}
-+#endif /* BIGPHYSAREA_ENABLE */
-+
-+void * xx_Malloc(uint32_t n)
-+{
-+ void *a;
-+ uint32_t flags;
-+
-+ flags = XX_DisableAllIntr();
-+#ifdef BIGPHYSAREA_ENABLE
-+ if (n >= MAX_ALLOCATION_SIZE)
-+ a = (void*)bigphysarea_alloc_pages(get_nr_pages(n), 0, GFP_ATOMIC);
-+ else
-+#endif /* BIGPHYSAREA_ENABLE */
-+ a = (void *)kmalloc((uint32_t)n, GFP_ATOMIC);
-+ if (!a)
-+ XX_Print("No memory for XX_Malloc\n");
-+ XX_RestoreAllIntr(flags);
-+
-+ return a;
-+}
-+
-+void xx_Free(void *p)
-+{
-+#ifdef BIGPHYSAREA_ENABLE
-+ if (in_big_phys_area ((uint32_t)p))
-+ bigphysarea_free_pages(p);
-+ else
-+#endif /* BIGPHYSAREA_ENABLE */
-+ kfree(p);
-+}
-+
-+void XX_Exit(int status)
-+{
-+ WARN(1, "\n\nFMD: fatal error, driver can't go on!!!\n\n");
-+}
-+
-+#define BUF_SIZE 512
-+void XX_Print(char *str, ...)
-+{
-+ va_list args;
-+#ifdef CONFIG_SMP
-+ char buf[BUF_SIZE];
-+#endif /* CONFIG_SMP */
-+
-+ va_start(args, str);
-+#ifdef CONFIG_SMP
-+ if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE)
-+        printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completely.\n", BUF_SIZE);
-+ printk(KERN_CRIT "cpu %d: %s", raw_smp_processor_id(), buf);
-+#else
-+ vprintk(str, args);
-+#endif /* CONFIG_SMP */
-+ va_end(args);
-+}
-+
-+void XX_Fprint(void *file, char *str, ...)
-+{
-+ va_list args;
-+#ifdef CONFIG_SMP
-+ char buf[BUF_SIZE];
-+#endif /* CONFIG_SMP */
-+
-+ va_start(args, str);
-+#ifdef CONFIG_SMP
-+ if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE)
-+        printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completely.\n", BUF_SIZE);
-+ printk (KERN_CRIT "cpu %d: %s", smp_processor_id(), buf);
-+
-+#else
-+ vprintk(str, args);
-+#endif /* CONFIG_SMP */
-+ va_end(args);
-+}
-+
-+#ifdef DEBUG_XX_MALLOC
-+typedef void (*t_ffn)(void *);
-+typedef struct {
-+ t_ffn f_free;
-+ void *mem;
-+ char *fname;
-+ int fline;
-+ uint32_t size;
-+ t_List node;
-+} t_MemDebug;
-+#define MEMDBG_OBJECT(p_List) LIST_OBJECT(p_List, t_MemDebug, node)
-+
-+LIST(memDbgLst);
-+
-+
-+void * XX_MallocDebug(uint32_t size, char *fname, int line)
-+{
-+ void *mem;
-+ t_MemDebug *p_MemDbg;
-+
-+ p_MemDbg = (t_MemDebug *)xx_Malloc(sizeof(t_MemDebug));
-+ if (p_MemDbg == NULL)
-+ return NULL;
-+
-+ mem = xx_Malloc(size);
-+ if (mem == NULL)
-+ {
-+ XX_Free(p_MemDbg);
-+ return NULL;
-+ }
-+
-+ INIT_LIST(&p_MemDbg->node);
-+ p_MemDbg->f_free = xx_Free;
-+ p_MemDbg->mem = mem;
-+ p_MemDbg->fname = fname;
-+ p_MemDbg->fline = line;
-+ p_MemDbg->size = size+sizeof(t_MemDebug);
-+ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
-+
-+ return mem;
-+}
-+
-+void * XX_MallocSmartDebug(uint32_t size,
-+ int memPartitionId,
-+ uint32_t align,
-+ char *fname,
-+ int line)
-+{
-+ void *mem;
-+ t_MemDebug *p_MemDbg;
-+
-+ p_MemDbg = (t_MemDebug *)XX_Malloc(sizeof(t_MemDebug));
-+ if (p_MemDbg == NULL)
-+ return NULL;
-+
-+ mem = xx_MallocSmart((uint32_t)size, memPartitionId, align);
-+ if (mem == NULL)
-+ {
-+ XX_Free(p_MemDbg);
-+ return NULL;
-+ }
-+
-+ INIT_LIST(&p_MemDbg->node);
-+ p_MemDbg->f_free = xx_FreeSmart;
-+ p_MemDbg->mem = mem;
-+ p_MemDbg->fname = fname;
-+ p_MemDbg->fline = line;
-+ p_MemDbg->size = size+sizeof(t_MemDebug);
-+ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
-+
-+ return mem;
-+}
-+
-+static void debug_free(void *mem)
-+{
-+ t_List *p_MemDbgLh = NULL;
-+ t_MemDebug *p_MemDbg;
-+ bool found = FALSE;
-+
-+ if (LIST_IsEmpty(&memDbgLst))
-+ {
-+ REPORT_ERROR(MAJOR, E_ALREADY_FREE, ("Unbalanced free (0x%08x)", mem));
-+ return;
-+ }
-+
-+ LIST_FOR_EACH(p_MemDbgLh, &memDbgLst)
-+ {
-+ p_MemDbg = MEMDBG_OBJECT(p_MemDbgLh);
-+ if (p_MemDbg->mem == mem)
-+ {
-+ found = TRUE;
-+ break;
-+ }
-+ }
-+
-+ if (!found)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_FOUND,
-+ ("Attempt to free unallocated address (0x%08x)",mem));
-+ dump_stack();
-+ return;
-+ }
-+
-+ LIST_Del(p_MemDbgLh);
-+ p_MemDbg->f_free(mem);
-+ p_MemDbg->f_free(p_MemDbg);
-+}
-+
-+void XX_FreeSmart(void *p)
-+{
-+ debug_free(p);
-+}
-+
-+
-+void XX_Free(void *p)
-+{
-+ debug_free(p);
-+}
-+
-+#else /* not DEBUG_XX_MALLOC */
-+void * XX_Malloc(uint32_t size)
-+{
-+ return xx_Malloc(size);
-+}
-+
-+void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
-+{
-+ return xx_MallocSmart(size,memPartitionId, alignment);
-+}
-+
-+void XX_FreeSmart(void *p)
-+{
-+ xx_FreeSmart(p);
-+}
-+
-+
-+void XX_Free(void *p)
-+{
-+ xx_Free(p);
-+}
-+#endif /* not DEBUG_XX_MALLOC */
-+
-+
-+#if (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0))
-+void XX_EventById(uint32_t event, t_Handle appId, uint16_t flags, char *msg)
-+{
-+ e_Event eventCode = (e_Event)event;
-+
-+ UNUSED(eventCode);
-+ UNUSED(appId);
-+ UNUSED(flags);
-+ UNUSED(msg);
-+}
-+#endif /* (defined(REPORT_EVENTS) && ... */
-+
-+
-+uint32_t XX_DisableAllIntr(void)
-+{
-+ unsigned long flags;
-+
-+#ifdef local_irq_save_nort
-+ local_irq_save_nort(flags);
-+#else
-+ local_irq_save(flags);
-+#endif
-+
-+ return (uint32_t)flags;
-+}
-+
-+void XX_RestoreAllIntr(uint32_t flags)
-+{
-+#ifdef local_irq_restore_nort
-+ local_irq_restore_nort((unsigned long)flags);
-+#else
-+ local_irq_restore((unsigned long)flags);
-+#endif
-+}
-+
-+t_Error XX_Call( uint32_t qid, t_Error (* f)(t_Handle), t_Handle id, t_Handle appId, uint16_t flags )
-+{
-+ UNUSED(qid);
-+ UNUSED(appId);
-+ UNUSED(flags);
-+
-+ return f(id);
-+}
-+
-+int XX_IsICacheEnable(void)
-+{
-+ return TRUE;
-+}
-+
-+int XX_IsDCacheEnable(void)
-+{
-+ return TRUE;
-+}
-+
-+
-+typedef struct {
-+ t_Isr *f_Isr;
-+ t_Handle handle;
-+} t_InterruptHandler;
-+
-+
-+t_Handle interruptHandlers[0x00010000];
-+
-+static irqreturn_t LinuxInterruptHandler (int irq, void *dev_id)
-+{
-+ t_InterruptHandler *p_IntrHndl = (t_InterruptHandler *)dev_id;
-+ p_IntrHndl->f_Isr(p_IntrHndl->handle);
-+ return IRQ_HANDLED;
-+}
-+
-+t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle)
-+{
-+ const char *device;
-+ t_InterruptHandler *p_IntrHndl;
-+
-+ device = GetDeviceName(irq);
-+ if (device == NULL)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Interrupt source - %d", irq));
-+
-+ p_IntrHndl = (t_InterruptHandler *)XX_Malloc(sizeof(t_InterruptHandler));
-+ if (p_IntrHndl == NULL)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ p_IntrHndl->f_Isr = f_Isr;
-+ p_IntrHndl->handle = handle;
-+ interruptHandlers[irq] = p_IntrHndl;
-+
-+ if (request_irq(GetDeviceIrqNum(irq), LinuxInterruptHandler, 0, device, p_IntrHndl) < 0)
-+ RETURN_ERROR(MAJOR, E_BUSY, ("Can't get IRQ %s\n", device));
-+ disable_irq(GetDeviceIrqNum(irq));
-+
-+ return E_OK;
-+}
-+
-+t_Error XX_FreeIntr(int irq)
-+{
-+ t_InterruptHandler *p_IntrHndl = interruptHandlers[irq];
-+ free_irq(GetDeviceIrqNum(irq), p_IntrHndl);
-+ XX_Free(p_IntrHndl);
-+ interruptHandlers[irq] = 0;
-+ return E_OK;
-+}
-+
-+t_Error XX_EnableIntr(int irq)
-+{
-+ enable_irq(GetDeviceIrqNum(irq));
-+ return E_OK;
-+}
-+
-+t_Error XX_DisableIntr(int irq)
-+{
-+ disable_irq(GetDeviceIrqNum(irq));
-+ return E_OK;
-+}
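As a usage illustration only (the irq id, handle and ISR below are hypothetical, and t_Isr is assumed to be the void(t_Handle) callback type the wrapper invokes above), a caller registers through XX_SetIntr() and must then enable the line explicitly, since XX_SetIntr() leaves it disabled:

/* Hypothetical ISR; 'h_MyDev' is whatever handle the caller owns. */
static void ExampleIsr(t_Handle h_MyDev)
{
	/* acknowledge/serve the device here */
}

static t_Error example_setup_irq(int irq, t_Handle h_MyDev)
{
	t_Error err = XX_SetIntr(irq, ExampleIsr, h_MyDev);

	if (err != E_OK)
		return err;

	/* XX_SetIntr() calls disable_irq(), so enable explicitly. */
	return XX_EnableIntr(irq);
}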
-+
-+
-+/*****************************************************************************/
-+/* Tasklet Service Routines */
-+/*****************************************************************************/
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
-+typedef struct
-+{
-+ t_Handle h_Data;
-+ void (*f_Callback) (void *);
-+ struct delayed_work dwork;
-+} t_Tasklet;
-+
-+static void GenericTaskletCallback(struct work_struct *p_Work)
-+{
-+ t_Tasklet *p_Task = container_of(p_Work, t_Tasklet, dwork.work);
-+
-+ p_Task->f_Callback(p_Task->h_Data);
-+}
-+#endif /* LINUX_VERSION_CODE */
-+
-+
-+t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ struct work_struct *p_Task;
-+ p_Task = (struct work_struct *)XX_Malloc(sizeof(struct work_struct));
-+ INIT_WORK(p_Task, routine, data);
-+#else
-+ t_Tasklet *p_Task = (t_Tasklet *)XX_Malloc(sizeof(t_Tasklet));
-+ p_Task->h_Data = data;
-+ p_Task->f_Callback = routine;
-+ INIT_DELAYED_WORK(&p_Task->dwork, GenericTaskletCallback);
-+#endif /* LINUX_VERSION_CODE */
-+
-+ return (t_TaskletHandle)p_Task;
-+}
-+
-+
-+void XX_FreeTasklet (t_TaskletHandle h_Tasklet)
-+{
-+ if (h_Tasklet)
-+ XX_Free(h_Tasklet);
-+}
-+
-+int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate)
-+{
-+ int ans;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ if (immediate)
-+ ans = schedule_work(h_Tasklet);
-+ else
-+ ans = schedule_delayed_work(h_Tasklet, 1);
-+#else
-+ if (immediate)
-+ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, 0);
-+ else
-+ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, HZ);
-+#endif /* LINUX_VERSION_CODE */
-+
-+ return ans;
-+}
-+
-+void XX_FlushScheduledTasks(void)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+ flush_scheduled_tasks();
-+#else
-+ flush_scheduled_work();
-+#endif /* LINUX_VERSION_CODE */
-+}
-+
-+int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ return (int)(((struct work_struct *)h_Tasklet)->pending);
-+#else
-+ return (int)delayed_work_pending(&((t_Tasklet *)h_Tasklet)->dwork);
-+#endif /* LINUX_VERSION_CODE */
-+}
-+
-+void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+ ((struct tq_struct *)h_Tasklet)->data = data;
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ ((struct work_struct *)h_Tasklet)->data = data;
-+#else
-+ ((t_Tasklet *)h_Tasklet)->h_Data = data;
-+#endif /* LINUX_VERSION_CODE */
-+}
-+
-+t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ return (t_Handle)(((struct work_struct *)h_Tasklet)->data);
-+#else
-+ return ((t_Tasklet *)h_Tasklet)->h_Data;
-+#endif /* LINUX_VERSION_CODE */
-+}
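A brief, hypothetical usage sketch of this tasklet wrapper (callback and context names are invented): initialize once, schedule as needed, flush before freeing.

/* Hypothetical deferred-work callback. */
static void example_task_fn(void *data)
{
	XX_Print("deferred work ran, data=%p\n", data);
}

static void example_use_tasklet(void *ctx)
{
	t_TaskletHandle h_Task = XX_InitTasklet(example_task_fn, ctx);

	if (!h_Task)
		return;

	XX_ScheduleTask(h_Task, 1);	/* non-zero = schedule immediately */
	XX_FlushScheduledTasks();	/* wait for it to run */
	XX_FreeTasklet(h_Task);
}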
-+
-+
-+/*****************************************************************************/
-+/* Spinlock Service Routines */
-+/*****************************************************************************/
-+
-+t_Handle XX_InitSpinlock(void)
-+{
-+ spinlock_t *p_Spinlock = (spinlock_t *)XX_Malloc(sizeof(spinlock_t));
-+ if (!p_Spinlock)
-+ return NULL;
-+
-+ spin_lock_init(p_Spinlock);
-+
-+ return (t_Handle)p_Spinlock;
-+}
-+
-+void XX_FreeSpinlock(t_Handle h_Spinlock)
-+{
-+ if (h_Spinlock)
-+ XX_Free(h_Spinlock);
-+}
-+
-+void XX_LockSpinlock(t_Handle h_Spinlock)
-+{
-+ spin_lock((spinlock_t *)h_Spinlock);
-+}
-+
-+void XX_UnlockSpinlock(t_Handle h_Spinlock)
-+{
-+ spin_unlock((spinlock_t *)h_Spinlock);
-+}
-+
-+uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock)
-+{
-+ unsigned long intrFlags;
-+ spin_lock_irqsave((spinlock_t *)h_Spinlock, intrFlags);
-+ return intrFlags;
-+}
-+
-+void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags)
-+{
-+ spin_unlock_irqrestore((spinlock_t *)h_Spinlock, (unsigned long)intrFlags);
-+}
-+
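A short, hypothetical caller of the interrupt-safe pair above; the saved flags returned by XX_LockIntrSpinlock() must be handed back on unlock.

static t_Handle h_ExampleLock;	/* hypothetical lock, created once at init */

static t_Error example_lock_init(void)
{
	h_ExampleLock = XX_InitSpinlock();
	return h_ExampleLock ? E_OK : E_NO_MEMORY;
}

static void example_critical_section(void)
{
	uint32_t intrFlags = XX_LockIntrSpinlock(h_ExampleLock);

	/* ... touch data shared with an ISR here ... */

	XX_UnlockIntrSpinlock(h_ExampleLock, intrFlags);
}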
-+
-+/*****************************************************************************/
-+/* Timers Service Routines */
-+/*****************************************************************************/
-+/* The time returned is in millisecond resolution */
-+uint32_t XX_CurrentTime(void)
-+{
-+ return (jiffies*1000)/HZ;
-+}
-+
-+
-+t_Handle XX_CreateTimer(void)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)XX_Malloc(sizeof(struct timer_list));
-+ if (p_Timer)
-+ {
-+ memset(p_Timer, 0, sizeof(struct timer_list));
-+ init_timer(p_Timer);
-+ }
-+ return (t_Handle)p_Timer;
-+}
-+
-+void XX_FreeTimer(t_Handle h_Timer)
-+{
-+ if (h_Timer)
-+ XX_Free(h_Timer);
-+}
-+
-+void XX_StartTimer(t_Handle h_Timer,
-+ uint32_t msecs,
-+ bool periodic,
-+ void (*f_TimerExpired)(t_Handle),
-+ t_Handle h_Arg)
-+{
-+ int tmp_jiffies = (msecs*HZ)/1000;
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ SANITY_CHECK_RETURN((periodic == FALSE), E_NOT_SUPPORTED);
-+
-+ p_Timer->function = (void (*)(unsigned long))f_TimerExpired;
-+ p_Timer->data = (unsigned long)h_Arg;
-+ if ((msecs*HZ)%1000)
-+ tmp_jiffies++;
-+ p_Timer->expires = (jiffies + tmp_jiffies);
-+
-+ add_timer((struct timer_list *)h_Timer);
-+}
-+
-+void XX_SetTimerData(t_Handle h_Timer, t_Handle data)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ p_Timer->data = (unsigned long)data;
-+}
-+
-+t_Handle XX_GetTimerData(t_Handle h_Timer)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ return (t_Handle)p_Timer->data;
-+}
-+
-+uint32_t XX_GetExpirationTime(t_Handle h_Timer)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ return (uint32_t)p_Timer->expires;
-+}
-+
-+void XX_StopTimer(t_Handle h_Timer)
-+{
-+ del_timer((struct timer_list *)h_Timer);
-+}
-+
-+void XX_ModTimer(t_Handle h_Timer, uint32_t msecs)
-+{
-+ int tmp_jiffies = (msecs*HZ)/1000;
-+
-+ if ((msecs*HZ)%1000)
-+ tmp_jiffies++;
-+ mod_timer((struct timer_list *)h_Timer, jiffies + tmp_jiffies);
-+}
-+
-+int XX_TimerIsActive(t_Handle h_Timer)
-+{
-+ return timer_pending((struct timer_list *)h_Timer);
-+}
-+
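A hypothetical one-shot timer round trip; note that XX_StartTimer() only accepts periodic == FALSE (it sanity-checks this), so re-arming is done with XX_ModTimer().

/* Hypothetical expiry callback. */
static void example_timeout(t_Handle h_Arg)
{
	XX_Print("timer fired, arg=%p\n", h_Arg);
}

static void example_use_timer(t_Handle h_Arg)
{
	t_Handle h_Timer = XX_CreateTimer();

	if (!h_Timer)
		return;

	/* one-shot timer, fires in roughly 100 ms */
	XX_StartTimer(h_Timer, 100, FALSE, example_timeout, h_Arg);

	/* ... later: re-arm for another 50 ms, or stop and free ... */
	XX_ModTimer(h_Timer, 50);
	XX_StopTimer(h_Timer);
	XX_FreeTimer(h_Timer);
}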
-+uint32_t XX_Sleep(uint32_t msecs)
-+{
-+ int tmp_jiffies = (msecs*HZ)/1000;
-+
-+ if ((msecs*HZ)%1000)
-+ tmp_jiffies++;
-+ return schedule_timeout(tmp_jiffies);
-+}
-+
-+/* BEWARE: the UDelay routine is BUSY-WAITING! */
-+void XX_UDelay(uint32_t usecs)
-+{
-+ udelay(usecs);
-+}
-+
-+/* TODO: verify that these are correct */
-+#define MSG_BODY_SIZE 512
-+typedef t_Error (t_MsgHandler) (t_Handle h_Mod, uint32_t msgId, uint8_t msgBody[MSG_BODY_SIZE]);
-+typedef void (t_MsgCompletionCB) (t_Handle h_Arg, uint8_t msgBody[MSG_BODY_SIZE]);
-+t_Error XX_SendMessage(char *p_DestAddr,
-+ uint32_t msgId,
-+ uint8_t msgBody[MSG_BODY_SIZE],
-+ t_MsgCompletionCB *f_CompletionCB,
-+ t_Handle h_CBArg);
-+
-+typedef struct {
-+ char *p_Addr;
-+ t_MsgHandler *f_MsgHandlerCB;
-+ t_Handle h_Mod;
-+ t_List node;
-+} t_MsgHndlr;
-+#define MSG_HNDLR_OBJECT(ptr) LIST_OBJECT(ptr, t_MsgHndlr, node)
-+
-+LIST(msgHndlrList);
-+
-+static void EnqueueMsgHndlr(t_MsgHndlr *p_MsgHndlr)
-+{
-+ uint32_t intFlags;
-+
-+ intFlags = XX_DisableAllIntr();
-+ LIST_AddToTail(&p_MsgHndlr->node, &msgHndlrList);
-+ XX_RestoreAllIntr(intFlags);
-+}
-+/* TODO: add this for multi-platform support
-+static t_MsgHndlr * DequeueMsgHndlr(void)
-+{
-+ t_MsgHndlr *p_MsgHndlr = NULL;
-+ uint32_t intFlags;
-+
-+ intFlags = XX_DisableAllIntr();
-+ if (!LIST_IsEmpty(&msgHndlrList))
-+ {
-+ p_MsgHndlr = MSG_HNDLR_OBJECT(msgHndlrList.p_Next);
-+ LIST_DelAndInit(&p_MsgHndlr->node);
-+ }
-+ XX_RestoreAllIntr(intFlags);
-+
-+ return p_MsgHndlr;
-+}
-+*/
-+static t_MsgHndlr * FindMsgHndlr(char *p_Addr)
-+{
-+ t_MsgHndlr *p_MsgHndlr;
-+ t_List *p_Pos;
-+
-+ LIST_FOR_EACH(p_Pos, &msgHndlrList)
-+ {
-+ p_MsgHndlr = MSG_HNDLR_OBJECT(p_Pos);
-+ if (strstr(p_MsgHndlr->p_Addr, p_Addr))
-+ return p_MsgHndlr;
-+ }
-+
-+ return NULL;
-+}
-+
-+t_Error XX_RegisterMessageHandler (char *p_Addr, t_MsgHandler *f_MsgHandlerCB, t_Handle h_Mod)
-+{
-+ t_MsgHndlr *p_MsgHndlr;
-+ uint32_t len;
-+
-+ p_MsgHndlr = (t_MsgHndlr*)XX_Malloc(sizeof(t_MsgHndlr));
-+ if (!p_MsgHndlr)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler object!!!"));
-+ memset(p_MsgHndlr, 0, sizeof(t_MsgHndlr));
-+
-+ len = strlen(p_Addr);
-+ p_MsgHndlr->p_Addr = (char*)XX_Malloc(len+1);
-+ strncpy(p_MsgHndlr->p_Addr,p_Addr, (uint32_t)(len+1));
-+
-+ p_MsgHndlr->f_MsgHandlerCB = f_MsgHandlerCB;
-+ p_MsgHndlr->h_Mod = h_Mod;
-+ INIT_LIST(&p_MsgHndlr->node);
-+ EnqueueMsgHndlr(p_MsgHndlr);
-+
-+ return E_OK;
-+}
-+
-+t_Error XX_UnregisterMessageHandler (char *p_Addr)
-+{
-+ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_Addr);
-+ if (!p_MsgHndlr)
-+ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
-+
-+ LIST_Del(&p_MsgHndlr->node);
-+ XX_Free(p_MsgHndlr->p_Addr);
-+ XX_Free(p_MsgHndlr);
-+
-+ return E_OK;
-+}
-+
-+t_Error XX_SendMessage(char *p_DestAddr,
-+ uint32_t msgId,
-+ uint8_t msgBody[MSG_BODY_SIZE],
-+ t_MsgCompletionCB *f_CompletionCB,
-+ t_Handle h_CBArg)
-+{
-+ t_Error ans;
-+ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_DestAddr);
-+ if (!p_MsgHndlr)
-+ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
-+
-+ ans = p_MsgHndlr->f_MsgHandlerCB(p_MsgHndlr->h_Mod, msgId, msgBody);
-+
-+ if (f_CompletionCB)
-+ f_CompletionCB(h_CBArg, msgBody);
-+
-+ return ans;
-+}
-+
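An illustrative (hypothetical) round trip through the handler registry above: a handler registered for an address string is dispatched synchronously by XX_SendMessage().

/* Hypothetical handler for messages addressed to "example". */
static t_Error example_msg_handler(t_Handle h_Mod, uint32_t msgId,
				   uint8_t msgBody[MSG_BODY_SIZE])
{
	XX_Print("msg 0x%x for module %p\n", msgId, h_Mod);
	return E_OK;
}

static t_Error example_msg_roundtrip(t_Handle h_Mod)
{
	uint8_t body[MSG_BODY_SIZE] = { 0 };
	t_Error err;

	err = XX_RegisterMessageHandler("example", example_msg_handler, h_Mod);
	if (err != E_OK)
		return err;

	err = XX_SendMessage("example", 0x1, body, NULL, NULL);
	XX_UnregisterMessageHandler("example");
	return err;
}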
-+t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH],
-+ t_IpcMsgHandler *f_MsgHandler,
-+ t_Handle h_Module,
-+ uint32_t replyLength)
-+{
-+ UNUSED(addr);UNUSED(f_MsgHandler);UNUSED(h_Module);UNUSED(replyLength);
-+ return E_OK;
-+}
-+
-+t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH])
-+{
-+ UNUSED(addr);
-+ return E_OK;
-+}
-+
-+
-+t_Error XX_IpcSendMessage(t_Handle h_Session,
-+ uint8_t *p_Msg,
-+ uint32_t msgLength,
-+ uint8_t *p_Reply,
-+ uint32_t *p_ReplyLength,
-+ t_IpcMsgCompletion *f_Completion,
-+ t_Handle h_Arg)
-+{
-+ UNUSED(h_Session); UNUSED(p_Msg); UNUSED(msgLength); UNUSED(p_Reply);
-+ UNUSED(p_ReplyLength); UNUSED(f_Completion); UNUSED(h_Arg);
-+ return E_OK;
-+}
-+
-+t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH],
-+ char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH])
-+{
-+ UNUSED(destAddr); UNUSED(srcAddr);
-+ return E_OK;
-+}
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+int GetDeviceIrqNum(int irq)
-+{
-+ struct device_node *iPar;
-+ struct irq_domain *irqHost;
-+ uint32_t hwIrq;
-+
-+ /* Get the interrupt controller */
-+ iPar = of_find_node_by_name(NULL, "mpic");
-+ hwIrq = 0;
-+
-+ ASSERT_COND(iPar != NULL);
-+ /* Get the irq host */
-+ irqHost = irq_find_host(iPar);
-+ of_node_put(iPar);
-+
-+ /* Create irq mapping */
-+ return irq_create_mapping(irqHost, hwIrq);
-+}
-+#else
-+#error "kernel not supported!!!"
-+#endif /* LINUX_VERSION_CODE */
-+
-+void * XX_PhysToVirt(physAddress_t addr)
-+{
-+ return UINT_TO_PTR(SYS_PhysToVirt((uint64_t)addr));
-+}
-+
-+physAddress_t XX_VirtToPhys(void * addr)
-+{
-+ return (physAddress_t)SYS_VirtToPhys(PTR_TO_UINT(addr));
-+}
-+
-+void * xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
-+{
-+ uintptr_t *returnCode, tmp;
-+
-+ if (alignment < sizeof(uintptr_t))
-+ alignment = sizeof(uintptr_t);
-+ size += alignment + sizeof(returnCode);
-+ tmp = (uintptr_t)xx_Malloc(size);
-+ if (tmp == 0)
-+ return NULL;
-+ returnCode = (uintptr_t*)((tmp + alignment + sizeof(returnCode)) & ~((uintptr_t)alignment - 1));
-+ *(returnCode - 1) = tmp;
-+
-+ return (void*)returnCode;
-+}
-+
-+void xx_FreeSmart(void *p)
-+{
-+ xx_Free((void*)(*((uintptr_t *)(p) - 1)));
-+}
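To make the over-allocate-and-stash scheme above concrete, a small hypothetical caller (partition id 0 and the sizes are illustrative): the word just below the returned pointer holds the original kmalloc() address, which is how xx_FreeSmart() finds the real block.

static void example_aligned_alloc(void)
{
	/* 100 bytes, aligned to a 64-byte boundary */
	void *p = xx_MallocSmart(100, 0, 64);

	if (!p)
		return;

	ASSERT_COND(((uintptr_t)p & 63) == 0);	/* alignment holds */
	xx_FreeSmart(p);			/* frees the original block */
}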
---- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_fman/src/xx/xx_linux.c
-@@ -0,0 +1,918 @@
-+/*
-+ * Copyright 2008-2012 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File xx_linux.c
-+
-+ @Description XX routines implementation for Linux.
-+*//***************************************************************************/
-+#include <linux/version.h>
-+
-+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+#ifdef MODVERSIONS
-+#include <config/modversions.h>
-+#endif /* MODVERSIONS */
-+
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/ptrace.h>
-+#include <linux/errno.h>
-+#include <linux/ioport.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/fs.h>
-+#include <linux/vmalloc.h>
-+#include <linux/init.h>
-+#include <linux/timer.h>
-+#include <linux/spinlock.h>
-+#include <linux/delay.h>
-+#include <linux/proc_fs.h>
-+#include <linux/smp.h>
-+#include <linux/of.h>
-+#ifdef CONFIG_FMAN_ARM
-+#include <linux/irqdomain.h>
-+#endif
-+
-+#include <linux/workqueue.h>
-+
-+#ifdef BIGPHYSAREA_ENABLE
-+#include <linux/bigphysarea.h>
-+#endif /* BIGPHYSAREA_ENABLE */
-+
-+#ifndef CONFIG_FMAN_ARM
-+#include <sysdev/fsl_soc.h>
-+#endif
-+#include <asm/pgtable.h>
-+#include <asm/irq.h>
-+#include <asm/bitops.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/string.h>
-+#include <asm/byteorder.h>
-+#include <asm/page.h>
-+
-+#include "error_ext.h"
-+#include "std_ext.h"
-+#include "list_ext.h"
-+#include "mm_ext.h"
-+#include "sys_io_ext.h"
-+#include "xx.h"
-+
-+
-+#define __ERR_MODULE__ MODULE_UNKNOWN
-+
-+#ifdef BIGPHYSAREA_ENABLE
-+#define MAX_ALLOCATION_SIZE 128 * 1024 /* Maximum size allocated with kmalloc is 128K */
-+
-+
-+/* TODO: large allocations => use big phys area */
-+/******************************************************************************
-+ * routine: get_nr_pages
-+ *
-+ * description:
-+ * calculates the number of memory pages for a given size (in bytes)
-+ *
-+ * arguments:
-+ * size - the number of bytes
-+ *
-+ * return code:
-+ * The number of pages
-+ *
-+ *****************************************************************************/
-+static __inline__ uint32_t get_nr_pages (uint32_t size)
-+{
-+    return (uint32_t)((size >> PAGE_SHIFT) + (size & (PAGE_SIZE - 1) ? 1 : 0));
-+}
-+
-+static bool in_big_phys_area (uint32_t addr)
-+{
-+ uint32_t base, size;
-+
-+ bigphysarea_get_details (&base, &size);
-+ return ((addr >= base) && (addr < base + size));
-+}
-+#endif /* BIGPHYSAREA_ENABLE */
-+
-+void * xx_Malloc(uint32_t n)
-+{
-+ void *a;
-+ uint32_t flags;
-+
-+ flags = XX_DisableAllIntr();
-+#ifdef BIGPHYSAREA_ENABLE
-+ if (n >= MAX_ALLOCATION_SIZE)
-+ a = (void*)bigphysarea_alloc_pages(get_nr_pages(n), 0, GFP_ATOMIC);
-+ else
-+#endif /* BIGPHYSAREA_ENABLE */
-+ a = (void *)kmalloc((uint32_t)n, GFP_ATOMIC);
-+ if (!a)
-+ XX_Print("No memory for XX_Malloc\n");
-+ XX_RestoreAllIntr(flags);
-+
-+ return a;
-+}
-+
-+void xx_Free(void *p)
-+{
-+#ifdef BIGPHYSAREA_ENABLE
-+ if (in_big_phys_area ((uint32_t)p))
-+ bigphysarea_free_pages(p);
-+ else
-+#endif /* BIGPHYSAREA_ENABLE */
-+ kfree(p);
-+}
-+
-+void XX_Exit(int status)
-+{
-+ WARN(1, "\n\nFMD: fatal error, driver can't go on!!!\n\n");
-+}
-+
-+#define BUF_SIZE 512
-+void XX_Print(char *str, ...)
-+{
-+ va_list args;
-+#ifdef CONFIG_SMP
-+ char buf[BUF_SIZE];
-+#endif /* CONFIG_SMP */
-+
-+ va_start(args, str);
-+#ifdef CONFIG_SMP
-+ if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE)
-+        printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completely.\n", BUF_SIZE);
-+ printk(KERN_CRIT "cpu%d/%d: %s", raw_smp_processor_id(), NR_CPUS, buf);
-+#else
-+ vprintk(str, args);
-+#endif /* CONFIG_SMP */
-+ va_end(args);
-+}
-+
-+void XX_Fprint(void *file, char *str, ...)
-+{
-+ va_list args;
-+#ifdef CONFIG_SMP
-+ char buf[BUF_SIZE];
-+#endif /* CONFIG_SMP */
-+
-+ va_start(args, str);
-+#ifdef CONFIG_SMP
-+ if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE)
-+        printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completely.\n", BUF_SIZE);
-+ printk (KERN_CRIT "cpu%d/%d: %s", raw_smp_processor_id(), NR_CPUS, buf);
-+
-+#else
-+ vprintk(str, args);
-+#endif /* CONFIG_SMP */
-+ va_end(args);
-+}
-+
-+#ifdef DEBUG_XX_MALLOC
-+typedef void (*t_ffn)(void *);
-+typedef struct {
-+ t_ffn f_free;
-+ void *mem;
-+ char *fname;
-+ int fline;
-+ uint32_t size;
-+ t_List node;
-+} t_MemDebug;
-+#define MEMDBG_OBJECT(p_List) LIST_OBJECT(p_List, t_MemDebug, node)
-+
-+LIST(memDbgLst);
-+
-+
-+void * XX_MallocDebug(uint32_t size, char *fname, int line)
-+{
-+ void *mem;
-+ t_MemDebug *p_MemDbg;
-+
-+ p_MemDbg = (t_MemDebug *)xx_Malloc(sizeof(t_MemDebug));
-+ if (p_MemDbg == NULL)
-+ return NULL;
-+
-+ mem = xx_Malloc(size);
-+ if (mem == NULL)
-+ {
-+ XX_Free(p_MemDbg);
-+ return NULL;
-+ }
-+
-+ INIT_LIST(&p_MemDbg->node);
-+ p_MemDbg->f_free = xx_Free;
-+ p_MemDbg->mem = mem;
-+ p_MemDbg->fname = fname;
-+ p_MemDbg->fline = line;
-+ p_MemDbg->size = size+sizeof(t_MemDebug);
-+ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
-+
-+ return mem;
-+}
-+
-+void * XX_MallocSmartDebug(uint32_t size,
-+ int memPartitionId,
-+ uint32_t align,
-+ char *fname,
-+ int line)
-+{
-+ void *mem;
-+ t_MemDebug *p_MemDbg;
-+
-+ p_MemDbg = (t_MemDebug *)XX_Malloc(sizeof(t_MemDebug));
-+ if (p_MemDbg == NULL)
-+ return NULL;
-+
-+ mem = xx_MallocSmart((uint32_t)size, memPartitionId, align);
-+ if (mem == NULL)
-+ {
-+ XX_Free(p_MemDbg);
-+ return NULL;
-+ }
-+
-+ INIT_LIST(&p_MemDbg->node);
-+ p_MemDbg->f_free = xx_FreeSmart;
-+ p_MemDbg->mem = mem;
-+ p_MemDbg->fname = fname;
-+ p_MemDbg->fline = line;
-+ p_MemDbg->size = size+sizeof(t_MemDebug);
-+ LIST_AddToTail(&p_MemDbg->node, &memDbgLst);
-+
-+ return mem;
-+}
-+
-+static void debug_free(void *mem)
-+{
-+ t_List *p_MemDbgLh = NULL;
-+ t_MemDebug *p_MemDbg;
-+ bool found = FALSE;
-+
-+ if (LIST_IsEmpty(&memDbgLst))
-+ {
-+ REPORT_ERROR(MAJOR, E_ALREADY_FREE, ("Unbalanced free (0x%08x)", mem));
-+ return;
-+ }
-+
-+ LIST_FOR_EACH(p_MemDbgLh, &memDbgLst)
-+ {
-+ p_MemDbg = MEMDBG_OBJECT(p_MemDbgLh);
-+ if (p_MemDbg->mem == mem)
-+ {
-+ found = TRUE;
-+ break;
-+ }
-+ }
-+
-+ if (!found)
-+ {
-+ REPORT_ERROR(MAJOR, E_NOT_FOUND,
-+ ("Attempt to free unallocated address (0x%08x)",mem));
-+ dump_stack();
-+ return;
-+ }
-+
-+ LIST_Del(p_MemDbgLh);
-+ p_MemDbg->f_free(mem);
-+ p_MemDbg->f_free(p_MemDbg);
-+}
-+
-+void XX_FreeSmart(void *p)
-+{
-+ debug_free(p);
-+}
-+
-+
-+void XX_Free(void *p)
-+{
-+ debug_free(p);
-+}
-+
-+#else /* not DEBUG_XX_MALLOC */
-+void * XX_Malloc(uint32_t size)
-+{
-+ return xx_Malloc(size);
-+}
-+
-+void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
-+{
-+ return xx_MallocSmart(size,memPartitionId, alignment);
-+}
-+
-+void XX_FreeSmart(void *p)
-+{
-+ xx_FreeSmart(p);
-+}
-+
-+
-+void XX_Free(void *p)
-+{
-+ xx_Free(p);
-+}
-+#endif /* not DEBUG_XX_MALLOC */
-+
-+
-+#if (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0))
-+void XX_EventById(uint32_t event, t_Handle appId, uint16_t flags, char *msg)
-+{
-+ e_Event eventCode = (e_Event)event;
-+
-+ UNUSED(eventCode);
-+ UNUSED(appId);
-+ UNUSED(flags);
-+ UNUSED(msg);
-+}
-+#endif /* (defined(REPORT_EVENTS) && ... */
-+
-+
-+uint32_t XX_DisableAllIntr(void)
-+{
-+ unsigned long flags;
-+
-+#ifdef local_irq_save_nort
-+ local_irq_save_nort(flags);
-+#else
-+ local_irq_save(flags);
-+#endif
-+
-+ return (uint32_t)flags;
-+}
-+
-+void XX_RestoreAllIntr(uint32_t flags)
-+{
-+#ifdef local_irq_restore_nort
-+ local_irq_restore_nort((unsigned long)flags);
-+#else
-+ local_irq_restore((unsigned long)flags);
-+#endif
-+}
-+
-+t_Error XX_Call( uint32_t qid, t_Error (* f)(t_Handle), t_Handle id, t_Handle appId, uint16_t flags )
-+{
-+ UNUSED(qid);
-+ UNUSED(appId);
-+ UNUSED(flags);
-+
-+ return f(id);
-+}
-+
-+int XX_IsICacheEnable(void)
-+{
-+ return TRUE;
-+}
-+
-+int XX_IsDCacheEnable(void)
-+{
-+ return TRUE;
-+}
-+
-+
-+typedef struct {
-+ t_Isr *f_Isr;
-+ t_Handle handle;
-+} t_InterruptHandler;
-+
-+
-+t_Handle interruptHandlers[0x00010000];
-+
-+#ifdef CONFIG_FMAN_ARM
-+static irqreturn_t LinuxInterruptHandler (int irq, void *dev_id)
-+{
-+ t_InterruptHandler *p_IntrHndl = (t_InterruptHandler *)dev_id;
-+ p_IntrHndl->f_Isr(p_IntrHndl->handle);
-+ return IRQ_HANDLED;
-+}
-+#endif
-+
-+t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle)
-+{
-+#ifdef CONFIG_FMAN_ARM
-+ const char *device;
-+ t_InterruptHandler *p_IntrHndl;
-+
-+ device = GetDeviceName(irq);
-+ if (device == NULL)
-+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Interrupt source - %d", irq));
-+
-+ p_IntrHndl = (t_InterruptHandler *)XX_Malloc(sizeof(t_InterruptHandler));
-+ if (p_IntrHndl == NULL)
-+ RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
-+ p_IntrHndl->f_Isr = f_Isr;
-+ p_IntrHndl->handle = handle;
-+ interruptHandlers[irq] = p_IntrHndl;
-+
-+ if (request_irq(GetDeviceIrqNum(irq), LinuxInterruptHandler, 0, device, p_IntrHndl) < 0)
-+ RETURN_ERROR(MAJOR, E_BUSY, ("Can't get IRQ %s\n", device));
-+ disable_irq(GetDeviceIrqNum(irq));
-+#endif
-+ return E_OK;
-+}
-+
-+t_Error XX_FreeIntr(int irq)
-+{
-+ t_InterruptHandler *p_IntrHndl = interruptHandlers[irq];
-+ free_irq(GetDeviceIrqNum(irq), p_IntrHndl);
-+ XX_Free(p_IntrHndl);
-+ interruptHandlers[irq] = 0;
-+ return E_OK;
-+}
-+
-+t_Error XX_EnableIntr(int irq)
-+{
-+ enable_irq(GetDeviceIrqNum(irq));
-+ return E_OK;
-+}
-+
-+t_Error XX_DisableIntr(int irq)
-+{
-+ disable_irq(GetDeviceIrqNum(irq));
-+ return E_OK;
-+}
-+
-+
-+/*****************************************************************************/
-+/* Tasklet Service Routines */
-+/*****************************************************************************/
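-+/*
-+ * "Tasklets" are emulated with delayed workqueue items: on kernels >= 2.6.20
-+ * each handle wraps a delayed_work plus the callback and its data; scheduling
-+ * with 'immediate' queues the work with zero delay, otherwise with a delay of
-+ * HZ jiffies (one second).
-+ */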
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
-+typedef struct
-+{
-+ t_Handle h_Data;
-+ void (*f_Callback) (void *);
-+ struct delayed_work dwork;
-+} t_Tasklet;
-+
-+static void GenericTaskletCallback(struct work_struct *p_Work)
-+{
-+ t_Tasklet *p_Task = container_of(p_Work, t_Tasklet, dwork.work);
-+
-+ p_Task->f_Callback(p_Task->h_Data);
-+}
-+#endif /* LINUX_VERSION_CODE */
-+
-+
-+t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ struct work_struct *p_Task;
-+ p_Task = (struct work_struct *)XX_Malloc(sizeof(struct work_struct));
-+ INIT_WORK(p_Task, routine, data);
-+#else
-+ t_Tasklet *p_Task = (t_Tasklet *)XX_Malloc(sizeof(t_Tasklet));
-+ p_Task->h_Data = data;
-+ p_Task->f_Callback = routine;
-+ INIT_DELAYED_WORK(&p_Task->dwork, GenericTaskletCallback);
-+#endif /* LINUX_VERSION_CODE */
-+
-+ return (t_TaskletHandle)p_Task;
-+}
-+
-+
-+void XX_FreeTasklet (t_TaskletHandle h_Tasklet)
-+{
-+ if (h_Tasklet)
-+ XX_Free(h_Tasklet);
-+}
-+
-+int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate)
-+{
-+ int ans;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ if (immediate)
-+ ans = schedule_work(h_Tasklet);
-+ else
-+ ans = schedule_delayed_work(h_Tasklet, 1);
-+#else
-+ if (immediate)
-+ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, 0);
-+ else
-+ ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, HZ);
-+#endif /* LINUX_VERSION_CODE */
-+
-+ return ans;
-+}
-+
-+void XX_FlushScheduledTasks(void)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+ flush_scheduled_tasks();
-+#else
-+ flush_scheduled_work();
-+#endif /* LINUX_VERSION_CODE */
-+}
-+
-+int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ return (int)(((struct work_struct *)h_Tasklet)->pending);
-+#else
-+ return (int)delayed_work_pending(&((t_Tasklet *)h_Tasklet)->dwork);
-+#endif /* LINUX_VERSION_CODE */
-+}
-+
-+void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+ ((struct tq_struct *)h_Tasklet)->data = data;
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ ((struct work_struct *)h_Tasklet)->data = data;
-+#else
-+ ((t_Tasklet *)h_Tasklet)->h_Data = data;
-+#endif /* LINUX_VERSION_CODE */
-+}
-+
-+t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet)
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+ return (t_Handle)(((struct work_struct *)h_Tasklet)->data);
-+#else
-+ return ((t_Tasklet *)h_Tasklet)->h_Data;
-+#endif /* LINUX_VERSION_CODE */
-+}
-+
-+
-+/*****************************************************************************/
-+/* Spinlock Service Routines */
-+/*****************************************************************************/
-+
-+t_Handle XX_InitSpinlock(void)
-+{
-+ spinlock_t *p_Spinlock = (spinlock_t *)XX_Malloc(sizeof(spinlock_t));
-+ if (!p_Spinlock)
-+ return NULL;
-+
-+ spin_lock_init(p_Spinlock);
-+
-+ return (t_Handle)p_Spinlock;
-+}
-+
-+void XX_FreeSpinlock(t_Handle h_Spinlock)
-+{
-+ if (h_Spinlock)
-+ XX_Free(h_Spinlock);
-+}
-+
-+void XX_LockSpinlock(t_Handle h_Spinlock)
-+{
-+ spin_lock((spinlock_t *)h_Spinlock);
-+}
-+
-+void XX_UnlockSpinlock(t_Handle h_Spinlock)
-+{
-+ spin_unlock((spinlock_t *)h_Spinlock);
-+}
-+
-+uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock)
-+{
-+ unsigned long intrFlags;
-+ spin_lock_irqsave((spinlock_t *)h_Spinlock, intrFlags);
-+ return intrFlags;
-+}
-+
-+void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags)
-+{
-+ spin_unlock_irqrestore((spinlock_t *)h_Spinlock, (unsigned long)intrFlags);
-+}
-+
-+
-+/*****************************************************************************/
-+/* Timers Service Routines */
-+/*****************************************************************************/
-+/* Returns the current time, in millisecond resolution */
-+uint32_t XX_CurrentTime(void)
-+{
-+ return (jiffies*1000)/HZ;
-+}
-+
-+
-+t_Handle XX_CreateTimer(void)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)XX_Malloc(sizeof(struct timer_list));
-+ if (p_Timer)
-+ {
-+ memset(p_Timer, 0, sizeof(struct timer_list));
-+ init_timer(p_Timer);
-+ }
-+ return (t_Handle)p_Timer;
-+}
-+
-+void XX_FreeTimer(t_Handle h_Timer)
-+{
-+ if (h_Timer)
-+ XX_Free(h_Timer);
-+}
-+
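-+/*
-+ * Millisecond intervals are converted to jiffies rounded up, so a timer never
-+ * fires earlier than requested.
-+ */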
-+void XX_StartTimer(t_Handle h_Timer,
-+ uint32_t msecs,
-+ bool periodic,
-+ void (*f_TimerExpired)(t_Handle),
-+ t_Handle h_Arg)
-+{
-+ int tmp_jiffies = (msecs*HZ)/1000;
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ SANITY_CHECK_RETURN((periodic == FALSE), E_NOT_SUPPORTED);
-+
-+ p_Timer->function = (void (*)(unsigned long))f_TimerExpired;
-+ p_Timer->data = (unsigned long)h_Arg;
-+ if ((msecs*HZ)%1000)
-+ tmp_jiffies++;
-+ p_Timer->expires = (jiffies + tmp_jiffies);
-+
-+ add_timer((struct timer_list *)h_Timer);
-+}
-+
-+void XX_SetTimerData(t_Handle h_Timer, t_Handle data)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ p_Timer->data = (unsigned long)data;
-+}
-+
-+t_Handle XX_GetTimerData(t_Handle h_Timer)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ return (t_Handle)p_Timer->data;
-+}
-+
-+uint32_t XX_GetExpirationTime(t_Handle h_Timer)
-+{
-+ struct timer_list *p_Timer = (struct timer_list *)h_Timer;
-+
-+ return (uint32_t)p_Timer->expires;
-+}
-+
-+void XX_StopTimer(t_Handle h_Timer)
-+{
-+ del_timer((struct timer_list *)h_Timer);
-+}
-+
-+void XX_ModTimer(t_Handle h_Timer, uint32_t msecs)
-+{
-+ int tmp_jiffies = (msecs*HZ)/1000;
-+
-+ if ((msecs*HZ)%1000)
-+ tmp_jiffies++;
-+ mod_timer((struct timer_list *)h_Timer, jiffies + tmp_jiffies);
-+}
-+
-+int XX_TimerIsActive(t_Handle h_Timer)
-+{
-+ return timer_pending((struct timer_list *)h_Timer);
-+}
-+
-+uint32_t XX_Sleep(uint32_t msecs)
-+{
-+ int tmp_jiffies = (msecs*HZ)/1000;
-+
-+ if ((msecs*HZ)%1000)
-+ tmp_jiffies++;
-+ return schedule_timeout(tmp_jiffies);
-+}
-+
-+/* BEWARE: the UDelay routine is BUSY WAITING! */
-+void XX_UDelay(uint32_t usecs)
-+{
-+ udelay(usecs);
-+}
-+
-+/* TODO: verify that these are correct */
-+#define MSG_BODY_SIZE 512
-+typedef t_Error (t_MsgHandler) (t_Handle h_Mod, uint32_t msgId, uint8_t msgBody[MSG_BODY_SIZE]);
-+typedef void (t_MsgCompletionCB) (t_Handle h_Arg, uint8_t msgBody[MSG_BODY_SIZE]);
-+t_Error XX_SendMessage(char *p_DestAddr,
-+ uint32_t msgId,
-+ uint8_t msgBody[MSG_BODY_SIZE],
-+ t_MsgCompletionCB *f_CompletionCB,
-+ t_Handle h_CBArg);
-+
-+typedef struct {
-+ char *p_Addr;
-+ t_MsgHandler *f_MsgHandlerCB;
-+ t_Handle h_Mod;
-+ t_List node;
-+} t_MsgHndlr;
-+#define MSG_HNDLR_OBJECT(ptr) LIST_OBJECT(ptr, t_MsgHndlr, node)
-+
-+LIST(msgHndlrList);
-+
-+static void EnqueueMsgHndlr(t_MsgHndlr *p_MsgHndlr)
-+{
-+ uint32_t intFlags;
-+
-+ intFlags = XX_DisableAllIntr();
-+ LIST_AddToTail(&p_MsgHndlr->node, &msgHndlrList);
-+ XX_RestoreAllIntr(intFlags);
-+}
-+/* TODO: add this for multi-platform support
-+static t_MsgHndlr * DequeueMsgHndlr(void)
-+{
-+ t_MsgHndlr *p_MsgHndlr = NULL;
-+ uint32_t intFlags;
-+
-+ intFlags = XX_DisableAllIntr();
-+ if (!LIST_IsEmpty(&msgHndlrList))
-+ {
-+ p_MsgHndlr = MSG_HNDLR_OBJECT(msgHndlrList.p_Next);
-+ LIST_DelAndInit(&p_MsgHndlr->node);
-+ }
-+ XX_RestoreAllIntr(intFlags);
-+
-+ return p_MsgHndlr;
-+}
-+*/
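-+/*
-+ * Note: lookup uses strstr(), so a registered address matches whenever it
-+ * contains the requested address as a substring, not only on an exact match.
-+ */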
-+static t_MsgHndlr * FindMsgHndlr(char *p_Addr)
-+{
-+ t_MsgHndlr *p_MsgHndlr;
-+ t_List *p_Pos;
-+
-+ LIST_FOR_EACH(p_Pos, &msgHndlrList)
-+ {
-+ p_MsgHndlr = MSG_HNDLR_OBJECT(p_Pos);
-+ if (strstr(p_MsgHndlr->p_Addr, p_Addr))
-+ return p_MsgHndlr;
-+ }
-+
-+ return NULL;
-+}
-+
-+t_Error XX_RegisterMessageHandler (char *p_Addr, t_MsgHandler *f_MsgHandlerCB, t_Handle h_Mod)
-+{
-+ t_MsgHndlr *p_MsgHndlr;
-+ uint32_t len;
-+
-+ p_MsgHndlr = (t_MsgHndlr*)XX_Malloc(sizeof(t_MsgHndlr));
-+ if (!p_MsgHndlr)
-+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler object!!!"));
-+ memset(p_MsgHndlr, 0, sizeof(t_MsgHndlr));
-+
-+ len = strlen(p_Addr);
-+ p_MsgHndlr->p_Addr = (char*)XX_Malloc(len+1);
-+ strncpy(p_MsgHndlr->p_Addr,p_Addr, (uint32_t)(len+1));
-+
-+ p_MsgHndlr->f_MsgHandlerCB = f_MsgHandlerCB;
-+ p_MsgHndlr->h_Mod = h_Mod;
-+ INIT_LIST(&p_MsgHndlr->node);
-+ EnqueueMsgHndlr(p_MsgHndlr);
-+
-+ return E_OK;
-+}
-+
-+t_Error XX_UnregisterMessageHandler (char *p_Addr)
-+{
-+ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_Addr);
-+ if (!p_MsgHndlr)
-+ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
-+
-+ LIST_Del(&p_MsgHndlr->node);
-+ XX_Free(p_MsgHndlr->p_Addr);
-+ XX_Free(p_MsgHndlr);
-+
-+ return E_OK;
-+}
-+
-+t_Error XX_SendMessage(char *p_DestAddr,
-+ uint32_t msgId,
-+ uint8_t msgBody[MSG_BODY_SIZE],
-+ t_MsgCompletionCB *f_CompletionCB,
-+ t_Handle h_CBArg)
-+{
-+ t_Error ans;
-+ t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_DestAddr);
-+ if (!p_MsgHndlr)
-+ RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!"));
-+
-+ ans = p_MsgHndlr->f_MsgHandlerCB(p_MsgHndlr->h_Mod, msgId, msgBody);
-+
-+ if (f_CompletionCB)
-+ f_CompletionCB(h_CBArg, msgBody);
-+
-+ return ans;
-+}
-+
-+t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH],
-+ t_IpcMsgHandler *f_MsgHandler,
-+ t_Handle h_Module,
-+ uint32_t replyLength)
-+{
-+ UNUSED(addr);UNUSED(f_MsgHandler);UNUSED(h_Module);UNUSED(replyLength);
-+ return E_OK;
-+}
-+
-+t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH])
-+{
-+ UNUSED(addr);
-+ return E_OK;
-+}
-+
-+
-+t_Error XX_IpcSendMessage(t_Handle h_Session,
-+ uint8_t *p_Msg,
-+ uint32_t msgLength,
-+ uint8_t *p_Reply,
-+ uint32_t *p_ReplyLength,
-+ t_IpcMsgCompletion *f_Completion,
-+ t_Handle h_Arg)
-+{
-+ UNUSED(h_Session); UNUSED(p_Msg); UNUSED(msgLength); UNUSED(p_Reply);
-+ UNUSED(p_ReplyLength); UNUSED(f_Completion); UNUSED(h_Arg);
-+ return E_OK;
-+}
-+
-+t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH],
-+ char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH])
-+{
-+ UNUSED(destAddr); UNUSED(srcAddr);
-+ return E_OK;
-+}
-+
-+/* Introduced only because of the PRINT_FMT_PARAMS define */
-+uint32_t E500_GetId(void)
-+{
-+ return raw_smp_processor_id();
-+}
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+int GetDeviceIrqNum(int irq)
-+{
-+ struct device_node *iPar;
-+ struct irq_domain *irqHost;
-+ uint32_t hwIrq;
-+
-+ /* Get the interrupt controller */
-+ iPar = of_find_node_by_name(NULL, "mpic");
-+ hwIrq = 0;
-+
-+ ASSERT_COND(iPar != NULL);
-+ /* Get the irq host */
-+ irqHost = irq_find_host(iPar);
-+ of_node_put(iPar);
-+
-+ /* Create irq mapping */
-+ return irq_create_mapping(irqHost, hwIrq);
-+}
-+#else
-+#error "kernel not supported!!!"
-+#endif /* LINUX_VERSION_CODE */
-+
-+void * XX_PhysToVirt(physAddress_t addr)
-+{
-+ return UINT_TO_PTR(SYS_PhysToVirt((uint64_t)addr));
-+}
-+
-+physAddress_t XX_VirtToPhys(void * addr)
-+{
-+ return (physAddress_t)SYS_VirtToPhys(PTR_TO_UINT(addr));
-+}
-+
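-+/*
-+ * Aligned allocation: over-allocate by 'alignment' plus one pointer, round the
-+ * returned address up to the requested alignment, and stash the original
-+ * malloc pointer in the word immediately preceding it so that xx_FreeSmart()
-+ * can recover and free the real allocation.
-+ */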
-+void * xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment)
-+{
-+ uintptr_t *returnCode, tmp;
-+
-+ if (alignment < sizeof(uintptr_t))
-+ alignment = sizeof(uintptr_t);
-+ size += alignment + sizeof(returnCode);
-+ tmp = (uintptr_t)xx_Malloc(size);
-+ if (tmp == 0)
-+ return NULL;
-+ returnCode = (uintptr_t*)((tmp + alignment + sizeof(returnCode)) & ~((uintptr_t)alignment - 1));
-+ *(returnCode - 1) = tmp;
-+
-+ return (void*)returnCode;
-+}
-+
-+void xx_FreeSmart(void *p)
-+{
-+ xx_Free((void*)(*((uintptr_t *)(p) - 1)));
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/Kconfig
-@@ -0,0 +1,228 @@
-+config FSL_SDK_DPA
-+ bool "Freescale Datapath Queue and Buffer management"
-+ depends on !FSL_DPAA
-+ select FSL_QMAN_FQ_LOOKUP if PPC64
-+ select FSL_QMAN_FQ_LOOKUP if ARM64
-+
-+
-+menu "Freescale Datapath QMan/BMan options"
-+ depends on FSL_SDK_DPA
-+
-+config FSL_DPA_CHECKING
-+ bool "additional driver checking"
-+ default n
-+ ---help---
-+ Compiles in additional checks to sanity-check the drivers and any
-+ use of them by other code. Not recommended for performance.
-+
-+config FSL_DPA_CAN_WAIT
-+ bool
-+ default y
-+
-+config FSL_DPA_CAN_WAIT_SYNC
-+ bool
-+ default y
-+
-+config FSL_DPA_PIRQ_FAST
-+ bool
-+ default y
-+
-+config FSL_DPA_PIRQ_SLOW
-+ bool
-+ default y
-+
-+config FSL_DPA_PORTAL_SHARE
-+ bool
-+ default y
-+
-+config FSL_SDK_BMAN
-+ bool "Freescale Buffer Manager (BMan) support"
-+ default y
-+
-+if FSL_SDK_BMAN
-+
-+config FSL_BMAN_CONFIG
-+ bool "BMan device management"
-+ default y
-+ ---help---
-+ If this linux image is running natively, you need this option. If this
-+ linux image is running as a guest OS under the hypervisor, only one
-+ guest OS ("the control plane") needs this option.
-+
-+config FSL_BMAN_TEST
-+ tristate "BMan self-tests"
-+ default n
-+ ---help---
-+ This option compiles self-test code for BMan.
-+
-+config FSL_BMAN_TEST_HIGH
-+ bool "BMan high-level self-test"
-+ depends on FSL_BMAN_TEST
-+ default y
-+ ---help---
-+ This requires the presence of cpu-affine portals, and performs
-+ high-level API testing with them (whichever portal(s) are affine to
-+ the cpu(s) the test executes on).
-+
-+config FSL_BMAN_TEST_THRESH
-+ bool "BMan threshold test"
-+ depends on FSL_BMAN_TEST
-+ default y
-+ ---help---
-+ Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded
-+ before multiple threads (one per cpu) create pool objects to track
-+ depletion state changes. The pool is then drained to empty by a
-+ "drainer" thread, and the other threads that they observe exactly
-+ the depletion state changes that are expected.
-+
-+config FSL_BMAN_DEBUGFS
-+ tristate "BMan debugfs interface"
-+ depends on DEBUG_FS
-+ default y
-+ ---help---
-+ This option compiles debugfs code for BMan.
-+
-+endif # FSL_SDK_BMAN
-+
-+config FSL_SDK_QMAN
-+ bool "Freescale Queue Manager (QMan) support"
-+ default y
-+
-+if FSL_SDK_QMAN
-+
-+config FSL_QMAN_POLL_LIMIT
-+ int
-+ default 32
-+
-+config FSL_QMAN_CONFIG
-+ bool "QMan device management"
-+ default y
-+ ---help---
-+ If this linux image is running natively, you need this option. If this
-+ linux image is running as a guest OS under the hypervisor, only one
-+ guest OS ("the control plane") needs this option.
-+
-+config FSL_QMAN_TEST
-+ tristate "QMan self-tests"
-+ default n
-+ ---help---
-+ This option compiles self-test code for QMan.
-+
-+config FSL_QMAN_TEST_STASH_POTATO
-+ bool "QMan 'hot potato' data-stashing self-test"
-+ depends on FSL_QMAN_TEST
-+ default y
-+ ---help---
-+ This performs a "hot potato" style test enqueuing/dequeuing a frame
-+ across a series of FQs scheduled to different portals (and cpus), with
-+ DQRR, data and context stashing always on.
-+
-+config FSL_QMAN_TEST_HIGH
-+ bool "QMan high-level self-test"
-+ depends on FSL_QMAN_TEST
-+ default y
-+ ---help---
-+ This requires the presence of cpu-affine portals, and performs
-+ high-level API testing with them (whichever portal(s) are affine to
-+ the cpu(s) the test executes on).
-+
-+config FSL_QMAN_DEBUGFS
-+ tristate "QMan debugfs interface"
-+ depends on DEBUG_FS
-+ default y
-+ ---help---
-+ This option compiles debugfs code for QMan.
-+
-+# H/w settings that can be hard-coded for now.
-+config FSL_QMAN_FQD_SZ
-+ int "size of Frame Queue Descriptor region"
-+ default 10
-+ ---help---
-+ This is the size of the FQD region defined as: PAGE_SIZE * (2^value)
-+ ex: 10 => PAGE_SIZE * (2^10)
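-+ (with 4 KiB pages, 10 therefore gives a 4 MiB FQD region)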
-+ Note: Default device-trees now require a minimum Kconfig setting of 10.
-+
-+config FSL_QMAN_PFDR_SZ
-+ int "size of the PFDR pool"
-+ default 13
-+ ---help---
-+ This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value)
-+ ex: 13 => PAGE_SIZE * (2^13)
-+
-+# Corenet initiator settings. Stash request queues are 4-deep to match cores'
-+# ability to snarf. Stash priority is 3, other priorities are 2.
-+config FSL_QMAN_CI_SCHED_CFG_SRCCIV
-+ int
-+ depends on FSL_QMAN_CONFIG
-+ default 4
-+config FSL_QMAN_CI_SCHED_CFG_SRQ_W
-+ int
-+ depends on FSL_QMAN_CONFIG
-+ default 3
-+config FSL_QMAN_CI_SCHED_CFG_RW_W
-+ int
-+ depends on FSL_QMAN_CONFIG
-+ default 2
-+config FSL_QMAN_CI_SCHED_CFG_BMAN_W
-+ int
-+ depends on FSL_QMAN_CONFIG
-+ default 2
-+
-+# portal interrupt settings
-+config FSL_QMAN_PIRQ_DQRR_ITHRESH
-+ int
-+ default 12
-+config FSL_QMAN_PIRQ_MR_ITHRESH
-+ int
-+ default 4
-+config FSL_QMAN_PIRQ_IPERIOD
-+ int
-+ default 100
-+
-+# 64 bit kernel support
-+config FSL_QMAN_FQ_LOOKUP
-+ bool
-+ default n
-+
-+config QMAN_CEETM_UPDATE_PERIOD
-+ int "Token update period for shaping, in nanoseconds"
-+ default 1000
-+ ---help---
-+ Traffic shaping works by performing token calculations (using
-+ credits) on shaper instances periodically. This update period
-+ sets the granularity for how often those token rate credit
-+ updates are performed, and thus determines the accuracy and
-+ range of traffic rates that can be configured by users. The
-+ reference manual recommends a 1 microsecond period as providing
-+ a good balance between granularity and range.
-+
-+ Unless you know what you are doing, leave this value at its default.
-+
-+config FSL_QMAN_INIT_TIMEOUT
-+ int "timeout for qman init stage, in seconds"
-+ default 10
-+ ---help---
-+ The timeout setting to quit the initialization loop for the non-control
-+ partition in case the control partition fails to boot up.
-+
-+endif # FSL_SDK_QMAN
-+
-+config FSL_USDPAA
-+ bool "Freescale USDPAA process driver"
-+ depends on FSL_SDK_DPA
-+ default y
-+ ---help---
-+ This driver provides user-space access to kernel-managed
-+ resource interfaces for USDPAA applications, on the assumption
-+ that each process will open this device once. Specifically, this
-+ device exposes functionality that would be awkward if exposed
-+ via the portal devices, i.e. functionality that is inherently
-+ process-wide rather than portal-specific.
-+ This device is necessary for obtaining access to DMA memory and
-+ for allocation of Qman and Bman resources. In short, if you wish
-+ to use USDPAA applications, you need this.
-+
-+ If unsure, say Y.
-+
-+
-+endmenu
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/Makefile
-@@ -0,0 +1,28 @@
-+subdir-ccflags-y := -Werror
-+
-+# Common
-+obj-$(CONFIG_FSL_SDK_DPA) += dpa_alloc.o
-+obj-$(CONFIG_FSL_SDK_DPA) += qbman_driver.o
-+
-+# Bman
-+obj-$(CONFIG_FSL_SDK_BMAN) += bman_high.o
-+obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o
-+obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o
-+obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o
-+bman_tester-y = bman_test.o
-+bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o
-+bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o
-+bman_debugfs_interface-y = bman_debugfs.o
-+
-+# Qman
-+obj-$(CONFIG_FSL_SDK_QMAN) += qman_high.o qman_utility.o
-+obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o
-+obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o
-+qman_tester-y = qman_test.o
-+qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o
-+qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o
-+obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o
-+qman_debugfs_interface-y = qman_debugfs.o
-+
-+# USDPAA
-+obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_config.c
-@@ -0,0 +1,720 @@
-+/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <asm/cacheflush.h>
-+#include "bman_private.h"
-+#include <linux/of_reserved_mem.h>
-+
-+/* Last updated for v00.79 of the BG */
-+
-+struct bman;
-+
-+/* Register offsets */
-+#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
-+#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
-+#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
-+#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
-+#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
-+#define REG_FBPR_FPC 0x0800
-+#define REG_STATE_IDLE 0x960
-+#define REG_STATE_STOP 0x964
-+#define REG_ECSR 0x0a00
-+#define REG_ECIR 0x0a04
-+#define REG_EADR 0x0a08
-+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
-+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
-+#define REG_IP_REV_1 0x0bf8
-+#define REG_IP_REV_2 0x0bfc
-+#define REG_FBPR_BARE 0x0c00
-+#define REG_FBPR_BAR 0x0c04
-+#define REG_FBPR_AR 0x0c10
-+#define REG_SRCIDR 0x0d04
-+#define REG_LIODNR 0x0d08
-+#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
-+
-+/* Used by all error interrupt registers except 'inhibit' */
-+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
-+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
-+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
-+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
-+#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
-+
-+/* BMAN_ECIR valid error bit */
-+#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
-+
-+union bman_ecir {
-+ u32 ecir_raw;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 __reserved1:4;
-+ u32 portal_num:4;
-+ u32 __reserved2:12;
-+ u32 numb:4;
-+ u32 __reserved3:2;
-+ u32 pid:6;
-+#else
-+ u32 pid:6;
-+ u32 __reserved3:2;
-+ u32 numb:4;
-+ u32 __reserved2:12;
-+ u32 portal_num:4;
-+ u32 __reserved1:4;
-+#endif
-+ } __packed info;
-+};
-+
-+union bman_eadr {
-+ u32 eadr_raw;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 __reserved1:5;
-+ u32 memid:3;
-+ u32 __reserved2:14;
-+ u32 eadr:10;
-+#else
-+ u32 eadr:10;
-+ u32 __reserved2:14;
-+ u32 memid:3;
-+ u32 __reserved1:5;
-+#endif
-+ } __packed info;
-+};
-+
-+struct bman_hwerr_txt {
-+ u32 mask;
-+ const char *txt;
-+};
-+
-+#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
-+
-+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
-+ BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
-+ BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
-+ BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
-+ BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
-+ BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
-+};
-+#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt))
-+
-+struct bman_error_info_mdata {
-+ u16 addr_mask;
-+ u16 bits;
-+ const char *txt;
-+};
-+
-+#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
-+static const struct bman_error_info_mdata error_mdata[] = {
-+ BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
-+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
-+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
-+};
-+#define BMAN_ERR_MDATA_COUNT \
-+ (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata))
-+
-+/* TODO: make this configurable via Kconfig */
-+#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
-+
-+/**
-+ * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
-+ * @v: for accessors that write values, this is the 32-bit value
-+ *
-+ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
-+ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
-+ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
-+ * "write the enable register" rather than "enable the write register"!
-+ */
-+#define bm_err_isr_status_read(bm) \
-+ __bm_err_isr_read(bm, bm_isr_status)
-+#define bm_err_isr_status_clear(bm, m) \
-+ __bm_err_isr_write(bm, bm_isr_status, m)
-+#define bm_err_isr_enable_read(bm) \
-+ __bm_err_isr_read(bm, bm_isr_enable)
-+#define bm_err_isr_enable_write(bm, v) \
-+ __bm_err_isr_write(bm, bm_isr_enable, v)
-+#define bm_err_isr_disable_read(bm) \
-+ __bm_err_isr_read(bm, bm_isr_disable)
-+#define bm_err_isr_disable_write(bm, v) \
-+ __bm_err_isr_write(bm, bm_isr_disable, v)
-+#define bm_err_isr_inhibit(bm) \
-+ __bm_err_isr_write(bm, bm_isr_inhibit, 1)
-+#define bm_err_isr_uninhibit(bm) \
-+ __bm_err_isr_write(bm, bm_isr_inhibit, 0)
-+
-+/*
-+ * TODO: unimplemented registers
-+ *
-+ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
-+ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
-+ */
-+
-+/* Encapsulate "struct bman *" as a cast of the register space address. */
-+
-+static struct bman *bm_create(void *regs)
-+{
-+ return (struct bman *)regs;
-+}
-+
-+static inline u32 __bm_in(struct bman *bm, u32 offset)
-+{
-+ return in_be32((void *)bm + offset);
-+}
-+static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
-+{
-+ out_be32((void *)bm + offset, val);
-+}
-+#define bm_in(reg) __bm_in(bm, REG_##reg)
-+#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
-+
-+static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
-+{
-+ return __bm_in(bm, REG_ERR_ISR + (n << 2));
-+}
-+
-+static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
-+{
-+ __bm_out(bm, REG_ERR_ISR + (n << 2), val);
-+}
-+
-+static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
-+{
-+ u32 v = bm_in(IP_REV_1);
-+ *id = (v >> 16);
-+ *major = (v >> 8) & 0xff;
-+ *minor = v & 0xff;
-+}
-+
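-+/*
-+ * Pool thresholds are encoded as an 8-bit mantissa plus a 4-bit exponent,
-+ * i.e. threshold ~= val << e. For example, 1000 encodes as 250 | (2 << 8)
-+ * since 250 << 2 = 1000; with 'roundup' set, the mantissa is bumped whenever
-+ * a set bit is shifted out, so the encoded value never drops below the request.
-+ */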
-+static u32 __generate_thresh(u32 val, int roundup)
-+{
-+ u32 e = 0; /* exponent; 'val' holds the coefficient */
-+ int oddbit = 0;
-+ while (val > 0xff) {
-+ oddbit = val & 1;
-+ val >>= 1;
-+ e++;
-+ if (roundup && oddbit)
-+ val++;
-+ }
-+ DPA_ASSERT(e < 0x10);
-+ return val | (e << 8);
-+}
-+
-+static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
-+ u32 hwdet, u32 hwdxt)
-+{
-+ DPA_ASSERT(pool < bman_pool_max);
-+ bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
-+ bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
-+ bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
-+ bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
-+}
-+
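-+/*
-+ * Program the FBPR memory region: the base address is split across the
-+ * BARE/BAR registers, and AR encodes the size as log2(size) - 1, with bit 30
-+ * selecting the priority.
-+ */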
-+static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
-+{
-+ u32 exp = ilog2(size);
-+ /* choke if size isn't within range */
-+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
-+ is_power_of_2(size));
-+ /* choke if '[e]ba' has lower-alignment than 'size' */
-+ DPA_ASSERT(!(ba & (size - 1)));
-+ bm_out(FBPR_BARE, upper_32_bits(ba));
-+ bm_out(FBPR_BAR, lower_32_bits(ba));
-+ bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
-+}
-+
-+/*****************/
-+/* Config driver */
-+/*****************/
-+
-+/* TODO: Kconfig these? */
-+#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12)
-+
-+/* We support only one of these. */
-+static struct bman *bm;
-+static struct device_node *bm_node;
-+
-+/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used
-+ * during bman_init_ccsr(). */
-+static dma_addr_t fbpr_a;
-+static size_t fbpr_sz = DEFAULT_FBPR_SZ;
-+
-+static int bman_fbpr(struct reserved_mem *rmem)
-+{
-+ fbpr_a = rmem->base;
-+ fbpr_sz = rmem->size;
-+
-+ WARN_ON(!(fbpr_a && fbpr_sz));
-+
-+ return 0;
-+}
-+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
-+
-+static int __init fsl_bman_init(struct device_node *node)
-+{
-+ struct resource res;
-+ u32 __iomem *regs;
-+ const char *s;
-+ int ret, standby = 0;
-+ u16 id;
-+ u8 major, minor;
-+
-+ ret = of_address_to_resource(node, 0, &res);
-+ if (ret) {
-+ pr_err("Can't get %s property 'reg'\n",
-+ node->full_name);
-+ return ret;
-+ }
-+ s = of_get_property(node, "fsl,hv-claimable", &ret);
-+ if (s && !strcmp(s, "standby"))
-+ standby = 1;
-+ /* Global configuration */
-+ regs = ioremap(res.start, res.end - res.start + 1);
-+ bm = bm_create(regs);
-+ BUG_ON(!bm);
-+ bm_node = node;
-+ bm_get_version(bm, &id, &major, &minor);
-+ pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor);
-+ if ((major == 1) && (minor == 0)) {
-+ bman_ip_rev = BMAN_REV10;
-+ bman_pool_max = 64;
-+ } else if ((major == 2) && (minor == 0)) {
-+ bman_ip_rev = BMAN_REV20;
-+ bman_pool_max = 8;
-+ } else if ((major == 2) && (minor == 1)) {
-+ bman_ip_rev = BMAN_REV21;
-+ bman_pool_max = 64;
-+ } else {
-+ pr_warn("unknown Bman version, default to rev1.0\n");
-+ }
-+
-+ if (standby) {
-+ pr_info(" -> in standby mode\n");
-+ return 0;
-+ }
-+ return 0;
-+}
-+
-+int bman_have_ccsr(void)
-+{
-+ return bm ? 1 : 0;
-+}
-+
-+int bm_pool_set(u32 bpid, const u32 *thresholds)
-+{
-+ if (!bm)
-+ return -ENODEV;
-+ bm_set_pool(bm, bpid, thresholds[0],
-+ thresholds[1], thresholds[2],
-+ thresholds[3]);
-+ return 0;
-+}
-+EXPORT_SYMBOL(bm_pool_set);
-+
-+__init int bman_init_early(void)
-+{
-+ struct device_node *dn;
-+ int ret;
-+
-+ for_each_compatible_node(dn, NULL, "fsl,bman") {
-+ if (bm)
-+ pr_err("%s: only one 'fsl,bman' allowed\n",
-+ dn->full_name);
-+ else {
-+ if (!of_device_is_available(dn))
-+ continue;
-+
-+ ret = fsl_bman_init(dn);
-+ BUG_ON(ret);
-+ }
-+ }
-+ return 0;
-+}
-+postcore_initcall_sync(bman_init_early);
-+
-+
-+static void log_edata_bits(u32 bit_count)
-+{
-+ u32 i, j, mask = 0xffffffff;
-+
-+ pr_warn("Bman ErrInt, EDATA:\n");
-+ i = bit_count/32;
-+ if (bit_count%32) {
-+ i++;
-+ mask = ~(mask << bit_count%32);
-+ }
-+ j = 16-i;
-+ pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
-+ j++;
-+ for (; j < 16; j++)
-+ pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
-+}
-+
-+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
-+{
-+ union bman_ecir ecir_val;
-+ union bman_eadr eadr_val;
-+
-+ ecir_val.ecir_raw = bm_in(ECIR);
-+ /* Is portal info valid */
-+ if (ecsr_val & PORTAL_ECSR_ERR) {
-+ pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n",
-+ ecir_val.info.portal_num, ecir_val.info.numb,
-+ ecir_val.info.pid);
-+ }
-+ if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
-+ eadr_val.eadr_raw = bm_in(EADR);
-+ pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n",
-+ error_mdata[eadr_val.info.memid].txt,
-+ error_mdata[eadr_val.info.memid].addr_mask
-+ & eadr_val.info.eadr);
-+ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
-+ }
-+}
-+
-+/* Bman interrupt handler */
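-+/*
-+ * For each enabled and asserted error source: log it, dump and re-arm the
-+ * capture registers when ECSR marks them valid, and disable any source listed
-+ * in BMAN_ERRS_TO_UNENABLE so it cannot keep flooding the log.
-+ */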
-+static irqreturn_t bman_isr(int irq, void *ptr)
-+{
-+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
-+
-+ ier_val = bm_err_isr_enable_read(bm);
-+ isr_val = bm_err_isr_status_read(bm);
-+ ecsr_val = bm_in(ECSR);
-+ isr_mask = isr_val & ier_val;
-+
-+ if (!isr_mask)
-+ return IRQ_NONE;
-+ for (i = 0; i < BMAN_HWE_COUNT; i++) {
-+ if (bman_hwerr_txts[i].mask & isr_mask) {
-+ pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt);
-+ if (bman_hwerr_txts[i].mask & ecsr_val) {
-+ log_additional_error_info(isr_mask, ecsr_val);
-+ /* Re-arm error capture registers */
-+ bm_out(ECSR, ecsr_val);
-+ }
-+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
-+ pr_devel("Bman un-enabling error 0x%x\n",
-+ bman_hwerr_txts[i].mask);
-+ ier_val &= ~bman_hwerr_txts[i].mask;
-+ bm_err_isr_enable_write(bm, ier_val);
-+ }
-+ }
-+ }
-+ bm_err_isr_status_clear(bm, isr_val);
-+ return IRQ_HANDLED;
-+}
-+
-+static int __bind_irq(void)
-+{
-+ int ret, err_irq;
-+
-+ err_irq = of_irq_to_resource(bm_node, 0, NULL);
-+ if (err_irq == 0) {
-+ pr_info("Can't get %s property '%s'\n", bm_node->full_name,
-+ "interrupts");
-+ return -ENODEV;
-+ }
-+ ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node);
-+ if (ret) {
-+ pr_err("request_irq() failed %d for '%s'\n", ret,
-+ bm_node->full_name);
-+ return -ENODEV;
-+ }
-+ /* Disable Buffer Pool State Change */
-+ bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
-+ /* Write-to-clear any stale bits (e.g. starvation being asserted prior
-+ * to resource allocation during driver init). */
-+ bm_err_isr_status_clear(bm, 0xffffffff);
-+ /* Enable Error Interrupts */
-+ bm_err_isr_enable_write(bm, 0xffffffff);
-+ return 0;
-+}
-+
-+int bman_init_ccsr(struct device_node *node)
-+{
-+ int ret;
-+ if (!bman_have_ccsr())
-+ return 0;
-+ if (node != bm_node)
-+ return -EINVAL;
-+ /* FBPR memory */
-+ bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
-+ pr_info("bman-fbpr addr %pad size 0x%zx\n", &fbpr_a, fbpr_sz);
-+
-+ ret = __bind_irq();
-+ if (ret)
-+ return ret;
-+ return 0;
-+}
-+
-+u32 bm_pool_free_buffers(u32 bpid)
-+{
-+ return bm_in(POOL_CONTENT(bpid));
-+}
-+
-+#ifdef CONFIG_SYSFS
-+
-+#define DRV_NAME "fsl-bman"
-+#define SBEC_MAX_ID 1
-+#define SBEC_MIN_ID 0
-+
-+static ssize_t show_fbpr_fpc(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
-+};
-+
-+static ssize_t show_pool_count(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ u32 data;
-+ int i;
-+
-+ if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max))
-+ return -EINVAL;
-+ data = bm_in(POOL_CONTENT(i));
-+ return snprintf(buf, PAGE_SIZE, "%d\n", data);
-+};
-+
-+static ssize_t show_err_isr(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
-+};
-+
-+static ssize_t show_sbec(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ int i;
-+
-+ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
-+ return -EINVAL;
-+ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
-+ return -EINVAL;
-+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
-+};
-+
-+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
-+static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
-+
-+/* DEVICE_ATTR is not used here, as 64 of these would be required;
-+ * they are initialized when needed instead. */
-+static char *name_attrs_pool_count; /* "xx" + null-terminator */
-+static struct device_attribute *dev_attr_buffer_pool_count;
-+
-+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
-+
-+static struct attribute *bman_dev_attributes[] = {
-+ &dev_attr_fbpr_fpc.attr,
-+ &dev_attr_err_isr.attr,
-+ NULL
-+};
-+
-+static struct attribute *bman_dev_ecr_attributes[] = {
-+ &dev_attr_sbec_0.attr,
-+ &dev_attr_sbec_1.attr,
-+ NULL
-+};
-+
-+static struct attribute **bman_dev_pool_count_attributes;
-+
-+
-+/* root level */
-+static const struct attribute_group bman_dev_attr_grp = {
-+ .name = NULL,
-+ .attrs = bman_dev_attributes
-+};
-+static const struct attribute_group bman_dev_ecr_grp = {
-+ .name = "error_capture",
-+ .attrs = bman_dev_ecr_attributes
-+};
-+static struct attribute_group bman_dev_pool_countent_grp = {
-+ .name = "pool_count",
-+};
-+
-+static int of_fsl_bman_remove(struct platform_device *ofdev)
-+{
-+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
-+ return 0;
-+};
-+
-+static int of_fsl_bman_probe(struct platform_device *ofdev)
-+{
-+ int ret, i;
-+
-+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
-+ if (ret)
-+ goto done;
-+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
-+ if (ret)
-+ goto del_group_0;
-+
-+ name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3,
-+ GFP_KERNEL);
-+ if (!name_attrs_pool_count) {
-+ pr_err("Can't alloc name_attrs_pool_count\n");
-+ goto del_group_1;
-+ }
-+
-+ dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) *
-+ bman_pool_max, GFP_KERNEL);
-+ if (!dev_attr_buffer_pool_count) {
-+ pr_err("Can't alloc dev_attr-buffer_pool_count\n");
-+ goto del_group_2;
-+ }
-+
-+ bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) *
-+ (bman_pool_max + 1), GFP_KERNEL);
-+ if (!bman_dev_pool_count_attributes) {
-+ pr_err("can't alloc bman_dev_pool_count_attributes\n");
-+ goto del_group_3;
-+ }
-+
-+ for (i = 0; i < bman_pool_max; i++) {
-+ ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
-+ if (!ret)
-+ goto del_group_4;
-+ dev_attr_buffer_pool_count[i].attr.name =
-+ (name_attrs_pool_count + i * 3);
-+ dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
-+ dev_attr_buffer_pool_count[i].show = show_pool_count;
-+ bman_dev_pool_count_attributes[i] =
-+ &dev_attr_buffer_pool_count[i].attr;
-+ sysfs_attr_init(bman_dev_pool_count_attributes[i]);
-+ }
-+ bman_dev_pool_count_attributes[bman_pool_max] = NULL;
-+
-+ bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
-+
-+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp);
-+ if (ret)
-+ goto del_group_4;
-+
-+ goto done;
-+
-+del_group_4:
-+ kfree(bman_dev_pool_count_attributes);
-+del_group_3:
-+ kfree(dev_attr_buffer_pool_count);
-+del_group_2:
-+ kfree(name_attrs_pool_count);
-+del_group_1:
-+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
-+del_group_0:
-+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
-+done:
-+ if (ret)
-+ dev_err(&ofdev->dev,
-+ "Cannot create dev attributes ret=%d\n", ret);
-+ return ret;
-+};
-+
-+static struct of_device_id of_fsl_bman_ids[] = {
-+ {
-+ .compatible = "fsl,bman",
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
-+
-+#ifdef CONFIG_SUSPEND
-+static u32 saved_isdr;
-+
-+static int bman_pm_suspend_noirq(struct device *dev)
-+{
-+ uint32_t idle_state;
-+
-+ suspend_unused_bportal();
-+ /* save isdr, disable all, clear isr */
-+ saved_isdr = bm_err_isr_disable_read(bm);
-+ bm_err_isr_disable_write(bm, 0xffffffff);
-+ bm_err_isr_status_clear(bm, 0xffffffff);
-+
-+ if (bman_ip_rev < BMAN_REV21) {
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Bman version doesn't have STATE_IDLE\n");
-+#endif
-+ return 0;
-+ }
-+ idle_state = bm_in(STATE_IDLE);
-+ if (!(idle_state & 0x1)) {
-+ pr_err("Bman not idle 0x%x aborting\n", idle_state);
-+ bm_err_isr_disable_write(bm, saved_isdr);
-+ resume_unused_bportal();
-+ return -EBUSY;
-+ }
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state);
-+#endif
-+ return 0;
-+}
-+
-+static int bman_pm_resume_noirq(struct device *dev)
-+{
-+ /* restore isdr */
-+ bm_err_isr_disable_write(bm, saved_isdr);
-+ resume_unused_bportal();
-+ return 0;
-+}
-+#else
-+#define bman_pm_suspend_noirq NULL
-+#define bman_pm_resume_noirq NULL
-+#endif
-+
-+static const struct dev_pm_ops bman_pm_ops = {
-+ .suspend_noirq = bman_pm_suspend_noirq,
-+ .resume_noirq = bman_pm_resume_noirq,
-+};
-+
-+static struct platform_driver of_fsl_bman_driver = {
-+ .driver = {
-+ .owner = THIS_MODULE,
-+ .name = DRV_NAME,
-+ .of_match_table = of_fsl_bman_ids,
-+ .pm = &bman_pm_ops,
-+ },
-+ .probe = of_fsl_bman_probe,
-+ .remove = of_fsl_bman_remove,
-+};
-+
-+static int bman_ctrl_init(void)
-+{
-+ return platform_driver_register(&of_fsl_bman_driver);
-+}
-+
-+static void bman_ctrl_exit(void)
-+{
-+ platform_driver_unregister(&of_fsl_bman_driver);
-+}
-+
-+module_init(bman_ctrl_init);
-+module_exit(bman_ctrl_exit);
-+
-+#endif /* CONFIG_SYSFS */
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_debugfs.c
-@@ -0,0 +1,119 @@
-+/* Copyright 2010-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/module.h>
-+#include <linux/fsl_bman.h>
-+#include <linux/debugfs.h>
-+#include <linux/seq_file.h>
-+#include <linux/uaccess.h>
-+
-+static struct dentry *dfs_root; /* debugfs root directory */
-+
-+/*******************************************************************************
-+ * Query Buffer Pool State
-+ ******************************************************************************/
-+static int query_bp_state_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct bm_pool_state state;
-+ int i, j;
-+ u32 mask;
-+
-+ memset(&state, 0, sizeof(struct bm_pool_state));
-+ ret = bman_query_pools(&state);
-+ if (ret) {
-+ seq_printf(file, "Error %d\n", ret);
-+ return 0;
-+ }
-+ seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
-+ for (i = 0; i < 2; i++) {
-+ mask = 0x80000000;
-+ for (j = 0; j < 32; j++) {
-+ seq_printf(file,
-+ " %-2u %-3s %-3s\n",
-+ (i*32)+j,
-+ (state.as.state.__state[i] & mask) ? "no" : "yes",
-+ (state.ds.state.__state[i] & mask) ? "yes" : "no");
-+ mask >>= 1;
-+ }
-+ }
-+ return 0;
-+}
-+
-+static int query_bp_state_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, query_bp_state_show, NULL);
-+}
-+
-+static const struct file_operations query_bp_state_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_bp_state_open,
-+ .read = seq_read,
-+ .release = single_release,
-+};
-+
-+static int __init bman_debugfs_module_init(void)
-+{
-+ int ret = 0;
-+ struct dentry *d;
-+
-+ dfs_root = debugfs_create_dir("bman", NULL);
-+
-+ if (dfs_root == NULL) {
-+ ret = -ENOMEM;
-+ pr_err("Cannot create bman debugfs dir\n");
-+ goto _return;
-+ }
-+ d = debugfs_create_file("query_bp_state",
-+ S_IRUGO,
-+ dfs_root,
-+ NULL,
-+ &query_bp_state_fops);
-+ if (d == NULL) {
-+ ret = -ENOMEM;
-+ pr_err("Cannot create query_bp_state\n");
-+ goto _return;
-+ }
-+ return 0;
-+
-+_return:
-+ debugfs_remove_recursive(dfs_root);
-+ return ret;
-+}
-+
-+static void __exit bman_debugfs_module_exit(void)
-+{
-+ debugfs_remove_recursive(dfs_root);
-+}
-+
-+
-+module_init(bman_debugfs_module_init);
-+module_exit(bman_debugfs_module_exit);
-+MODULE_LICENSE("Dual BSD/GPL");
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_driver.c
-@@ -0,0 +1,559 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include "bman_low.h"
-+#ifdef CONFIG_HOTPLUG_CPU
-+#include <linux/cpu.h>
-+#endif
-+/*
-+ * Global variables holding the maximum portal/pool numbers supported by this BMan version
-+ */
-+u16 bman_ip_rev;
-+EXPORT_SYMBOL(bman_ip_rev);
-+u16 bman_pool_max;
-+EXPORT_SYMBOL(bman_pool_max);
-+static u16 bman_portal_max;
-+
-+/* After initialising cpus that own shared portal configs, we cache the
-+ * resulting portals (ie. not just the configs) in this array. Then we
-+ * initialise slave cpus that don't have their own portals, redirecting them to
-+ * portals from this cache in a round-robin assignment. */
-+static struct bman_portal *shared_portals[NR_CPUS];
-+static int num_shared_portals;
-+static int shared_portals_idx;
-+static LIST_HEAD(unused_pcfgs);
-+static DEFINE_SPINLOCK(unused_pcfgs_lock);
-+static void *affine_bportals[NR_CPUS];
-+
-+static int __init fsl_bpool_init(struct device_node *node)
-+{
-+ int ret;
-+ u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret);
-+ if (!bpid || (ret != 4)) {
-+ pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name);
-+ return -ENODEV;
-+ }
-+ thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret);
-+ if (thresh) {
-+ if (ret != 16) {
-+ pr_err("Invalid %s property '%s'\n",
-+ node->full_name, "fsl,bpool-thresholds");
-+ return -ENODEV;
-+ }
-+ }
-+ if (thresh) {
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+ ret = bm_pool_set(be32_to_cpu(*bpid), thresh);
-+ if (ret)
-+ pr_err("No CCSR node for %s property '%s'\n",
-+ node->full_name, "fsl,bpool-thresholds");
-+ return ret;
-+#else
-+ pr_err("Ignoring %s property '%s', no CCSR support\n",
-+ node->full_name, "fsl,bpool-thresholds");
-+#endif
-+ }
-+ return 0;
-+}
-+
-+static int __init fsl_bpid_range_init(struct device_node *node)
-+{
-+ int ret;
-+ u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret);
-+ if (!range) {
-+ pr_err("No 'fsl,bpid-range' property in node %s\n",
-+ node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n",
-+ node->full_name);
-+ return -EINVAL;
-+ }
-+ bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ pr_info("Bman: BPID allocator includes range %d:%d\n",
-+ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ return 0;
-+}
-+
-+static struct bm_portal_config * __init parse_pcfg(struct device_node *node)
-+{
-+ struct bm_portal_config *pcfg;
-+ const u32 *index;
-+ int irq, ret;
-+ resource_size_t len;
-+
-+ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
-+ if (!pcfg) {
-+ pr_err("can't allocate portal config");
-+ return NULL;
-+ }
-+
-+ if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
-+ of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
-+ bman_ip_rev = BMAN_REV10;
-+ bman_pool_max = 64;
-+ bman_portal_max = 10;
-+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
-+ of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
-+ bman_ip_rev = BMAN_REV20;
-+ bman_pool_max = 8;
-+ bman_portal_max = 3;
-+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) {
-+ bman_ip_rev = BMAN_REV21;
-+ bman_pool_max = 64;
-+ bman_portal_max = 50;
-+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) {
-+ bman_ip_rev = BMAN_REV21;
-+ bman_pool_max = 64;
-+ bman_portal_max = 25;
-+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) {
-+ bman_ip_rev = BMAN_REV21;
-+ bman_pool_max = 64;
-+ bman_portal_max = 18;
-+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
-+ bman_ip_rev = BMAN_REV21;
-+ bman_pool_max = 64;
-+ bman_portal_max = 10;
-+ } else {
-+ pr_warn("unknown BMan version in portal node,"
-+ "default to rev1.0\n");
-+ bman_ip_rev = BMAN_REV10;
-+ bman_pool_max = 64;
-+ bman_portal_max = 10;
-+ }
-+
-+ ret = of_address_to_resource(node, DPA_PORTAL_CE,
-+ &pcfg->addr_phys[DPA_PORTAL_CE]);
-+ if (ret) {
-+ pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
-+ goto err;
-+ }
-+ ret = of_address_to_resource(node, DPA_PORTAL_CI,
-+ &pcfg->addr_phys[DPA_PORTAL_CI]);
-+ if (ret) {
-+ pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
-+ goto err;
-+ }
-+
-+ index = of_get_property(node, "cell-index", &ret);
-+ if (!index || (ret != 4)) {
-+ pr_err("Can't get %s property '%s'\n", node->full_name,
-+ "cell-index");
-+ goto err;
-+ }
-+ if (be32_to_cpu(*index) >= bman_portal_max) {
-+ pr_err("BMan portal cell index %d out of range, max %d\n",
-+ be32_to_cpu(*index), bman_portal_max);
-+ goto err;
-+ }
-+
-+ pcfg->public_cfg.cpu = -1;
-+
-+ irq = irq_of_parse_and_map(node, 0);
-+ if (irq == 0) {
-+ pr_err("Can't get %s property 'interrupts'\n", node->full_name);
-+ goto err;
-+ }
-+ pcfg->public_cfg.irq = irq;
-+ pcfg->public_cfg.index = be32_to_cpu(*index);
-+ bman_depletion_fill(&pcfg->public_cfg.mask);
-+
-+ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
-+ if (len != (unsigned long)len)
-+ goto err;
-+
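-+ /* The cache-enabled (CE) portal region is mapped cacheable, while the
-+ * cache-inhibited (CI) region is mapped uncached (and guarded on PPC). */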
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
-+ pcfg->addr_phys[DPA_PORTAL_CE].start,
-+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
-+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
-+ pcfg->addr_phys[DPA_PORTAL_CI].start,
-+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
-+
-+#else
-+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
-+ pcfg->addr_phys[DPA_PORTAL_CE].start,
-+ (unsigned long)len,
-+ 0);
-+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
-+ pcfg->addr_phys[DPA_PORTAL_CI].start,
-+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
-+ _PAGE_GUARDED | _PAGE_NO_CACHE);
-+#endif
-+ /* disable bp depletion */
-+ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0));
-+ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1));
-+ return pcfg;
-+err:
-+ kfree(pcfg);
-+ return NULL;
-+}
-+
-+static struct bm_portal_config *get_pcfg(struct list_head *list)
-+{
-+ struct bm_portal_config *pcfg;
-+ if (list_empty(list))
-+ return NULL;
-+ pcfg = list_entry(list->prev, struct bm_portal_config, list);
-+ list_del(&pcfg->list);
-+ return pcfg;
-+}
-+
-+static struct bm_portal_config *get_pcfg_idx(struct list_head *list,
-+ uint32_t idx)
-+{
-+ struct bm_portal_config *pcfg;
-+ if (list_empty(list))
-+ return NULL;
-+ list_for_each_entry(pcfg, list, list) {
-+ if (pcfg->public_cfg.index == idx) {
-+ list_del(&pcfg->list);
-+ return pcfg;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+struct bm_portal_config *bm_get_unused_portal(void)
-+{
-+ return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
-+}
-+
-+struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx)
-+{
-+ struct bm_portal_config *ret;
-+ spin_lock(&unused_pcfgs_lock);
-+ if (idx == QBMAN_ANY_PORTAL_IDX)
-+ ret = get_pcfg(&unused_pcfgs);
-+ else
-+ ret = get_pcfg_idx(&unused_pcfgs, idx);
-+ spin_unlock(&unused_pcfgs_lock);
-+ return ret;
-+}
-+
-+void bm_put_unused_portal(struct bm_portal_config *pcfg)
-+{
-+ spin_lock(&unused_pcfgs_lock);
-+ list_add(&pcfg->list, &unused_pcfgs);
-+ spin_unlock(&unused_pcfgs_lock);
-+}
-+
-+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
-+{
-+ struct bman_portal *p;
-+ p = bman_create_affine_portal(pcfg);
-+ if (p) {
-+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
-+ bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
-+#endif
-+ pr_info("Bman portal %sinitialised, cpu %d\n",
-+ pcfg->public_cfg.is_shared ? "(shared) " : "",
-+ pcfg->public_cfg.cpu);
-+ affine_bportals[pcfg->public_cfg.cpu] = p;
-+ } else
-+ pr_crit("Bman portal failure on cpu %d\n",
-+ pcfg->public_cfg.cpu);
-+ return p;
-+}
-+
-+static void init_slave(int cpu)
-+{
-+ struct bman_portal *p;
-+ p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
-+ if (!p)
-+ pr_err("Bman slave portal failure on cpu %d\n", cpu);
-+ else
-+ pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
-+ if (shared_portals_idx >= num_shared_portals)
-+ shared_portals_idx = 0;
-+ affine_bportals[cpu] = p;
-+}
-+
-+/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
-+ * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes
-+ * and/or ranges of indexes, with each being optionally prefixed by "s" to
-+ * explicitly mark it or them for sharing.
-+ * E.g.:
-+ * bportals=s0,1-3,s4
-+ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
-+ * portals, and any remaining cpus share the portals that are assigned to cpus 0
-+ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
-+ * cpu 0's portal, cpu 6 would share cpu 4's portal, and cpu 7 would share cpu
-+ * 0's portal.) */
-+static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
-+static struct cpumask want_shared __initdata; /* cpus requested with "s" */
-+
-+static int __init parse_bportals(char *str)
-+{
-+ return parse_portals_bootarg(str, &want_shared, &want_unshared,
-+ "bportals");
-+}
-+__setup("bportals=", parse_bportals);
-+
-+static int bman_offline_cpu(unsigned int cpu)
-+{
-+ struct bman_portal *p;
-+ const struct bm_portal_config *pcfg;
-+ p = (struct bman_portal *)affine_bportals[cpu];
-+ if (p) {
-+ pcfg = bman_get_bm_portal_config(p);
-+ if (pcfg)
-+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
-+ }
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static int bman_online_cpu(unsigned int cpu)
-+{
-+ struct bman_portal *p;
-+ const struct bm_portal_config *pcfg;
-+ p = (struct bman_portal *)affine_bportals[cpu];
-+ if (p) {
-+ pcfg = bman_get_bm_portal_config(p);
-+ if (pcfg)
-+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
-+ }
-+ return 0;
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+/* Initialise the Bman driver. The meat of this function deals with portals. The
-+ * following describes the flow of portal-handling, the code "steps" refer to
-+ * this description;
-+ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
-+ * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
-+ * bound).
-+ * 2. The "want_shared" and "want_unshared" lists (as filled by the
-+ * "bportals=[...]" bootarg) are processed, allocating portals and assigning
-+ * them to cpus, placing them in the relevant list and setting ::cpu as
-+ * appropriate. If no "bportals" bootarg was present, the default is to try to
-+ * assign portals to all online cpus at the time of driver initialisation.
-+ * Any failure to allocate portals (when parsing the "want" lists or when
-+ * using default behaviour) will be silently tolerated (the "fixup" logic in
-+ * step 3 will determine what happens in this case).
-+ * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
-+ * sharing and sharing is required (because not all cpus have been assigned
-+ * portals), then one portal will be marked for sharing. Conversely if no
-+ * sharing is required, any portals marked for sharing will not be shared. It
-+ * may be that sharing occurs when it wasn't expected, if portal allocation
-+ * failed to honour all the requested assignments (including the default
-+ * assignments if no bootarg is present).
-+ * 4. Unshared portals are initialised on their respective cpus.
-+ * 5. Shared portals are initialised on their respective cpus.
-+ * 6. Each remaining cpu is initialised to slave to one of the shared portals,
-+ * which are selected in a round-robin fashion.
-+ * Any portal configs left unused are available for USDPAA allocation.
-+ */
-+__init int bman_init(void)
-+{
-+ struct cpumask slave_cpus;
-+ struct cpumask unshared_cpus = *cpu_none_mask;
-+ struct cpumask shared_cpus = *cpu_none_mask;
-+ LIST_HEAD(unshared_pcfgs);
-+ LIST_HEAD(shared_pcfgs);
-+ struct device_node *dn;
-+ struct bm_portal_config *pcfg;
-+ struct bman_portal *p;
-+ int cpu, ret;
-+ struct cpumask offline_cpus;
-+
-+ /* Initialise the Bman (CCSR) device */
-+ for_each_compatible_node(dn, NULL, "fsl,bman") {
-+ if (!bman_init_ccsr(dn))
-+ pr_info("Bman err interrupt handler present\n");
-+ else
-+ pr_err("Bman CCSR setup failed\n");
-+ }
-+ /* Initialise any declared buffer pools */
-+ for_each_compatible_node(dn, NULL, "fsl,bpool") {
-+ ret = fsl_bpool_init(dn);
-+ if (ret)
-+ return ret;
-+ }
-+ /* Step 1. See comments at the beginning of the file. */
-+ for_each_compatible_node(dn, NULL, "fsl,bman-portal") {
-+ if (!of_device_is_available(dn))
-+ continue;
-+ pcfg = parse_pcfg(dn);
-+ if (pcfg)
-+ list_add_tail(&pcfg->list, &unused_pcfgs);
-+ }
-+ /* Step 2. */
-+ for_each_possible_cpu(cpu) {
-+ if (cpumask_test_cpu(cpu, &want_shared)) {
-+ pcfg = get_pcfg(&unused_pcfgs);
-+ if (!pcfg)
-+ break;
-+ pcfg->public_cfg.cpu = cpu;
-+ list_add_tail(&pcfg->list, &shared_pcfgs);
-+ cpumask_set_cpu(cpu, &shared_cpus);
-+ }
-+ if (cpumask_test_cpu(cpu, &want_unshared)) {
-+ if (cpumask_test_cpu(cpu, &shared_cpus))
-+ continue;
-+ pcfg = get_pcfg(&unused_pcfgs);
-+ if (!pcfg)
-+ break;
-+ pcfg->public_cfg.cpu = cpu;
-+ list_add_tail(&pcfg->list, &unshared_pcfgs);
-+ cpumask_set_cpu(cpu, &unshared_cpus);
-+ }
-+ }
-+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
-+ /* Default, give an unshared portal to each online cpu */
-+ for_each_online_cpu(cpu) {
-+ pcfg = get_pcfg(&unused_pcfgs);
-+ if (!pcfg)
-+ break;
-+ pcfg->public_cfg.cpu = cpu;
-+ list_add_tail(&pcfg->list, &unshared_pcfgs);
-+ cpumask_set_cpu(cpu, &unshared_cpus);
-+ }
-+ }
-+ /* Step 3. */
-+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
-+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
-+ if (cpumask_empty(&slave_cpus)) {
-+ /* No sharing required */
-+ if (!list_empty(&shared_pcfgs)) {
-+ /* Migrate "shared" to "unshared" */
-+ cpumask_or(&unshared_cpus, &unshared_cpus,
-+ &shared_cpus);
-+ cpumask_clear(&shared_cpus);
-+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
-+ INIT_LIST_HEAD(&shared_pcfgs);
-+ }
-+ } else {
-+ /* Sharing required */
-+ if (list_empty(&shared_pcfgs)) {
-+ /* Migrate one "unshared" to "shared" */
-+ pcfg = get_pcfg(&unshared_pcfgs);
-+ if (!pcfg) {
-+ pr_crit("No BMan portals available!\n");
-+ return 0;
-+ }
-+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
-+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
-+ list_add_tail(&pcfg->list, &shared_pcfgs);
-+ }
-+ }
-+ /* Step 4. */
-+ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
-+ pcfg->public_cfg.is_shared = 0;
-+ p = init_pcfg(pcfg);
-+ if (!p) {
-+ pr_crit("Unable to initialize bman portal\n");
-+ return 0;
-+ }
-+ }
-+ /* Step 5. */
-+ list_for_each_entry(pcfg, &shared_pcfgs, list) {
-+ pcfg->public_cfg.is_shared = 1;
-+ p = init_pcfg(pcfg);
-+ if (p)
-+ shared_portals[num_shared_portals++] = p;
-+ }
-+ /* Step 6. */
-+ if (!cpumask_empty(&slave_cpus))
-+ for_each_cpu(cpu, &slave_cpus)
-+ init_slave(cpu);
-+ pr_info("Bman portals initialised\n");
-+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
-+ for_each_cpu(cpu, &offline_cpus)
-+ bman_offline_cpu(cpu);
-+#ifdef CONFIG_HOTPLUG_CPU
-+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-+ "soc/qbman_portal:online",
-+ bman_online_cpu, bman_offline_cpu);
-+ if (ret < 0) {
-+ pr_err("bman: failed to register hotplug callbacks.\n");
-+ return 0;
-+ }
-+#endif
-+ return 0;
-+}
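
To make step 3 above concrete: the slave set is simply every possible cpu that ended up with neither a dedicated nor a shared portal. A minimal userspace bitmask sketch (plain integers standing in for kernel cpumasks) for the hypothetical "bportals=s0,1-3,s4" example on an 8-cpu system:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t possible = 0xff;  /* hypothetical 8-cpu system          */
        uint8_t shared   = 0x11;  /* cpus 0 and 4 got shared portals    */
        uint8_t unshared = 0x0e;  /* cpus 1-3 got dedicated portals     */
        uint8_t slaves   = possible & ~shared & ~unshared;

        /* cpus 5, 6 and 7 have no portal of their own; they slave to the
         * shared portals, chosen round-robin (5->0, 6->4, 7->0). */
        printf("slave mask = 0x%02x\n", slaves);  /* prints 0xe0 */
        return 0;
    }
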
-+
-+__init int bman_resource_init(void)
-+{
-+ struct device_node *dn;
-+ int ret;
-+
-+ /* Initialise BPID allocation ranges */
-+ for_each_compatible_node(dn, NULL, "fsl,bpid-range") {
-+ ret = fsl_bpid_range_init(dn);
-+ if (ret)
-+ return ret;
-+ }
-+ return 0;
-+}
-+
-+#ifdef CONFIG_SUSPEND
-+void suspend_unused_bportal(void)
-+{
-+ struct bm_portal_config *pcfg;
-+
-+ if (list_empty(&unused_pcfgs))
-+ return;
-+
-+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Need to save bportal %d\n", pcfg->public_cfg.index);
-+#endif
-+ /* save isdr, disable all via isdr, clear isr */
-+ pcfg->saved_isdr =
-+ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
-+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
-+ 0xe08);
-+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
-+ 0xe00);
-+ }
-+ return;
-+}
-+
-+void resume_unused_bportal(void)
-+{
-+ struct bm_portal_config *pcfg;
-+
-+ if (list_empty(&unused_pcfgs))
-+ return;
-+
-+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index);
-+#endif
-+ /* restore isdr */
-+ __raw_writel(pcfg->saved_isdr,
-+ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
-+ }
-+ return;
-+}
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_high.c
-@@ -0,0 +1,1145 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "bman_low.h"
-+
-+/* Compilation constants */
-+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
-+#define IRQNAME "BMan portal %d"
-+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
-+
-+struct bman_portal {
-+ struct bm_portal p;
-+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
-+ struct bman_depletion *pools;
-+ int thresh_set;
-+ unsigned long irq_sources;
-+ u32 slowpoll; /* only used when interrupts are off */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
-+#endif
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ raw_spinlock_t sharing_lock; /* only used if is_shared */
-+ int is_shared;
-+ struct bman_portal *sharing_redirect;
-+#endif
-+ /* When the cpu-affine portal is activated, this is non-NULL */
-+ const struct bm_portal_config *config;
-+ /* This is needed for power management */
-+ struct platform_device *pdev;
-+ /* 64-entry hash-table of pool objects that are tracking depletion
-+ * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
-+ * we're not fussy about cache-misses and so forth - whereas the above
-+ * members should all fit in one cacheline.
-+ * BTW, with 64 entries in the hash table and 64 buffer pools to track,
-+ * you'll never guess the hash-function ... */
-+ struct bman_pool *cb[64];
-+ char irqname[MAX_IRQNAME];
-+ /* Track if the portal was alloced by the driver */
-+ u8 alloced;
-+ /* power management data */
-+ u32 save_isdr;
-+};
-+
-+/* For an explanation of the locking, redirection, or affine-portal logic,
-+ * please consult the Qman driver for details. This is the same, only simpler
-+ * (no fiddly Qman-specific bits.) */
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+#define PORTAL_IRQ_LOCK(p, irqflags) \
-+ do { \
-+ if ((p)->is_shared) \
-+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
-+ else \
-+ local_irq_save(irqflags); \
-+ } while (0)
-+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
-+ do { \
-+ if ((p)->is_shared) \
-+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
-+ irqflags); \
-+ else \
-+ local_irq_restore(irqflags); \
-+ } while (0)
-+#else
-+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
-+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
-+#endif
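
The macros above capture the locking idiom used by every fast-path routine in this file: a raw spinlock with interrupts disabled when the portal is shared between cpus, plain local_irq_save() otherwise. A minimal sketch of the pattern, using only names defined in this file:

    static void example_critical_section(struct bman_portal *p)
    {
        __maybe_unused unsigned long irqflags;

        PORTAL_IRQ_LOCK(p, irqflags);
        /* ... access rings/registers owned by this portal ... */
        PORTAL_IRQ_UNLOCK(p, irqflags);
    }
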
-+
-+static cpumask_t affine_mask;
-+static DEFINE_SPINLOCK(affine_mask_lock);
-+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
-+static inline struct bman_portal *get_raw_affine_portal(void)
-+{
-+ return &get_cpu_var(bman_affine_portal);
-+}
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+static inline struct bman_portal *get_affine_portal(void)
-+{
-+ struct bman_portal *p = get_raw_affine_portal();
-+ if (p->sharing_redirect)
-+ return p->sharing_redirect;
-+ return p;
-+}
-+#else
-+#define get_affine_portal() get_raw_affine_portal()
-+#endif
-+static inline void put_affine_portal(void)
-+{
-+ put_cpu_var(bman_affine_portal);
-+}
-+static inline struct bman_portal *get_poll_portal(void)
-+{
-+ return &get_cpu_var(bman_affine_portal);
-+}
-+#define put_poll_portal()
-+
-+/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
-+ * more than one such object per Bman buffer pool, eg. if different users of the
-+ * pool are operating via different portals. */
-+struct bman_pool {
-+ struct bman_pool_params params;
-+ /* Used for hash-table admin when using depletion notifications. */
-+ struct bman_portal *portal;
-+ struct bman_pool *next;
-+ /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
-+ struct bm_buffer *sp;
-+ unsigned int sp_fill;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ atomic_t in_use;
-+#endif
-+};
-+
-+/* (De)Registration of depletion notification callbacks */
-+static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
-+{
-+ __maybe_unused unsigned long irqflags;
-+ pool->portal = portal;
-+ PORTAL_IRQ_LOCK(portal, irqflags);
-+ pool->next = portal->cb[pool->params.bpid];
-+ portal->cb[pool->params.bpid] = pool;
-+ if (!pool->next)
-+ /* First object for that bpid on this portal, enable the BSCN
-+ * mask bit. */
-+ bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
-+ PORTAL_IRQ_UNLOCK(portal, irqflags);
-+}
-+static void depletion_unlink(struct bman_pool *pool)
-+{
-+ struct bman_pool *it, *last = NULL;
-+ struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
-+ __maybe_unused unsigned long irqflags;
-+ PORTAL_IRQ_LOCK(pool->portal, irqflags);
-+ it = *base; /* <-- gotcha, don't do this prior to the irq_save */
-+ while (it != pool) {
-+ last = it;
-+ it = it->next;
-+ }
-+ if (!last)
-+ *base = pool->next;
-+ else
-+ last->next = pool->next;
-+ if (!last && !pool->next) {
-+ /* Last object for that bpid on this portal, disable the BSCN
-+ * mask bit. */
-+ bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
-+ /* And "forget" that we last saw this pool as depleted */
-+ bman_depletion_unset(&pool->portal->pools[1],
-+ pool->params.bpid);
-+ }
-+ PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
-+}
-+
-+/* In the case that the application's core loop calls qman_poll() and
-+ * bman_poll(), we ought to balance how often we incur the overheads of the
-+ * slow-path poll. We'll use two decrementer sources. The idle decrementer
-+ * constant is used when the last slow-poll detected no work to do, and the busy
-+ * decrementer constant when the last slow-poll had work to do. */
-+#define SLOW_POLL_IDLE 1000
-+#define SLOW_POLL_BUSY 10
-+static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
-+
-+/* Portal interrupt handler */
-+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
-+{
-+ struct bman_portal *p = ptr;
-+ u32 clear = p->irq_sources;
-+ u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
-+ clear |= __poll_portal_slow(p, is);
-+ bm_isr_status_clear(&p->p, clear);
-+ return IRQ_HANDLED;
-+}
-+
-+#ifdef CONFIG_SUSPEND
-+static int _bman_portal_suspend_noirq(struct device *dev)
-+{
-+ struct bman_portal *p = (struct bman_portal *)dev->platform_data;
-+#ifdef CONFIG_PM_DEBUG
-+ struct platform_device *pdev = to_platform_device(dev);
-+#endif
-+ p->save_isdr = bm_isr_disable_read(&p->p);
-+ bm_isr_disable_write(&p->p, 0xffffffff);
-+ bm_isr_status_clear(&p->p, 0xffffffff);
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Suspend for %s\n", pdev->name);
-+#endif
-+ return 0;
-+}
-+
-+static int _bman_portal_resume_noirq(struct device *dev)
-+{
-+ struct bman_portal *p = (struct bman_portal *)dev->platform_data;
-+
-+ /* restore isdr */
-+ bm_isr_disable_write(&p->p, p->save_isdr);
-+ return 0;
-+}
-+#else
-+#define _bman_portal_suspend_noirq NULL
-+#define _bman_portal_resume_noirq NULL
-+#endif
-+
-+struct dev_pm_domain bman_portal_device_pm_domain = {
-+ .ops = {
-+ USE_PLATFORM_PM_SLEEP_OPS
-+ .suspend_noirq = _bman_portal_suspend_noirq,
-+ .resume_noirq = _bman_portal_resume_noirq,
-+ }
-+};
-+
-+struct bman_portal *bman_create_portal(
-+ struct bman_portal *portal,
-+ const struct bm_portal_config *config)
-+{
-+ struct bm_portal *__p;
-+ const struct bman_depletion *pools = &config->public_cfg.mask;
-+ int ret;
-+ u8 bpid = 0;
-+ char buf[16];
-+
-+ if (!portal) {
-+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
-+ if (!portal)
-+ return portal;
-+ portal->alloced = 1;
-+ } else
-+ portal->alloced = 0;
-+
-+ __p = &portal->p;
-+
-+ /* prep the low-level portal struct with the mapped addresses from the
-+ * config, everything that follows depends on it and "config" is more
-+ * for (de)reference... */
-+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
-+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
-+ if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
-+ pr_err("Bman RCR initialisation failed\n");
-+ goto fail_rcr;
-+ }
-+ if (bm_mc_init(__p)) {
-+ pr_err("Bman MC initialisation failed\n");
-+ goto fail_mc;
-+ }
-+ if (bm_isr_init(__p)) {
-+ pr_err("Bman ISR initialisation failed\n");
-+ goto fail_isr;
-+ }
-+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
-+ if (!portal->pools)
-+ goto fail_pools;
-+ portal->pools[0] = *pools;
-+ bman_depletion_init(portal->pools + 1);
-+ while (bpid < bman_pool_max) {
-+ /* Default to all BPIDs disabled, we enable as required at
-+ * run-time. */
-+ bm_isr_bscn_mask(__p, bpid, 0);
-+ bpid++;
-+ }
-+ portal->slowpoll = 0;
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ portal->rcri_owned = NULL;
-+#endif
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ raw_spin_lock_init(&portal->sharing_lock);
-+ portal->is_shared = config->public_cfg.is_shared;
-+ portal->sharing_redirect = NULL;
-+#endif
-+ sprintf(buf, "bportal-%u", config->public_cfg.index);
-+ portal->pdev = platform_device_alloc(buf, -1);
-+ if (!portal->pdev)
-+ goto fail_devalloc;
-+ portal->pdev->dev.pm_domain = &bman_portal_device_pm_domain;
-+ portal->pdev->dev.platform_data = portal;
-+ ret = platform_device_add(portal->pdev);
-+ if (ret)
-+ goto fail_devadd;
-+ memset(&portal->cb, 0, sizeof(portal->cb));
-+ /* Write-to-clear any stale interrupt status bits */
-+ bm_isr_disable_write(__p, 0xffffffff);
-+ portal->irq_sources = 0;
-+ bm_isr_enable_write(__p, portal->irq_sources);
-+ bm_isr_status_clear(__p, 0xffffffff);
-+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
-+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
-+ portal)) {
-+ pr_err("request_irq() failed\n");
-+ goto fail_irq;
-+ }
-+ if ((config->public_cfg.cpu != -1) &&
-+ irq_can_set_affinity(config->public_cfg.irq) &&
-+ irq_set_affinity(config->public_cfg.irq,
-+ cpumask_of(config->public_cfg.cpu))) {
-+ pr_err("irq_set_affinity() failed %s\n", portal->irqname);
-+ goto fail_affinity;
-+ }
-+
-+ /* Need RCR to be empty before continuing */
-+ ret = bm_rcr_get_fill(__p);
-+ if (ret) {
-+ pr_err("Bman RCR unclean\n");
-+ goto fail_rcr_empty;
-+ }
-+ /* Success */
-+ portal->config = config;
-+
-+ bm_isr_disable_write(__p, 0);
-+ bm_isr_uninhibit(__p);
-+ return portal;
-+fail_rcr_empty:
-+fail_affinity:
-+ free_irq(config->public_cfg.irq, portal);
-+fail_irq:
-+ platform_device_del(portal->pdev);
-+fail_devadd:
-+ platform_device_put(portal->pdev);
-+fail_devalloc:
-+ kfree(portal->pools);
-+fail_pools:
-+ bm_isr_finish(__p);
-+fail_isr:
-+ bm_mc_finish(__p);
-+fail_mc:
-+ bm_rcr_finish(__p);
-+fail_rcr:
-+ if (portal->alloced)
-+ kfree(portal);
-+ return NULL;
-+}
-+
-+struct bman_portal *bman_create_affine_portal(
-+ const struct bm_portal_config *config)
-+{
-+ struct bman_portal *portal;
-+
-+ portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
-+ portal = bman_create_portal(portal, config);
-+ if (portal) {
-+ spin_lock(&affine_mask_lock);
-+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
-+ spin_unlock(&affine_mask_lock);
-+ }
-+ return portal;
-+}
-+
-+
-+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
-+ int cpu)
-+{
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ struct bman_portal *p;
-+ p = &per_cpu(bman_affine_portal, cpu);
-+ BUG_ON(p->config);
-+ BUG_ON(p->is_shared);
-+ BUG_ON(!redirect->config->public_cfg.is_shared);
-+ p->irq_sources = 0;
-+ p->sharing_redirect = redirect;
-+ return p;
-+#else
-+ BUG();
-+ return NULL;
-+#endif
-+}
-+
-+void bman_destroy_portal(struct bman_portal *bm)
-+{
-+ const struct bm_portal_config *pcfg;
-+ pcfg = bm->config;
-+ bm_rcr_cce_update(&bm->p);
-+ bm_rcr_cce_update(&bm->p);
-+
-+ free_irq(pcfg->public_cfg.irq, bm);
-+
-+ kfree(bm->pools);
-+ bm_isr_finish(&bm->p);
-+ bm_mc_finish(&bm->p);
-+ bm_rcr_finish(&bm->p);
-+ bm->config = NULL;
-+ if (bm->alloced)
-+ kfree(bm);
-+}
-+
-+const struct bm_portal_config *bman_destroy_affine_portal(void)
-+{
-+ struct bman_portal *bm = get_raw_affine_portal();
-+ const struct bm_portal_config *pcfg;
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (bm->sharing_redirect) {
-+ bm->sharing_redirect = NULL;
-+ put_affine_portal();
-+ return NULL;
-+ }
-+ bm->is_shared = 0;
-+#endif
-+ pcfg = bm->config;
-+ bman_destroy_portal(bm);
-+ spin_lock(&affine_mask_lock);
-+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
-+ spin_unlock(&affine_mask_lock);
-+ put_affine_portal();
-+ return pcfg;
-+}
-+
-+/* When release logic waits on available RCR space, we need a global waitqueue
-+ * in the case of "affine" use (as the waits wake on different cpus which means
-+ * different portals - so we can't wait on any per-portal waitqueue). */
-+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
-+
-+static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
-+{
-+ struct bman_depletion tmp;
-+ u32 ret = is;
-+
-+ /* There is a gotcha to be aware of. If we do the query before clearing
-+ * the status register, we may miss state changes that occur between the
-+ * two. If we write to clear the status register before the query, the
-+ * cache-enabled query command may overtake the status register write
-+ * unless we use a heavyweight sync (which we don't want). Instead, we
-+ * write-to-clear the status register then *read it back* before doing
-+ * the query, hence the odd while loop with the 'is' accumulation. */
-+ if (is & BM_PIRQ_BSCN) {
-+ struct bm_mc_result *mcr;
-+ __maybe_unused unsigned long irqflags;
-+ unsigned int i, j;
-+ u32 __is;
-+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
-+ while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
-+ is |= __is;
-+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
-+ }
-+ is &= ~BM_PIRQ_BSCN;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ bm_mc_start(&p->p);
-+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
-+ while (!(mcr = bm_mc_result(&p->p)))
-+ cpu_relax();
-+ tmp = mcr->query.ds.state;
-+ tmp.__state[0] = be32_to_cpu(tmp.__state[0]);
-+ tmp.__state[1] = be32_to_cpu(tmp.__state[1]);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ for (i = 0; i < 2; i++) {
-+ int idx = i * 32;
-+ /* tmp is a mask of currently-depleted pools.
-+ * pools[0] is mask of those we care about.
-+ * pools[1] is our previous view (we only want to
-+ * be told about changes). */
-+ tmp.__state[i] &= p->pools[0].__state[i];
-+ if (tmp.__state[i] == p->pools[1].__state[i])
-+ /* fast-path, nothing to see, move along */
-+ continue;
-+ for (j = 0; j <= 31; j++, idx++) {
-+ struct bman_pool *pool = p->cb[idx];
-+ int b4 = bman_depletion_get(&p->pools[1], idx);
-+ int af = bman_depletion_get(&tmp, idx);
-+ if (b4 == af)
-+ continue;
-+ while (pool) {
-+ pool->params.cb(p, pool,
-+ pool->params.cb_ctx, af);
-+ pool = pool->next;
-+ }
-+ }
-+ }
-+ p->pools[1] = tmp;
-+ }
-+
-+ if (is & BM_PIRQ_RCRI) {
-+ __maybe_unused unsigned long irqflags;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ bm_rcr_cce_update(&p->p);
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ /* If waiting for sync, we only cancel the interrupt threshold
-+ * when the ring utilisation hits zero. */
-+ if (p->rcri_owned) {
-+ if (!bm_rcr_get_fill(&p->p)) {
-+ p->rcri_owned = NULL;
-+ bm_rcr_set_ithresh(&p->p, 0);
-+ }
-+ } else
-+#endif
-+ bm_rcr_set_ithresh(&p->p, 0);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ wake_up(&affine_queue);
-+ bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
-+ is &= ~BM_PIRQ_RCRI;
-+ }
-+
-+ /* There should be no status register bits left undefined */
-+ DPA_ASSERT(!is);
-+ return ret;
-+}
-+
-+const struct bman_portal_config *bman_get_portal_config(void)
-+{
-+ struct bman_portal *p = get_affine_portal();
-+ const struct bman_portal_config *ret = &p->config->public_cfg;
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(bman_get_portal_config);
-+
-+u32 bman_irqsource_get(void)
-+{
-+ struct bman_portal *p = get_raw_affine_portal();
-+ u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(bman_irqsource_get);
-+
-+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
-+{
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (p->sharing_redirect)
-+ return -EINVAL;
-+ else
-+#endif
-+ {
-+ __maybe_unused unsigned long irqflags;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
-+ bm_isr_enable_write(&p->p, p->irq_sources);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(bman_p_irqsource_add);
-+
-+int bman_irqsource_add(__maybe_unused u32 bits)
-+{
-+ struct bman_portal *p = get_raw_affine_portal();
-+ int ret = 0;
-+ ret = bman_p_irqsource_add(p, bits);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(bman_irqsource_add);
-+
-+int bman_irqsource_remove(u32 bits)
-+{
-+ struct bman_portal *p = get_raw_affine_portal();
-+ __maybe_unused unsigned long irqflags;
-+ u32 ier;
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (p->sharing_redirect) {
-+ put_affine_portal();
-+ return -EINVAL;
-+ }
-+#endif
-+ /* Our interrupt handler only processes+clears status register bits that
-+ * are in p->irq_sources. As we're trimming that mask, if one of them
-+ * were to assert in the status register just before we remove it from
-+ * the enable register, there would be an interrupt-storm when we
-+ * release the IRQ lock. So we wait for the enable register update to
-+ * take effect in h/w (by reading it back) and then clear all other bits
-+ * in the status register. Ie. we clear them from ISR once it's certain
-+ * IER won't allow them to reassert. */
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ bits &= BM_PIRQ_VISIBLE;
-+ clear_bits(bits, &p->irq_sources);
-+ bm_isr_enable_write(&p->p, p->irq_sources);
-+ ier = bm_isr_enable_read(&p->p);
-+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
-+ * data-dependency, ie. to protect against re-ordering. */
-+ bm_isr_status_clear(&p->p, ~ier);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return 0;
-+}
-+EXPORT_SYMBOL(bman_irqsource_remove);
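
A hedged sketch of how a client might use the two calls above to switch its affine portal between interrupt-driven and polled operation; the source mask mirrors the one passed to bman_p_irqsource_add() by init_pcfg() earlier in this patch, and the bool flag is purely illustrative:

    static void example_toggle_irq_mode(bool polled)
    {
        if (polled)
            bman_irqsource_remove(BM_PIRQ_RCRI | BM_PIRQ_BSCN);
        else
            bman_irqsource_add(BM_PIRQ_RCRI | BM_PIRQ_BSCN);
    }
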
-+
-+const cpumask_t *bman_affine_cpus(void)
-+{
-+ return &affine_mask;
-+}
-+EXPORT_SYMBOL(bman_affine_cpus);
-+
-+u32 bman_poll_slow(void)
-+{
-+ struct bman_portal *p = get_poll_portal();
-+ u32 ret;
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (unlikely(p->sharing_redirect))
-+ ret = (u32)-1;
-+ else
-+#endif
-+ {
-+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
-+ ret = __poll_portal_slow(p, is);
-+ bm_isr_status_clear(&p->p, ret);
-+ }
-+ put_poll_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(bman_poll_slow);
-+
-+/* Legacy wrapper */
-+void bman_poll(void)
-+{
-+ struct bman_portal *p = get_poll_portal();
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (unlikely(p->sharing_redirect))
-+ goto done;
-+#endif
-+ if (!(p->slowpoll--)) {
-+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
-+ u32 active = __poll_portal_slow(p, is);
-+ if (active)
-+ p->slowpoll = SLOW_POLL_BUSY;
-+ else
-+ p->slowpoll = SLOW_POLL_IDLE;
-+ }
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+done:
-+#endif
-+ put_poll_portal();
-+}
-+EXPORT_SYMBOL(bman_poll);
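
The SLOW_POLL_IDLE/SLOW_POLL_BUSY balancing only matters when an application drives the portal by polling rather than interrupts. A hedged sketch of such a run-to-completion loop (the qman_poll() companion call is assumed from the comment above; it is not defined in this file):

    static void example_core_loop(void)
    {
        for (;;) {
            qman_poll();   /* frame-queue housekeeping (assumed API) */
            bman_poll();   /* buffer-pool slow-path housekeeping     */
            /* ... application fast-path work ... */
        }
    }
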
-+
-+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
-+
-+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
-+{
-+ struct bman_pool *pool = NULL;
-+ u32 bpid;
-+
-+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
-+ int ret = bman_alloc_bpid(&bpid);
-+ if (ret)
-+ return NULL;
-+ } else {
-+ if (params->bpid >= bman_pool_max)
-+ return NULL;
-+ bpid = params->bpid;
-+ }
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
-+ int ret = bm_pool_set(bpid, params->thresholds);
-+ if (ret)
-+ goto err;
-+ }
-+#else
-+ if (params->flags & BMAN_POOL_FLAG_THRESH)
-+ goto err;
-+#endif
-+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
-+ if (!pool)
-+ goto err;
-+ pool->sp = NULL;
-+ pool->sp_fill = 0;
-+ pool->params = *params;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ atomic_set(&pool->in_use, 1);
-+#endif
-+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
-+ pool->params.bpid = bpid;
-+ if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
-+ pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
-+ GFP_KERNEL);
-+ if (!pool->sp)
-+ goto err;
-+ }
-+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
-+ struct bman_portal *p = get_affine_portal();
-+ if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
-+ pr_err("Depletion events disabled for bpid %d\n", bpid);
-+ goto err;
-+ }
-+ depletion_link(p, pool);
-+ put_affine_portal();
-+ }
-+ return pool;
-+err:
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+ if (params->flags & BMAN_POOL_FLAG_THRESH)
-+ bm_pool_set(bpid, zero_thresholds);
-+#endif
-+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
-+ bman_release_bpid(bpid);
-+ if (pool) {
-+ kfree(pool->sp);
-+ kfree(pool);
-+ }
-+ return NULL;
-+}
-+EXPORT_SYMBOL(bman_new_pool);
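
A hedged usage sketch for the pool API above: create a dynamically-numbered pool that reports depletion entry/exit. The callback prototype is inferred from the p->cb[] dispatch in __poll_portal_slow(), and the field names come from the params usage in this file, so treat the details as illustrative:

    static void my_depletion_cb(struct bman_portal *portal,
                                struct bman_pool *pool,
                                void *cb_ctx, int depleted)
    {
        pr_info("bpid %u %s depletion\n", bman_get_params(pool)->bpid,
                depleted ? "entered" : "exited");
    }

    static struct bman_pool *example_pool_create(void)
    {
        struct bman_pool_params params = {
            .flags = BMAN_POOL_FLAG_DYNAMIC_BPID |
                     BMAN_POOL_FLAG_DEPLETION,
            .cb = my_depletion_cb,
            .cb_ctx = NULL,
        };

        return bman_new_pool(&params);  /* NULL on failure */
    }
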
-+
-+void bman_free_pool(struct bman_pool *pool)
-+{
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
-+ bm_pool_set(pool->params.bpid, zero_thresholds);
-+#endif
-+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
-+ depletion_unlink(pool);
-+ if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
-+ if (pool->sp_fill)
-+ pr_err("Stockpile not flushed, has %u in bpid %u.\n",
-+ pool->sp_fill, pool->params.bpid);
-+ kfree(pool->sp);
-+ pool->sp = NULL;
-+ pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
-+ }
-+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
-+ bman_release_bpid(pool->params.bpid);
-+ kfree(pool);
-+}
-+EXPORT_SYMBOL(bman_free_pool);
-+
-+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
-+{
-+ return &pool->params;
-+}
-+EXPORT_SYMBOL(bman_get_params);
-+
-+static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
-+{
-+ if (avail)
-+ bm_rcr_cce_prefetch(&p->p);
-+ else
-+ bm_rcr_cce_update(&p->p);
-+}
-+
-+int bman_rcr_is_empty(void)
-+{
-+ __maybe_unused unsigned long irqflags;
-+ struct bman_portal *p = get_affine_portal();
-+ u8 avail;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ update_rcr_ci(p, 0);
-+ avail = bm_rcr_get_fill(&p->p);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return avail == 0;
-+}
-+EXPORT_SYMBOL(bman_rcr_is_empty);
-+
-+static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ __maybe_unused struct bman_pool *pool,
-+#endif
-+ __maybe_unused unsigned long *irqflags,
-+ __maybe_unused u32 flags)
-+{
-+ struct bm_rcr_entry *r;
-+ u8 avail;
-+
-+ *p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(*p, (*irqflags));
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
-+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
-+ if ((*p)->rcri_owned) {
-+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
-+ put_affine_portal();
-+ return NULL;
-+ }
-+ (*p)->rcri_owned = pool;
-+ }
-+#endif
-+ avail = bm_rcr_get_avail(&(*p)->p);
-+ if (avail < 2)
-+ update_rcr_ci(*p, avail);
-+ r = bm_rcr_start(&(*p)->p);
-+ if (unlikely(!r)) {
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
-+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
-+ (*p)->rcri_owned = NULL;
-+#endif
-+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
-+ put_affine_portal();
-+ }
-+ return r;
-+}
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
-+ struct bman_pool *pool,
-+ __maybe_unused unsigned long *irqflags,
-+ u32 flags)
-+{
-+ struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
-+ if (!rcr)
-+ bm_rcr_set_ithresh(&(*p)->p, 1);
-+ return rcr;
-+}
-+
-+static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
-+ struct bman_pool *pool,
-+ __maybe_unused unsigned long *irqflags,
-+ u32 flags)
-+{
-+ struct bm_rcr_entry *rcr;
-+#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ pool = NULL;
-+#endif
-+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
-+ /* NB: return NULL if signal occurs before completion. Signal
-+ * can occur during return. Caller must check for signal */
-+ wait_event_interruptible(affine_queue,
-+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
-+ else
-+ wait_event(affine_queue,
-+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
-+ return rcr;
-+}
-+#endif
-+
-+static inline int __bman_release(struct bman_pool *pool,
-+ const struct bm_buffer *bufs, u8 num, u32 flags)
-+{
-+ struct bman_portal *p;
-+ struct bm_rcr_entry *r;
-+ __maybe_unused unsigned long irqflags;
-+ u32 i = num - 1;
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & BMAN_RELEASE_FLAG_WAIT)
-+ r = wait_rel_start(&p, pool, &irqflags, flags);
-+ else
-+ r = try_rel_start(&p, pool, &irqflags, flags);
-+#else
-+ r = try_rel_start(&p, &irqflags, flags);
-+#endif
-+ if (!r)
-+ return -EBUSY;
-+ /* We can copy all but the first entry, as this can trigger badness
-+ * with the valid-bit. Use the overlay to mask the verb byte. */
-+ r->bufs[0].opaque =
-+ ((cpu_to_be64((bufs[0].opaque |
-+ ((u64)pool->params.bpid<<48))
-+ & 0x00ffffffffffffff)));
-+ if (i) {
-+ for (i = 1; i < num; i++)
-+ r->bufs[i].opaque =
-+ cpu_to_be64(bufs[i].opaque);
-+ }
-+
-+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
-+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ /* if we wish to sync we need to set the threshold after h/w sees the
-+ * new ring entry. As we're mixing cache-enabled and cache-inhibited
-+ * accesses, this requires a heavy-weight sync. */
-+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
-+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
-+ hwsync();
-+ bm_rcr_set_ithresh(&p->p, 1);
-+ }
-+#endif
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
-+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
-+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
-+ /* NB: return success even if signal occurs before
-+ * condition is true. pvb_commit guarantees success */
-+ wait_event_interruptible(affine_queue,
-+ (p->rcri_owned != pool));
-+ else
-+ wait_event(affine_queue, (p->rcri_owned != pool));
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
-+ u32 flags)
-+{
-+ int ret;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (!num || (num > 8))
-+ return -EINVAL;
-+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
-+ return -EINVAL;
-+#endif
-+ /* Without stockpile, this API is a pass-through to the h/w operation */
-+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
-+ return __bman_release(pool, bufs, num, flags);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (!atomic_dec_and_test(&pool->in_use)) {
-+ pr_crit("Parallel attempts to enter bman_released() detected.");
-+ panic("only one instance of bman_released/acquired allowed");
-+ }
-+#endif
-+ /* Two movements of buffers are possible, and can occur in either order.
-+ * A: moving buffers from the caller to the stockpile.
-+ * B: moving buffers from the stockpile to hardware.
-+ * Order 1: if there is already enough space in the stockpile for A
-+ * then we want to do A first, and only do B if we trigger the
-+ * stockpile-high threshold.
-+ * Order 2: if there is not enough space in the stockpile for A, then
-+ * we want to do B first, then do A if B had succeeded. However in this
-+ * case B is dependent on how many buffers the user needs to release,
-+ * not the stockpile-high threshold.
-+ * Due to the different handling of B between the two cases, putting A
-+ * and B in a while() loop would require quite obscure logic, so handle
-+ * the different sequences explicitly. */
-+ if ((pool->sp_fill + num) <= BMAN_STOCKPILE_SZ) {
-+ /* Order 1: do A */
-+ copy_words(pool->sp + pool->sp_fill, bufs,
-+ sizeof(struct bm_buffer) * num);
-+ pool->sp_fill += num;
-+ /* do B relative to STOCKPILE_HIGH */
-+ while (pool->sp_fill >= BMAN_STOCKPILE_HIGH) {
-+ ret = __bman_release(pool,
-+ pool->sp + (pool->sp_fill - 8), 8,
-+ flags);
-+ if (ret >= 0)
-+ pool->sp_fill -= 8;
-+ }
-+ } else {
-+ /* Order 2: do B relative to 'num' */
-+ do {
-+ ret = __bman_release(pool,
-+ pool->sp + (pool->sp_fill - 8), 8,
-+ flags);
-+ if (ret < 0)
-+ /* failure */
-+ goto release_done;
-+ pool->sp_fill -= 8;
-+ } while ((pool->sp_fill + num) > BMAN_STOCKPILE_SZ);
-+ /* do A */
-+ copy_words(pool->sp + pool->sp_fill, bufs,
-+ sizeof(struct bm_buffer) * num);
-+ pool->sp_fill += num;
-+ }
-+ /* success */
-+ ret = 0;
-+release_done:
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ atomic_inc(&pool->in_use);
-+#endif
-+ return ret;
-+}
-+EXPORT_SYMBOL(bman_release);
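
A hedged sketch of the release side: hand two buffers back to a pool. The opaque field carries the buffer address in cpu endianness (the bpid and the big-endian conversion are applied inside __bman_release() above); the dma addresses here are placeholders:

    static int example_release(struct bman_pool *pool,
                               dma_addr_t a, dma_addr_t b)
    {
        struct bm_buffer bufs[2];

        bufs[0].opaque = a;
        bufs[1].opaque = b;

        /* BMAN_RELEASE_FLAG_WAIT: wait for RCR space instead of -EBUSY */
        return bman_release(pool, bufs, 2, BMAN_RELEASE_FLAG_WAIT);
    }
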
-+
-+static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
-+ u8 num)
-+{
-+ struct bman_portal *p = get_affine_portal();
-+ struct bm_mc_command *mcc;
-+ struct bm_mc_result *mcr;
-+ __maybe_unused unsigned long irqflags;
-+ int ret, i;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ mcc = bm_mc_start(&p->p);
-+ mcc->acquire.bpid = pool->params.bpid;
-+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
-+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
-+ while (!(mcr = bm_mc_result(&p->p)))
-+ cpu_relax();
-+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
-+ if (bufs) {
-+ for (i = 0; i < num; i++)
-+ bufs[i].opaque =
-+ be64_to_cpu(mcr->acquire.bufs[i].opaque);
-+ }
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (ret != num)
-+ ret = -ENOMEM;
-+ return ret;
-+}
-+
-+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
-+ u32 flags)
-+{
-+ int ret;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (!num || (num > 8))
-+ return -EINVAL;
-+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
-+ return -EINVAL;
-+#endif
-+ /* Without stockpile, this API is a pass-through to the h/w operation */
-+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
-+ return __bman_acquire(pool, bufs, num);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (!atomic_dec_and_test(&pool->in_use)) {
-+ pr_crit("Parallel attempts to enter bman_acquire() detected.");
-+ panic("only one instance of bman_released/acquired allowed");
-+ }
-+#endif
-+ /* Two movements of buffers are possible, and can occur in either order.
-+ * A: moving buffers from stockpile to the caller.
-+ * B: moving buffers from hardware to the stockpile.
-+ * Order 1: if there are already enough buffers in the stockpile for A
-+ * then we want to do A first, and only do B if we trigger the
-+ * stockpile-low threshold.
-+ * Order 2: if there are not enough buffers in the stockpile for A,
-+ * then we want to do B first, then do A if B had succeeded. However in
-+ * this case B is dependent on how many buffers the user needs, not the
-+ * stockpile-low threshold.
-+ * Due to the different handling of B between the two cases, putting A
-+ * and B in a while() loop would require quite obscure logic, so handle
-+ * the different sequences explicitly. */
-+ if (num <= pool->sp_fill) {
-+ /* Order 1: do A */
-+ copy_words(bufs, pool->sp + (pool->sp_fill - num),
-+ sizeof(struct bm_buffer) * num);
-+ pool->sp_fill -= num;
-+ /* do B relative to STOCKPILE_LOW */
-+ while (pool->sp_fill <= BMAN_STOCKPILE_LOW) {
-+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
-+ if (ret < 0)
-+ ret = __bman_acquire(pool,
-+ pool->sp + pool->sp_fill, 1);
-+ if (ret < 0)
-+ break;
-+ pool->sp_fill += ret;
-+ }
-+ } else {
-+ /* Order 2: do B relative to 'num' */
-+ do {
-+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
-+ if (ret < 0)
-+ ret = __bman_acquire(pool,
-+ pool->sp + pool->sp_fill, 1);
-+ if (ret < 0)
-+ /* failure */
-+ goto acquire_done;
-+ pool->sp_fill += ret;
-+ } while (pool->sp_fill < num);
-+ /* do A */
-+ copy_words(bufs, pool->sp + (pool->sp_fill - num),
-+ sizeof(struct bm_buffer) * num);
-+ pool->sp_fill -= num;
-+ }
-+ /* success */
-+ ret = num;
-+acquire_done:
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ atomic_inc(&pool->in_use);
-+#endif
-+ return ret;
-+}
-+EXPORT_SYMBOL(bman_acquire);
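
A hedged sketch of the acquire side: drain buffers from a pool into a caller-supplied array, mirroring the try-8-then-try-1 fallback used by the stockpile refill loop above, and stopping once the pool runs dry or fewer than 8 slots remain in the array:

    static int example_drain(struct bman_pool *pool, struct bm_buffer *bufs,
                             int max)
    {
        int total = 0;

        while (max - total >= 8) {
            int n = bman_acquire(pool, bufs + total, 8, 0);

            if (n < 0)
                n = bman_acquire(pool, bufs + total, 1, 0);
            if (n < 0)
                break;          /* pool is empty */
            total += n;
        }
        return total;          /* number of buffers now held in bufs[] */
    }
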
-+
-+int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
-+{
-+ u8 num;
-+ int ret;
-+
-+ while (pool->sp_fill) {
-+ num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
-+ ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
-+ num, flags);
-+ if (ret)
-+ return ret;
-+ pool->sp_fill -= num;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(bman_flush_stockpile);
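
A hedged teardown sketch for a stockpiling pool: push any locally-held buffers back to hardware before freeing the pool object (bman_free_pool() only warns, it does not flush, if the stockpile is still populated):

    static void example_pool_teardown(struct bman_pool *pool)
    {
        if (bman_flush_stockpile(pool, BMAN_RELEASE_FLAG_WAIT))
            pr_err("could not flush stockpile\n");
        bman_free_pool(pool);
    }
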
-+
-+int bman_query_pools(struct bm_pool_state *state)
-+{
-+ struct bman_portal *p = get_affine_portal();
-+ struct bm_mc_result *mcr;
-+ __maybe_unused unsigned long irqflags;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ bm_mc_start(&p->p);
-+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
-+ while (!(mcr = bm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
-+ *state = mcr->query;
-+ state->as.state.__state[0] = be32_to_cpu(state->as.state.__state[0]);
-+ state->as.state.__state[1] = be32_to_cpu(state->as.state.__state[1]);
-+ state->ds.state.__state[0] = be32_to_cpu(state->ds.state.__state[0]);
-+ state->ds.state.__state[1] = be32_to_cpu(state->ds.state.__state[1]);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return 0;
-+}
-+EXPORT_SYMBOL(bman_query_pools);
-+
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+u32 bman_query_free_buffers(struct bman_pool *pool)
-+{
-+ return bm_pool_free_buffers(pool->params.bpid);
-+}
-+EXPORT_SYMBOL(bman_query_free_buffers);
-+
-+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
-+{
-+ u32 bpid;
-+
-+ bpid = bman_get_params(pool)->bpid;
-+
-+ return bm_pool_set(bpid, thresholds);
-+}
-+EXPORT_SYMBOL(bman_update_pool_thresholds);
-+#endif
-+
-+int bman_shutdown_pool(u32 bpid)
-+{
-+ struct bman_portal *p = get_affine_portal();
-+ __maybe_unused unsigned long irqflags;
-+ int ret;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ ret = bm_shutdown_pool(&p->p, bpid);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(bman_shutdown_pool);
-+
-+const struct bm_portal_config *bman_get_bm_portal_config(
-+ struct bman_portal *portal)
-+{
-+ return portal->sharing_redirect ? NULL : portal->config;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_low.h
-@@ -0,0 +1,565 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "bman_private.h"
-+
-+/***************************/
-+/* Portal register assists */
-+/***************************/
-+
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+
-+/* Cache-inhibited register offsets */
-+#define BM_REG_RCR_PI_CINH 0x0000
-+#define BM_REG_RCR_CI_CINH 0x0004
-+#define BM_REG_RCR_ITR 0x0008
-+#define BM_REG_CFG 0x0100
-+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
-+#define BM_REG_ISR 0x0e00
-+#define BM_REG_IIR 0x0e0c
-+
-+/* Cache-enabled register offsets */
-+#define BM_CL_CR 0x0000
-+#define BM_CL_RR0 0x0100
-+#define BM_CL_RR1 0x0140
-+#define BM_CL_RCR 0x1000
-+#define BM_CL_RCR_PI_CENA 0x3000
-+#define BM_CL_RCR_CI_CENA 0x3100
-+
-+#endif
-+
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+
-+/* Cache-inhibited register offsets */
-+#define BM_REG_RCR_PI_CINH 0x3000
-+#define BM_REG_RCR_CI_CINH 0x3100
-+#define BM_REG_RCR_ITR 0x3200
-+#define BM_REG_CFG 0x3300
-+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
-+#define BM_REG_ISR 0x3e00
-+#define BM_REG_IIR 0x3ec0
-+
-+/* Cache-enabled register offsets */
-+#define BM_CL_CR 0x0000
-+#define BM_CL_RR0 0x0100
-+#define BM_CL_RR1 0x0140
-+#define BM_CL_RCR 0x1000
-+#define BM_CL_RCR_PI_CENA 0x3000
-+#define BM_CL_RCR_CI_CENA 0x3100
-+
-+#endif
-+
-+/* BTW, the drivers (and h/w programming model) already obtain the required
-+ * synchronisation for portal accesses via lwsync(), hwsync(), and
-+ * data-dependencies. Use of barrier()s or other order-preserving primitives
-+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
-+ * simply ensure that the compiler treats the portal registers as volatile (ie.
-+ * non-coherent). */
-+
-+/* Cache-inhibited register access. */
-+#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o)))
-+#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
-+ (bm)->addr_ci + (o));
-+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
-+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
-+
-+/* Cache-enabled (index) register access */
-+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
-+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
-+#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o)))
-+#define __bm_cl_out(bm, o, val) \
-+ do { \
-+ u32 *__tmpclout = (bm)->addr_ce + (o); \
-+ __raw_writel(cpu_to_be32(val), __tmpclout); \
-+ dcbf(__tmpclout); \
-+ } while (0)
-+#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
-+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
-+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
-+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
-+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
-+#define bm_cl_invalidate(reg)\
-+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
-+
-+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
-+ * analysis, look at using the "extra" bit in the ring index registers to avoid
-+ * cyclic issues. */
-+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
-+{
-+ /* 'first' is included, 'last' is excluded */
-+ if (first <= last)
-+ return last - first;
-+ return ringsize + last - first;
-+}
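
A worked example of the wrap case, written as an illustrative self-check rather than driver code: with the 8-entry RCR ring used below, a consumer index of 6 and a producer index of 2 mean the ring has wrapped and holds 8 + 2 - 6 = 4 entries:

    static void bm_cyc_diff_example(void)
    {
        BUG_ON(bm_cyc_diff(8, 6, 2) != 4);  /* wrapped: 8 + 2 - 6 */
        BUG_ON(bm_cyc_diff(8, 2, 6) != 4);  /* simple:  6 - 2     */
    }
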
-+
-+/* Portal modes.
-+ * Enum types;
-+ * pmode == production mode
-+ * cmode == consumption mode,
-+ * Enum values use 3 letter codes. First letter matches the portal mode,
-+ * remaining two letters indicate;
-+ * ci == cache-inhibited portal register
-+ * ce == cache-enabled portal register
-+ * vb == in-band valid-bit (cache-enabled)
-+ */
-+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
-+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
-+ bm_rcr_pce = 1, /* PI index, cache-enabled */
-+ bm_rcr_pvb = 2 /* valid-bit */
-+};
-+enum bm_rcr_cmode { /* s/w-only */
-+ bm_rcr_cci, /* CI index, cache-inhibited */
-+ bm_rcr_cce /* CI index, cache-enabled */
-+};
-+
-+
-+/* ------------------------- */
-+/* --- Portal structures --- */
-+
-+#define BM_RCR_SIZE 8
-+
-+struct bm_rcr {
-+ struct bm_rcr_entry *ring, *cursor;
-+ u8 ci, available, ithresh, vbit;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ u32 busy;
-+ enum bm_rcr_pmode pmode;
-+ enum bm_rcr_cmode cmode;
-+#endif
-+};
-+
-+struct bm_mc {
-+ struct bm_mc_command *cr;
-+ struct bm_mc_result *rr;
-+ u8 rridx, vbit;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ enum {
-+ /* Can only be _mc_start()ed */
-+ mc_idle,
-+ /* Can only be _mc_commit()ed or _mc_abort()ed */
-+ mc_user,
-+ /* Can only be _mc_retry()ed */
-+ mc_hw
-+ } state;
-+#endif
-+};
-+
-+struct bm_addr {
-+ void __iomem *addr_ce; /* cache-enabled */
-+ void __iomem *addr_ci; /* cache-inhibited */
-+};
-+
-+struct bm_portal {
-+ struct bm_addr addr;
-+ struct bm_rcr rcr;
-+ struct bm_mc mc;
-+ struct bm_portal_config config;
-+} ____cacheline_aligned;
-+
-+
-+/* --------------- */
-+/* --- RCR API --- */
-+
-+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
-+#define RCR_CARRYCLEAR(p) \
-+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
-+
-+/* Bit-wise logic to convert a ring pointer to a ring index */
-+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
-+{
-+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
-+}
-+
-+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
-+static inline void RCR_INC(struct bm_rcr *rcr)
-+{
-+ /* NB: this is odd-looking, but experiments show that it generates
-+ * fast code with essentially no branching overheads. We increment to
-+ * the next RCR pointer and handle overflow and 'vbit'. */
-+ struct bm_rcr_entry *partial = rcr->cursor + 1;
-+ rcr->cursor = RCR_CARRYCLEAR(partial);
-+ if (partial != rcr->cursor)
-+ rcr->vbit ^= BM_RCR_VERB_VBIT;
-+}
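
To make the carry-clear arithmetic concrete: each RCR entry is 64 bytes, so the 8-entry ring spans 512 bytes and the "carry bit" masked off above is bit 0x200 of the ring address. The illustrative sketch below (not driver code) assumes that bit is clear in the ring's base address, which holds for the real cache-enabled mapping at BM_CL_RCR (offset 0x1000):

    static void rcr_wrap_example(struct bm_rcr *rcr, struct bm_rcr_entry *ring)
    {
        rcr->cursor = ring + (BM_RCR_SIZE - 1);  /* index 7, last entry */
        rcr->vbit = BM_RCR_VERB_VBIT;
        RCR_INC(rcr);
        /* rcr->cursor is back at ring (index 0) and rcr->vbit is now 0 */
    }
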
-+
-+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
-+ __maybe_unused enum bm_rcr_cmode cmode)
-+{
-+ /* This use of 'register', as well as all other occurrences, is because
-+ * it has been observed to generate much faster code with gcc than is
-+ * otherwise the case. */
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ u32 cfg;
-+ u8 pi;
-+
-+ rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
-+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
-+
-+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
-+ rcr->cursor = rcr->ring + pi;
-+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
-+ rcr->available = BM_RCR_SIZE - 1
-+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
-+ rcr->ithresh = bm_in(RCR_ITR);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ rcr->busy = 0;
-+ rcr->pmode = pmode;
-+ rcr->cmode = cmode;
-+#endif
-+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
-+ bm_out(CFG, cfg);
-+ return 0;
-+}
-+
-+static inline void bm_rcr_finish(struct bm_portal *portal)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
-+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
-+ DPA_ASSERT(!rcr->busy);
-+ if (pi != RCR_PTR2IDX(rcr->cursor))
-+		pr_crit("losing uncommitted RCR entries\n");
-+ if (ci != rcr->ci)
-+ pr_crit("missing existing RCR completions\n");
-+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
-+ pr_crit("RCR destroyed unquiesced\n");
-+}
-+
-+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ DPA_ASSERT(!rcr->busy);
-+ if (!rcr->available)
-+ return NULL;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ rcr->busy = 1;
-+#endif
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+ dcbz_64(rcr->cursor);
-+#endif
-+ return rcr->cursor;
-+}
-+
-+static inline void bm_rcr_abort(struct bm_portal *portal)
-+{
-+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
-+ DPA_ASSERT(rcr->busy);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ rcr->busy = 0;
-+#endif
-+}
-+
-+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
-+ struct bm_portal *portal, u8 myverb)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ DPA_ASSERT(rcr->busy);
-+ DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
-+ if (rcr->available == 1)
-+ return NULL;
-+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
-+ dcbf_64(rcr->cursor);
-+ RCR_INC(rcr);
-+ rcr->available--;
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+ dcbz_64(rcr->cursor);
-+#endif
-+ return rcr->cursor;
-+}
-+
-+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ DPA_ASSERT(rcr->busy);
-+ DPA_ASSERT(rcr->pmode == bm_rcr_pci);
-+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
-+ RCR_INC(rcr);
-+ rcr->available--;
-+ hwsync();
-+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ rcr->busy = 0;
-+#endif
-+}
-+
-+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
-+{
-+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
-+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
-+ bm_cl_invalidate(RCR_PI);
-+ bm_cl_touch_rw(RCR_PI);
-+}
-+
-+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ DPA_ASSERT(rcr->busy);
-+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
-+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
-+ RCR_INC(rcr);
-+ rcr->available--;
-+ lwsync();
-+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ rcr->busy = 0;
-+#endif
-+}
-+
-+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ struct bm_rcr_entry *rcursor;
-+ DPA_ASSERT(rcr->busy);
-+ DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
-+ lwsync();
-+ rcursor = rcr->cursor;
-+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
-+ dcbf_64(rcursor);
-+ RCR_INC(rcr);
-+ rcr->available--;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ rcr->busy = 0;
-+#endif
-+}
-+
-+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ u8 diff, old_ci = rcr->ci;
-+ DPA_ASSERT(rcr->cmode == bm_rcr_cci);
-+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
-+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
-+ rcr->available += diff;
-+ return diff;
-+}
-+
-+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
-+{
-+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
-+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
-+ bm_cl_touch_ro(RCR_CI);
-+}
-+
-+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ u8 diff, old_ci = rcr->ci;
-+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
-+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
-+ bm_cl_invalidate(RCR_CI);
-+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
-+ rcr->available += diff;
-+ return diff;
-+}
-+
-+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ return rcr->ithresh;
-+}
-+
-+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ rcr->ithresh = ithresh;
-+ bm_out(RCR_ITR, ithresh);
-+}
-+
-+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ return rcr->available;
-+}
-+
-+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
-+{
-+ register struct bm_rcr *rcr = &portal->rcr;
-+ return BM_RCR_SIZE - 1 - rcr->available;
-+}
-+
-+
-+/* ------------------------------ */
-+/* --- Management command API --- */
-+
-+static inline int bm_mc_init(struct bm_portal *portal)
-+{
-+ register struct bm_mc *mc = &portal->mc;
-+ mc->cr = portal->addr.addr_ce + BM_CL_CR;
-+ mc->rr = portal->addr.addr_ce + BM_CL_RR0;
-+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
-+ BM_MCC_VERB_VBIT) ? 0 : 1;
-+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = mc_idle;
-+#endif
-+ return 0;
-+}
-+
-+static inline void bm_mc_finish(struct bm_portal *portal)
-+{
-+ __maybe_unused register struct bm_mc *mc = &portal->mc;
-+ DPA_ASSERT(mc->state == mc_idle);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (mc->state != mc_idle)
-+ pr_crit("Losing incomplete MC command\n");
-+#endif
-+}
-+
-+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
-+{
-+ register struct bm_mc *mc = &portal->mc;
-+ DPA_ASSERT(mc->state == mc_idle);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = mc_user;
-+#endif
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+ dcbz_64(mc->cr);
-+#endif
-+ return mc->cr;
-+}
-+
-+static inline void bm_mc_abort(struct bm_portal *portal)
-+{
-+ __maybe_unused register struct bm_mc *mc = &portal->mc;
-+ DPA_ASSERT(mc->state == mc_user);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = mc_idle;
-+#endif
-+}
-+
-+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
-+{
-+ register struct bm_mc *mc = &portal->mc;
-+ struct bm_mc_result *rr = mc->rr + mc->rridx;
-+ DPA_ASSERT(mc->state == mc_user);
-+ lwsync();
-+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
-+ dcbf(mc->cr);
-+ dcbit_ro(rr);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = mc_hw;
-+#endif
-+}
-+
-+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
-+{
-+ register struct bm_mc *mc = &portal->mc;
-+ struct bm_mc_result *rr = mc->rr + mc->rridx;
-+ DPA_ASSERT(mc->state == mc_hw);
-+ /* The inactive response register's verb byte always returns zero until
-+ * its command is submitted and completed. This includes the valid-bit,
-+ * in case you were wondering... */
-+ if (!__raw_readb(&rr->verb)) {
-+ dcbit_ro(rr);
-+ return NULL;
-+ }
-+ mc->rridx ^= 1;
-+ mc->vbit ^= BM_MCC_VERB_VBIT;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = mc_idle;
-+#endif
-+ return rr;
-+}
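/* A minimal usage sketch (caller, portal and verb are placeholders, not from
 * the patch) of how the three helpers above are normally chained: start a
 * command, commit it with the chosen verb, then poll for the result. The
 * pool-shutdown routine further below follows exactly this shape. */
static inline struct bm_mc_result *example_mc_exec(struct bm_portal *portal,
						   u8 verb)
{
	struct bm_mc_command *cmd = bm_mc_start(portal);
	struct bm_mc_result *res;

	/* ...fill in the fields of 'cmd' that the chosen verb requires... */
	(void)cmd;
	bm_mc_commit(portal, verb);
	while (!(res = bm_mc_result(portal)))
		cpu_relax();
	return res;
}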
-+
-+
-+/* ------------------------------------- */
-+/* --- Portal interrupt register API --- */
-+
-+static inline int bm_isr_init(__always_unused struct bm_portal *portal)
-+{
-+ return 0;
-+}
-+
-+static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
-+{
-+}
-+
-+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
-+#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
-+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
-+ int enable)
-+{
-+ u32 val;
-+ DPA_ASSERT(bpid < bman_pool_max);
-+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
-+ val = __bm_in(&portal->addr, SCN_REG(bpid));
-+ if (enable)
-+ val |= SCN_BIT(bpid);
-+ else
-+ val &= ~SCN_BIT(bpid);
-+ __bm_out(&portal->addr, SCN_REG(bpid), val);
-+}
-+
-+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
-+{
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
-+#else
-+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
-+#endif
-+}
-+
-+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
-+ u32 val)
-+{
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
-+#else
-+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
-+#endif
-+}
-+
-+/* Buffer Pool Cleanup */
-+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
-+{
-+ struct bm_mc_command *bm_cmd;
-+ struct bm_mc_result *bm_res;
-+
-+ int aq_count = 0;
-+ bool stop = false;
-+ while (!stop) {
-+ /* Acquire buffers until empty */
-+ bm_cmd = bm_mc_start(p);
-+ bm_cmd->acquire.bpid = bpid;
-+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
-+ while (!(bm_res = bm_mc_result(p)))
-+ cpu_relax();
-+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
-+ /* Pool is empty */
-+ /* TBD : Should we do a few extra iterations in
-+			   case some other blocks keep buffers 'on deck',
-+ which may also be problematic */
-+ stop = true;
-+ } else
-+ ++aq_count;
-+ }
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_private.h
-@@ -0,0 +1,166 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "dpa_sys.h"
-+#include <linux/fsl_bman.h>
-+
-+/* Revision info (for errata and feature handling) */
-+#define BMAN_REV10 0x0100
-+#define BMAN_REV20 0x0200
-+#define BMAN_REV21 0x0201
-+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
-+extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */
-+
-+/*
-+ * Global variable for the max pool number supported by this Bman version
-+ */
-+extern u16 bman_pool_max;
-+
-+/* used by CCSR and portal interrupt code */
-+enum bm_isr_reg {
-+ bm_isr_status = 0,
-+ bm_isr_enable = 1,
-+ bm_isr_disable = 2,
-+ bm_isr_inhibit = 3
-+};
-+
-+struct bm_portal_config {
-+ /* Corenet portal addresses;
-+ * [0]==cache-enabled, [1]==cache-inhibited. */
-+ __iomem void *addr_virt[2];
-+ struct resource addr_phys[2];
-+ /* Allow these to be joined in lists */
-+ struct list_head list;
-+ /* User-visible portal configuration settings */
-+ struct bman_portal_config public_cfg;
-+ /* power management saved data */
-+ u32 saved_isdr;
-+};
-+
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+/* Hooks from bman_driver.c to bman_config.c */
-+int bman_init_ccsr(struct device_node *node);
-+#endif
-+
-+/* Hooks from bman_driver.c in to bman_high.c */
-+struct bman_portal *bman_create_portal(
-+ struct bman_portal *portal,
-+ const struct bm_portal_config *config);
-+struct bman_portal *bman_create_affine_portal(
-+ const struct bm_portal_config *config);
-+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
-+ int cpu);
-+void bman_destroy_portal(struct bman_portal *bm);
-+
-+const struct bm_portal_config *bman_destroy_affine_portal(void);
-+
-+/* Hooks from fsl_usdpaa.c to bman_driver.c */
-+struct bm_portal_config *bm_get_unused_portal(void);
-+struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
-+void bm_put_unused_portal(struct bm_portal_config *pcfg);
-+void bm_set_liodns(struct bm_portal_config *pcfg);
-+
-+/* Pool logic in the portal driver, during initialisation, needs to know if
-+ * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+int bman_have_ccsr(void);
-+#else
-+#define bman_have_ccsr() 0
-+#endif
-+
-+/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
-+ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
-+ * might fail (if the buffer pool is depleted). So this value provides some
-+ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
-+ * are requested at once or if h/w has been tested a couple of times without
-+ * luck. The _HIGH value: when bman_release() is called and the stockpile
-+ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
-+ * the release ring is full). So this value provides some "stagger" so that
-+ * ring-access is retried a couple of times prior to the API returning a
-+ * failure. The following *must* be true;
-+ * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
-+ * (to avoid thrashing)
-+ * BMAN_STOCKPILE_SZ >= 16
-+ * (as the release logic expects to either send 8 buffers to hw prior to
-+ * adding the given buffers to the stockpile or add the buffers to the
-+ * stockpile before sending 8 to hw, as the API must be an all-or-nothing
-+ * success/fail.)
-+ */
-+#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
-+#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
-+#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
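/* Hedged sketch (not part of the driver): the two constraints stated in the
 * comment above, expressed as compile-time checks. */
static inline void example_stockpile_sanity(void)
{
	BUILD_BUG_ON(BMAN_STOCKPILE_HIGH - BMAN_STOCKPILE_LOW <= 8);
	BUILD_BUG_ON(BMAN_STOCKPILE_SZ < 16);
}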
-+
-+/*************************************************/
-+/* BMan s/w corenet portal, low-level i/face */
-+/*************************************************/
-+
-+/* Used by all portal interrupt registers except 'inhibit'
-+ * This mask contains all the "irqsource" bits visible to API users
-+ */
-+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
-+
-+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
-+ * the disable register" rather than "disable the ability to write". */
-+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
-+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
-+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
-+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
-+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
-+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
-+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
-+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
-+
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+/* Set depletion thresholds associated with a buffer pool. Requires that the
-+ * operating system have access to Bman CCSR (ie. compiled in support and
-+ * run-time access courtesy of the device-tree). */
-+int bm_pool_set(u32 bpid, const u32 *thresholds);
-+#define BM_POOL_THRESH_SW_ENTER 0
-+#define BM_POOL_THRESH_SW_EXIT 1
-+#define BM_POOL_THRESH_HW_ENTER 2
-+#define BM_POOL_THRESH_HW_EXIT 3
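/* Hedged sketch of how a caller might fill the array consumed by
 * bm_pool_set(); the BPID (3) and the four threshold values are made up. */
static inline void example_set_thresholds(void)
{
	u32 thresh[4];

	thresh[BM_POOL_THRESH_SW_ENTER] = 1024; /* s/w depletion entry */
	thresh[BM_POOL_THRESH_SW_EXIT]  = 2048; /* s/w depletion exit */
	thresh[BM_POOL_THRESH_HW_ENTER] = 256;  /* h/w depletion entry */
	thresh[BM_POOL_THRESH_HW_EXIT]  = 512;  /* h/w depletion exit */
	if (bm_pool_set(3, thresh))
		pr_warn("example: could not set depletion thresholds\n");
}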
-+
-+/* Read the free buffer count for a given buffer pool */
-+u32 bm_pool_free_buffers(u32 bpid);
-+
-+__init int bman_init(void);
-+__init int bman_resource_init(void);
-+
-+const struct bm_portal_config *bman_get_bm_portal_config(
-+ struct bman_portal *portal);
-+
-+/* power management */
-+#ifdef CONFIG_SUSPEND
-+void suspend_unused_bportal(void);
-+void resume_unused_bportal(void);
-+#endif
-+
-+#endif /* CONFIG_FSL_BMAN_CONFIG */
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_test.c
-@@ -0,0 +1,56 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "bman_test.h"
-+
-+MODULE_AUTHOR("Geoff Thorpe");
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_DESCRIPTION("Bman testing");
-+
-+static int test_init(void)
-+{
-+#ifdef CONFIG_FSL_BMAN_TEST_HIGH
-+ int loop = 1;
-+ while (loop--)
-+ bman_test_high();
-+#endif
-+#ifdef CONFIG_FSL_BMAN_TEST_THRESH
-+ bman_test_thresh();
-+#endif
-+ return 0;
-+}
-+
-+static void test_exit(void)
-+{
-+}
-+
-+module_init(test_init);
-+module_exit(test_exit);
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_test.h
-@@ -0,0 +1,44 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/io.h>
-+#include <linux/slab.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/kthread.h>
-+
-+#include <linux/fsl_bman.h>
-+
-+void bman_test_high(void);
-+void bman_test_thresh(void);
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_test_high.c
-@@ -0,0 +1,183 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "bman_test.h"
-+#include "bman_private.h"
-+
-+/*************/
-+/* constants */
-+/*************/
-+
-+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
-+#define POOL_OPAQUE ((void *)0xdeadabba)
-+#define NUM_BUFS 93
-+#define LOOPS 3
-+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
-+
-+/***************/
-+/* global vars */
-+/***************/
-+
-+static struct bman_pool *pool;
-+static int depleted;
-+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
-+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
-+static int bufs_received;
-+
-+/* Predeclare the callback so we can instantiate pool parameters */
-+static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
-+
-+/**********************/
-+/* internal functions */
-+/**********************/
-+
-+static void bufs_init(void)
-+{
-+ int i;
-+ for (i = 0; i < NUM_BUFS; i++)
-+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
-+ bufs_received = 0;
-+}
-+
-+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
-+{
-+ if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
-+
-+		/* On SoCs with Bman revision 2.0, Bman only respects the 40
-+ * LS-bits of buffer addresses, masking off the upper 8-bits on
-+ * release commands. The API provides for 48-bit addresses
-+ * because some SoCs support all 48-bits. When generating
-+ * garbage addresses for testing, we either need to zero the
-+ * upper 8-bits when releasing to Bman (otherwise we'll be
-+ * disappointed when the buffers we acquire back from Bman
-+ * don't match), or we need to mask the upper 8-bits off when
-+ * comparing. We do the latter.
-+ */
-+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
-+ < (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
-+ return -1;
-+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
-+ > (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
-+ return 1;
-+ } else {
-+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
-+ return -1;
-+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
-+ return 1;
-+ }
-+
-+ return 0;
-+}
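/* Illustrative only (assumes a rev-2.x Bman so the masking path above is
 * taken): two buffer addresses that differ only in the upper 8 bits compare
 * as equal. */
static void example_masked_compare(void)
{
	struct bm_buffer x, y;

	bm_buffer_set64(&x, 0xAB0123456789ULL); /* upper byte set on release */
	bm_buffer_set64(&y, 0x000123456789ULL); /* upper byte dropped by Bman */
	BUG_ON(bufs_cmp(&x, &y) != 0); /* equal once masked to 40 bits */
}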
-+
-+static void bufs_confirm(void)
-+{
-+ int i, j;
-+ for (i = 0; i < NUM_BUFS; i++) {
-+ int matches = 0;
-+ for (j = 0; j < NUM_BUFS; j++)
-+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
-+ matches++;
-+ BUG_ON(matches != 1);
-+ }
-+}
-+
-+/********/
-+/* test */
-+/********/
-+
-+static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
-+ void *pool_ctx, int __depleted)
-+{
-+ BUG_ON(__pool != pool);
-+ BUG_ON(pool_ctx != POOL_OPAQUE);
-+ depleted = __depleted;
-+}
-+
-+void bman_test_high(void)
-+{
-+ struct bman_pool_params pparams = {
-+ .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
-+ .cb = depletion_cb,
-+ .cb_ctx = POOL_OPAQUE,
-+ };
-+ int i, loops = LOOPS;
-+ struct bm_buffer tmp_buf;
-+
-+ bufs_init();
-+
-+ pr_info("BMAN: --- starting high-level test ---\n");
-+
-+ pool = bman_new_pool(&pparams);
-+ BUG_ON(!pool);
-+
-+ /*******************/
-+ /* Release buffers */
-+ /*******************/
-+do_loop:
-+ i = 0;
-+ while (i < NUM_BUFS) {
-+ u32 flags = BMAN_RELEASE_FLAG_WAIT;
-+ int num = 8;
-+ if ((i + num) > NUM_BUFS)
-+ num = NUM_BUFS - i;
-+ if ((i + num) == NUM_BUFS)
-+ flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
-+ if (bman_release(pool, bufs_in + i, num, flags))
-+ panic("bman_release() failed\n");
-+ i += num;
-+ }
-+
-+ /*******************/
-+ /* Acquire buffers */
-+ /*******************/
-+ while (i > 0) {
-+ int tmp, num = 8;
-+ if (num > i)
-+ num = i;
-+ tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
-+ BUG_ON(tmp != num);
-+ i -= num;
-+ }
-+
-+ i = bman_acquire(pool, &tmp_buf, 1, 0);
-+ BUG_ON(i > 0);
-+
-+ bufs_confirm();
-+
-+ if (--loops)
-+ goto do_loop;
-+
-+ /************/
-+ /* Clean up */
-+ /************/
-+ bman_free_pool(pool);
-+ pr_info("BMAN: --- finished high-level test ---\n");
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/bman_test_thresh.c
-@@ -0,0 +1,196 @@
-+/* Copyright 2010-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "bman_test.h"
-+
-+/* Test constants */
-+#define TEST_NUMBUFS 129728
-+#define TEST_EXIT 129536
-+#define TEST_ENTRY 129024
-+
-+struct affine_test_data {
-+ struct task_struct *t;
-+ int cpu;
-+ int expect_affinity;
-+ int drain;
-+ int num_enter;
-+ int num_exit;
-+ struct list_head node;
-+ struct completion wakethread;
-+ struct completion wakeparent;
-+};
-+
-+static void cb_depletion(struct bman_portal *portal,
-+ struct bman_pool *pool,
-+ void *opaque,
-+ int depleted)
-+{
-+ struct affine_test_data *data = opaque;
-+ int c = smp_processor_id();
-+ pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
-+ bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
-+	/* We should be executing on the CPU of the thread that owns the pool
-+	 * if and only if that CPU has an affine portal (ie. it isn't slaved). */
-+ BUG_ON((c != data->cpu) && data->expect_affinity);
-+ BUG_ON((c == data->cpu) && !data->expect_affinity);
-+ if (depleted)
-+ data->num_enter++;
-+ else
-+ data->num_exit++;
-+}
-+
-+/* Params used to set up a pool, this also dynamically allocates a BPID */
-+static const struct bman_pool_params params_nocb = {
-+ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
-+ .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
-+};
-+
-+/* Params used to set up each cpu's pool with callbacks enabled */
-+static struct bman_pool_params params_cb = {
-+ .bpid = 0, /* will be replaced to match pool_nocb */
-+ .flags = BMAN_POOL_FLAG_DEPLETION,
-+ .cb = cb_depletion
-+};
-+
-+static struct bman_pool *pool_nocb;
-+static LIST_HEAD(threads);
-+
-+static int affine_test(void *__data)
-+{
-+ struct bman_pool *pool;
-+ struct affine_test_data *data = __data;
-+ struct bman_pool_params my_params = params_cb;
-+
-+ pr_info("thread %d: starting\n", data->cpu);
-+ /* create the pool */
-+ my_params.cb_ctx = data;
-+ pool = bman_new_pool(&my_params);
-+ BUG_ON(!pool);
-+ complete(&data->wakeparent);
-+ wait_for_completion(&data->wakethread);
-+ init_completion(&data->wakethread);
-+
-+ /* if we're the drainer, we get signalled for that */
-+ if (data->drain) {
-+ struct bm_buffer buf;
-+ int ret;
-+ pr_info("thread %d: draining...\n", data->cpu);
-+ do {
-+ ret = bman_acquire(pool, &buf, 1, 0);
-+ } while (ret > 0);
-+ pr_info("thread %d: draining done.\n", data->cpu);
-+ complete(&data->wakeparent);
-+ wait_for_completion(&data->wakethread);
-+ init_completion(&data->wakethread);
-+ }
-+
-+ /* cleanup */
-+ bman_free_pool(pool);
-+ while (!kthread_should_stop())
-+ cpu_relax();
-+ pr_info("thread %d: exiting\n", data->cpu);
-+ return 0;
-+}
-+
-+static struct affine_test_data *start_affine_test(int cpu, int drain)
-+{
-+ struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-+
-+ if (!data)
-+ return NULL;
-+ data->cpu = cpu;
-+ data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
-+ data->drain = drain;
-+ data->num_enter = 0;
-+ data->num_exit = 0;
-+ init_completion(&data->wakethread);
-+ init_completion(&data->wakeparent);
-+ list_add_tail(&data->node, &threads);
-+ data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
-+ BUG_ON(IS_ERR(data->t));
-+ kthread_bind(data->t, cpu);
-+ wake_up_process(data->t);
-+ return data;
-+}
-+
-+void bman_test_thresh(void)
-+{
-+ int loop = TEST_NUMBUFS;
-+ int ret, num_cpus = 0;
-+ struct affine_test_data *data, *drainer = NULL;
-+
-+ pr_info("bman_test_thresh: start\n");
-+
-+ /* allocate a BPID and seed it */
-+ pool_nocb = bman_new_pool(&params_nocb);
-+ BUG_ON(!pool_nocb);
-+ while (loop--) {
-+ struct bm_buffer buf;
-+ bm_buffer_set64(&buf, 0x0badbeef + loop);
-+ ret = bman_release(pool_nocb, &buf, 1,
-+ BMAN_RELEASE_FLAG_WAIT);
-+ BUG_ON(ret);
-+ }
-+ while (!bman_rcr_is_empty())
-+ cpu_relax();
-+ pr_info("bman_test_thresh: buffers are in\n");
-+
-+ /* create threads and wait for them to create pools */
-+ params_cb.bpid = bman_get_params(pool_nocb)->bpid;
-+ for_each_cpu(loop, cpu_online_mask) {
-+ data = start_affine_test(loop, drainer ? 0 : 1);
-+ BUG_ON(!data);
-+ if (!drainer)
-+ drainer = data;
-+ num_cpus++;
-+ wait_for_completion(&data->wakeparent);
-+ }
-+
-+ /* signal the drainer to start draining */
-+ complete(&drainer->wakethread);
-+ wait_for_completion(&drainer->wakeparent);
-+ init_completion(&drainer->wakeparent);
-+
-+ /* tear down */
-+ list_for_each_entry_safe(data, drainer, &threads, node) {
-+ complete(&data->wakethread);
-+ ret = kthread_stop(data->t);
-+ BUG_ON(ret);
-+ list_del(&data->node);
-+ /* check that we get the expected callbacks (and no others) */
-+ BUG_ON(data->num_enter != 1);
-+ BUG_ON(data->num_exit != 0);
-+ kfree(data);
-+ }
-+ bman_free_pool(pool_nocb);
-+
-+ pr_info("bman_test_thresh: done\n");
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_alloc.c
-@@ -0,0 +1,706 @@
-+/* Copyright 2009-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "dpa_sys.h"
-+#include <linux/fsl_qman.h>
-+#include <linux/fsl_bman.h>
-+
-+/* Qman and Bman APIs are front-ends to the common code; */
-+
-+static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
-+static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
-+static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
-+static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
-+static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
-+static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
-+static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
-+static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
-+
-+/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
-+ * FQIDs (probably from user-space), it can filter out those that aren't in the
-+ * OOS state (better to leak a h/w resource than to crash). This function
-+ * returns the number of invalid IDs that were not released. */
-+static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
-+ int (*is_valid)(u32 id))
-+{
-+ int valid_mode = 0;
-+ u32 loop = id, total_invalid = 0;
-+ while (loop < (id + count)) {
-+ int isvalid = is_valid ? is_valid(loop) : 1;
-+ if (!valid_mode) {
-+ /* We're looking for a valid ID to terminate an invalid
-+ * range */
-+ if (isvalid) {
-+ /* We finished a range of invalid IDs, a valid
-+ * range is now underway */
-+ valid_mode = 1;
-+ count -= (loop - id);
-+ id = loop;
-+ } else
-+ total_invalid++;
-+ } else {
-+ /* We're looking for an invalid ID to terminate a
-+ * valid range */
-+ if (!isvalid) {
-+				/* Release the range of valid IDs, an invalid
-+ * range is now underway */
-+ if (loop > id)
-+ dpa_alloc_free(alloc, id, loop - id);
-+ valid_mode = 0;
-+ }
-+ }
-+ loop++;
-+ }
-+ /* Release any unterminated range of valid IDs */
-+ if (valid_mode && count)
-+ dpa_alloc_free(alloc, id, count);
-+ return total_invalid;
-+}
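/* A hedged worked example with a hypothetical validity filter: for a four-ID
 * range starting at 10 where only IDs >= 12 pass the check, the loop counts
 * 10 and 11 as leaks, frees {12, 13} back to the allocator and returns 2. */
static int example_is_valid(u32 id)
{
	return id >= 12; /* pretend only IDs >= 12 are in a releasable state */
}
/* i.e. release_id_range(&bpalloc, 10, 4, example_is_valid) == 2 */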
-+
-+/* BPID allocator front-end */
-+
-+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
-+{
-+ return dpa_alloc_new(&bpalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(bman_alloc_bpid_range);
-+
-+static int bp_cleanup(u32 bpid)
-+{
-+ return bman_shutdown_pool(bpid) == 0;
-+}
-+void bman_release_bpid_range(u32 bpid, u32 count)
-+{
-+ u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
-+ if (total_invalid)
-+ pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
-+ bpid, bpid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(bman_release_bpid_range);
-+
-+void bman_seed_bpid_range(u32 bpid, u32 count)
-+{
-+ dpa_alloc_seed(&bpalloc, bpid, count);
-+}
-+EXPORT_SYMBOL(bman_seed_bpid_range);
-+
-+int bman_reserve_bpid_range(u32 bpid, u32 count)
-+{
-+ return dpa_alloc_reserve(&bpalloc, bpid, count);
-+}
-+EXPORT_SYMBOL(bman_reserve_bpid_range);
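/* A minimal usage sketch (the function and all numbers are hypothetical)
 * combining the three BPID front-ends above: reserve a fixed ID, seed a
 * dynamic range, then allocate from it. */
static int example_bpid_setup(void)
{
	u32 bpid;
	int ret;

	ret = bman_reserve_bpid_range(7, 1); /* pin BPID 7 */
	if (ret)
		return ret;
	bman_seed_bpid_range(32, 16); /* BPIDs 32..47 become allocatable */
	ret = bman_alloc_bpid_range(&bpid, 1, 0, 0);
	if (ret < 0)
		return ret;
	pr_info("example: got dynamic BPID %u\n", bpid);
	return 0;
}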
-+
-+
-+/* FQID allocator front-end */
-+
-+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
-+{
-+ return dpa_alloc_new(&fqalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_fqid_range);
-+
-+static int fq_cleanup(u32 fqid)
-+{
-+ return qman_shutdown_fq(fqid) == 0;
-+}
-+void qman_release_fqid_range(u32 fqid, u32 count)
-+{
-+ u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup);
-+ if (total_invalid)
-+ pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
-+ fqid, fqid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_fqid_range);
-+
-+int qman_reserve_fqid_range(u32 fqid, u32 count)
-+{
-+ return dpa_alloc_reserve(&fqalloc, fqid, count);
-+}
-+EXPORT_SYMBOL(qman_reserve_fqid_range);
-+
-+void qman_seed_fqid_range(u32 fqid, u32 count)
-+{
-+ dpa_alloc_seed(&fqalloc, fqid, count);
-+}
-+EXPORT_SYMBOL(qman_seed_fqid_range);
-+
-+/* Pool-channel allocator front-end */
-+
-+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
-+{
-+ return dpa_alloc_new(&qpalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_pool_range);
-+
-+static int qpool_cleanup(u32 qp)
-+{
-+ /* We query all FQDs starting from
-+ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
-+ * whose destination channel is the pool-channel being released.
-+ * When a non-OOS FQD is found we attempt to clean it up */
-+ struct qman_fq fq = {
-+ .fqid = 1
-+ };
-+ int err;
-+ do {
-+ struct qm_mcr_queryfq_np np;
-+ err = qman_query_fq_np(&fq, &np);
-+ if (err)
-+ /* FQID range exceeded, found no problems */
-+ return 1;
-+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
-+ struct qm_fqd fqd;
-+ err = qman_query_fq(&fq, &fqd);
-+ BUG_ON(err);
-+ if (fqd.dest.channel == qp) {
-+ /* The channel is the FQ's target, clean it */
-+ if (qman_shutdown_fq(fq.fqid) != 0)
-+ /* Couldn't shut down the FQ
-+ so the pool must be leaked */
-+ return 0;
-+ }
-+ }
-+ /* Move to the next FQID */
-+ fq.fqid++;
-+ } while (1);
-+}
-+void qman_release_pool_range(u32 qp, u32 count)
-+{
-+ u32 total_invalid = release_id_range(&qpalloc, qp,
-+ count, qpool_cleanup);
-+ if (total_invalid) {
-+ /* Pool channels are almost always used individually */
-+ if (count == 1)
-+ pr_err("Pool channel 0x%x had %d leaks\n",
-+ qp, total_invalid);
-+ else
-+ pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
-+ qp, qp + count - 1, count, total_invalid);
-+ }
-+}
-+EXPORT_SYMBOL(qman_release_pool_range);
-+
-+
-+void qman_seed_pool_range(u32 poolid, u32 count)
-+{
-+ dpa_alloc_seed(&qpalloc, poolid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_pool_range);
-+
-+int qman_reserve_pool_range(u32 poolid, u32 count)
-+{
-+ return dpa_alloc_reserve(&qpalloc, poolid, count);
-+}
-+EXPORT_SYMBOL(qman_reserve_pool_range);
-+
-+
-+/* CGR ID allocator front-end */
-+
-+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
-+{
-+ return dpa_alloc_new(&cgralloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_cgrid_range);
-+
-+static int cqr_cleanup(u32 cgrid)
-+{
-+ /* We query all FQDs starting from
-+ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
-+ * whose CGR is the CGR being released.
-+ */
-+ struct qman_fq fq = {
-+ .fqid = 1
-+ };
-+ int err;
-+ do {
-+ struct qm_mcr_queryfq_np np;
-+ err = qman_query_fq_np(&fq, &np);
-+ if (err)
-+ /* FQID range exceeded, found no problems */
-+ return 1;
-+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
-+ struct qm_fqd fqd;
-+ err = qman_query_fq(&fq, &fqd);
-+ BUG_ON(err);
-+ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
-+ (fqd.cgid == cgrid)) {
-+				pr_err("CGRID 0x%x is being used by FQID 0x%x,"
-+ " CGR will be leaked\n",
-+ cgrid, fq.fqid);
-+ return 1;
-+ }
-+ }
-+ /* Move to the next FQID */
-+ fq.fqid++;
-+ } while (1);
-+}
-+
-+void qman_release_cgrid_range(u32 cgrid, u32 count)
-+{
-+ u32 total_invalid = release_id_range(&cgralloc, cgrid,
-+ count, cqr_cleanup);
-+ if (total_invalid)
-+ pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
-+ cgrid, cgrid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_cgrid_range);
-+
-+void qman_seed_cgrid_range(u32 cgrid, u32 count)
-+{
-+ dpa_alloc_seed(&cgralloc, cgrid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_cgrid_range);
-+
-+/* CEETM CHANNEL ID allocator front-end */
-+int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
-+ int partial)
-+{
-+ return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range);
-+
-+int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
-+ int partial)
-+{
-+ return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range);
-+
-+void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
-+{
-+ u32 total_invalid;
-+
-+ total_invalid = release_id_range(&ceetm0_challoc, channelid, count,
-+ NULL);
-+ if (total_invalid)
-+ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
-+ channelid, channelid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
-+
-+void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
-+{
-+ dpa_alloc_seed(&ceetm0_challoc, channelid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
-+
-+void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
-+{
-+ u32 total_invalid;
-+ total_invalid = release_id_range(&ceetm1_challoc, channelid, count,
-+ NULL);
-+ if (total_invalid)
-+ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
-+ channelid, channelid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
-+
-+void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
-+{
-+ dpa_alloc_seed(&ceetm1_challoc, channelid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
-+
-+/* CEETM LFQID allocator front-end */
-+int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
-+ int partial)
-+{
-+ return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range);
-+
-+int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
-+ int partial)
-+{
-+ return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial);
-+}
-+EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range);
-+
-+void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
-+{
-+ u32 total_invalid;
-+
-+ total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count,
-+ NULL);
-+ if (total_invalid)
-+ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
-+ lfqid, lfqid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
-+
-+void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count)
-+{
-+ dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range);
-+
-+void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
-+{
-+ u32 total_invalid;
-+
-+ total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count,
-+ NULL);
-+ if (total_invalid)
-+ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
-+ lfqid, lfqid + count - 1, count, total_invalid);
-+}
-+EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range);
-+
-+void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count)
-+{
-+ dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count);
-+
-+}
-+EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range);
-+
-+
-+/* Everything else is the common backend to all the allocators */
-+
-+/* The allocator is a (possibly-empty) list of these; */
-+struct alloc_node {
-+ struct list_head list;
-+ u32 base;
-+ u32 num;
-+ /* refcount and is_alloced are only set
-+ when the node is in the used list */
-+ unsigned int refcount;
-+ int is_alloced;
-+};
-+
-+/* #define DPA_ALLOC_DEBUG */
-+
-+#ifdef DPA_ALLOC_DEBUG
-+#define DPRINT pr_info
-+static void DUMP(struct dpa_alloc *alloc)
-+{
-+ int off = 0;
-+ char buf[256];
-+ struct alloc_node *p;
-+ pr_info("Free Nodes\n");
-+ list_for_each_entry(p, &alloc->free, list) {
-+ if (off < 255)
-+ off += snprintf(buf + off, 255-off, "{%d,%d}",
-+ p->base, p->base + p->num - 1);
-+ }
-+ pr_info("%s\n", buf);
-+
-+ off = 0;
-+ pr_info("Used Nodes\n");
-+ list_for_each_entry(p, &alloc->used, list) {
-+ if (off < 255)
-+ off += snprintf(buf + off, 255-off, "{%d,%d}",
-+ p->base, p->base + p->num - 1);
-+ }
-+ pr_info("%s\n", buf);
-+
-+
-+
-+}
-+#else
-+#define DPRINT(x...)
-+#define DUMP(a)
-+#endif
-+
-+int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
-+ int partial)
-+{
-+ struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
-+ u32 base, next_best_base = 0, num = 0, next_best_num = 0;
-+ struct alloc_node *margin_left, *margin_right;
-+
-+ *result = (u32)-1;
-+ DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
-+ DUMP(alloc);
-+ /* If 'align' is 0, it should behave as though it was 1 */
-+ if (!align)
-+ align = 1;
-+ margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
-+ if (!margin_left)
-+ goto err;
-+ margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
-+ if (!margin_right) {
-+ kfree(margin_left);
-+ goto err;
-+ }
-+ spin_lock_irq(&alloc->lock);
-+ list_for_each_entry(i, &alloc->free, list) {
-+ base = (i->base + align - 1) / align;
-+ base *= align;
-+ if ((base - i->base) >= i->num)
-+ /* alignment is impossible, regardless of count */
-+ continue;
-+ num = i->num - (base - i->base);
-+ if (num >= count) {
-+ /* this one will do nicely */
-+ num = count;
-+ goto done;
-+ }
-+ if (num > next_best_num) {
-+ next_best = i;
-+ next_best_base = base;
-+ next_best_num = num;
-+ }
-+ }
-+ if (partial && next_best) {
-+ i = next_best;
-+ base = next_best_base;
-+ num = next_best_num;
-+ } else
-+ i = NULL;
-+done:
-+ if (i) {
-+ if (base != i->base) {
-+ margin_left->base = i->base;
-+ margin_left->num = base - i->base;
-+ list_add_tail(&margin_left->list, &i->list);
-+ } else
-+ kfree(margin_left);
-+ if ((base + num) < (i->base + i->num)) {
-+ margin_right->base = base + num;
-+ margin_right->num = (i->base + i->num) -
-+ (base + num);
-+ list_add(&margin_right->list, &i->list);
-+ } else
-+ kfree(margin_right);
-+ list_del(&i->list);
-+ kfree(i);
-+ *result = base;
-+ } else {
-+ spin_unlock_irq(&alloc->lock);
-+ kfree(margin_left);
-+ kfree(margin_right);
-+ }
-+
-+err:
-+ DPRINT("returning %d\n", i ? num : -ENOMEM);
-+ DUMP(alloc);
-+ if (!i)
-+ return -ENOMEM;
-+
-+ /* Add the allocation to the used list with a refcount of 1 */
-+ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
-+ if (!used_node) {
-+ spin_unlock_irq(&alloc->lock);
-+ return -ENOMEM;
-+ }
-+ used_node->base = *result;
-+ used_node->num = num;
-+ used_node->refcount = 1;
-+ used_node->is_alloced = 1;
-+ list_add_tail(&used_node->list, &alloc->used);
-+ spin_unlock_irq(&alloc->lock);
-+ return (int)num;
-+}
-+
-+/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
-+ * forcing error-handling on to users in the deallocation path. */
-+static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
-+{
-+ struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
-+ BUG_ON(!node);
-+ DPRINT("release_range(%d,%d)\n", base_id, count);
-+ DUMP(alloc);
-+ BUG_ON(!count);
-+ spin_lock_irq(&alloc->lock);
-+
-+
-+ node->base = base_id;
-+ node->num = count;
-+ list_for_each_entry(i, &alloc->free, list) {
-+ if (i->base >= node->base) {
-+ /* BUG_ON(any overlapping) */
-+ BUG_ON(i->base < (node->base + node->num));
-+ list_add_tail(&node->list, &i->list);
-+ goto done;
-+ }
-+ }
-+ list_add_tail(&node->list, &alloc->free);
-+done:
-+ /* Merge to the left */
-+ i = list_entry(node->list.prev, struct alloc_node, list);
-+ if (node->list.prev != &alloc->free) {
-+ BUG_ON((i->base + i->num) > node->base);
-+ if ((i->base + i->num) == node->base) {
-+ node->base = i->base;
-+ node->num += i->num;
-+ list_del(&i->list);
-+ kfree(i);
-+ }
-+ }
-+ /* Merge to the right */
-+ i = list_entry(node->list.next, struct alloc_node, list);
-+ if (node->list.next != &alloc->free) {
-+ BUG_ON((node->base + node->num) > i->base);
-+ if ((node->base + node->num) == i->base) {
-+ node->num += i->num;
-+ list_del(&i->list);
-+ kfree(i);
-+ }
-+ }
-+ spin_unlock_irq(&alloc->lock);
-+ DUMP(alloc);
-+}
-+
-+
-+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
-+{
-+ struct alloc_node *i = NULL;
-+ spin_lock_irq(&alloc->lock);
-+
-+ /* First find the node in the used list and decrement its ref count */
-+ list_for_each_entry(i, &alloc->used, list) {
-+ if (i->base == base_id && i->num == count) {
-+ --i->refcount;
-+ if (i->refcount == 0) {
-+ list_del(&i->list);
-+ spin_unlock_irq(&alloc->lock);
-+ if (i->is_alloced)
-+ _dpa_alloc_free(alloc, base_id, count);
-+ kfree(i);
-+ return;
-+ }
-+ spin_unlock_irq(&alloc->lock);
-+ return;
-+ }
-+ }
-+ /* Couldn't find the allocation */
-+ pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
-+ base_id, count);
-+ spin_unlock_irq(&alloc->lock);
-+}
-+
-+void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
-+{
-+ /* Same as free but no previous allocation checking is needed */
-+ _dpa_alloc_free(alloc, base_id, count);
-+}
-+
-+
-+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
-+{
-+ struct alloc_node *i = NULL, *used_node;
-+
-+ DPRINT("alloc_reserve(%d,%d)\n", base, num);
-+ DUMP(alloc);
-+
-+ spin_lock_irq(&alloc->lock);
-+
-+ /* Check for the node in the used list.
-+	   If found, increase its refcount */
-+ list_for_each_entry(i, &alloc->used, list) {
-+ if ((i->base == base) && (i->num == num)) {
-+ ++i->refcount;
-+ spin_unlock_irq(&alloc->lock);
-+ return 0;
-+ }
-+ if ((base >= i->base) && (base < (i->base + i->num))) {
-+ /* This is an attempt to reserve a region that was
-+ already reserved or alloced with a different
-+ base or num */
-+ pr_err("Cannot reserve %d - %d, it overlaps with"
-+ " existing reservation from %d - %d\n",
-+ base, base + num - 1, i->base,
-+ i->base + i->num - 1);
-+ spin_unlock_irq(&alloc->lock);
-+ return -1;
-+ }
-+ }
-+ /* Check to make sure this ID isn't in the free list */
-+ list_for_each_entry(i, &alloc->free, list) {
-+ if ((base >= i->base) && (base < (i->base + i->num))) {
-+ /* yep, the reservation is within this node */
-+ pr_err("Cannot reserve %d - %d, it overlaps with"
-+ " free range %d - %d and must be alloced\n",
-+ base, base + num - 1,
-+ i->base, i->base + i->num - 1);
-+ spin_unlock_irq(&alloc->lock);
-+ return -1;
-+ }
-+ }
-+ /* Add the allocation to the used list with a refcount of 1 */
-+ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
-+ if (!used_node) {
-+ spin_unlock_irq(&alloc->lock);
-+ return -ENOMEM;
-+
-+ }
-+ used_node->base = base;
-+ used_node->num = num;
-+ used_node->refcount = 1;
-+ used_node->is_alloced = 0;
-+ list_add_tail(&used_node->list, &alloc->used);
-+ spin_unlock_irq(&alloc->lock);
-+ return 0;
-+}
-+
-+
-+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
-+{
-+ struct alloc_node *i = NULL;
-+ DPRINT("alloc_pop()\n");
-+ DUMP(alloc);
-+ spin_lock_irq(&alloc->lock);
-+ if (!list_empty(&alloc->free)) {
-+ i = list_entry(alloc->free.next, struct alloc_node, list);
-+ list_del(&i->list);
-+ }
-+ spin_unlock_irq(&alloc->lock);
-+ DPRINT("returning %d\n", i ? 0 : -ENOMEM);
-+ DUMP(alloc);
-+ if (!i)
-+ return -ENOMEM;
-+ *result = i->base;
-+ *count = i->num;
-+ kfree(i);
-+ return 0;
-+}
-+
-+int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
-+{
-+ struct alloc_node *i = NULL;
-+ int res = 0;
-+ DPRINT("alloc_check()\n");
-+ spin_lock_irq(&list_head->lock);
-+
-+ list_for_each_entry(i, &list_head->free, list) {
-+ if ((item >= i->base) && (item < (i->base + i->num))) {
-+ res = 1;
-+ break;
-+ }
-+ }
-+ spin_unlock_irq(&list_head->lock);
-+ return res;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys.h
-@@ -0,0 +1,259 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPA_SYS_H
-+#define DPA_SYS_H
-+
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/io.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/bootmem.h>
-+#include <linux/slab.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/kthread.h>
-+#include <linux/memblock.h>
-+#include <linux/completion.h>
-+#include <linux/log2.h>
-+#include <linux/types.h>
-+#include <linux/ioctl.h>
-+#include <linux/miscdevice.h>
-+#include <linux/uaccess.h>
-+#include <linux/debugfs.h>
-+#include <linux/seq_file.h>
-+#include <linux/device.h>
-+#include <linux/uio_driver.h>
-+#include <linux/smp.h>
-+#include <linux/fsl_hypervisor.h>
-+#include <linux/vmalloc.h>
-+#include <linux/ctype.h>
-+#include <linux/math64.h>
-+#include <linux/bitops.h>
-+
-+#include <linux/fsl_usdpaa.h>
-+
-+/* When copying aligned words or shorts, try to avoid memcpy() */
-+#define CONFIG_TRY_BETTER_MEMCPY
-+
-+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
-+#define DPA_PORTAL_CE 0
-+#define DPA_PORTAL_CI 1
-+
-+/***********************/
-+/* Misc inline assists */
-+/***********************/
-+
-+#if defined CONFIG_PPC32
-+#include "dpa_sys_ppc32.h"
-+#elif defined CONFIG_PPC64
-+#include "dpa_sys_ppc64.h"
-+#elif defined CONFIG_ARM
-+#include "dpa_sys_arm.h"
-+#elif defined CONFIG_ARM64
-+#include "dpa_sys_arm64.h"
-+#endif
-+
-+
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+#define DPA_ASSERT(x) \
-+ do { \
-+ if (!(x)) { \
-+ pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
-+ __stringify_1(x)); \
-+ dump_stack(); \
-+ panic("assertion failure"); \
-+ } \
-+ } while (0)
-+#else
-+#define DPA_ASSERT(x)
-+#endif
-+
-+/* memcpy() stuff - when you know alignments in advance */
-+#ifdef CONFIG_TRY_BETTER_MEMCPY
-+static inline void copy_words(void *dest, const void *src, size_t sz)
-+{
-+ u32 *__dest = dest;
-+ const u32 *__src = src;
-+ size_t __sz = sz >> 2;
-+ BUG_ON((unsigned long)dest & 0x3);
-+ BUG_ON((unsigned long)src & 0x3);
-+ BUG_ON(sz & 0x3);
-+ while (__sz--)
-+ *(__dest++) = *(__src++);
-+}
-+static inline void copy_shorts(void *dest, const void *src, size_t sz)
-+{
-+ u16 *__dest = dest;
-+ const u16 *__src = src;
-+ size_t __sz = sz >> 1;
-+ BUG_ON((unsigned long)dest & 0x1);
-+ BUG_ON((unsigned long)src & 0x1);
-+ BUG_ON(sz & 0x1);
-+ while (__sz--)
-+ *(__dest++) = *(__src++);
-+}
-+static inline void copy_bytes(void *dest, const void *src, size_t sz)
-+{
-+ u8 *__dest = dest;
-+ const u8 *__src = src;
-+ while (sz--)
-+ *(__dest++) = *(__src++);
-+}
-+#else
-+#define copy_words memcpy
-+#define copy_shorts memcpy
-+#define copy_bytes memcpy
-+#endif
-+
-+/************/
-+/* RB-trees */
-+/************/
-+
-+/* We encapsulate RB-trees so that it's easier to use non-linux forms in
-+ * non-linux systems. This also encapsulates the extra plumbing that linux code
-+ * usually provides when using RB-trees. This encapsulation assumes that the
-+ * data type held by the tree is u32. */
-+
-+struct dpa_rbtree {
-+ struct rb_root root;
-+};
-+#define DPA_RBTREE { .root = RB_ROOT }
-+
-+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
-+{
-+ tree->root = RB_ROOT;
-+}
-+
-+#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
-+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
-+{ \
-+ struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
-+ while (*p) { \
-+ u32 item; \
-+ parent = *p; \
-+ item = rb_entry(parent, type, node_field)->val_field; \
-+ if (obj->val_field < item) \
-+ p = &parent->rb_left; \
-+ else if (obj->val_field > item) \
-+ p = &parent->rb_right; \
-+ else \
-+ return -EBUSY; \
-+ } \
-+ rb_link_node(&obj->node_field, parent, p); \
-+ rb_insert_color(&obj->node_field, &tree->root); \
-+ return 0; \
-+} \
-+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
-+{ \
-+ rb_erase(&obj->node_field, &tree->root); \
-+} \
-+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
-+{ \
-+ type *ret; \
-+ struct rb_node *p = tree->root.rb_node; \
-+ while (p) { \
-+ ret = rb_entry(p, type, node_field); \
-+ if (val < ret->val_field) \
-+ p = p->rb_left; \
-+ else if (val > ret->val_field) \
-+ p = p->rb_right; \
-+ else \
-+ return ret; \
-+ } \
-+ return NULL; \
-+}
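/* Illustrative instantiation (struct and field names are hypothetical):
 * expanding the template above for an object keyed by a u32 'fqid' field
 * generates example_push(), example_del() and example_find(), all operating
 * on a struct dpa_rbtree. */
struct example_obj {
	struct rb_node node;
	u32 fqid;
};
IMPLEMENT_DPA_RBTREE(example, struct example_obj, node, fqid)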
-+
-+/************/
-+/* Bootargs */
-+/************/
-+
-+/* Qman has "qportals=" and Bman has "bportals=", they use the same syntax
-+ * though; a comma-separated list of items, each item being a cpu index and/or a
-+ * range of cpu indices, and each item may optionally be prefixed by "s" to indicate
-+ * that the portal associated with that cpu should be shared. See bman_driver.c
-+ * for more specifics. */
-+static int __parse_portals_cpu(const char **s, unsigned int *cpu)
-+{
-+ *cpu = 0;
-+ if (!isdigit(**s))
-+ return -EINVAL;
-+ while (isdigit(**s))
-+ *cpu = *cpu * 10 + (*((*s)++) - '0');
-+ return 0;
-+}
-+static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
-+ struct cpumask *want_unshared,
-+ const char *argname)
-+{
-+ const char *s = str;
-+ unsigned int shared, cpu1, cpu2, loop;
-+
-+keep_going:
-+ if (*s == 's') {
-+ shared = 1;
-+ s++;
-+ } else
-+ shared = 0;
-+ if (__parse_portals_cpu(&s, &cpu1))
-+ goto err;
-+ if (*s == '-') {
-+ s++;
-+ if (__parse_portals_cpu(&s, &cpu2))
-+ goto err;
-+ if (cpu2 < cpu1)
-+ goto err;
-+ } else
-+ cpu2 = cpu1;
-+ for (loop = cpu1; loop <= cpu2; loop++)
-+ cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
-+ if (*s == ',') {
-+ s++;
-+ goto keep_going;
-+ } else if ((*s == '\0') || isspace(*s))
-+ return 0;
-+err:
-+ pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
-+ (unsigned long)s - (unsigned long)str);
-+ return -EINVAL;
-+}
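-+
-+/* Illustrative sketch only (not part of the original driver): e.g. booting
-+ * with "qportals=s0-3,8" and passing that string to the parser above marks
-+ * CPUs 0..3 in 'want_shared' and CPU 8 in 'want_unshared'. The wrapper below
-+ * is hypothetical and only shows the expected call pattern. */
-+static inline int __maybe_unused example_parse_qportals(char *str)
-+{
-+	struct cpumask shared, unshared;
-+
-+	cpumask_clear(&shared);
-+	cpumask_clear(&unshared);
-+	return parse_portals_bootarg(str, &shared, &unshared, "qportals");
-+}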
-+#ifdef CONFIG_FSL_USDPAA
-+/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
-+int usdpaa_get_portal_config(struct file *filp, void *cinh,
-+ enum usdpaa_portal_type ptype, unsigned int *irq,
-+ void **iir_reg);
-+#endif
-+#endif /* DPA_SYS_H */
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys_arm.h
-@@ -0,0 +1,95 @@
-+/* Copyright 2016 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPA_SYS_ARM_H
-+#define DPA_SYS_ARM_H
-+
-+#include <asm/cacheflush.h>
-+#include <asm/barrier.h>
-+
-+/* Implementation of ARM specific routines */
-+
-+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
-+ * barriers and that dcb*() won't fall victim to compiler or execution
-+ * reordering with respect to other code/instructions that manipulate the same
-+ * cacheline. */
-+#define hwsync() { asm volatile("dmb st" : : : "memory"); }
-+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
-+#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); }
-+#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); }
-+#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); }
-+#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); }
-+
-+#define dcbz_64(p) { memset(p, 0, sizeof(*p)); }
-+
-+#define dcbf_64(p) \
-+ do { \
-+ dcbf((u32)p); \
-+ } while (0)
-+/* Commonly used combo */
-+#define dcbit_ro(p) \
-+ do { \
-+ dcbi((u32)p); \
-+ dcbt_ro((u32)p); \
-+ } while (0)
-+
-+static inline u64 mfatb(void)
-+{
-+ return get_cycles();
-+}
-+
-+static inline u32 in_be32(volatile void *addr)
-+{
-+ return be32_to_cpu(*((volatile u32 *) addr));
-+}
-+
-+static inline void out_be32(void *addr, u32 val)
-+{
-+ *((u32 *) addr) = cpu_to_be32(val);
-+}
-+
-+
-+static inline void set_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+ *p |= mask;
-+}
-+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+ *p &= ~mask;
-+}
-+
-+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
-+{
-+ __cpuc_flush_dcache_area((void *) start, stop - start);
-+}
-+
-+#define hard_smp_processor_id() raw_smp_processor_id()
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
-@@ -0,0 +1,102 @@
-+/* Copyright 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPA_SYS_ARM64_H
-+#define DPA_SYS_ARM64_H
-+
-+#include <asm/cacheflush.h>
-+#include <asm/barrier.h>
-+
-+/* Implementation of ARM 64 bit specific routines */
-+
-+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
-+ * barriers and that dcb*() won't fall victim to compiler or execution
-+ * reordering with respect to other code/instructions that manipulate the same
-+ * cacheline. */
-+#define hwsync() { asm volatile("dmb st" : : : "memory"); }
-+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
-+#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
-+#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p)); }
-+#define dcbt_rw(p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); }
-+#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
-+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
-+
-+#define dcbz_64(p) \
-+ do { \
-+ dcbz(p); \
-+ } while (0)
-+
-+#define dcbf_64(p) \
-+ do { \
-+ dcbf(p); \
-+ } while (0)
-+/* Commonly used combo */
-+#define dcbit_ro(p) \
-+ do { \
-+ dcbi(p); \
-+ dcbt_ro(p); \
-+ } while (0)
-+
-+static inline u64 mfatb(void)
-+{
-+ return get_cycles();
-+}
-+
-+static inline u32 in_be32(volatile void *addr)
-+{
-+ return be32_to_cpu(*((volatile u32 *) addr));
-+}
-+
-+static inline void out_be32(void *addr, u32 val)
-+{
-+ *((u32 *) addr) = cpu_to_be32(val);
-+}
-+
-+
-+static inline void set_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+ *p |= mask;
-+}
-+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+ *p &= ~mask;
-+}
-+
-+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
-+{
-+ __flush_dcache_area((void *) start, stop - start);
-+}
-+
-+#define hard_smp_processor_id() raw_smp_processor_id()
-+
-+
-+
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h
-@@ -0,0 +1,70 @@
-+/* Copyright 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPA_SYS_PPC32_H
-+#define DPA_SYS_PPC32_H
-+
-+/* Implementation of PowerPC 32 bit specific routines */
-+
-+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
-+ * barriers and that dcb*() won't fall victim to compiler or execution
-+ * reordering with respect to other code/instructions that manipulate the same
-+ * cacheline. */
-+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
-+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
-+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
-+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
-+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
-+#define dcbi(p) dcbf(p)
-+
-+#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
-+#define dcbz_64(p) dcbzl(p)
-+#define dcbf_64(p) dcbf(p)
-+
-+/* Commonly used combo */
-+#define dcbit_ro(p) \
-+ do { \
-+ dcbi(p); \
-+ dcbt_ro(p); \
-+ } while (0)
-+
-+static inline u64 mfatb(void)
-+{
-+ u32 hi, lo, chk;
-+ do {
-+ hi = mfspr(SPRN_ATBU);
-+ lo = mfspr(SPRN_ATBL);
-+ chk = mfspr(SPRN_ATBU);
-+ } while (unlikely(hi != chk));
-+ return ((u64)hi << 32) | (u64)lo;
-+}
-+
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h
-@@ -0,0 +1,79 @@
-+/* Copyright 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef DPA_SYS_PPC64_H
-+#define DPA_SYS_PPC64_H
-+
-+/* Implementation of PowerPC 64 bit specific routines */
-+
-+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
-+ * barriers and that dcb*() won't fall victim to compiler or execution
-+ * reordering with respect to other code/instructions that manipulate the same
-+ * cacheline. */
-+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
-+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
-+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
-+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
-+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
-+#define dcbi(p) dcbf(p)
-+
-+#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
-+#define dcbz_64(p) \
-+ do { \
-+ dcbz((void*)p + 32); \
-+ dcbz(p); \
-+ } while (0)
-+#define dcbf_64(p) \
-+ do { \
-+ dcbf((void*)p + 32); \
-+ dcbf(p); \
-+ } while (0)
-+/* Commonly used combo */
-+#define dcbit_ro(p) \
-+ do { \
-+ dcbi(p); \
-+ dcbi((void*)p + 32); \
-+ dcbt_ro(p); \
-+ dcbt_ro((void*)p + 32); \
-+ } while (0)
-+
-+static inline u64 mfatb(void)
-+{
-+ u32 hi, lo, chk;
-+ do {
-+ hi = mfspr(SPRN_ATBU);
-+ lo = mfspr(SPRN_ATBL);
-+ chk = mfspr(SPRN_ATBU);
-+ } while (unlikely(hi != chk));
-+ return ((u64)hi << 32) | (u64)lo;
-+}
-+
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
-@@ -0,0 +1,2008 @@
-+/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
-+ * Authors: Andy Fleming <afleming@freescale.com>
-+ * Timur Tabi <timur@freescale.com>
-+ * Geoff Thorpe <Geoff.Thorpe@freescale.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public License
-+ * version 2. This program is licensed "as is" without any warranty of any
-+ * kind, whether express or implied.
-+ */
-+
-+
-+#include <linux/miscdevice.h>
-+#include <linux/fs.h>
-+#include <linux/cdev.h>
-+#include <linux/mm.h>
-+#include <linux/of.h>
-+#include <linux/memblock.h>
-+#include <linux/slab.h>
-+#include <linux/mman.h>
-+#include <linux/of_reserved_mem.h>
-+
-+#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
-+#include <mm/mmu_decl.h>
-+#endif
-+
-+#include "dpa_sys.h"
-+#include <linux/fsl_usdpaa.h>
-+#include "bman_low.h"
-+#include "qman_low.h"
-+
-+/* Physical address range of the memory reservation, exported for mm/mem.c */
-+static u64 phys_start;
-+static u64 phys_size;
-+static u64 arg_phys_size;
-+
-+/* PFN versions of the above */
-+static unsigned long pfn_start;
-+static unsigned long pfn_size;
-+
-+/* Memory reservations are manipulated under this spinlock (which is why 'refs'
-+ * isn't atomic_t). */
-+static DEFINE_SPINLOCK(mem_lock);
-+
-+/* The range of TLB1 indices */
-+static unsigned int first_tlb;
-+static unsigned int num_tlb = 1;
-+static unsigned int current_tlb; /* loops around for fault handling */
-+
-+/* Memory reservation is represented as a list of 'mem_fragment's, some of which
-+ * may be mapped. Unmapped fragments are always merged where possible. */
-+static LIST_HEAD(mem_list);
-+
-+struct mem_mapping;
-+
-+/* Memory fragments are in 'mem_list'. */
-+struct mem_fragment {
-+ u64 base;
-+ u64 len;
-+ unsigned long pfn_base; /* PFN version of 'base' */
-+ unsigned long pfn_len; /* PFN version of 'len' */
-+ unsigned int refs; /* zero if unmapped */
-+	u64 root_len; /* Size of the original fragment */
-+	unsigned long root_pfn; /* PFN of the original fragment */
-+ struct list_head list;
-+ /* if mapped, flags+name captured at creation time */
-+ u32 flags;
-+ char name[USDPAA_DMA_NAME_MAX];
-+ u64 map_len;
-+ /* support multi-process locks per-memory-fragment. */
-+ int has_locking;
-+ wait_queue_head_t wq;
-+ struct mem_mapping *owner;
-+};
-+
-+/* Mappings of memory fragments in 'struct ctx'. These are created from
-+ * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a
-+ * mmap(). */
-+struct mem_mapping {
-+ struct mem_fragment *root_frag;
-+ u32 frag_count;
-+ u64 total_size;
-+ struct list_head list;
-+ int refs;
-+ void *virt_addr;
-+};
-+
-+struct portal_mapping {
-+ struct usdpaa_ioctl_portal_map user;
-+ union {
-+ struct qm_portal_config *qportal;
-+ struct bm_portal_config *bportal;
-+ };
-+	/* Declare space for the portals in case the process
-+	   exits unexpectedly and needs to be cleaned up by the kernel */
-+ union {
-+ struct qm_portal qman_portal_low;
-+ struct bm_portal bman_portal_low;
-+ };
-+ struct list_head list;
-+ struct resource *phys;
-+ struct iommu_domain *iommu_domain;
-+};
-+
-+/* Track the DPAA resources the process is using */
-+struct active_resource {
-+ struct list_head list;
-+ u32 id;
-+ u32 num;
-+ unsigned int refcount;
-+};
-+
-+/* Per-FD state (which should also be per-process but we don't enforce that) */
-+struct ctx {
-+ /* Lock to protect the context */
-+ spinlock_t lock;
-+ /* Allocated resources get put here for accounting */
-+ struct list_head resources[usdpaa_id_max];
-+ /* list of DMA maps */
-+ struct list_head maps;
-+ /* list of portal maps */
-+ struct list_head portals;
-+};
-+
-+/* Different resource classes */
-+static const struct alloc_backend {
-+ enum usdpaa_id_type id_type;
-+ int (*alloc)(u32 *, u32, u32, int);
-+ void (*release)(u32 base, unsigned int count);
-+ int (*reserve)(u32 base, unsigned int count);
-+ const char *acronym;
-+} alloc_backends[] = {
-+ {
-+ .id_type = usdpaa_id_fqid,
-+ .alloc = qman_alloc_fqid_range,
-+ .release = qman_release_fqid_range,
-+ .reserve = qman_reserve_fqid_range,
-+ .acronym = "FQID"
-+ },
-+ {
-+ .id_type = usdpaa_id_bpid,
-+ .alloc = bman_alloc_bpid_range,
-+ .release = bman_release_bpid_range,
-+ .reserve = bman_reserve_bpid_range,
-+ .acronym = "BPID"
-+ },
-+ {
-+ .id_type = usdpaa_id_qpool,
-+ .alloc = qman_alloc_pool_range,
-+ .release = qman_release_pool_range,
-+ .reserve = qman_reserve_pool_range,
-+ .acronym = "QPOOL"
-+ },
-+ {
-+ .id_type = usdpaa_id_cgrid,
-+ .alloc = qman_alloc_cgrid_range,
-+ .release = qman_release_cgrid_range,
-+ .acronym = "CGRID"
-+ },
-+ {
-+ .id_type = usdpaa_id_ceetm0_lfqid,
-+ .alloc = qman_alloc_ceetm0_lfqid_range,
-+ .release = qman_release_ceetm0_lfqid_range,
-+ .acronym = "CEETM0_LFQID"
-+ },
-+ {
-+ .id_type = usdpaa_id_ceetm0_channelid,
-+ .alloc = qman_alloc_ceetm0_channel_range,
-+ .release = qman_release_ceetm0_channel_range,
-+ .acronym = "CEETM0_LFQID"
-+ },
-+ {
-+ .id_type = usdpaa_id_ceetm1_lfqid,
-+ .alloc = qman_alloc_ceetm1_lfqid_range,
-+ .release = qman_release_ceetm1_lfqid_range,
-+ .acronym = "CEETM1_LFQID"
-+ },
-+ {
-+ .id_type = usdpaa_id_ceetm1_channelid,
-+ .alloc = qman_alloc_ceetm1_channel_range,
-+ .release = qman_release_ceetm1_channel_range,
-+ .acronym = "CEETM1_LFQID"
-+ },
-+ {
-+ /* This terminates the array */
-+ .id_type = usdpaa_id_max
-+ }
-+};
-+
-+/* Determine the largest acceptable page size for a given size.
-+   The candidate sizes are constrained by the page sizes TLB1 accepts. */
-+static u32 largest_page_size(u32 size)
-+{
-+ int shift = 30; /* Start at 1G size */
-+ if (size < 4096)
-+ return 0;
-+ do {
-+ if (size >= (1<<shift))
-+ return 1<<shift;
-+ shift -= 2;
-+ } while (shift >= 12); /* Up to 4k */
-+ return 0;
-+}
-+
-+/* Determine if value is power of 4 */
-+static inline bool is_power_of_4(u64 x)
-+{
-+ if (x == 0 || ((x & (x - 1)) != 0))
-+ return false;
-+ return !!(x & 0x5555555555555555ull);
-+}
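-+
-+/* Illustrative sketch only (not part of the original driver): the helpers
-+ * above restrict allocations to the TLB1 page sizes 4K, 16K, 64K, ..., 1G
-+ * (powers of 4 times 4K). A few expected values, expressed as runtime
-+ * checks in a function that is never actually called: */
-+static void __maybe_unused page_size_examples(void)
-+{
-+	WARN_ON(largest_page_size(5 * 1024 * 1024) != 4 * 1024 * 1024);
-+	WARN_ON(largest_page_size(4096) != 4096);
-+	WARN_ON(largest_page_size(100) != 0);
-+	WARN_ON(!is_power_of_4(64));
-+	WARN_ON(is_power_of_4(32)); /* power of 2, but not of 4 */
-+}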
-+
-+/* Helper for ioctl_dma_map() when we have a larger fragment than we need. This
-+ * splits the fragment into 4 and returns the upper-most. (The caller can loop
-+ * until it has a suitable fragment size.) */
-+static struct mem_fragment *split_frag(struct mem_fragment *frag)
-+{
-+ struct mem_fragment *x[3];
-+
-+ x[0] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
-+ x[1] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
-+ x[2] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
-+ if (!x[0] || !x[1] || !x[2]) {
-+ kfree(x[0]);
-+ kfree(x[1]);
-+ kfree(x[2]);
-+ return NULL;
-+ }
-+ BUG_ON(frag->refs);
-+ frag->len >>= 2;
-+ frag->pfn_len >>= 2;
-+ x[0]->base = frag->base + frag->len;
-+ x[1]->base = x[0]->base + frag->len;
-+ x[2]->base = x[1]->base + frag->len;
-+ x[0]->len = x[1]->len = x[2]->len = frag->len;
-+ x[0]->pfn_base = frag->pfn_base + frag->pfn_len;
-+ x[1]->pfn_base = x[0]->pfn_base + frag->pfn_len;
-+ x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len;
-+ x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len;
-+ x[0]->refs = x[1]->refs = x[2]->refs = 0;
-+ x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len;
-+ x[0]->root_pfn = x[1]->root_pfn = x[2]->root_pfn = frag->root_pfn;
-+ x[0]->name[0] = x[1]->name[0] = x[2]->name[0] = 0;
-+ list_add_tail(&x[0]->list, &frag->list);
-+ list_add_tail(&x[1]->list, &x[0]->list);
-+ list_add_tail(&x[2]->list, &x[1]->list);
-+ return x[2];
-+}
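-+
-+/* Illustrative sketch only (not part of the original driver): splitting a
-+ * free 64MB fragment at base 0x80000000 shrinks it to 16MB and links three
-+ * new 16MB siblings at 0x81000000, 0x82000000 and 0x83000000 behind it,
-+ * returning the topmost one. A hypothetical helper that repeats the split
-+ * until a target power-of-4 size is reached (caller holds mem_lock and the
-+ * fragment must be unreferenced): */
-+static struct mem_fragment * __maybe_unused
-+split_to_size(struct mem_fragment *frag, u64 want)
-+{
-+	while (frag && frag->len > want)
-+		frag = split_frag(frag); /* NULL on allocation failure */
-+	return frag;
-+}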
-+
-+static __maybe_unused void dump_frags(void)
-+{
-+ struct mem_fragment *frag;
-+ int i = 0;
-+ list_for_each_entry(frag, &mem_list, list) {
-+ pr_info("FRAG %d: base 0x%llx pfn_base 0x%lx len 0x%llx root_len 0x%llx root_pfn 0x%lx refs %d name %s\n",
-+ i, frag->base, frag->pfn_base,
-+ frag->len, frag->root_len, frag->root_pfn,
-+ frag->refs, frag->name);
-+ ++i;
-+ }
-+}
-+
-+/* Walk the list of fragments and adjoin neighbouring segments if possible */
-+static void compress_frags(void)
-+{
-+ /* Walk the fragment list and combine fragments */
-+ struct mem_fragment *frag, *nxtfrag;
-+ u64 len = 0;
-+
-+ int i, numfrags;
-+
-+
-+ frag = list_entry(mem_list.next, struct mem_fragment, list);
-+
-+ while (&frag->list != &mem_list) {
-+		/* Must combine consecutive fragments with the
-+		   same root_pfn such that the result is a power of 4 */
-+ if (frag->refs != 0) {
-+ frag = list_entry(frag->list.next,
-+ struct mem_fragment, list);
-+ continue; /* Not this window */
-+ }
-+ len = frag->len;
-+ numfrags = 0;
-+ nxtfrag = list_entry(frag->list.next,
-+ struct mem_fragment, list);
-+ while (true) {
-+ if (&nxtfrag->list == &mem_list) {
-+ numfrags = 0;
-+ break; /* End of list */
-+ }
-+ if (nxtfrag->refs) {
-+ numfrags = 0;
-+ break; /* In use still */
-+ }
-+ if (nxtfrag->root_pfn != frag->root_pfn) {
-+ numfrags = 0;
-+ break; /* Crosses root fragment boundary */
-+ }
-+ len += nxtfrag->len;
-+ numfrags++;
-+ if (is_power_of_4(len)) {
-+ /* These fragments can be combined */
-+ break;
-+ }
-+ nxtfrag = list_entry(nxtfrag->list.next,
-+ struct mem_fragment, list);
-+ }
-+ if (numfrags == 0) {
-+ frag = list_entry(frag->list.next,
-+ struct mem_fragment, list);
-+ continue; /* try the next window */
-+ }
-+ for (i = 0; i < numfrags; i++) {
-+ struct mem_fragment *todel =
-+ list_entry(nxtfrag->list.prev,
-+ struct mem_fragment, list);
-+ nxtfrag->len += todel->len;
-+ nxtfrag->pfn_len += todel->pfn_len;
-+ list_del(&todel->list);
-+ }
-+		/* Re-evaluate the list; things may merge now */
-+ frag = list_entry(mem_list.next, struct mem_fragment, list);
-+ }
-+}
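-+
-+/* Illustrative note (not part of the original driver): this is the inverse
-+ * of split_frag() above. For example, once all four 16MB quarters of a 64MB
-+ * region belonging to the same root fragment drop to zero refs, their
-+ * combined length (64MB) is a power of 4 again and they are folded back
-+ * into a single 64MB fragment. */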
-+
-+/* Hook from arch/powerpc/mm/mem.c */
-+int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size)
-+{
-+ struct mem_fragment *frag;
-+ int idx = -1;
-+ if ((pfn < pfn_start) || (pfn >= (pfn_start + pfn_size)))
-+ return -1;
-+ /* It's in-range, we need to find the fragment */
-+ spin_lock(&mem_lock);
-+ list_for_each_entry(frag, &mem_list, list) {
-+ if ((pfn >= frag->pfn_base) && (pfn < (frag->pfn_base +
-+ frag->pfn_len))) {
-+ *phys_addr = frag->base;
-+ *size = frag->len;
-+ idx = current_tlb++;
-+ if (current_tlb >= (first_tlb + num_tlb))
-+ current_tlb = first_tlb;
-+ break;
-+ }
-+ }
-+ spin_unlock(&mem_lock);
-+ return idx;
-+}
-+
-+static int usdpaa_open(struct inode *inode, struct file *filp)
-+{
-+ const struct alloc_backend *backend = &alloc_backends[0];
-+ struct ctx *ctx = kmalloc(sizeof(struct ctx), GFP_KERNEL);
-+ if (!ctx)
-+ return -ENOMEM;
-+ filp->private_data = ctx;
-+
-+ while (backend->id_type != usdpaa_id_max) {
-+ INIT_LIST_HEAD(&ctx->resources[backend->id_type]);
-+ backend++;
-+ }
-+
-+ INIT_LIST_HEAD(&ctx->maps);
-+ INIT_LIST_HEAD(&ctx->portals);
-+ spin_lock_init(&ctx->lock);
-+
-+ //filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi;
-+
-+ return 0;
-+}
-+
-+#define DQRR_MAXFILL 15
-+
-+
-+/* Invalidate a portal */
-+void dbci_portal(void *addr)
-+{
-+ int i;
-+
-+ for (i = 0; i < 0x4000; i += 64)
-+ dcbi(addr + i);
-+}
-+
-+/* Reset a QMan portal to its default state */
-+static int init_qm_portal(struct qm_portal_config *config,
-+ struct qm_portal *portal)
-+{
-+ const struct qm_dqrr_entry *dqrr = NULL;
-+ int i;
-+
-+ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
-+ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
-+
-+ /* Make sure interrupts are inhibited */
-+ qm_out(IIR, 1);
-+
-+ /*
-+	 * Invalidate the entire CE portal area to ensure no stale
-+ * cachelines are present. This should be done on all
-+ * cores as the portal is mapped as M=0 (non-coherent).
-+ */
-+ on_each_cpu(dbci_portal, portal->addr.addr_ce, 1);
-+
-+ /* Initialize the DQRR. This will stop any dequeue
-+ commands that are in progress */
-+ if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb,
-+ qm_dqrr_cdc, DQRR_MAXFILL)) {
-+ pr_err("qm_dqrr_init() failed when trying to"
-+ " recover portal, portal will be leaked\n");
-+ return 1;
-+ }
-+
-+ /* Discard any entries on the DQRR */
-+ /* If we consume the ring twice something is wrong */
-+ for (i = 0; i < DQRR_MAXFILL * 2; i++) {
-+ qm_dqrr_pvb_update(portal);
-+ dqrr = qm_dqrr_current(portal);
-+ if (!dqrr)
-+ break;
-+ qm_dqrr_cdc_consume_1ptr(portal, dqrr, 0);
-+ qm_dqrr_pvb_update(portal);
-+ qm_dqrr_next(portal);
-+ }
-+ /* Initialize the EQCR */
-+ if (qm_eqcr_init(portal, qm_eqcr_pvb,
-+ qm_eqcr_get_ci_stashing(portal), 1)) {
-+ pr_err("Qman EQCR initialisation failed\n");
-+ return 1;
-+ }
-+ /* initialize the MR */
-+ if (qm_mr_init(portal, qm_mr_pvb, qm_mr_cci)) {
-+ pr_err("Qman MR initialisation failed\n");
-+ return 1;
-+ }
-+ qm_mr_pvb_update(portal);
-+ while (qm_mr_current(portal)) {
-+ qm_mr_next(portal);
-+ qm_mr_cci_consume_to_current(portal);
-+ qm_mr_pvb_update(portal);
-+ }
-+
-+ if (qm_mc_init(portal)) {
-+ pr_err("Qman MC initialisation failed\n");
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static int init_bm_portal(struct bm_portal_config *config,
-+ struct bm_portal *portal)
-+{
-+ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
-+ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
-+
-+ /*
-+	 * Invalidate the entire CE portal area to ensure no stale
-+ * cachelines are present. This should be done on all
-+ * cores as the portal is mapped as M=0 (non-coherent).
-+ */
-+ on_each_cpu(dbci_portal, portal->addr.addr_ce, 1);
-+
-+ if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) {
-+ pr_err("Bman RCR initialisation failed\n");
-+ return 1;
-+ }
-+ if (bm_mc_init(portal)) {
-+ pr_err("Bman MC initialisation failed\n");
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/* Scan all FQs in the system. For each FQ that is not
-+   OOS, call the check_channel helper to determine whether the FQ should
-+   be torn down. If the check_channel helper returns true, the FQ is
-+   transitioned to the OOS state. */
-+static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
-+ bool (*check_channel)(void*, u32))
-+{
-+ u32 fq_id = 0;
-+ while (1) {
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ u8 state;
-+ u32 channel;
-+
-+ /* Determine the channel for the FQID */
-+ mcc = qm_mc_start(portal);
-+ mcc->queryfq.fqid = fq_id;
-+ qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
-+ while (!(mcr = qm_mc_result(portal)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
-+ == QM_MCR_VERB_QUERYFQ);
-+ if (mcr->result != QM_MCR_RESULT_OK)
-+ break; /* End of valid FQIDs */
-+
-+ channel = mcr->queryfq.fqd.dest.channel;
-+ /* Determine the state of the FQID */
-+ mcc = qm_mc_start(portal);
-+ mcc->queryfq_np.fqid = fq_id;
-+ qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
-+ while (!(mcr = qm_mc_result(portal)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
-+ == QM_MCR_VERB_QUERYFQ_NP);
-+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
-+ if (state == QM_MCR_NP_STATE_OOS)
-+			/* Already OOS, no need to do any more checks */
-+ goto next;
-+
-+ if (check_channel(ctx, channel))
-+ qm_shutdown_fq(&portal, 1, fq_id);
-+ next:
-+ ++fq_id;
-+ }
-+ return 0;
-+}
-+
-+static bool check_channel_device(void *_ctx, u32 channel)
-+{
-+ struct ctx *ctx = _ctx;
-+ struct portal_mapping *portal, *tmpportal;
-+ struct active_resource *res;
-+
-+ /* See if the FQ is destined for one of the portals we're cleaning up */
-+ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
-+ if (portal->user.type == usdpaa_portal_qman) {
-+ if (portal->qportal->public_cfg.channel == channel) {
-+				/* This FQ's destination is a portal
-+				   we're cleaning; send a retire */
-+ return true;
-+ }
-+ }
-+ }
-+
-+ /* Check the pool channels that will be released as well */
-+ list_for_each_entry(res, &ctx->resources[usdpaa_id_qpool], list) {
-+		if ((channel >= res->id) &&
-+		    (channel <= (res->id + res->num - 1)))
-+ return true;
-+ }
-+ return false;
-+}
-+
-+static bool check_portal_channel(void *ctx, u32 channel)
-+{
-+ u32 portal_channel = *(u32 *)ctx;
-+ if (portal_channel == channel) {
-+		/* This FQ's destination is a portal
-+		   we're cleaning; send a retire */
-+ return true;
-+ }
-+ return false;
-+}
-+
-+
-+
-+
-+static int usdpaa_release(struct inode *inode, struct file *filp)
-+{
-+ struct ctx *ctx = filp->private_data;
-+ struct mem_mapping *map, *tmpmap;
-+ struct portal_mapping *portal, *tmpportal;
-+ const struct alloc_backend *backend = &alloc_backends[0];
-+ struct active_resource *res;
-+ struct qm_portal *qm_cleanup_portal = NULL;
-+ struct bm_portal *bm_cleanup_portal = NULL;
-+ struct qm_portal_config *qm_alloced_portal = NULL;
-+ struct bm_portal_config *bm_alloced_portal = NULL;
-+
-+ struct qm_portal *portal_array[qman_portal_max];
-+ int portal_count = 0;
-+
-+	/* Ensure the release operation cannot be migrated to another
-+	   CPU as CPU-specific variables may be needed during cleanup */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ migrate_disable();
-+#endif
-+ /* The following logic is used to recover resources that were not
-+ correctly released by the process that is closing the FD.
-+	   Step 1: synchronize the HW with the qm_portal/bm_portal structures
-+ in the kernel
-+ */
-+
-+ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
-+ /* Try to recover any portals that weren't shut down */
-+ if (portal->user.type == usdpaa_portal_qman) {
-+ portal_array[portal_count] = &portal->qman_portal_low;
-+ ++portal_count;
-+ init_qm_portal(portal->qportal,
-+ &portal->qman_portal_low);
-+ if (!qm_cleanup_portal) {
-+ qm_cleanup_portal = &portal->qman_portal_low;
-+ } else {
-+ /* Clean FQs on the dedicated channel */
-+ u32 chan = portal->qportal->public_cfg.channel;
-+ qm_check_and_destroy_fqs(
-+ &portal->qman_portal_low, &chan,
-+ check_portal_channel);
-+ }
-+ } else {
-+ /* BMAN */
-+ init_bm_portal(portal->bportal,
-+ &portal->bman_portal_low);
-+ if (!bm_cleanup_portal)
-+ bm_cleanup_portal = &portal->bman_portal_low;
-+ }
-+ }
-+ /* If no portal was found, allocate one for cleanup */
-+ if (!qm_cleanup_portal) {
-+ qm_alloced_portal = qm_get_unused_portal();
-+ if (!qm_alloced_portal) {
-+ pr_crit("No QMan portal avalaible for cleanup\n");
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ migrate_enable();
-+#endif
-+ return -1;
-+ }
-+ qm_cleanup_portal = kmalloc(sizeof(struct qm_portal),
-+ GFP_KERNEL);
-+ if (!qm_cleanup_portal) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ migrate_enable();
-+#endif
-+ return -ENOMEM;
-+ }
-+ init_qm_portal(qm_alloced_portal, qm_cleanup_portal);
-+ portal_array[portal_count] = qm_cleanup_portal;
-+ ++portal_count;
-+ }
-+ if (!bm_cleanup_portal) {
-+ bm_alloced_portal = bm_get_unused_portal();
-+ if (!bm_alloced_portal) {
-+ pr_crit("No BMan portal avalaible for cleanup\n");
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ migrate_enable();
-+#endif
-+ return -1;
-+ }
-+ bm_cleanup_portal = kmalloc(sizeof(struct bm_portal),
-+ GFP_KERNEL);
-+ if (!bm_cleanup_portal) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ migrate_enable();
-+#endif
-+ return -ENOMEM;
-+ }
-+ init_bm_portal(bm_alloced_portal, bm_cleanup_portal);
-+ }
-+
-+ /* OOS the FQs associated with this process */
-+ qm_check_and_destroy_fqs(qm_cleanup_portal, ctx, check_channel_device);
-+
-+ while (backend->id_type != usdpaa_id_max) {
-+ int leaks = 0;
-+ list_for_each_entry(res, &ctx->resources[backend->id_type],
-+ list) {
-+ if (backend->id_type == usdpaa_id_fqid) {
-+ int i = 0;
-+ for (; i < res->num; i++) {
-+ /* Clean FQs with the cleanup portal */
-+ qm_shutdown_fq(portal_array,
-+ portal_count,
-+ res->id + i);
-+ }
-+ }
-+ leaks += res->num;
-+ backend->release(res->id, res->num);
-+ }
-+ if (leaks)
-+ pr_crit("USDPAA process leaking %d %s%s\n", leaks,
-+ backend->acronym, (leaks > 1) ? "s" : "");
-+ backend++;
-+ }
-+ /* Release any DMA regions */
-+ spin_lock(&mem_lock);
-+ list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) {
-+ struct mem_fragment *current_frag = map->root_frag;
-+ int i;
-+ if (map->root_frag->has_locking &&
-+ (map->root_frag->owner == map)) {
-+ map->root_frag->owner = NULL;
-+ wake_up(&map->root_frag->wq);
-+ }
-+ /* Check each fragment and merge if the ref count is 0 */
-+ for (i = 0; i < map->frag_count; i++) {
-+ --current_frag->refs;
-+ current_frag = list_entry(current_frag->list.prev,
-+ struct mem_fragment, list);
-+ }
-+
-+ compress_frags();
-+ list_del(&map->list);
-+ kfree(map);
-+ }
-+ spin_unlock(&mem_lock);
-+
-+ /* Return portals */
-+ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
-+ if (portal->user.type == usdpaa_portal_qman) {
-+ /* Give the portal back to the allocator */
-+ init_qm_portal(portal->qportal,
-+ &portal->qman_portal_low);
-+ qm_put_unused_portal(portal->qportal);
-+ } else {
-+ init_bm_portal(portal->bportal,
-+ &portal->bman_portal_low);
-+ bm_put_unused_portal(portal->bportal);
-+ }
-+ list_del(&portal->list);
-+ kfree(portal);
-+ }
-+ if (qm_alloced_portal) {
-+ qm_put_unused_portal(qm_alloced_portal);
-+ kfree(qm_cleanup_portal);
-+ }
-+ if (bm_alloced_portal) {
-+ bm_put_unused_portal(bm_alloced_portal);
-+ kfree(bm_cleanup_portal);
-+ }
-+
-+ kfree(ctx);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ migrate_enable();
-+#endif
-+ return 0;
-+}
-+
-+static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
-+ int *match, unsigned long *pfn)
-+{
-+ struct mem_mapping *map;
-+
-+ list_for_each_entry(map, &ctx->maps, list) {
-+ int i;
-+ struct mem_fragment *frag = map->root_frag;
-+
-+ for (i = 0; i < map->frag_count; i++) {
-+ if (frag->pfn_base == vma->vm_pgoff) {
-+ *match = 1;
-+ *pfn = frag->pfn_base;
-+ return 0;
-+ }
-+ frag = list_entry(frag->list.next, struct mem_fragment,
-+ list);
-+ }
-+ }
-+ *match = 0;
-+ return 0;
-+}
-+
-+static int check_mmap_resource(struct resource *res, struct vm_area_struct *vma,
-+ int *match, unsigned long *pfn)
-+{
-+ *pfn = res->start >> PAGE_SHIFT;
-+ if (*pfn == vma->vm_pgoff) {
-+ *match = 1;
-+ if ((vma->vm_end - vma->vm_start) != resource_size(res))
-+ return -EINVAL;
-+ } else
-+ *match = 0;
-+ return 0;
-+}
-+
-+static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma,
-+ int *match, unsigned long *pfn)
-+{
-+ struct portal_mapping *portal;
-+ int ret;
-+
-+ list_for_each_entry(portal, &ctx->portals, list) {
-+ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CE], vma,
-+ match, pfn);
-+ if (*match) {
-+ vma->vm_page_prot =
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ pgprot_cached_ns(vma->vm_page_prot);
-+#else
-+ pgprot_cached_noncoherent(vma->vm_page_prot);
-+#endif
-+ return ret;
-+ }
-+ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CI], vma,
-+ match, pfn);
-+ if (*match) {
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+ return ret;
-+ }
-+ }
-+ *match = 0;
-+ return 0;
-+}
-+
-+static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma)
-+{
-+ struct ctx *ctx = filp->private_data;
-+ unsigned long pfn = 0;
-+ int match, ret;
-+
-+ spin_lock(&mem_lock);
-+ ret = check_mmap_dma(ctx, vma, &match, &pfn);
-+ if (!match)
-+ ret = check_mmap_portal(ctx, vma, &match, &pfn);
-+ spin_unlock(&mem_lock);
-+ if (!match)
-+ return -EINVAL;
-+ if (!ret)
-+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
-+ vma->vm_end - vma->vm_start,
-+ vma->vm_page_prot);
-+ return ret;
-+}
-+
-+/* Return the nearest rounded-up address >= 'addr' that is 'sz'-aligned. 'sz'
-+ * must be a power of 2, but both 'addr' and 'sz' can be expressions. */
-+#define USDPAA_MEM_ROUNDUP(addr, sz) \
-+ ({ \
-+ unsigned long foo_align = (sz) - 1; \
-+ ((addr) + foo_align) & ~foo_align; \
-+ })
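-+
-+/* Illustrative note (not part of the original driver), assuming a 4MB
-+ * alignment requirement:
-+ *   USDPAA_MEM_ROUNDUP(0x10001000UL, 0x400000) == 0x10400000
-+ *   USDPAA_MEM_ROUNDUP(0x10400000UL, 0x400000) == 0x10400000 (already aligned)
-+ */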
-+/* Searching for a size-aligned virtual address range starting from 'addr' */
-+static unsigned long usdpaa_get_unmapped_area(struct file *file,
-+ unsigned long addr,
-+ unsigned long len,
-+ unsigned long pgoff,
-+ unsigned long flags)
-+{
-+ struct vm_area_struct *vma;
-+
-+ if (len % PAGE_SIZE)
-+ return -EINVAL;
-+ if (!len)
-+ return -EINVAL;
-+
-+	/* Need to align the address to the largest page size of the mapping
-+ * because the MMU requires the virtual address to have the same
-+ * alignment as the physical address */
-+ addr = USDPAA_MEM_ROUNDUP(addr, largest_page_size(len));
-+ vma = find_vma(current->mm, addr);
-+ /* Keep searching until we reach the end of currently-used virtual
-+ * address-space or we find a big enough gap. */
-+ while (vma) {
-+ if ((addr + len) < vma->vm_start)
-+ return addr;
-+
-+ addr = USDPAA_MEM_ROUNDUP(vma->vm_end, largest_page_size(len));
-+ vma = vma->vm_next;
-+ }
-+ if ((TASK_SIZE - len) < addr)
-+ return -ENOMEM;
-+ return addr;
-+}
-+
-+static long ioctl_id_alloc(struct ctx *ctx, void __user *arg)
-+{
-+ struct usdpaa_ioctl_id_alloc i;
-+ const struct alloc_backend *backend;
-+ struct active_resource *res;
-+ int ret = copy_from_user(&i, arg, sizeof(i));
-+ if (ret)
-+ return ret;
-+ if ((i.id_type >= usdpaa_id_max) || !i.num)
-+ return -EINVAL;
-+ backend = &alloc_backends[i.id_type];
-+ /* Allocate the required resource type */
-+ ret = backend->alloc(&i.base, i.num, i.align, i.partial);
-+ if (ret < 0)
-+ return ret;
-+ i.num = ret;
-+ /* Copy the result to user-space */
-+ ret = copy_to_user(arg, &i, sizeof(i));
-+ if (ret) {
-+ backend->release(i.base, i.num);
-+ return ret;
-+ }
-+ /* Assign the allocated range to the FD accounting */
-+ res = kmalloc(sizeof(*res), GFP_KERNEL);
-+ if (!res) {
-+ backend->release(i.base, i.num);
-+ return -ENOMEM;
-+ }
-+ spin_lock(&ctx->lock);
-+ res->id = i.base;
-+ res->num = i.num;
-+ res->refcount = 1;
-+ list_add(&res->list, &ctx->resources[i.id_type]);
-+ spin_unlock(&ctx->lock);
-+ return 0;
-+}
-+
-+static long ioctl_id_release(struct ctx *ctx, void __user *arg)
-+{
-+ struct usdpaa_ioctl_id_release i;
-+ const struct alloc_backend *backend;
-+ struct active_resource *tmp, *pos;
-+
-+ int ret = copy_from_user(&i, arg, sizeof(i));
-+ if (ret)
-+ return ret;
-+ if ((i.id_type >= usdpaa_id_max) || !i.num)
-+ return -EINVAL;
-+ backend = &alloc_backends[i.id_type];
-+ /* Pull the range out of the FD accounting - the range is valid iff this
-+ * succeeds. */
-+ spin_lock(&ctx->lock);
-+ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
-+ if (pos->id == i.base && pos->num == i.num) {
-+ pos->refcount--;
-+ if (pos->refcount) {
-+ spin_unlock(&ctx->lock);
-+ return 0; /* Still being used */
-+ }
-+ list_del(&pos->list);
-+ kfree(pos);
-+ spin_unlock(&ctx->lock);
-+ goto found;
-+ }
-+ }
-+ /* Failed to find the resource */
-+ spin_unlock(&ctx->lock);
-+ pr_err("Couldn't find resource type %d base 0x%x num %d\n",
-+ i.id_type, i.base, i.num);
-+ return -EINVAL;
-+found:
-+ /* Release the resource to the backend */
-+ backend->release(i.base, i.num);
-+ return 0;
-+}
-+
-+static long ioctl_id_reserve(struct ctx *ctx, void __user *arg)
-+{
-+ struct usdpaa_ioctl_id_reserve i;
-+ const struct alloc_backend *backend;
-+ struct active_resource *tmp, *pos;
-+
-+ int ret = copy_from_user(&i, arg, sizeof(i));
-+ if (ret)
-+ return ret;
-+ if ((i.id_type >= usdpaa_id_max) || !i.num)
-+ return -EINVAL;
-+ backend = &alloc_backends[i.id_type];
-+ if (!backend->reserve)
-+ return -EINVAL;
-+ /* Pull the range out of the FD accounting - the range is valid iff this
-+ * succeeds. */
-+ spin_lock(&ctx->lock);
-+ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
-+ if (pos->id == i.base && pos->num == i.num) {
-+ pos->refcount++;
-+ spin_unlock(&ctx->lock);
-+ return 0;
-+ }
-+ }
-+
-+ /* Failed to find the resource */
-+ spin_unlock(&ctx->lock);
-+
-+ /* Reserve the resource in the backend */
-+ ret = backend->reserve(i.base, i.num);
-+ if (ret)
-+ return ret;
-+ /* Assign the reserved range to the FD accounting */
-+ pos = kmalloc(sizeof(*pos), GFP_KERNEL);
-+ if (!pos) {
-+ backend->release(i.base, i.num);
-+ return -ENOMEM;
-+ }
-+ spin_lock(&ctx->lock);
-+ pos->id = i.base;
-+ pos->num = i.num;
-+ pos->refcount = 1;
-+ list_add(&pos->list, &ctx->resources[i.id_type]);
-+ spin_unlock(&ctx->lock);
-+ return 0;
-+}
-+
-+static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
-+ struct usdpaa_ioctl_dma_map *i)
-+{
-+ struct mem_fragment *frag, *start_frag, *next_frag;
-+ struct mem_mapping *map, *tmp;
-+ int ret = 0;
-+ u32 largest_page, so_far = 0;
-+ int frag_count = 0;
-+ unsigned long next_addr = PAGE_SIZE, populate;
-+
-+ /* error checking to ensure values copied from user space are valid */
-+ if (i->len % PAGE_SIZE)
-+ return -EINVAL;
-+
-+ map = kmalloc(sizeof(*map), GFP_KERNEL);
-+ if (!map)
-+ return -ENOMEM;
-+
-+ spin_lock(&mem_lock);
-+ if (i->flags & USDPAA_DMA_FLAG_SHARE) {
-+ list_for_each_entry(frag, &mem_list, list) {
-+ if (frag->refs && (frag->flags &
-+ USDPAA_DMA_FLAG_SHARE) &&
-+ !strncmp(i->name, frag->name,
-+ USDPAA_DMA_NAME_MAX)) {
-+ /* Matching entry */
-+ if ((i->flags & USDPAA_DMA_FLAG_CREATE) &&
-+ !(i->flags & USDPAA_DMA_FLAG_LAZY)) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ /* Check to ensure size matches record */
-+				if (i->len != frag->map_len && i->len) {
-+					pr_err("ioctl_dma_map() Size requested does not match %s and is non-zero\n",
-+						frag->name);
-+					ret = -EINVAL;
-+					goto out;
-+				}
-+
-+ /* Check if this has already been mapped
-+ to this process */
-+ list_for_each_entry(tmp, &ctx->maps, list)
-+ if (tmp->root_frag == frag) {
-+ /* Already mapped, just need to
-+ inc ref count */
-+ tmp->refs++;
-+ kfree(map);
-+ i->did_create = 0;
-+ i->len = tmp->total_size;
-+ i->phys_addr = frag->base;
-+ i->ptr = tmp->virt_addr;
-+ spin_unlock(&mem_lock);
-+ return 0;
-+ }
-+ /* Matching entry - just need to map */
-+ i->has_locking = frag->has_locking;
-+ i->did_create = 0;
-+ i->len = frag->map_len;
-+ start_frag = frag;
-+ goto do_map;
-+ }
-+ }
-+ /* No matching entry */
-+ if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
-+ pr_err("ioctl_dma_map() No matching entry\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ }
-+ /* New fragment required, size must be provided. */
-+ if (!i->len) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+	/* Find one or more contiguous fragments that satisfy the total length,
-+	   trying to minimize the number of fragments; start by computing the
-+	   largest page size that the allocation could use */
-+ largest_page = largest_page_size(i->len);
-+ start_frag = NULL;
-+ while (largest_page &&
-+ largest_page <= largest_page_size(phys_size) &&
-+ start_frag == NULL) {
-+ /* Search the list for a frag of that size */
-+ list_for_each_entry(frag, &mem_list, list) {
-+ if (!frag->refs && (frag->len == largest_page)) {
-+				/* See if the next x fragments are free
-+				   and can accommodate the size */
-+ u32 found_size = largest_page;
-+ next_frag = list_entry(frag->list.prev,
-+ struct mem_fragment,
-+ list);
-+				/* If the fragment is too small, check
-+				   if the neighbours can support it */
-+ while (found_size < i->len) {
-+ if (&mem_list == &next_frag->list)
-+ break; /* End of list */
-+ if (next_frag->refs != 0 ||
-+ next_frag->len == 0)
-+ break; /* not enough space */
-+ found_size += next_frag->len;
-+ next_frag = list_entry(
-+ next_frag->list.prev,
-+ struct mem_fragment,
-+ list);
-+ }
-+ if (found_size >= i->len) {
-+				/* Success! There is enough contiguous
-+				   free space */
-+ start_frag = frag;
-+ break;
-+ }
-+ }
-+ } /* next frag loop */
-+		/* Couldn't satisfy the request with this page size;
-+		   try the next larger one (it is split down later) */
-+ largest_page <<= 2;
-+ }
-+ if (start_frag == NULL) {
-+ /* Couldn't find proper amount of space */
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ i->did_create = 1;
-+do_map:
-+ /* Verify there is sufficient space to do the mapping */
-+ down_write(&current->mm->mmap_sem);
-+ next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
-+ up_write(&current->mm->mmap_sem);
-+
-+ if (next_addr & ~PAGE_MASK) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+	/* We may need to divide the final fragment to accommodate the mapping */
-+ next_frag = start_frag;
-+ while (so_far != i->len) {
-+ BUG_ON(next_frag->len == 0);
-+ while ((next_frag->len + so_far) > i->len) {
-+ /* Split frag until they match */
-+ split_frag(next_frag);
-+ }
-+ so_far += next_frag->len;
-+ next_frag->refs++;
-+ ++frag_count;
-+ next_frag = list_entry(next_frag->list.prev,
-+ struct mem_fragment, list);
-+ }
-+ if (i->did_create) {
-+ size_t name_len = 0;
-+ start_frag->flags = i->flags;
-+ strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
-+ name_len = strnlen(start_frag->name, USDPAA_DMA_NAME_MAX);
-+ if (name_len >= USDPAA_DMA_NAME_MAX) {
-+ ret = -EFAULT;
-+ goto out;
-+ }
-+ start_frag->map_len = i->len;
-+ start_frag->has_locking = i->has_locking;
-+ init_waitqueue_head(&start_frag->wq);
-+ start_frag->owner = NULL;
-+ }
-+
-+ /* Setup the map entry */
-+ map->root_frag = start_frag;
-+ map->total_size = i->len;
-+ map->frag_count = frag_count;
-+ map->refs = 1;
-+ list_add(&map->list, &ctx->maps);
-+ i->phys_addr = start_frag->base;
-+out:
-+ spin_unlock(&mem_lock);
-+
-+ if (!ret) {
-+ unsigned long longret;
-+ down_write(&current->mm->mmap_sem);
-+ longret = do_mmap_pgoff(fp, next_addr, map->total_size,
-+ PROT_READ |
-+ (i->flags &
-+ USDPAA_DMA_FLAG_RDONLY ? 0
-+ : PROT_WRITE),
-+ MAP_SHARED,
-+ start_frag->pfn_base,
-+ &populate,
-+ NULL);
-+ up_write(&current->mm->mmap_sem);
-+ if (longret & ~PAGE_MASK) {
-+ ret = (int)longret;
-+ } else {
-+ i->ptr = (void *)longret;
-+ map->virt_addr = i->ptr;
-+ }
-+ } else
-+ kfree(map);
-+ return ret;
-+}
-+
-+static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
-+{
-+ struct mem_mapping *map;
-+ struct vm_area_struct *vma;
-+ int ret, i;
-+ struct mem_fragment *current_frag;
-+ size_t sz;
-+ unsigned long base;
-+ unsigned long vaddr;
-+
-+ down_write(&current->mm->mmap_sem);
-+ vma = find_vma(current->mm, (unsigned long)arg);
-+ if (!vma || (vma->vm_start > (unsigned long)arg)) {
-+ up_write(&current->mm->mmap_sem);
-+ return -EFAULT;
-+ }
-+ spin_lock(&mem_lock);
-+ list_for_each_entry(map, &ctx->maps, list) {
-+ if (map->root_frag->pfn_base == vma->vm_pgoff) {
-+ /* Drop the map lock if we hold it */
-+ if (map->root_frag->has_locking &&
-+ (map->root_frag->owner == map)) {
-+ map->root_frag->owner = NULL;
-+ wake_up(&map->root_frag->wq);
-+ }
-+ goto map_match;
-+ }
-+ }
-+ /* Failed to find a matching mapping for this process */
-+ ret = -EFAULT;
-+ spin_unlock(&mem_lock);
-+ goto out;
-+map_match:
-+ map->refs--;
-+ if (map->refs != 0) {
-+		/* Another call to dma_map is still referencing this */
-+ ret = 0;
-+ spin_unlock(&mem_lock);
-+ goto out;
-+ }
-+
-+ current_frag = map->root_frag;
-+ vaddr = (unsigned long) map->virt_addr;
-+ for (i = 0; i < map->frag_count; i++) {
-+ DPA_ASSERT(current_frag->refs > 0);
-+ --current_frag->refs;
-+#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
-+ /*
-+ * Make sure we invalidate the TLB entry for
-+ * this fragment, otherwise a remap of a different
-+		 * page to this vaddr would give access to an
-+ * incorrect piece of memory
-+ */
-+ cleartlbcam(vaddr, mfspr(SPRN_PID));
-+#endif
-+ vaddr += current_frag->len;
-+ current_frag = list_entry(current_frag->list.prev,
-+ struct mem_fragment, list);
-+ }
-+ map->root_frag->name[0] = 0;
-+ list_del(&map->list);
-+ compress_frags();
-+ spin_unlock(&mem_lock);
-+
-+ base = vma->vm_start;
-+ sz = vma->vm_end - vma->vm_start;
-+ do_munmap(current->mm, base, sz, NULL);
-+ ret = 0;
-+ out:
-+ up_write(&current->mm->mmap_sem);
-+ return ret;
-+}
-+
-+static long ioctl_dma_stats(struct ctx *ctx, void __user *arg)
-+{
-+ struct mem_fragment *frag;
-+ struct usdpaa_ioctl_dma_used result;
-+
-+ result.free_bytes = 0;
-+ result.total_bytes = phys_size;
-+
-+ list_for_each_entry(frag, &mem_list, list) {
-+ if (frag->refs == 0)
-+ result.free_bytes += frag->len;
-+ }
-+
-+	return copy_to_user(arg, &result, sizeof(result));
-+}
-+
-+static int test_lock(struct mem_mapping *map)
-+{
-+ int ret = 0;
-+ spin_lock(&mem_lock);
-+ if (!map->root_frag->owner) {
-+ map->root_frag->owner = map;
-+ ret = 1;
-+ }
-+ spin_unlock(&mem_lock);
-+ return ret;
-+}
-+
-+static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
-+{
-+ struct mem_mapping *map;
-+ struct vm_area_struct *vma;
-+
-+ down_read(&current->mm->mmap_sem);
-+ vma = find_vma(current->mm, (unsigned long)arg);
-+ if (!vma || (vma->vm_start > (unsigned long)arg)) {
-+ up_read(&current->mm->mmap_sem);
-+ return -EFAULT;
-+ }
-+ spin_lock(&mem_lock);
-+ list_for_each_entry(map, &ctx->maps, list) {
-+ if (map->root_frag->pfn_base == vma->vm_pgoff)
-+ goto map_match;
-+ }
-+ map = NULL;
-+map_match:
-+ spin_unlock(&mem_lock);
-+ up_read(&current->mm->mmap_sem);
-+
-+ if (!map)
-+ return -EFAULT;
-+ if (!map->root_frag->has_locking)
-+ return -ENODEV;
-+ return wait_event_interruptible(map->root_frag->wq, test_lock(map));
-+}
-+
-+static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
-+{
-+ struct mem_mapping *map;
-+ struct vm_area_struct *vma;
-+ int ret;
-+
-+ down_read(&current->mm->mmap_sem);
-+ vma = find_vma(current->mm, (unsigned long)arg);
-+ if (!vma || (vma->vm_start > (unsigned long)arg))
-+ ret = -EFAULT;
-+ else {
-+ spin_lock(&mem_lock);
-+ list_for_each_entry(map, &ctx->maps, list) {
-+ if (map->root_frag->pfn_base == vma->vm_pgoff) {
-+ if (!map->root_frag->has_locking)
-+ ret = -ENODEV;
-+ else if (map->root_frag->owner == map) {
-+ map->root_frag->owner = NULL;
-+ wake_up(&map->root_frag->wq);
-+ ret = 0;
-+ } else
-+ ret = -EBUSY;
-+ goto map_match;
-+ }
-+ }
-+ ret = -EINVAL;
-+map_match:
-+ spin_unlock(&mem_lock);
-+ }
-+ up_read(&current->mm->mmap_sem);
-+ return ret;
-+}
-+
-+static int portal_mmap(struct file *fp, struct resource *res, void **ptr)
-+{
-+ unsigned long longret = 0, populate;
-+ resource_size_t len;
-+
-+	len = resource_size(res);
-+	if (len != (unsigned long)len)
-+		return -EINVAL;
-+	down_write(&current->mm->mmap_sem);
-+ longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len,
-+ PROT_READ | PROT_WRITE, MAP_SHARED,
-+ res->start >> PAGE_SHIFT, &populate, NULL);
-+ up_write(&current->mm->mmap_sem);
-+
-+ if (longret & ~PAGE_MASK)
-+ return (int)longret;
-+
-+ *ptr = (void *) longret;
-+ return 0;
-+}
-+
-+static void portal_munmap(struct resource *res, void *ptr)
-+{
-+ down_write(&current->mm->mmap_sem);
-+ do_munmap(current->mm, (unsigned long)ptr, resource_size(res), NULL);
-+ up_write(&current->mm->mmap_sem);
-+}
-+
-+static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
-+ struct usdpaa_ioctl_portal_map *arg)
-+{
-+ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
-+ int ret;
-+
-+ if (!mapping)
-+ return -ENOMEM;
-+
-+ mapping->user = *arg;
-+ mapping->iommu_domain = NULL;
-+
-+ if (mapping->user.type == usdpaa_portal_qman) {
-+ mapping->qportal =
-+ qm_get_unused_portal_idx(mapping->user.index);
-+ if (!mapping->qportal) {
-+ ret = -ENODEV;
-+ goto err_get_portal;
-+ }
-+ mapping->phys = &mapping->qportal->addr_phys[0];
-+ mapping->user.channel = mapping->qportal->public_cfg.channel;
-+ mapping->user.pools = mapping->qportal->public_cfg.pools;
-+ mapping->user.index = mapping->qportal->public_cfg.index;
-+ } else if (mapping->user.type == usdpaa_portal_bman) {
-+ mapping->bportal =
-+ bm_get_unused_portal_idx(mapping->user.index);
-+ if (!mapping->bportal) {
-+ ret = -ENODEV;
-+ goto err_get_portal;
-+ }
-+ mapping->phys = &mapping->bportal->addr_phys[0];
-+ mapping->user.index = mapping->bportal->public_cfg.index;
-+ } else {
-+ ret = -EINVAL;
-+ goto err_copy_from_user;
-+ }
-+ /* Need to put pcfg in ctx's list before the mmaps because the mmap
-+ * handlers look it up. */
-+ spin_lock(&mem_lock);
-+ list_add(&mapping->list, &ctx->portals);
-+ spin_unlock(&mem_lock);
-+ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CE],
-+ &mapping->user.addr.cena);
-+ if (ret)
-+ goto err_mmap_cena;
-+ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CI],
-+ &mapping->user.addr.cinh);
-+ if (ret)
-+ goto err_mmap_cinh;
-+ *arg = mapping->user;
-+ return ret;
-+
-+err_mmap_cinh:
-+ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
-+err_mmap_cena:
-+ if ((mapping->user.type == usdpaa_portal_qman) && mapping->qportal)
-+ qm_put_unused_portal(mapping->qportal);
-+ else if ((mapping->user.type == usdpaa_portal_bman) && mapping->bportal)
-+ bm_put_unused_portal(mapping->bportal);
-+ spin_lock(&mem_lock);
-+ list_del(&mapping->list);
-+ spin_unlock(&mem_lock);
-+err_get_portal:
-+err_copy_from_user:
-+ kfree(mapping);
-+ return ret;
-+}
-+
-+static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i)
-+{
-+ struct portal_mapping *mapping;
-+ struct vm_area_struct *vma;
-+ unsigned long pfn;
-+ u32 channel;
-+
-+ /* Get the PFN corresponding to one of the virt addresses */
-+ down_read(&current->mm->mmap_sem);
-+ vma = find_vma(current->mm, (unsigned long)i->cinh);
-+ if (!vma || (vma->vm_start > (unsigned long)i->cinh)) {
-+ up_read(&current->mm->mmap_sem);
-+ return -EFAULT;
-+ }
-+ pfn = vma->vm_pgoff;
-+ up_read(&current->mm->mmap_sem);
-+
-+ /* Find the corresponding portal */
-+ spin_lock(&mem_lock);
-+ list_for_each_entry(mapping, &ctx->portals, list) {
-+ if (pfn == (mapping->phys[DPA_PORTAL_CI].start >> PAGE_SHIFT))
-+ goto found;
-+ }
-+ mapping = NULL;
-+found:
-+ if (mapping)
-+ list_del(&mapping->list);
-+ spin_unlock(&mem_lock);
-+ if (!mapping)
-+ return -ENODEV;
-+ portal_munmap(&mapping->phys[DPA_PORTAL_CI], mapping->user.addr.cinh);
-+ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
-+ if (mapping->user.type == usdpaa_portal_qman) {
-+ init_qm_portal(mapping->qportal,
-+ &mapping->qman_portal_low);
-+
-+ /* Tear down any FQs this portal is referencing */
-+ channel = mapping->qportal->public_cfg.channel;
-+ qm_check_and_destroy_fqs(&mapping->qman_portal_low,
-+ &channel,
-+ check_portal_channel);
-+ qm_put_unused_portal(mapping->qportal);
-+ } else if (mapping->user.type == usdpaa_portal_bman) {
-+ init_bm_portal(mapping->bportal,
-+ &mapping->bman_portal_low);
-+ bm_put_unused_portal(mapping->bportal);
-+ }
-+ kfree(mapping);
-+ return 0;
-+}
-+
-+static void portal_config_pamu(struct qm_portal_config *pcfg, uint8_t sdest,
-+ uint32_t cpu, uint32_t cache, uint32_t window)
-+{
-+#ifdef CONFIG_FSL_PAMU
-+ int ret;
-+ int window_count = 1;
-+ struct iommu_domain_geometry geom_attr;
-+ struct pamu_stash_attribute stash_attr;
-+
-+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
-+ if (!pcfg->iommu_domain) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
-+ __func__);
-+ goto _no_iommu;
-+ }
-+ geom_attr.aperture_start = 0;
-+ geom_attr.aperture_end =
-+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
-+ geom_attr.force_aperture = true;
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
-+ &geom_attr);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
-+ &window_count);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ stash_attr.cpu = cpu;
-+ stash_attr.cache = cache;
-+ /* set stash information for the window */
-+ stash_attr.window = 0;
-+
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
-+ DOMAIN_ATTR_FSL_PAMU_STASH,
-+ &stash_attr);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
-+ IOMMU_READ | IOMMU_WRITE);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
-+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
-+ &window_count);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_detach_device;
-+ }
-+_no_iommu:
-+#endif
-+
-+#ifdef CONFIG_FSL_QMAN_CONFIG
-+ if (qman_set_sdest(pcfg->public_cfg.channel, sdest))
-+#endif
-+ pr_warn("Failed to set QMan portal's stash request queue\n");
-+
-+ return;
-+
-+#ifdef CONFIG_FSL_PAMU
-+_iommu_detach_device:
-+ iommu_detach_device(pcfg->iommu_domain, NULL);
-+_iommu_domain_free:
-+ iommu_domain_free(pcfg->iommu_domain);
-+#endif
-+}
-+
-+static long ioctl_allocate_raw_portal(struct file *fp, struct ctx *ctx,
-+ struct usdpaa_ioctl_raw_portal *arg)
-+{
-+ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
-+ int ret;
-+
-+ if (!mapping)
-+ return -ENOMEM;
-+
-+ mapping->user.type = arg->type;
-+ mapping->iommu_domain = NULL;
-+ if (arg->type == usdpaa_portal_qman) {
-+ mapping->qportal = qm_get_unused_portal_idx(arg->index);
-+ if (!mapping->qportal) {
-+ ret = -ENODEV;
-+ goto err;
-+ }
-+ mapping->phys = &mapping->qportal->addr_phys[0];
-+ arg->index = mapping->qportal->public_cfg.index;
-+ arg->cinh = mapping->qportal->addr_phys[DPA_PORTAL_CI].start;
-+ arg->cena = mapping->qportal->addr_phys[DPA_PORTAL_CE].start;
-+ if (arg->enable_stash) {
-+ /* Setup the PAMU with the supplied parameters */
-+ portal_config_pamu(mapping->qportal, arg->sdest,
-+ arg->cpu, arg->cache, arg->window);
-+ }
-+ } else if (mapping->user.type == usdpaa_portal_bman) {
-+ mapping->bportal =
-+ bm_get_unused_portal_idx(arg->index);
-+ if (!mapping->bportal) {
-+ ret = -ENODEV;
-+ goto err;
-+ }
-+ mapping->phys = &mapping->bportal->addr_phys[0];
-+ arg->index = mapping->bportal->public_cfg.index;
-+ arg->cinh = mapping->bportal->addr_phys[DPA_PORTAL_CI].start;
-+ arg->cena = mapping->bportal->addr_phys[DPA_PORTAL_CE].start;
-+ } else {
-+ ret = -EINVAL;
-+ goto err;
-+ }
-+ /* Need to put pcfg in ctx's list before the mmaps because the mmap
-+ * handlers look it up. */
-+ spin_lock(&mem_lock);
-+ list_add(&mapping->list, &ctx->portals);
-+ spin_unlock(&mem_lock);
-+ return 0;
-+err:
-+ kfree(mapping);
-+ return ret;
-+}
-+
-+static long ioctl_free_raw_portal(struct file *fp, struct ctx *ctx,
-+ struct usdpaa_ioctl_raw_portal *arg)
-+{
-+ struct portal_mapping *mapping;
-+ u32 channel;
-+
-+ /* Find the corresponding portal */
-+ spin_lock(&mem_lock);
-+ list_for_each_entry(mapping, &ctx->portals, list) {
-+ if (mapping->phys[DPA_PORTAL_CI].start == arg->cinh)
-+ goto found;
-+ }
-+ mapping = NULL;
-+found:
-+ if (mapping)
-+ list_del(&mapping->list);
-+ spin_unlock(&mem_lock);
-+ if (!mapping)
-+ return -ENODEV;
-+ if (mapping->user.type == usdpaa_portal_qman) {
-+ init_qm_portal(mapping->qportal,
-+ &mapping->qman_portal_low);
-+
-+ /* Tear down any FQs this portal is referencing */
-+ channel = mapping->qportal->public_cfg.channel;
-+ qm_check_and_destroy_fqs(&mapping->qman_portal_low,
-+ &channel,
-+ check_portal_channel);
-+ qm_put_unused_portal(mapping->qportal);
-+ } else if (mapping->user.type == usdpaa_portal_bman) {
-+ init_bm_portal(mapping->bportal,
-+ &mapping->bman_portal_low);
-+ bm_put_unused_portal(mapping->bportal);
-+ }
-+ kfree(mapping);
-+ return 0;
-+}
-+
-+static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-+{
-+ struct ctx *ctx = fp->private_data;
-+ void __user *a = (void __user *)arg;
-+ switch (cmd) {
-+ case USDPAA_IOCTL_ID_ALLOC:
-+ return ioctl_id_alloc(ctx, a);
-+ case USDPAA_IOCTL_ID_RELEASE:
-+ return ioctl_id_release(ctx, a);
-+ case USDPAA_IOCTL_ID_RESERVE:
-+ return ioctl_id_reserve(ctx, a);
-+ case USDPAA_IOCTL_DMA_MAP:
-+ {
-+ struct usdpaa_ioctl_dma_map input;
-+ int ret;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ ret = ioctl_dma_map(fp, ctx, &input);
-+ if (copy_to_user(a, &input, sizeof(input)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ case USDPAA_IOCTL_DMA_UNMAP:
-+ return ioctl_dma_unmap(ctx, a);
-+ case USDPAA_IOCTL_DMA_LOCK:
-+ return ioctl_dma_lock(ctx, a);
-+ case USDPAA_IOCTL_DMA_UNLOCK:
-+ return ioctl_dma_unlock(ctx, a);
-+ case USDPAA_IOCTL_PORTAL_MAP:
-+ {
-+ struct usdpaa_ioctl_portal_map input;
-+ int ret;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ ret = ioctl_portal_map(fp, ctx, &input);
-+ if (copy_to_user(a, &input, sizeof(input)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ case USDPAA_IOCTL_PORTAL_UNMAP:
-+ {
-+ struct usdpaa_portal_map input;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ return ioctl_portal_unmap(ctx, &input);
-+ }
-+ case USDPAA_IOCTL_DMA_USED:
-+ return ioctl_dma_stats(ctx, a);
-+ case USDPAA_IOCTL_ALLOC_RAW_PORTAL:
-+ {
-+ struct usdpaa_ioctl_raw_portal input;
-+ int ret;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ ret = ioctl_allocate_raw_portal(fp, ctx, &input);
-+ if (copy_to_user(a, &input, sizeof(input)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ case USDPAA_IOCTL_FREE_RAW_PORTAL:
-+ {
-+ struct usdpaa_ioctl_raw_portal input;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ return ioctl_free_raw_portal(fp, ctx, &input);
-+ }
-+ }
-+ return -EINVAL;
-+}
-+
-+static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+#ifdef CONFIG_COMPAT
-+ struct ctx *ctx = fp->private_data;
-+ void __user *a = (void __user *)arg;
-+#endif
-+ switch (cmd) {
-+#ifdef CONFIG_COMPAT
-+ case USDPAA_IOCTL_DMA_MAP_COMPAT:
-+ {
-+ int ret;
-+ struct usdpaa_ioctl_dma_map_compat input;
-+ struct usdpaa_ioctl_dma_map converted;
-+
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+
-+ converted.ptr = compat_ptr(input.ptr);
-+ converted.phys_addr = input.phys_addr;
-+ converted.len = input.len;
-+ converted.flags = input.flags;
-+ strncpy(converted.name, input.name, USDPAA_DMA_NAME_MAX);
-+ converted.has_locking = input.has_locking;
-+ converted.did_create = input.did_create;
-+
-+ ret = ioctl_dma_map(fp, ctx, &converted);
-+ input.ptr = ptr_to_compat(converted.ptr);
-+ input.phys_addr = converted.phys_addr;
-+ input.len = converted.len;
-+ input.flags = converted.flags;
-+ strncpy(input.name, converted.name, USDPAA_DMA_NAME_MAX);
-+ input.has_locking = converted.has_locking;
-+ input.did_create = converted.did_create;
-+ if (copy_to_user(a, &input, sizeof(input)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ case USDPAA_IOCTL_PORTAL_MAP_COMPAT:
-+ {
-+ int ret;
-+ struct compat_usdpaa_ioctl_portal_map input;
-+ struct usdpaa_ioctl_portal_map converted;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ converted.type = input.type;
-+ converted.index = input.index;
-+ ret = ioctl_portal_map(fp, ctx, &converted);
-+ input.addr.cinh = ptr_to_compat(converted.addr.cinh);
-+ input.addr.cena = ptr_to_compat(converted.addr.cena);
-+ input.channel = converted.channel;
-+ input.pools = converted.pools;
-+ input.index = converted.index;
-+ if (copy_to_user(a, &input, sizeof(input)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ case USDPAA_IOCTL_PORTAL_UNMAP_COMPAT:
-+ {
-+ struct usdpaa_portal_map_compat input;
-+ struct usdpaa_portal_map converted;
-+
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ converted.cinh = compat_ptr(input.cinh);
-+ converted.cena = compat_ptr(input.cena);
-+ return ioctl_portal_unmap(ctx, &converted);
-+ }
-+ case USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT:
-+ {
-+ int ret;
-+ struct usdpaa_ioctl_raw_portal converted;
-+ struct compat_ioctl_raw_portal input;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ converted.type = input.type;
-+ converted.index = input.index;
-+ converted.enable_stash = input.enable_stash;
-+ converted.cpu = input.cpu;
-+ converted.cache = input.cache;
-+ converted.window = input.window;
-+ converted.sdest = input.sdest;
-+ ret = ioctl_allocate_raw_portal(fp, ctx, &converted);
-+
-+ input.cinh = converted.cinh;
-+ input.cena = converted.cena;
-+ input.index = converted.index;
-+
-+ if (copy_to_user(a, &input, sizeof(input)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ case USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT:
-+ {
-+ struct usdpaa_ioctl_raw_portal converted;
-+ struct compat_ioctl_raw_portal input;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ converted.type = input.type;
-+ converted.index = input.index;
-+ converted.cinh = input.cinh;
-+ converted.cena = input.cena;
-+ return ioctl_free_raw_portal(fp, ctx, &converted);
-+ }
-+#endif
-+ default:
-+ return usdpaa_ioctl(fp, cmd, arg);
-+ }
-+ return -EINVAL;
-+}
-+
-+int usdpaa_get_portal_config(struct file *filp, void *cinh,
-+ enum usdpaa_portal_type ptype, unsigned int *irq,
-+ void **iir_reg)
-+{
-+ /* Walk the list of portals for filp and return the config
-+ for the portal that matches the hint */
-+ struct ctx *context;
-+ struct portal_mapping *portal;
-+
-+ /* First sanitize the filp */
-+ if (filp->f_op->open != usdpaa_open)
-+ return -ENODEV;
-+ context = filp->private_data;
-+ spin_lock(&context->lock);
-+ list_for_each_entry(portal, &context->portals, list) {
-+ if (portal->user.type == ptype &&
-+ portal->user.addr.cinh == cinh) {
-+ if (ptype == usdpaa_portal_qman) {
-+ *irq = portal->qportal->public_cfg.irq;
-+ *iir_reg = portal->qportal->addr_virt[1] +
-+ QM_REG_IIR;
-+ } else {
-+ *irq = portal->bportal->public_cfg.irq;
-+ *iir_reg = portal->bportal->addr_virt[1] +
-+ BM_REG_IIR;
-+ }
-+ spin_unlock(&context->lock);
-+ return 0;
-+ }
-+ }
-+ spin_unlock(&context->lock);
-+ return -EINVAL;
-+}
-+
-+static const struct file_operations usdpaa_fops = {
-+ .open = usdpaa_open,
-+ .release = usdpaa_release,
-+ .mmap = usdpaa_mmap,
-+ .get_unmapped_area = usdpaa_get_unmapped_area,
-+ .unlocked_ioctl = usdpaa_ioctl,
-+ .compat_ioctl = usdpaa_ioctl_compat
-+};
-+
-+static struct miscdevice usdpaa_miscdev = {
-+ .name = "fsl-usdpaa",
-+ .fops = &usdpaa_fops,
-+ .minor = MISC_DYNAMIC_MINOR,
-+};
-+
-+/* Early-boot memory allocation. The boot-arg "usdpaa_mem=<x>" is used to
-+ * indicate how much memory (if any) to allocate during early boot. If the
-+ * format "usdpaa_mem=<x>,<y>" is used, then <y> will be interpreted as the
-+ * number of TLB1 entries to reserve (default is 1). If there are more mappings
-+ * than there are TLB1 entries, fault-handling will occur. */
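-+
-+/* A purely illustrative sketch of this (now deprecated) boot argument; the
-+ * 256M size and the count of 4 TLB1 entries below are arbitrary example
-+ * values, not defaults:
-+ *
-+ *	usdpaa_mem=256M,4
-+ *
-+ * i.e. reserve 256 MiB for USDPAA and cover it with 4 TLB1 entries. */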
-+
-+static __init int usdpaa_mem(char *arg)
-+{
-+	pr_warn("usdpaa_mem argument is deprecated\n");
-+ arg_phys_size = memparse(arg, &arg);
-+ num_tlb = 1;
-+ if (*arg == ',') {
-+ unsigned long ul;
-+ int err = kstrtoul(arg + 1, 0, &ul);
-+ if (err < 0) {
-+ num_tlb = 1;
-+ pr_warn("ERROR, usdpaa_mem arg is invalid\n");
-+ } else
-+ num_tlb = (unsigned int)ul;
-+ }
-+ return 0;
-+}
-+early_param("usdpaa_mem", usdpaa_mem);
-+
-+static int usdpaa_mem_init(struct reserved_mem *rmem)
-+{
-+ phys_start = rmem->base;
-+ phys_size = rmem->size;
-+
-+ WARN_ON(!(phys_start && phys_size));
-+
-+ return 0;
-+}
-+RESERVEDMEM_OF_DECLARE(usdpaa_mem_init, "fsl,usdpaa-mem", usdpaa_mem_init);
-+
-+__init int fsl_usdpaa_init_early(void)
-+{
-+ if (!phys_size || !phys_start) {
-+ pr_info("No USDPAA memory, no 'fsl,usdpaa-mem' in device-tree\n");
-+ return 0;
-+ }
-+ if (phys_size % PAGE_SIZE) {
-+ pr_err("'fsl,usdpaa-mem' size must be a multiple of page size\n");
-+ phys_size = 0;
-+ return 0;
-+ }
-+ if (arg_phys_size && phys_size != arg_phys_size) {
-+		pr_err("usdpaa_mem argument size (0x%llx) does not match device tree size (0x%llx)\n",
-+ arg_phys_size, phys_size);
-+ phys_size = 0;
-+ return 0;
-+ }
-+ pfn_start = phys_start >> PAGE_SHIFT;
-+ pfn_size = phys_size >> PAGE_SHIFT;
-+#ifdef CONFIG_PPC
-+ first_tlb = current_tlb = tlbcam_index;
-+ tlbcam_index += num_tlb;
-+#endif
-+	pr_info("USDPAA region at %llx:%llx(%lx:%lx), %d TLB1 entries\n",
-+ phys_start, phys_size, pfn_start, pfn_size, num_tlb);
-+ return 0;
-+}
-+subsys_initcall(fsl_usdpaa_init_early);
-+
-+
-+static int __init usdpaa_init(void)
-+{
-+ struct mem_fragment *frag;
-+ int ret;
-+ u64 tmp_size = phys_size;
-+ u64 tmp_start = phys_start;
-+ u64 tmp_pfn_size = pfn_size;
-+ u64 tmp_pfn_start = pfn_start;
-+
-+ pr_info("Freescale USDPAA process driver\n");
-+ if (!phys_start) {
-+ pr_warn("fsl-usdpaa: no region found\n");
-+ return 0;
-+ }
-+
-+ while (tmp_size != 0) {
-+ u32 frag_size = largest_page_size(tmp_size);
-+ frag = kmalloc(sizeof(*frag), GFP_KERNEL);
-+ if (!frag) {
-+ pr_err("Failed to setup USDPAA memory accounting\n");
-+ return -ENOMEM;
-+ }
-+ frag->base = tmp_start;
-+ frag->len = frag->root_len = frag_size;
-+ frag->root_pfn = tmp_pfn_start;
-+ frag->pfn_base = tmp_pfn_start;
-+ frag->pfn_len = frag_size / PAGE_SIZE;
-+ frag->refs = 0;
-+ init_waitqueue_head(&frag->wq);
-+ frag->owner = NULL;
-+ list_add(&frag->list, &mem_list);
-+
-+ /* Adjust for this frag */
-+ tmp_start += frag_size;
-+ tmp_size -= frag_size;
-+ tmp_pfn_start += frag_size / PAGE_SIZE;
-+ tmp_pfn_size -= frag_size / PAGE_SIZE;
-+ }
-+ ret = misc_register(&usdpaa_miscdev);
-+ if (ret)
-+ pr_err("fsl-usdpaa: failed to register misc device\n");
-+ return ret;
-+}
-+
-+static void __exit usdpaa_exit(void)
-+{
-+ misc_deregister(&usdpaa_miscdev);
-+}
-+
-+module_init(usdpaa_init);
-+module_exit(usdpaa_exit);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Freescale Semiconductor");
-+MODULE_DESCRIPTION("Freescale USDPAA process driver");
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
-@@ -0,0 +1,289 @@
-+/* Copyright (c) 2013 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/* Define a device that allows USDPAA processes to open a file
-+ descriptor and specify which IRQ they want to monitor using an ioctl().
-+ When an IRQ is received, the device becomes readable so that a process
-+ can use read() or select() type calls to wait for IRQs. */
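-+
-+/* Illustrative userspace sketch only; the descriptor plumbing below is an
-+ assumption about how a USDPAA process would drive this device, not part of
-+ the driver itself ("cinh" stands for the cache-inhibited portal address
-+ obtained via USDPAA_IOCTL_PORTAL_MAP on the main fsl-usdpaa device):
-+
-+	int pfd = open("/dev/fsl-usdpaa", O_RDWR);
-+	int ifd = open("/dev/fsl-usdpaa-irq", O_RDONLY);
-+	struct usdpaa_ioctl_irq_map map = {
-+		.type = usdpaa_portal_qman,
-+		.fd = pfd,
-+		.portal_cinh = cinh,
-+	};
-+	ioctl(ifd, USDPAA_IOCTL_PORTAL_IRQ_MAP, &map);
-+	uint32_t count;
-+	read(ifd, &count, sizeof(count));	<- blocks until an IRQ fires
-+*/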
-+
-+#include <linux/miscdevice.h>
-+#include <linux/fs.h>
-+#include <linux/cdev.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/poll.h>
-+#include <linux/uaccess.h>
-+#include <linux/fsl_usdpaa.h>
-+#include <linux/module.h>
-+#include <linux/fdtable.h>
-+#include <linux/file.h>
-+
-+#include "qman_low.h"
-+#include "bman_low.h"
-+
-+struct usdpaa_irq_ctx {
-+ int irq_set; /* Set to true once the irq is set via ioctl */
-+ unsigned int irq_num;
-+ u32 last_irq_count; /* Last value returned from read */
-+ u32 irq_count; /* Number of irqs since last read */
-+ wait_queue_head_t wait_queue; /* Waiting processes */
-+ spinlock_t lock;
-+ void *inhibit_addr; /* inhibit register address */
-+ struct file *usdpaa_filp;
-+ char irq_name[128];
-+};
-+
-+static int usdpaa_irq_open(struct inode *inode, struct file *filp)
-+{
-+ struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
-+ if (!ctx)
-+ return -ENOMEM;
-+ ctx->irq_set = 0;
-+ ctx->irq_count = 0;
-+ ctx->last_irq_count = 0;
-+ init_waitqueue_head(&ctx->wait_queue);
-+ spin_lock_init(&ctx->lock);
-+ filp->private_data = ctx;
-+ return 0;
-+}
-+
-+static int usdpaa_irq_release(struct inode *inode, struct file *filp)
-+{
-+ struct usdpaa_irq_ctx *ctx = filp->private_data;
-+ if (ctx->irq_set) {
-+ /* Inhibit the IRQ */
-+ out_be32(ctx->inhibit_addr, 0x1);
-+ irq_set_affinity_hint(ctx->irq_num, NULL);
-+ free_irq(ctx->irq_num, ctx);
-+ ctx->irq_set = 0;
-+ fput(ctx->usdpaa_filp);
-+ }
-+ kfree(filp->private_data);
-+ return 0;
-+}
-+
-+static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
-+{
-+ unsigned long flags;
-+ struct usdpaa_irq_ctx *ctx = _ctx;
-+ spin_lock_irqsave(&ctx->lock, flags);
-+ ++ctx->irq_count;
-+ spin_unlock_irqrestore(&ctx->lock, flags);
-+ wake_up_all(&ctx->wait_queue);
-+ /* Set the inhibit register. This will be reenabled
-+ once the USDPAA code handles the IRQ */
-+ out_be32(ctx->inhibit_addr, 0x1);
-+	pr_debug("Inhibit at %p count %d\n", ctx->inhibit_addr, ctx->irq_count);
-+ return IRQ_HANDLED;
-+}
-+
-+static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
-+{
-+ struct usdpaa_irq_ctx *ctx = fp->private_data;
-+ int ret;
-+
-+ if (ctx->irq_set) {
-+ pr_debug("Setting USDPAA IRQ when it was already set!\n");
-+ return -EBUSY;
-+ }
-+
-+ ctx->usdpaa_filp = fget(irq_map->fd);
-+ if (!ctx->usdpaa_filp) {
-+ pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
-+ return -EINVAL;
-+ }
-+
-+ ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
-+ irq_map->type, &ctx->irq_num,
-+ &ctx->inhibit_addr);
-+ if (ret) {
-+ pr_debug("USDPAA IRQ couldn't identify portal\n");
-+ fput(ctx->usdpaa_filp);
-+ return ret;
-+ }
-+
-+ ctx->irq_set = 1;
-+
-+ snprintf(ctx->irq_name, sizeof(ctx->irq_name),
-+ "usdpaa_irq %d", ctx->irq_num);
-+
-+ ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
-+ ctx->irq_name, ctx);
-+ if (ret) {
-+ pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
-+ ctx->irq_num, ret);
-+ ctx->irq_set = 0;
-+ fput(ctx->usdpaa_filp);
-+ return ret;
-+ }
-+ ret = irq_set_affinity(ctx->irq_num, &current->cpus_allowed);
-+ if (ret)
-+ pr_err("USDPAA irq_set_affinity() failed, ret= %d\n", ret);
-+
-+ ret = irq_set_affinity_hint(ctx->irq_num, &current->cpus_allowed);
-+ if (ret)
-+ pr_err("USDPAA irq_set_affinity_hint() failed, ret= %d\n", ret);
-+
-+ return 0;
-+}
-+
-+static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int ret;
-+ struct usdpaa_ioctl_irq_map irq_map;
-+
-+ if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
-+ pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
-+ return -EINVAL;
-+ }
-+
-+ ret = copy_from_user(&irq_map, (void __user *)arg,
-+ sizeof(irq_map));
-+ if (ret)
-+ return ret;
-+ return map_irq(fp, &irq_map);
-+}
-+
-+static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
-+ size_t count, loff_t *offp)
-+{
-+ struct usdpaa_irq_ctx *ctx = filp->private_data;
-+ int ret;
-+
-+ if (!ctx->irq_set) {
-+ pr_debug("Reading USDPAA IRQ before it was set\n");
-+ return -EINVAL;
-+ }
-+
-+ if (count < sizeof(ctx->irq_count)) {
-+ pr_debug("USDPAA IRQ Read too small\n");
-+ return -EINVAL;
-+ }
-+ if (ctx->irq_count == ctx->last_irq_count) {
-+ if (filp->f_flags & O_NONBLOCK)
-+ return -EAGAIN;
-+
-+ ret = wait_event_interruptible(ctx->wait_queue,
-+ ctx->irq_count != ctx->last_irq_count);
-+ if (ret == -ERESTARTSYS)
-+ return ret;
-+ }
-+
-+ ctx->last_irq_count = ctx->irq_count;
-+
-+ if (copy_to_user(buff, &ctx->last_irq_count,
-+ sizeof(ctx->last_irq_count)))
-+ return -EFAULT;
-+ return sizeof(ctx->irq_count);
-+}
-+
-+static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
-+{
-+ struct usdpaa_irq_ctx *ctx = filp->private_data;
-+ unsigned int ret = 0;
-+ unsigned long flags;
-+
-+ if (!ctx->irq_set)
-+ return POLLHUP;
-+
-+ poll_wait(filp, &ctx->wait_queue, wait);
-+
-+ spin_lock_irqsave(&ctx->lock, flags);
-+ if (ctx->irq_count != ctx->last_irq_count)
-+ ret |= POLLIN | POLLRDNORM;
-+ spin_unlock_irqrestore(&ctx->lock, flags);
-+ return ret;
-+}
-+
-+static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+#ifdef CONFIG_COMPAT
-+ void __user *a = (void __user *)arg;
-+#endif
-+ switch (cmd) {
-+#ifdef CONFIG_COMPAT
-+ case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
-+ {
-+ struct compat_ioctl_irq_map input;
-+ struct usdpaa_ioctl_irq_map converted;
-+ if (copy_from_user(&input, a, sizeof(input)))
-+ return -EFAULT;
-+ converted.type = input.type;
-+ converted.fd = input.fd;
-+ converted.portal_cinh = compat_ptr(input.portal_cinh);
-+ return map_irq(fp, &converted);
-+ }
-+#endif
-+ default:
-+ return usdpaa_irq_ioctl(fp, cmd, arg);
-+ }
-+}
-+
-+static const struct file_operations usdpaa_irq_fops = {
-+ .open = usdpaa_irq_open,
-+ .release = usdpaa_irq_release,
-+ .unlocked_ioctl = usdpaa_irq_ioctl,
-+ .compat_ioctl = usdpaa_irq_ioctl_compat,
-+ .read = usdpaa_irq_read,
-+ .poll = usdpaa_irq_poll
-+};
-+
-+static struct miscdevice usdpaa_miscdev = {
-+ .name = "fsl-usdpaa-irq",
-+ .fops = &usdpaa_irq_fops,
-+ .minor = MISC_DYNAMIC_MINOR,
-+};
-+
-+static int __init usdpaa_irq_init(void)
-+{
-+ int ret;
-+
-+ pr_info("Freescale USDPAA process IRQ driver\n");
-+ ret = misc_register(&usdpaa_miscdev);
-+ if (ret)
-+ pr_err("fsl-usdpaa-irq: failed to register misc device\n");
-+ return ret;
-+}
-+
-+static void __exit usdpaa_irq_exit(void)
-+{
-+ misc_deregister(&usdpaa_miscdev);
-+}
-+
-+module_init(usdpaa_irq_init);
-+module_exit(usdpaa_irq_exit);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Freescale Semiconductor");
-+MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver");
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qbman_driver.c
-@@ -0,0 +1,88 @@
-+/* Copyright 2013 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/time.h>
-+#include "qman_private.h"
-+#include "bman_private.h"
-+__init void qman_init_early(void);
-+__init void bman_init_early(void);
-+
-+static __init int qbman_init(void)
-+{
-+ struct device_node *dn;
-+ u32 is_portal_available;
-+
-+ bman_init();
-+ qman_init();
-+
-+ is_portal_available = 0;
-+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
-+ if (!of_device_is_available(dn))
-+ continue;
-+ else
-+ is_portal_available = 1;
-+ }
-+
-+ if (!qman_have_ccsr() && is_portal_available) {
-+ struct qman_fq fq = {
-+ .fqid = 1
-+ };
-+ struct qm_mcr_queryfq_np np;
-+ int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
-+ struct timespec nowts, diffts, startts = current_kernel_time();
-+ /* Loop while querying given fqid succeeds or time out */
-+ while (1) {
-+ err = qman_query_fq_np(&fq, &np);
-+ if (!err) {
-+ /* success, control-plane has configured QMan */
-+ break;
-+ } else if (err != -ERANGE) {
-+ pr_err("QMan: I/O error, continuing anyway\n");
-+ break;
-+ }
-+ nowts = current_kernel_time();
-+ diffts = timespec_sub(nowts, startts);
-+ if (diffts.tv_sec > 0) {
-+ if (!retry--) {
-+				pr_err("QMan: time out, control-plane dead?\n");
-+ break;
-+ }
-+			pr_warn("QMan: polling for the control-plane (%d)\n",
-+				retry);
-+ }
-+ }
-+ }
-+ bman_resource_init();
-+ qman_resource_init();
-+ return 0;
-+}
-+subsys_initcall(qbman_init);
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_config.c
-@@ -0,0 +1,1224 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <asm/cacheflush.h>
-+#include "qman_private.h"
-+#include <linux/highmem.h>
-+#include <linux/of_reserved_mem.h>
-+
-+/* Last updated for v00.800 of the BG */
-+
-+/* Register offsets */
-+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
-+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
-+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
-+#define REG_DD_CFG 0x0200
-+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
-+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
-+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
-+#define REG_PFDR_FPC 0x0400
-+#define REG_PFDR_FP_HEAD 0x0404
-+#define REG_PFDR_FP_TAIL 0x0408
-+#define REG_PFDR_FP_LWIT 0x0410
-+#define REG_PFDR_CFG 0x0414
-+#define REG_SFDR_CFG 0x0500
-+#define REG_SFDR_IN_USE 0x0504
-+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
-+#define REG_WQ_DEF_ENC_WQID 0x0630
-+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
-+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
-+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
-+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
-+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
-+#define REG_CM_CFG 0x0800
-+#define REG_ECSR 0x0a00
-+#define REG_ECIR 0x0a04
-+#define REG_EADR 0x0a08
-+#define REG_ECIR2 0x0a0c
-+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
-+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
-+#define REG_MCR 0x0b00
-+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
-+#define REG_MISC_CFG 0x0be0
-+#define REG_HID_CFG 0x0bf0
-+#define REG_IDLE_STAT 0x0bf4
-+#define REG_IP_REV_1 0x0bf8
-+#define REG_IP_REV_2 0x0bfc
-+#define REG_FQD_BARE 0x0c00
-+#define REG_PFDR_BARE 0x0c20
-+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
-+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
-+#define REG_QCSP_BARE 0x0c80
-+#define REG_QCSP_BAR 0x0c84
-+#define REG_CI_SCHED_CFG 0x0d00
-+#define REG_SRCIDR 0x0d04
-+#define REG_LIODNR 0x0d08
-+#define REG_CI_RLM_AVG 0x0d14
-+#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */
-+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
-+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
-+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
-+#define REG_CEETM_CFG_IDX 0x900
-+#define REG_CEETM_CFG_PRES 0x904
-+#define REG_CEETM_XSFDR_IN_USE 0x908
-+
-+/* Assists for QMAN_MCR */
-+#define MCR_INIT_PFDR 0x01000000
-+#define MCR_get_rslt(v) (u8)((v) >> 24)
-+#define MCR_rslt_idle(r) (!rslt || (rslt >= 0xf0))
-+#define MCR_rslt_ok(r) (rslt == 0xf0)
-+#define MCR_rslt_eaccess(r) (rslt == 0xf8)
-+#define MCR_rslt_inval(r) (rslt == 0xff)
-+
-+struct qman;
-+
-+/* Follows WQ_CS_CFG0-5 */
-+enum qm_wq_class {
-+ qm_wq_portal = 0,
-+ qm_wq_pool = 1,
-+ qm_wq_fman0 = 2,
-+ qm_wq_fman1 = 3,
-+ qm_wq_caam = 4,
-+ qm_wq_pme = 5,
-+ qm_wq_first = qm_wq_portal,
-+ qm_wq_last = qm_wq_pme
-+};
-+
-+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
-+enum qm_memory {
-+ qm_memory_fqd,
-+ qm_memory_pfdr
-+};
-+
-+/* Used by all error interrupt registers except 'inhibit' */
-+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
-+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
-+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
-+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
-+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
-+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
-+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
-+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
-+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
-+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
-+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
-+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
-+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
-+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
-+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
-+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
-+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
-+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
-+
-+/* QMAN_ECIR valid error bit */
-+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
-+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
-+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
-+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
-+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
-+ QM_EIRQ_IFSI)
-+
-+union qman_ecir {
-+ u32 ecir_raw;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 __reserved:2;
-+ u32 portal_type:1;
-+ u32 portal_num:5;
-+ u32 fqid:24;
-+#else
-+ u32 fqid:24;
-+ u32 portal_num:5;
-+ u32 portal_type:1;
-+ u32 __reserved:2;
-+#endif
-+ } __packed info;
-+};
-+
-+union qman_ecir2 {
-+ u32 ecir2_raw;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 portal_type:1;
-+ u32 __reserved:21;
-+ u32 portal_num:10;
-+#else
-+ u32 portal_num:10;
-+ u32 __reserved:21;
-+ u32 portal_type:1;
-+#endif
-+ } __packed info;
-+};
-+
-+union qman_eadr {
-+ u32 eadr_raw;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 __reserved1:4;
-+ u32 memid:4;
-+ u32 __reserved2:12;
-+ u32 eadr:12;
-+#else
-+ u32 eadr:12;
-+ u32 __reserved2:12;
-+ u32 memid:4;
-+ u32 __reserved1:4;
-+#endif
-+ } __packed info;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 __reserved1:3;
-+ u32 memid:5;
-+ u32 __reserved:8;
-+ u32 eadr:16;
-+#else
-+ u32 eadr:16;
-+ u32 __reserved:8;
-+ u32 memid:5;
-+ u32 __reserved1:3;
-+#endif
-+ } __packed info_rev3;
-+};
-+
-+struct qman_hwerr_txt {
-+ u32 mask;
-+ const char *txt;
-+};
-+
-+#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
-+
-+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
-+ QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
-+ QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
-+ QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
-+ QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
-+ QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
-+ QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
-+ QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
-+ QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
-+ QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
-+ QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
-+ QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
-+ QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
-+ QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
-+ QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
-+ QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
-+ QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
-+ QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
-+ QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
-+};
-+#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
-+
-+struct qman_error_info_mdata {
-+ u16 addr_mask;
-+ u16 bits;
-+ const char *txt;
-+};
-+
-+#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
-+static const struct qman_error_info_mdata error_mdata[] = {
-+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
-+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
-+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
-+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
-+ QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
-+ QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
-+ QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
-+ QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
-+ QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
-+ QMAN_ERR_MDATA(0x7FFF, 256, "SW portal ring memory"),
-+ QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue descriptor memory"),
-+ QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"),
-+ QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"),
-+ QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"),
-+ QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"),
-+ QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"),
-+ QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"),
-+ QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"),
-+};
-+#define QMAN_ERR_MDATA_COUNT \
-+ (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
-+
-+/* Add this in Kconfig */
-+#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
-+
-+/**
-+ * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
-+ * @v: for accessors that write values, this is the 32-bit value
-+ *
-+ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
-+ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
-+ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
-+ * "write the enable register" rather than "enable the write register"!
-+ */
-+#define qm_err_isr_status_read(qm) \
-+ __qm_err_isr_read(qm, qm_isr_status)
-+#define qm_err_isr_status_clear(qm, m) \
-+ __qm_err_isr_write(qm, qm_isr_status, m)
-+#define qm_err_isr_enable_read(qm) \
-+ __qm_err_isr_read(qm, qm_isr_enable)
-+#define qm_err_isr_enable_write(qm, v) \
-+ __qm_err_isr_write(qm, qm_isr_enable, v)
-+#define qm_err_isr_disable_read(qm) \
-+ __qm_err_isr_read(qm, qm_isr_disable)
-+#define qm_err_isr_disable_write(qm, v) \
-+ __qm_err_isr_write(qm, qm_isr_disable, v)
-+#define qm_err_isr_inhibit(qm) \
-+ __qm_err_isr_write(qm, qm_isr_inhibit, 1)
-+#define qm_err_isr_uninhibit(qm) \
-+ __qm_err_isr_write(qm, qm_isr_inhibit, 0)
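-+
-+/* A minimal usage sketch (this simply mirrors what __bind_irq() does further
-+ * below): clear any stale status bits, then enable all error interrupt
-+ * sources:
-+ *
-+ *	qm_err_isr_status_clear(qm, 0xffffffff);
-+ *	qm_err_isr_enable_write(qm, 0xffffffff);
-+ */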
-+
-+/*
-+ * TODO: unimplemented registers
-+ *
-+ * Keeping a list here of Qman registers I have not yet covered;
-+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
-+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
-+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
-+ */
-+
-+/* Encapsulate "struct qman *" as a cast of the register space address. */
-+
-+static struct qman *qm_create(void *regs)
-+{
-+ return (struct qman *)regs;
-+}
-+
-+static inline u32 __qm_in(struct qman *qm, u32 offset)
-+{
-+ return in_be32((void *)qm + offset);
-+}
-+static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
-+{
-+ out_be32((void *)qm + offset, val);
-+}
-+#define qm_in(reg) __qm_in(qm, REG_##reg)
-+#define qm_out(reg, val) __qm_out(qm, REG_##reg, val)
-+
-+static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
-+{
-+ return __qm_in(qm, REG_ERR_ISR + (n << 2));
-+}
-+
-+static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
-+{
-+ __qm_out(qm, REG_ERR_ISR + (n << 2), val);
-+}
-+
-+static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
-+ int ed, u8 sernd)
-+{
-+ DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
-+ (portal == qm_dc_portal_fman1));
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+ qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
-+ else
-+ qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
-+}
-+
-+static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
-+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
-+ u8 csw6, u8 csw7)
-+{
-+ qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
-+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
-+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
-+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
-+}
-+
-+static void qm_set_hid(struct qman *qm)
-+{
-+ qm_out(HID_CFG, 0);
-+}
-+
-+static void qm_set_corenet_initiator(struct qman *qm)
-+{
-+ qm_out(CI_SCHED_CFG,
-+ 0x80000000 | /* write srcciv enable */
-+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
-+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
-+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
-+ CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W);
-+}
-+
-+static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor,
-+ u8 *cfg)
-+{
-+ u32 v = qm_in(IP_REV_1);
-+ u32 v2 = qm_in(IP_REV_2);
-+ *id = (v >> 16);
-+ *major = (v >> 8) & 0xff;
-+ *minor = v & 0xff;
-+ *cfg = v2 & 0xff;
-+}
-+
-+static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
-+ int enable, int prio, int stash, u32 size)
-+{
-+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
-+ u32 exp = ilog2(size);
-+ /* choke if size isn't within range */
-+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
-+ is_power_of_2(size));
-+ /* choke if 'ba' has lower-alignment than 'size' */
-+ DPA_ASSERT(!(ba & (size - 1)));
-+ __qm_out(qm, offset, upper_32_bits(ba));
-+ __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
-+ __qm_out(qm, offset + REG_offset_AR,
-+ (enable ? 0x80000000 : 0) |
-+ (prio ? 0x40000000 : 0) |
-+ (stash ? 0x20000000 : 0) |
-+ (exp - 1));
-+}
-+
-+static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
-+{
-+ qm_out(PFDR_FP_LWIT, th & 0xffffff);
-+ qm_out(PFDR_CFG, k);
-+}
-+
-+static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
-+{
-+ qm_out(SFDR_CFG, th & 0x3ff);
-+}
-+
-+static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
-+{
-+ u8 rslt = MCR_get_rslt(qm_in(MCR));
-+
-+ DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
-+ /* Make sure the command interface is 'idle' */
-+ if (!MCR_rslt_idle(rslt))
-+ panic("QMAN_MCR isn't idle");
-+
-+ /* Write the MCR command params then the verb */
-+ qm_out(MCP(0), pfdr_start);
-+ /* TODO: remove this - it's a workaround for a model bug that is
-+ * corrected in more recent versions. We use the workaround until
-+ * everyone has upgraded. */
-+ qm_out(MCP(1), (pfdr_start + num - 16));
-+ lwsync();
-+ qm_out(MCR, MCR_INIT_PFDR);
-+ /* Poll for the result */
-+ do {
-+ rslt = MCR_get_rslt(qm_in(MCR));
-+ } while (!MCR_rslt_idle(rslt));
-+ if (MCR_rslt_ok(rslt))
-+ return 0;
-+ if (MCR_rslt_eaccess(rslt))
-+ return -EACCES;
-+ if (MCR_rslt_inval(rslt))
-+ return -EINVAL;
-+ pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
-+ return -ENOSYS;
-+}
-+
-+/*****************/
-+/* Config driver */
-+/*****************/
-+
-+#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ)
-+#define DEFAULT_PFDR_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ)
-+
-+/* We support only one of these */
-+static struct qman *qm;
-+static struct device_node *qm_node;
-+
-+/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
-+ * during qman_init_ccsr(). */
-+static dma_addr_t fqd_a, pfdr_a;
-+static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ;
-+
-+static int qman_fqd(struct reserved_mem *rmem)
-+{
-+ fqd_a = rmem->base;
-+ fqd_sz = rmem->size;
-+
-+ WARN_ON(!(fqd_a && fqd_sz));
-+
-+ return 0;
-+}
-+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-+
-+static int qman_pfdr(struct reserved_mem *rmem)
-+{
-+ pfdr_a = rmem->base;
-+ pfdr_sz = rmem->size;
-+
-+ WARN_ON(!(pfdr_a && pfdr_sz));
-+
-+ return 0;
-+}
-+RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
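-+
-+/* For illustration only: a reserved-memory node that would trigger the
-+ * qman_fqd() hook above could look roughly like this (the 4 MiB size,
-+ * alignment and cell layout are assumptions, not values taken from this
-+ * driver):
-+ *
-+ *	qman_fqd: qman-fqd {
-+ *		compatible = "fsl,qman-fqd";
-+ *		size = <0 0x400000>;
-+ *		alignment = <0 0x400000>;
-+ *		no-map;
-+ *	};
-+ */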
-+
-+size_t get_qman_fqd_size(void)
-+{
-+ return fqd_sz;
-+}
-+
-+/* Zero (if requested) and flush the named memory range from the data cache
-+ * so that QMan-originated transactions for this memory region can be marked
-+ * non-coherent. The location and size themselves come from the
-+ * reserved-memory nodes declared via RESERVEDMEM_OF_DECLARE() above.
-+ */
-+static __init int parse_mem_property(struct device_node *node, const char *name,
-+ dma_addr_t *addr, size_t *sz, int zero)
-+{
-+ int ret;
-+
-+ /* If using a "zero-pma", don't try to zero it, even if you asked */
-+ if (zero && of_find_property(node, "zero-pma", &ret)) {
-+ pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
-+ zero = 0;
-+ }
-+
-+ if (zero) {
-+ /* map as cacheable, non-guarded */
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ void __iomem *tmpp = ioremap_cache(*addr, *sz);
-+#else
-+ void __iomem *tmpp = ioremap(*addr, *sz);
-+#endif
-+
-+ if (!tmpp)
-+ return -ENOMEM;
-+ memset_io(tmpp, 0, *sz);
-+ flush_dcache_range((unsigned long)tmpp,
-+ (unsigned long)tmpp + *sz);
-+ iounmap(tmpp);
-+ }
-+
-+ return 0;
-+}
-+
-+/* TODO:
-+ * - there is obviously no handling of errors,
-+ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
-+ * both memory resources to zero.
-+ */
-+static int __init fsl_qman_init(struct device_node *node)
-+{
-+ struct resource res;
-+ resource_size_t len;
-+ u32 __iomem *regs;
-+ const char *s;
-+ int ret, standby = 0;
-+ u16 id;
-+ u8 major, minor, cfg;
-+ ret = of_address_to_resource(node, 0, &res);
-+ if (ret) {
-+ pr_err("Can't get %s property '%s'\n", node->full_name, "reg");
-+ return ret;
-+ }
-+ s = of_get_property(node, "fsl,hv-claimable", &ret);
-+ if (s && !strcmp(s, "standby"))
-+ standby = 1;
-+ if (!standby) {
-+ ret = parse_mem_property(node, "fsl,qman-fqd",
-+ &fqd_a, &fqd_sz, 1);
-+ pr_info("qman-fqd addr %pad size 0x%zx\n", &fqd_a, fqd_sz);
-+ BUG_ON(ret);
-+ ret = parse_mem_property(node, "fsl,qman-pfdr",
-+ &pfdr_a, &pfdr_sz, 0);
-+ pr_info("qman-pfdr addr %pad size 0x%zx\n", &pfdr_a, pfdr_sz);
-+ BUG_ON(ret);
-+ }
-+ /* Global configuration */
-+ len = resource_size(&res);
-+ if (len != (unsigned long)len)
-+ return -EINVAL;
-+ regs = ioremap(res.start, (unsigned long)len);
-+ qm = qm_create(regs);
-+ qm_node = node;
-+ qm_get_version(qm, &id, &major, &minor, &cfg);
-+ pr_info("Qman ver:%04x,%02x,%02x,%02x\n", id, major, minor, cfg);
-+ if (!qman_ip_rev) {
-+ if ((major == 1) && (minor == 0)) {
-+ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
-+ iounmap(regs);
-+ return -ENODEV;
-+ } else if ((major == 1) && (minor == 1))
-+ qman_ip_rev = QMAN_REV11;
-+ else if ((major == 1) && (minor == 2))
-+ qman_ip_rev = QMAN_REV12;
-+ else if ((major == 2) && (minor == 0))
-+ qman_ip_rev = QMAN_REV20;
-+ else if ((major == 3) && (minor == 0))
-+ qman_ip_rev = QMAN_REV30;
-+ else if ((major == 3) && (minor == 1))
-+ qman_ip_rev = QMAN_REV31;
-+ else if ((major == 3) && (minor == 2))
-+ qman_ip_rev = QMAN_REV32;
-+ else {
-+ pr_warn("unknown Qman version, default to rev1.1\n");
-+ qman_ip_rev = QMAN_REV11;
-+ }
-+ qman_ip_cfg = cfg;
-+ }
-+
-+ if (standby) {
-+ pr_info(" -> in standby mode\n");
-+ return 0;
-+ }
-+ return 0;
-+}
-+
-+int qman_have_ccsr(void)
-+{
-+ return qm ? 1 : 0;
-+}
-+
-+__init int qman_init_early(void)
-+{
-+ struct device_node *dn;
-+ int ret;
-+
-+ for_each_compatible_node(dn, NULL, "fsl,qman") {
-+ if (qm)
-+ pr_err("%s: only one 'fsl,qman' allowed\n",
-+ dn->full_name);
-+ else {
-+ if (!of_device_is_available(dn))
-+ continue;
-+
-+ ret = fsl_qman_init(dn);
-+ BUG_ON(ret);
-+ }
-+ }
-+ return 0;
-+}
-+postcore_initcall_sync(qman_init_early);
-+
-+static void log_edata_bits(u32 bit_count)
-+{
-+ u32 i, j, mask = 0xffffffff;
-+
-+ pr_warn("Qman ErrInt, EDATA:\n");
-+ i = bit_count/32;
-+ if (bit_count%32) {
-+ i++;
-+ mask = ~(mask << bit_count%32);
-+ }
-+ j = 16-i;
-+ pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask);
-+ j++;
-+ for (; j < 16; j++)
-+ pr_warn(" 0x%08x\n", qm_in(EDATA(j)));
-+}
-+
-+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
-+{
-+ union qman_ecir ecir_val;
-+ union qman_eadr eadr_val;
-+
-+ ecir_val.ecir_raw = qm_in(ECIR);
-+ /* Is portal info valid */
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
-+ union qman_ecir2 ecir2_val;
-+ ecir2_val.ecir2_raw = qm_in(ECIR2);
-+ if (ecsr_val & PORTAL_ECSR_ERR) {
-+ pr_warn("Qman ErrInt: %s id %d\n",
-+ (ecir2_val.info.portal_type) ?
-+ "DCP" : "SWP", ecir2_val.info.portal_num);
-+ }
-+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) {
-+ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
-+ ecir_val.info.fqid);
-+ }
-+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
-+ eadr_val.eadr_raw = qm_in(EADR);
-+ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
-+ error_mdata[eadr_val.info_rev3.memid].txt,
-+ error_mdata[eadr_val.info_rev3.memid].addr_mask
-+ & eadr_val.info_rev3.eadr);
-+ log_edata_bits(
-+ error_mdata[eadr_val.info_rev3.memid].bits);
-+ }
-+ } else {
-+ if (ecsr_val & PORTAL_ECSR_ERR) {
-+ pr_warn("Qman ErrInt: %s id %d\n",
-+ (ecir_val.info.portal_type) ?
-+ "DCP" : "SWP", ecir_val.info.portal_num);
-+ }
-+ if (ecsr_val & FQID_ECSR_ERR) {
-+ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
-+ ecir_val.info.fqid);
-+ }
-+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
-+ eadr_val.eadr_raw = qm_in(EADR);
-+ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
-+ error_mdata[eadr_val.info.memid].txt,
-+ error_mdata[eadr_val.info.memid].addr_mask
-+ & eadr_val.info.eadr);
-+ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
-+ }
-+ }
-+}
-+
-+/* Qman interrupt handler */
-+static irqreturn_t qman_isr(int irq, void *ptr)
-+{
-+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
-+
-+ ier_val = qm_err_isr_enable_read(qm);
-+ isr_val = qm_err_isr_status_read(qm);
-+ ecsr_val = qm_in(ECSR);
-+ isr_mask = isr_val & ier_val;
-+
-+ if (!isr_mask)
-+ return IRQ_NONE;
-+ for (i = 0; i < QMAN_HWE_COUNT; i++) {
-+ if (qman_hwerr_txts[i].mask & isr_mask) {
-+ pr_warn("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt);
-+ if (qman_hwerr_txts[i].mask & ecsr_val) {
-+ log_additional_error_info(isr_mask, ecsr_val);
-+ /* Re-arm error capture registers */
-+ qm_out(ECSR, ecsr_val);
-+ }
-+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
-+ pr_devel("Qman un-enabling error 0x%x\n",
-+ qman_hwerr_txts[i].mask);
-+ ier_val &= ~qman_hwerr_txts[i].mask;
-+ qm_err_isr_enable_write(qm, ier_val);
-+ }
-+ }
-+ }
-+ qm_err_isr_status_clear(qm, isr_val);
-+ return IRQ_HANDLED;
-+}
-+
-+static int __bind_irq(void)
-+{
-+ int ret, err_irq;
-+
-+ err_irq = of_irq_to_resource(qm_node, 0, NULL);
-+ if (err_irq == 0) {
-+ pr_info("Can't get %s property '%s'\n", qm_node->full_name,
-+ "interrupts");
-+ return -ENODEV;
-+ }
-+ ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node);
-+ if (ret) {
-+ pr_err("request_irq() failed %d for '%s'\n", ret,
-+ qm_node->full_name);
-+ return -ENODEV;
-+ }
-+	/* Write-to-clear any stale bits (e.g. starvation being asserted prior
-+	 * to resource allocation during driver init). */
-+ qm_err_isr_status_clear(qm, 0xffffffff);
-+ /* Enable Error Interrupts */
-+ qm_err_isr_enable_write(qm, 0xffffffff);
-+ return 0;
-+}
-+
-+int qman_init_ccsr(struct device_node *node)
-+{
-+ int ret;
-+ if (!qman_have_ccsr())
-+ return 0;
-+ if (node != qm_node)
-+ return -EINVAL;
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ /* TEMP for LS1043 : should be done in uboot */
-+ qm_out(QCSP_BARE, 0x5);
-+ qm_out(QCSP_BAR, 0x0);
-+#endif
-+ /* FQD memory */
-+ qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
-+ /* PFDR memory */
-+ qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
-+ qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
-+ /* thresholds */
-+ qm_set_pfdr_threshold(qm, 512, 64);
-+ qm_set_sfdr_threshold(qm, 128);
-+ /* clear stale PEBI bit from interrupt status register */
-+ qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
-+ /* corenet initiator settings */
-+ qm_set_corenet_initiator(qm);
-+ /* HID settings */
-+ qm_set_hid(qm);
-+ /* Set scheduling weights to defaults */
-+ for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
-+ qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
-+ /* We are not prepared to accept ERNs for hardware enqueues */
-+ qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
-+ qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
-+ /* Initialise Error Interrupt Handler */
-+ ret = __bind_irq();
-+ if (ret)
-+ return ret;
-+ return 0;
-+}
-+
-+#define LIO_CFG_LIODN_MASK 0x0fff0000
-+void qman_liodn_fixup(u16 channel)
-+{
-+ static int done;
-+ static u32 liodn_offset;
-+ u32 before, after;
-+ int idx = channel - QM_CHANNEL_SWPORTAL0;
-+
-+ if (!qman_have_ccsr())
-+ return;
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+ before = qm_in(REV3_QCSP_LIO_CFG(idx));
-+ else
-+ before = qm_in(QCSP_LIO_CFG(idx));
-+ if (!done) {
-+ liodn_offset = before & LIO_CFG_LIODN_MASK;
-+ done = 1;
-+ return;
-+ }
-+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+ qm_out(REV3_QCSP_LIO_CFG(idx), after);
-+ else
-+ qm_out(QCSP_LIO_CFG(idx), after);
-+}
-+
-+#define IO_CFG_SDEST_MASK 0x00ff0000
-+int qman_set_sdest(u16 channel, unsigned int cpu_idx)
-+{
-+ int idx = channel - QM_CHANNEL_SWPORTAL0;
-+ u32 before, after;
-+
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+ if ((qman_ip_rev & 0xFF00) == QMAN_REV31) {
-+ /* LS1043A - only one L2 cache */
-+ cpu_idx = 0;
-+ }
-+
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
-+ before = qm_in(REV3_QCSP_IO_CFG(idx));
-+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
-+ cpu_idx /= 2;
-+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
-+ qm_out(REV3_QCSP_IO_CFG(idx), after);
-+ } else {
-+ before = qm_in(QCSP_IO_CFG(idx));
-+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
-+ qm_out(QCSP_IO_CFG(idx), after);
-+ }
-+ return 0;
-+}
-+
-+#define MISC_CFG_WPM_MASK 0x00000002
-+int qm_set_wpm(int wpm)
-+{
-+ u32 before;
-+ u32 after;
-+
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+
-+ before = qm_in(MISC_CFG);
-+ after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
-+ qm_out(MISC_CFG, after);
-+ return 0;
-+}
-+
-+int qm_get_wpm(int *wpm)
-+{
-+ u32 before;
-+
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+
-+ before = qm_in(MISC_CFG);
-+ *wpm = (before & MISC_CFG_WPM_MASK) >> 1;
-+ return 0;
-+}
-+
-+/* The CEETM_CFG_PRES register has a PRES field which is calculated as:
-+ * PRES = (2^22 / credit update reference period) * QMan clock period
-+ *      = (2^22 * 10^9 / CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk
-+ */
-+
-+int qman_ceetm_set_prescaler(enum qm_dc_portal portal)
-+{
-+ u64 temp;
-+ u16 pres;
-+
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+
-+ temp = 0x400000 * 100;
-+ do_div(temp, CONFIG_QMAN_CEETM_UPDATE_PERIOD);
-+ temp *= 10000000;
-+ do_div(temp, qman_clk);
-+ pres = (u16) temp;
-+ qm_out(CEETM_CFG_IDX, portal);
-+ qm_out(CEETM_CFG_PRES, pres);
-+ return 0;
-+}
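-+
-+/* Worked example (the figures are assumptions, not silicon defaults):
-+ * assuming CONFIG_QMAN_CEETM_UPDATE_PERIOD = 1000 (ns) and a 500 MHz
-+ * qman_clk, PRES = (2^22 * 10^9 / 1000) / 500000000 ~= 8388, which is
-+ * exactly what the integer arithmetic above yields:
-+ * ((0x400000 * 100 / 1000) * 10^7) / 500000000 = 8388.
-+ */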
-+
-+int qman_ceetm_get_prescaler(u16 *pres)
-+{
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+ *pres = (u16)qm_in(CEETM_CFG_PRES);
-+ return 0;
-+}
-+
-+#define DCP_CFG_CEETME_MASK 0xFFFF0000
-+#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n))
-+int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
-+{
-+ u32 dcp_cfg;
-+
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+
-+ dcp_cfg = qm_in(DCP_CFG(portal));
-+ dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal);
-+ qm_out(DCP_CFG(portal), dcp_cfg);
-+ return 0;
-+}
-+
-+int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
-+{
-+ u32 dcp_cfg;
-+
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+ dcp_cfg = qm_in(DCP_CFG(portal));
-+ dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal));
-+ qm_out(DCP_CFG(portal), dcp_cfg);
-+ return 0;
-+}
-+
-+int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num)
-+{
-+ if (!qman_have_ccsr())
-+ return -ENODEV;
-+ *num = qm_in(CEETM_XSFDR_IN_USE);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_get_xsfdr);
-+
-+#ifdef CONFIG_SYSFS
-+
-+#define DRV_NAME "fsl-qman"
-+#define DCP_MAX_ID 3
-+#define DCP_MIN_ID 0
-+
-+static ssize_t show_pfdr_fpc(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
-+};
-+
-+static ssize_t show_dlm_avg(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ u32 data;
-+ int i;
-+
-+ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
-+ return -EINVAL;
-+ if (i < DCP_MIN_ID || i > DCP_MAX_ID)
-+ return -EINVAL;
-+ data = qm_in(DCP_DLM_AVG(i));
-+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
-+ (data & 0x000000ff)*390625);
-+};
-+
-+static ssize_t set_dlm_avg(struct device *dev,
-+ struct device_attribute *dev_attr, const char *buf, size_t count)
-+{
-+ unsigned long val;
-+ int i;
-+
-+ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
-+ return -EINVAL;
-+ if (i < DCP_MIN_ID || i > DCP_MAX_ID)
-+ return -EINVAL;
-+ if (kstrtoul(buf, 0, &val)) {
-+ dev_dbg(dev, "invalid input %s\n", buf);
-+ return -EINVAL;
-+ }
-+ qm_out(DCP_DLM_AVG(i), val);
-+ return count;
-+};
-+
-+static ssize_t show_pfdr_cfg(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
-+};
-+
-+static ssize_t set_pfdr_cfg(struct device *dev,
-+ struct device_attribute *dev_attr, const char *buf, size_t count)
-+{
-+ unsigned long val;
-+
-+ if (kstrtoul(buf, 0, &val)) {
-+ dev_dbg(dev, "invalid input %s\n", buf);
-+ return -EINVAL;
-+ }
-+ qm_out(PFDR_CFG, val);
-+ return count;
-+};
-+
-+static ssize_t show_sfdr_in_use(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
-+};
-+
-+static ssize_t show_idle_stat(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
-+};
-+
-+static ssize_t show_ci_rlm_avg(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ u32 data = qm_in(CI_RLM_AVG);
-+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
-+ (data & 0x000000ff)*390625);
-+};
-+
-+static ssize_t set_ci_rlm_avg(struct device *dev,
-+ struct device_attribute *dev_attr, const char *buf, size_t count)
-+{
-+ unsigned long val;
-+
-+ if (kstrtoul(buf, 0, &val)) {
-+ dev_dbg(dev, "invalid input %s\n", buf);
-+ return -EINVAL;
-+ }
-+ qm_out(CI_RLM_AVG, val);
-+ return count;
-+};
-+
-+static ssize_t show_err_isr(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
-+};
-+
-+#define SBEC_MAX_ID 14
-+#define SBEC_MIN_ID 0
-+
-+static ssize_t show_sbec(struct device *dev,
-+ struct device_attribute *dev_attr, char *buf)
-+{
-+ int i;
-+
-+ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
-+ return -EINVAL;
-+ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
-+ return -EINVAL;
-+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
-+};
-+
-+static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
-+static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
-+static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
-+static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
-+ show_ci_rlm_avg, set_ci_rlm_avg);
-+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
-+static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
-+
-+static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-+static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-+static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-+static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
-+
-+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
-+static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
-+
-+static struct attribute *qman_dev_attributes[] = {
-+ &dev_attr_pfdr_fpc.attr,
-+ &dev_attr_pfdr_cfg.attr,
-+ &dev_attr_idle_stat.attr,
-+ &dev_attr_ci_rlm_avg.attr,
-+ &dev_attr_err_isr.attr,
-+ &dev_attr_dcp0_dlm_avg.attr,
-+ &dev_attr_dcp1_dlm_avg.attr,
-+ &dev_attr_dcp2_dlm_avg.attr,
-+ &dev_attr_dcp3_dlm_avg.attr,
-+ /* sfdr_in_use will be added if necessary */
-+ NULL
-+};
-+
-+static struct attribute *qman_dev_ecr_attributes[] = {
-+ &dev_attr_sbec_0.attr,
-+ &dev_attr_sbec_1.attr,
-+ &dev_attr_sbec_2.attr,
-+ &dev_attr_sbec_3.attr,
-+ &dev_attr_sbec_4.attr,
-+ &dev_attr_sbec_5.attr,
-+ &dev_attr_sbec_6.attr,
-+ &dev_attr_sbec_7.attr,
-+ &dev_attr_sbec_8.attr,
-+ &dev_attr_sbec_9.attr,
-+ &dev_attr_sbec_10.attr,
-+ &dev_attr_sbec_11.attr,
-+ &dev_attr_sbec_12.attr,
-+ &dev_attr_sbec_13.attr,
-+ &dev_attr_sbec_14.attr,
-+ NULL
-+};
-+
-+/* root level */
-+static const struct attribute_group qman_dev_attr_grp = {
-+ .name = NULL,
-+ .attrs = qman_dev_attributes
-+};
-+static const struct attribute_group qman_dev_ecr_grp = {
-+ .name = "error_capture",
-+ .attrs = qman_dev_ecr_attributes
-+};
-+
-+static int of_fsl_qman_remove(struct platform_device *ofdev)
-+{
-+ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
-+ return 0;
-+};
-+
-+static int of_fsl_qman_probe(struct platform_device *ofdev)
-+{
-+ int ret;
-+
-+ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
-+ if (ret)
-+ goto done;
-+ ret = sysfs_add_file_to_group(&ofdev->dev.kobj,
-+ &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
-+ if (ret)
-+ goto del_group_0;
-+ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp);
-+ if (ret)
-+ goto del_group_0;
-+
-+ goto done;
-+
-+del_group_0:
-+ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
-+done:
-+ if (ret)
-+ dev_err(&ofdev->dev,
-+ "Cannot create dev attributes ret=%d\n", ret);
-+ return ret;
-+};
-+
-+static struct of_device_id of_fsl_qman_ids[] = {
-+ {
-+ .compatible = "fsl,qman",
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, of_fsl_qman_ids);
-+
-+#ifdef CONFIG_SUSPEND
-+
-+static u32 saved_isdr;
-+static int qman_pm_suspend_noirq(struct device *dev)
-+{
-+ uint32_t idle_state;
-+
-+ suspend_unused_qportal();
-+ /* save isdr, disable all, clear isr */
-+ saved_isdr = qm_err_isr_disable_read(qm);
-+ qm_err_isr_disable_write(qm, 0xffffffff);
-+ qm_err_isr_status_clear(qm, 0xffffffff);
-+ idle_state = qm_in(IDLE_STAT);
-+ if (!(idle_state & 0x1)) {
-+ pr_err("Qman not idle 0x%x aborting\n", idle_state);
-+ qm_err_isr_disable_write(qm, saved_isdr);
-+ resume_unused_qportal();
-+ return -EBUSY;
-+ }
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Qman suspend code, IDLE_STAT = 0x%x\n", idle_state);
-+#endif
-+ return 0;
-+}
-+
-+static int qman_pm_resume_noirq(struct device *dev)
-+{
-+ /* restore isdr */
-+ qm_err_isr_disable_write(qm, saved_isdr);
-+ resume_unused_qportal();
-+ return 0;
-+}
-+#else
-+#define qman_pm_suspend_noirq NULL
-+#define qman_pm_resume_noirq NULL
-+#endif
-+
-+static const struct dev_pm_ops qman_pm_ops = {
-+ .suspend_noirq = qman_pm_suspend_noirq,
-+ .resume_noirq = qman_pm_resume_noirq,
-+};
-+
-+static struct platform_driver of_fsl_qman_driver = {
-+ .driver = {
-+ .owner = THIS_MODULE,
-+ .name = DRV_NAME,
-+ .of_match_table = of_fsl_qman_ids,
-+ .pm = &qman_pm_ops,
-+ },
-+ .probe = of_fsl_qman_probe,
-+ .remove = of_fsl_qman_remove,
-+};
-+
-+static int qman_ctrl_init(void)
-+{
-+ return platform_driver_register(&of_fsl_qman_driver);
-+}
-+
-+static void qman_ctrl_exit(void)
-+{
-+ platform_driver_unregister(&of_fsl_qman_driver);
-+}
-+
-+module_init(qman_ctrl_init);
-+module_exit(qman_ctrl_exit);
-+
-+#endif /* CONFIG_SYSFS */
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_debugfs.c
-@@ -0,0 +1,1594 @@
-+/* Copyright 2010-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include "qman_private.h"
-+
-+#define MAX_FQID (0x00ffffff)
-+#define QM_FQD_BLOCK_SIZE 64
-+#define QM_FQD_AR (0xC10)
-+
-+static u32 fqid_max;
-+static u64 qman_ccsr_start;
-+static u64 qman_ccsr_size;
-+
-+static const char * const state_txt[] = {
-+ "Out of Service",
-+ "Retired",
-+ "Tentatively Scheduled",
-+ "Truly Scheduled",
-+ "Parked",
-+ "Active, Active Held or Held Suspended",
-+ "Unknown State 6",
-+ "Unknown State 7",
-+ NULL,
-+};
-+
-+static const u8 fqd_states[] = {
-+ QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED,
-+ QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED,
-+ QM_MCR_NP_STATE_ACTIVE};
-+
-+struct mask_to_text {
-+ u16 mask;
-+ const char *txt;
-+};
-+
-+struct mask_filter_s {
-+ u16 mask;
-+ u8 filter;
-+};
-+
-+static const struct mask_filter_s mask_filter[] = {
-+ {QM_FQCTRL_PREFERINCACHE, 0},
-+ {QM_FQCTRL_PREFERINCACHE, 1},
-+ {QM_FQCTRL_HOLDACTIVE, 0},
-+ {QM_FQCTRL_HOLDACTIVE, 1},
-+ {QM_FQCTRL_AVOIDBLOCK, 0},
-+ {QM_FQCTRL_AVOIDBLOCK, 1},
-+ {QM_FQCTRL_FORCESFDR, 0},
-+ {QM_FQCTRL_FORCESFDR, 1},
-+ {QM_FQCTRL_CPCSTASH, 0},
-+ {QM_FQCTRL_CPCSTASH, 1},
-+ {QM_FQCTRL_CTXASTASHING, 0},
-+ {QM_FQCTRL_CTXASTASHING, 1},
-+ {QM_FQCTRL_ORP, 0},
-+ {QM_FQCTRL_ORP, 1},
-+ {QM_FQCTRL_TDE, 0},
-+ {QM_FQCTRL_TDE, 1},
-+ {QM_FQCTRL_CGE, 0},
-+ {QM_FQCTRL_CGE, 1}
-+};
-+
-+static const struct mask_to_text fq_ctrl_text_list[] = {
-+ {
-+ .mask = QM_FQCTRL_PREFERINCACHE,
-+ .txt = "Prefer in cache",
-+ },
-+ {
-+ .mask = QM_FQCTRL_HOLDACTIVE,
-+ .txt = "Hold active in portal",
-+ },
-+ {
-+ .mask = QM_FQCTRL_AVOIDBLOCK,
-+ .txt = "Avoid Blocking",
-+ },
-+ {
-+ .mask = QM_FQCTRL_FORCESFDR,
-+ .txt = "High-priority SFDRs",
-+ },
-+ {
-+ .mask = QM_FQCTRL_CPCSTASH,
-+ .txt = "CPC Stash Enable",
-+ },
-+ {
-+ .mask = QM_FQCTRL_CTXASTASHING,
-+ .txt = "Context-A stashing",
-+ },
-+ {
-+ .mask = QM_FQCTRL_ORP,
-+ .txt = "ORP Enable",
-+ },
-+ {
-+ .mask = QM_FQCTRL_TDE,
-+ .txt = "Tail-Drop Enable",
-+ },
-+ {
-+ .mask = QM_FQCTRL_CGE,
-+ .txt = "Congestion Group Enable",
-+ },
-+ {
-+ .mask = 0,
-+ .txt = NULL,
-+ }
-+};
-+
-+static const char *get_fqd_ctrl_text(u16 mask)
-+{
-+ int i = 0;
-+
-+ while (fq_ctrl_text_list[i].txt != NULL) {
-+ if (fq_ctrl_text_list[i].mask == mask)
-+ return fq_ctrl_text_list[i].txt;
-+ i++;
-+ }
-+ return NULL;
-+}
-+
-+static const struct mask_to_text stashing_text_list[] = {
-+ {
-+ .mask = QM_STASHING_EXCL_CTX,
-+ .txt = "FQ Ctx Stash"
-+ },
-+ {
-+ .mask = QM_STASHING_EXCL_DATA,
-+ .txt = "Frame Data Stash",
-+ },
-+ {
-+ .mask = QM_STASHING_EXCL_ANNOTATION,
-+ .txt = "Frame Annotation Stash",
-+ },
-+ {
-+ .mask = 0,
-+ .txt = NULL,
-+ },
-+};
-+
-+static int user_input_convert(const char __user *user_buf, size_t count,
-+ unsigned long *val)
-+{
-+ char buf[12];
-+
-+ if (count > sizeof(buf) - 1)
-+ return -EINVAL;
-+ if (copy_from_user(buf, user_buf, count))
-+ return -EFAULT;
-+ buf[count] = '\0';
-+ if (kstrtoul(buf, 0, val))
-+ return -EINVAL;
-+ return 0;
-+}
-+
-+struct line_buffer_fq {
-+ u32 buf[8];
-+ u32 buf_cnt;
-+ int line_cnt;
-+};
-+
-+static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid,
-+ struct seq_file *file)
-+{
-+ line_buf->buf[line_buf->buf_cnt] = fqid;
-+ line_buf->buf_cnt++;
-+ if (line_buf->buf_cnt == 8) {
-+ /* Buffer is full, flush it */
-+ if (line_buf->line_cnt != 0)
-+ seq_puts(file, ",\n");
-+ seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,"
-+ "0x%06x,0x%06x,0x%06x",
-+ line_buf->buf[0], line_buf->buf[1], line_buf->buf[2],
-+ line_buf->buf[3], line_buf->buf[4], line_buf->buf[5],
-+ line_buf->buf[6], line_buf->buf[7]);
-+ line_buf->buf_cnt = 0;
-+ line_buf->line_cnt++;
-+ }
-+}
-+
-+static void flush_line_buffer(struct line_buffer_fq *line_buf,
-+ struct seq_file *file)
-+{
-+ if (line_buf->buf_cnt) {
-+ int y = 0;
-+ if (line_buf->line_cnt != 0)
-+ seq_puts(file, ",\n");
-+ while (y != line_buf->buf_cnt) {
-+ if (y+1 == line_buf->buf_cnt)
-+ seq_printf(file, "0x%06x", line_buf->buf[y]);
-+ else
-+ seq_printf(file, "0x%06x,", line_buf->buf[y]);
-+ y++;
-+ }
-+ line_buf->line_cnt++;
-+ }
-+ if (line_buf->line_cnt)
-+ seq_putc(file, '\n');
-+}
-+
-+static struct dentry *dfs_root; /* debugfs root directory */
-+
-+/*******************************************************************************
-+ * Query Frame Queue Non Programmable Fields
-+ ******************************************************************************/
-+struct query_fq_np_fields_data_s {
-+ u32 fqid;
-+};
-+static struct query_fq_np_fields_data_s query_fq_np_fields_data = {
-+ .fqid = 1,
-+};
-+
-+static int query_fq_np_fields_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_mcr_queryfq_np np;
-+ struct qman_fq fq;
-+
-+ fq.fqid = query_fq_np_fields_data.fqid;
-+ ret = qman_query_fq_np(&fq, &np);
-+ if (ret)
-+ return ret;
-+ /* Print state */
-+ seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n",
-+ fq.fqid);
-+ seq_printf(file, " force eligible pending: %s\n",
-+ (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no");
-+ seq_printf(file, " retirement pending: %s\n",
-+ (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no");
-+ seq_printf(file, " state: %s\n",
-+ state_txt[np.state & QM_MCR_NP_STATE_MASK]);
-+ seq_printf(file, " fq_link: 0x%x\n", np.fqd_link);
-+ seq_printf(file, " odp_seq: %u\n", np.odp_seq);
-+ seq_printf(file, " orp_nesn: %u\n", np.orp_nesn);
-+ seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq);
-+ seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq);
-+ seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr);
-+ seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr);
-+ seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr);
-+ seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr);
-+ seq_printf(file, " is: ics_surp contains a %s\n",
-+ (np.is) ? "deficit" : "surplus");
-+ seq_printf(file, " ics_surp: %u\n", np.ics_surp);
-+ seq_printf(file, " byte_cnt: %u\n", np.byte_cnt);
-+ seq_printf(file, " frm_cnt: %u\n", np.frm_cnt);
-+ seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr);
-+ seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr);
-+ seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr);
-+ seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr);
-+ seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr);
-+ return 0;
-+}
-+
-+static int query_fq_np_fields_open(struct inode *inode,
-+ struct file *file)
-+{
-+ return single_open(file, query_fq_np_fields_show, NULL);
-+}
-+
-+static ssize_t query_fq_np_fields_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ if (val > MAX_FQID)
-+ return -EINVAL;
-+ query_fq_np_fields_data.fqid = (u32)val;
-+ return count;
-+}
-+
-+static const struct file_operations query_fq_np_fields_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_fq_np_fields_open,
-+ .read = seq_read,
-+ .write = query_fq_np_fields_write,
-+ .release = single_release,
-+};
-+
-+/*******************************************************************************
-+ * Frame Queue Programmable Fields
-+ ******************************************************************************/
-+struct query_fq_fields_data_s {
-+ u32 fqid;
-+};
-+
-+static struct query_fq_fields_data_s query_fq_fields_data = {
-+ .fqid = 1,
-+};
-+
-+static int query_fq_fields_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_fqd fqd;
-+ struct qman_fq fq;
-+ int i = 0;
-+
-+ memset(&fqd, 0, sizeof(struct qm_fqd));
-+ fq.fqid = query_fq_fields_data.fqid;
-+ ret = qman_query_fq(&fq, &fqd);
-+ if (ret)
-+ return ret;
-+ seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n",
-+ fq.fqid);
-+ seq_printf(file, " orprws: %u\n", fqd.orprws);
-+ seq_printf(file, " oa: %u\n", fqd.oa);
-+ seq_printf(file, " olws: %u\n", fqd.olws);
-+
-+ seq_printf(file, " cgid: %u\n", fqd.cgid);
-+
-+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0)
-+ seq_puts(file, " fq_ctrl: None\n");
-+ else {
-+ i = 0;
-+ seq_puts(file, " fq_ctrl:\n");
-+ while (fq_ctrl_text_list[i].txt != NULL) {
-+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
-+ fq_ctrl_text_list[i].mask)
-+ seq_printf(file, " %s\n",
-+ fq_ctrl_text_list[i].txt);
-+ i++;
-+ }
-+ }
-+ seq_printf(file, " dest_channel: %u\n", fqd.dest.channel);
-+ seq_printf(file, " dest_wq: %u\n", fqd.dest.wq);
-+ seq_printf(file, " ics_cred: %u\n", fqd.ics_cred);
-+ seq_printf(file, " td_mant: %u\n", fqd.td.mant);
-+ seq_printf(file, " td_exp: %u\n", fqd.td.exp);
-+
-+ seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b);
-+
-+ seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd));
-+ /* Any stashing configured */
-+ if ((fqd.context_a.stashing.exclusive & 0x7) == 0)
-+ seq_puts(file, " ctx_a_stash_exclusive: None\n");
-+ else {
-+ seq_puts(file, " ctx_a_stash_exclusive:\n");
-+ i = 0;
-+ while (stashing_text_list[i].txt != NULL) {
-+ if ((fqd.context_a.stashing.exclusive & 0x7) & stashing_text_list[i].mask)
-+ seq_printf(file, " %s\n",
-+ stashing_text_list[i].txt);
-+ i++;
-+ }
-+ }
-+ seq_printf(file, " ctx_a_stash_annotation_cl: %u\n",
-+ fqd.context_a.stashing.annotation_cl);
-+ seq_printf(file, " ctx_a_stash_data_cl: %u\n",
-+ fqd.context_a.stashing.data_cl);
-+ seq_printf(file, " ctx_a_stash_context_cl: %u\n",
-+ fqd.context_a.stashing.context_cl);
-+ return 0;
-+}
-+
-+static int query_fq_fields_open(struct inode *inode,
-+ struct file *file)
-+{
-+ return single_open(file, query_fq_fields_show, NULL);
-+}
-+
-+static ssize_t query_fq_fields_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ if (val > MAX_FQID)
-+ return -EINVAL;
-+ query_fq_fields_data.fqid = (u32)val;
-+ return count;
-+}
-+
-+static const struct file_operations query_fq_fields_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_fq_fields_open,
-+ .read = seq_read,
-+ .write = query_fq_fields_write,
-+ .release = single_release,
-+};
-+
-+/*******************************************************************************
-+ * Query WQ lengths
-+ ******************************************************************************/
-+struct query_wq_lengths_data_s {
-+ union {
-+ u16 channel_wq; /* ignores wq (3 lsbits) */
-+ struct {
-+ u16 id:13; /* qm_channel */
-+ u16 __reserved:3;
-+ } __packed channel;
-+ };
-+};
-+static struct query_wq_lengths_data_s query_wq_lengths_data;
-+static int query_wq_lengths_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_mcr_querywq wq;
-+ int i;
-+
-+ memset(&wq, 0, sizeof(struct qm_mcr_querywq));
-+ wq.channel.id = query_wq_lengths_data.channel.id;
-+ ret = qman_query_wq(0, &wq);
-+ if (ret)
-+ return ret;
-+ seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id);
-+ for (i = 0; i < 8; i++)
-+ /* mask out upper 4 bits since they are not part of length */
-+ seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff);
-+ return 0;
-+}
-+
-+static int query_wq_lengths_open(struct inode *inode,
-+ struct file *file)
-+{
-+ return single_open(file, query_wq_lengths_show, NULL);
-+}
-+
-+static ssize_t query_wq_lengths_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ if (val > 0xfff8)
-+ return -EINVAL;
-+ query_wq_lengths_data.channel.id = (u16)val;
-+ return count;
-+}
-+
-+static const struct file_operations query_wq_lengths_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_wq_lengths_open,
-+ .read = seq_read,
-+ .write = query_wq_lengths_write,
-+ .release = single_release,
-+};
-+
-+/*******************************************************************************
-+ * Query CGR
-+ ******************************************************************************/
-+struct query_cgr_s {
-+ u8 cgid;
-+};
-+static struct query_cgr_s query_cgr_data;
-+
-+static int query_cgr_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_mcr_querycgr cgrd;
-+ struct qman_cgr cgr;
-+ int i, j;
-+ u32 mask;
-+
-+ memset(&cgr, 0, sizeof(cgr));
-+ memset(&cgrd, 0, sizeof(cgrd));
-+ cgr.cgrid = query_cgr_data.cgid;
-+ ret = qman_query_cgr(&cgr, &cgrd);
-+ if (ret)
-+ return ret;
-+ seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid);
-+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn,
-+ cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn,
-+ cgrd.cgr.wr_parm_g.Pn);
-+
-+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn,
-+ cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn,
-+ cgrd.cgr.wr_parm_y.Pn);
-+
-+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn,
-+ cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn,
-+ cgrd.cgr.wr_parm_r.Pn);
-+
-+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
-+ cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r);
-+
-+ seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en);
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
-+ seq_puts(file, " cscn_targ_dcp:\n");
-+ mask = 0x80000000;
-+ for (i = 0; i < 32; i++) {
-+ if (cgrd.cgr.cscn_targ & mask)
-+ seq_printf(file, " send CSCN to dcp %u\n",
-+ (31 - i));
-+ mask >>= 1;
-+ }
-+
-+ seq_puts(file, " cscn_targ_swp:\n");
-+ for (i = 0; i < 4; i++) {
-+ mask = 0x80000000;
-+ for (j = 0; j < 32; j++) {
-+ if (cgrd.cscn_targ_swp[i] & mask)
-+ seq_printf(file, " send CSCN to swp"
-+ " %u\n", (127 - (i * 32) - j));
-+ mask >>= 1;
-+ }
-+ }
-+ } else {
-+ seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ);
-+ }
-+ seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en);
-+ seq_printf(file, " cs: %u\n", cgrd.cgr.cs);
-+
-+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
-+ cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn);
-+
-+ seq_printf(file, " mode: %s\n",
-+ (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ?
-+ "frame count" : "byte count");
-+ seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd));
-+ seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd));
-+
-+ return 0;
-+}
-+
-+static int query_cgr_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, query_cgr_show, NULL);
-+}
-+
-+static ssize_t query_cgr_write(struct file *f, const char __user *buf,
-+ size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ if (val > 0xff)
-+ return -EINVAL;
-+ query_cgr_data.cgid = (u8)val;
-+ return count;
-+}
-+
-+static const struct file_operations query_cgr_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_cgr_open,
-+ .read = seq_read,
-+ .write = query_cgr_write,
-+ .release = single_release,
-+};
-+
-+/*******************************************************************************
-+ * Test Write CGR
-+ ******************************************************************************/
-+struct test_write_cgr_s {
-+ u64 i_bcnt;
-+ u8 cgid;
-+};
-+static struct test_write_cgr_s test_write_cgr_data;
-+
-+static int testwrite_cgr_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_mcr_cgrtestwrite result;
-+ struct qman_cgr cgr;
-+ u64 i_bcnt;
-+
-+ memset(&cgr, 0, sizeof(struct qman_cgr));
-+ memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite));
-+ cgr.cgrid = test_write_cgr_data.cgid;
-+ i_bcnt = test_write_cgr_data.i_bcnt;
-+ ret = qman_testwrite_cgr(&cgr, i_bcnt, &result);
-+ if (ret)
-+ return ret;
-+ seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid);
-+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn,
-+ result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn,
-+ result.cgr.wr_parm_g.Pn);
-+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn,
-+ result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn,
-+ result.cgr.wr_parm_y.Pn);
-+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn,
-+ result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn,
-+ result.cgr.wr_parm_r.Pn);
-+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
-+ result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r);
-+ seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en);
-+ seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ);
-+ seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en);
-+ seq_printf(file, " cs: %u\n", result.cgr.cs);
-+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
-+ result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn);
-+
-+ /* Add Mode for Si 2 */
-+ seq_printf(file, " mode: %s\n",
-+ (result.cgr.mode & QMAN_CGR_MODE_FRAME) ?
-+ "frame count" : "byte count");
-+
-+ seq_printf(file, " i_bcnt: %llu\n",
-+ qm_mcr_cgrtestwrite_i_get64(&result));
-+ seq_printf(file, " a_bcnt: %llu\n",
-+ qm_mcr_cgrtestwrite_a_get64(&result));
-+ seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g);
-+ seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y);
-+ seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r);
-+ return 0;
-+}
-+
-+static int testwrite_cgr_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, testwrite_cgr_show, NULL);
-+}
-+
-+static const struct file_operations testwrite_cgr_fops = {
-+ .owner = THIS_MODULE,
-+ .open = testwrite_cgr_open,
-+ .read = seq_read,
-+ .release = single_release,
-+};
-+
-+
-+static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset)
-+{
-+ seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt);
-+ return 0;
-+}
-+static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, testwrite_cgr_ibcnt_show, NULL);
-+}
-+
-+static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf,
-+ size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ test_write_cgr_data.i_bcnt = val;
-+ return count;
-+}
-+
-+static const struct file_operations teswrite_cgr_ibcnt_fops = {
-+ .owner = THIS_MODULE,
-+ .open = testwrite_cgr_ibcnt_open,
-+ .read = seq_read,
-+ .write = testwrite_cgr_ibcnt_write,
-+ .release = single_release,
-+};
-+
-+static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset)
-+{
-+ seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid);
-+ return 0;
-+}
-+static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, testwrite_cgr_cgrid_show, NULL);
-+}
-+
-+static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf,
-+ size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ if (val > 0xff)
-+ return -EINVAL;
-+ test_write_cgr_data.cgid = (u8)val;
-+ return count;
-+}
-+
-+static const struct file_operations teswrite_cgr_cgrid_fops = {
-+ .owner = THIS_MODULE,
-+ .open = testwrite_cgr_cgrid_open,
-+ .read = seq_read,
-+ .write = testwrite_cgr_cgrid_write,
-+ .release = single_release,
-+};
-+
-+/*******************************************************************************
-+ * Query Congestion State
-+ ******************************************************************************/
-+static int query_congestion_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_mcr_querycongestion cs;
-+ int i, j, in_cong = 0;
-+ u32 mask;
-+
-+ memset(&cs, 0, sizeof(struct qm_mcr_querycongestion));
-+ ret = qman_query_congestion(&cs);
-+ if (ret)
-+ return ret;
-+ seq_puts(file, "Query Congestion Result\n");
-+ for (i = 0; i < 8; i++) {
-+ mask = 0x80000000;
-+ for (j = 0; j < 32; j++) {
-+ if (cs.state.__state[i] & mask) {
-+ in_cong = 1;
-+ seq_printf(file, " cg %u: %s\n", (i*32)+j,
-+ "in congestion");
-+ }
-+ mask >>= 1;
-+ }
-+ }
-+ if (!in_cong)
-+ seq_puts(file, " All congestion groups not congested.\n");
-+ return 0;
-+}
-+
-+static int query_congestion_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, query_congestion_show, NULL);
-+}
-+
-+static const struct file_operations query_congestion_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_congestion_open,
-+ .read = seq_read,
-+ .release = single_release,
-+};
-+
-+/*******************************************************************************
-+ * Query CCGR
-+ ******************************************************************************/
-+struct query_ccgr_s {
-+ u32 ccgid;
-+};
-+static struct query_ccgr_s query_ccgr_data;
-+
-+static int query_ccgr_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_mcr_ceetm_ccgr_query ccgr_query;
-+ struct qm_mcc_ceetm_ccgr_query query_opts;
-+ int i, j;
-+ u32 mask;
-+
-+ memset(&ccgr_query, 0, sizeof(struct qm_mcr_ceetm_ccgr_query));
-+ memset(&query_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_query));
-+
-+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
-+ return -EINVAL;
-+
-+ seq_printf(file, "Query CCGID %x\n", query_ccgr_data.ccgid);
-+ query_opts.dcpid = ((query_ccgr_data.ccgid & 0xFF000000) >> 24);
-+ query_opts.ccgrid = query_ccgr_data.ccgid & 0x000001FF;
-+ ret = qman_ceetm_query_ccgr(&query_opts, &ccgr_query);
-+ if (ret)
-+ return ret;
-+ seq_printf(file, "Query CCGR id %x in DCP %d\n", query_opts.ccgrid,
-+ query_opts.dcpid);
-+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ ccgr_query.cm_query.wr_parm_g.MA,
-+ ccgr_query.cm_query.wr_parm_g.Mn,
-+ ccgr_query.cm_query.wr_parm_g.SA,
-+ ccgr_query.cm_query.wr_parm_g.Sn,
-+ ccgr_query.cm_query.wr_parm_g.Pn);
-+
-+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ ccgr_query.cm_query.wr_parm_y.MA,
-+ ccgr_query.cm_query.wr_parm_y.Mn,
-+ ccgr_query.cm_query.wr_parm_y.SA,
-+ ccgr_query.cm_query.wr_parm_y.Sn,
-+ ccgr_query.cm_query.wr_parm_y.Pn);
-+
-+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
-+ ccgr_query.cm_query.wr_parm_r.MA,
-+ ccgr_query.cm_query.wr_parm_r.Mn,
-+ ccgr_query.cm_query.wr_parm_r.SA,
-+ ccgr_query.cm_query.wr_parm_r.Sn,
-+ ccgr_query.cm_query.wr_parm_r.Pn);
-+
-+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
-+ ccgr_query.cm_query.ctl_wr_en_g,
-+ ccgr_query.cm_query.ctl_wr_en_y,
-+ ccgr_query.cm_query.ctl_wr_en_r);
-+
-+ seq_printf(file, " cscn_en: %u\n", ccgr_query.cm_query.ctl_cscn_en);
-+ seq_puts(file, " cscn_targ_dcp:\n");
-+ mask = 0x80000000;
-+ for (i = 0; i < 32; i++) {
-+ if (ccgr_query.cm_query.cscn_targ_dcp & mask)
-+ seq_printf(file, " send CSCN to dcp %u\n", (31 - i));
-+ mask >>= 1;
-+ }
-+
-+ seq_puts(file, " cscn_targ_swp:\n");
-+ for (i = 0; i < 4; i++) {
-+ mask = 0x80000000;
-+ for (j = 0; j < 32; j++) {
-+ if (ccgr_query.cm_query.cscn_targ_swp[i] & mask)
-+ seq_printf(file, " send CSCN to swp"
-+ "%u\n", (127 - (i * 32) - j));
-+ mask >>= 1;
-+ }
-+ }
-+
-+ seq_printf(file, " td_en: %u\n", ccgr_query.cm_query.ctl_td_en);
-+
-+ seq_printf(file, " cs_thresh_in_TA: %u, cs_thresh_in_Tn: %u\n",
-+ ccgr_query.cm_query.cs_thres.TA,
-+ ccgr_query.cm_query.cs_thres.Tn);
-+
-+ seq_printf(file, " cs_thresh_out_TA: %u, cs_thresh_out_Tn: %u\n",
-+ ccgr_query.cm_query.cs_thres_x.TA,
-+ ccgr_query.cm_query.cs_thres_x.Tn);
-+
-+ seq_printf(file, " td_thresh_TA: %u, td_thresh_Tn: %u\n",
-+ ccgr_query.cm_query.td_thres.TA,
-+ ccgr_query.cm_query.td_thres.Tn);
-+
-+ seq_printf(file, " mode: %s\n",
-+ (ccgr_query.cm_query.ctl_mode &
-+ QMAN_CGR_MODE_FRAME) ?
-+ "frame count" : "byte count");
-+ seq_printf(file, " i_cnt: %llu\n", (u64)ccgr_query.cm_query.i_cnt);
-+ seq_printf(file, " a_cnt: %llu\n", (u64)ccgr_query.cm_query.a_cnt);
-+
-+ return 0;
-+}
-+
-+static int query_ccgr_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, query_ccgr_show, NULL);
-+}
-+
-+static ssize_t query_ccgr_write(struct file *f, const char __user *buf,
-+ size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ query_ccgr_data.ccgid = val;
-+ return count;
-+}
-+
-+static const struct file_operations query_ccgr_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_ccgr_open,
-+ .read = seq_read,
-+ .write = query_ccgr_write,
-+ .release = single_release,
-+};
-+/*******************************************************************************
-+ * QMan register
-+ ******************************************************************************/
-+struct qman_register_s {
-+ u32 val;
-+};
-+static struct qman_register_s qman_register_data;
-+
-+static void init_ccsrmempeek(void)
-+{
-+ struct device_node *dn;
-+ const u32 *regaddr_p;
-+
-+ dn = of_find_compatible_node(NULL, NULL, "fsl,qman");
-+ if (!dn) {
-+ pr_info("No fsl,qman node\n");
-+ return;
-+ }
-+ regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL);
-+ if (!regaddr_p) {
-+ of_node_put(dn);
-+ return;
-+ }
-+ qman_ccsr_start = of_translate_address(dn, regaddr_p);
-+ of_node_put(dn);
-+}
-+/* This function provides access to QMan ccsr memory map */
-+static int qman_ccsrmempeek(u32 *val, u32 offset)
-+{
-+ void __iomem *addr;
-+ u64 phys_addr;
-+
-+ if (!qman_ccsr_start)
-+ return -EINVAL;
-+
-+ if (offset > (qman_ccsr_size - sizeof(u32)))
-+ return -EINVAL;
-+
-+ phys_addr = qman_ccsr_start + offset;
-+ addr = ioremap(phys_addr, sizeof(u32));
-+ if (!addr) {
-+ pr_err("ccsrmempeek, ioremap failed\n");
-+ return -EINVAL;
-+ }
-+ *val = in_be32(addr);
-+ iounmap(addr);
-+ return 0;
-+}
-+
-+static int qman_ccsrmempeek_show(struct seq_file *file, void *offset)
-+{
-+ u32 b;
-+
-+ qman_ccsrmempeek(&b, qman_register_data.val);
-+ seq_printf(file, "QMan register offset = 0x%x\n",
-+ qman_register_data.val);
-+ seq_printf(file, "value = 0x%08x\n", b);
-+
-+ return 0;
-+}
-+
-+static int qman_ccsrmempeek_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, qman_ccsrmempeek_show, NULL);
-+}
-+
-+static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf,
-+ size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ /* multiple of 4 */
-+ if (val > (qman_ccsr_size - sizeof(u32))) {
-+ pr_info("Input 0x%lx > 0x%llx\n",
-+ val, (qman_ccsr_size - sizeof(u32)));
-+ return -EINVAL;
-+ }
-+ if (val & 0x3) {
-+ pr_info("Input 0x%lx not multiple of 4\n", val);
-+ return -EINVAL;
-+ }
-+ qman_register_data.val = val;
-+ return count;
-+}
-+
-+static const struct file_operations qman_ccsrmempeek_fops = {
-+ .owner = THIS_MODULE,
-+ .open = qman_ccsrmempeek_open,
-+ .read = seq_read,
-+ .write = qman_ccsrmempeek_write,
-+};
-+
-+/*******************************************************************************
-+ * QMan state
-+ ******************************************************************************/
-+static int qman_fqd_state_show(struct seq_file *file, void *offset)
-+{
-+ struct qm_mcr_queryfq_np np;
-+ struct qman_fq fq;
-+ struct line_buffer_fq line_buf;
-+ int ret, i;
-+ u8 *state = file->private;
-+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
-+
-+ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
-+ memset(&line_buf, 0, sizeof(line_buf));
-+
-+ seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]);
-+
-+ for (i = 1; i < fqid_max; i++) {
-+ fq.fqid = i;
-+ ret = qman_query_fq_np(&fq, &np);
-+ if (ret)
-+ return ret;
-+ if (*state == (np.state & QM_MCR_NP_STATE_MASK))
-+ add_to_line_buffer(&line_buf, fq.fqid, file);
-+ /* Keep a summary count of all states */
-+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
-+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
-+ }
-+ flush_line_buffer(&line_buf, file);
-+
-+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
-+ seq_printf(file, "%s count = %u\n", state_txt[i],
-+ qm_fq_state_cnt[i]);
-+ }
-+ return 0;
-+}
-+
-+static int qman_fqd_state_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, qman_fqd_state_show, inode->i_private);
-+}
-+
-+static const struct file_operations qman_fqd_state_fops = {
-+ .owner = THIS_MODULE,
-+ .open = qman_fqd_state_open,
-+ .read = seq_read,
-+};
-+
-+static int qman_fqd_ctrl_show(struct seq_file *file, void *offset)
-+{
-+ struct qm_fqd fqd;
-+ struct qman_fq fq;
-+ u32 fq_en_cnt = 0, fq_di_cnt = 0;
-+ int ret, i;
-+ struct mask_filter_s *data = file->private;
-+ const char *ctrl_txt = get_fqd_ctrl_text(data->mask);
-+ struct line_buffer_fq line_buf;
-+
-+ memset(&line_buf, 0, sizeof(line_buf));
-+ seq_printf(file, "List of fq ids with: %s :%s\n",
-+ ctrl_txt, (data->filter) ? "enabled" : "disabled");
-+ for (i = 1; i < fqid_max; i++) {
-+ fq.fqid = i;
-+ memset(&fqd, 0, sizeof(struct qm_fqd));
-+ ret = qman_query_fq(&fq, &fqd);
-+ if (ret)
-+ return ret;
-+ if (data->filter) {
-+ if (fqd.fq_ctrl & data->mask)
-+ add_to_line_buffer(&line_buf, fq.fqid, file);
-+ } else {
-+ if (!(fqd.fq_ctrl & data->mask))
-+ add_to_line_buffer(&line_buf, fq.fqid, file);
-+ }
-+ if (fqd.fq_ctrl & data->mask)
-+ fq_en_cnt++;
-+ else
-+ fq_di_cnt++;
-+ }
-+ flush_line_buffer(&line_buf, file);
-+
-+ seq_printf(file, "Total FQD with: %s : enabled = %u\n",
-+ ctrl_txt, fq_en_cnt);
-+ seq_printf(file, "Total FQD with: %s : disabled = %u\n",
-+ ctrl_txt, fq_di_cnt);
-+ return 0;
-+}
-+
-+/*******************************************************************************
-+ * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE
-+ ******************************************************************************/
-+static int qman_fqd_ctrl_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, qman_fqd_ctrl_show, inode->i_private);
-+}
-+
-+static const struct file_operations qman_fqd_ctrl_fops = {
-+ .owner = THIS_MODULE,
-+ .open = qman_fqd_ctrl_open,
-+ .read = seq_read,
-+};
-+
-+/*******************************************************************************
-+ * QMan ctrl summary
-+ ******************************************************************************/
-+/*******************************************************************************
-+ * QMan summary state
-+ ******************************************************************************/
-+static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset)
-+{
-+ struct qm_mcr_queryfq_np np;
-+ struct qman_fq fq;
-+ int ret, i;
-+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
-+
-+ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
-+
-+ for (i = 1; i < fqid_max; i++) {
-+ fq.fqid = i;
-+ ret = qman_query_fq_np(&fq, &np);
-+ if (ret)
-+ return ret;
-+ /* Keep a summary count of all states */
-+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
-+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
-+ }
-+
-+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
-+ seq_printf(file, "%s count = %u\n", state_txt[i],
-+ qm_fq_state_cnt[i]);
-+ }
-+ return 0;
-+}
-+
-+static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset)
-+{
-+ struct qm_fqd fqd;
-+ struct qman_fq fq;
-+ int ret, i , j;
-+ u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2];
-+
-+ memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt));
-+
-+ for (i = 1; i < fqid_max; i++) {
-+ memset(&fqd, 0, sizeof(struct qm_fqd));
-+ fq.fqid = i;
-+ ret = qman_query_fq(&fq, &fqd);
-+ if (ret)
-+ return ret;
-+ /* Keep a summary count of all states */
-+ for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2)
-+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
-+ mask_filter[j].mask)
-+ qm_prog_cnt[j/2]++;
-+ }
-+ for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) {
-+ seq_printf(file, "%s count = %u\n",
-+ get_fqd_ctrl_text(mask_filter[i*2].mask),
-+ qm_prog_cnt[i]);
-+ }
-+ return 0;
-+}
-+
-+static int qman_fqd_summary_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+
-+ /* Display summary of non programmable fields */
-+ ret = qman_fqd_non_prog_summary_show(file, offset);
-+ if (ret)
-+ return ret;
-+ seq_puts(file, "-----------------------------------------\n");
-+ /* Display programmable fields */
-+ ret = qman_fqd_prog_summary_show(file, offset);
-+ if (ret)
-+ return ret;
-+ return 0;
-+}
-+
-+static int qman_fqd_summary_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, qman_fqd_summary_show, NULL);
-+}
-+
-+static const struct file_operations qman_fqd_summary_fops = {
-+ .owner = THIS_MODULE,
-+ .open = qman_fqd_summary_open,
-+ .read = seq_read,
-+};
-+
-+/*******************************************************************************
-+ * QMan destination work queue
-+ ******************************************************************************/
-+struct qman_dest_wq_s {
-+ u16 wq_id;
-+};
-+static struct qman_dest_wq_s qman_dest_wq_data = {
-+ .wq_id = 0,
-+};
-+
-+static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset)
-+{
-+ struct qm_fqd fqd;
-+ struct qman_fq fq;
-+ int ret, i;
-+ u16 *wq, wq_id = qman_dest_wq_data.wq_id;
-+ struct line_buffer_fq line_buf;
-+
-+ memset(&line_buf, 0, sizeof(line_buf));
-+ /* use vmalloc : need to allocate large memory region and don't
-+ * require the memory to be physically contiguous. */
-+ wq = vzalloc(sizeof(u16) * (0xFFFF+1));
-+ if (!wq)
-+ return -ENOMEM;
-+
-+ seq_printf(file, "List of fq ids with destination work queue id"
-+ " = 0x%x\n", wq_id);
-+
-+ for (i = 1; i < fqid_max; i++) {
-+ fq.fqid = i;
-+ memset(&fqd, 0, sizeof(struct qm_fqd));
-+ ret = qman_query_fq(&fq, &fqd);
-+ if (ret) {
-+ vfree(wq);
-+ return ret;
-+ }
-+ if (wq_id == fqd.dest_wq)
-+ add_to_line_buffer(&line_buf, fq.fqid, file);
-+ wq[fqd.dest_wq]++;
-+ }
-+ flush_line_buffer(&line_buf, file);
-+
-+ seq_puts(file, "Summary of all FQD destination work queue values\n");
-+ for (i = 0; i < 0xFFFF; i++) {
-+ if (wq[i])
-+ seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, "
-+ "count = %u\n", i >> 3, i & 0x3, i, wq[i]);
-+ }
-+ vfree(wq);
-+ return 0;
-+}
-+
-+static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
-+ size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ if (val > 0xFFFF)
-+ return -EINVAL;
-+ qman_dest_wq_data.wq_id = val;
-+ return count;
-+}
-+
-+static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, qman_fqd_dest_wq_show, NULL);
-+}
-+
-+static const struct file_operations qman_fqd_dest_wq_fops = {
-+ .owner = THIS_MODULE,
-+ .open = qman_fqd_dest_wq_open,
-+ .read = seq_read,
-+ .write = qman_fqd_dest_wq_write,
-+};
-+
-+/*******************************************************************************
-+ * QMan Intra-Class Scheduling Credit
-+ ******************************************************************************/
-+static int qman_fqd_cred_show(struct seq_file *file, void *offset)
-+{
-+ struct qm_fqd fqd;
-+ struct qman_fq fq;
-+ int ret, i;
-+ u32 fq_cnt = 0;
-+ struct line_buffer_fq line_buf;
-+
-+ memset(&line_buf, 0, sizeof(line_buf));
-+ seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0"
-+ "\n");
-+
-+ for (i = 1; i < fqid_max; i++) {
-+ fq.fqid = i;
-+ memset(&fqd, 0, sizeof(struct qm_fqd));
-+ ret = qman_query_fq(&fq, &fqd);
-+ if (ret)
-+ return ret;
-+ if (fqd.ics_cred > 0) {
-+ add_to_line_buffer(&line_buf, fq.fqid, file);
-+ fq_cnt++;
-+ }
-+ }
-+ flush_line_buffer(&line_buf, file);
-+
-+ seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
-+ return 0;
-+}
-+
-+static int qman_fqd_cred_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, qman_fqd_cred_show, NULL);
-+}
-+
-+static const struct file_operations qman_fqd_cred_fops = {
-+ .owner = THIS_MODULE,
-+ .open = qman_fqd_cred_open,
-+ .read = seq_read,
-+};
-+
-+/*******************************************************************************
-+ * Class Queue Fields
-+ ******************************************************************************/
-+struct query_cq_fields_data_s {
-+ u32 cqid;
-+};
-+
-+static struct query_cq_fields_data_s query_cq_fields_data = {
-+ .cqid = 1,
-+};
-+
-+static int query_cq_fields_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ struct qm_mcr_ceetm_cq_query query_result;
-+ unsigned int cqid;
-+ unsigned int portal;
-+
-+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
-+ return -EINVAL;
-+
-+ cqid = query_cq_fields_data.cqid & 0x00FFFFFF;
-+ portal = query_cq_fields_data.cqid >> 24;
-+ if (portal > qm_dc_portal_fman1)
-+ return -EINVAL;
-+
-+ ret = qman_ceetm_query_cq(cqid, portal, &query_result);
-+ if (ret)
-+ return ret;
-+ seq_printf(file, "Query CQ Fields Result cqid 0x%x on DCP %d\n",
-+ cqid, portal);
-+ seq_printf(file, " ccgid: %u\n", query_result.ccgid);
-+ seq_printf(file, " state: %u\n", query_result.state);
-+ seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr);
-+ seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr);
-+ seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr);
-+ seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr);
-+ seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr);
-+ seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr);
-+ seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr);
-+ seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr);
-+ seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr);
-+ seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr);
-+ seq_printf(file, " frame_count: %u\n", query_result.frm_cnt);
-+
-+ return 0;
-+}
-+
-+static int query_cq_fields_open(struct inode *inode,
-+ struct file *file)
-+{
-+ return single_open(file, query_cq_fields_show, NULL);
-+}
-+
-+static ssize_t query_cq_fields_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ query_cq_fields_data.cqid = (u32)val;
-+ return count;
-+}
-+
-+static const struct file_operations query_cq_fields_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_cq_fields_open,
-+ .read = seq_read,
-+ .write = query_cq_fields_write,
-+ .release = single_release,
-+};
-+
-+/*******************************************************************************
-+ * READ CEETM_XSFDR_IN_USE
-+ ******************************************************************************/
-+struct query_ceetm_xsfdr_data_s {
-+ enum qm_dc_portal dcp_portal;
-+};
-+
-+static struct query_ceetm_xsfdr_data_s query_ceetm_xsfdr_data;
-+
-+static int query_ceetm_xsfdr_show(struct seq_file *file, void *offset)
-+{
-+ int ret;
-+ unsigned int xsfdr_in_use;
-+ enum qm_dc_portal portal;
-+
-+
-+ if (qman_ip_rev < QMAN_REV31)
-+ return -EINVAL;
-+
-+ portal = query_ceetm_xsfdr_data.dcp_portal;
-+ ret = qman_ceetm_get_xsfdr(portal, &xsfdr_in_use);
-+ if (ret) {
-+ seq_printf(file, "Read CEETM_XSFDR_IN_USE on DCP %d failed\n",
-+ portal);
-+ return ret;
-+ }
-+
-+ seq_printf(file, "DCP%d: CEETM_XSFDR_IN_USE number is %u\n", portal,
-+ (xsfdr_in_use & 0x1FFF));
-+ return 0;
-+}
-+
-+static int query_ceetm_xsfdr_open(struct inode *inode,
-+ struct file *file)
-+{
-+ return single_open(file, query_ceetm_xsfdr_show, NULL);
-+}
-+
-+static ssize_t query_ceetm_xsfdr_write(struct file *f,
-+ const char __user *buf, size_t count, loff_t *off)
-+{
-+ int ret;
-+ unsigned long val;
-+
-+ ret = user_input_convert(buf, count, &val);
-+ if (ret)
-+ return ret;
-+ if (val > qm_dc_portal_fman1)
-+ return -EINVAL;
-+ query_ceetm_xsfdr_data.dcp_portal = (u32)val;
-+ return count;
-+}
-+
-+static const struct file_operations query_ceetm_xsfdr_fops = {
-+ .owner = THIS_MODULE,
-+ .open = query_ceetm_xsfdr_open,
-+ .read = seq_read,
-+ .write = query_ceetm_xsfdr_write,
-+ .release = single_release,
-+};
-+
-+/* helper macros used in qman_debugfs_module_init */
-+#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \
-+ do { \
-+ d = debugfs_create_file(name, \
-+ mode, parent, \
-+ data, \
-+ fops); \
-+ if (d == NULL) { \
-+ ret = -ENOMEM; \
-+ goto _return; \
-+ } \
-+ } while (0)
-+
-+/* dfs_root as parent */
-+#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \
-+ QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops)
-+
-+/* fqd_root as parent */
-+#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \
-+ QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops)
-+
-+/* fqd state */
-+#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \
-+ QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \
-+ (void *)&mask_filter[index], &qman_fqd_ctrl_fops)
-+
-+static int __init qman_debugfs_module_init(void)
-+{
-+ int ret = 0;
-+ struct dentry *d, *fqd_root;
-+ u32 reg;
-+
-+ fqid_max = 0;
-+ init_ccsrmempeek();
-+ if (qman_ccsr_start) {
-+ if (!qman_ccsrmempeek(&reg, QM_FQD_AR)) {
-+ /* extract the size of the FQD window */
-+ reg = reg & 0x3f;
-+ /* calculate valid frame queue descriptor range */
-+ fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE;
-+ }
-+ }
-+ dfs_root = debugfs_create_dir("qman", NULL);
-+ fqd_root = debugfs_create_dir("fqd", dfs_root);
-+ if (dfs_root == NULL || fqd_root == NULL) {
-+ ret = -ENOMEM;
-+ pr_err("Cannot create qman/fqd debugfs dir\n");
-+ goto _return;
-+ }
-+ if (fqid_max) {
-+ QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO,
-+ NULL, &qman_ccsrmempeek_fops);
-+ }
-+ QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO,
-+ &query_fq_np_fields_data, &query_fq_np_fields_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO,
-+ &query_fq_fields_data, &query_fq_fields_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO,
-+ &query_wq_lengths_data, &query_wq_lengths_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
-+ &query_cgr_data, &query_cgr_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO,
-+ NULL, &query_congestion_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO,
-+ NULL, &testwrite_cgr_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO,
-+ NULL, &teswrite_cgr_cgrid_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO,
-+ NULL, &teswrite_cgr_ibcnt_fops);
-+
-+ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_ccgr", S_IRUGO | S_IWUGO,
-+ &query_ccgr_data, &query_ccgr_fops);
-+ /* Create files with fqd_root as parent */
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO,
-+ (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO,
-+ (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED],
-+ &qman_fqd_state_fops);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO,
-+ (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED],
-+ &qman_fqd_state_fops);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO,
-+ (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED],
-+ &qman_fqd_state_fops);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO,
-+ (void *)&fqd_states[QM_MCR_NP_STATE_PARKED],
-+ &qman_fqd_state_fops);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO,
-+ (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE],
-+ &qman_fqd_state_fops);
-+ QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO,
-+ &query_cq_fields_data, &query_cq_fields_fops);
-+ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_xsfdr_in_use", S_IRUGO | S_IWUGO,
-+ &query_ceetm_xsfdr_data, &query_ceetm_xsfdr_fops);
-+
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1);
-+
-+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO,
-+ NULL, &qman_fqd_summary_fops);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO,
-+ NULL, &qman_fqd_dest_wq_fops);
-+
-+ QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO,
-+ NULL, &qman_fqd_cred_fops);
-+
-+ return 0;
-+
-+_return:
-+ debugfs_remove_recursive(dfs_root);
-+ return ret;
-+}
-+
-+static void __exit qman_debugfs_module_exit(void)
-+{
-+ debugfs_remove_recursive(dfs_root);
-+}
-+
-+module_init(qman_debugfs_module_init);
-+module_exit(qman_debugfs_module_exit);
-+MODULE_LICENSE("Dual BSD/GPL");
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_driver.c
-@@ -0,0 +1,961 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qman_private.h"
-+
-+#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */
-+#ifdef CONFIG_HOTPLUG_CPU
-+#include <linux/cpu.h>
-+#endif
-+
-+/* Global variable containing revision id (even on non-control plane systems
-+ * where CCSR isn't available) */
-+u16 qman_ip_rev;
-+EXPORT_SYMBOL(qman_ip_rev);
-+u8 qman_ip_cfg;
-+EXPORT_SYMBOL(qman_ip_cfg);
-+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
-+EXPORT_SYMBOL(qm_channel_pool1);
-+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
-+EXPORT_SYMBOL(qm_channel_caam);
-+u16 qm_channel_pme = QMAN_CHANNEL_PME;
-+EXPORT_SYMBOL(qm_channel_pme);
-+u16 qm_channel_dce = QMAN_CHANNEL_DCE;
-+EXPORT_SYMBOL(qm_channel_dce);
-+u16 qman_portal_max;
-+EXPORT_SYMBOL(qman_portal_max);
-+
-+u32 qman_clk;
-+struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
-+/* the qman ceetm instances on the given SoC */
-+u8 num_ceetms;
-+
-+/* For these variables, and the portal-initialisation logic, the
-+ * comments in bman_driver.c apply here so won't be repeated. */
-+static struct qman_portal *shared_portals[NR_CPUS];
-+static int num_shared_portals;
-+static int shared_portals_idx;
-+static LIST_HEAD(unused_pcfgs);
-+static DEFINE_SPINLOCK(unused_pcfgs_lock);
-+
-+/* A SDQCR mask comprising all the available/visible pool channels */
-+static u32 pools_sdqcr;
-+
-+#define STR_ERR_NOPROP "No '%s' property in node %s\n"
-+#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
-+#define STR_FQID_RANGE "fsl,fqid-range"
-+#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
-+#define STR_CGRID_RANGE "fsl,cgrid-range"
-+
-+/* A "fsl,fqid-range" node; release the given range to the allocator */
-+static __init int fsl_fqid_range_init(struct device_node *node)
-+{
-+ int ret;
-+ const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
-+ if (!range) {
-+ pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
-+ return -EINVAL;
-+ }
-+ qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ pr_info("Qman: FQID allocator includes range %d:%d\n",
-+ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ return 0;
-+}
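
A quick userspace sketch of the 2-cell range decoding that the seeding helpers above rely on; the property payload, the get_be32() helper and the printed text are illustrative only and not taken from a real device tree.

#include <stdio.h>
#include <stdint.h>

/* Decode one big-endian cell from a flattened-device-tree style buffer */
static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* Hypothetical <base count> payload, e.g. <0x100 0x100> in the DTS */
	const uint8_t prop[8] = { 0x00, 0x00, 0x01, 0x00,
				  0x00, 0x00, 0x01, 0x00 };
	uint32_t base = get_be32(prop);
	uint32_t count = get_be32(prop + 4);

	/* A (base, count) pair of this form is what qman_seed_fqid_range()
	 * receives in the driver code above. */
	printf("FQID allocator range %u:%u\n", base, count);
	return 0;
}
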
-+
-+/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
-+static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
-+{
-+ int ret;
-+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-+ if (!chanid) {
-+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
-+ return -EINVAL;
-+ }
-+ for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++)
-+ pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret);
-+ return 0;
-+}
-+
-+/* A "fsl,pool-channel-range" node; release the given range to the allocator */
-+static __init int fsl_pool_channel_range_init(struct device_node *node)
-+{
-+ int ret;
-+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
-+ if (!chanid) {
-+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
-+ return -EINVAL;
-+ }
-+ qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
-+ pr_info("Qman: pool channel allocator includes range %d:%d\n",
-+ be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
-+ return 0;
-+}
-+
-+/* A "fsl,cgrid-range" node; release the given range to the allocator */
-+static __init int fsl_cgrid_range_init(struct device_node *node)
-+{
-+ struct qman_cgr cgr;
-+ int ret, errors = 0;
-+ const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
-+ if (!range) {
-+ pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
-+ return -EINVAL;
-+ }
-+ qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ pr_info("Qman: CGRID allocator includes range %d:%d\n",
-+ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
-+ ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
-+ if (ret)
-+ errors++;
-+ }
-+ if (errors)
-+ pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
-+ errors, (errors > 1) ? "s" : "", range[0], range[1]);
-+ return 0;
-+}
-+
-+static __init int fsl_ceetm_init(struct device_node *node)
-+{
-+ enum qm_dc_portal dcp_portal;
-+ struct qm_ceetm_sp *sp;
-+ struct qm_ceetm_lni *lni;
-+ int ret, i;
-+ const u32 *range;
-+
-+ /* Find LFQID range */
-+ range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret);
-+ if (!range) {
-+ pr_err("No fsl,ceetm-lfqid-range in node %s\n",
-+ node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node"
-+ " %s\n", node->full_name);
-+ return -EINVAL;
-+ }
-+
-+ dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16;
-+ if (dcp_portal > qm_dc_portal_fman1) {
-+ pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal);
-+ return -EINVAL;
-+ }
-+
-+ if (dcp_portal == qm_dc_portal_fman0)
-+ qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ if (dcp_portal == qm_dc_portal_fman1)
-+ qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ pr_debug("Qman: The lfqid allocator of CEETM %d includes range"
-+ " 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+
-+ qman_ceetms[dcp_portal].idx = dcp_portal;
-+ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals);
-+ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis);
-+
-+ /* Find Sub-portal range */
-+ range = of_get_property(node, "fsl,ceetm-sp-range", &ret);
-+ if (!range) {
-+ pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n",
-+ node->full_name);
-+ return -EINVAL;
-+ }
-+
-+ for (i = 0; i < be32_to_cpu(range[1]); i++) {
-+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
-+ if (!sp) {
-+ pr_err("Can't alloc memory for sub-portal %d\n",
-+ range[0] + i);
-+ return -ENOMEM;
-+ }
-+ sp->idx = be32_to_cpu(range[0]) + i;
-+ sp->dcp_idx = dcp_portal;
-+ sp->is_claimed = 0;
-+ list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals);
-+ sp++;
-+ }
-+ pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n",
-+ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
-+ qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]);
-+ qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]);
-+
-+ /* Find LNI range */
-+ range = of_get_property(node, "fsl,ceetm-lni-range", &ret);
-+ if (!range) {
-+ pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+ pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n",
-+ node->full_name);
-+ return -EINVAL;
-+ }
-+
-+ for (i = 0; i < be32_to_cpu(range[1]); i++) {
-+ lni = kzalloc(sizeof(*lni), GFP_KERNEL);
-+ if (!lni) {
-+ pr_err("Can't alloc memory for LNI %d\n",
-+ range[0] + i);
-+ return -ENOMEM;
-+ }
-+ lni->idx = be32_to_cpu(range[0]) + i;
-+ lni->dcp_idx = dcp_portal;
-+ lni->is_claimed = 0;
-+ INIT_LIST_HEAD(&lni->channels);
-+ list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis);
-+ lni++;
-+ }
-+ pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n",
-+ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
-+ qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]);
-+ qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]);
-+
-+ /* Find CEETM channel range */
-+ range = of_get_property(node, "fsl,ceetm-channel-range", &ret);
-+ if (!range) {
-+ pr_err("No fsl,ceetm-channel-range in node %s\n",
-+ node->full_name);
-+ return -EINVAL;
-+ }
-+ if (ret != 8) {
-+		pr_err("fsl,ceetm-channel-range is not a 2-cell range in node"
-+			" %s\n", node->full_name);
-+ return -EINVAL;
-+ }
-+
-+ if (dcp_portal == qm_dc_portal_fman0)
-+ qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ if (dcp_portal == qm_dc_portal_fman1)
-+ qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+ pr_debug("Qman: The channel allocator of CEETM %d includes"
-+ " range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
-+
-+ /* Set CEETM PRES register */
-+ ret = qman_ceetm_set_prescaler(dcp_portal);
-+ if (ret)
-+ return ret;
-+ return 0;
-+}
-+
-+static void qman_get_ip_revision(struct device_node *dn)
-+{
-+ u16 ip_rev = 0;
-+ u8 ip_cfg = QMAN_REV_CFG_0;
-+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
-+ if (!of_device_is_available(dn))
-+ continue;
-+ if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
-+ of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
-+ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
-+ BUG_ON(1);
-+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
-+ of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
-+ ip_rev = QMAN_REV11;
-+ qman_portal_max = 10;
-+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
-+ of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
-+ ip_rev = QMAN_REV12;
-+ qman_portal_max = 10;
-+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
-+ of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
-+ ip_rev = QMAN_REV20;
-+ qman_portal_max = 3;
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.0.0")) {
-+ ip_rev = QMAN_REV30;
-+ qman_portal_max = 50;
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.0.1")) {
-+ ip_rev = QMAN_REV30;
-+ qman_portal_max = 25;
-+ ip_cfg = QMAN_REV_CFG_1;
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.1.0")) {
-+ ip_rev = QMAN_REV31;
-+ qman_portal_max = 50;
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.1.1")) {
-+ ip_rev = QMAN_REV31;
-+ qman_portal_max = 25;
-+ ip_cfg = QMAN_REV_CFG_1;
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.1.2")) {
-+ ip_rev = QMAN_REV31;
-+ qman_portal_max = 18;
-+ ip_cfg = QMAN_REV_CFG_2;
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.1.3")) {
-+ ip_rev = QMAN_REV31;
-+ qman_portal_max = 10;
-+ ip_cfg = QMAN_REV_CFG_3;
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.2.0")) {
-+ ip_rev = QMAN_REV32;
-+ qman_portal_max = 10;
-+ ip_cfg = QMAN_REV_CFG_3; // TODO: Verify for ls1043
-+ } else if (of_device_is_compatible(dn,
-+ "fsl,qman-portal-3.2.1")) {
-+ ip_rev = QMAN_REV32;
-+ qman_portal_max = 10;
-+ ip_cfg = QMAN_REV_CFG_3;
-+ } else {
-+			pr_warn("unknown QMan version in portal node,"
-+				" default to rev1.1\n");
-+ ip_rev = QMAN_REV11;
-+ qman_portal_max = 10;
-+ }
-+
-+ if (!qman_ip_rev) {
-+ if (ip_rev) {
-+ qman_ip_rev = ip_rev;
-+ qman_ip_cfg = ip_cfg;
-+ } else {
-+ pr_warn("unknown Qman version,"
-+ " default to rev1.1\n");
-+ qman_ip_rev = QMAN_REV11;
-+ qman_ip_cfg = QMAN_REV_CFG_0;
-+ }
-+ } else if (ip_rev && (qman_ip_rev != ip_rev))
-+ pr_warn("Revision=0x%04x, but portal '%s' has"
-+ " 0x%04x\n",
-+ qman_ip_rev, dn->full_name, ip_rev);
-+ if (qman_ip_rev == ip_rev)
-+ break;
-+ }
-+}
-+
-+/* Parse a portal node, perform generic mapping duties and return the config. It
-+ * is not known at this stage for what purpose (or even if) the portal will be
-+ * used. */
-+static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
-+{
-+ struct qm_portal_config *pcfg;
-+ const u32 *index_p;
-+ u32 index, channel;
-+ int irq, ret;
-+ resource_size_t len;
-+
-+ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
-+ if (!pcfg) {
-+ pr_err("can't allocate portal config");
-+ return NULL;
-+ }
-+
-+ /*
-+ * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
-+ * 'struct device' in order to get the PAMU stashing setup and the QMan
-+ * portal [driver] won't function at all without ring stashing
-+ *
-+ * Making the QMan portal driver nice and proper is part of the
-+ * upstreaming effort
-+ */
-+ pcfg->dev.bus = &platform_bus_type;
-+ pcfg->dev.of_node = node;
-+#ifdef CONFIG_FSL_PAMU
-+ pcfg->dev.archdata.iommu_domain = NULL;
-+#endif
-+
-+ ret = of_address_to_resource(node, DPA_PORTAL_CE,
-+ &pcfg->addr_phys[DPA_PORTAL_CE]);
-+ if (ret) {
-+ pr_err("Can't get %s property '%s'\n", node->full_name,
-+ "reg::CE");
-+ goto err;
-+ }
-+ ret = of_address_to_resource(node, DPA_PORTAL_CI,
-+ &pcfg->addr_phys[DPA_PORTAL_CI]);
-+ if (ret) {
-+ pr_err("Can't get %s property '%s'\n", node->full_name,
-+ "reg::CI");
-+ goto err;
-+ }
-+ index_p = of_get_property(node, "cell-index", &ret);
-+ if (!index_p || (ret != 4)) {
-+ pr_err("Can't get %s property '%s'\n", node->full_name,
-+ "cell-index");
-+ goto err;
-+ }
-+ index = be32_to_cpu(*index_p);
-+ if (index >= qman_portal_max) {
-+ pr_err("QMan portal index %d is beyond max (%d)\n",
-+ index, qman_portal_max);
-+ goto err;
-+ }
-+
-+ channel = index + QM_CHANNEL_SWPORTAL0;
-+ pcfg->public_cfg.channel = channel;
-+ pcfg->public_cfg.cpu = -1;
-+ irq = irq_of_parse_and_map(node, 0);
-+ if (irq == 0) {
-+ pr_err("Can't get %s property '%s'\n", node->full_name,
-+ "interrupts");
-+ goto err;
-+ }
-+ pcfg->public_cfg.irq = irq;
-+ pcfg->public_cfg.index = index;
-+#ifdef CONFIG_FSL_QMAN_CONFIG
-+ /* We need the same LIODN offset for all portals */
-+ qman_liodn_fixup(pcfg->public_cfg.channel);
-+#endif
-+
-+ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
-+ if (len != (unsigned long)len)
-+ goto err;
-+
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
-+ pcfg->addr_phys[DPA_PORTAL_CE].start,
-+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
-+
-+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
-+ pcfg->addr_phys[DPA_PORTAL_CI].start,
-+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
-+#else
-+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
-+ pcfg->addr_phys[DPA_PORTAL_CE].start,
-+ (unsigned long)len,
-+ 0);
-+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
-+ pcfg->addr_phys[DPA_PORTAL_CI].start,
-+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
-+ _PAGE_GUARDED | _PAGE_NO_CACHE);
-+#endif
-+ return pcfg;
-+err:
-+ kfree(pcfg);
-+ return NULL;
-+}
-+
-+static struct qm_portal_config *get_pcfg(struct list_head *list)
-+{
-+ struct qm_portal_config *pcfg;
-+ if (list_empty(list))
-+ return NULL;
-+ pcfg = list_entry(list->prev, struct qm_portal_config, list);
-+ list_del(&pcfg->list);
-+ return pcfg;
-+}
-+
-+static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
-+{
-+ struct qm_portal_config *pcfg;
-+ if (list_empty(list))
-+ return NULL;
-+ list_for_each_entry(pcfg, list, list) {
-+ if (pcfg->public_cfg.index == idx) {
-+ list_del(&pcfg->list);
-+ return pcfg;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
-+{
-+#ifdef CONFIG_FSL_PAMU
-+ int ret;
-+ int window_count = 1;
-+ struct iommu_domain_geometry geom_attr;
-+ struct pamu_stash_attribute stash_attr;
-+
-+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
-+ if (!pcfg->iommu_domain) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
-+ __func__);
-+ goto _no_iommu;
-+ }
-+ geom_attr.aperture_start = 0;
-+ geom_attr.aperture_end =
-+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
-+ geom_attr.force_aperture = true;
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
-+ &geom_attr);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
-+ &window_count);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ stash_attr.cpu = cpu;
-+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
-+ /* set stash information for the window */
-+ stash_attr.window = 0;
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
-+ DOMAIN_ATTR_FSL_PAMU_STASH,
-+ &stash_attr);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
-+ IOMMU_READ | IOMMU_WRITE);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
-+ __func__, ret);
-+ goto _iommu_domain_free;
-+ }
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
-+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
-+ &window_count);
-+ if (ret < 0) {
-+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
-+ __func__, ret);
-+ goto _iommu_detach_device;
-+ }
-+
-+_no_iommu:
-+#endif
-+#ifdef CONFIG_FSL_QMAN_CONFIG
-+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
-+#endif
-+ pr_warn("Failed to set QMan portal's stash request queue\n");
-+
-+ return;
-+
-+#ifdef CONFIG_FSL_PAMU
-+_iommu_detach_device:
-+ iommu_detach_device(pcfg->iommu_domain, NULL);
-+_iommu_domain_free:
-+ iommu_domain_free(pcfg->iommu_domain);
-+#endif
-+}
-+
-+struct qm_portal_config *qm_get_unused_portal_idx(u32 idx)
-+{
-+ struct qm_portal_config *ret;
-+ spin_lock(&unused_pcfgs_lock);
-+ if (idx == QBMAN_ANY_PORTAL_IDX)
-+ ret = get_pcfg(&unused_pcfgs);
-+ else
-+ ret = get_pcfg_idx(&unused_pcfgs, idx);
-+ spin_unlock(&unused_pcfgs_lock);
-+ /* Bind stashing LIODNs to the CPU we are currently executing on, and
-+	 * set the portal to use the stashing request queue corresponding to the
-+ * cpu as well. The user-space driver assumption is that the pthread has
-+ * to already be affine to one cpu only before opening a portal. If that
-+ * check is circumvented, the only risk is a performance degradation -
-+ * stashing will go to whatever cpu they happened to be running on when
-+ * opening the device file, and if that isn't the cpu they subsequently
-+ * bind to and do their polling on, tough. */
-+ if (ret)
-+ portal_set_cpu(ret, hard_smp_processor_id());
-+ return ret;
-+}
-+
-+struct qm_portal_config *qm_get_unused_portal(void)
-+{
-+ return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
-+}
-+
-+void qm_put_unused_portal(struct qm_portal_config *pcfg)
-+{
-+ spin_lock(&unused_pcfgs_lock);
-+ list_add(&pcfg->list, &unused_pcfgs);
-+ spin_unlock(&unused_pcfgs_lock);
-+}
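
For orientation, a minimal sketch of how a caller could pair the claim/release helpers above; example_claim_portal() is a hypothetical function and the ellipsis stands for whatever per-user setup would happen in between.

/* Hypothetical caller of the helpers defined above; assumes process
 * context and a thread already affine to a single CPU, as the comment
 * in qm_get_unused_portal_idx() expects. */
static int example_claim_portal(void)
{
	struct qm_portal_config *pcfg;

	pcfg = qm_get_unused_portal();	/* claim any spare index */
	if (!pcfg)
		return -ENODEV;

	/* ... use pcfg->addr_virt[] and pcfg->public_cfg.irq here ... */

	qm_put_unused_portal(pcfg);	/* hand it back to the unused list */
	return 0;
}
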
-+
-+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
-+{
-+ struct qman_portal *p;
-+
-+ pcfg->iommu_domain = NULL;
-+ portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
-+ p = qman_create_affine_portal(pcfg, NULL);
-+ if (p) {
-+ u32 irq_sources = 0;
-+ /* Determine what should be interrupt-vs-poll driven */
-+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
-+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
-+ QM_PIRQ_CSCI | QM_PIRQ_CCSCI;
-+#endif
-+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
-+ irq_sources |= QM_PIRQ_DQRI;
-+#endif
-+ qman_p_irqsource_add(p, irq_sources);
-+ pr_info("Qman portal %sinitialised, cpu %d\n",
-+ pcfg->public_cfg.is_shared ? "(shared) " : "",
-+ pcfg->public_cfg.cpu);
-+ } else
-+ pr_crit("Qman portal failure on cpu %d\n",
-+ pcfg->public_cfg.cpu);
-+ return p;
-+}
-+
-+static void init_slave(int cpu)
-+{
-+ struct qman_portal *p;
-+ struct cpumask oldmask = current->cpus_allowed;
-+ set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
-+ p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
-+ if (!p)
-+ pr_err("Qman slave portal failure on cpu %d\n", cpu);
-+ else
-+ pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
-+ set_cpus_allowed_ptr(current, &oldmask);
-+ if (shared_portals_idx >= num_shared_portals)
-+ shared_portals_idx = 0;
-+}
-+
-+static struct cpumask want_unshared __initdata;
-+static struct cpumask want_shared __initdata;
-+
-+static int __init parse_qportals(char *str)
-+{
-+ return parse_portals_bootarg(str, &want_shared, &want_unshared,
-+ "qportals");
-+}
-+__setup("qportals=", parse_qportals);
-+
-+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
-+ unsigned int cpu)
-+{
-+#ifdef CONFIG_FSL_PAMU
-+ struct pamu_stash_attribute stash_attr;
-+ int ret;
-+
-+ if (pcfg->iommu_domain) {
-+ stash_attr.cpu = cpu;
-+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
-+ /* set stash information for the window */
-+ stash_attr.window = 0;
-+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
-+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
-+ if (ret < 0) {
-+ pr_err("Failed to update pamu stash setting\n");
-+ return;
-+ }
-+ }
-+#endif
-+#ifdef CONFIG_FSL_QMAN_CONFIG
-+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
-+ pr_warn("Failed to update portal's stash request queue\n");
-+#endif
-+}
-+
-+static int qman_offline_cpu(unsigned int cpu)
-+{
-+ struct qman_portal *p;
-+ const struct qm_portal_config *pcfg;
-+ p = (struct qman_portal *)affine_portals[cpu];
-+ if (p) {
-+ pcfg = qman_get_qm_portal_config(p);
-+ if (pcfg) {
-+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
-+ qman_portal_update_sdest(pcfg, 0);
-+ }
-+ }
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static int qman_online_cpu(unsigned int cpu)
-+{
-+ struct qman_portal *p;
-+ const struct qm_portal_config *pcfg;
-+ p = (struct qman_portal *)affine_portals[cpu];
-+ if (p) {
-+ pcfg = qman_get_qm_portal_config(p);
-+ if (pcfg) {
-+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
-+ qman_portal_update_sdest(pcfg, cpu);
-+ }
-+ }
-+ return 0;
-+}
-+
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+__init int qman_init(void)
-+{
-+ struct cpumask slave_cpus;
-+ struct cpumask unshared_cpus = *cpu_none_mask;
-+ struct cpumask shared_cpus = *cpu_none_mask;
-+ LIST_HEAD(unshared_pcfgs);
-+ LIST_HEAD(shared_pcfgs);
-+ struct device_node *dn;
-+ struct qm_portal_config *pcfg;
-+ struct qman_portal *p;
-+ int cpu, ret;
-+ const u32 *clk;
-+ struct cpumask offline_cpus;
-+
-+ /* Initialise the Qman (CCSR) device */
-+ for_each_compatible_node(dn, NULL, "fsl,qman") {
-+ if (!qman_init_ccsr(dn))
-+ pr_info("Qman err interrupt handler present\n");
-+ else
-+ pr_err("Qman CCSR setup failed\n");
-+
-+ clk = of_get_property(dn, "clock-frequency", NULL);
-+ if (!clk)
-+ pr_warn("Can't find Qman clock frequency\n");
-+ else
-+ qman_clk = be32_to_cpu(*clk);
-+ }
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ /* Setup lookup table for FQ demux */
-+ ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64);
-+ if (ret)
-+ return ret;
-+#endif
-+
-+ /* Get qman ip revision */
-+ qman_get_ip_revision(dn);
-+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
-+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
-+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
-+ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
-+ }
-+
-+ if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2))
-+ qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312;
-+
-+ /*
-+ * Parse the ceetm node to get how many ceetm instances are supported
-+ * on the current silicon. num_ceetms must be confirmed before portals
-+	 * are initialized.
-+ */
-+ num_ceetms = 0;
-+ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm")
-+ num_ceetms++;
-+
-+ /* Parse pool channels into the SDQCR mask. (Must happen before portals
-+ * are initialised.) */
-+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
-+ ret = fsl_pool_channel_range_sdqcr(dn);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
-+ /* Initialise portals. See bman_driver.c for comments */
-+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
-+ if (!of_device_is_available(dn))
-+ continue;
-+ pcfg = parse_pcfg(dn);
-+ if (pcfg) {
-+ pcfg->public_cfg.pools = pools_sdqcr;
-+ list_add_tail(&pcfg->list, &unused_pcfgs);
-+ }
-+ }
-+ for_each_possible_cpu(cpu) {
-+ if (cpumask_test_cpu(cpu, &want_shared)) {
-+ pcfg = get_pcfg(&unused_pcfgs);
-+ if (!pcfg)
-+ break;
-+ pcfg->public_cfg.cpu = cpu;
-+ list_add_tail(&pcfg->list, &shared_pcfgs);
-+ cpumask_set_cpu(cpu, &shared_cpus);
-+ }
-+ if (cpumask_test_cpu(cpu, &want_unshared)) {
-+ if (cpumask_test_cpu(cpu, &shared_cpus))
-+ continue;
-+ pcfg = get_pcfg(&unused_pcfgs);
-+ if (!pcfg)
-+ break;
-+ pcfg->public_cfg.cpu = cpu;
-+ list_add_tail(&pcfg->list, &unshared_pcfgs);
-+ cpumask_set_cpu(cpu, &unshared_cpus);
-+ }
-+ }
-+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
-+ for_each_online_cpu(cpu) {
-+ pcfg = get_pcfg(&unused_pcfgs);
-+ if (!pcfg)
-+ break;
-+ pcfg->public_cfg.cpu = cpu;
-+ list_add_tail(&pcfg->list, &unshared_pcfgs);
-+ cpumask_set_cpu(cpu, &unshared_cpus);
-+ }
-+ }
-+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
-+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
-+ if (cpumask_empty(&slave_cpus)) {
-+ if (!list_empty(&shared_pcfgs)) {
-+ cpumask_or(&unshared_cpus, &unshared_cpus,
-+ &shared_cpus);
-+ cpumask_clear(&shared_cpus);
-+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
-+ INIT_LIST_HEAD(&shared_pcfgs);
-+ }
-+ } else {
-+ if (list_empty(&shared_pcfgs)) {
-+ pcfg = get_pcfg(&unshared_pcfgs);
-+ if (!pcfg) {
-+ pr_crit("No QMan portals available!\n");
-+ return 0;
-+ }
-+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
-+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
-+ list_add_tail(&pcfg->list, &shared_pcfgs);
-+ }
-+ }
-+ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
-+ pcfg->public_cfg.is_shared = 0;
-+ p = init_pcfg(pcfg);
-+ if (!p) {
-+ pr_crit("Unable to configure portals\n");
-+ return 0;
-+ }
-+ }
-+ list_for_each_entry(pcfg, &shared_pcfgs, list) {
-+ pcfg->public_cfg.is_shared = 1;
-+ p = init_pcfg(pcfg);
-+ if (p)
-+ shared_portals[num_shared_portals++] = p;
-+ }
-+ if (!cpumask_empty(&slave_cpus))
-+ for_each_cpu(cpu, &slave_cpus)
-+ init_slave(cpu);
-+ pr_info("Qman portals initialised\n");
-+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
-+ for_each_cpu(cpu, &offline_cpus)
-+ qman_offline_cpu(cpu);
-+#ifdef CONFIG_HOTPLUG_CPU
-+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-+ "soc/qman_portal:online",
-+ qman_online_cpu, qman_offline_cpu);
-+ if (ret < 0) {
-+ pr_err("qman: failed to register hotplug callbacks.\n");
-+ return ret;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+__init int qman_resource_init(void)
-+{
-+ struct device_node *dn;
-+ int ret;
-+
-+ /* Initialise FQID allocation ranges */
-+ for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
-+ ret = fsl_fqid_range_init(dn);
-+ if (ret)
-+ return ret;
-+ }
-+ /* Initialise CGRID allocation ranges */
-+ for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
-+ ret = fsl_cgrid_range_init(dn);
-+ if (ret)
-+ return ret;
-+ }
-+ /* Parse pool channels into the allocator. (Must happen after portals
-+ * are initialised.) */
-+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
-+ ret = fsl_pool_channel_range_init(dn);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Parse CEETM */
-+ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") {
-+ ret = fsl_ceetm_init(dn);
-+ if (ret)
-+ return ret;
-+ }
-+ return 0;
-+}
-+
-+#ifdef CONFIG_SUSPEND
-+void suspend_unused_qportal(void)
-+{
-+ struct qm_portal_config *pcfg;
-+
-+ if (list_empty(&unused_pcfgs))
-+ return;
-+
-+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Need to save qportal %d\n", pcfg->public_cfg.index);
-+#endif
-+ /* save isdr, disable all via isdr, clear isr */
-+ pcfg->saved_isdr =
-+ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
-+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
-+ 0xe08);
-+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
-+ 0xe00);
-+ }
-+ return;
-+}
-+
-+void resume_unused_qportal(void)
-+{
-+ struct qm_portal_config *pcfg;
-+
-+ if (list_empty(&unused_pcfgs))
-+ return;
-+
-+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index);
-+#endif
-+ /* restore isdr */
-+ __raw_writel(pcfg->saved_isdr,
-+ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
-+ }
-+ return;
-+}
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_high.c
-@@ -0,0 +1,5655 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qman_low.h"
-+
-+/* Compilation constants */
-+#define DQRR_MAXFILL 15
-+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
-+#define IRQNAME "QMan portal %d"
-+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
-+
-+/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
-+ * positive, and rounding to the closest value if it's zero. NB, this macro
-+ * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
-+ * that are compatible with this. NB, these arguments should not be expressions
-+ * unless it is safe for them to be evaluated multiple times. Eg. do not pass
-+ * in "some_value++" as a parameter to the macro! */
-+#define ROUNDING(n, d, r) \
-+ (((r) < 0) ? div64_u64((n), (d)) : \
-+ (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
-+ div64_u64(((n) + ((d) / 2)), (d))))
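
A small userspace check of the three rounding modes described in the comment above; plain 64-bit division stands in for div64_u64(), an assumption that only holds outside the kernel.

#include <stdio.h>
#include <stdint.h>

/* Same shape as the driver macro, with '/' replacing div64_u64() */
#define ROUNDING(n, d, r) \
	(((r) < 0) ? ((n) / (d)) : \
	(((r) > 0) ? (((n) + (d) - 1) / (d)) : \
	(((n) + ((d) / 2)) / (d))))

int main(void)
{
	uint64_t n = 10, d = 4;

	/* 10/4 = 2.5: rounds down to 2, up to 3, and to nearest as 3 */
	printf("down=%llu up=%llu nearest=%llu\n",
	       (unsigned long long)ROUNDING(n, d, -1),
	       (unsigned long long)ROUNDING(n, d, 1),
	       (unsigned long long)ROUNDING(n, d, 0));
	return 0;
}
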
-+
-+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
-+ * inter-processor locking only. Note, FQLOCK() is always called either under a
-+ * local_irq_save() or from interrupt context - hence there's no need for irq
-+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
-+ * the "irq en/disable" machinery isn't recursive...). */
-+#define FQLOCK(fq) \
-+ do { \
-+ struct qman_fq *__fq478 = (fq); \
-+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
-+ spin_lock(&__fq478->fqlock); \
-+ } while (0)
-+#define FQUNLOCK(fq) \
-+ do { \
-+ struct qman_fq *__fq478 = (fq); \
-+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
-+ spin_unlock(&__fq478->fqlock); \
-+ } while (0)
-+
-+static inline void fq_set(struct qman_fq *fq, u32 mask)
-+{
-+ set_bits(mask, &fq->flags);
-+}
-+static inline void fq_clear(struct qman_fq *fq, u32 mask)
-+{
-+ clear_bits(mask, &fq->flags);
-+}
-+static inline int fq_isset(struct qman_fq *fq, u32 mask)
-+{
-+ return fq->flags & mask;
-+}
-+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
-+{
-+ return !(fq->flags & mask);
-+}
-+
-+struct qman_portal {
-+ struct qm_portal p;
-+ unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
-+ unsigned long irq_sources;
-+ u32 use_eqcr_ci_stashing;
-+ u32 slowpoll; /* only used when interrupts are off */
-+ struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
-+#endif
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ raw_spinlock_t sharing_lock; /* only used if is_shared */
-+ int is_shared;
-+ struct qman_portal *sharing_redirect;
-+#endif
-+ u32 sdqcr;
-+ int dqrr_disable_ref;
-+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
-+ * handler is called instead. */
-+ qman_cb_dc_ern cb_dc_ern;
-+ /* When the cpu-affine portal is activated, this is non-NULL */
-+ const struct qm_portal_config *config;
-+ /* This is needed for providing a non-NULL device to dma_map_***() */
-+ struct platform_device *pdev;
-+ struct dpa_rbtree retire_table;
-+ char irqname[MAX_IRQNAME];
-+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
-+ struct qman_cgrs *cgrs;
-+ /* linked-list of CSCN handlers. */
-+ struct list_head cgr_cbs;
-+ /* list lock */
-+ spinlock_t cgr_lock;
-+ /* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */
-+ struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX];
-+ /* 256-element array, each is a linked-list of CCSCN handlers. */
-+ struct list_head ccgr_cbs[QMAN_CEETM_MAX];
-+ /* list lock */
-+ spinlock_t ccgr_lock;
-+ /* track if memory was allocated by the driver */
-+ u8 alloced;
-+ /* power management data */
-+ u32 save_isdr;
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+ /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
-+	 * do byte swaps of DQRR read-only memory. The first entry must be
-+	 * aligned to 2 ** 10 so that DQRR index calculations can be based on
-+	 * the shadow copy address (6 bits for the address shift + 4 bits for
-+	 * the DQRR size).
-+ */
-+ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE] __aligned(1024);
-+#endif
-+};
-+
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+#define PORTAL_IRQ_LOCK(p, irqflags) \
-+ do { \
-+ if ((p)->is_shared) \
-+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
-+ else \
-+ local_irq_save(irqflags); \
-+ } while (0)
-+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
-+ do { \
-+ if ((p)->is_shared) \
-+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
-+ irqflags); \
-+ else \
-+ local_irq_restore(irqflags); \
-+ } while (0)
-+#else
-+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
-+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
-+#endif
-+
-+/* Global handler for DCP ERNs. Used when the portal receiving the message does
-+ * not have a portal-specific handler. */
-+static qman_cb_dc_ern cb_dc_ern;
-+
-+static cpumask_t affine_mask;
-+static DEFINE_SPINLOCK(affine_mask_lock);
-+static u16 affine_channels[NR_CPUS];
-+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
-+void *affine_portals[NR_CPUS];
-+
-+/* "raw" gets the cpu-local struct whether it's a redirect or not. */
-+static inline struct qman_portal *get_raw_affine_portal(void)
-+{
-+ return &get_cpu_var(qman_affine_portal);
-+}
-+/* For ops that can redirect, this obtains the portal to use */
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+static inline struct qman_portal *get_affine_portal(void)
-+{
-+ struct qman_portal *p = get_raw_affine_portal();
-+ if (p->sharing_redirect)
-+ return p->sharing_redirect;
-+ return p;
-+}
-+#else
-+#define get_affine_portal() get_raw_affine_portal()
-+#endif
-+/* For every "get", there must be a "put" */
-+static inline void put_affine_portal(void)
-+{
-+ put_cpu_var(qman_affine_portal);
-+}
-+/* Exception: poll functions assume the caller is cpu-affine and in no risk of
-+ * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
-+ * semantic - ie. to disable pre-emption. Some use-cases expect the execution
-+ * context to remain as non-atomic during poll-triggered callbacks as it was
-+ * when the poll API was first called (eg. NAPI), so we go out of our way in
-+ * this case to not disable pre-emption. */
-+static inline struct qman_portal *get_poll_portal(void)
-+{
-+ return &get_cpu_var(qman_affine_portal);
-+}
-+#define put_poll_portal()
-+
-+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
-+ * retirement notifications (the fact they are sometimes h/w-consumed means that
-+ * contextB isn't always a s/w demux - and as we can't know which case it is
-+ * when looking at the notification, we have to use the slow lookup for all of
-+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
-+ * (though at most one of them should be the consumer), so this table isn't for
-+ * all FQs - FQs are added when retirement commands are issued, and removed when
-+ * they complete, which also massively reduces the size of this table. */
-+IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
-+
-+/* This is what everything can wait on, even if it migrates to a different cpu
-+ * to the one whose affine portal it is waiting on. */
-+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
-+
-+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
-+{
-+ int ret = fqtree_push(&p->retire_table, fq);
-+ if (ret)
-+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
-+ return ret;
-+}
-+
-+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
-+{
-+ fqtree_del(&p->retire_table, fq);
-+}
-+
-+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
-+{
-+ return fqtree_find(&p->retire_table, fqid);
-+}
-+
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+static void **qman_fq_lookup_table;
-+static size_t qman_fq_lookup_table_size;
-+
-+int qman_setup_fq_lookup_table(size_t num_entries)
-+{
-+ num_entries++;
-+ /* Allocate 1 more entry since the first entry is not used */
-+ qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
-+ if (!qman_fq_lookup_table) {
-+ pr_err("QMan: Could not allocate fq lookup table\n");
-+ return -ENOMEM;
-+ }
-+ qman_fq_lookup_table_size = num_entries;
-+ pr_info("QMan: Allocated lookup table at %p, entry count %lu\n",
-+ qman_fq_lookup_table,
-+ (unsigned long)qman_fq_lookup_table_size);
-+ return 0;
-+}
-+
-+/* global structure that maintains fq object mapping */
-+static DEFINE_SPINLOCK(fq_hash_table_lock);
-+
-+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
-+{
-+ u32 i;
-+
-+ spin_lock(&fq_hash_table_lock);
-+ /* Can't use index zero because this has special meaning
-+ * in context_b field. */
-+ for (i = 1; i < qman_fq_lookup_table_size; i++) {
-+ if (qman_fq_lookup_table[i] == NULL) {
-+ *entry = i;
-+ qman_fq_lookup_table[i] = fq;
-+ spin_unlock(&fq_hash_table_lock);
-+ return 0;
-+ }
-+ }
-+ spin_unlock(&fq_hash_table_lock);
-+ return -ENOMEM;
-+}
-+
-+static void clear_fq_table_entry(u32 entry)
-+{
-+ spin_lock(&fq_hash_table_lock);
-+ BUG_ON(entry >= qman_fq_lookup_table_size);
-+ qman_fq_lookup_table[entry] = NULL;
-+ spin_unlock(&fq_hash_table_lock);
-+}
-+
-+static inline struct qman_fq *get_fq_table_entry(u32 entry)
-+{
-+ BUG_ON(entry >= qman_fq_lookup_table_size);
-+ return qman_fq_lookup_table[entry];
-+}
-+#endif
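
Stripped of locking and kernel types, the demux table above boils down to "find the first free slot, skipping index 0"; a standalone sketch of that pattern follows, with TABLE_SIZE and find_empty_entry() chosen purely for illustration.

#include <stdio.h>
#include <stddef.h>

#define TABLE_SIZE 8

static void *lookup_table[TABLE_SIZE];

/* Return the first free index, skipping 0 (a zero context_b would be
 * indistinguishable from "no entry"), or -1 if the table is full. */
static int find_empty_entry(void *obj)
{
	size_t i;

	for (i = 1; i < TABLE_SIZE; i++) {
		if (!lookup_table[i]) {
			lookup_table[i] = obj;
			return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	int a, b;

	printf("first=%d second=%d\n",
	       find_empty_entry(&a), find_empty_entry(&b));
	/* prints: first=1 second=2 */
	return 0;
}
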
-+
-+static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
-+{
-+ /* Byteswap the FQD to HW format */
-+ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
-+ fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
-+ fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
-+ fqd->context_b = cpu_to_be32(fqd->context_b);
-+ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
-+}
-+
-+static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
-+{
-+ /* Byteswap the FQD to CPU format */
-+ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
-+ fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
-+ fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
-+ fqd->context_b = be32_to_cpu(fqd->context_b);
-+ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
-+}
-+
-+/* Swap a 40 bit address */
-+static inline u64 cpu_to_be40(u64 in)
-+{
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ return in;
-+#else
-+ u64 out = 0;
-+ u8 *p = (u8 *) &out;
-+ p[0] = in >> 32;
-+ p[1] = in >> 24;
-+ p[2] = in >> 16;
-+ p[3] = in >> 8;
-+ p[4] = in >> 0;
-+ return out;
-+#endif
-+}
-+static inline u64 be40_to_cpu(u64 in)
-+{
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ return in;
-+#else
-+ u64 out = 0;
-+ u8 *pout = (u8 *) &out;
-+ u8 *pin = (u8 *) &in;
-+ pout[0] = pin[4];
-+ pout[1] = pin[3];
-+ pout[2] = pin[2];
-+ pout[3] = pin[1];
-+ pout[4] = pin[0];
-+ return out;
-+#endif
-+}
-+
-+/* Swap a 24 bit value */
-+static inline u32 cpu_to_be24(u32 in)
-+{
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ return in;
-+#else
-+ u32 out = 0;
-+ u8 *p = (u8 *) &out;
-+ p[0] = in >> 16;
-+ p[1] = in >> 8;
-+ p[2] = in >> 0;
-+ return out;
-+#endif
-+}
-+
-+static inline u32 be24_to_cpu(u32 in)
-+{
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ return in;
-+#else
-+ u32 out = 0;
-+ u8 *pout = (u8 *) &out;
-+ u8 *pin = (u8 *) &in;
-+ pout[0] = pin[2];
-+ pout[1] = pin[1];
-+ pout[2] = pin[0];
-+ return out;
-+#endif
-+}
-+
-+static inline u64 be48_to_cpu(u64 in)
-+{
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ return in;
-+#else
-+ u64 out = 0;
-+ u8 *pout = (u8 *) &out;
-+ u8 *pin = (u8 *) &in;
-+
-+ pout[0] = pin[5];
-+ pout[1] = pin[4];
-+ pout[2] = pin[3];
-+ pout[3] = pin[2];
-+ pout[4] = pin[1];
-+ pout[5] = pin[0];
-+ return out;
-+#endif
-+}
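
A quick way to see what the narrow-width swaps above produce; the helper below is a userspace copy of the little-endian branch of cpu_to_be24() so the packed byte layout can be printed directly.

#include <stdio.h>
#include <stdint.h>

/* Userspace copy of the little-endian branch of cpu_to_be24() above */
static uint32_t cpu_to_be24(uint32_t in)
{
	uint32_t out = 0;
	uint8_t *p = (uint8_t *)&out;

	p[0] = in >> 16;
	p[1] = in >> 8;
	p[2] = in >> 0;
	return out;
}

int main(void)
{
	uint32_t be = cpu_to_be24(0x123456);
	uint8_t *p = (uint8_t *)&be;

	/* The first three bytes now read 12 34 56, i.e. network order */
	printf("bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
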
-+static inline void cpu_to_hw_fd(struct qm_fd *fd)
-+{
-+ fd->opaque_addr = cpu_to_be64(fd->opaque_addr);
-+ fd->status = cpu_to_be32(fd->status);
-+ fd->opaque = cpu_to_be32(fd->opaque);
-+}
-+
-+static inline void hw_fd_to_cpu(struct qm_fd *fd)
-+{
-+ fd->opaque_addr = be64_to_cpu(fd->opaque_addr);
-+ fd->status = be32_to_cpu(fd->status);
-+ fd->opaque = be32_to_cpu(fd->opaque);
-+}
-+
-+static inline void hw_cq_query_to_cpu(struct qm_mcr_ceetm_cq_query *cq_query)
-+{
-+ cq_query->ccgid = be16_to_cpu(cq_query->ccgid);
-+ cq_query->state = be16_to_cpu(cq_query->state);
-+ cq_query->pfdr_hptr = be24_to_cpu(cq_query->pfdr_hptr);
-+ cq_query->pfdr_tptr = be24_to_cpu(cq_query->pfdr_tptr);
-+ cq_query->od1_xsfdr = be16_to_cpu(cq_query->od1_xsfdr);
-+ cq_query->od2_xsfdr = be16_to_cpu(cq_query->od2_xsfdr);
-+ cq_query->od3_xsfdr = be16_to_cpu(cq_query->od3_xsfdr);
-+ cq_query->od4_xsfdr = be16_to_cpu(cq_query->od4_xsfdr);
-+ cq_query->od5_xsfdr = be16_to_cpu(cq_query->od5_xsfdr);
-+ cq_query->od6_xsfdr = be16_to_cpu(cq_query->od6_xsfdr);
-+ cq_query->ra1_xsfdr = be16_to_cpu(cq_query->ra1_xsfdr);
-+ cq_query->ra2_xsfdr = be16_to_cpu(cq_query->ra2_xsfdr);
-+ cq_query->frm_cnt = be24_to_cpu(cq_query->frm_cnt);
-+}
-+
-+static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q)
-+{
-+ int i;
-+
-+ ccgr_q->cm_query.cs_thres.hword =
-+ be16_to_cpu(ccgr_q->cm_query.cs_thres.hword);
-+ ccgr_q->cm_query.cs_thres_x.hword =
-+ be16_to_cpu(ccgr_q->cm_query.cs_thres_x.hword);
-+ ccgr_q->cm_query.td_thres.hword =
-+ be16_to_cpu(ccgr_q->cm_query.td_thres.hword);
-+ ccgr_q->cm_query.wr_parm_g.word =
-+ be32_to_cpu(ccgr_q->cm_query.wr_parm_g.word);
-+ ccgr_q->cm_query.wr_parm_y.word =
-+ be32_to_cpu(ccgr_q->cm_query.wr_parm_y.word);
-+ ccgr_q->cm_query.wr_parm_r.word =
-+ be32_to_cpu(ccgr_q->cm_query.wr_parm_r.word);
-+ ccgr_q->cm_query.cscn_targ_dcp =
-+ be16_to_cpu(ccgr_q->cm_query.cscn_targ_dcp);
-+ ccgr_q->cm_query.i_cnt = be40_to_cpu(ccgr_q->cm_query.i_cnt);
-+ ccgr_q->cm_query.a_cnt = be40_to_cpu(ccgr_q->cm_query.a_cnt);
-+ for (i = 0; i < ARRAY_SIZE(ccgr_q->cm_query.cscn_targ_swp); i++)
-+ ccgr_q->cm_query.cscn_targ_swp[i] =
-+ be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]);
-+}
-+
-+/* In the case that slow- and fast-path handling are both done by qman_poll()
-+ * (ie. because there is no interrupt handling), we ought to balance how often
-+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
-+ * sources, so we call the fast poll 'n' times before calling the slow poll
-+ * once. The idle decrementer constant is used when the last slow-poll detected
-+ * no work to do, and the busy decrementer constant when the last slow-poll had
-+ * work to do. */
-+#define SLOW_POLL_IDLE 1000
-+#define SLOW_POLL_BUSY 10
-+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
-+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
-+ unsigned int poll_limit);
-+
-+/* Portal interrupt handler */
-+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
-+{
-+ struct qman_portal *p = ptr;
-+ /*
-+ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
-+ * it could race against a Query Congestion State command also given
-+ * as part of the handling of this interrupt source. We mustn't
-+ * clear it a second time in this top-level function.
-+ */
-+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
-+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
-+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
-+ /* DQRR-handling if it's interrupt-driven */
-+ if (is & QM_PIRQ_DQRI)
-+ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
-+ /* Handling of anything else that's interrupt-driven */
-+ clear |= __poll_portal_slow(p, is);
-+ qm_isr_status_clear(&p->p, clear);
-+ return IRQ_HANDLED;
-+}
-+
-+/* This inner version is used privately by qman_create_affine_portal(), as well
-+ * as by the exported qman_stop_dequeues(). */
-+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ if (!(p->dqrr_disable_ref++))
-+ qm_dqrr_set_maxfill(&p->p, 0);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+}
-+
-+static int drain_mr_fqrni(struct qm_portal *p)
-+{
-+ const struct qm_mr_entry *msg;
-+loop:
-+ msg = qm_mr_current(p);
-+ if (!msg) {
-+ /* if MR was full and h/w had other FQRNI entries to produce, we
-+ * need to allow it time to produce those entries once the
-+ * existing entries are consumed. A worst-case situation
-+ * (fully-loaded system) means h/w sequencers may have to do 3-4
-+ * other things before servicing the portal's MR pump, each of
-+ * which (if slow) may take ~50 qman cycles (which is ~200
-+ * processor cycles). So rounding up and then multiplying this
-+ * worst-case estimate by a factor of 10, just to be
-+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
-+ * one entry at a time, so h/w has an opportunity to produce new
-+ * entries well before the ring has been fully consumed, so
-+ * we're being *really* paranoid here. */
-+ u64 now, then = mfatb();
-+ do {
-+ now = mfatb();
-+ } while ((then + 10000) > now);
-+ msg = qm_mr_current(p);
-+ if (!msg)
-+ return 0;
-+ }
-+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
-+ /* We aren't draining anything but FQRNIs */
-+ pr_err("QMan found verb 0x%x in MR\n", msg->verb);
-+ return -1;
-+ }
-+ qm_mr_next(p);
-+ qm_mr_cci_consume(p, 1);
-+ goto loop;
-+}
-+
-+#ifdef CONFIG_SUSPEND
-+static int _qman_portal_suspend_noirq(struct device *dev)
-+{
-+ struct qman_portal *p = (struct qman_portal *)dev->platform_data;
-+#ifdef CONFIG_PM_DEBUG
-+ struct platform_device *pdev = to_platform_device(dev);
-+#endif
-+
-+ p->save_isdr = qm_isr_disable_read(&p->p);
-+ qm_isr_disable_write(&p->p, 0xffffffff);
-+ qm_isr_status_clear(&p->p, 0xffffffff);
-+#ifdef CONFIG_PM_DEBUG
-+ pr_info("Suspend for %s\n", pdev->name);
-+#endif
-+ return 0;
-+}
-+
-+static int _qman_portal_resume_noirq(struct device *dev)
-+{
-+ struct qman_portal *p = (struct qman_portal *)dev->platform_data;
-+
-+ /* restore isdr */
-+ qm_isr_disable_write(&p->p, p->save_isdr);
-+ return 0;
-+}
-+#else
-+#define _qman_portal_suspend_noirq NULL
-+#define _qman_portal_resume_noirq NULL
-+#endif
-+
-+struct dev_pm_domain qman_portal_device_pm_domain = {
-+ .ops = {
-+ USE_PLATFORM_PM_SLEEP_OPS
-+ .suspend_noirq = _qman_portal_suspend_noirq,
-+ .resume_noirq = _qman_portal_resume_noirq,
-+ }
-+};
-+
-+struct qman_portal *qman_create_portal(
-+ struct qman_portal *portal,
-+ const struct qm_portal_config *config,
-+ const struct qman_cgrs *cgrs)
-+{
-+ struct qm_portal *__p;
-+ char buf[16];
-+ int ret;
-+ u32 isdr;
-+ struct platform_device_info pdev_info;
-+
-+ if (!portal) {
-+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
-+ if (!portal)
-+ return portal;
-+ portal->alloced = 1;
-+ } else
-+ portal->alloced = 0;
-+
-+ __p = &portal->p;
-+
-+#if (defined CONFIG_PPC || defined CONFIG_PPC64) && defined CONFIG_FSL_PAMU
-+ /* PAMU is required for stashing */
-+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
-+ 1 : 0);
-+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ portal->use_eqcr_ci_stashing = 1;
-+#else
-+ portal->use_eqcr_ci_stashing = 0;
-+#endif
-+
-+ /* prep the low-level portal struct with the mapped addresses from the
-+ * config, everything that follows depends on it and "config" is more
-+ * for (de)reference... */
-+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
-+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
-+ /*
-+ * If CI-stashing is used, the current defaults use a threshold of 3,
-+	 * and stash with higher-than-DQRR priority.
-+ */
-+ if (qm_eqcr_init(__p, qm_eqcr_pvb,
-+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
-+ pr_err("Qman EQCR initialisation failed\n");
-+ goto fail_eqcr;
-+ }
-+ if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
-+ qm_dqrr_cdc, DQRR_MAXFILL)) {
-+ pr_err("Qman DQRR initialisation failed\n");
-+ goto fail_dqrr;
-+ }
-+ if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
-+ pr_err("Qman MR initialisation failed\n");
-+ goto fail_mr;
-+ }
-+ if (qm_mc_init(__p)) {
-+ pr_err("Qman MC initialisation failed\n");
-+ goto fail_mc;
-+ }
-+ if (qm_isr_init(__p)) {
-+ pr_err("Qman ISR initialisation failed\n");
-+ goto fail_isr;
-+ }
-+ /* static interrupt-gating controls */
-+ qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH);
-+ qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH);
-+ qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD);
-+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
-+ if (!portal->cgrs)
-+ goto fail_cgrs;
-+ /* initial snapshot is no-depletion */
-+ qman_cgrs_init(&portal->cgrs[1]);
-+ if (cgrs)
-+ portal->cgrs[0] = *cgrs;
-+ else
-+ /* if the given mask is NULL, assume all CGRs can be seen */
-+ qman_cgrs_fill(&portal->cgrs[0]);
-+ INIT_LIST_HEAD(&portal->cgr_cbs);
-+ spin_lock_init(&portal->cgr_lock);
-+ if (num_ceetms) {
-+ for (ret = 0; ret < num_ceetms; ret++) {
-+ portal->ccgrs[ret] = kmalloc(2 *
-+ sizeof(struct qman_ccgrs), GFP_KERNEL);
-+ if (!portal->ccgrs[ret])
-+ goto fail_ccgrs;
-+ qman_ccgrs_init(&portal->ccgrs[ret][1]);
-+ qman_ccgrs_fill(&portal->ccgrs[ret][0]);
-+ INIT_LIST_HEAD(&portal->ccgr_cbs[ret]);
-+ }
-+ }
-+ spin_lock_init(&portal->ccgr_lock);
-+ portal->bits = 0;
-+ portal->slowpoll = 0;
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ portal->eqci_owned = NULL;
-+#endif
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ raw_spin_lock_init(&portal->sharing_lock);
-+ portal->is_shared = config->public_cfg.is_shared;
-+ portal->sharing_redirect = NULL;
-+#endif
-+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
-+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
-+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
-+ portal->dqrr_disable_ref = 0;
-+ portal->cb_dc_ern = NULL;
-+ sprintf(buf, "qportal-%d", config->public_cfg.channel);
-+
-+ memset(&pdev_info, 0, sizeof(pdev_info));
-+ pdev_info.name = buf;
-+ pdev_info.id = PLATFORM_DEVID_NONE;
-+ pdev_info.dma_mask = DMA_BIT_MASK(40);
-+
-+ portal->pdev = platform_device_register_full(&pdev_info);
-+ if (!portal->pdev) {
-+		pr_err("qman_portal - platform_device_register_full() failed\n");
-+ goto fail_devregister;
-+ }
-+
-+ arch_setup_dma_ops(&portal->pdev->dev, 0, 0, NULL, true);
-+
-+ portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain;
-+ portal->pdev->dev.platform_data = portal;
-+ dpa_rbtree_init(&portal->retire_table);
-+ isdr = 0xffffffff;
-+ qm_isr_disable_write(__p, isdr);
-+ portal->irq_sources = 0;
-+ qm_isr_enable_write(__p, portal->irq_sources);
-+ qm_isr_status_clear(__p, 0xffffffff);
-+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
-+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
-+ portal)) {
-+ pr_err("request_irq() failed\n");
-+ goto fail_irq;
-+ }
-+ if ((config->public_cfg.cpu != -1) &&
-+ irq_can_set_affinity(config->public_cfg.irq) &&
-+ irq_set_affinity(config->public_cfg.irq,
-+ cpumask_of(config->public_cfg.cpu))) {
-+ pr_err("irq_set_affinity() failed\n");
-+ goto fail_affinity;
-+ }
-+
-+ /* Need EQCR to be empty before continuing */
-+ isdr ^= QM_PIRQ_EQCI;
-+ qm_isr_disable_write(__p, isdr);
-+ ret = qm_eqcr_get_fill(__p);
-+ if (ret) {
-+ pr_err("Qman EQCR unclean\n");
-+ goto fail_eqcr_empty;
-+ }
-+ isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
-+ qm_isr_disable_write(__p, isdr);
-+ if (qm_dqrr_current(__p) != NULL) {
-+ pr_err("Qman DQRR unclean\n");
-+ qm_dqrr_cdc_consume_n(__p, 0xffff);
-+ }
-+ if (qm_mr_current(__p) != NULL) {
-+ /* special handling, drain just in case it's a few FQRNIs */
-+ if (drain_mr_fqrni(__p)) {
-+ const struct qm_mr_entry *e = qm_mr_current(__p);
-+ /*
-+			 * The message ring cannot be empty here, so there is no
-+			 * need to check that qm_mr_current() returned a valid
-+			 * entry.
-+ */
-+			pr_err("Qman MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
-+ e->verb, e->ern.rc, e->ern.fd.addr_lo);
-+ goto fail_dqrr_mr_empty;
-+ }
-+ }
-+ /* Success */
-+ portal->config = config;
-+ qm_isr_disable_write(__p, 0);
-+ qm_isr_uninhibit(__p);
-+ /* Write a sane SDQCR */
-+ qm_dqrr_sdqcr_set(__p, portal->sdqcr);
-+ return portal;
-+fail_dqrr_mr_empty:
-+fail_eqcr_empty:
-+fail_affinity:
-+ free_irq(config->public_cfg.irq, portal);
-+fail_irq:
-+ platform_device_unregister(portal->pdev);
-+fail_devregister:
-+ if (num_ceetms)
-+ for (ret = 0; ret < num_ceetms; ret++)
-+ kfree(portal->ccgrs[ret]);
-+fail_ccgrs:
-+ kfree(portal->cgrs);
-+fail_cgrs:
-+ qm_isr_finish(__p);
-+fail_isr:
-+ qm_mc_finish(__p);
-+fail_mc:
-+ qm_mr_finish(__p);
-+fail_mr:
-+ qm_dqrr_finish(__p);
-+fail_dqrr:
-+ qm_eqcr_finish(__p);
-+fail_eqcr:
-+ if (portal->alloced)
-+ kfree(portal);
-+ return NULL;
-+}
-+
-+struct qman_portal *qman_create_affine_portal(
-+ const struct qm_portal_config *config,
-+ const struct qman_cgrs *cgrs)
-+{
-+ struct qman_portal *res;
-+ struct qman_portal *portal;
-+
-+ portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
-+ res = qman_create_portal(portal, config, cgrs);
-+ if (res) {
-+ spin_lock(&affine_mask_lock);
-+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
-+ affine_channels[config->public_cfg.cpu] =
-+ config->public_cfg.channel;
-+ affine_portals[config->public_cfg.cpu] = portal;
-+ spin_unlock(&affine_mask_lock);
-+ }
-+ return res;
-+}
-+
-+/* These checks are BUG_ON()s because the driver is already supposed to avoid
-+ * these cases. */
-+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
-+ int cpu)
-+{
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ struct qman_portal *p;
-+ p = &per_cpu(qman_affine_portal, cpu);
-+ /* Check that we don't already have our own portal */
-+ BUG_ON(p->config);
-+ /* Check that we aren't already slaving to another portal */
-+ BUG_ON(p->is_shared);
-+ /* Check that 'redirect' is prepared to have us */
-+ BUG_ON(!redirect->config->public_cfg.is_shared);
-+ /* These are the only elements to initialise when redirecting */
-+ p->irq_sources = 0;
-+ p->sharing_redirect = redirect;
-+ affine_portals[cpu] = p;
-+ return p;
-+#else
-+ BUG();
-+ return NULL;
-+#endif
-+}
-+
-+void qman_destroy_portal(struct qman_portal *qm)
-+{
-+ const struct qm_portal_config *pcfg;
-+ int i;
-+
-+ /* Stop dequeues on the portal */
-+ qm_dqrr_sdqcr_set(&qm->p, 0);
-+
-+ /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
-+ * something related to QM_PIRQ_EQCI, this may need fixing.
-+ * Also, due to the prefetching model used for CI updates in the enqueue
-+ * path, this update will only invalidate the CI cacheline *after*
-+ * working on it, so we need to call this twice to ensure a full update
-+ * irrespective of where the enqueue processing was at when the teardown
-+ * began. */
-+ qm_eqcr_cce_update(&qm->p);
-+ qm_eqcr_cce_update(&qm->p);
-+ pcfg = qm->config;
-+
-+ free_irq(pcfg->public_cfg.irq, qm);
-+
-+ kfree(qm->cgrs);
-+ if (num_ceetms)
-+ for (i = 0; i < num_ceetms; i++)
-+ kfree(qm->ccgrs[i]);
-+ qm_isr_finish(&qm->p);
-+ qm_mc_finish(&qm->p);
-+ qm_mr_finish(&qm->p);
-+ qm_dqrr_finish(&qm->p);
-+ qm_eqcr_finish(&qm->p);
-+
-+ platform_device_unregister(qm->pdev);
-+
-+ qm->config = NULL;
-+ if (qm->alloced)
-+ kfree(qm);
-+}
-+
-+const struct qm_portal_config *qman_destroy_affine_portal(void)
-+{
-+ /* We don't want to redirect if we're a slave, use "raw" */
-+ struct qman_portal *qm = get_raw_affine_portal();
-+ const struct qm_portal_config *pcfg;
-+ int cpu;
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (qm->sharing_redirect) {
-+ qm->sharing_redirect = NULL;
-+ put_affine_portal();
-+ return NULL;
-+ }
-+ qm->is_shared = 0;
-+#endif
-+ pcfg = qm->config;
-+ cpu = pcfg->public_cfg.cpu;
-+
-+ qman_destroy_portal(qm);
-+
-+ spin_lock(&affine_mask_lock);
-+ cpumask_clear_cpu(cpu, &affine_mask);
-+ spin_unlock(&affine_mask_lock);
-+ put_affine_portal();
-+ return pcfg;
-+}
-+
-+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
-+{
-+ return &p->config->public_cfg;
-+}
-+EXPORT_SYMBOL(qman_p_get_portal_config);
-+
-+const struct qman_portal_config *qman_get_portal_config(void)
-+{
-+ struct qman_portal *p = get_affine_portal();
-+ const struct qman_portal_config *ret = qman_p_get_portal_config(p);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_get_portal_config);
-+
-+/* Inline helper to reduce nesting in __poll_portal_slow() */
-+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_mr_entry *msg, u8 verb)
-+{
-+ FQLOCK(fq);
-+ switch (verb) {
-+ case QM_MR_VERB_FQRL:
-+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
-+ fq_clear(fq, QMAN_FQ_STATE_ORL);
-+ table_del_fq(p, fq);
-+ break;
-+ case QM_MR_VERB_FQRN:
-+ DPA_ASSERT((fq->state == qman_fq_state_parked) ||
-+ (fq->state == qman_fq_state_sched));
-+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
-+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
-+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
-+ fq_set(fq, QMAN_FQ_STATE_NE);
-+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
-+ fq_set(fq, QMAN_FQ_STATE_ORL);
-+ else
-+ table_del_fq(p, fq);
-+ fq->state = qman_fq_state_retired;
-+ break;
-+ case QM_MR_VERB_FQPN:
-+ DPA_ASSERT(fq->state == qman_fq_state_sched);
-+ DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
-+ fq->state = qman_fq_state_parked;
-+ }
-+ FQUNLOCK(fq);
-+}
-+
-+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
-+{
-+ const struct qm_mr_entry *msg;
-+ struct qm_mr_entry swapped_msg;
-+ int k;
-+
-+ if (is & QM_PIRQ_CSCI) {
-+ struct qman_cgrs rr, c;
-+ struct qm_mc_result *mcr;
-+ struct qman_cgr *cgr;
-+ unsigned long irqflags __maybe_unused;
-+
-+ spin_lock_irqsave(&p->cgr_lock, irqflags);
-+ /*
-+ * The CSCI bit must be cleared _before_ issuing the
-+ * Query Congestion State command, to ensure that a long
-+ * CGR State Change callback cannot miss an intervening
-+ * state change.
-+ */
-+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
-+ qm_mc_start(&p->p);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ for (k = 0; k < 8; k++)
-+ mcr->querycongestion.state.__state[k] = be32_to_cpu(
-+ mcr->querycongestion.state.__state[k]);
-+ /* mask out the ones I'm not interested in */
-+ qman_cgrs_and(&rr, (const struct qman_cgrs *)
-+ &mcr->querycongestion.state, &p->cgrs[0]);
-+ /* check previous snapshot for delta, enter/exit congestion */
-+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
-+ /* update snapshot */
-+ qman_cgrs_cp(&p->cgrs[1], &rr);
-+ /* Invoke callback */
-+ list_for_each_entry(cgr, &p->cgr_cbs, node)
-+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
-+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
-+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
-+ }
-+ if (is & QM_PIRQ_CCSCI) {
-+ struct qman_ccgrs rr, c, congestion_result;
-+ struct qm_mc_result *mcr;
-+ struct qm_mc_command *mcc;
-+ struct qm_ceetm_ccg *ccg;
-+ unsigned long irqflags __maybe_unused;
-+ int i, j;
-+
-+ spin_lock_irqsave(&p->ccgr_lock, irqflags);
-+ /*
-+ * The CCSCI bit must be cleared _before_ issuing the
-+ * Query Congestion State command, to ensure that a long
-+ * CCGR State Change callback cannot miss an intervening
-+ * state change.
-+ */
-+ qm_isr_status_clear(&p->p, QM_PIRQ_CCSCI);
-+
-+ for (i = 0; i < num_ceetms; i++) {
-+ for (j = 0; j < 2; j++) {
-+ mcc = qm_mc_start(&p->p);
-+ mcc->ccgr_query.ccgrid = cpu_to_be16(
-+ CEETM_QUERY_CONGESTION_STATE | j);
-+ mcc->ccgr_query.dcpid = i;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ for (k = 0; k < 8; k++)
-+ mcr->ccgr_query.congestion_state.state.
-+ __state[k] = be32_to_cpu(
-+ mcr->ccgr_query.
-+ congestion_state.state.
-+ __state[k]);
-+ congestion_result.q[j] =
-+ mcr->ccgr_query.congestion_state.state;
-+ }
-+ /* mask out the ones I'm not interested in */
-+ qman_ccgrs_and(&rr, &congestion_result,
-+ &p->ccgrs[i][0]);
-+ /*
-+ * check previous snapshot for delta, enter/exit
-+ * congestion.
-+ */
-+ qman_ccgrs_xor(&c, &rr, &p->ccgrs[i][1]);
-+ /* update snapshot */
-+ qman_ccgrs_cp(&p->ccgrs[i][1], &rr);
-+ /* Invoke callback */
-+ list_for_each_entry(ccg, &p->ccgr_cbs[i], cb_node)
-+ if (ccg->cb && qman_ccgrs_get(&c,
-+ (ccg->parent->idx << 4) | ccg->idx))
-+ ccg->cb(ccg, ccg->cb_ctx,
-+ qman_ccgrs_get(&rr,
-+ (ccg->parent->idx << 4)
-+ | ccg->idx));
-+ }
-+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
-+ }
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (is & QM_PIRQ_EQCI) {
-+ unsigned long irqflags;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ p->eqci_owned = NULL;
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ wake_up(&affine_queue);
-+ }
-+#endif
-+
-+ if (is & QM_PIRQ_EQRI) {
-+ unsigned long irqflags __maybe_unused;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ qm_eqcr_cce_update(&p->p);
-+ qm_eqcr_set_ithresh(&p->p, 0);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ wake_up(&affine_queue);
-+ }
-+
-+ if (is & QM_PIRQ_MRI) {
-+ struct qman_fq *fq;
-+ u8 verb, num = 0;
-+mr_loop:
-+ qm_mr_pvb_update(&p->p);
-+ msg = qm_mr_current(&p->p);
-+ if (!msg)
-+ goto mr_done;
-+ swapped_msg = *msg;
-+ hw_fd_to_cpu(&swapped_msg.ern.fd);
-+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
-+ /* The message is a software ERN iff the 0x20 bit is clear */
-+ if (verb & 0x20) {
-+ switch (verb) {
-+ case QM_MR_VERB_FQRNI:
-+ /* nada, we drop FQRNIs on the floor */
-+ break;
-+ case QM_MR_VERB_FQRN:
-+ case QM_MR_VERB_FQRL:
-+ /* Lookup in the retirement table */
-+ fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid));
-+ BUG_ON(!fq);
-+ fq_state_change(p, fq, &swapped_msg, verb);
-+ if (fq->cb.fqs)
-+ fq->cb.fqs(p, fq, &swapped_msg);
-+ break;
-+ case QM_MR_VERB_FQPN:
-+ /* Parked */
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ fq = get_fq_table_entry(
-+ be32_to_cpu(msg->fq.contextB));
-+#else
-+ fq = (void *)(uintptr_t)
-+ be32_to_cpu(msg->fq.contextB);
-+#endif
-+ fq_state_change(p, fq, msg, verb);
-+ if (fq->cb.fqs)
-+ fq->cb.fqs(p, fq, &swapped_msg);
-+ break;
-+ case QM_MR_VERB_DC_ERN:
-+ /* DCP ERN */
-+ if (p->cb_dc_ern)
-+ p->cb_dc_ern(p, msg);
-+ else if (cb_dc_ern)
-+ cb_dc_ern(p, msg);
-+ else {
-+ static int warn_once;
-+ if (!warn_once) {
-+ pr_crit("Leaking DCP ERNs!\n");
-+ warn_once = 1;
-+ }
-+ }
-+ break;
-+ default:
-+ pr_crit("Invalid MR verb 0x%02x\n", verb);
-+ }
-+ } else {
-+ /* It's a software ERN */
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
-+#else
-+ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
-+#endif
-+ fq->cb.ern(p, fq, &swapped_msg);
-+ }
-+ num++;
-+ qm_mr_next(&p->p);
-+ goto mr_loop;
-+mr_done:
-+ qm_mr_cci_consume(&p->p, num);
-+ }
-+ /*
-+ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
-+ * processing. If that interrupt source has meanwhile been re-asserted,
-+ * we mustn't clear it here (or in the top-level interrupt handler).
-+ */
-+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
-+}
-+
-+/* remove some slowish-path stuff from the "fast path" and make sure it isn't
-+ * inlined. */
-+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
-+{
-+ p->vdqcr_owned = NULL;
-+ FQLOCK(fq);
-+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
-+ FQUNLOCK(fq);
-+ wake_up(&affine_queue);
-+}
-+
-+/* Copy a DQRR entry ensuring reads reach QBMan in order */
-+static inline void safe_copy_dqrr(struct qm_dqrr_entry *dst,
-+ const struct qm_dqrr_entry *src)
-+{
-+ int i = 0;
-+ const u64 *s64 = (const u64 *)src;
-+ u64 *d64 = (u64 *)dst;
-+
-+ /* DQRR only has 32 bytes of valid data, so we only need to
-+ * copy four 64-bit values */
-+ *d64 = *s64;
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ {
-+ u32 res, zero = 0;
-+ /* Creating a dependency after copying the first bytes ensures no
-+ wrap transaction is generated to QBMan */
-+ /* Logical AND the value pointed to by s64 with 0x0 and
-+ store the result in res */
-+ asm volatile("and %[result], %[in1], %[in2]"
-+ : [result] "=r" (res)
-+ : [in1] "r" (zero), [in2] "r" (*s64)
-+ : "memory");
-+ /* Add res to s64 - this creates a dependency on the result of
-+ reading the value of s64 before the next read. The side
-+ effect of this is that the core must stall until the first
-+ aligned read is complete, therefore preventing a WRAP
-+ transaction from being seen by the QBMan */
-+ asm volatile("add %[result], %[in1], %[in2]"
-+ : [result] "=r" (s64)
-+ : [in1] "r" (res), [in2] "r" (s64)
-+ : "memory");
-+ }
-+#endif
-+ /* Copy the last three 64-bit words */
-+ d64++; s64++;
-+ for (; i < 3; i++)
-+ *d64++ = *s64++;
-+}
-+
-+/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
-+ * that would conflict with other things if they ran at the same time on the
-+ * same cpu are;
-+ *
-+ * (i) setting/clearing vdqcr_owned, and
-+ * (ii) clearing the NE (Not Empty) flag.
-+ *
-+ * Both are safe. Because;
-+ *
-+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
-+ * vdqcr_owned field (which it does before setting VDQCR), and
-+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
-+ * done so that we can't interfere.
-+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
-+ * with (i) that API prevents us from interfering until it's safe.
-+ *
-+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
-+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
-+ * advantage comes from this function not having to "lock" anything at all.
-+ *
-+ * Note also that the callbacks are invoked at points which are safe against the
-+ * above potential conflicts, but that this function itself is not re-entrant
-+ * (this is because the function tracks one end of each FIFO in the portal and
-+ * we do *not* want to lock that). So the consequence is that it is safe for
-+ * user callbacks to call into any Qman API *except* qman_poll() (as that's the
-+ * sole API that could be invoking the callback through this function).
-+ */
-+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
-+ unsigned int poll_limit)
-+{
-+ const struct qm_dqrr_entry *dq;
-+ struct qman_fq *fq;
-+ enum qman_cb_dqrr_result res;
-+ unsigned int limit = 0;
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+ struct qm_dqrr_entry *shadow;
-+ const struct qm_dqrr_entry *orig_dq;
-+#endif
-+loop:
-+ qm_dqrr_pvb_update(&p->p);
-+ dq = qm_dqrr_current(&p->p);
-+ if (!dq)
-+ goto done;
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+ /* If running on an LE system, the fields of the
-+ dequeue entry must be swapped. Because the
-+ QMan HW will ignore writes, the DQRR entry is
-+ copied and the index is stored within the copy */
-+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
-+ /* Use safe copy here to avoid WRAP transaction */
-+ safe_copy_dqrr(shadow, dq);
-+ orig_dq = dq;
-+ dq = shadow;
-+ shadow->fqid = be32_to_cpu(shadow->fqid);
-+ shadow->contextB = be32_to_cpu(shadow->contextB);
-+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
-+ hw_fd_to_cpu(&shadow->fd);
-+#endif
-+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
-+ /* VDQCR: don't trust contextB as the FQ may have been
-+ * configured for h/w consumption and we're draining it
-+ * post-retirement. */
-+ fq = p->vdqcr_owned;
-+ /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
-+ * to check for clearing it when doing volatile dequeues. It's
-+ * one less thing to check in the critical path (SDQCR). */
-+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
-+ fq_clear(fq, QMAN_FQ_STATE_NE);
-+ /* this is duplicated from the SDQCR code, but we have stuff to
-+ * do before *and* after this callback, and we don't want
-+ * multiple if()s in the critical path (SDQCR). */
-+ res = fq->cb.dqrr(p, fq, dq);
-+ if (res == qman_cb_dqrr_stop)
-+ goto done;
-+ /* Check for VDQCR completion */
-+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
-+ clear_vdqcr(p, fq);
-+ } else {
-+ /* SDQCR: contextB points to the FQ */
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ fq = get_fq_table_entry(dq->contextB);
-+#else
-+ fq = (void *)(uintptr_t)dq->contextB;
-+#endif
-+ /* Now let the callback do its stuff */
-+ res = fq->cb.dqrr(p, fq, dq);
-+
-+ /* The callback can request that we exit without consuming this
-+ * entry nor advancing; */
-+ if (res == qman_cb_dqrr_stop)
-+ goto done;
-+ }
-+ /* Interpret 'dq' from a driver perspective. */
-+ /* Parking isn't possible unless HELDACTIVE was set. NB,
-+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
-+ * check for HELDACTIVE to cover both. */
-+ DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
-+ (res != qman_cb_dqrr_park));
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+ if (res != qman_cb_dqrr_defer)
-+ qm_dqrr_cdc_consume_1ptr(&p->p, orig_dq,
-+ (res == qman_cb_dqrr_park));
-+#else
-+ /* Defer just means "skip it, I'll consume it myself later on" */
-+ if (res != qman_cb_dqrr_defer)
-+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
-+#endif
-+ /* Move forward */
-+ qm_dqrr_next(&p->p);
-+ /* Entry processed and consumed, increment our counter. The callback can
-+ * request that we exit after consuming the entry, and we also exit if
-+ * we reach our processing limit, so loop back only if neither of these
-+ * conditions is met. */
-+ if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
-+ goto loop;
-+done:
-+ return limit;
-+}
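For reference, a minimal consumer-side sketch of the DQRR callback contract described in the comment above: the callback runs from __poll_portal_fast(), must not call back into qman_poll(), and its return value drives consumption. The fsl_qman.h header name, the my_* identifiers and the qman_cb_dqrr_consume result value are assumptions for illustration, not part of this patch.

    #include <linux/kernel.h>
    #include <linux/fsl_qman.h>     /* assumed public header for the QMan API */

    /* Hypothetical per-FQ context wrapping the driver's qman_fq */
    struct my_rx_fq {
            struct qman_fq fq;
            unsigned long frames;
    };

    static enum qman_cb_dqrr_result my_rx_dqrr(struct qman_portal *portal,
                                               struct qman_fq *fq,
                                               const struct qm_dqrr_entry *dq)
    {
            struct my_rx_fq *rx = container_of(fq, struct my_rx_fq, fq);

            /* Process dq->fd here; never call qman_poll() from this context */
            rx->frames++;

            /* Assumed "consume and continue" member of the dqrr result enum */
            return qman_cb_dqrr_consume;
    }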
-+
-+u32 qman_irqsource_get(void)
-+{
-+ /* The "irqsource" and "poll" APIs mustn't redirect when sharing; they
-+ * should shut the user out if the caller is not the primary CPU hosting
-+ * the portal. That's why we use the "raw" interface. */
-+ struct qman_portal *p = get_raw_affine_portal();
-+ u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_irqsource_get);
-+
-+int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
-+{
-+ __maybe_unused unsigned long irqflags;
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (p->sharing_redirect)
-+ return -EINVAL;
-+ else
-+#endif
-+ {
-+ bits = bits & QM_PIRQ_VISIBLE;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ /* Clear any previously remaining interrupt conditions in
-+ * QCSP_ISR. This prevents raising a false interrupt when
-+ * interrupt conditions are enabled in QCSP_IER.
-+ */
-+ qm_isr_status_clear(&p->p, bits);
-+ set_bits(bits, &p->irq_sources);
-+ qm_isr_enable_write(&p->p, p->irq_sources);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_p_irqsource_add);
-+
-+int qman_irqsource_add(u32 bits __maybe_unused)
-+{
-+ struct qman_portal *p = get_raw_affine_portal();
-+ int ret;
-+ ret = qman_p_irqsource_add(p, bits);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_irqsource_add);
-+
-+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
-+{
-+ __maybe_unused unsigned long irqflags;
-+ u32 ier;
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (p->sharing_redirect) {
-+ put_affine_portal();
-+ return -EINVAL;
-+ }
-+#endif
-+ /* Our interrupt handler only processes+clears status register bits that
-+ * are in p->irq_sources. As we're trimming that mask, if one of them
-+ * were to assert in the status register just before we remove it from
-+ * the enable register, there would be an interrupt-storm when we
-+ * release the IRQ lock. So we wait for the enable register update to
-+ * take effect in h/w (by reading it back) and then clear all other bits
-+ * in the status register. Ie. we clear them from ISR once it's certain
-+ * IER won't allow them to reassert. */
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ bits &= QM_PIRQ_VISIBLE;
-+ clear_bits(bits, &p->irq_sources);
-+ qm_isr_enable_write(&p->p, p->irq_sources);
-+
-+ ier = qm_isr_enable_read(&p->p);
-+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
-+ * data-dependency, ie. to protect against re-ordering. */
-+ qm_isr_status_clear(&p->p, ~ier);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_p_irqsource_remove);
-+
-+int qman_irqsource_remove(u32 bits)
-+{
-+ struct qman_portal *p = get_raw_affine_portal();
-+ int ret;
-+ ret = qman_p_irqsource_remove(p, bits);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_irqsource_remove);
-+
-+const cpumask_t *qman_affine_cpus(void)
-+{
-+ return &affine_mask;
-+}
-+EXPORT_SYMBOL(qman_affine_cpus);
-+
-+u16 qman_affine_channel(int cpu)
-+{
-+ if (cpu < 0) {
-+ struct qman_portal *portal = get_raw_affine_portal();
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ BUG_ON(portal->sharing_redirect);
-+#endif
-+ cpu = portal->config->public_cfg.cpu;
-+ put_affine_portal();
-+ }
-+ BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
-+ return affine_channels[cpu];
-+}
-+EXPORT_SYMBOL(qman_affine_channel);
-+
-+void *qman_get_affine_portal(int cpu)
-+{
-+ return affine_portals[cpu];
-+}
-+EXPORT_SYMBOL(qman_get_affine_portal);
-+
-+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
-+{
-+ int ret;
-+
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (unlikely(p->sharing_redirect))
-+ ret = -EINVAL;
-+ else
-+#endif
-+ {
-+ BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
-+ ret = __poll_portal_fast(p, limit);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_p_poll_dqrr);
-+
-+int qman_poll_dqrr(unsigned int limit)
-+{
-+ struct qman_portal *p = get_poll_portal();
-+ int ret;
-+ ret = qman_p_poll_dqrr(p, limit);
-+ put_poll_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_poll_dqrr);
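A caller-side sketch of the poll-mode model these wrappers expose, assuming the caller runs on the CPU hosting the affine portal and has removed QM_PIRQ_DQRI from the portal's interrupt sources (which the BUG_ON in qman_p_poll_dqrr() enforces); the budget of 16 and the function name are illustrative only.

    static void my_poll_rx(void)
    {
            int processed;

            /* Drive DQRR by software rather than by the portal interrupt */
            qman_irqsource_remove(QM_PIRQ_DQRI);

            do {
                    /* Process at most 16 DQRR entries per pass */
                    processed = qman_poll_dqrr(16);
            } while (processed == 16);

            /* Hand DQRR back to the interrupt handler */
            qman_irqsource_add(QM_PIRQ_DQRI);
    }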
-+
-+u32 qman_p_poll_slow(struct qman_portal *p)
-+{
-+ u32 ret;
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (unlikely(p->sharing_redirect))
-+ ret = (u32)-1;
-+ else
-+#endif
-+ {
-+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
-+ ret = __poll_portal_slow(p, is);
-+ qm_isr_status_clear(&p->p, ret);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_p_poll_slow);
-+
-+u32 qman_poll_slow(void)
-+{
-+ struct qman_portal *p = get_poll_portal();
-+ u32 ret;
-+ ret = qman_p_poll_slow(p);
-+ put_poll_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_poll_slow);
-+
-+/* Legacy wrapper */
-+void qman_p_poll(struct qman_portal *p)
-+{
-+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
-+ if (unlikely(p->sharing_redirect))
-+ return;
-+#endif
-+ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
-+ if (!(p->slowpoll--)) {
-+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
-+ u32 active = __poll_portal_slow(p, is);
-+ if (active) {
-+ qm_isr_status_clear(&p->p, active);
-+ p->slowpoll = SLOW_POLL_BUSY;
-+ } else
-+ p->slowpoll = SLOW_POLL_IDLE;
-+ }
-+ }
-+ if ((~p->irq_sources) & QM_PIRQ_DQRI)
-+ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
-+}
-+EXPORT_SYMBOL(qman_p_poll);
-+
-+void qman_poll(void)
-+{
-+ struct qman_portal *p = get_poll_portal();
-+ qman_p_poll(p);
-+ put_poll_portal();
-+}
-+EXPORT_SYMBOL(qman_poll);
-+
-+void qman_p_stop_dequeues(struct qman_portal *p)
-+{
-+ qman_stop_dequeues_ex(p);
-+}
-+EXPORT_SYMBOL(qman_p_stop_dequeues);
-+
-+void qman_stop_dequeues(void)
-+{
-+ struct qman_portal *p = get_affine_portal();
-+ qman_p_stop_dequeues(p);
-+ put_affine_portal();
-+}
-+EXPORT_SYMBOL(qman_stop_dequeues);
-+
-+void qman_p_start_dequeues(struct qman_portal *p)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ DPA_ASSERT(p->dqrr_disable_ref > 0);
-+ if (!(--p->dqrr_disable_ref))
-+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+}
-+EXPORT_SYMBOL(qman_p_start_dequeues);
-+
-+void qman_start_dequeues(void)
-+{
-+ struct qman_portal *p = get_affine_portal();
-+ qman_p_start_dequeues(p);
-+ put_affine_portal();
-+}
-+EXPORT_SYMBOL(qman_start_dequeues);
-+
-+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ pools &= p->config->public_cfg.pools;
-+ p->sdqcr |= pools;
-+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+}
-+EXPORT_SYMBOL(qman_p_static_dequeue_add);
-+
-+void qman_static_dequeue_add(u32 pools)
-+{
-+ struct qman_portal *p = get_affine_portal();
-+ qman_p_static_dequeue_add(p, pools);
-+ put_affine_portal();
-+}
-+EXPORT_SYMBOL(qman_static_dequeue_add);
-+
-+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ pools &= p->config->public_cfg.pools;
-+ p->sdqcr &= ~pools;
-+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+}
-+EXPORT_SYMBOL(qman_p_static_dequeue_del);
-+
-+void qman_static_dequeue_del(u32 pools)
-+{
-+ struct qman_portal *p = get_affine_portal();
-+ qman_p_static_dequeue_del(p, pools);
-+ put_affine_portal();
-+}
-+EXPORT_SYMBOL(qman_static_dequeue_del);
-+
-+u32 qman_p_static_dequeue_get(struct qman_portal *p)
-+{
-+ return p->sdqcr;
-+}
-+EXPORT_SYMBOL(qman_p_static_dequeue_get);
-+
-+u32 qman_static_dequeue_get(void)
-+{
-+ struct qman_portal *p = get_affine_portal();
-+ u32 ret = qman_p_static_dequeue_get(p);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_static_dequeue_get);
-+
-+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
-+ int park_request)
-+{
-+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
-+}
-+EXPORT_SYMBOL(qman_p_dca);
-+
-+void qman_dca(struct qm_dqrr_entry *dq, int park_request)
-+{
-+ struct qman_portal *p = get_affine_portal();
-+ qman_p_dca(p, dq, park_request);
-+ put_affine_portal();
-+}
-+EXPORT_SYMBOL(qman_dca);
-+
-+/*******************/
-+/* Frame queue API */
-+/*******************/
-+
-+static const char *mcr_result_str(u8 result)
-+{
-+ switch (result) {
-+ case QM_MCR_RESULT_NULL:
-+ return "QM_MCR_RESULT_NULL";
-+ case QM_MCR_RESULT_OK:
-+ return "QM_MCR_RESULT_OK";
-+ case QM_MCR_RESULT_ERR_FQID:
-+ return "QM_MCR_RESULT_ERR_FQID";
-+ case QM_MCR_RESULT_ERR_FQSTATE:
-+ return "QM_MCR_RESULT_ERR_FQSTATE";
-+ case QM_MCR_RESULT_ERR_NOTEMPTY:
-+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
-+ case QM_MCR_RESULT_PENDING:
-+ return "QM_MCR_RESULT_PENDING";
-+ case QM_MCR_RESULT_ERR_BADCOMMAND:
-+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
-+ }
-+ return "<unknown MCR result>";
-+}
-+
-+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
-+{
-+ struct qm_fqd fqd;
-+ struct qm_mcr_queryfq_np np;
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+
-+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
-+ int ret = qman_alloc_fqid(&fqid);
-+ if (ret)
-+ return ret;
-+ }
-+ spin_lock_init(&fq->fqlock);
-+ fq->fqid = fqid;
-+ fq->flags = flags;
-+ fq->state = qman_fq_state_oos;
-+ fq->cgr_groupid = 0;
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
-+ return -ENOMEM;
-+#endif
-+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
-+ return 0;
-+ /* Everything else is AS_IS support */
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ mcc = qm_mc_start(&p->p);
-+ mcc->queryfq.fqid = cpu_to_be32(fqid);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
-+ if (mcr->result != QM_MCR_RESULT_OK) {
-+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
-+ goto err;
-+ }
-+ fqd = mcr->queryfq.fqd;
-+ hw_fqd_to_cpu(&fqd);
-+ mcc = qm_mc_start(&p->p);
-+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
-+ if (mcr->result != QM_MCR_RESULT_OK) {
-+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
-+ goto err;
-+ }
-+ np = mcr->queryfq_np;
-+ /* Phew, have queryfq and queryfq_np results, stitch together
-+ * the FQ object from those. */
-+ fq->cgr_groupid = fqd.cgid;
-+ switch (np.state & QM_MCR_NP_STATE_MASK) {
-+ case QM_MCR_NP_STATE_OOS:
-+ break;
-+ case QM_MCR_NP_STATE_RETIRED:
-+ fq->state = qman_fq_state_retired;
-+ if (np.frm_cnt)
-+ fq_set(fq, QMAN_FQ_STATE_NE);
-+ break;
-+ case QM_MCR_NP_STATE_TEN_SCHED:
-+ case QM_MCR_NP_STATE_TRU_SCHED:
-+ case QM_MCR_NP_STATE_ACTIVE:
-+ fq->state = qman_fq_state_sched;
-+ if (np.state & QM_MCR_NP_STATE_R)
-+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
-+ break;
-+ case QM_MCR_NP_STATE_PARKED:
-+ fq->state = qman_fq_state_parked;
-+ break;
-+ default:
-+ DPA_ASSERT(NULL == "invalid FQ state");
-+ }
-+ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
-+ fq->state |= QMAN_FQ_STATE_CGR_EN;
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return 0;
-+err:
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
-+ qman_release_fqid(fqid);
-+ return -EIO;
-+}
-+EXPORT_SYMBOL(qman_create_fq);
-+
-+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
-+{
-+
-+ /* We don't need to lock the FQ as it is a pre-condition that the FQ be
-+ * quiesced. Instead, run some checks. */
-+ switch (fq->state) {
-+ case qman_fq_state_parked:
-+ DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
-+ case qman_fq_state_oos:
-+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
-+ qman_release_fqid(fq->fqid);
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ clear_fq_table_entry(fq->key);
-+#endif
-+ return;
-+ default:
-+ break;
-+ }
-+ DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
-+}
-+EXPORT_SYMBOL(qman_destroy_fq);
-+
-+u32 qman_fq_fqid(struct qman_fq *fq)
-+{
-+ return fq->fqid;
-+}
-+EXPORT_SYMBOL(qman_fq_fqid);
-+
-+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
-+{
-+ if (state)
-+ *state = fq->state;
-+ if (flags)
-+ *flags = fq->flags;
-+}
-+EXPORT_SYMBOL(qman_fq_state);
-+
-+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
-+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
-+
-+ if ((fq->state != qman_fq_state_oos) &&
-+ (fq->state != qman_fq_state_parked))
-+ return -EINVAL;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-+ return -EINVAL;
-+#endif
-+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
-+ /* OAC can't be set at the same time as TDTHRESH */
-+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
-+ return -EINVAL;
-+ }
-+ /* Issue an INITFQ_[PARKED|SCHED] management command */
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ FQLOCK(fq);
-+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-+ ((fq->state != qman_fq_state_oos) &&
-+ (fq->state != qman_fq_state_parked)))) {
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return -EBUSY;
-+ }
-+ mcc = qm_mc_start(&p->p);
-+ if (opts)
-+ mcc->initfq = *opts;
-+ mcc->initfq.fqid = cpu_to_be32(fq->fqid);
-+ mcc->initfq.count = 0;
-+
-+ /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
-+ * demux pointer. Otherwise, the caller-provided value is allowed to
-+ * stand, don't overwrite it. */
-+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
-+ dma_addr_t phys_fq;
-+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ mcc->initfq.fqd.context_b = fq->key;
-+#else
-+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
-+#endif
-+ /* and the physical address - NB, if the user wasn't trying to
-+ * set CONTEXTA, clear the stashing settings. */
-+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
-+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
-+ memset(&mcc->initfq.fqd.context_a, 0,
-+ sizeof(mcc->initfq.fqd.context_a));
-+ } else {
-+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(&p->pdev->dev, phys_fq)) {
-+ dev_err(&p->pdev->dev,
-+ "dma_map_single failed for fqid: %u\n",
-+ fq->fqid);
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return -EIO;
-+ }
-+
-+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
-+ }
-+ }
-+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
-+ mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
-+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
-+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
-+ mcc->initfq.fqd.dest.wq = 4;
-+ }
-+ }
-+ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
-+ cpu_to_hw_fqd(&mcc->initfq.fqd);
-+ qm_mc_commit(&p->p, myverb);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return -EIO;
-+ }
-+ if (opts) {
-+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
-+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
-+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
-+ else
-+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
-+ }
-+ if (opts->we_mask & QM_INITFQ_WE_CGID)
-+ fq->cgr_groupid = opts->fqd.cgid;
-+ }
-+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
-+ qman_fq_state_sched : qman_fq_state_parked;
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_init_fq);
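Taken together with qman_create_fq() above, typical initialisation looks roughly like the sketch below. The my_rx_dqrr callback is the one sketched earlier, and leaving fq_ctrl at zero is purely illustrative; a real driver would pick its own FQ controls and work queue.

    static int my_setup_rx_fq(struct qman_fq *fq)
    {
            struct qm_mcc_initfq opts;
            int err;

            fq->cb.dqrr = my_rx_dqrr;

            /* Let QMan pick the FQID; contextB stays a demux pointer */
            err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
            if (err)
                    return err;

            memset(&opts, 0, sizeof(opts));
            opts.we_mask = QM_INITFQ_WE_FQCTRL;
            opts.fqd.fq_ctrl = 0;           /* illustrative: no CGE etc. */

            /* Schedule immediately and target the local (affine) portal */
            err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED |
                                   QMAN_INITFQ_FLAG_LOCAL, &opts);
            if (err)
                    qman_destroy_fq(fq, 0);
            return err;
    }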
-+
-+int qman_schedule_fq(struct qman_fq *fq)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ int ret = 0;
-+ u8 res;
-+
-+ if (fq->state != qman_fq_state_parked)
-+ return -EINVAL;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-+ return -EINVAL;
-+#endif
-+ /* Issue a ALTERFQ_SCHED management command */
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ FQLOCK(fq);
-+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-+ (fq->state != qman_fq_state_parked))) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+ mcc = qm_mc_start(&p->p);
-+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ ret = -EIO;
-+ goto out;
-+ }
-+ fq->state = qman_fq_state_sched;
-+out:
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_schedule_fq);
-+
-+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ int rval;
-+ u8 res;
-+
-+ if ((fq->state != qman_fq_state_parked) &&
-+ (fq->state != qman_fq_state_sched))
-+ return -EINVAL;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-+ return -EINVAL;
-+#endif
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ FQLOCK(fq);
-+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-+ (fq->state == qman_fq_state_retired) ||
-+ (fq->state == qman_fq_state_oos))) {
-+ rval = -EBUSY;
-+ goto out;
-+ }
-+ rval = table_push_fq(p, fq);
-+ if (rval)
-+ goto out;
-+ mcc = qm_mc_start(&p->p);
-+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
-+ res = mcr->result;
-+ /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
-+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
-+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
-+ * friendly, otherwise the caller doesn't necessarily have a fully
-+ * "retired" FQ on return even if the retirement was immediate. However
-+ * this does mean some code duplication between here and
-+ * fq_state_change(). */
-+ if (likely(res == QM_MCR_RESULT_OK)) {
-+ rval = 0;
-+ /* Process 'fq' right away, we'll ignore FQRNI */
-+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
-+ fq_set(fq, QMAN_FQ_STATE_NE);
-+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
-+ fq_set(fq, QMAN_FQ_STATE_ORL);
-+ else
-+ table_del_fq(p, fq);
-+ if (flags)
-+ *flags = fq->flags;
-+ fq->state = qman_fq_state_retired;
-+ if (fq->cb.fqs) {
-+ /* Another issue with supporting "immediate" retirement
-+ * is that we're forced to drop FQRNIs, because by the
-+ * time they're seen it may already be "too late" (the
-+ * fq may have been OOS'd and free()'d already). But if
-+ * the upper layer wants a callback whether it's
-+ * immediate or not, we have to fake a "MR" entry to
-+ * look like an FQRNI... */
-+ struct qm_mr_entry msg;
-+ msg.verb = QM_MR_VERB_FQRNI;
-+ msg.fq.fqs = mcr->alterfq.fqs;
-+ msg.fq.fqid = fq->fqid;
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ msg.fq.contextB = fq->key;
-+#else
-+ msg.fq.contextB = (u32)(uintptr_t)fq;
-+#endif
-+ fq->cb.fqs(p, fq, &msg);
-+ }
-+ } else if (res == QM_MCR_RESULT_PENDING) {
-+ rval = 1;
-+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
-+ } else {
-+ rval = -EIO;
-+ table_del_fq(p, fq);
-+ }
-+out:
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return rval;
-+}
-+EXPORT_SYMBOL(qman_retire_fq);
-+
-+int qman_oos_fq(struct qman_fq *fq)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ int ret = 0;
-+ u8 res;
-+
-+ if (fq->state != qman_fq_state_retired)
-+ return -EINVAL;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-+ return -EINVAL;
-+#endif
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ FQLOCK(fq);
-+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
-+ (fq->state != qman_fq_state_retired))) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+ mcc = qm_mc_start(&p->p);
-+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ ret = -EIO;
-+ goto out;
-+ }
-+ fq->state = qman_fq_state_oos;
-+out:
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_oos_fq);
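A corresponding teardown sketch, assuming the FQ has already been drained. The pending-retirement case (return value 1) is deliberately simplified; a real driver would wait for the FQRN message delivered through fq->cb.fqs before taking the FQ out of service.

    static void my_teardown_fq(struct qman_fq *fq)
    {
            u32 flags;
            int err;

            err = qman_retire_fq(fq, &flags);
            if (err < 0)
                    return;
            if (err == 1)
                    return;         /* retirement pending, reported via cb.fqs */

            /* Fully retired (and assumed empty): take it out of service */
            if (!qman_oos_fq(fq))
                    qman_destroy_fq(fq, 0);
    }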
-+
-+int qman_fq_flow_control(struct qman_fq *fq, int xon)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ int ret = 0;
-+ u8 res;
-+ u8 myverb;
-+
-+ if ((fq->state == qman_fq_state_oos) ||
-+ (fq->state == qman_fq_state_retired) ||
-+ (fq->state == qman_fq_state_parked))
-+ return -EINVAL;
-+
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-+ return -EINVAL;
-+#endif
-+ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ FQLOCK(fq);
-+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-+ (fq->state == qman_fq_state_parked) ||
-+ (fq->state == qman_fq_state_oos) ||
-+ (fq->state == qman_fq_state_retired))) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+ mcc = qm_mc_start(&p->p);
-+ mcc->alterfq.fqid = fq->fqid;
-+ mcc->alterfq.count = 0;
-+ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
-+
-+ qm_mc_commit(&p->p, myverb);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-+
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ ret = -EIO;
-+ goto out;
-+ }
-+out:
-+ FQUNLOCK(fq);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_fq_flow_control);
-+
-+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ mcc = qm_mc_start(&p->p);
-+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK)
-+ *fqd = mcr->queryfq.fqd;
-+ hw_fqd_to_cpu(fqd);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res != QM_MCR_RESULT_OK)
-+ return -EIO;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_query_fq);
-+
-+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ mcc = qm_mc_start(&p->p);
-+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK) {
-+ *np = mcr->queryfq_np;
-+ np->fqd_link = be24_to_cpu(np->fqd_link);
-+ np->odp_seq = be16_to_cpu(np->odp_seq);
-+ np->orp_nesn = be16_to_cpu(np->orp_nesn);
-+ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
-+ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
-+ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
-+ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
-+ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
-+ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
-+ np->ics_surp = be16_to_cpu(np->ics_surp);
-+ np->byte_cnt = be32_to_cpu(np->byte_cnt);
-+ np->frm_cnt = be24_to_cpu(np->frm_cnt);
-+ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
-+ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
-+ np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
-+ np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
-+ np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
-+ }
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res == QM_MCR_RESULT_ERR_FQID)
-+ return -ERANGE;
-+ else if (res != QM_MCR_RESULT_OK)
-+ return -EIO;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_query_fq_np);
-+
-+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res, myverb;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
-+ QM_MCR_VERB_QUERYWQ;
-+ mcc = qm_mc_start(&p->p);
-+ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
-+ qm_mc_commit(&p->p, myverb);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK) {
-+ int i, array_len;
-+ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
-+ array_len = ARRAY_SIZE(mcr->querywq.wq_len);
-+ for (i = 0; i < array_len; i++)
-+ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
-+ }
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_query_wq);
-+
-+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
-+ struct qm_mcr_cgrtestwrite *result)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ mcc = qm_mc_start(&p->p);
-+ mcc->cgrtestwrite.cgid = cgr->cgrid;
-+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
-+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
-+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK)
-+ *result = mcr->cgrtestwrite;
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_testwrite_cgr);
-+
-+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+ int i;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ mcc = qm_mc_start(&p->p);
-+ mcc->querycgr.cgid = cgr->cgrid;
-+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK)
-+ *cgrd = mcr->querycgr;
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
-+ return -EIO;
-+ }
-+ cgrd->cgr.wr_parm_g.word =
-+ be32_to_cpu(cgrd->cgr.wr_parm_g.word);
-+ cgrd->cgr.wr_parm_y.word =
-+ be32_to_cpu(cgrd->cgr.wr_parm_y.word);
-+ cgrd->cgr.wr_parm_r.word =
-+ be32_to_cpu(cgrd->cgr.wr_parm_r.word);
-+ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
-+ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
-+ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
-+ be32_to_cpus(&cgrd->cscn_targ_swp[i]);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_query_cgr);
-+
-+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
-+{
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+ int i;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ qm_mc_start(&p->p);
-+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_MCC_VERB_QUERYCONGESTION);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK)
-+ memcpy_fromio(congestion, &mcr->querycongestion,
-+ sizeof(*congestion));
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
-+ return -EIO;
-+ }
-+
-+ for (i = 0; i < ARRAY_SIZE(congestion->state.__state); i++)
-+ be32_to_cpus(&congestion->state.__state[i]);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_query_congestion);
-+
-+/* internal function used as a wait_event() expression */
-+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ int ret = -EBUSY;
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ if (!p->vdqcr_owned) {
-+ FQLOCK(fq);
-+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
-+ goto escape;
-+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
-+ FQUNLOCK(fq);
-+ p->vdqcr_owned = fq;
-+ ret = 0;
-+ }
-+escape:
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ if (!ret)
-+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
-+ return ret;
-+}
-+
-+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
-+{
-+ int ret;
-+ *p = get_affine_portal();
-+ ret = set_p_vdqcr(*p, fq, vdqcr);
-+ put_affine_portal();
-+ return ret;
-+}
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
-+ u32 vdqcr, u32 flags)
-+{
-+ int ret = 0;
-+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
-+ ret = wait_event_interruptible(affine_queue,
-+ !(ret = set_p_vdqcr(p, fq, vdqcr)));
-+ else
-+ wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
-+ return ret;
-+}
-+
-+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
-+ u32 vdqcr, u32 flags)
-+{
-+ int ret = 0;
-+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
-+ ret = wait_event_interruptible(affine_queue,
-+ !(ret = set_vdqcr(p, fq, vdqcr)));
-+ else
-+ wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
-+ return ret;
-+}
-+#endif
-+
-+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
-+ u32 flags __maybe_unused, u32 vdqcr)
-+{
-+ int ret;
-+
-+ if ((fq->state != qman_fq_state_parked) &&
-+ (fq->state != qman_fq_state_retired))
-+ return -EINVAL;
-+ if (vdqcr & QM_VDQCR_FQID_MASK)
-+ return -EINVAL;
-+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
-+ return -EBUSY;
-+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
-+ ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
-+ else
-+#endif
-+ ret = set_p_vdqcr(p, fq, vdqcr);
-+ if (ret)
-+ return ret;
-+ /* VDQCR is set */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
-+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
-+ /* NB: don't propagate any error - the caller wouldn't
-+ * know whether the VDQCR was issued or not. A signal
-+ * could arrive after returning anyway, so the caller
-+ * can check signal_pending() if that's an issue. */
-+ wait_event_interruptible(affine_queue,
-+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
-+ else
-+ wait_event(affine_queue,
-+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_p_volatile_dequeue);
-+
-+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
-+ u32 vdqcr)
-+{
-+ struct qman_portal *p;
-+ int ret;
-+
-+ if ((fq->state != qman_fq_state_parked) &&
-+ (fq->state != qman_fq_state_retired))
-+ return -EINVAL;
-+ if (vdqcr & QM_VDQCR_FQID_MASK)
-+ return -EINVAL;
-+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
-+ return -EBUSY;
-+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
-+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
-+ else
-+#endif
-+ ret = set_vdqcr(&p, fq, vdqcr);
-+ if (ret)
-+ return ret;
-+ /* VDQCR is set */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
-+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
-+ /* NB: don't propagate any error - the caller wouldn't
-+ * know whether the VDQCR was issued or not. A signal
-+ * could arrive after returning anyway, so the caller
-+ * can check signal_pending() if that's an issue. */
-+ wait_event_interruptible(affine_queue,
-+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
-+ else
-+ wait_event(affine_queue,
-+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_volatile_dequeue);
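A sketch of draining a retired or parked FQ with a volatile dequeue. The vdqcr command word (frame count, precedence, etc.) is assumed to be built per the QMan block guide and is only passed through here; the WAIT/FINISH flags additionally depend on CONFIG_FSL_DPA_CAN_WAIT being enabled.

    static int my_drain_fq(struct qman_fq *fq, u32 vdqcr)
    {
            /* vdqcr must not carry an FQID: qman_volatile_dequeue()
             * inserts fq->fqid itself. Block until the command is
             * issued and until the volatile dequeue has completed. */
            return qman_volatile_dequeue(fq,
                            QMAN_VOLATILE_FLAG_WAIT |
                            QMAN_VOLATILE_FLAG_FINISH,
                            vdqcr);
    }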
-+
-+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
-+{
-+ if (avail)
-+ qm_eqcr_cce_prefetch(&p->p);
-+ else
-+ qm_eqcr_cce_update(&p->p);
-+}
-+
-+int qman_eqcr_is_empty(void)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ struct qman_portal *p = get_affine_portal();
-+ u8 avail;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ update_eqcr_ci(p, 0);
-+ avail = qm_eqcr_get_fill(&p->p);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return avail == 0;
-+}
-+EXPORT_SYMBOL(qman_eqcr_is_empty);
-+
-+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
-+{
-+ if (affine) {
-+ unsigned long irqflags __maybe_unused;
-+ struct qman_portal *p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ p->cb_dc_ern = handler;
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ } else
-+ cb_dc_ern = handler;
-+}
-+EXPORT_SYMBOL(qman_set_dc_ern);
-+
-+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
-+ unsigned long *irqflags __maybe_unused,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd,
-+ u32 flags)
-+{
-+ struct qm_eqcr_entry *eq;
-+ u8 avail;
-+ PORTAL_IRQ_LOCK(p, (*irqflags));
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-+ if (p->eqci_owned) {
-+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
-+ return NULL;
-+ }
-+ p->eqci_owned = fq;
-+ }
-+#endif
-+ if (p->use_eqcr_ci_stashing) {
-+ /*
-+ * The stashing case is easy: only update if we need to, in
-+ * order to try to liberate ring entries.
-+ */
-+ eq = qm_eqcr_start_stash(&p->p);
-+ } else {
-+ /*
-+ * The non-stashing case is harder; we need to prefetch ahead of
-+ * time.
-+ */
-+ avail = qm_eqcr_get_avail(&p->p);
-+ if (avail < 2)
-+ update_eqcr_ci(p, avail);
-+ eq = qm_eqcr_start_no_stash(&p->p);
-+ }
-+
-+ if (unlikely(!eq)) {
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
-+ p->eqci_owned = NULL;
-+#endif
-+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
-+ return NULL;
-+ }
-+ if (flags & QMAN_ENQUEUE_FLAG_DCA)
-+ eq->dca = QM_EQCR_DCA_ENABLE |
-+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
-+ QM_EQCR_DCA_PARK : 0) |
-+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
-+ eq->fqid = cpu_to_be32(fq->fqid);
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ eq->tag = cpu_to_be32(fq->key);
-+#else
-+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
-+#endif
-+ eq->fd = *fd;
-+ cpu_to_hw_fd(&eq->fd);
-+ return eq;
-+}
-+
-+static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
-+ unsigned long *irqflags __maybe_unused,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd,
-+ u32 flags)
-+{
-+ struct qm_eqcr_entry *eq;
-+ *p = get_affine_portal();
-+ eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
-+ if (!eq)
-+ put_affine_portal();
-+ return eq;
-+}
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
-+ unsigned long *irqflags __maybe_unused,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd,
-+ u32 flags)
-+{
-+ struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
-+ if (!eq)
-+ qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
-+ return eq;
-+}
-+static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
-+ unsigned long *irqflags __maybe_unused,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd,
-+ u32 flags)
-+{
-+ struct qm_eqcr_entry *eq;
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return NULL if signal occurs before completion. Signal
-+ * can occur during return. Caller must check for signal */
-+ wait_event_interruptible(affine_queue,
-+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
-+ else
-+ wait_event(affine_queue,
-+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
-+ return eq;
-+}
-+static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
-+ unsigned long *irqflags __maybe_unused,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd,
-+ u32 flags)
-+{
-+ struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
-+ if (!eq)
-+ qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
-+ return eq;
-+}
-+static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
-+ unsigned long *irqflags __maybe_unused,
-+ struct qman_fq *fq,
-+ const struct qm_fd *fd,
-+ u32 flags)
-+{
-+ struct qm_eqcr_entry *eq;
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return NULL if signal occurs before completion. Signal
-+ * can occur during return. Caller must check for signal */
-+ wait_event_interruptible(affine_queue,
-+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
-+ else
-+ wait_event(affine_queue,
-+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
-+ return eq;
-+}
-+#endif
-+
-+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_fd *fd, u32 flags)
-+{
-+ struct qm_eqcr_entry *eq;
-+ unsigned long irqflags __maybe_unused;
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
-+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
-+ else
-+#endif
-+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
-+ if (!eq)
-+ return -EBUSY;
-+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
-+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-+ /* Factor the below out, it's used from qman_enqueue_orp() too */
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return success even if signal occurs before
-+ * condition is true. pvb_commit guarantees success */
-+ wait_event_interruptible(affine_queue,
-+ (p->eqci_owned != fq));
-+ else
-+ wait_event(affine_queue, (p->eqci_owned != fq));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_p_enqueue);
-+
-+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
-+{
-+ struct qman_portal *p;
-+ struct qm_eqcr_entry *eq;
-+ unsigned long irqflags __maybe_unused;
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
-+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
-+ else
-+#endif
-+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
-+ if (!eq)
-+ return -EBUSY;
-+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
-+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-+ /* Factor the below out, it's used from qman_enqueue_orp() too */
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return success even if signal occurs before
-+ * condition is true. pvb_commit guarantees success */
-+ wait_event_interruptible(affine_queue,
-+ (p->eqci_owned != fq));
-+ else
-+ wait_event(affine_queue, (p->eqci_owned != fq));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_enqueue);
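A minimal transmit-side sketch, assuming the frame descriptor has been populated elsewhere; it simply spins while the EQCR is full, which is what the -EBUSY return above indicates. Real drivers typically back off or use the WAIT flags instead of busy-waiting.

    static int my_send(struct qman_fq *fq, const struct qm_fd *fd)
    {
            int err;

            /* -EBUSY means no EQCR entry was available yet */
            while ((err = qman_enqueue(fq, fd, 0)) == -EBUSY)
                    cpu_relax();

            return err;
    }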
-+
-+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_fd *fd, u32 flags,
-+ struct qman_fq *orp, u16 orp_seqnum)
-+{
-+ struct qm_eqcr_entry *eq;
-+ unsigned long irqflags __maybe_unused;
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
-+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
-+ else
-+#endif
-+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
-+ if (!eq)
-+ return -EBUSY;
-+ /* Process ORP-specifics here */
-+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
-+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
-+ else {
-+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
-+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
-+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
-+ else
-+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
-+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
-+ }
-+ eq->seqnum = cpu_to_be16(orp_seqnum);
-+ eq->orp = cpu_to_be32(orp->fqid);
-+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
-+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
-+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
-+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return success even if signal occurs before
-+ * condition is true. pvb_commit guarantees success */
-+ wait_event_interruptible(affine_queue,
-+ (p->eqci_owned != fq));
-+ else
-+ wait_event(affine_queue, (p->eqci_owned != fq));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_p_enqueue_orp);
-+
-+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
-+ struct qman_fq *orp, u16 orp_seqnum)
-+{
-+ struct qman_portal *p;
-+ struct qm_eqcr_entry *eq;
-+ unsigned long irqflags __maybe_unused;
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
-+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
-+ else
-+#endif
-+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
-+ if (!eq)
-+ return -EBUSY;
-+ /* Process ORP-specifics here */
-+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
-+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
-+ else {
-+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
-+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
-+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
-+ else
-+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
-+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
-+ }
-+ eq->seqnum = cpu_to_be16(orp_seqnum);
-+ eq->orp = cpu_to_be32(orp->fqid);
-+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
-+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
-+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
-+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return success even if signal occurs before
-+ * condition is true. pvb_commit guarantees success */
-+ wait_event_interruptible(affine_queue,
-+ (p->eqci_owned != fq));
-+ else
-+ wait_event(affine_queue, (p->eqci_owned != fq));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_enqueue_orp);
-+
-+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_fd *fd, u32 flags,
-+ qman_cb_precommit cb, void *cb_arg)
-+{
-+ struct qm_eqcr_entry *eq;
-+ unsigned long irqflags __maybe_unused;
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
-+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
-+ else
-+#endif
-+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
-+ if (!eq)
-+ return -EBUSY;
-+ /* invoke user supplied callback function before writing commit verb */
-+ if (cb(cb_arg)) {
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ return -EINVAL;
-+ }
-+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
-+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-+ /* Factor the below out, it's used from qman_enqueue_orp() too */
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return success even if signal occurs before
-+ * condition is true. pvb_commit guarantees success */
-+ wait_event_interruptible(affine_queue,
-+ (p->eqci_owned != fq));
-+ else
-+ wait_event(affine_queue, (p->eqci_owned != fq));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_p_enqueue_precommit);
-+
-+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
-+ u32 flags, qman_cb_precommit cb, void *cb_arg)
-+{
-+ struct qman_portal *p;
-+ struct qm_eqcr_entry *eq;
-+ unsigned long irqflags __maybe_unused;
-+
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
-+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
-+ else
-+#endif
-+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
-+ if (!eq)
-+ return -EBUSY;
-+ /* invoke user supplied callback function before writing commit verb */
-+ if (cb(cb_arg)) {
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return -EINVAL;
-+ }
-+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
-+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-+ /* Factor the below out, it's used from qman_enqueue_orp() too */
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
-+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
-+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
-+ /* NB: return success even if signal occurs before
-+ * condition is true. pvb_commit guarantees success */
-+ wait_event_interruptible(affine_queue,
-+ (p->eqci_owned != fq));
-+ else
-+ wait_event(affine_queue, (p->eqci_owned != fq));
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_enqueue_precommit);
-+
-+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
-+ struct qm_mcc_initcgr *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+ u8 verb = QM_MCC_VERB_MODIFYCGR;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ mcc = qm_mc_start(&p->p);
-+ if (opts)
-+ mcc->initcgr = *opts;
-+ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
-+ mcc->initcgr.cgr.wr_parm_g.word =
-+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
-+ mcc->initcgr.cgr.wr_parm_y.word =
-+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
-+ mcc->initcgr.cgr.wr_parm_r.word =
-+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
-+ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
-+ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
-+
-+ mcc->initcgr.cgid = cgr->cgrid;
-+ if (flags & QMAN_CGR_FLAG_USE_INIT)
-+ verb = QM_MCC_VERB_INITCGR;
-+ qm_mc_commit(&p->p, verb);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
-+ res = mcr->result;
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
-+}
-+EXPORT_SYMBOL(qman_modify_cgr);
-+
-+#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
-+ QM_CHANNEL_SWPORTAL0))
-+#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
-+#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
-+
-+static u8 qman_cgr_cpus[__CGR_NUM];
-+
-+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
-+ struct qm_mcc_initcgr *opts)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ struct qm_mcr_querycgr cgr_state;
-+ struct qm_mcc_initcgr local_opts;
-+ int ret;
-+ struct qman_portal *p;
-+
-+ /* We have to check that the provided CGRID is within the limits of the
-+ * data-structures, for obvious reasons. However we'll let h/w take
-+ * care of determining whether it's within the limits of what exists on
-+ * the SoC. */
-+ if (cgr->cgrid >= __CGR_NUM)
-+ return -EINVAL;
-+
-+ preempt_disable();
-+ p = get_affine_portal();
-+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
-+ preempt_enable();
-+
-+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
-+ cgr->chan = p->config->public_cfg.channel;
-+ spin_lock_irqsave(&p->cgr_lock, irqflags);
-+
-+ /* if no opts specified, just add it to the list */
-+ if (!opts)
-+ goto add_list;
-+
-+ ret = qman_query_cgr(cgr, &cgr_state);
-+ if (ret)
-+ goto release_lock;
-+ if (opts)
-+ local_opts = *opts;
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+ local_opts.cgr.cscn_targ_upd_ctrl =
-+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
-+ else
-+ /* Overwrite TARG */
-+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
-+ TARG_MASK(p);
-+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
-+
-+ /* send init if flags indicate so */
-+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
-+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
-+ else
-+ ret = qman_modify_cgr(cgr, 0, &local_opts);
-+ if (ret)
-+ goto release_lock;
-+add_list:
-+ list_add(&cgr->node, &p->cgr_cbs);
-+
-+ /* Determine if newly added object requires its callback to be called */
-+ ret = qman_query_cgr(cgr, &cgr_state);
-+ if (ret) {
-+ /* we can't go back, so proceed and return success, but scream
-+ * and wail to the log file */
-+ pr_crit("CGR HW state partially modified\n");
-+ ret = 0;
-+ goto release_lock;
-+ }
-+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
-+ cgr->cgrid))
-+ cgr->cb(p, cgr, 1);
-+release_lock:
-+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_create_cgr);
-+
-+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
-+ struct qm_mcc_initcgr *opts)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ struct qm_mcc_initcgr local_opts;
-+ struct qm_mcr_querycgr cgr_state;
-+ int ret;
-+
-+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
-+ pr_warn("This QMan version doesn't support to send CSCN to DCP portal\n");
-+ return -EINVAL;
-+ }
-+ /* We have to check that the provided CGRID is within the limits of the
-+ * data-structures, for obvious reasons. However we'll let h/w take
-+ * care of determining whether it's within the limits of what exists on
-+ * the SoC.
-+ */
-+ if (cgr->cgrid >= __CGR_NUM)
-+ return -EINVAL;
-+
-+ ret = qman_query_cgr(cgr, &cgr_state);
-+ if (ret)
-+ return ret;
-+
-+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
-+ if (opts)
-+ local_opts = *opts;
-+
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+ local_opts.cgr.cscn_targ_upd_ctrl =
-+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
-+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
-+ else
-+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
-+ TARG_DCP_MASK(dcp_portal);
-+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
-+
-+ /* send init if flags indicate so */
-+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
-+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
-+ &local_opts);
-+ else
-+ ret = qman_modify_cgr(cgr, 0, &local_opts);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_create_cgr_to_dcp);
-+
-+int qman_delete_cgr(struct qman_cgr *cgr)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ struct qm_mcr_querycgr cgr_state;
-+ struct qm_mcc_initcgr local_opts;
-+ int ret = 0;
-+ struct qman_cgr *i;
-+ struct qman_portal *p = get_affine_portal();
-+
-+ if (cgr->chan != p->config->public_cfg.channel) {
-+ pr_crit("Attempting to delete cgr from different portal "
-+ "than it was create: create 0x%x, delete 0x%x\n",
-+ cgr->chan, p->config->public_cfg.channel);
-+ ret = -EINVAL;
-+ goto put_portal;
-+ }
-+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
-+ spin_lock_irqsave(&p->cgr_lock, irqflags);
-+ list_del(&cgr->node);
-+ /*
-+ * If there are no other CGR objects for this CGRID in the list, update
-+ * CSCN_TARG accordingly
-+ */
-+ list_for_each_entry(i, &p->cgr_cbs, node)
-+ if ((i->cgrid == cgr->cgrid) && i->cb)
-+ goto release_lock;
-+ ret = qman_query_cgr(cgr, &cgr_state);
-+ if (ret) {
-+ /* add back to the list */
-+ list_add(&cgr->node, &p->cgr_cbs);
-+ goto release_lock;
-+ }
-+ /* Overwrite TARG */
-+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
-+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
-+ else
-+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
-+ ~(TARG_MASK(p));
-+ ret = qman_modify_cgr(cgr, 0, &local_opts);
-+ if (ret)
-+ /* add back to the list */
-+ list_add(&cgr->node, &p->cgr_cbs);
-+release_lock:
-+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
-+put_portal:
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_delete_cgr);
-+
-+struct cgr_comp {
-+ struct qman_cgr *cgr;
-+ struct completion completion;
-+};
-+
-+static void qman_delete_cgr_smp_call(void *p)
-+{
-+ qman_delete_cgr((struct qman_cgr *)p);
-+}
-+
-+void qman_delete_cgr_safe(struct qman_cgr *cgr)
-+{
-+ preempt_disable();
-+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
-+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
-+ qman_delete_cgr_smp_call, cgr, true);
-+ preempt_enable();
-+ return;
-+ }
-+ qman_delete_cgr(cgr);
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(qman_delete_cgr_safe);
-+
-+int qm_get_clock(u64 *clock_hz)
-+{
-+ if (!qman_clk) {
-+ pr_warn("Qman clock speed is unknown\n");
-+ return -EINVAL;
-+ }
-+ *clock_hz = (u64)qman_clk;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qm_get_clock);
-+
-+int qm_set_clock(u64 clock_hz)
-+{
-+ if (qman_clk)
-+ return -1;
-+ qman_clk = (u32)clock_hz;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qm_set_clock);
-+
-+/* CEETM management command */
-+static int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->lfqmt_config = *opts;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_LFQMT_CONFIG);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: CONFIGURE LFQMT failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+int qman_ceetm_query_lfqmt(int lfqid,
-+ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->lfqmt_query.lfqid = lfqid;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK)
-+ *lfqmt_query = mcr->lfqmt_query;
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: QUERY LFQMT failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_query_lfqmt);
-+
-+static int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->cq_config = *opts;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ res = mcr->result;
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG);
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: CONFIGURE CQ failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
-+ struct qm_mcr_ceetm_cq_query *cq_query)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->cq_query.cqid = cpu_to_be16(cqid);
-+ mcc->cq_query.dcpid = dcpid;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK) {
-+ *cq_query = mcr->cq_query;
-+ hw_cq_query_to_cpu(cq_query);
-+ }
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: QUERY CQ failed\n");
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_query_cq);
-+
-+static int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->dct_config = *opts;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG);
-+ res = mcr->result;
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: CONFIGURE DCT failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts,
-+ struct qm_mcr_ceetm_dct_query *dct_query)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p = get_affine_portal();
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->dct_query = *opts;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY);
-+ res = mcr->result;
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: QUERY DCT failed\n");
-+ return -EIO;
-+ }
-+
-+ *dct_query = mcr->dct_query;
-+ return 0;
-+}
-+
-+static int qman_ceetm_configure_class_scheduler(
-+ struct qm_mcc_ceetm_class_scheduler_config *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->csch_config = *opts;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
-+ res = mcr->result;
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel,
-+ struct qm_mcr_ceetm_class_scheduler_query *query)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->csch_query.cqcid = cpu_to_be16(channel->idx);
-+ mcc->csch_query.dcpid = channel->dcp_idx;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
-+ res = mcr->result;
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: QUERY CLASS SCHEDULER failed\n");
-+ return -EIO;
-+ }
-+ *query = mcr->csch_query;
-+ return 0;
-+}
-+
-+static int qman_ceetm_configure_mapping_shaper_tcfc(
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->mst_config = *opts;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
-+ res = mcr->result;
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+static int qman_ceetm_query_mapping_shaper_tcfc(
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts,
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->mst_query = *opts;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
-+ res = mcr->result;
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: QUERY CHANNEL MAPPING failed\n");
-+ return -EIO;
-+ }
-+
-+ *response = mcr->mst_query;
-+ return 0;
-+}
-+
-+static int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->ccgr_config = *opts;
-+
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG);
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: CONFIGURE CCGR failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+
-+int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
-+ struct qm_mcr_ceetm_ccgr_query *response)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->ccgr_query.ccgrid = cpu_to_be16(ccgr_query->ccgrid);
-+ mcc->ccgr_query.dcpid = ccgr_query->dcpid;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
-+
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK) {
-+ *response = mcr->ccgr_query;
-+ hw_ccgr_query_to_cpu(response);
-+ }
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: QUERY CCGR failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_query_ccgr);
-+
-+static int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq,
-+ u8 command_type, u16 xsfdr,
-+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ switch (command_type) {
-+ case 0:
-+ case 1:
-+ mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx;
-+ break;
-+ case 2:
-+ mcc->cq_ppxr.xsfdr = xsfdr;
-+ break;
-+ default:
-+ break;
-+ }
-+ mcc->cq_ppxr.ct = command_type;
-+ mcc->cq_ppxr.dcpid = cq->parent->dcp_idx;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n");
-+ return -EIO;
-+ }
-+ *cq_ppxr = mcr->cq_ppxr;
-+ return 0;
-+}
-+
-+static int qman_ceetm_query_statistics(u16 cid,
-+ enum qm_dc_portal dcp_idx,
-+ u16 command_type,
-+ struct qm_mcr_ceetm_statistics_query *query_result)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->stats_query_write.cid = cid;
-+ mcc->stats_query_write.dcpid = dcp_idx;
-+ mcc->stats_query_write.ct = command_type;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
-+
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: STATISTICS QUERY failed\n");
-+ return -EIO;
-+ }
-+ *query_result = mcr->stats_query;
-+ return 0;
-+}
-+
-+int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
-+ u16 command_type, u64 frame_count,
-+ u64 byte_count)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ mcc->stats_query_write.cid = cid;
-+ mcc->stats_query_write.dcpid = dcp_idx;
-+ mcc->stats_query_write.ct = command_type;
-+ mcc->stats_query_write.frm_cnt = frame_count;
-+ mcc->stats_query_write.byte_cnt = byte_count;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
-+
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
-+
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+
-+ res = mcr->result;
-+ if (res != QM_MCR_RESULT_OK) {
-+ pr_err("CEETM: STATISTICS WRITE failed\n");
-+ return -EIO;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_query_write_statistics);
-+
-+int qman_ceetm_bps2tokenrate(u64 bps, struct qm_ceetm_rate *token_rate,
-+ int rounding)
-+{
-+ u16 pres;
-+ u64 temp;
-+ u64 qman_freq;
-+ int ret;
-+
-+ /* Read PRES from CEET_CFG_PRES register */
-+ ret = qman_ceetm_get_prescaler(&pres);
-+ if (ret)
-+ return -EINVAL;
-+
-+ ret = qm_get_clock(&qman_freq);
-+ if (ret)
-+ return -EINVAL;
-+
-+ /* token-rate = bytes-per-second * update-reference-period
-+ *
-+ * Where token-rate is N/8192 for an integer N, and
-+ * update-reference-period is (2^22)/(PRES*QHz), where PRES
-+ * is the prescaler value and QHz is the QMan clock frequency.
-+ * So:
-+ *
-+ * token-rate = (bytes-per-second*2^22)/(PRES*QHz)
-+ *
-+ * Converting to bits-per-second gives:
-+ *
-+ * token-rate = (bps*2^19) / (PRES*QHz)
-+ * N = (bps*2^32) / (PRES*QHz)
-+ *
-+ * And to avoid 64-bit overflow if 'bps' is larger than 4Gbps
-+ * (yet minimise rounding error if 'bps' is small), we reorganise
-+ * the formula to use two 16-bit shifts rather than one 32-bit shift:
-+ * N = (((bps*2^16)/PRES)*2^16)/QHz
-+ */
-+ temp = ROUNDING((bps << 16), pres, rounding);
-+ temp = ROUNDING((temp << 16), qman_freq, rounding);
-+ token_rate->whole = temp >> 13;
-+ token_rate->fraction = temp & (((u64)1 << 13) - 1);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_bps2tokenrate);
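Note: the fixed-point conversion derived in the comment above can be exercised on its own. The minimal user-space sketch below mirrors the two 16-bit shifts, with plain truncating division standing in for the driver's ROUNDING() macro; the prescaler and QMan clock values are made-up examples, not values read from the CEET_CFG_PRES register.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t bps = 1000000000ULL;	/* requested rate: 1 Gbps */
		uint64_t pres = 0x3C0;		/* hypothetical prescaler value */
		uint64_t qhz = 600000000ULL;	/* hypothetical QMan clock, in Hz */
		uint64_t n;

		/* N = (((bps * 2^16) / PRES) * 2^16) / QHz, as derived above */
		n = (bps << 16) / pres;
		n = (n << 16) / qhz;

		/* token-rate is N/8192: a whole part plus a 13-bit fraction */
		printf("whole=%llu fraction=%llu\n",
		       (unsigned long long)(n >> 13),
		       (unsigned long long)(n & 0x1FFF));
		return 0;
	}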
-+
-+int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u64 *bps,
-+ int rounding)
-+{
-+ u16 pres;
-+ u64 temp;
-+ u64 qman_freq;
-+ int ret;
-+
-+ /* Read PRES from CEET_CFG_PRES register */
-+ ret = qman_ceetm_get_prescaler(&pres);
-+ if (ret)
-+ return -EINVAL;
-+
-+ ret = qm_get_clock(&qman_freq);
-+ if (ret)
-+ return -EINVAL;
-+
-+ /* bytes-per-second = token-rate / update-reference-period
-+ *
-+ * where "token-rate" is N/8192 for an integer N, and
-+ * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is
-+ * the prescaler value and QHz is the QMan clock frequency. So:
-+ *
-+ * bytes-per-second = (N/8192) / (4194304/(PRES*QHz))
-+ * = N*PRES*QHz / (4194304*8192)
-+ * = N*PRES*QHz / (2^35)
-+ *
-+ * Converting to bits-per-second gives:
-+ *
-+ * bps = N*PRES*QHz / (2^32)
-+ *
-+ * Note, the numerator has a maximum width of 72 bits! So to
-+ * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum
-+ * width 48 bits) divided by 2^16 (reducing to a maximum of 32 bits),
-+ * before multiplying by N (the product then stays within 64 bits).
-+ *
-+ * temp = PRES*QHz / (2^16)
-+ * bps = temp*N / (2^16)
-+ */
-+ temp = ROUNDING(qman_freq * pres, (u64)1 << 16 , rounding);
-+ temp *= ((token_rate->whole << 13) + token_rate->fraction);
-+ *bps = ROUNDING(temp, (u64)(1) << 16, rounding);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_tokenrate2bps);
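Note: the inverse path uses the same overflow-avoidance idea, pre-scaling PRES*QHz by 2^16 before multiplying by N. A rough sketch of that arithmetic, again with truncating division instead of ROUNDING(), and with the same hypothetical clock/prescaler caveat as above:

	#include <stdint.h>

	/* bps = ((PRES*QHz / 2^16) * N) / 2^16, where N = whole*8192 + fraction */
	static uint64_t tokenrate_to_bps(uint64_t whole, uint64_t fraction,
					 uint64_t pres, uint64_t qhz)
	{
		uint64_t n = (whole << 13) + fraction;

		return (((pres * qhz) >> 16) * n) >> 16;
	}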
-+
-+int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx,
-+ unsigned int sp_idx)
-+{
-+ struct qm_ceetm_sp *p;
-+
-+ DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) ||
-+ (dcp_idx == qm_dc_portal_fman1));
-+
-+ if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) ||
-+ (sp_idx >= (qman_ceetms[dcp_idx].sp_range[0] +
-+ qman_ceetms[dcp_idx].sp_range[1]))) {
-+ pr_err("Sub-portal index doesn't exist\n");
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) {
-+ if ((p->idx == sp_idx) && (p->is_claimed == 0)) {
-+ p->is_claimed = 1;
-+ *sp = p;
-+ return 0;
-+ }
-+ }
-+ pr_err("The sub-portal#%d is not available!\n", sp_idx);
-+ return -ENODEV;
-+}
-+EXPORT_SYMBOL(qman_ceetm_sp_claim);
-+
-+int qman_ceetm_sp_release(struct qm_ceetm_sp *sp)
-+{
-+ struct qm_ceetm_sp *p;
-+
-+ if (sp->lni && sp->lni->is_claimed == 1) {
-+ pr_err("The dependency of sub-portal has not been released!\n");
-+ return -EBUSY;
-+ }
-+
-+ list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) {
-+ if (p->idx == sp->idx) {
-+ p->is_claimed = 0;
-+ p->lni = NULL;
-+ }
-+ }
-+ /* Disable CEETM mode of this sub-portal */
-+ qman_sp_disable_ceetm_mode(sp->dcp_idx, sp->idx);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_sp_release);
-+
-+int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx,
-+ unsigned int lni_idx)
-+{
-+ struct qm_ceetm_lni *p;
-+
-+ if ((lni_idx < qman_ceetms[dcp_idx].lni_range[0]) ||
-+ (lni_idx >= (qman_ceetms[dcp_idx].lni_range[0] +
-+ qman_ceetms[dcp_idx].lni_range[1]))) {
-+ pr_err("The lni index is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) {
-+ if ((p->idx == lni_idx) && (p->is_claimed == 0)) {
-+ *lni = p;
-+ p->is_claimed = 1;
-+ return 0;
-+ }
-+ }
-+
-+ pr_err("The LNI#%d is not available!\n", lni_idx);
-+ return -EINVAL;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_claim);
-+
-+int qman_ceetm_lni_release(struct qm_ceetm_lni *lni)
-+{
-+ struct qm_ceetm_lni *p;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+ if (!list_empty(&lni->channels)) {
-+ pr_err("The LNI dependencies are not released!\n");
-+ return -EBUSY;
-+ }
-+
-+ list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) {
-+ if (p->idx == lni->idx) {
-+ p->shaper_enable = 0;
-+ p->shaper_couple = 0;
-+ p->cr_token_rate.whole = 0;
-+ p->cr_token_rate.fraction = 0;
-+ p->er_token_rate.whole = 0;
-+ p->er_token_rate.fraction = 0;
-+ p->cr_token_bucket_limit = 0;
-+ p->er_token_bucket_limit = 0;
-+ p->is_claimed = 0;
-+ }
-+ }
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ config_opts.dcpid = lni->dcp_idx;
-+ memset(&config_opts.shaper_config, 0,
-+ sizeof(config_opts.shaper_config));
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_release);
-+
-+int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
-+ config_opts.dcpid = sp->dcp_idx;
-+ config_opts.sp_mapping.map_lni_id = lni->idx;
-+ sp->lni = lni;
-+
-+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts))
-+ return -EINVAL;
-+
-+ /* Enable CEETM mode for this sub-portal */
-+ return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx);
-+}
-+EXPORT_SYMBOL(qman_ceetm_sp_set_lni);
-+
-+int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
-+ query_opts.dcpid = sp->dcp_idx;
-+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
-+ pr_err("Can't get SP <-> LNI mapping\n");
-+ return -EINVAL;
-+ }
-+ *lni_idx = query_result.sp_mapping_query.map_lni_id;
-+ sp->lni->idx = query_result.sp_mapping_query.map_lni_id;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_sp_get_lni);
-+
-+int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
-+ int oal)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+ if (lni->shaper_enable) {
-+ pr_err("The shaper has already been enabled\n");
-+ return -EINVAL;
-+ }
-+ lni->shaper_enable = 1;
-+ lni->shaper_couple = coupled;
-+ lni->oal = oal;
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ config_opts.dcpid = lni->dcp_idx;
-+ config_opts.shaper_config.cpl = coupled;
-+ config_opts.shaper_config.oal = oal;
-+ config_opts.shaper_config.crtcr = cpu_to_be24((lni->cr_token_rate.whole
-+ << 13) | lni->cr_token_rate.fraction);
-+ config_opts.shaper_config.ertcr = cpu_to_be24((lni->er_token_rate.whole
-+ << 13) | lni->er_token_rate.fraction);
-+ config_opts.shaper_config.crtbl =
-+ cpu_to_be16(lni->cr_token_bucket_limit);
-+ config_opts.shaper_config.ertbl =
-+ cpu_to_be16(lni->er_token_bucket_limit);
-+ config_opts.shaper_config.mps = 60;
-+
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper);
-+
-+int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+ if (!lni->shaper_enable) {
-+ pr_err("The shaper has been disabled\n");
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ config_opts.dcpid = lni->dcp_idx;
-+ config_opts.shaper_config.cpl = lni->shaper_couple;
-+ config_opts.shaper_config.oal = lni->oal;
-+ config_opts.shaper_config.crtbl =
-+ cpu_to_be16(lni->cr_token_bucket_limit);
-+ config_opts.shaper_config.ertbl =
-+ cpu_to_be16(lni->er_token_bucket_limit);
-+ /* Set CR/ER rate with all 1's to configure an infinite rate, thus
-+ * disable the shaping.
-+ */
-+ config_opts.shaper_config.crtcr = 0xFFFFFF;
-+ config_opts.shaper_config.ertcr = 0xFFFFFF;
-+ config_opts.shaper_config.mps = 60;
-+ lni->shaper_enable = 0;
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper);
-+
-+int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni)
-+{
-+ return lni->shaper_enable;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_is_shaper_enabled);
-+
-+int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ lni->cr_token_rate.whole = token_rate->whole;
-+ lni->cr_token_rate.fraction = token_rate->fraction;
-+ lni->cr_token_bucket_limit = token_limit;
-+ if (!lni->shaper_enable)
-+ return 0;
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ query_opts.dcpid = lni->dcp_idx;
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
-+ &query_result);
-+ if (ret) {
-+ pr_err("Fail to get current LNI shaper setting\n");
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ config_opts.dcpid = lni->dcp_idx;
-+ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole << 13)
-+ | (token_rate->fraction));
-+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
-+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
-+ config_opts.shaper_config.oal = query_result.shaper_query.oal;
-+ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
-+ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
-+ config_opts.shaper_config.mps = query_result.shaper_query.mps;
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate);
-+
-+int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 bps,
-+ u16 token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
-+ if (ret) {
-+ pr_err("Can not convert bps to token rate\n");
-+ return -EINVAL;
-+ }
-+
-+ return qman_ceetm_lni_set_commit_rate(lni, &token_rate, token_limit);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate_bps);
-+
-+int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ query_opts.dcpid = lni->dcp_idx;
-+
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret) {
-+ pr_err("The LNI CR rate or limit is not set\n");
-+ return -EINVAL;
-+ }
-+ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
-+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
-+ 0x1FFF;
-+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate);
-+
-+int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 *bps, u16 *token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_lni_get_commit_rate(lni, &token_rate, token_limit);
-+ if (ret) {
-+ pr_err("The LNI CR rate or limit is not available\n");
-+ return -EINVAL;
-+ }
-+
-+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate_bps);
-+
-+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ lni->er_token_rate.whole = token_rate->whole;
-+ lni->er_token_rate.fraction = token_rate->fraction;
-+ lni->er_token_bucket_limit = token_limit;
-+ if (!lni->shaper_enable)
-+ return 0;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ query_opts.dcpid = lni->dcp_idx;
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
-+ &query_result);
-+ if (ret) {
-+ pr_err("Fail to get current LNI shaper setting\n");
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ config_opts.dcpid = lni->dcp_idx;
-+ config_opts.shaper_config.ertcr = cpu_to_be24(
-+ (token_rate->whole << 13) | (token_rate->fraction));
-+ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
-+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
-+ config_opts.shaper_config.oal = query_result.shaper_query.oal;
-+ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
-+ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
-+ config_opts.shaper_config.mps = query_result.shaper_query.mps;
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate);
-+
-+int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 bps,
-+ u16 token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
-+ if (ret) {
-+ pr_err("Can not convert bps to token rate\n");
-+ return -EINVAL;
-+ }
-+ return qman_ceetm_lni_set_excess_rate(lni, &token_rate, token_limit);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate_bps);
-+
-+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
-+ query_opts.dcpid = lni->dcp_idx;
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret) {
-+ pr_err("The LNI ER rate or limit is not set\n");
-+ return -EINVAL;
-+ }
-+ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
-+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
-+ 0x1FFF;
-+ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate);
-+
-+int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 *bps, u16 *token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_lni_get_excess_rate(lni, &token_rate, token_limit);
-+ if (ret) {
-+ pr_err("The LNI ER rate or limit is not available\n");
-+ return -EINVAL;
-+ }
-+
-+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate_bps);
-+
-+#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - n) * 4)
-+#define QMAN_CEETM_LNITCFCC_ENABLE 0x8
-+int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
-+ unsigned int cq_level,
-+ int traffic_class)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ u64 lnitcfcc;
-+
-+ if ((cq_level > 15) || (traffic_class > 7)) {
-+ pr_err("The CQ or traffic class id is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
-+ query_opts.dcpid = lni->dcp_idx;
-+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
-+ pr_err("Fail to query tcfcc\n");
-+ return -EINVAL;
-+ }
-+
-+ lnitcfcc = be64_to_cpu(query_result.tcfc_query.lnitcfcc);
-+ if (traffic_class == -1) {
-+ /* disable tcfc for this CQ */
-+ lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE <<
-+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
-+ } else {
-+ lnitcfcc &= ~((u64)0xF <<
-+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
-+ lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE |
-+ traffic_class)) <<
-+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level);
-+ }
-+ config_opts.tcfc_config.lnitcfcc = cpu_to_be64(lnitcfcc);
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
-+ config_opts.dcpid = lni->dcp_idx;
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc);
-+
-+#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7
-+int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level,
-+ int *traffic_class)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+ u8 lnitcfcc;
-+
-+ if (cq_level > 15) {
-+ pr_err("the CQ level is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
-+ query_opts.dcpid = lni->dcp_idx;
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret)
-+ return ret;
-+ lnitcfcc = (u8)(be64_to_cpu(query_result.tcfc_query.lnitcfcc) >>
-+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
-+ if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE)
-+ *traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK;
-+ else
-+ *traffic_class = -1;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc);
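Note: per the QMAN_CEETM_LNITCFCC_* macros above, the LNITCFCC word packs one 4-bit field per class-queue level, with CQ level 0 in the most significant nibble, bit 3 of each nibble enabling TCFC and bits 2:0 holding the traffic class. A small host-order sketch of that layout (endianness handling, which the driver does with be64 conversions, is omitted):

	#include <stdint.h>

	static uint64_t lnitcfcc_set(uint64_t reg, unsigned int cq_level, int tc)
	{
		unsigned int shift = (15 - cq_level) * 4;

		reg &= ~((uint64_t)0xF << shift);	/* clear this CQ's nibble */
		if (tc >= 0)				/* tc == -1 leaves TCFC disabled */
			reg |= ((uint64_t)(0x8 | (unsigned int)tc)) << shift;
		return reg;
	}

	static int lnitcfcc_get(uint64_t reg, unsigned int cq_level)
	{
		uint8_t nib = (reg >> ((15 - cq_level) * 4)) & 0xF;

		return (nib & 0x8) ? (nib & 0x7) : -1;	/* -1: TCFC disabled */
	}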
-+
-+int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
-+ struct qm_ceetm_lni *lni)
-+{
-+ struct qm_ceetm_channel *p;
-+ u32 channel_idx;
-+ int ret = 0;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+ if (lni->dcp_idx == qm_dc_portal_fman0) {
-+ ret = qman_alloc_ceetm0_channel(&channel_idx);
-+ } else if (lni->dcp_idx == qm_dc_portal_fman1) {
-+ ret = qman_alloc_ceetm1_channel(&channel_idx);
-+ } else {
-+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
-+ lni->dcp_idx);
-+ return -EINVAL;
-+ }
-+
-+ if (ret) {
-+ pr_err("The is no channel available for LNI#%d\n", lni->idx);
-+ return -ENODEV;
-+ }
-+
-+ p = kzalloc(sizeof(*p), GFP_KERNEL);
-+ if (!p)
-+ return -ENOMEM;
-+ p->idx = channel_idx;
-+ p->dcp_idx = lni->dcp_idx;
-+ p->lni_idx = lni->idx;
-+ list_add_tail(&p->node, &lni->channels);
-+ INIT_LIST_HEAD(&p->class_queues);
-+ INIT_LIST_HEAD(&p->ccgs);
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
-+ channel_idx);
-+ config_opts.dcpid = lni->dcp_idx;
-+ config_opts.channel_mapping.map_lni_id = lni->idx;
-+ config_opts.channel_mapping.map_shaped = 0;
-+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
-+ pr_err("Can't map channel#%d for LNI#%d\n",
-+ channel_idx, lni->idx);
-+ return -EINVAL;
-+ }
-+ *channel = p;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_claim);
-+
-+int qman_ceetm_channel_release(struct qm_ceetm_channel *channel)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+ if (!list_empty(&channel->class_queues)) {
-+ pr_err("CEETM channel#%d has class queue unreleased!\n",
-+ channel->idx);
-+ return -EBUSY;
-+ }
-+ if (!list_empty(&channel->ccgs)) {
-+ pr_err("CEETM channel#%d has ccg unreleased!\n",
-+ channel->idx);
-+ return -EBUSY;
-+ }
-+
-+ /* channel->dcp_idx corresponds to known fman validation */
-+ if ((channel->dcp_idx != qm_dc_portal_fman0) &&
-+ (channel->dcp_idx != qm_dc_portal_fman1)) {
-+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
-+ channel->dcp_idx);
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ config_opts.dcpid = channel->dcp_idx;
-+ memset(&config_opts.shaper_config, 0,
-+ sizeof(config_opts.shaper_config));
-+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
-+ pr_err("Can't reset channel shapping parameters\n");
-+ return -EINVAL;
-+ }
-+
-+ if (channel->dcp_idx == qm_dc_portal_fman0) {
-+ qman_release_ceetm0_channelid(channel->idx);
-+ } else if (channel->dcp_idx == qm_dc_portal_fman1) {
-+ qman_release_ceetm1_channelid(channel->idx);
-+ } else {
-+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
-+ channel->dcp_idx);
-+ return -EINVAL;
-+ }
-+ list_del(&channel->node);
-+ kfree(channel);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_release);
-+
-+int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
-+ int coupled)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+ if (channel->shaper_enable == 1) {
-+ pr_err("This channel shaper has been enabled!\n");
-+ return -EINVAL;
-+ }
-+
-+ channel->shaper_enable = 1;
-+ channel->shaper_couple = coupled;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+
-+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
-+ pr_err("Can't query channel mapping\n");
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
-+ channel->idx);
-+ config_opts.dcpid = channel->dcp_idx;
-+ config_opts.channel_mapping.map_lni_id =
-+ query_result.channel_mapping_query.map_lni_id;
-+ config_opts.channel_mapping.map_shaped = 1;
-+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
-+ pr_err("Can't enable shaper for channel #%d\n", channel->idx);
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ config_opts.shaper_config.cpl = coupled;
-+ config_opts.shaper_config.crtcr =
-+ cpu_to_be24((channel->cr_token_rate.whole
-+ << 13) |
-+ channel->cr_token_rate.fraction);
-+ config_opts.shaper_config.ertcr =
-+ cpu_to_be24(channel->er_token_rate.whole
-+ << 13 |
-+ channel->er_token_rate.fraction);
-+ config_opts.shaper_config.crtbl =
-+ cpu_to_be16(channel->cr_token_bucket_limit);
-+ config_opts.shaper_config.ertbl =
-+ cpu_to_be16(channel->er_token_bucket_limit);
-+
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper);
-+
-+int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+
-+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
-+ pr_err("Can't query channel mapping\n");
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
-+ channel->idx);
-+ config_opts.dcpid = channel->dcp_idx;
-+ config_opts.channel_mapping.map_shaped = 0;
-+ config_opts.channel_mapping.map_lni_id =
-+ query_result.channel_mapping_query.map_lni_id;
-+
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper);
-+
-+int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+
-+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
-+ pr_err("Can't query channel mapping\n");
-+ return -EINVAL;
-+ }
-+
-+ return query_result.channel_mapping_query.map_shaped;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_is_shaper_enabled);
-+
-+int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret) {
-+ pr_err("Fail to get the current channel shaper setting\n");
-+ return -EINVAL;
-+ }
-+
-+ channel->cr_token_rate.whole = token_rate->whole;
-+ channel->cr_token_rate.fraction = token_rate->fraction;
-+ channel->cr_token_bucket_limit = token_limit;
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ config_opts.dcpid = channel->dcp_idx;
-+ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole
-+ << 13) | (token_rate->fraction));
-+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
-+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
-+ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
-+ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate);
-+
-+int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 bps, u16 token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
-+ if (ret) {
-+ pr_err("Can not convert bps to token rate\n");
-+ return -EINVAL;
-+ }
-+ return qman_ceetm_channel_set_commit_rate(channel, &token_rate,
-+ token_limit);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate_bps);
-+
-+int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret || !query_result.shaper_query.crtcr ||
-+ !query_result.shaper_query.crtbl) {
-+ pr_err("The channel commit rate or limit is not set\n");
-+ return -EINVAL;
-+ }
-+ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
-+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
-+ 0x1FFF;
-+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate);
-+
-+int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 *bps, u16 *token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_channel_get_commit_rate(channel, &token_rate,
-+ token_limit);
-+ if (ret) {
-+ pr_err("The channel CR rate or limit is not available\n");
-+ return -EINVAL;
-+ }
-+
-+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate_bps);
-+
-+int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret) {
-+ pr_err("Fail to get the current channel shaper setting\n");
-+ return -EINVAL;
-+ }
-+
-+ channel->er_token_rate.whole = token_rate->whole;
-+ channel->er_token_rate.fraction = token_rate->fraction;
-+ channel->er_token_bucket_limit = token_limit;
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ config_opts.dcpid = channel->dcp_idx;
-+ config_opts.shaper_config.ertcr = cpu_to_be24(
-+ (token_rate->whole << 13) | (token_rate->fraction));
-+ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
-+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
-+ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
-+ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate);
-+
-+int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 bps, u16 token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
-+ if (ret) {
-+ pr_err("Can not convert bps to token rate\n");
-+ return -EINVAL;
-+ }
-+ return qman_ceetm_channel_set_excess_rate(channel, &token_rate,
-+ token_limit);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate_bps);
-+
-+int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret || !query_result.shaper_query.ertcr ||
-+ !query_result.shaper_query.ertbl) {
-+ pr_err("The channel excess rate or limit is not set\n");
-+ return -EINVAL;
-+ }
-+ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
-+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
-+ 0x1FFF;
-+ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate);
-+
-+int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 *bps, u16 *token_limit)
-+{
-+ struct qm_ceetm_rate token_rate;
-+ int ret;
-+
-+ ret = qman_ceetm_channel_get_excess_rate(channel, &token_rate,
-+ token_limit);
-+ if (ret) {
-+ pr_err("The channel ER rate or limit is not available\n");
-+ return -EINVAL;
-+ }
-+
-+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate_bps);
-+
-+int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
-+ u16 token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
-+
-+ if (channel->shaper_enable) {
-+ pr_err("This channel is a shaped one\n");
-+ return -EINVAL;
-+ }
-+
-+ channel->cr_token_bucket_limit = token_limit;
-+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ config_opts.dcpid = channel->dcp_idx;
-+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
-+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_weight);
-+
-+int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
-+ u16 *token_limit)
-+{
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
-+ int ret;
-+
-+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
-+ channel->idx);
-+ query_opts.dcpid = channel->dcp_idx;
-+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
-+ if (ret || !query_result.shaper_query.crtbl) {
-+ pr_err("This unshaped channel's uFQ wight is unavailable\n");
-+ return -EINVAL;
-+ }
-+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_get_weight);
-+
-+int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b,
-+ unsigned int prio_a, unsigned int prio_b)
-+{
-+ struct qm_mcc_ceetm_class_scheduler_config config_opts;
-+ struct qm_mcr_ceetm_class_scheduler_query query_result;
-+ int i;
-+
-+ if (prio_a > 7) {
-+ pr_err("The priority of group A is out of range\n");
-+ return -EINVAL;
-+ }
-+ if (group_b && (prio_b > 7)) {
-+ pr_err("The priority of group B is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
-+ pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cqcid = cpu_to_be16(channel->idx);
-+ config_opts.dcpid = channel->dcp_idx;
-+ config_opts.gpc_combine_flag = !group_b;
-+ config_opts.gpc_prio_a = prio_a;
-+ config_opts.gpc_prio_b = prio_b;
-+
-+ for (i = 0; i < 8; i++)
-+ config_opts.w[i] = query_result.w[i];
-+ config_opts.crem = query_result.crem;
-+ config_opts.erem = query_result.erem;
-+
-+ return qman_ceetm_configure_class_scheduler(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_group);
-+
-+int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b,
-+ unsigned int *prio_a, unsigned int *prio_b)
-+{
-+ struct qm_mcr_ceetm_class_scheduler_query query_result;
-+
-+ if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
-+ pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
-+ return -EINVAL;
-+ }
-+ *group_b = !query_result.gpc_combine_flag;
-+ *prio_a = query_result.gpc_prio_a;
-+ *prio_b = query_result.gpc_prio_b;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_get_group);
-+
-+#define GROUP_A_ELIGIBILITY_SET (1 << 8)
-+#define GROUP_B_ELIGIBILITY_SET (1 << 9)
-+#define CQ_ELIGIBILITY_SET(n) (1 << (7 - n))
-+int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
-+ *channel, int group_b, int cre)
-+{
-+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
-+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
-+ int i;
-+
-+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
-+ pr_err("Cannot get the channel %d scheduler setting.\n",
-+ channel->idx);
-+ return -EINVAL;
-+ }
-+ csch_config.cqcid = cpu_to_be16(channel->idx);
-+ csch_config.dcpid = channel->dcp_idx;
-+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
-+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
-+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
-+
-+ for (i = 0; i < 8; i++)
-+ csch_config.w[i] = csch_query.w[i];
-+ csch_config.erem = csch_query.erem;
-+ if (group_b)
-+ csch_config.crem = (be16_to_cpu(csch_query.crem)
-+ & ~GROUP_B_ELIGIBILITY_SET)
-+ | (cre ? GROUP_B_ELIGIBILITY_SET : 0);
-+ else
-+ csch_config.crem = (be16_to_cpu(csch_query.crem)
-+ & ~GROUP_A_ELIGIBILITY_SET)
-+ | (cre ? GROUP_A_ELIGIBILITY_SET : 0);
-+
-+ csch_config.crem = cpu_to_be16(csch_config.crem);
-+
-+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
-+ pr_err("Cannot config channel %d's scheduler with "
-+ "group_%c's cr eligibility\n", channel->idx,
-+ group_b ? 'b' : 'a');
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_group_cr_eligibility);
-+
-+int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
-+ *channel, int group_b, int ere)
-+{
-+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
-+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
-+ int i;
-+
-+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
-+ pr_err("Cannot get the channel %d scheduler setting.\n",
-+ channel->idx);
-+ return -EINVAL;
-+ }
-+ csch_config.cqcid = cpu_to_be16(channel->idx);
-+ csch_config.dcpid = channel->dcp_idx;
-+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
-+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
-+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
-+
-+ for (i = 0; i < 8; i++)
-+ csch_config.w[i] = csch_query.w[i];
-+ csch_config.crem = csch_query.crem;
-+ if (group_b)
-+ csch_config.erem = (be16_to_cpu(csch_query.erem)
-+ & ~GROUP_B_ELIGIBILITY_SET)
-+ | (ere ? GROUP_B_ELIGIBILITY_SET : 0);
-+ else
-+ csch_config.erem = (be16_to_cpu(csch_query.erem)
-+ & ~GROUP_A_ELIGIBILITY_SET)
-+ | (ere ? GROUP_A_ELIGIBILITY_SET : 0);
-+
-+ csch_config.erem = cpu_to_be16(csch_config.erem);
-+
-+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
-+ pr_err("Cannot config channel %d's scheduler with "
-+ "group_%c's er eligibility\n", channel->idx,
-+ group_b ? 'b' : 'a');
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_group_er_eligibility);
-+
-+int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
-+ unsigned int idx, int cre)
-+{
-+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
-+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
-+ int i;
-+
-+ if (idx > 7) {
-+ pr_err("CQ index is out of range\n");
-+ return -EINVAL;
-+ }
-+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
-+ pr_err("Cannot get the channel %d scheduler setting.\n",
-+ channel->idx);
-+ return -EINVAL;
-+ }
-+ csch_config.cqcid = cpu_to_be16(channel->idx);
-+ csch_config.dcpid = channel->dcp_idx;
-+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
-+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
-+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
-+ for (i = 0; i < 8; i++)
-+ csch_config.w[i] = csch_query.w[i];
-+ csch_config.erem = csch_query.erem;
-+ csch_config.crem = (be16_to_cpu(csch_query.crem)
-+ & ~CQ_ELIGIBILITY_SET(idx)) |
-+ (cre ? CQ_ELIGIBILITY_SET(idx) : 0);
-+ csch_config.crem = cpu_to_be16(csch_config.crem);
-+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
-+ pr_err("Cannot config channel scheduler to set "
-+ "cr eligibility mask for CQ#%d\n", idx);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_cq_cr_eligibility);
-+
-+int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
-+ unsigned int idx, int ere)
-+{
-+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
-+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
-+ int i;
-+
-+ if (idx > 7) {
-+ pr_err("CQ index is out of range\n");
-+ return -EINVAL;
-+ }
-+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
-+ pr_err("Cannot get the channel %d scheduler setting.\n",
-+ channel->idx);
-+ return -EINVAL;
-+ }
-+ csch_config.cqcid = cpu_to_be16(channel->idx);
-+ csch_config.dcpid = channel->dcp_idx;
-+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
-+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
-+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
-+ for (i = 0; i < 8; i++)
-+ csch_config.w[i] = csch_query.w[i];
-+ csch_config.crem = csch_query.crem;
-+ csch_config.erem = (be16_to_cpu(csch_query.erem)
-+ & ~CQ_ELIGIBILITY_SET(idx)) |
-+ (ere ? CQ_ELIGIBILITY_SET(idx) : 0);
-+ csch_config.erem = cpu_to_be16(csch_config.erem);
-+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
-+ pr_err("Cannot config channel scheduler to set "
-+ "er eligibility mask for CQ#%d\n", idx);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_channel_set_cq_er_eligibility);
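For readers tracing the read-modify-write sequences above, the following is a minimal host-side sketch (illustrative only, not driver code; the set_cq_eligibility() helper is made up) of how the 16-bit crem/erem masks are composed from the eligibility macros before the cpu_to_be16() conversion:

#include <stdint.h>
#include <stdio.h>

#define GROUP_A_ELIGIBILITY_SET (1 << 8)
#define GROUP_B_ELIGIBILITY_SET (1 << 9)
#define CQ_ELIGIBILITY_SET(n)   (1 << (7 - (n)))

/* clear the CQ's bit, then set it again only if requested */
static uint16_t set_cq_eligibility(uint16_t mask, unsigned int cq_idx, int on)
{
	mask &= ~CQ_ELIGIBILITY_SET(cq_idx);
	return on ? mask | CQ_ELIGIBILITY_SET(cq_idx) : mask;
}

int main(void)
{
	uint16_t crem = 0;

	crem |= GROUP_A_ELIGIBILITY_SET;	/* group A is CR-eligible */
	crem = set_cq_eligibility(crem, 0, 1);	/* CQ#0 lands in bit 7 */
	crem = set_cq_eligibility(crem, 7, 1);	/* CQ#7 lands in bit 0 */
	printf("crem = 0x%04x\n", (unsigned)crem);	/* prints 0x0181 */
	return 0;
}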
-+
-+int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
-+ struct qm_ceetm_channel *channel, unsigned int idx,
-+ struct qm_ceetm_ccg *ccg)
-+{
-+ struct qm_ceetm_cq *p;
-+ struct qm_mcc_ceetm_cq_config cq_config;
-+
-+ if (idx > 7) {
-+ pr_err("The independent class queue id is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(p, &channel->class_queues, node) {
-+ if (p->idx == idx) {
-+ pr_err("The CQ#%d has been claimed!\n", idx);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ p = kmalloc(sizeof(*p), GFP_KERNEL);
-+ if (!p) {
-+ pr_err("Can't allocate memory for CQ#%d!\n", idx);
-+ return -ENOMEM;
-+ }
-+
-+ list_add_tail(&p->node, &channel->class_queues);
-+ p->idx = idx;
-+ p->is_claimed = 1;
-+ p->parent = channel;
-+ INIT_LIST_HEAD(&p->bound_lfqids);
-+
-+ if (ccg) {
-+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
-+ cq_config.dcpid = channel->dcp_idx;
-+ cq_config.ccgid = cpu_to_be16(ccg->idx);
-+ if (qman_ceetm_configure_cq(&cq_config)) {
-+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
-+ idx, ccg->idx);
-+ list_del(&p->node);
-+ kfree(p);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ *cq = p;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cq_claim);
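As an aside, a small standalone illustration (made-up values, not part of the driver) of the CQID encoding used for cq_config.cqid above: each channel owns sixteen class queues, so the channel index sits above the low nibble that carries the CQ index.

#include <stdio.h>

int main(void)
{
	unsigned int channel_idx = 0x25, cq_idx = 3;
	unsigned int cqid = (channel_idx << 4) | cq_idx;	/* as in cq_config.cqid */

	/* decompose again: low nibble is the CQ, the rest is the channel */
	printf("cqid=0x%03x -> channel=0x%02x cq=%u\n",
	       cqid, cqid >> 4, cqid & 0xf);	/* 0x253 -> 0x25, 3 */
	return 0;
}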
-+
-+int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
-+ struct qm_ceetm_channel *channel, unsigned int idx,
-+ struct qm_ceetm_ccg *ccg)
-+{
-+ struct qm_ceetm_cq *p;
-+ struct qm_mcc_ceetm_cq_config cq_config;
-+
-+ if ((idx < 8) || (idx > 15)) {
-+ pr_err("This grouped class queue id is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(p, &channel->class_queues, node) {
-+ if (p->idx == idx) {
-+ pr_err("The CQ#%d has been claimed!\n", idx);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ p = kmalloc(sizeof(*p), GFP_KERNEL);
-+ if (!p) {
-+ pr_err("Can't allocate memory for CQ#%d!\n", idx);
-+ return -ENOMEM;
-+ }
-+
-+ list_add_tail(&p->node, &channel->class_queues);
-+ p->idx = idx;
-+ p->is_claimed = 1;
-+ p->parent = channel;
-+ INIT_LIST_HEAD(&p->bound_lfqids);
-+
-+ if (ccg) {
-+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
-+ cq_config.dcpid = channel->dcp_idx;
-+ cq_config.ccgid = cpu_to_be16(ccg->idx);
-+ if (qman_ceetm_configure_cq(&cq_config)) {
-+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
-+ idx, ccg->idx);
-+ list_del(&p->node);
-+ kfree(p);
-+ return -EINVAL;
-+ }
-+ }
-+ *cq = p;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cq_claim_A);
-+
-+int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
-+ struct qm_ceetm_channel *channel, unsigned int idx,
-+ struct qm_ceetm_ccg *ccg)
-+{
-+ struct qm_ceetm_cq *p;
-+ struct qm_mcc_ceetm_cq_config cq_config;
-+
-+ if ((idx < 12) || (idx > 15)) {
-+ pr_err("This grouped class queue id is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(p, &channel->class_queues, node) {
-+ if (p->idx == idx) {
-+ pr_err("The CQ#%d has been claimed!\n", idx);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ p = kmalloc(sizeof(*p), GFP_KERNEL);
-+ if (!p) {
-+ pr_err("Can't allocate memory for CQ#%d!\n", idx);
-+ return -ENOMEM;
-+ }
-+
-+ list_add_tail(&p->node, &channel->class_queues);
-+ p->idx = idx;
-+ p->is_claimed = 1;
-+ p->parent = channel;
-+ INIT_LIST_HEAD(&p->bound_lfqids);
-+
-+ if (ccg) {
-+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
-+ cq_config.dcpid = channel->dcp_idx;
-+ cq_config.ccgid = cpu_to_be16(ccg->idx);
-+ if (qman_ceetm_configure_cq(&cq_config)) {
-+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
-+ idx, ccg->idx);
-+ list_del(&p->node);
-+ kfree(p);
-+ return -EINVAL;
-+ }
-+ }
-+ *cq = p;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cq_claim_B);
-+
-+int qman_ceetm_cq_release(struct qm_ceetm_cq *cq)
-+{
-+ if (!list_empty(&cq->bound_lfqids)) {
-+ pr_err("The CQ#%d has unreleased LFQID\n", cq->idx);
-+ return -EBUSY;
-+ }
-+ list_del(&cq->node);
-+ qman_ceetm_drain_cq(cq);
-+ kfree(cq);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cq_release);
-+
-+int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
-+ struct qm_ceetm_weight_code *weight_code)
-+{
-+ struct qm_mcc_ceetm_class_scheduler_config config_opts;
-+ struct qm_mcr_ceetm_class_scheduler_query query_result;
-+ int i;
-+
-+ if (cq->idx < 8) {
-+ pr_err("Can not set weight for ungrouped class queue\n");
-+ return -EINVAL;
-+ }
-+
-+ if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) {
-+ pr_err("Can't query channel#%d's scheduler!\n",
-+ cq->parent->idx);
-+ return -EINVAL;
-+ }
-+
-+ config_opts.cqcid = cpu_to_be16(cq->parent->idx);
-+ config_opts.dcpid = cq->parent->dcp_idx;
-+ config_opts.crem = query_result.crem;
-+ config_opts.erem = query_result.erem;
-+ config_opts.gpc_combine_flag = query_result.gpc_combine_flag;
-+ config_opts.gpc_prio_a = query_result.gpc_prio_a;
-+ config_opts.gpc_prio_b = query_result.gpc_prio_b;
-+
-+ for (i = 0; i < 8; i++)
-+ config_opts.w[i] = query_result.w[i];
-+ config_opts.w[cq->idx - 8] = ((weight_code->y << 3) |
-+ (weight_code->x & 0x7));
-+ return qman_ceetm_configure_class_scheduler(&config_opts);
-+}
-+EXPORT_SYMBOL(qman_ceetm_set_queue_weight);
-+
-+int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
-+ struct qm_ceetm_weight_code *weight_code)
-+{
-+ struct qm_mcr_ceetm_class_scheduler_query query_result;
-+
-+ if (cq->idx < 8) {
-+ pr_err("Can not get weight for ungrouped class queue\n");
-+ return -EINVAL;
-+ }
-+
-+ if (qman_ceetm_query_class_scheduler(cq->parent,
-+ &query_result)) {
-+ pr_err("Can't get the weight code for CQ#%d!\n", cq->idx);
-+ return -EINVAL;
-+ }
-+ weight_code->y = query_result.w[cq->idx - 8] >> 3;
-+ weight_code->x = query_result.w[cq->idx - 8] & 0x7;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_get_queue_weight);
-+
-+/* The WBFS code is represented as {x,y}; the effective weight can be calculated as:
-+ * effective weight = 2^x / (1 - (y/64))
-+ * = 2^(x+6) / (64 - y)
-+ */
-+static void reduce_fraction(u32 *n, u32 *d)
-+{
-+ u32 factor = 2;
-+ u32 lesser = (*n < *d) ? *n : *d;
-+ /* If factor exceeds the square-root of the lesser of *n and *d,
-+ * then there's no point continuing. Proof: if there was a factor
-+ * bigger than the square root, that would imply there exists
-+ * another factor smaller than the square-root with which it
-+ * multiplies to give 'lesser' - but that's a contradiction
-+ * because the other factor would have already been found and
-+ * divided out.
-+ */
-+ while ((factor * factor) <= lesser) {
-+ /* If 'factor' is a factor of *n and *d, divide them both
-+ * by 'factor' as many times as possible.
-+ */
-+ while (!(*n % factor) && !(*d % factor)) {
-+ *n /= factor;
-+ *d /= factor;
-+ lesser /= factor;
-+ }
-+ if (factor == 2)
-+ factor = 3;
-+ else
-+ factor += 2;
-+ }
-+}
-+
-+int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
-+ u32 *numerator,
-+ u32 *denominator)
-+{
-+ *numerator = (u32) 1 << (weight_code->x + 6);
-+ *denominator = 64 - weight_code->y;
-+ reduce_fraction(numerator, denominator);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_wbfs2ratio);
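As a quick numeric check of the formula in the comment above, here is a standalone sketch (Euclid's GCD stands in for the driver's trial-division reduce_fraction(); the reduced fraction comes out the same):

#include <stdio.h>

int main(void)
{
	unsigned int x = 2, y = 32;	/* example WBFS code {x,y} */
	unsigned int n = 1u << (x + 6);	/* 2^(x+6) = 256 */
	unsigned int d = 64 - y;	/* 32 */
	unsigned int a = n, b = d, t;

	while (b) {			/* gcd(256, 32) = 32 */
		t = a % b;
		a = b;
		b = t;
	}
	/* 256/32 reduces to 8/1, i.e. 2^2 / (1 - 32/64) = 8 */
	printf("weight = %u/%u = %u/%u\n", n, d, n / a, d / a);
	return 0;
}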
-+
-+/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive).
-+ * So find 'x' by range, and then estimate 'y' using:
-+ * 64 - y = 2^(x + 6) / weight
-+ * = 2^(x + 6) / (n/d)
-+ * = d * 2^(x+6) / n
-+ * y = 64 - (d * 2^(x+6) / n)
-+ */
-+int qman_ceetm_ratio2wbfs(u32 numerator,
-+ u32 denominator,
-+ struct qm_ceetm_weight_code *weight_code,
-+ int rounding)
-+{
-+ unsigned int y, x = 0;
-+ /* search incrementing 'x' until:
-+ * weight < 2^(x+1)
-+ * n/d < 2^(x+1)
-+ * n < d * 2^(x+1)
-+ */
-+ while ((x < 8) && (numerator >= (denominator << (x + 1))))
-+ x++;
-+ if (x >= 8)
-+ return -ERANGE;
-+ /* because of the subtraction, use '-rounding' */
-+ y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding);
-+ if (y >= 32)
-+ return -ERANGE;
-+ weight_code->x = x;
-+ weight_code->y = y;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_ratio2wbfs);
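And the reverse direction sketched standalone (plain division stands in for the driver's ROUNDING() macro, which also supports round-up and round-to-nearest; the x < 8 and y < 32 limits are the same as above):

#include <stdio.h>

int main(void)
{
	unsigned int n = 5, d = 2;	/* requested weight 5/2 = 2.5 */
	unsigned int x = 0, y;

	/* find x such that 2^x <= n/d < 2^(x+1) */
	while (x < 8 && n >= (d << (x + 1)))
		x++;
	if (x >= 8)
		return 1;
	y = 64 - (d << (x + 6)) / n;	/* 64 - d*2^(x+6)/n */
	if (y >= 32)
		return 1;
	/* {1,13}: effective weight 2^7 / (64 - 13) = 128/51, about 2.51 */
	printf("{x,y} = {%u,%u}\n", x, y);
	return 0;
}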
-+
-+int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio)
-+{
-+ struct qm_ceetm_weight_code weight_code;
-+
-+ if (qman_ceetm_ratio2wbfs(ratio, 100, &weight_code, 0)) {
-+ pr_err("Cannot get wbfs code for cq %x\n", cq->idx);
-+ return -EINVAL;
-+ }
-+ return qman_ceetm_set_queue_weight(cq, &weight_code);
-+}
-+EXPORT_SYMBOL(qman_ceetm_set_queue_weight_in_ratio);
-+
-+int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio)
-+{
-+ struct qm_ceetm_weight_code weight_code;
-+ u32 n, d;
-+
-+ if (qman_ceetm_get_queue_weight(cq, &weight_code)) {
-+ pr_err("Cannot query the weight code for cq%x\n", cq->idx);
-+ return -EINVAL;
-+ }
-+
-+ if (qman_ceetm_wbfs2ratio(&weight_code, &n, &d)) {
-+ pr_err("Cannot get the ratio with wbfs code\n");
-+ return -EINVAL;
-+ }
-+
-+ *ratio = (n * 100) / d;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_get_queue_weight_in_ratio);
-+
-+int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
-+ u64 *frame_count, u64 *byte_count)
-+{
-+ struct qm_mcr_ceetm_statistics_query result;
-+ u16 cid, command_type;
-+ enum qm_dc_portal dcp_idx;
-+ int ret;
-+
-+ cid = cpu_to_be16((cq->parent->idx << 4) | cq->idx);
-+ dcp_idx = cq->parent->dcp_idx;
-+ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
-+ command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS;
-+ else
-+ command_type = CEETM_QUERY_DEQUEUE_STATISTICS;
-+
-+ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
-+ if (ret) {
-+ pr_err("Can't query the statistics of CQ#%d!\n", cq->idx);
-+ return -EINVAL;
-+ }
-+
-+ *frame_count = be40_to_cpu(result.frm_cnt);
-+ *byte_count = be48_to_cpu(result.byte_cnt);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics);
-+
-+int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq)
-+{
-+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread ppxr;
-+ int ret;
-+
-+ do {
-+ ret = qman_ceetm_cq_peek_pop_xsfdrread(cq, 1, 0, &ppxr);
-+ if (ret) {
-+ pr_err("Failed to pop frame from CQ\n");
-+ return -EINVAL;
-+ }
-+ } while (!(ppxr.stat & 0x2));
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_drain_cq);
-+
-+#define CEETM_LFQMT_LFQID_MSB 0xF00000
-+#define CEETM_LFQMT_LFQID_LSB 0x000FFF
-+int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
-+ struct qm_ceetm_cq *cq)
-+{
-+ struct qm_ceetm_lfq *p;
-+ u32 lfqid;
-+ int ret = 0;
-+ struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
-+
-+ if (cq->parent->dcp_idx == qm_dc_portal_fman0) {
-+ ret = qman_alloc_ceetm0_lfqid(&lfqid);
-+ } else if (cq->parent->dcp_idx == qm_dc_portal_fman1) {
-+ ret = qman_alloc_ceetm1_lfqid(&lfqid);
-+ } else {
-+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
-+ cq->parent->dcp_idx);
-+ return -EINVAL;
-+ }
-+
-+ if (ret) {
-+		pr_err("There is no lfqid available for CQ#%d!\n", cq->idx);
-+ return -ENODEV;
-+ }
-+ p = kmalloc(sizeof(*p), GFP_KERNEL);
-+ if (!p)
-+ return -ENOMEM;
-+ p->idx = lfqid;
-+ p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB);
-+ p->parent = cq->parent;
-+ list_add_tail(&p->node, &cq->bound_lfqids);
-+
-+ lfqmt_config.lfqid = cpu_to_be24(CEETM_LFQMT_LFQID_MSB |
-+ (cq->parent->dcp_idx << 16) |
-+ (lfqid & CEETM_LFQMT_LFQID_LSB));
-+ lfqmt_config.cqid = cpu_to_be16((cq->parent->idx << 4) | (cq->idx));
-+ lfqmt_config.dctidx = cpu_to_be16(p->dctidx);
-+ if (qman_ceetm_configure_lfqmt(&lfqmt_config)) {
-+ pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n",
-+ lfqid, cq->idx);
-+ list_del(&p->node);
-+ kfree(p);
-+ return -EINVAL;
-+ }
-+ *lfq = p;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lfq_claim);
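A worked example of the 24-bit LFQMT command id assembled above, before the cpu_to_be24() conversion (the values are made up; lfqid is whatever qman_alloc_ceetm1_lfqid() happened to hand back):

#include <stdio.h>

#define CEETM_LFQMT_LFQID_MSB 0xF00000
#define CEETM_LFQMT_LFQID_LSB 0x000FFF

int main(void)
{
	unsigned int dcp_idx = 1;	/* qm_dc_portal_fman1 */
	unsigned int lfqid = 0x02a;	/* example allocated logical FQ id */
	unsigned int cmd = CEETM_LFQMT_LFQID_MSB | (dcp_idx << 16) |
			   (lfqid & CEETM_LFQMT_LFQID_LSB);

	printf("lfqmt_config.lfqid = 0x%06x\n", cmd);	/* 0xf1002a */
	return 0;
}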
-+
-+int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq)
-+{
-+ if (lfq->parent->dcp_idx == qm_dc_portal_fman0) {
-+ qman_release_ceetm0_lfqid(lfq->idx);
-+ } else if (lfq->parent->dcp_idx == qm_dc_portal_fman1) {
-+ qman_release_ceetm1_lfqid(lfq->idx);
-+ } else {
-+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
-+ lfq->parent->dcp_idx);
-+ return -EINVAL;
-+ }
-+ list_del(&lfq->node);
-+ kfree(lfq);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lfq_release);
-+
-+int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a,
-+ u32 context_b)
-+{
-+ struct qm_mcc_ceetm_dct_config dct_config;
-+ lfq->context_a = context_a;
-+ lfq->context_b = context_b;
-+ dct_config.dctidx = cpu_to_be16((u16)lfq->dctidx);
-+ dct_config.dcpid = lfq->parent->dcp_idx;
-+ dct_config.context_b = cpu_to_be32(context_b);
-+ dct_config.context_a = cpu_to_be64(context_a);
-+
-+ return qman_ceetm_configure_dct(&dct_config);
-+}
-+EXPORT_SYMBOL(qman_ceetm_lfq_set_context);
-+
-+int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a,
-+ u32 *context_b)
-+{
-+ struct qm_mcc_ceetm_dct_query dct_query;
-+ struct qm_mcr_ceetm_dct_query query_result;
-+
-+ dct_query.dctidx = cpu_to_be16(lfq->dctidx);
-+ dct_query.dcpid = lfq->parent->dcp_idx;
-+ if (qman_ceetm_query_dct(&dct_query, &query_result)) {
-+ pr_err("Can't query LFQID#%d's context!\n", lfq->idx);
-+ return -EINVAL;
-+ }
-+ *context_a = be64_to_cpu(query_result.context_a);
-+ *context_b = be32_to_cpu(query_result.context_b);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_lfq_get_context);
-+
-+int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq)
-+{
-+ spin_lock_init(&fq->fqlock);
-+ fq->fqid = lfq->idx;
-+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
-+ if (lfq->ern)
-+ fq->cb.ern = lfq->ern;
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
-+ return -ENOMEM;
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_create_fq);
-+
-+#define MAX_CCG_IDX 0x000F
-+int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
-+ struct qm_ceetm_channel *channel,
-+ unsigned int idx,
-+ void (*cscn)(struct qm_ceetm_ccg *,
-+ void *cb_ctx,
-+ int congested),
-+ void *cb_ctx)
-+{
-+ struct qm_ceetm_ccg *p;
-+
-+ if (idx > MAX_CCG_IDX) {
-+ pr_err("The given ccg index is out of range\n");
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(p, &channel->ccgs, node) {
-+ if (p->idx == idx) {
-+ pr_err("The CCG#%d has been claimed\n", idx);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ p = kmalloc(sizeof(*p), GFP_KERNEL);
-+ if (!p) {
-+ pr_err("Can't allocate memory for CCG#%d!\n", idx);
-+ return -ENOMEM;
-+ }
-+
-+ list_add_tail(&p->node, &channel->ccgs);
-+
-+ p->idx = idx;
-+ p->parent = channel;
-+ p->cb = cscn;
-+ p->cb_ctx = cb_ctx;
-+ INIT_LIST_HEAD(&p->cb_node);
-+
-+ *ccg = p;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_ccg_claim);
-+
-+int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg)
-+{
-+ unsigned long irqflags __maybe_unused;
-+ struct qm_mcc_ceetm_ccgr_config config_opts;
-+ int ret = 0;
-+ struct qman_portal *p = get_affine_portal();
-+
-+ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
-+ spin_lock_irqsave(&p->ccgr_lock, irqflags);
-+ if (!list_empty(&ccg->cb_node))
-+ list_del(&ccg->cb_node);
-+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
-+ (ccg->parent->idx << 4) | ccg->idx);
-+ config_opts.dcpid = ccg->parent->dcp_idx;
-+ config_opts.we_mask = cpu_to_be16(QM_CCGR_WE_CSCN_TUPD);
-+ config_opts.cm_config.cscn_tupd = cpu_to_be16(PORTAL_IDX(p));
-+ ret = qman_ceetm_configure_ccgr(&config_opts);
-+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
-+ put_affine_portal();
-+
-+ list_del(&ccg->node);
-+ kfree(ccg);
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_ceetm_ccg_release);
-+
-+int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask,
-+ const struct qm_ceetm_ccg_params *params)
-+{
-+ struct qm_mcc_ceetm_ccgr_config config_opts;
-+ unsigned long irqflags __maybe_unused;
-+ int ret;
-+ struct qman_portal *p;
-+
-+ if (((ccg->parent->idx << 4) | ccg->idx) >= (2 * __CGR_NUM))
-+ return -EINVAL;
-+
-+ p = get_affine_portal();
-+
-+ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
-+ spin_lock_irqsave(&p->ccgr_lock, irqflags);
-+
-+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
-+ (ccg->parent->idx << 4) | ccg->idx);
-+ config_opts.dcpid = ccg->parent->dcp_idx;
-+ config_opts.we_mask = we_mask;
-+ if (we_mask & QM_CCGR_WE_CSCN_EN) {
-+ config_opts.we_mask |= QM_CCGR_WE_CSCN_TUPD;
-+ config_opts.cm_config.cscn_tupd = cpu_to_be16(
-+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p));
-+ }
-+ config_opts.we_mask = cpu_to_be16(config_opts.we_mask);
-+ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
-+ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
-+ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
-+ config_opts.cm_config.ctl_td_en = params->td_en;
-+ config_opts.cm_config.ctl_td_mode = params->td_mode;
-+ config_opts.cm_config.ctl_cscn_en = params->cscn_en;
-+ config_opts.cm_config.ctl_mode = params->mode;
-+ config_opts.cm_config.oal = params->oal;
-+ config_opts.cm_config.cs_thres.hword =
-+ cpu_to_be16(params->cs_thres_in.hword);
-+ config_opts.cm_config.cs_thres_x.hword =
-+ cpu_to_be16(params->cs_thres_out.hword);
-+ config_opts.cm_config.td_thres.hword =
-+ cpu_to_be16(params->td_thres.hword);
-+ config_opts.cm_config.wr_parm_g.word =
-+ cpu_to_be32(params->wr_parm_g.word);
-+ config_opts.cm_config.wr_parm_y.word =
-+ cpu_to_be32(params->wr_parm_y.word);
-+ config_opts.cm_config.wr_parm_r.word =
-+ cpu_to_be32(params->wr_parm_r.word);
-+ ret = qman_ceetm_configure_ccgr(&config_opts);
-+ if (ret) {
-+ pr_err("Configure CCGR CM failed!\n");
-+ goto release_lock;
-+ }
-+
-+ if (we_mask & QM_CCGR_WE_CSCN_EN)
-+ if (list_empty(&ccg->cb_node))
-+ list_add(&ccg->cb_node,
-+ &p->ccgr_cbs[ccg->parent->dcp_idx]);
-+release_lock:
-+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
-+ put_affine_portal();
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_ceetm_ccg_set);
-+
-+#define CEETM_CCGR_CTL_MASK 0x01
-+int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
-+ struct qm_ceetm_ccg_params *params)
-+{
-+ struct qm_mcc_ceetm_ccgr_query query_opts;
-+ struct qm_mcr_ceetm_ccgr_query query_result;
-+
-+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
-+ (ccg->parent->idx << 4) | ccg->idx);
-+ query_opts.dcpid = ccg->parent->dcp_idx;
-+
-+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
-+ pr_err("Can't query CCGR#%d\n", ccg->idx);
-+ return -EINVAL;
-+ }
-+
-+ params->wr_parm_r.word = query_result.cm_query.wr_parm_r.word;
-+ params->wr_parm_y.word = query_result.cm_query.wr_parm_y.word;
-+ params->wr_parm_g.word = query_result.cm_query.wr_parm_g.word;
-+ params->td_thres.hword = query_result.cm_query.td_thres.hword;
-+ params->cs_thres_out.hword = query_result.cm_query.cs_thres_x.hword;
-+ params->cs_thres_in.hword = query_result.cm_query.cs_thres.hword;
-+ params->oal = query_result.cm_query.oal;
-+ params->wr_en_g = query_result.cm_query.ctl_wr_en_g;
-+ params->wr_en_y = query_result.cm_query.ctl_wr_en_y;
-+ params->wr_en_r = query_result.cm_query.ctl_wr_en_r;
-+ params->td_en = query_result.cm_query.ctl_td_en;
-+ params->td_mode = query_result.cm_query.ctl_td_mode;
-+ params->cscn_en = query_result.cm_query.ctl_cscn_en;
-+ params->mode = query_result.cm_query.ctl_mode;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_ccg_get);
-+
-+int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
-+ u64 *frame_count, u64 *byte_count)
-+{
-+ struct qm_mcr_ceetm_statistics_query result;
-+ u16 cid, command_type;
-+ enum qm_dc_portal dcp_idx;
-+ int ret;
-+
-+ cid = cpu_to_be16((ccg->parent->idx << 4) | ccg->idx);
-+ dcp_idx = ccg->parent->dcp_idx;
-+ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
-+ command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS;
-+ else
-+ command_type = CEETM_QUERY_REJECT_STATISTICS;
-+
-+ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
-+ if (ret) {
-+ pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx);
-+ return -EINVAL;
-+ }
-+
-+ *frame_count = be40_to_cpu(result.frm_cnt);
-+ *byte_count = be48_to_cpu(result.byte_cnt);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics);
-+
-+int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
-+ u16 swp_idx,
-+ unsigned int *cscn_enabled)
-+{
-+ struct qm_mcc_ceetm_ccgr_query query_opts;
-+ struct qm_mcr_ceetm_ccgr_query query_result;
-+ int i;
-+
-+ DPA_ASSERT(swp_idx < 127);
-+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
-+ (ccg->parent->idx << 4) | ccg->idx);
-+ query_opts.dcpid = ccg->parent->dcp_idx;
-+
-+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
-+ pr_err("Can't query CCGR#%d\n", ccg->idx);
-+ return -EINVAL;
-+ }
-+
-+ i = swp_idx / 32;
-+ i = 3 - i;
-+ *cscn_enabled = query_result.cm_query.cscn_targ_swp[i] >>
-+ (31 - swp_idx % 32);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cscn_swp_get);
-+
-+int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
-+ u16 dcp_idx,
-+ u8 vcgid,
-+ unsigned int cscn_enabled,
-+ u16 we_mask,
-+ const struct qm_ceetm_ccg_params *params)
-+{
-+ struct qm_mcc_ceetm_ccgr_config config_opts;
-+ int ret;
-+
-+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
-+ (ccg->parent->idx << 4) | ccg->idx);
-+ config_opts.dcpid = ccg->parent->dcp_idx;
-+ config_opts.we_mask = cpu_to_be16(we_mask | QM_CCGR_WE_CSCN_TUPD |
-+ QM_CCGR_WE_CDV);
-+ config_opts.cm_config.cdv = vcgid;
-+ config_opts.cm_config.cscn_tupd = cpu_to_be16((cscn_enabled << 15) |
-+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_idx);
-+ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
-+ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
-+ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
-+ config_opts.cm_config.ctl_td_en = params->td_en;
-+ config_opts.cm_config.ctl_td_mode = params->td_mode;
-+ config_opts.cm_config.ctl_cscn_en = params->cscn_en;
-+ config_opts.cm_config.ctl_mode = params->mode;
-+ config_opts.cm_config.cs_thres.hword =
-+ cpu_to_be16(params->cs_thres_in.hword);
-+ config_opts.cm_config.cs_thres_x.hword =
-+ cpu_to_be16(params->cs_thres_out.hword);
-+ config_opts.cm_config.td_thres.hword =
-+ cpu_to_be16(params->td_thres.hword);
-+ config_opts.cm_config.wr_parm_g.word =
-+ cpu_to_be32(params->wr_parm_g.word);
-+ config_opts.cm_config.wr_parm_y.word =
-+ cpu_to_be32(params->wr_parm_y.word);
-+ config_opts.cm_config.wr_parm_r.word =
-+ cpu_to_be32(params->wr_parm_r.word);
-+
-+ ret = qman_ceetm_configure_ccgr(&config_opts);
-+ if (ret) {
-+ pr_err("Configure CSCN_TARG_DCP failed!\n");
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set);
-+
-+int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
-+ u16 dcp_idx,
-+ u8 *vcgid,
-+ unsigned int *cscn_enabled)
-+{
-+ struct qm_mcc_ceetm_ccgr_query query_opts;
-+ struct qm_mcr_ceetm_ccgr_query query_result;
-+
-+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
-+ (ccg->parent->idx << 4) | ccg->idx);
-+ query_opts.dcpid = ccg->parent->dcp_idx;
-+
-+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
-+ pr_err("Can't query CCGR#%d\n", ccg->idx);
-+ return -EINVAL;
-+ }
-+
-+ *vcgid = query_result.cm_query.cdv;
-+ *cscn_enabled = (query_result.cm_query.cscn_targ_dcp >> dcp_idx) & 0x1;
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get);
-+
-+int qman_ceetm_querycongestion(struct __qm_mcr_querycongestion *ccg_state,
-+ unsigned int dcp_idx)
-+{
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ u8 res;
-+ int i, j;
-+
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+
-+ mcc = qm_mc_start(&p->p);
-+ for (i = 0; i < 2; i++) {
-+ mcc->ccgr_query.ccgrid =
-+ cpu_to_be16(CEETM_QUERY_CONGESTION_STATE | i);
-+ mcc->ccgr_query.dcpid = dcp_idx;
-+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
-+
-+ while (!(mcr = qm_mc_result(&p->p)))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_CEETM_VERB_CCGR_QUERY);
-+ res = mcr->result;
-+ if (res == QM_MCR_RESULT_OK) {
-+ for (j = 0; j < 8; j++)
-+ mcr->ccgr_query.congestion_state.state.
-+ __state[j] = be32_to_cpu(mcr->ccgr_query.
-+ congestion_state.state.__state[j]);
-+ *(ccg_state + i) =
-+ mcr->ccgr_query.congestion_state.state;
-+ } else {
-+ pr_err("QUERY CEETM CONGESTION STATE failed\n");
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ return -EIO;
-+ }
-+ }
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return 0;
-+}
-+
-+int qman_set_wpm(int wpm_enable)
-+{
-+ return qm_set_wpm(wpm_enable);
-+}
-+EXPORT_SYMBOL(qman_set_wpm);
-+
-+int qman_get_wpm(int *wpm_enable)
-+{
-+ return qm_get_wpm(wpm_enable);
-+}
-+EXPORT_SYMBOL(qman_get_wpm);
-+
-+int qman_shutdown_fq(u32 fqid)
-+{
-+ struct qman_portal *p;
-+ unsigned long irqflags __maybe_unused;
-+ int ret;
-+ struct qm_portal *low_p;
-+ p = get_affine_portal();
-+ PORTAL_IRQ_LOCK(p, irqflags);
-+ low_p = &p->p;
-+ ret = qm_shutdown_fq(&low_p, 1, fqid);
-+ PORTAL_IRQ_UNLOCK(p, irqflags);
-+ put_affine_portal();
-+ return ret;
-+}
-+
-+const struct qm_portal_config *qman_get_qm_portal_config(
-+ struct qman_portal *portal)
-+{
-+ return portal->sharing_redirect ? NULL : portal->config;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_low.h
-@@ -0,0 +1,1445 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qman_private.h"
-+
-+/***************************/
-+/* Portal register assists */
-+/***************************/
-+
-+/* Cache-inhibited register offsets */
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+
-+#define QM_REG_EQCR_PI_CINH 0x0000
-+#define QM_REG_EQCR_CI_CINH 0x0004
-+#define QM_REG_EQCR_ITR 0x0008
-+#define QM_REG_DQRR_PI_CINH 0x0040
-+#define QM_REG_DQRR_CI_CINH 0x0044
-+#define QM_REG_DQRR_ITR 0x0048
-+#define QM_REG_DQRR_DCAP 0x0050
-+#define QM_REG_DQRR_SDQCR 0x0054
-+#define QM_REG_DQRR_VDQCR 0x0058
-+#define QM_REG_DQRR_PDQCR 0x005c
-+#define QM_REG_MR_PI_CINH 0x0080
-+#define QM_REG_MR_CI_CINH 0x0084
-+#define QM_REG_MR_ITR 0x0088
-+#define QM_REG_CFG 0x0100
-+#define QM_REG_ISR 0x0e00
-+#define QM_REG_IIR 0x0e0c
-+#define QM_REG_ITPR 0x0e14
-+
-+/* Cache-enabled register offsets */
-+#define QM_CL_EQCR 0x0000
-+#define QM_CL_DQRR 0x1000
-+#define QM_CL_MR 0x2000
-+#define QM_CL_EQCR_PI_CENA 0x3000
-+#define QM_CL_EQCR_CI_CENA 0x3100
-+#define QM_CL_DQRR_PI_CENA 0x3200
-+#define QM_CL_DQRR_CI_CENA 0x3300
-+#define QM_CL_MR_PI_CENA 0x3400
-+#define QM_CL_MR_CI_CENA 0x3500
-+#define QM_CL_CR 0x3800
-+#define QM_CL_RR0 0x3900
-+#define QM_CL_RR1 0x3940
-+
-+#endif
-+
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+
-+#define QM_REG_EQCR_PI_CINH 0x3000
-+#define QM_REG_EQCR_CI_CINH 0x3040
-+#define QM_REG_EQCR_ITR 0x3080
-+#define QM_REG_DQRR_PI_CINH 0x3100
-+#define QM_REG_DQRR_CI_CINH 0x3140
-+#define QM_REG_DQRR_ITR 0x3180
-+#define QM_REG_DQRR_DCAP 0x31C0
-+#define QM_REG_DQRR_SDQCR 0x3200
-+#define QM_REG_DQRR_VDQCR 0x3240
-+#define QM_REG_DQRR_PDQCR 0x3280
-+#define QM_REG_MR_PI_CINH 0x3300
-+#define QM_REG_MR_CI_CINH 0x3340
-+#define QM_REG_MR_ITR 0x3380
-+#define QM_REG_CFG 0x3500
-+#define QM_REG_ISR 0x3600
-+#define QM_REG_IIR 0x36C0
-+#define QM_REG_ITPR 0x3740
-+
-+/* Cache-enabled register offsets */
-+#define QM_CL_EQCR 0x0000
-+#define QM_CL_DQRR 0x1000
-+#define QM_CL_MR 0x2000
-+#define QM_CL_EQCR_PI_CENA 0x3000
-+#define QM_CL_EQCR_CI_CENA 0x3040
-+#define QM_CL_DQRR_PI_CENA 0x3100
-+#define QM_CL_DQRR_CI_CENA 0x3140
-+#define QM_CL_MR_PI_CENA 0x3300
-+#define QM_CL_MR_CI_CENA 0x3340
-+#define QM_CL_CR 0x3800
-+#define QM_CL_RR0 0x3900
-+#define QM_CL_RR1 0x3940
-+
-+#endif
-+
-+
-+/* BTW, the drivers (and h/w programming model) already obtain the required
-+ * synchronisation for portal accesses via lwsync(), hwsync(), and
-+ * data-dependencies. Use of barrier()s or other order-preserving primitives
-+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
-+ * simply ensure that the compiler treats the portal registers as volatile (ie.
-+ * non-coherent). */
-+
-+/* Cache-inhibited register access. */
-+#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ci + (o)))
-+#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
-+ (qm)->addr_ci + (o));
-+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
-+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
-+
-+/* Cache-enabled (index) register access */
-+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
-+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
-+#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ce + (o)))
-+#define __qm_cl_out(qm, o, val) \
-+ do { \
-+ u32 *__tmpclout = (qm)->addr_ce + (o); \
-+ __raw_writel(cpu_to_be32(val), __tmpclout); \
-+ dcbf(__tmpclout); \
-+ } while (0)
-+#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
-+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
-+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
-+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
-+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
-+#define qm_cl_invalidate(reg)\
-+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
-+
-+/* Cache-enabled ring access */
-+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
-+
-+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
-+ * analysis, look at using the "extra" bit in the ring index registers to avoid
-+ * cyclic issues. */
-+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
-+{
-+ /* 'first' is included, 'last' is excluded */
-+ if (first <= last)
-+ return last - first;
-+ return ringsize + last - first;
-+}
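A couple of values make the wrap-around case concrete; this standalone copy of the helper (renamed cyc_diff to avoid confusion) behaves exactly like qm_cyc_diff() above:

#include <stdio.h>

static unsigned int cyc_diff(unsigned int ringsize, unsigned int first,
			     unsigned int last)
{
	/* 'first' is included, 'last' is excluded */
	return (first <= last) ? last - first : ringsize + last - first;
}

int main(void)
{
	printf("%u\n", cyc_diff(8, 2, 6));	/* no wrap: 4 entries */
	printf("%u\n", cyc_diff(8, 6, 2));	/* wrapped: 8 + 2 - 6 = 4 */
	return 0;
}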
-+
-+/* Portal modes.
-+ * Enum types:
-+ *   pmode == production mode
-+ *   cmode == consumption mode
-+ *   dmode == h/w dequeue mode
-+ * Enum values use 3 letter codes. First letter matches the portal mode,
-+ * remaining two letters indicate:
-+ * ci == cache-inhibited portal register
-+ * ce == cache-enabled portal register
-+ * vb == in-band valid-bit (cache-enabled)
-+ * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only
-+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
-+ */
-+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
-+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
-+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
-+ qm_eqcr_pvb = 2 /* valid-bit */
-+};
-+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
-+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
-+ qm_dqrr_dpull = 1 /* PDQCR */
-+};
-+enum qm_dqrr_pmode { /* s/w-only */
-+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
-+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
-+ qm_dqrr_pvb /* reads valid-bit */
-+};
-+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
-+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
-+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
-+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */
-+};
-+enum qm_mr_pmode { /* s/w-only */
-+ qm_mr_pci, /* reads MR_PI_CINH */
-+ qm_mr_pce, /* reads MR_PI_CENA */
-+ qm_mr_pvb /* reads valid-bit */
-+};
-+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
-+ qm_mr_cci = 0, /* CI index, cache-inhibited */
-+ qm_mr_cce = 1 /* CI index, cache-enabled */
-+};
-+
-+
-+/* ------------------------- */
-+/* --- Portal structures --- */
-+
-+#define QM_EQCR_SIZE 8
-+#define QM_DQRR_SIZE 16
-+#define QM_MR_SIZE 8
-+
-+struct qm_eqcr {
-+ struct qm_eqcr_entry *ring, *cursor;
-+ u8 ci, available, ithresh, vbit;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ u32 busy;
-+ enum qm_eqcr_pmode pmode;
-+#endif
-+};
-+
-+struct qm_dqrr {
-+ const struct qm_dqrr_entry *ring, *cursor;
-+ u8 pi, ci, fill, ithresh, vbit;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ enum qm_dqrr_dmode dmode;
-+ enum qm_dqrr_pmode pmode;
-+ enum qm_dqrr_cmode cmode;
-+#endif
-+};
-+
-+struct qm_mr {
-+ const struct qm_mr_entry *ring, *cursor;
-+ u8 pi, ci, fill, ithresh, vbit;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ enum qm_mr_pmode pmode;
-+ enum qm_mr_cmode cmode;
-+#endif
-+};
-+
-+struct qm_mc {
-+ struct qm_mc_command *cr;
-+ struct qm_mc_result *rr;
-+ u8 rridx, vbit;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ enum {
-+ /* Can be _mc_start()ed */
-+ qman_mc_idle,
-+ /* Can be _mc_commit()ed or _mc_abort()ed */
-+ qman_mc_user,
-+ /* Can only be _mc_retry()ed */
-+ qman_mc_hw
-+ } state;
-+#endif
-+};
-+
-+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
-+
-+struct qm_addr {
-+ void __iomem *addr_ce; /* cache-enabled */
-+ void __iomem *addr_ci; /* cache-inhibited */
-+};
-+
-+struct qm_portal {
-+ /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
-+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
-+	 * is setup-only, so isn't a cause for concern. In other words, don't
-+ * rearrange this structure on a whim, there be dragons ... */
-+ struct qm_addr addr;
-+ struct qm_eqcr eqcr;
-+ struct qm_dqrr dqrr;
-+ struct qm_mr mr;
-+ struct qm_mc mc;
-+} QM_PORTAL_ALIGNMENT;
-+
-+
-+/* ---------------- */
-+/* --- EQCR API --- */
-+
-+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
-+#define EQCR_CARRYCLEAR(p) \
-+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
-+
-+/* Bit-wise logic to convert a ring pointer to a ring index */
-+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
-+{
-+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
-+}
-+
-+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
-+static inline void EQCR_INC(struct qm_eqcr *eqcr)
-+{
-+ /* NB: this is odd-looking, but experiments show that it generates fast
-+ * code with essentially no branching overheads. We increment to the
-+ * next EQCR pointer and handle overflow and 'vbit'. */
-+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
-+ eqcr->cursor = EQCR_CARRYCLEAR(partial);
-+ if (partial != eqcr->cursor)
-+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
-+}
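To see why clearing the "carry bit" wraps the cursor, here is a standalone model that uses a plain integer for the address (the 0x10000 base is made up; what matters is that the real cache-enabled mapping is page aligned, so the bit at ring-size granularity, 8 entries * 64 bytes = 0x200, starts out clear):

#include <stdio.h>
#include <stdint.h>

#define RING_ENTRIES 8	/* QM_EQCR_SIZE */
#define CARRYCLEAR(a) ((a) & ~(uintptr_t)(RING_ENTRIES << 6))

int main(void)
{
	uintptr_t base = 0x10000;			/* page-aligned base */
	uintptr_t cursor = base + (RING_ENTRIES - 1) * 64;	/* last entry */
	uintptr_t next = CARRYCLEAR(cursor + 64);	/* step past the end */

	/* prints "0x101c0 -> 0x10000 (wrapped: 1)" */
	printf("0x%lx -> 0x%lx (wrapped: %d)\n",
	       (unsigned long)cursor, (unsigned long)next, (int)(next == base));
	return 0;
}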
-+
-+static inline int qm_eqcr_init(struct qm_portal *portal,
-+ enum qm_eqcr_pmode pmode,
-+ unsigned int eq_stash_thresh,
-+ int eq_stash_prio)
-+{
-+ /* This use of 'register', as well as all other occurrences, is because
-+ * it has been observed to generate much faster code with gcc than is
-+ * otherwise the case. */
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ u32 cfg;
-+ u8 pi;
-+
-+ eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
-+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
-+ qm_cl_invalidate(EQCR_CI);
-+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
-+ eqcr->cursor = eqcr->ring + pi;
-+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
-+ QM_EQCR_VERB_VBIT : 0;
-+ eqcr->available = QM_EQCR_SIZE - 1 -
-+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
-+ eqcr->ithresh = qm_in(EQCR_ITR);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ eqcr->busy = 0;
-+ eqcr->pmode = pmode;
-+#endif
-+ cfg = (qm_in(CFG) & 0x00ffffff) |
-+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
-+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
-+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
-+ qm_out(CFG, cfg);
-+ return 0;
-+}
-+
-+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
-+{
-+ return (qm_in(CFG) >> 28) & 0x7;
-+}
-+
-+static inline void qm_eqcr_finish(struct qm_portal *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ u8 pi, ci;
-+ u32 cfg;
-+
-+ /*
-+ * Disable EQCI stashing because the QMan only
-+ * presents the value it previously stashed to
-+ * maintain coherency. Setting the stash threshold
-+	 * to 1 then 0 ensures that QMan has resynchronized
-+ * its internal copy so that the portal is clean
-+ * when it is reinitialized in the future
-+ */
-+ cfg = (qm_in(CFG) & 0x0fffffff) |
-+ (1 << 28); /* QCSP_CFG: EST */
-+ qm_out(CFG, cfg);
-+ cfg &= 0x0fffffff; /* stash threshold = 0 */
-+ qm_out(CFG, cfg);
-+
-+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
-+ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
-+
-+ /* Refresh EQCR CI cache value */
-+ qm_cl_invalidate(EQCR_CI);
-+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
-+
-+ DPA_ASSERT(!eqcr->busy);
-+ if (pi != EQCR_PTR2IDX(eqcr->cursor))
-+		pr_crit("losing uncommitted EQCR entries\n");
-+ if (ci != eqcr->ci)
-+ pr_crit("missing existing EQCR completions\n");
-+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
-+ pr_crit("EQCR destroyed unquiesced\n");
-+}
-+
-+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
-+ *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ DPA_ASSERT(!eqcr->busy);
-+ if (!eqcr->available)
-+ return NULL;
-+
-+
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ eqcr->busy = 1;
-+#endif
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+ dcbz_64(eqcr->cursor);
-+#endif
-+ return eqcr->cursor;
-+}
-+
-+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
-+ *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ u8 diff, old_ci;
-+
-+ DPA_ASSERT(!eqcr->busy);
-+ if (!eqcr->available) {
-+ old_ci = eqcr->ci;
-+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
-+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
-+ eqcr->available += diff;
-+ if (!diff)
-+ return NULL;
-+ }
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ eqcr->busy = 1;
-+#endif
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+ dcbz_64(eqcr->cursor);
-+#endif
-+ return eqcr->cursor;
-+}
-+
-+static inline void qm_eqcr_abort(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
-+ DPA_ASSERT(eqcr->busy);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ eqcr->busy = 0;
-+#endif
-+}
-+
-+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
-+ struct qm_portal *portal, u8 myverb)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ DPA_ASSERT(eqcr->busy);
-+ DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
-+ if (eqcr->available == 1)
-+ return NULL;
-+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
-+ dcbf(eqcr->cursor);
-+ EQCR_INC(eqcr);
-+ eqcr->available--;
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+ dcbz_64(eqcr->cursor);
-+#endif
-+ return eqcr->cursor;
-+}
-+
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+#define EQCR_COMMIT_CHECKS(eqcr) \
-+do { \
-+ DPA_ASSERT(eqcr->busy); \
-+ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0xffffff00)); \
-+ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0xffffff00)); \
-+} while (0)
-+#else
-+#define EQCR_COMMIT_CHECKS(eqcr) \
-+do { \
-+ DPA_ASSERT(eqcr->busy); \
-+ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & \
-+ cpu_to_be32(0x00ffffff))); \
-+ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & \
-+ cpu_to_be32(0x00ffffff))); \
-+} while (0)
-+#endif
-+
-+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ EQCR_COMMIT_CHECKS(eqcr);
-+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
-+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
-+ EQCR_INC(eqcr);
-+ eqcr->available--;
-+ dcbf(eqcr->cursor);
-+ hwsync();
-+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ eqcr->busy = 0;
-+#endif
-+}
-+
-+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
-+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
-+ qm_cl_invalidate(EQCR_PI);
-+ qm_cl_touch_rw(EQCR_PI);
-+}
-+
-+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ EQCR_COMMIT_CHECKS(eqcr);
-+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
-+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
-+ EQCR_INC(eqcr);
-+ eqcr->available--;
-+ dcbf(eqcr->cursor);
-+ lwsync();
-+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ eqcr->busy = 0;
-+#endif
-+}
-+
-+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ struct qm_eqcr_entry *eqcursor;
-+ EQCR_COMMIT_CHECKS(eqcr);
-+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
-+ lwsync();
-+ eqcursor = eqcr->cursor;
-+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
-+ dcbf(eqcursor);
-+ EQCR_INC(eqcr);
-+ eqcr->available--;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ eqcr->busy = 0;
-+#endif
-+}
-+
-+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ u8 diff, old_ci = eqcr->ci;
-+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
-+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
-+ eqcr->available += diff;
-+ return diff;
-+}
-+
-+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
-+ qm_cl_touch_ro(EQCR_CI);
-+}
-+
-+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ u8 diff, old_ci = eqcr->ci;
-+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
-+ qm_cl_invalidate(EQCR_CI);
-+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
-+ eqcr->available += diff;
-+ return diff;
-+}
-+
-+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ return eqcr->ithresh;
-+}
-+
-+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ eqcr->ithresh = ithresh;
-+ qm_out(EQCR_ITR, ithresh);
-+}
-+
-+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ return eqcr->available;
-+}
-+
-+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
-+{
-+ register struct qm_eqcr *eqcr = &portal->eqcr;
-+ return QM_EQCR_SIZE - 1 - eqcr->available;
-+}
-+
-+
-+/* ---------------- */
-+/* --- DQRR API --- */
-+
-+/* FIXME: many possible improvements;
-+ * - look at changing the API to use pointer rather than index parameters now
-+ * that 'cursor' is a pointer,
-+ * - consider moving other parameters to pointer if it could help (ci)
-+ */
-+
-+#define DQRR_CARRYCLEAR(p) \
-+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
-+
-+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
-+{
-+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
-+}
-+
-+static inline const struct qm_dqrr_entry *DQRR_INC(
-+ const struct qm_dqrr_entry *e)
-+{
-+ return DQRR_CARRYCLEAR(e + 1);
-+}
-+
-+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
-+{
-+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
-+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
-+}
-+
-+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
-+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
-+ qm_out(DQRR_CI_CINH, dqrr->ci);
-+}
-+
-+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
-+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
-+ qm_cl_out(DQRR_CI, dqrr->ci);
-+}
-+
-+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
-+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
-+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
-+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
-+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
-+}
-+
-+static inline int qm_dqrr_init(struct qm_portal *portal,
-+ const struct qm_portal_config *config,
-+ enum qm_dqrr_dmode dmode,
-+ __maybe_unused enum qm_dqrr_pmode pmode,
-+ enum qm_dqrr_cmode cmode, u8 max_fill)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ u32 cfg;
-+
-+ /* Make sure the DQRR will be idle when we enable */
-+ qm_out(DQRR_SDQCR, 0);
-+ qm_out(DQRR_VDQCR, 0);
-+ qm_out(DQRR_PDQCR, 0);
-+ dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
-+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
-+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
-+ dqrr->cursor = dqrr->ring + dqrr->ci;
-+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
-+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
-+ QM_DQRR_VERB_VBIT : 0;
-+ dqrr->ithresh = qm_in(DQRR_ITR);
-+
-+ /* Free up pending DQRR entries if any as per current DCM */
-+ if (dqrr->fill) {
-+ enum qm_dqrr_cmode dcm = (qm_in(CFG) >> 16) & 3;
-+
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ dqrr->cmode = dcm;
-+#endif
-+ switch (dcm) {
-+ case qm_dqrr_cci:
-+ qm_dqrr_cci_consume(portal, dqrr->fill);
-+ break;
-+ case qm_dqrr_cce:
-+ qm_dqrr_cce_consume(portal, dqrr->fill);
-+ break;
-+ case qm_dqrr_cdc:
-+ qm_dqrr_cdc_consume_n(portal, (1<<QM_DQRR_SIZE) - 1);
-+ break;
-+ default:
-+ DPA_ASSERT(0);
-+ }
-+ }
-+
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ dqrr->dmode = dmode;
-+ dqrr->pmode = pmode;
-+ dqrr->cmode = cmode;
-+#endif
-+ /* Invalidate every ring entry before beginning */
-+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
-+ dcbi(qm_cl(dqrr->ring, cfg));
-+ cfg = (qm_in(CFG) & 0xff000f00) |
-+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
-+ ((dmode & 1) << 18) | /* DP */
-+ ((cmode & 3) << 16) | /* DCM */
-+ 0xa0 | /* RE+SE */
-+ (0 ? 0x40 : 0) | /* Ignore RP */
-+ (0 ? 0x10 : 0); /* Ignore SP */
-+ qm_out(CFG, cfg);
-+ qm_dqrr_set_maxfill(portal, max_fill);
-+
-+ /* Recalculate cursor as we may have consumed frames */
-+ dqrr->cursor = dqrr->ring + dqrr->ci;
-+ return 0;
-+}
-+
-+static inline void qm_dqrr_finish(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if ((dqrr->cmode != qm_dqrr_cdc) &&
-+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
-+ pr_crit("Ignoring completed DQRR entries\n");
-+#endif
-+}
-+
-+static inline const struct qm_dqrr_entry *qm_dqrr_current(
-+ struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ if (!dqrr->fill)
-+ return NULL;
-+ return dqrr->cursor;
-+}
-+
-+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ return DQRR_PTR2IDX(dqrr->cursor);
-+}
-+
-+static inline u8 qm_dqrr_next(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->fill);
-+ dqrr->cursor = DQRR_INC(dqrr->cursor);
-+ return --dqrr->fill;
-+}
-+
-+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ u8 diff, old_pi = dqrr->pi;
-+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
-+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
-+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
-+ dqrr->fill += diff;
-+ return diff;
-+}
-+
-+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
-+ qm_cl_invalidate(DQRR_PI);
-+ qm_cl_touch_ro(DQRR_PI);
-+}
-+
-+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ u8 diff, old_pi = dqrr->pi;
-+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
-+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
-+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
-+ dqrr->fill += diff;
-+ return diff;
-+}
-+
-+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
-+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
-+#if (defined CONFIG_PPC || defined CONFIG_PPC64) && !defined CONFIG_FSL_PAMU
-+ /*
-+ * On PowerPC platforms if PAMU is not available we need to
-+ * manually invalidate the cache. When PAMU is available the
-+ * cache is updated by stashing operations generated by QMan
-+ */
-+ dcbi(res);
-+ dcbt_ro(res);
-+#endif
-+
-+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
-+ * inlining doesn't try to optimise out "excess reads". */
-+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
-+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
-+ if (!dqrr->pi)
-+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
-+ dqrr->fill++;
-+ }
-+}
-+
-+
-+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
-+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
-+ qm_out(DQRR_CI_CINH, dqrr->ci);
-+}
-+
-+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
-+ qm_cl_invalidate(DQRR_CI);
-+ qm_cl_touch_rw(DQRR_CI);
-+}
-+
-+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
-+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
-+ qm_cl_out(DQRR_CI, dqrr->ci);
-+}
-+
-+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
-+ int park)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
-+ DPA_ASSERT(idx < QM_DQRR_SIZE);
-+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
-+ ((park ? 1 : 0) << 6) | /* PK */
-+ idx); /* DCAP_CI */
-+}
-+
-+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
-+ const struct qm_dqrr_entry *dq,
-+ int park)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ u8 idx = DQRR_PTR2IDX(dq);
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
-+ DPA_ASSERT((dqrr->ring + idx) == dq);
-+ DPA_ASSERT(idx < QM_DQRR_SIZE);
-+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
-+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
-+ idx); /* DQRR_DCAP::DCAP_CI */
-+}
-+
-+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
-+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
-+}
-+
-+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
-+ qm_cl_invalidate(DQRR_CI);
-+ qm_cl_touch_ro(DQRR_CI);
-+}
-+
-+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
-+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
-+}
-+
-+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
-+ return dqrr->ci;
-+}
-+
-+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
-+{
-+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
-+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
-+ (1 << 6) | /* PK */
-+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
-+}
-+
-+static inline void qm_dqrr_park_current(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
-+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
-+ (1 << 6) | /* PK */
-+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
-+}
-+
-+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
-+{
-+ qm_out(DQRR_SDQCR, sdqcr);
-+}
-+
-+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
-+{
-+ return qm_in(DQRR_SDQCR);
-+}
-+
-+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
-+{
-+ qm_out(DQRR_VDQCR, vdqcr);
-+}
-+
-+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
-+{
-+ return qm_in(DQRR_VDQCR);
-+}
-+
-+static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
-+{
-+ qm_out(DQRR_PDQCR, pdqcr);
-+}
-+
-+static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
-+{
-+ return qm_in(DQRR_PDQCR);
-+}
-+
-+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
-+{
-+ register struct qm_dqrr *dqrr = &portal->dqrr;
-+ return dqrr->ithresh;
-+}
-+
-+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
-+{
-+ qm_out(DQRR_ITR, ithresh);
-+}
-+
-+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
-+{
-+ return (qm_in(CFG) & 0x00f00000) >> 20;
-+}
-+
-+
-+/* -------------- */
-+/* --- MR API --- */
-+
-+#define MR_CARRYCLEAR(p) \
-+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
-+
-+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
-+{
-+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
-+}
-+
-+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
-+{
-+ return MR_CARRYCLEAR(e + 1);
-+}
-+
-+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
-+ enum qm_mr_cmode cmode)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ u32 cfg;
-+
-+ mr->ring = portal->addr.addr_ce + QM_CL_MR;
-+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
-+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
-+ mr->cursor = mr->ring + mr->ci;
-+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
-+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
-+ mr->ithresh = qm_in(MR_ITR);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mr->pmode = pmode;
-+ mr->cmode = cmode;
-+#endif
-+ cfg = (qm_in(CFG) & 0xfffff0ff) |
-+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
-+ qm_out(CFG, cfg);
-+ return 0;
-+}
-+
-+static inline void qm_mr_finish(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ if (mr->ci != MR_PTR2IDX(mr->cursor))
-+ pr_crit("Ignoring completed MR entries\n");
-+}
-+
-+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ if (!mr->fill)
-+ return NULL;
-+ return mr->cursor;
-+}
-+
-+static inline u8 qm_mr_cursor(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ return MR_PTR2IDX(mr->cursor);
-+}
-+
-+static inline u8 qm_mr_next(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ DPA_ASSERT(mr->fill);
-+ mr->cursor = MR_INC(mr->cursor);
-+ return --mr->fill;
-+}
-+
-+static inline u8 qm_mr_pci_update(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ u8 diff, old_pi = mr->pi;
-+ DPA_ASSERT(mr->pmode == qm_mr_pci);
-+ mr->pi = qm_in(MR_PI_CINH);
-+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
-+ mr->fill += diff;
-+ return diff;
-+}
-+
-+static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_mr *mr = &portal->mr;
-+ DPA_ASSERT(mr->pmode == qm_mr_pce);
-+ qm_cl_invalidate(MR_PI);
-+ qm_cl_touch_ro(MR_PI);
-+}
-+
-+static inline u8 qm_mr_pce_update(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ u8 diff, old_pi = mr->pi;
-+ DPA_ASSERT(mr->pmode == qm_mr_pce);
-+ mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
-+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
-+ mr->fill += diff;
-+ return diff;
-+}
-+
-+static inline void qm_mr_pvb_update(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
-+ DPA_ASSERT(mr->pmode == qm_mr_pvb);
-+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
-+ * inlining doesn't try to optimise out "excess reads". */
-+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
-+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
-+ if (!mr->pi)
-+ mr->vbit ^= QM_MR_VERB_VBIT;
-+ mr->fill++;
-+ res = MR_INC(res);
-+ }
-+ dcbit_ro(res);
-+}
-+
-+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ DPA_ASSERT(mr->cmode == qm_mr_cci);
-+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
-+ qm_out(MR_CI_CINH, mr->ci);
-+}
-+
-+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ DPA_ASSERT(mr->cmode == qm_mr_cci);
-+ mr->ci = MR_PTR2IDX(mr->cursor);
-+ qm_out(MR_CI_CINH, mr->ci);
-+}
-+
-+static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_mr *mr = &portal->mr;
-+ DPA_ASSERT(mr->cmode == qm_mr_cce);
-+ qm_cl_invalidate(MR_CI);
-+ qm_cl_touch_rw(MR_CI);
-+}
-+
-+static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ DPA_ASSERT(mr->cmode == qm_mr_cce);
-+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
-+ qm_cl_out(MR_CI, mr->ci);
-+}
-+
-+static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ DPA_ASSERT(mr->cmode == qm_mr_cce);
-+ mr->ci = MR_PTR2IDX(mr->cursor);
-+ qm_cl_out(MR_CI, mr->ci);
-+}
-+
-+static inline u8 qm_mr_get_ci(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ return mr->ci;
-+}
-+
-+static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
-+{
-+ register struct qm_mr *mr = &portal->mr;
-+ return mr->ithresh;
-+}
-+
-+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
-+{
-+ qm_out(MR_ITR, ithresh);
-+}
-+
-+
-+/* ------------------------------ */
-+/* --- Management command API --- */
-+
-+static inline int qm_mc_init(struct qm_portal *portal)
-+{
-+ u8 rr0, rr1;
-+ register struct qm_mc *mc = &portal->mc;
-+
-+ mc->cr = portal->addr.addr_ce + QM_CL_CR;
-+ mc->rr = portal->addr.addr_ce + QM_CL_RR0;
-+
-+ /*
-+ * The expected valid bit polarity for the next CR command is 0
-+ * if RR1 contains a valid response, and is 1 if RR0 contains a
-+ * valid response. If both RR contain all 0, this indicates either
-+ * that no command has been executed since reset (in which case the
-+ * expected valid bit polarity is 1)
-+ */
-+ rr0 = __raw_readb(&mc->rr->verb);
-+ rr1 = __raw_readb(&(mc->rr+1)->verb);
-+ if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
-+ mc->rridx = 1;
-+ else
-+ mc->rridx = 0;
-+
-+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = qman_mc_idle;
-+#endif
-+ return 0;
-+}
-+
-+static inline void qm_mc_finish(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_mc *mc = &portal->mc;
-+ DPA_ASSERT(mc->state == qman_mc_idle);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ if (mc->state != qman_mc_idle)
-+ pr_crit("Losing incomplete MC command\n");
-+#endif
-+}
-+
-+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
-+{
-+ register struct qm_mc *mc = &portal->mc;
-+ DPA_ASSERT(mc->state == qman_mc_idle);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = qman_mc_user;
-+#endif
-+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-+ dcbz_64(mc->cr);
-+#endif
-+ return mc->cr;
-+}
-+
-+static inline void qm_mc_abort(struct qm_portal *portal)
-+{
-+ __maybe_unused register struct qm_mc *mc = &portal->mc;
-+ DPA_ASSERT(mc->state == qman_mc_user);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = qman_mc_idle;
-+#endif
-+}
-+
-+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
-+{
-+ register struct qm_mc *mc = &portal->mc;
-+ struct qm_mc_result *rr = mc->rr + mc->rridx;
-+ DPA_ASSERT(mc->state == qman_mc_user);
-+ lwsync();
-+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
-+ dcbf(mc->cr);
-+ dcbit_ro(rr);
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = qman_mc_hw;
-+#endif
-+}
-+
-+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
-+{
-+ register struct qm_mc *mc = &portal->mc;
-+ struct qm_mc_result *rr = mc->rr + mc->rridx;
-+ DPA_ASSERT(mc->state == qman_mc_hw);
-+ /* The inactive response register's verb byte always returns zero until
-+ * its command is submitted and completed. This includes the valid-bit,
-+ * in case you were wondering... */
-+ if (!__raw_readb(&rr->verb)) {
-+ dcbit_ro(rr);
-+ return NULL;
-+ }
-+ mc->rridx ^= 1;
-+ mc->vbit ^= QM_MCC_VERB_VBIT;
-+#ifdef CONFIG_FSL_DPA_CHECKING
-+ mc->state = qman_mc_idle;
-+#endif
-+ return rr;
-+}
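The qm_mc_start()/qm_mc_commit()/qm_mc_result() trio above forms a simple submit-then-poll protocol, and qm_shutdown_fq() further down in this file drives it exactly that way. A minimal sketch of the pattern, mirroring that caller (the wrapper name is illustrative, QUERYFQ_NP is just one example verb, and error handling is elided):

    static u8 example_queryfq_np_state(struct qm_portal *p, u32 fqid)
    {
            struct qm_mc_command *mcc;
            struct qm_mc_result *mcr;

            mcc = qm_mc_start(p);                    /* claim the CR entry */
            mcc->queryfq_np.fqid = cpu_to_be32(fqid);
            qm_mc_commit(p, QM_MCC_VERB_QUERYFQ_NP); /* write verb + valid bit */
            while (!(mcr = qm_mc_result(p)))         /* poll RR for the response */
                    cpu_relax();
            return mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
    }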
-+
-+
-+/* ------------------------------------- */
-+/* --- Portal interrupt register API --- */
-+
-+static inline int qm_isr_init(__always_unused struct qm_portal *portal)
-+{
-+ return 0;
-+}
-+
-+static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
-+{
-+}
-+
-+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
-+{
-+ qm_out(ITPR, iperiod);
-+}
-+
-+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
-+{
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
-+#else
-+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
-+#endif
-+}
-+
-+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
-+ u32 val)
-+{
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
-+#else
-+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
-+#endif
-+}
-+
-+/* Cleanup FQs */
-+static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
-+ u32 fqid)
-+{
-+
-+ struct qm_mc_command *mcc;
-+ struct qm_mc_result *mcr;
-+ u8 state;
-+ int orl_empty, fq_empty, i, drain = 0;
-+ u32 result;
-+ u32 channel, wq;
-+ u16 dest_wq;
-+
-+ /* Determine the state of the FQID */
-+ mcc = qm_mc_start(portal[0]);
-+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
-+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
-+ while (!(mcr = qm_mc_result(portal[0])))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
-+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
-+ if (state == QM_MCR_NP_STATE_OOS)
-+ return 0; /* Already OOS, no need to do anymore checks */
-+
-+ /* Query which channel the FQ is using */
-+ mcc = qm_mc_start(portal[0]);
-+ mcc->queryfq.fqid = cpu_to_be32(fqid);
-+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
-+ while (!(mcr = qm_mc_result(portal[0])))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
-+
-+ /* Need to store these since the MCR gets reused */
-+ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
-+ wq = dest_wq & 0x7;
-+ channel = dest_wq>>3;
-+
-+ switch (state) {
-+ case QM_MCR_NP_STATE_TEN_SCHED:
-+ case QM_MCR_NP_STATE_TRU_SCHED:
-+ case QM_MCR_NP_STATE_ACTIVE:
-+ case QM_MCR_NP_STATE_PARKED:
-+ orl_empty = 0;
-+ mcc = qm_mc_start(portal[0]);
-+ mcc->alterfq.fqid = cpu_to_be32(fqid);
-+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
-+ while (!(mcr = qm_mc_result(portal[0])))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_MCR_VERB_ALTER_RETIRE);
-+ result = mcr->result; /* Make a copy as we reuse MCR below */
-+
-+ if (result == QM_MCR_RESULT_PENDING) {
-+ /* Need to wait for the FQRN in the message ring, which
-+ will only occur once the FQ has been drained. In
-+ order for the FQ to drain the portal needs to be set
-+ to dequeue from the channel the FQ is scheduled on */
-+ const struct qm_mr_entry *msg;
-+ const struct qm_dqrr_entry *dqrr = NULL;
-+ int found_fqrn = 0;
-+ u16 dequeue_wq = 0;
-+
-+ /* Flag that we need to drain FQ */
-+ drain = 1;
-+
-+ if (channel >= qm_channel_pool1 &&
-+ channel < (qm_channel_pool1 + 15)) {
-+ /* Pool channel, enable the bit in the portal */
-+ dequeue_wq = (channel -
-+ qm_channel_pool1 + 1)<<4 | wq;
-+ } else if (channel < qm_channel_pool1) {
-+ /* Dedicated channel */
-+ dequeue_wq = wq;
-+ } else {
-+ pr_info("Cannot recover FQ 0x%x, it is "
-+ "scheduled on channel 0x%x",
-+ fqid, channel);
-+ return -EBUSY;
-+ }
-+ /* Set the sdqcr to drain this channel */
-+ if (channel < qm_channel_pool1)
-+ for (i = 0; i < portal_count; i++)
-+ qm_dqrr_sdqcr_set(portal[i],
-+ QM_SDQCR_TYPE_ACTIVE |
-+ QM_SDQCR_CHANNELS_DEDICATED);
-+ else
-+ for (i = 0; i < portal_count; i++)
-+ qm_dqrr_sdqcr_set(
-+ portal[i],
-+ QM_SDQCR_TYPE_ACTIVE |
-+ QM_SDQCR_CHANNELS_POOL_CONV
-+ (channel));
-+ while (!found_fqrn) {
-+ /* Keep draining DQRR while checking the MR*/
-+ for (i = 0; i < portal_count; i++) {
-+ qm_dqrr_pvb_update(portal[i]);
-+ dqrr = qm_dqrr_current(portal[i]);
-+ while (dqrr) {
-+ qm_dqrr_cdc_consume_1ptr(
-+ portal[i], dqrr, 0);
-+ qm_dqrr_pvb_update(portal[i]);
-+ qm_dqrr_next(portal[i]);
-+ dqrr = qm_dqrr_current(
-+ portal[i]);
-+ }
-+ /* Process message ring too */
-+ qm_mr_pvb_update(portal[i]);
-+ msg = qm_mr_current(portal[i]);
-+ while (msg) {
-+ if ((msg->verb &
-+ QM_MR_VERB_TYPE_MASK)
-+ == QM_MR_VERB_FQRN)
-+ found_fqrn = 1;
-+ qm_mr_next(portal[i]);
-+ qm_mr_cci_consume_to_current(
-+ portal[i]);
-+ qm_mr_pvb_update(portal[i]);
-+ msg = qm_mr_current(portal[i]);
-+ }
-+ cpu_relax();
-+ }
-+ }
-+ }
-+ if (result != QM_MCR_RESULT_OK &&
-+ result != QM_MCR_RESULT_PENDING) {
-+ /* error */
-+ pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
-+ fqid, result);
-+ return -1;
-+ }
-+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
-+ /* ORL had no entries, no need to wait until the
-+ ERNs come in */
-+ orl_empty = 1;
-+ }
-+ /* Retirement succeeded, check to see if FQ needs
-+ to be drained */
-+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
-+ /* FQ is Not Empty, drain using volatile DQ commands */
-+ fq_empty = 0;
-+ do {
-+ const struct qm_dqrr_entry *dqrr = NULL;
-+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
-+ qm_dqrr_vdqcr_set(portal[0], vdqcr);
-+
-+ /* Wait for a dequeue to occur */
-+ while (dqrr == NULL) {
-+ qm_dqrr_pvb_update(portal[0]);
-+ dqrr = qm_dqrr_current(portal[0]);
-+ if (!dqrr)
-+ cpu_relax();
-+ }
-+ /* Process the dequeues, making sure to
-+ empty the ring completely */
-+ while (dqrr) {
-+ if (be32_to_cpu(dqrr->fqid) == fqid &&
-+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
-+ fq_empty = 1;
-+ qm_dqrr_cdc_consume_1ptr(portal[0],
-+ dqrr, 0);
-+ qm_dqrr_pvb_update(portal[0]);
-+ qm_dqrr_next(portal[0]);
-+ dqrr = qm_dqrr_current(portal[0]);
-+ }
-+ } while (fq_empty == 0);
-+ }
-+ for (i = 0; i < portal_count; i++)
-+ qm_dqrr_sdqcr_set(portal[i], 0);
-+
-+ /* Wait for the ORL to have been completely drained */
-+ while (orl_empty == 0) {
-+ const struct qm_mr_entry *msg;
-+ qm_mr_pvb_update(portal[0]);
-+ msg = qm_mr_current(portal[0]);
-+ while (msg) {
-+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
-+ QM_MR_VERB_FQRL)
-+ orl_empty = 1;
-+ qm_mr_next(portal[0]);
-+ qm_mr_cci_consume_to_current(portal[0]);
-+ qm_mr_pvb_update(portal[0]);
-+ msg = qm_mr_current(portal[0]);
-+ }
-+ cpu_relax();
-+ }
-+ mcc = qm_mc_start(portal[0]);
-+ mcc->alterfq.fqid = cpu_to_be32(fqid);
-+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
-+ while (!(mcr = qm_mc_result(portal[0])))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_MCR_VERB_ALTER_OOS);
-+ if (mcr->result != QM_MCR_RESULT_OK) {
-+ pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n",
-+ fqid, mcr->result);
-+ return -1;
-+ }
-+ return 0;
-+ case QM_MCR_NP_STATE_RETIRED:
-+ /* Send OOS Command */
-+ mcc = qm_mc_start(portal[0]);
-+ mcc->alterfq.fqid = cpu_to_be32(fqid);
-+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
-+ while (!(mcr = qm_mc_result(portal[0])))
-+ cpu_relax();
-+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-+ QM_MCR_VERB_ALTER_OOS);
-+ if (mcr->result) {
-+ pr_err("OOS Failed on FQID 0x%x\n", fqid);
-+ return -1;
-+ }
-+ return 0;
-+ }
-+ return -1;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_private.h
-@@ -0,0 +1,398 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "dpa_sys.h"
-+#include <linux/fsl_qman.h>
-+#include <linux/iommu.h>
-+
-+#if defined(CONFIG_FSL_PAMU)
-+#include <asm/fsl_pamu_stash.h>
-+#endif
-+
-+#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
-+#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
-+#endif
-+
-+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
-+ /* ----------------- */
-+ /* Congestion Groups */
-+ /* ----------------- */
-+/* This wrapper represents a bit-array for the state of the 256 Qman congestion
-+ * groups. It is also used as a *mask* for congestion groups, eg. so we ignore
-+ * those that don't concern us. We harness the structure and accessor details
-+ * already used in the management command to query congestion groups. */
-+struct qman_cgrs {
-+ struct __qm_mcr_querycongestion q;
-+};
-+static inline void qman_cgrs_init(struct qman_cgrs *c)
-+{
-+ memset(c, 0, sizeof(*c));
-+}
-+static inline void qman_cgrs_fill(struct qman_cgrs *c)
-+{
-+ memset(c, 0xff, sizeof(*c));
-+}
-+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
-+{
-+ return QM_MCR_QUERYCONGESTION(&c->q, num);
-+}
-+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
-+{
-+ c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
-+}
-+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
-+{
-+ c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
-+}
-+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
-+{
-+ while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
-+ ;
-+ return num;
-+}
-+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
-+ const struct qman_cgrs *src)
-+{
-+ *dest = *src;
-+}
-+static inline void qman_cgrs_and(struct qman_cgrs *dest,
-+ const struct qman_cgrs *a, const struct qman_cgrs *b)
-+{
-+ int ret;
-+ u32 *_d = dest->q.__state;
-+ const u32 *_a = a->q.__state;
-+ const u32 *_b = b->q.__state;
-+ for (ret = 0; ret < 8; ret++)
-+ *(_d++) = *(_a++) & *(_b++);
-+}
-+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
-+ const struct qman_cgrs *a, const struct qman_cgrs *b)
-+{
-+ int ret;
-+ u32 *_d = dest->q.__state;
-+ const u32 *_a = a->q.__state;
-+ const u32 *_b = b->q.__state;
-+ for (ret = 0; ret < 8; ret++)
-+ *(_d++) = *(_a++) ^ *(_b++);
-+}
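As a usage sketch of the bit-array helpers above: a hypothetical caller (the function name and the idea of diffing two snapshots are illustrative only) can xor two congestion snapshots and walk the groups whose state changed with qman_cgrs_next():

    static void example_walk_changed_cgrs(const struct qman_cgrs *now,
                                          const struct qman_cgrs *prev)
    {
            struct qman_cgrs delta;
            int i;

            qman_cgrs_xor(&delta, now, prev);        /* groups whose state changed */
            for (i = qman_cgrs_next(&delta, -1); i < __CGR_NUM;
                 i = qman_cgrs_next(&delta, i))
                    pr_info("CGR %d changed congestion state\n", i);
    }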
-+
-+ /* ----------------------- */
-+ /* CEETM Congestion Groups */
-+ /* ----------------------- */
-+/* This wrapper represents a bit-array for the state of the 512 Qman CEETM
-+ * congestion groups.
-+ */
-+struct qman_ccgrs {
-+ struct __qm_mcr_querycongestion q[2];
-+};
-+static inline void qman_ccgrs_init(struct qman_ccgrs *c)
-+{
-+ memset(c, 0, sizeof(*c));
-+}
-+static inline void qman_ccgrs_fill(struct qman_ccgrs *c)
-+{
-+ memset(c, 0xff, sizeof(*c));
-+}
-+static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num)
-+{
-+ if (num < __CGR_NUM)
-+ return QM_MCR_QUERYCONGESTION(&c->q[0], num);
-+ else
-+ return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM));
-+}
-+static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num)
-+{
-+ while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num))
-+ ;
-+ return num;
-+}
-+static inline void qman_ccgrs_cp(struct qman_ccgrs *dest,
-+ const struct qman_ccgrs *src)
-+{
-+ *dest = *src;
-+}
-+static inline void qman_ccgrs_and(struct qman_ccgrs *dest,
-+ const struct qman_ccgrs *a, const struct qman_ccgrs *b)
-+{
-+ int ret, i;
-+ u32 *_d;
-+ const u32 *_a, *_b;
-+ for (i = 0; i < 2; i++) {
-+ _d = dest->q[i].__state;
-+ _a = a->q[i].__state;
-+ _b = b->q[i].__state;
-+ for (ret = 0; ret < 8; ret++)
-+ *(_d++) = *(_a++) & *(_b++);
-+ }
-+}
-+static inline void qman_ccgrs_xor(struct qman_ccgrs *dest,
-+ const struct qman_ccgrs *a, const struct qman_ccgrs *b)
-+{
-+ int ret, i;
-+ u32 *_d;
-+ const u32 *_a, *_b;
-+ for (i = 0; i < 2; i++) {
-+ _d = dest->q[i].__state;
-+ _a = a->q[i].__state;
-+ _b = b->q[i].__state;
-+ for (ret = 0; ret < 8; ret++)
-+ *(_d++) = *(_a++) ^ *(_b++);
-+ }
-+}
-+
-+/* used by CCSR and portal interrupt code */
-+enum qm_isr_reg {
-+ qm_isr_status = 0,
-+ qm_isr_enable = 1,
-+ qm_isr_disable = 2,
-+ qm_isr_inhibit = 3
-+};
-+
-+struct qm_portal_config {
-+ /* Corenet portal addresses;
-+ * [0]==cache-enabled, [1]==cache-inhibited. */
-+ __iomem void *addr_virt[2];
-+ struct resource addr_phys[2];
-+ struct device dev;
-+ struct iommu_domain *iommu_domain;
-+ /* Allow these to be joined in lists */
-+ struct list_head list;
-+ /* User-visible portal configuration settings */
-+ struct qman_portal_config public_cfg;
-+ /* power management saved data */
-+ u32 saved_isdr;
-+};
-+
-+/* Revision info (for errata and feature handling) */
-+#define QMAN_REV11 0x0101
-+#define QMAN_REV12 0x0102
-+#define QMAN_REV20 0x0200
-+#define QMAN_REV30 0x0300
-+#define QMAN_REV31 0x0301
-+#define QMAN_REV32 0x0302
-+
-+/* QMan REV_2 register contains the Cfg option */
-+#define QMAN_REV_CFG_0 0x0
-+#define QMAN_REV_CFG_1 0x1
-+#define QMAN_REV_CFG_2 0x2
-+#define QMAN_REV_CFG_3 0x3
-+
-+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
-+extern u8 qman_ip_cfg;
-+extern u32 qman_clk;
-+extern u16 qman_portal_max;
-+
-+#ifdef CONFIG_FSL_QMAN_CONFIG
-+/* Hooks from qman_driver.c to qman_config.c */
-+int qman_init_ccsr(struct device_node *node);
-+void qman_liodn_fixup(u16 channel);
-+int qman_set_sdest(u16 channel, unsigned int cpu_idx);
-+size_t get_qman_fqd_size(void);
-+#else
-+static inline size_t get_qman_fqd_size(void)
-+{
-+ return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ);
-+}
-+#endif
-+
-+int qm_set_wpm(int wpm);
-+int qm_get_wpm(int *wpm);
-+
-+/* Hooks from qman_driver.c in to qman_high.c */
-+struct qman_portal *qman_create_portal(
-+ struct qman_portal *portal,
-+ const struct qm_portal_config *config,
-+ const struct qman_cgrs *cgrs);
-+
-+struct qman_portal *qman_create_affine_portal(
-+ const struct qm_portal_config *config,
-+ const struct qman_cgrs *cgrs);
-+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
-+ int cpu);
-+const struct qm_portal_config *qman_destroy_affine_portal(void);
-+void qman_destroy_portal(struct qman_portal *qm);
-+
-+/* Hooks from fsl_usdpaa.c to qman_driver.c */
-+struct qm_portal_config *qm_get_unused_portal(void);
-+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
-+
-+void qm_put_unused_portal(struct qm_portal_config *pcfg);
-+void qm_set_liodns(struct qm_portal_config *pcfg);
-+
-+/* This CGR feature is supported by h/w and required by unit-tests and the
-+ * debugfs hooks, so is implemented in the driver. However it allows an explicit
-+ * corruption of h/w fields by s/w that are usually incorruptible (because the
-+ * counters are usually maintained entirely within h/w). As such, we declare
-+ * this API internally. */
-+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
-+ struct qm_mcr_cgrtestwrite *result);
-+
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+/* If the fq object pointer is greater than the size of the context_b field,
-+ * then a lookup table is required. */
-+int qman_setup_fq_lookup_table(size_t num_entries);
-+#endif
-+
-+
-+/*************************************************/
-+/* QMan s/w corenet portal, low-level i/face */
-+/*************************************************/
-+
-+/* Note: most functions are only used by the high-level interface, so are
-+ * inlined from qman_low.h. The stuff below is for use by other parts of the
-+ * driver. */
-+
-+/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
-+ * dequeue TYPE. Choose TOKEN (8-bit).
-+ * If SOURCE == CHANNELS,
-+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
-+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
-+ * priority.
-+ * If SOURCE == SPECIFICWQ,
-+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
-+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
-+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
-+ * same value.
-+ */
-+#define QM_SDQCR_SOURCE_CHANNELS 0x0
-+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
-+#define QM_SDQCR_COUNT_EXACT1 0x0
-+#define QM_SDQCR_COUNT_UPTO3 0x20000000
-+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
-+#define QM_SDQCR_TYPE_MASK 0x03000000
-+#define QM_SDQCR_TYPE_NULL 0x0
-+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
-+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
-+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
-+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
-+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
-+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
-+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
-+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
-+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
-+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
-+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
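Following the field breakdown documented above, an SDQCR value is built by OR-ing one choice from each group; PORTAL_SDQCR in qman_test_high.c later in this patch is composed the same way. An illustrative value using only the macros defined here, assuming 'portal' is an already-initialised struct qm_portal * and with an arbitrary token:

    /* Up to 3 active-FQ dequeues per command from the dedicated channel,
     * tagging the resulting DQRR entries with token 0x12. */
    u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
                QM_SDQCR_COUNT_UPTO3 |
                QM_SDQCR_TYPE_ACTIVE |
                QM_SDQCR_TOKEN_SET(0x12) |
                QM_SDQCR_CHANNELS_DEDICATED;

    qm_dqrr_sdqcr_set(portal, sdqcr);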
-+
-+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
-+#define QM_VDQCR_FQID_MASK 0x00ffffff
-+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
-+
-+/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
-+ * If MODE==SCHEDULED
-+ * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
-+ * If CHANNELS,
-+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
-+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
-+ * priority.
-+ * If SPECIFICWQ,
-+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
-+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
-+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
-+ * same value.
-+ * If MODE==UNSCHEDULED
-+ * Choose FQID().
-+ */
-+#define QM_PDQCR_MODE_SCHEDULED 0x0
-+#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
-+#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
-+#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
-+#define QM_PDQCR_COUNT_EXACT1 0x0
-+#define QM_PDQCR_COUNT_UPTO3 0x20000000
-+#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
-+#define QM_PDQCR_TYPE_MASK 0x03000000
-+#define QM_PDQCR_TYPE_NULL 0x0
-+#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
-+#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
-+#define QM_PDQCR_TYPE_ACTIVE 0x03000000
-+#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
-+#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
-+#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
-+#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
-+#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
-+#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
-+#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
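And for PDQCR, a hedged sketch of the unscheduled (direct FQID) form described above, again assuming an initialised 'portal'; the FQID is a placeholder:

    /* Pre-dequeue up to 3 frames straight from one unscheduled frame queue. */
    u32 pdqcr = QM_PDQCR_MODE_UNSCHEDULED |
                QM_PDQCR_COUNT_UPTO3 |
                QM_PDQCR_FQID(0x123);   /* placeholder FQID, illustrative only */

    qm_dqrr_pdqcr_set(portal, pdqcr);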
-+
-+/* Used by all portal interrupt registers except 'inhibit'
-+ * Channels with frame availability
-+ */
-+#define QM_PIRQ_DQAVAIL 0x0000ffff
-+
-+/* The DQAVAIL interrupt fields break down into these bits; */
-+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
-+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
-+#define QM_DQAVAIL_MASK 0xffff
-+/* This mask contains all the "irqsource" bits visible to API users */
-+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
-+
-+/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
-+ * the disable register" rather than "disable the ability to write". */
-+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
-+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
-+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
-+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
-+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
-+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
-+/* TODO: unfortunate name-clash here, reword? */
-+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
-+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
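These wrappers pair with __qm_isr_read()/__qm_isr_write() from the portal code earlier in this patch; for instance, gating the portal interrupt output around a section that must not race the portal IRQ might look like this (sketch only, 'portal' assumed initialised):

    qm_isr_inhibit(portal);         /* writes 1 to the inhibit register */
    /* ... touch portal state without the portal IRQ firing ... */
    qm_isr_uninhibit(portal);       /* writes 0, re-enabling the IRQ output */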
-+
-+#ifdef CONFIG_FSL_QMAN_CONFIG
-+int qman_have_ccsr(void);
-+#else
-+#define qman_have_ccsr 0
-+#endif
-+
-+__init int qman_init(void);
-+__init int qman_resource_init(void);
-+
-+/* CEETM related */
-+#define QMAN_CEETM_MAX 2
-+extern u8 num_ceetms;
-+extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
-+int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
-+int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
-+int qman_ceetm_set_prescaler(enum qm_dc_portal portal);
-+int qman_ceetm_get_prescaler(u16 *pres);
-+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
-+ struct qm_mcr_ceetm_cq_query *cq_query);
-+int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
-+ struct qm_mcr_ceetm_ccgr_query *response);
-+int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num);
-+
-+extern void *affine_portals[NR_CPUS];
-+const struct qm_portal_config *qman_get_qm_portal_config(
-+ struct qman_portal *portal);
-+
-+/* power management */
-+#ifdef CONFIG_SUSPEND
-+void suspend_unused_qportal(void);
-+void resume_unused_qportal(void);
-+#endif
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_test.c
-@@ -0,0 +1,57 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qman_test.h"
-+
-+MODULE_AUTHOR("Geoff Thorpe");
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_DESCRIPTION("Qman testing");
-+
-+static int test_init(void)
-+{
-+ int loop = 1;
-+ while (loop--) {
-+#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO
-+ qman_test_hotpotato();
-+#endif
-+#ifdef CONFIG_FSL_QMAN_TEST_HIGH
-+ qman_test_high();
-+#endif
-+ }
-+ return 0;
-+}
-+
-+static void test_exit(void)
-+{
-+}
-+
-+module_init(test_init);
-+module_exit(test_exit);
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_test.h
-@@ -0,0 +1,45 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/io.h>
-+#include <linux/slab.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+
-+#include <linux/fsl_qman.h>
-+
-+void qman_test_hotpotato(void);
-+void qman_test_high(void);
-+
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_test_high.c
-@@ -0,0 +1,216 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qman_test.h"
-+
-+/*************/
-+/* constants */
-+/*************/
-+
-+#define CGR_ID 27
-+#define POOL_ID 2
-+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
-+#define NUM_ENQUEUES 10
-+#define NUM_PARTIAL 4
-+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
-+ QM_SDQCR_TYPE_PRIO_QOS | \
-+ QM_SDQCR_TOKEN_SET(0x98) | \
-+ QM_SDQCR_CHANNELS_DEDICATED | \
-+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
-+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
-+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
-+
-+/*************************************/
-+/* Predeclarations (eg. for fq_base) */
-+/*************************************/
-+
-+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
-+ struct qman_fq *,
-+ const struct qm_dqrr_entry *);
-+static void cb_ern(struct qman_portal *, struct qman_fq *,
-+ const struct qm_mr_entry *);
-+static void cb_fqs(struct qman_portal *, struct qman_fq *,
-+ const struct qm_mr_entry *);
-+
-+/***************/
-+/* global vars */
-+/***************/
-+
-+static struct qm_fd fd, fd_dq;
-+static struct qman_fq fq_base = {
-+ .cb.dqrr = cb_dqrr,
-+ .cb.ern = cb_ern,
-+ .cb.fqs = cb_fqs
-+};
-+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
-+static int retire_complete, sdqcr_complete;
-+
-+/**********************/
-+/* internal functions */
-+/**********************/
-+
-+/* Helpers for initialising and "incrementing" a frame descriptor */
-+static void fd_init(struct qm_fd *__fd)
-+{
-+ qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
-+ __fd->format = qm_fd_contig_big;
-+ __fd->length29 = 0x0000ffff;
-+ __fd->cmd = 0xfeedf00d;
-+}
-+
-+static void fd_inc(struct qm_fd *__fd)
-+{
-+ u64 t = qm_fd_addr_get64(__fd);
-+ int z = t >> 40;
-+ t <<= 1;
-+ if (z)
-+ t |= 1;
-+ qm_fd_addr_set64(__fd, t);
-+ __fd->length29--;
-+ __fd->cmd++;
-+}
-+
-+/* The only part of the 'fd' we can't memcmp() is the ppid */
-+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
-+{
-+ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
-+ if (!r)
-+ r = a->format - b->format;
-+ if (!r)
-+ r = a->opaque - b->opaque;
-+ if (!r)
-+ r = a->cmd - b->cmd;
-+ return r;
-+}
-+
-+/********/
-+/* test */
-+/********/
-+
-+static void do_enqueues(struct qman_fq *fq)
-+{
-+ unsigned int loop;
-+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
-+ if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
-+ (((loop + 1) == NUM_ENQUEUES) ?
-+ QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
-+ panic("qman_enqueue() failed\n");
-+ fd_inc(&fd);
-+ }
-+}
-+
-+void qman_test_high(void)
-+{
-+ unsigned int flags;
-+ int res;
-+ struct qman_fq *fq = &fq_base;
-+
-+ pr_info("qman_test_high starting\n");
-+ fd_init(&fd);
-+ fd_init(&fd_dq);
-+
-+ /* Initialise (parked) FQ */
-+ if (qman_create_fq(0, FQ_FLAGS, fq))
-+ panic("qman_create_fq() failed\n");
-+ if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
-+ panic("qman_init_fq() failed\n");
-+
-+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
-+ do_enqueues(fq);
-+ pr_info("VDQCR (till-empty);\n");
-+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
-+ QM_VDQCR_NUMFRAMES_TILLEMPTY))
-+ panic("qman_volatile_dequeue() failed\n");
-+ do_enqueues(fq);
-+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
-+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
-+ QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
-+ panic("qman_volatile_dequeue() failed\n");
-+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
-+ NUM_ENQUEUES);
-+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
-+ QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
-+ panic("qman_volatile_dequeue() failed\n");
-+
-+ do_enqueues(fq);
-+ pr_info("scheduled dequeue (till-empty)\n");
-+ if (qman_schedule_fq(fq))
-+ panic("qman_schedule_fq() failed\n");
-+ wait_event(waitqueue, sdqcr_complete);
-+
-+ /* Retire and OOS the FQ */
-+ res = qman_retire_fq(fq, &flags);
-+ if (res < 0)
-+ panic("qman_retire_fq() failed\n");
-+ wait_event(waitqueue, retire_complete);
-+ if (flags & QMAN_FQ_STATE_BLOCKOOS)
-+ panic("leaking frames\n");
-+ if (qman_oos_fq(fq))
-+ panic("qman_oos_fq() failed\n");
-+ qman_destroy_fq(fq, 0);
-+ pr_info("qman_test_high finished\n");
-+}
-+
-+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dq)
-+{
-+ if (fd_cmp(&fd_dq, &dq->fd)) {
-+ pr_err("BADNESS: dequeued frame doesn't match;\n");
-+ pr_err("Expected 0x%llx, got 0x%llx\n",
-+ (unsigned long long)fd_dq.length29,
-+ (unsigned long long)dq->fd.length29);
-+ BUG();
-+ }
-+ fd_inc(&fd_dq);
-+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
-+ sdqcr_complete = 1;
-+ wake_up(&waitqueue);
-+ }
-+ return qman_cb_dqrr_consume;
-+}
-+
-+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ panic("cb_ern() unimplemented");
-+}
-+
-+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_mr_entry *msg)
-+{
-+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
-+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
-+ panic("unexpected FQS message");
-+ pr_info("Retirement message received\n");
-+ retire_complete = 1;
-+ wake_up(&waitqueue);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c
-@@ -0,0 +1,502 @@
-+/* Copyright 2009-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/kthread.h>
-+#include <linux/platform_device.h>
-+#include <linux/dma-mapping.h>
-+#include "qman_test.h"
-+
-+/* Algorithm:
-+ *
-+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
-+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
-+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
-+ * shuttle a "hot potato" frame around them such that every forwarding action
-+ * moves it from one cpu to another. (The use of more than one handler per cpu
-+ * is to allow enough handlers/FQs to truly test the significance of caching -
-+ * ie. when cache-expiries are occurring.)
-+ *
-+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
-+ * first and last words of the frame data will undergo a transformation step on
-+ * each forwarding action. To achieve this, each handler will be assigned a
-+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
-+ * received by a handler, the mixer of the expected sender is XOR'd into all
-+ * words of the entire frame, which is then validated against the original
-+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
-+ * the current handler. Apart from validating that the frame is taking the
-+ * expected path, this also provides some quasi-realistic overheads to each
-+ * forwarding action - dereferencing *all* the frame data, computation, and
-+ * conditional branching. There is a "special" handler designated to act as the
-+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
-+ * to determine when the test has completed by counting HP_LOOPS iterations.
-+ *
-+ * Init phases:
-+ *
-+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
-+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
-+ * handlers and link-list them (but do no other handler setup).
-+ *
-+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
-+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
-+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
-+ * and advance the iterator for the next loop. This includes a final fixup,
-+ * which connects the last handler to the first (and which is why phase 2
-+ * and 3 are separate).
-+ *
-+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
-+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
-+ * initialise FQ objects and advance the iterator for the next loop.
-+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
-+ * initialisation targets the correct cpu.
-+ */
-+
-+/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
-+ * the fn from irq context, which is too restrictive). */
-+struct bstrap {
-+ void (*fn)(void);
-+ atomic_t started;
-+};
-+static int bstrap_fn(void *__bstrap)
-+{
-+ struct bstrap *bstrap = __bstrap;
-+ atomic_inc(&bstrap->started);
-+ bstrap->fn();
-+ while (!kthread_should_stop())
-+ msleep(1);
-+ return 0;
-+}
-+static int on_all_cpus(void (*fn)(void))
-+{
-+ int cpu;
-+ for_each_cpu(cpu, cpu_online_mask) {
-+ struct bstrap bstrap = {
-+ .fn = fn,
-+ .started = ATOMIC_INIT(0)
-+ };
-+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
-+ "hotpotato%d", cpu);
-+ int ret;
-+ if (IS_ERR(k))
-+ return -ENOMEM;
-+ kthread_bind(k, cpu);
-+ wake_up_process(k);
-+ /* If we call kthread_stop() before the "wake up" has had an
-+ * effect, then the thread may exit with -EINTR without ever
-+ * running the function. So poll until it's started before
-+ * requesting it to stop. */
-+ while (!atomic_read(&bstrap.started))
-+ msleep(10);
-+ ret = kthread_stop(k);
-+ if (ret)
-+ return ret;
-+ }
-+ return 0;
-+}
-+
-+struct hp_handler {
-+
-+ /* The following data is stashed when 'rx' is dequeued; */
-+ /* -------------- */
-+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
-+ struct qman_fq rx;
-+ /* The Tx FQ we should forward to */
-+ struct qman_fq tx;
-+ /* The value we XOR post-dequeue, prior to validating */
-+ u32 rx_mixer;
-+ /* The value we XOR pre-enqueue, after validating */
-+ u32 tx_mixer;
-+ /* what the hotpotato address should be on dequeue */
-+ dma_addr_t addr;
-+ u32 *frame_ptr;
-+
-+ /* The following data isn't (necessarily) stashed on dequeue; */
-+ /* -------------- */
-+ u32 fqid_rx, fqid_tx;
-+ /* list node for linking us into 'hp_cpu' */
-+ struct list_head node;
-+ /* Just to check ... */
-+ unsigned int processor_id;
-+} ____cacheline_aligned;
-+
-+struct hp_cpu {
-+ /* identify the cpu we run on; */
-+ unsigned int processor_id;
-+ /* root node for the per-cpu list of handlers */
-+ struct list_head handlers;
-+ /* list node for linking us into 'hp_cpu_list' */
-+ struct list_head node;
-+ /* when repeatedly scanning 'hp_list', each time linking the n'th
-+ * handlers together, this is used as per-cpu iterator state */
-+ struct hp_handler *iterator;
-+};
-+
-+/* Each cpu has one of these */
-+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
-+
-+/* links together the hp_cpu structs, in first-come first-serve order. */
-+static LIST_HEAD(hp_cpu_list);
-+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
-+
-+static unsigned int hp_cpu_list_length;
-+
-+/* the "special" handler, that starts and terminates the test. */
-+static struct hp_handler *special_handler;
-+static int loop_counter;
-+
-+/* handlers are allocated out of this, so they're properly aligned. */
-+static struct kmem_cache *hp_handler_slab;
-+
-+/* this is the frame data */
-+static void *__frame_ptr;
-+static u32 *frame_ptr;
-+static dma_addr_t frame_dma;
-+
-+/* the main function waits on this */
-+static DECLARE_WAIT_QUEUE_HEAD(queue);
-+
-+#define HP_PER_CPU 2
-+#define HP_LOOPS 8
-+/* 80 words (320 bytes) of frame data, which spans several cachelines */
-+#define HP_NUM_WORDS 80
-+/* First word of the LFSR-based frame data */
-+#define HP_FIRST_WORD 0xabbaf00d
-+
-+static inline u32 do_lfsr(u32 prev)
-+{
-+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
-+}
-+
-+static void allocate_frame_data(void)
-+{
-+ u32 lfsr = HP_FIRST_WORD;
-+ int loop;
-+ struct platform_device *pdev = platform_device_alloc("foobar", -1);
-+ if (!pdev)
-+ panic("platform_device_alloc() failed");
-+ if (platform_device_add(pdev))
-+ panic("platform_device_add() failed");
-+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
-+ if (!__frame_ptr)
-+ panic("kmalloc() failed");
-+ frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
-+ ~(unsigned long)63);
-+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
-+ frame_ptr[loop] = lfsr;
-+ lfsr = do_lfsr(lfsr);
-+ }
-+ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
-+ DMA_BIDIRECTIONAL);
-+ platform_device_del(pdev);
-+ platform_device_put(pdev);
-+}
-+
-+static void deallocate_frame_data(void)
-+{
-+ kfree(__frame_ptr);
-+}
-+
-+static inline void process_frame_data(struct hp_handler *handler,
-+ const struct qm_fd *fd)
-+{
-+ u32 *p = handler->frame_ptr;
-+ u32 lfsr = HP_FIRST_WORD;
-+ int loop;
-+ if (qm_fd_addr_get64(fd) != (handler->addr & 0xffffffffff)) {
-+ pr_err("Got 0x%llx expected 0x%llx\n",
-+ qm_fd_addr_get64(fd), handler->addr);
-+ panic("bad frame address");
-+ }
-+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
-+ *p ^= handler->rx_mixer;
-+ if (*p != lfsr)
-+ panic("corrupt frame data");
-+ *p ^= handler->tx_mixer;
-+ lfsr = do_lfsr(lfsr);
-+ }
-+}
-+
-+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dqrr)
-+{
-+ struct hp_handler *handler = (struct hp_handler *)fq;
-+
-+ process_frame_data(handler, &dqrr->fd);
-+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
-+ panic("qman_enqueue() failed");
-+ return qman_cb_dqrr_consume;
-+}
-+
-+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dqrr)
-+{
-+ struct hp_handler *handler = (struct hp_handler *)fq;
-+
-+ process_frame_data(handler, &dqrr->fd);
-+ if (++loop_counter < HP_LOOPS) {
-+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
-+ panic("qman_enqueue() failed");
-+ } else {
-+ pr_info("Received final (%dth) frame\n", loop_counter);
-+ wake_up(&queue);
-+ }
-+ return qman_cb_dqrr_consume;
-+}
-+
-+static void create_per_cpu_handlers(void)
-+{
-+ struct hp_handler *handler;
-+ int loop;
-+ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
-+
-+ hp_cpu->processor_id = smp_processor_id();
-+ spin_lock(&hp_lock);
-+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
-+ hp_cpu_list_length++;
-+ spin_unlock(&hp_lock);
-+ INIT_LIST_HEAD(&hp_cpu->handlers);
-+ for (loop = 0; loop < HP_PER_CPU; loop++) {
-+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
-+ if (!handler)
-+ panic("kmem_cache_alloc() failed");
-+ handler->processor_id = hp_cpu->processor_id;
-+ handler->addr = frame_dma;
-+ handler->frame_ptr = frame_ptr;
-+ list_add_tail(&handler->node, &hp_cpu->handlers);
-+ }
-+ put_cpu_var(hp_cpus);
-+}
-+
-+static void destroy_per_cpu_handlers(void)
-+{
-+ struct list_head *loop, *tmp;
-+ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
-+
-+ spin_lock(&hp_lock);
-+ list_del(&hp_cpu->node);
-+ spin_unlock(&hp_lock);
-+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
-+ u32 flags;
-+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
-+ node);
-+ if (qman_retire_fq(&handler->rx, &flags))
-+ panic("qman_retire_fq(rx) failed");
-+ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
-+ if (qman_oos_fq(&handler->rx))
-+ panic("qman_oos_fq(rx) failed");
-+ qman_destroy_fq(&handler->rx, 0);
-+ qman_destroy_fq(&handler->tx, 0);
-+ qman_release_fqid(handler->fqid_rx);
-+ list_del(&handler->node);
-+ kmem_cache_free(hp_handler_slab, handler);
-+ }
-+ put_cpu_var(hp_cpus);
-+}
-+
-+static inline u8 num_cachelines(u32 offset)
-+{
-+ u8 res = (offset + (L1_CACHE_BYTES - 1))
-+ / (L1_CACHE_BYTES);
-+ if (res > 3)
-+ return 3;
-+ return res;
-+}
-+#define STASH_DATA_CL \
-+ num_cachelines(HP_NUM_WORDS * 4)
-+#define STASH_CTX_CL \
-+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
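For reference, with HP_NUM_WORDS = 80 the frame data is 320 bytes, so on a part with 64-byte cachelines (an assumption; L1_CACHE_BYTES is platform-dependent) num_cachelines(320) computes (320 + 63) / 64 = 5 and is then capped to 3 by the helper above, while STASH_CTX_CL covers only the hp_handler fields up to fqid_rx, matching the "stashed on dequeue" split documented in the struct definition.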
-+
-+static void init_handler(void *__handler)
-+{
-+ struct qm_mcc_initfq opts;
-+ struct hp_handler *handler = __handler;
-+ BUG_ON(handler->processor_id != smp_processor_id());
-+ /* Set up rx */
-+ memset(&handler->rx, 0, sizeof(handler->rx));
-+ if (handler == special_handler)
-+ handler->rx.cb.dqrr = special_dqrr;
-+ else
-+ handler->rx.cb.dqrr = normal_dqrr;
-+ if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
-+ panic("qman_create_fq(rx) failed");
-+ memset(&opts, 0, sizeof(opts));
-+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
-+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
-+ opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
-+ opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
-+ if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
-+ QMAN_INITFQ_FLAG_LOCAL, &opts))
-+ panic("qman_init_fq(rx) failed");
-+ /* Set up tx */
-+ memset(&handler->tx, 0, sizeof(handler->tx));
-+ if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
-+ &handler->tx))
-+ panic("qman_create_fq(tx) failed");
-+}
-+
-+static void init_phase2(void)
-+{
-+ int loop;
-+ u32 fqid = 0;
-+ u32 lfsr = 0xdeadbeef;
-+ struct hp_cpu *hp_cpu;
-+ struct hp_handler *handler;
-+
-+ for (loop = 0; loop < HP_PER_CPU; loop++) {
-+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
-+ int ret;
-+ if (!loop)
-+ hp_cpu->iterator = list_first_entry(
-+ &hp_cpu->handlers,
-+ struct hp_handler, node);
-+ else
-+ hp_cpu->iterator = list_entry(
-+ hp_cpu->iterator->node.next,
-+ struct hp_handler, node);
-+ /* Rx FQID is the previous handler's Tx FQID */
-+ hp_cpu->iterator->fqid_rx = fqid;
-+ /* Allocate new FQID for Tx */
-+ ret = qman_alloc_fqid(&fqid);
-+ if (ret)
-+ panic("qman_alloc_fqid() failed");
-+ hp_cpu->iterator->fqid_tx = fqid;
-+ /* Rx mixer is the previous handler's Tx mixer */
-+ hp_cpu->iterator->rx_mixer = lfsr;
-+ /* Get new mixer for Tx */
-+ lfsr = do_lfsr(lfsr);
-+ hp_cpu->iterator->tx_mixer = lfsr;
-+ }
-+ }
-+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
-+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
-+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
-+ BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
-+ handler->fqid_rx = fqid;
-+ handler->rx_mixer = lfsr;
-+ /* and tag it as our "special" handler */
-+ special_handler = handler;
-+}
-+
-+static void init_phase3(void)
-+{
-+ int loop;
-+ struct hp_cpu *hp_cpu;
-+
-+ for (loop = 0; loop < HP_PER_CPU; loop++) {
-+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
-+ if (!loop)
-+ hp_cpu->iterator = list_first_entry(
-+ &hp_cpu->handlers,
-+ struct hp_handler, node);
-+ else
-+ hp_cpu->iterator = list_entry(
-+ hp_cpu->iterator->node.next,
-+ struct hp_handler, node);
-+ preempt_disable();
-+ if (hp_cpu->processor_id == smp_processor_id())
-+ init_handler(hp_cpu->iterator);
-+ else
-+ smp_call_function_single(hp_cpu->processor_id,
-+ init_handler, hp_cpu->iterator, 1);
-+ preempt_enable();
-+ }
-+ }
-+}
-+
-+static void send_first_frame(void *ignore)
-+{
-+ u32 *p = special_handler->frame_ptr;
-+ u32 lfsr = HP_FIRST_WORD;
-+ int loop;
-+ struct qm_fd fd;
-+
-+ BUG_ON(special_handler->processor_id != smp_processor_id());
-+ memset(&fd, 0, sizeof(fd));
-+ qm_fd_addr_set64(&fd, special_handler->addr);
-+ fd.format = qm_fd_contig_big;
-+ fd.length29 = HP_NUM_WORDS * 4;
-+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
-+ if (*p != lfsr)
-+ panic("corrupt frame data");
-+ *p ^= special_handler->tx_mixer;
-+ lfsr = do_lfsr(lfsr);
-+ }
-+ pr_info("Sending first frame\n");
-+ if (qman_enqueue(&special_handler->tx, &fd, 0))
-+ panic("qman_enqueue() failed");
-+}
-+
-+void qman_test_hotpotato(void)
-+{
-+ if (cpumask_weight(cpu_online_mask) < 2) {
-+ pr_info("qman_test_hotpotato, skip - only 1 CPU\n");
-+ return;
-+ }
-+
-+ pr_info("qman_test_hotpotato starting\n");
-+
-+ hp_cpu_list_length = 0;
-+ loop_counter = 0;
-+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
-+ sizeof(struct hp_handler), L1_CACHE_BYTES,
-+ SLAB_HWCACHE_ALIGN, NULL);
-+ if (!hp_handler_slab)
-+ panic("kmem_cache_create() failed");
-+
-+ allocate_frame_data();
-+
-+ /* Init phase 1 */
-+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
-+ if (on_all_cpus(create_per_cpu_handlers))
-+		panic("on_all_cpus() failed");
-+ pr_info("Number of cpus: %d, total of %d handlers\n",
-+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
-+
-+ init_phase2();
-+
-+ init_phase3();
-+
-+ preempt_disable();
-+ if (special_handler->processor_id == smp_processor_id())
-+ send_first_frame(NULL);
-+ else
-+ smp_call_function_single(special_handler->processor_id,
-+ send_first_frame, NULL, 1);
-+ preempt_enable();
-+
-+ wait_event(queue, loop_counter == HP_LOOPS);
-+ deallocate_frame_data();
-+ if (on_all_cpus(destroy_per_cpu_handlers))
-+		panic("on_all_cpus() failed");
-+ kmem_cache_destroy(hp_handler_slab);
-+ pr_info("qman_test_hotpotato finished\n");
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_qbman/qman_utility.c
-@@ -0,0 +1,129 @@
-+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qman_private.h"
-+
-+/* ----------------- */
-+/* --- FQID Pool --- */
-+
-+struct qman_fqid_pool {
-+ /* Base and size of the FQID range */
-+ u32 fqid_base;
-+ u32 total;
-+ /* Number of FQIDs currently "allocated" */
-+ u32 used;
-+ /* Allocation optimisation. When 'used<total', it is the index of an
-+ * available FQID. Otherwise there are no available FQIDs, and this
-+ * will be set when the next deallocation occurs. */
-+ u32 next;
-+ /* A bit-field representation of the FQID range. */
-+ unsigned long *bits;
-+};
-+
-+#define QLONG_BYTES sizeof(unsigned long)
-+#define QLONG_BITS (QLONG_BYTES * 8)
-+/* Number of 'longs' required for the given number of bits */
-+#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
-+/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
-+#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
-+/* And in bits */
-+#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
-+
-+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
-+{
-+ struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
-+ unsigned int i;
-+
-+ BUG_ON(!num);
-+ if (!pool)
-+ return NULL;
-+ pool->fqid_base = fqid_start;
-+ pool->total = num;
-+ pool->used = 0;
-+ pool->next = 0;
-+ pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
-+ if (!pool->bits) {
-+ kfree(pool);
-+ return NULL;
-+ }
-+ /* If num is not an even multiple of QLONG_BITS (or even 8, for
-+ * byte-oriented searching) then we fill the trailing bits with 1, to
-+ * make them look allocated (permanently). */
-+ for (i = num + 1; i < QNUM_BITS(num); i++)
-+ set_bit(i, pool->bits);
-+ return pool;
-+}
-+EXPORT_SYMBOL(qman_fqid_pool_create);
-+
-+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
-+{
-+ int ret = pool->used;
-+ kfree(pool->bits);
-+ kfree(pool);
-+ return ret;
-+}
-+EXPORT_SYMBOL(qman_fqid_pool_destroy);
-+
-+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
-+{
-+ int ret;
-+ if (pool->used == pool->total)
-+ return -ENOMEM;
-+ *fqid = pool->fqid_base + pool->next;
-+ ret = test_and_set_bit(pool->next, pool->bits);
-+ BUG_ON(ret);
-+ if (++pool->used == pool->total)
-+ return 0;
-+ pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
-+ if (pool->next >= pool->total)
-+ pool->next = find_first_zero_bit(pool->bits, pool->total);
-+ BUG_ON(pool->next >= pool->total);
-+ return 0;
-+}
-+EXPORT_SYMBOL(qman_fqid_pool_alloc);
-+
-+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
-+{
-+ int ret;
-+
-+ fqid -= pool->fqid_base;
-+ ret = test_and_clear_bit(fqid, pool->bits);
-+ BUG_ON(!ret);
-+ if (pool->used-- == pool->total)
-+ pool->next = fqid;
-+}
-+EXPORT_SYMBOL(qman_fqid_pool_free);
-+
-+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
-+{
-+ return pool->used;
-+}
-+EXPORT_SYMBOL(qman_fqid_pool_used);
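/* A minimal usage sketch of the FQID pool helpers above, assuming the
 * driver-internal prototypes from qman_private.h are in scope; the base FQID
 * 0x100, the count of 64 and the fqid_pool_sketch() name are illustrative,
 * not taken from the driver. */
#include <linux/errno.h>

static int fqid_pool_sketch(void)
{
	struct qman_fqid_pool *pool;
	u32 fqid;

	pool = qman_fqid_pool_create(0x100, 64);	/* manages 0x100..0x13f */
	if (!pool)
		return -ENOMEM;
	if (!qman_fqid_pool_alloc(pool, &fqid)) {
		/* 'fqid' is now marked used: qman_fqid_pool_used() == 1 */
		qman_fqid_pool_free(pool, fqid);
	}
	/* destroy returns how many FQIDs were still allocated (0 here) */
	return qman_fqid_pool_destroy(pool);
}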
---- /dev/null
-+++ b/include/linux/fsl/svr.h
-@@ -0,0 +1,97 @@
-+/*
-+ * MPC85xx cpu type detection
-+ *
-+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
-+ *
-+ * This is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ */
-+
-+#ifndef FSL_SVR_H
-+#define FSL_SVR_H
-+
-+#define SVR_REV(svr)	((svr) & 0xFF)	/* SOC design revision */
-+#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/
-+#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/
-+
-+/* Some parts define SVR[0:23] as the SOC version */
-+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
-+
-+#define SVR_8533 0x803400
-+#define SVR_8535 0x803701
-+#define SVR_8536 0x803700
-+#define SVR_8540 0x803000
-+#define SVR_8541 0x807200
-+#define SVR_8543 0x803200
-+#define SVR_8544 0x803401
-+#define SVR_8545 0x803102
-+#define SVR_8547 0x803101
-+#define SVR_8548 0x803100
-+#define SVR_8555 0x807100
-+#define SVR_8560 0x807000
-+#define SVR_8567 0x807501
-+#define SVR_8568 0x807500
-+#define SVR_8569 0x808000
-+#define SVR_8572 0x80E000
-+#define SVR_P1010 0x80F100
-+#define SVR_P1011 0x80E500
-+#define SVR_P1012 0x80E501
-+#define SVR_P1013 0x80E700
-+#define SVR_P1014 0x80F101
-+#define SVR_P1017 0x80F700
-+#define SVR_P1020 0x80E400
-+#define SVR_P1021 0x80E401
-+#define SVR_P1022 0x80E600
-+#define SVR_P1023 0x80F600
-+#define SVR_P1024 0x80E402
-+#define SVR_P1025 0x80E403
-+#define SVR_P2010 0x80E300
-+#define SVR_P2020 0x80E200
-+#define SVR_P2040 0x821000
-+#define SVR_P2041 0x821001
-+#define SVR_P3041 0x821103
-+#define SVR_P4040 0x820100
-+#define SVR_P4080 0x820000
-+#define SVR_P5010 0x822100
-+#define SVR_P5020 0x822000
-+#define SVR_P5021	0x820500
-+#define SVR_P5040 0x820400
-+#define SVR_T4240 0x824000
-+#define SVR_T4120 0x824001
-+#define SVR_T4160 0x824100
-+#define SVR_T4080 0x824102
-+#define SVR_C291 0x850000
-+#define SVR_C292 0x850020
-+#define SVR_C293 0x850030
-+#define SVR_B4860	0x868000
-+#define SVR_G4860 0x868001
-+#define SVR_G4060 0x868003
-+#define SVR_B4440 0x868100
-+#define SVR_G4440 0x868101
-+#define SVR_B4420 0x868102
-+#define SVR_B4220 0x868103
-+#define SVR_T1040 0x852000
-+#define SVR_T1041 0x852001
-+#define SVR_T1042 0x852002
-+#define SVR_T1020 0x852100
-+#define SVR_T1021 0x852101
-+#define SVR_T1022 0x852102
-+#define SVR_T1023 0x854100
-+#define SVR_T1024 0x854000
-+#define SVR_T2080 0x853000
-+#define SVR_T2081 0x853100
-+
-+#define SVR_8610 0x80A000
-+#define SVR_8641 0x809000
-+#define SVR_8641D 0x809001
-+
-+#define SVR_9130 0x860001
-+#define SVR_9131 0x860000
-+#define SVR_9132 0x861000
-+#define SVR_9232 0x861400
-+
-+#define SVR_Unknown 0xFFFFFF
-+
-+#endif
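/* A minimal sketch of one way these constants can be combined: mask a raw
 * SVR value with SVR_SOC_VER() to identify the part and use SVR_MAJ()/
 * SVR_MIN() for the silicon revision. The value 0x85200020 is a made-up
 * example that would decode as a T1040 at revision 2.0; the helper name is
 * hypothetical. */
#include <linux/types.h>
#include <linux/printk.h>

static void svr_report_sketch(u32 svr)
{
	if (SVR_SOC_VER(svr) == SVR_T1040)
		pr_info("T1040 rev %d.%d\n", SVR_MAJ(svr), SVR_MIN(svr));
}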
---- /dev/null
-+++ b/include/linux/fsl_bman.h
-@@ -0,0 +1,532 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef FSL_BMAN_H
-+#define FSL_BMAN_H
-+
-+#ifdef __cplusplus
-+extern "C" {
-+#endif
-+
-+/* Last updated for v00.79 of the BG */
-+
-+/* Portal processing (interrupt) sources */
-+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
-+#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
-+
-+/* This wrapper represents a bit-array for the depletion state of the 64 Bman
-+ * buffer pools. */
-+struct bman_depletion {
-+ u32 __state[2];
-+};
-+#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
-+#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
-+#define __bmdep_word(x) ((x) >> 5)
-+#define __bmdep_shift(x) ((x) & 0x1f)
-+#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
-+static inline void bman_depletion_init(struct bman_depletion *c)
-+{
-+ c->__state[0] = c->__state[1] = 0;
-+}
-+static inline void bman_depletion_fill(struct bman_depletion *c)
-+{
-+ c->__state[0] = c->__state[1] = ~0;
-+}
-+static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
-+{
-+ return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
-+}
-+static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
-+{
-+ c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
-+}
-+static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
-+{
-+ c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
-+}
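/* A minimal sketch of the bit-array helpers above: build a mask covering two
 * buffer pools (BPIDs 7 and 40 are arbitrary illustrative values), as could
 * be supplied through a portal's depletion mask. */
static void depletion_mask_sketch(void)
{
	struct bman_depletion mask;

	bman_depletion_init(&mask);		/* no pools selected */
	bman_depletion_set(&mask, 7);
	bman_depletion_set(&mask, 40);
	if (bman_depletion_get(&mask, 7))	/* non-zero: BPID 7 is set */
		bman_depletion_unset(&mask, 7);
}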
-+
-+/* ------------------------------------------------------- */
-+/* --- Bman data structures (and associated constants) --- */
-+
-+/* Represents s/w corenet portal mapped data structures */
-+struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
-+struct bm_mc_command; /* MC (Management Command) command */
-+struct bm_mc_result; /* MC result */
-+
-+/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
-+ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
-+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
-+struct bm_buffer {
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 __reserved1;
-+ u8 bpid;
-+ u16 hi; /* High 16-bits of 48-bit address */
-+ u32 lo; /* Low 32-bits of 48-bit address */
-+#else
-+ u32 lo;
-+ u16 hi;
-+ u8 bpid;
-+ u8 __reserved;
-+#endif
-+ };
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u64 __notaddress:16;
-+ u64 addr:48;
-+#else
-+ u64 addr:48;
-+ u64 __notaddress:16;
-+#endif
-+ };
-+ u64 opaque;
-+ };
-+} __aligned(8);
-+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
-+{
-+ return buf->addr;
-+}
-+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
-+{
-+ return (dma_addr_t)buf->addr;
-+}
-+/* Macro, so we compile better if 'v' isn't always 64-bit */
-+#define bm_buffer_set64(buf, v) \
-+ do { \
-+ struct bm_buffer *__buf931 = (buf); \
-+ __buf931->hi = upper_32_bits(v); \
-+ __buf931->lo = lower_32_bits(v); \
-+ } while (0)
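/* A minimal sketch of filling in a bm_buffer: the macro above splits the
 * 48-bit DMA address into the hi/lo fields. The BPID of 16 is an arbitrary
 * illustrative value and is only consulted by the per-buffer BPID forms
 * (BM_RCR_VERB_CMD_BPID_MULTI, BM_MCC_VERB_ACQUIRE). */
#include <linux/string.h>

static void buffer_sketch(struct bm_buffer *buf, dma_addr_t addr)
{
	memset(buf, 0, sizeof(*buf));
	bm_buffer_set64(buf, addr);
	buf->bpid = 16;
}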
-+
-+/* See 1.5.3.5.4: "Release Command" */
-+struct bm_rcr_entry {
-+ union {
-+ struct {
-+ u8 __dont_write_directly__verb;
-+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
-+ u8 __reserved1[62];
-+ };
-+ struct bm_buffer bufs[8];
-+ };
-+} __packed;
-+#define BM_RCR_VERB_VBIT 0x80
-+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
-+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
-+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
-+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
-+
-+/* See 1.5.3.1: "Acquire Command" */
-+/* See 1.5.3.2: "Query Command" */
-+struct bm_mcc_acquire {
-+ u8 bpid;
-+ u8 __reserved1[62];
-+} __packed;
-+struct bm_mcc_query {
-+ u8 __reserved2[63];
-+} __packed;
-+struct bm_mc_command {
-+ u8 __dont_write_directly__verb;
-+ union {
-+ struct bm_mcc_acquire acquire;
-+ struct bm_mcc_query query;
-+ };
-+} __packed;
-+#define BM_MCC_VERB_VBIT 0x80
-+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
-+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
-+#define BM_MCC_VERB_CMD_QUERY 0x40
-+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
-+
-+/* See 1.5.3.3: "Acquire Response" */
-+/* See 1.5.3.4: "Query Response" */
-+struct bm_pool_state {
-+ u8 __reserved1[32];
-+ /* "availability state" and "depletion state" */
-+ struct {
-+ u8 __reserved1[8];
-+ /* Access using bman_depletion_***() */
-+ struct bman_depletion state;
-+ } as, ds;
-+};
-+struct bm_mc_result {
-+ union {
-+ struct {
-+ u8 verb;
-+ u8 __reserved1[63];
-+ };
-+ union {
-+ struct {
-+ u8 __reserved1;
-+ u8 bpid;
-+ u8 __reserved2[62];
-+ };
-+ struct bm_buffer bufs[8];
-+ } acquire;
-+ struct bm_pool_state query;
-+ };
-+} __packed;
-+#define BM_MCR_VERB_VBIT 0x80
-+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
-+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
-+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
-+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
-+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
-+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
-+/* Determine the "availability state" of pool 'p' from a query result 'r' */
-+#define BM_MCR_QUERY_AVAILABILITY(r, p) \
-+ bman_depletion_get(&r->query.as.state, p)
-+/* Determine the "depletion state" of pool 'p' from a query result 'r' */
-+#define BM_MCR_QUERY_DEPLETION(r, p) \
-+ bman_depletion_get(&r->query.ds.state, p)
-+
-+/*******************************************************************/
-+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
-+/*******************************************************************/
-+
-+ /* Portal and Buffer Pools */
-+ /* ----------------------- */
-+/* Represents a managed portal */
-+struct bman_portal;
-+
-+/* This object type represents Bman buffer pools. */
-+struct bman_pool;
-+
-+struct bman_portal_config {
-+ /* This is used for any "core-affine" portals, ie. default portals
-+	 * associated with the corresponding cpu. -1 implies that there is no core
-+ * affinity configured. */
-+ int cpu;
-+ /* portal interrupt line */
-+ int irq;
-+ /* the unique index of this portal */
-+ u32 index;
-+ /* Is this portal shared? (If so, it has coarser locking and demuxes
-+ * processing on behalf of other CPUs.) */
-+ int is_shared;
-+ /* These are the buffer pool IDs that may be used via this portal. */
-+ struct bman_depletion mask;
-+};
-+
-+/* This callback type is used when handling pool depletion entry/exit. The
-+ * 'cb_ctx' value is the opaque value associated with the pool object in
-+ * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
-+ * depletion-exit. */
-+typedef void (*bman_cb_depletion)(struct bman_portal *bm,
-+ struct bman_pool *pool, void *cb_ctx, int depleted);
-+
-+/* This struct specifies parameters for a bman_pool object. */
-+struct bman_pool_params {
-+ /* index of the buffer pool to encapsulate (0-63), ignored if
-+ * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
-+ u32 bpid;
-+ /* bit-mask of BMAN_POOL_FLAG_*** options */
-+ u32 flags;
-+ /* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
-+ bman_cb_depletion cb;
-+ /* opaque user value passed as a parameter to 'cb' */
-+ void *cb_ctx;
-+ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
-+ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
-+ * when run in the control plane (which controls Bman CCSR). This array
-+ * matches the definition of bm_pool_set(). */
-+ u32 thresholds[4];
-+};
-+
-+/* Flags to bman_new_pool() */
-+#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
-+#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
-+#define BMAN_POOL_FLAG_DEPLETION 0x00000004 /* track depletion entry/exit */
-+#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
-+#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
-+#define BMAN_POOL_FLAG_STOCKPILE 0x00000020 /* stockpile to reduce hw ops */
-+
-+/* Flags to bman_release() */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+#define BMAN_RELEASE_FLAG_WAIT 0x00000001 /* wait if RCR is full */
-+#define BMAN_RELEASE_FLAG_WAIT_INT 0x00000002 /* if we wait, interruptible? */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+#define BMAN_RELEASE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
-+#endif
-+#endif
-+#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
-+
-+/* Flags to bman_acquire() */
-+#define BMAN_ACQUIRE_FLAG_STOCKPILE 0x00000001 /* no hw op, stockpile only */
-+
-+ /* Portal Management */
-+ /* ----------------- */
-+/**
-+ * bman_get_portal_config - get portal configuration settings
-+ *
-+ * This returns a read-only view of the current cpu's affine portal settings.
-+ */
-+const struct bman_portal_config *bman_get_portal_config(void);
-+
-+/**
-+ * bman_irqsource_get - return the portal work that is interrupt-driven
-+ *
-+ * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
-+ * enabled for interrupt handling on the current cpu's affine portal. These
-+ * sources will trigger the portal interrupt and the interrupt handler (or a
-+ * tasklet/bottom-half it defers to) will perform the corresponding processing
-+ * work. The bman_poll_***() functions will only process sources that are not in
-+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
-+ * this always returns zero.
-+ */
-+u32 bman_irqsource_get(void);
-+
-+/**
-+ * bman_irqsource_add - add processing sources to be interrupt-driven
-+ * @bits: bitmask of BM_PIRQ_**I processing sources
-+ *
-+ * Adds processing sources that should be interrupt-driven (rather than
-+ * processed via bman_poll_***() functions). Returns zero for success, or
-+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
-+int bman_irqsource_add(u32 bits);
-+
-+/**
-+ * bman_irqsource_remove - remove processing sources from being interrupt-driven
-+ * @bits: bitmask of BM_PIRQ_**I processing sources
-+ *
-+ * Removes processing sources from being interrupt-driven, so that they will
-+ * instead be processed via bman_poll_***() functions. Returns zero for success,
-+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
-+int bman_irqsource_remove(u32 bits);
-+
-+/**
-+ * bman_affine_cpus - return a mask of cpus that have affine portals
-+ */
-+const cpumask_t *bman_affine_cpus(void);
-+
-+/**
-+ * bman_poll_slow - process anything that isn't interrupt-driven.
-+ *
-+ * This function does any portal processing that isn't interrupt-driven. If the
-+ * current CPU is sharing a portal hosted on another CPU, this function will
-+ * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
-+ * indicating what interrupt sources were actually processed by the call.
-+ *
-+ * NB, unlike the legacy wrapper bman_poll(), this function will
-+ * deterministically check for the presence of portal processing work and do it,
-+ * which implies some latency even if there's nothing to do. The bman_poll()
-+ * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
-+ * checking for (and doing) portal processing infrequently. Ie. such that
-+ * qman_poll() and bman_poll() can be called from core-processing loops. Use
-+ * bman_poll_slow() when you yourself are deciding when to incur the overhead of
-+ * processing.
-+ */
-+u32 bman_poll_slow(void);
-+
-+/**
-+ * bman_poll - process anything that isn't interrupt-driven.
-+ *
-+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
-+ * affine portal. This function does whatever processing is not triggered by
-+ * interrupts. This is a legacy wrapper that can be used in core-processing
-+ * loops but mitigates the performance overhead of portal processing by
-+ * adaptively bypassing true portal processing most of the time. (Processing is
-+ * done once every 10 calls if the previous processing revealed that work needed
-+ * to be done, or once very 1000 calls if the previous processing revealed no
-+ * work needed doing.) If you wish to control this yourself, call
-+ * bman_poll_slow() instead, which always checks for portal processing work.
-+ */
-+void bman_poll(void);
-+
-+/**
-+ * bman_rcr_is_empty - Determine if portal's RCR is empty
-+ *
-+ * For use in situations where a cpu-affine caller needs to determine when all
-+ * releases for the local portal have been processed by Bman but can't use the
-+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
-+ * The function forces tracking of RCR consumption (which normally doesn't
-+ * happen until release processing needs to find space to put new release
-+ * commands), and returns zero if the ring still has unprocessed entries,
-+ * non-zero if it is empty.
-+ */
-+int bman_rcr_is_empty(void);
-+
-+/**
-+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
-+ * @result: is set by the API to the base BPID of the allocated range
-+ * @count: the number of BPIDs required
-+ * @align: required alignment of the allocated range
-+ * @partial: non-zero if the API can return fewer than @count BPIDs
-+ *
-+ * Returns the number of buffer pools allocated, or a negative error code. If
-+ * @partial is non-zero, the allocation request may return a smaller range of
-+ * BPs than requested (though alignment will be as requested). If @partial is
-+ * zero, the return value will either be 'count' or negative.
-+ */
-+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
-+static inline int bman_alloc_bpid(u32 *result)
-+{
-+ int ret = bman_alloc_bpid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+
-+/**
-+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
-+ * @bpid: the base BPID of the range to deallocate
-+ * @count: the number of BPIDs in the range
-+ *
-+ * This function can also be used to seed the allocator with ranges of BPIDs
-+ * that it can subsequently allocate from.
-+ */
-+void bman_release_bpid_range(u32 bpid, unsigned int count);
-+static inline void bman_release_bpid(u32 bpid)
-+{
-+ bman_release_bpid_range(bpid, 1);
-+}
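/* A minimal sketch of the BPID allocator above: reserve one pool ID, use it
 * (for example in a bman_pool_params), and return it when finished. The
 * helper name is hypothetical. */
static int bpid_sketch(void)
{
	u32 bpid;
	int ret = bman_alloc_bpid(&bpid);

	if (ret)
		return ret;
	/* ... hand 'bpid' to whatever owns the pool ... */
	bman_release_bpid(bpid);
	return 0;
}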
-+
-+int bman_reserve_bpid_range(u32 bpid, unsigned int count);
-+static inline int bman_reserve_bpid(u32 bpid)
-+{
-+ return bman_reserve_bpid_range(bpid, 1);
-+}
-+
-+void bman_seed_bpid_range(u32 bpid, unsigned int count);
-+
-+
-+int bman_shutdown_pool(u32 bpid);
-+
-+ /* Pool management */
-+ /* --------------- */
-+/**
-+ * bman_new_pool - Allocates a Buffer Pool object
-+ * @params: parameters specifying the buffer pool ID and behaviour
-+ *
-+ * Creates a pool object for the given @params. A portal and the depletion
-+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
-+ * is set. NB, the fields from @params are copied into the new pool object, so
-+ * the structure provided by the caller can be released or reused after the
-+ * function returns.
-+ */
-+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
-+
-+/**
-+ * bman_free_pool - Deallocates a Buffer Pool object
-+ * @pool: the pool object to release
-+ *
-+ */
-+void bman_free_pool(struct bman_pool *pool);
-+
-+/**
-+ * bman_get_params - Returns a pool object's parameters.
-+ * @pool: the pool object
-+ *
-+ * The returned pointer refers to state within the pool object, so it must not
-+ * be modified and must no longer be read once the pool object is destroyed.
-+ */
-+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
-+
-+/**
-+ * bman_release - Release buffer(s) to the buffer pool
-+ * @pool: the buffer pool object to release to
-+ * @bufs: an array of buffers to release
-+ * @num: the number of buffers in @bufs (1-8)
-+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
-+ *
-+ * Adds the given buffers to RCR entries. If the portal @p was created with the
-+ * "COMPACT" flag, then it will be using a compaction algorithm to improve
-+ * utilisation of RCR. As such, these buffers may join an existing ring entry
-+ * and/or it may not be issued right away so as to allow future releases to join
-+ * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
-+ * behaviour by committing the RCR entry (or entries) right away. If the RCR
-+ * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
-+ * is selected, in which case it will sleep waiting for space to become
-+ * available in RCR. If the function receives a signal before such time (and
-+ * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
-+ * it returns zero.
-+ */
-+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
-+ u32 flags);
-+
-+/**
-+ * bman_acquire - Acquire buffer(s) from a buffer pool
-+ * @pool: the buffer pool object to acquire from
-+ * @bufs: array for storing the acquired buffers
-+ * @num: the number of buffers desired (@bufs is at least this big)
-+ *
-+ * Issues an "Acquire" command via the portal's management command interface.
-+ * The return value will be the number of buffers obtained from the pool, or a
-+ * negative error code if a h/w error or pool starvation was encountered. In
-+ * the latter case, the content of @bufs is undefined.
-+ */
-+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
-+ u32 flags);
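/* A minimal usage sketch combining the calls documented above (not taken
 * from a real driver): create a pool with a dynamically allocated BPID, seed
 * it with one buffer at 'addr', then acquire a buffer back and free the
 * pool. Error handling is trimmed for brevity. */
#include <linux/errno.h>
#include <linux/string.h>

static int pool_sketch(dma_addr_t addr)
{
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
	};
	struct bman_pool *pool = bman_new_pool(&params);
	struct bm_buffer buf;
	int ret;

	if (!pool)
		return -ENOMEM;
	memset(&buf, 0, sizeof(buf));
	bm_buffer_set64(&buf, addr);
	ret = bman_release(pool, &buf, 1, BMAN_RELEASE_FLAG_NOW);
	if (!ret)
		ret = bman_acquire(pool, &buf, 1, 0);	/* >0: buffers returned */
	bman_free_pool(pool);
	return ret < 0 ? ret : 0;
}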
-+
-+/**
-+ * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
-+ * @pool: the buffer pool object to which the stockpile belongs
-+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
-+ *
-+ * Adds stockpile buffers to RCR entries until the stockpile is empty.
-+ * The return value will be a negative error code if a h/w error occurred.
-+ * If BMAN_RELEASE_FLAG_NOW flag is passed and RCR ring is full,
-+ * -EAGAIN will be returned.
-+ */
-+int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
-+
-+/**
-+ * bman_query_pools - Query all buffer pool states
-+ * @state: storage for the queried availability and depletion states
-+ */
-+int bman_query_pools(struct bm_pool_state *state);
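/* A minimal sketch tying bman_query_pools() back to the bman_depletion
 * helpers defined earlier in this header: fetch the global pool state and
 * test one (arbitrary) BPID for depletion. */
static int pool_is_depleted_sketch(u8 bpid)
{
	struct bm_pool_state state;

	if (bman_query_pools(&state))
		return 0;	/* query failed; report "not depleted" */
	return bman_depletion_get(&state.ds.state, bpid) != 0;
}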
-+
-+#ifdef CONFIG_FSL_BMAN_CONFIG
-+/**
-+ * bman_query_free_buffers - Query how many free buffers are in buffer pool
-+ * @pool: the buffer pool object to query
-+ *
-+ * Returns the number of free buffers
-+ */
-+u32 bman_query_free_buffers(struct bman_pool *pool);
-+
-+/**
-+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
-+ * @pool: the buffer pool object to which the thresholds will be set
-+ * @thresholds: the new thresholds
-+ */
-+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
-+#endif
-+
-+/**
-+ * The bman_p_***() variant below may be called in situations where the cpu
-+ * to which the portal is affine is not yet online.
-+ * @bman_portal specifies which portal the API will use.
-+ */
-+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif /* FSL_BMAN_H */
---- /dev/null
-+++ b/include/linux/fsl_qman.h
-@@ -0,0 +1,3910 @@
-+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef FSL_QMAN_H
-+#define FSL_QMAN_H
-+
-+#ifdef __cplusplus
-+extern "C" {
-+#endif
-+
-+/* Last updated for v00.800 of the BG */
-+
-+/* Hardware constants */
-+#define QM_CHANNEL_SWPORTAL0 0
-+#define QMAN_CHANNEL_POOL1 0x21
-+#define QMAN_CHANNEL_CAAM 0x80
-+#define QMAN_CHANNEL_PME 0xa0
-+#define QMAN_CHANNEL_POOL1_REV3 0x401
-+#define QMAN_CHANNEL_CAAM_REV3 0x840
-+#define QMAN_CHANNEL_PME_REV3 0x860
-+#define QMAN_CHANNEL_DCE 0x8a0
-+#define QMAN_CHANNEL_DCE_QMANREV312 0x880
-+extern u16 qm_channel_pool1;
-+extern u16 qm_channel_caam;
-+extern u16 qm_channel_pme;
-+extern u16 qm_channel_dce;
-+enum qm_dc_portal {
-+ qm_dc_portal_fman0 = 0,
-+ qm_dc_portal_fman1 = 1,
-+ qm_dc_portal_caam = 2,
-+ qm_dc_portal_pme = 3,
-+ qm_dc_portal_rman = 4,
-+ qm_dc_portal_dce = 5
-+};
-+
-+/* Portal processing (interrupt) sources */
-+#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
-+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
-+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
-+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
-+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
-+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
-+/* This mask contains all the interrupt sources that need handling except DQRI,
-+ * ie. that if present should trigger slow-path processing. */
-+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
-+ QM_PIRQ_MRI | QM_PIRQ_CCSCI)
-+
-+/* --- Clock speed --- */
-+/* A qman driver instance may or may not know the current qman clock speed.
-+ * However, certain CEETM calculations may not be possible if this is not known.
-+ * The 'set' function will only succeed (return zero) if the driver did not
-+ * already know the clock speed. Likewise, the 'get' function will only succeed
-+ * if the driver does know the clock speed (either because it knew when booting,
-+ * or was told via 'set'). In cases where software is running on a driver
-+ * instance that does not know the clock speed (eg. on a hypervised data-plane),
-+ * and the user can obtain the current qman clock speed by other means (eg. from
-+ * a message sent from the control-plane), then the 'set' function can be used
-+ * to enable rate-calculations in a driver where it would otherwise not be
-+ * possible. */
-+int qm_get_clock(u64 *clock_hz);
-+int qm_set_clock(u64 clock_hz);
-+
-+/* For qman_static_dequeue_*** APIs */
-+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
-+/* for n in [1,15] */
-+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
-+/* for conversion from n of qm_channel */
-+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
-+{
-+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
-+}
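/* A small illustration of the conversion above: the mask below selects pool
 * channels 1 and 3 for the qman_static_dequeue_***() APIs mentioned earlier;
 * qm_channel_pool1 is the run-time base of the pool-channel range and the
 * helper name is hypothetical. */
static u32 sdqcr_sketch(void)
{
	return QM_SDQCR_CHANNELS_POOL(1) |
	       QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1 + 2);
}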
-+
-+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
-+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
-+ * FQID(n) to fill in the frame queue ID. */
-+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
-+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
-+#define QM_VDQCR_EXACT 0x40000000
-+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
-+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
-+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
-+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
-+
-+
-+/* ------------------------------------------------------- */
-+/* --- Qman data structures (and associated constants) --- */
-+
-+/* Represents s/w corenet portal mapped data structures */
-+struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
-+struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
-+struct qm_mr_entry; /* MR (Message Ring) entries */
-+struct qm_mc_command; /* MC (Management Command) command */
-+struct qm_mc_result; /* MC result */
-+
-+/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
-+#define QM_FD_FORMAT_SG 0x4
-+#define QM_FD_FORMAT_LONG 0x2
-+#define QM_FD_FORMAT_COMPOUND 0x1
-+enum qm_fd_format {
-+ /* 'contig' implies a contiguous buffer, whereas 'sg' implies a
-+ * scatter-gather table. 'big' implies a 29-bit length with no offset
-+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
-+ * implies a s/g-like table, where each entry itself represents a frame
-+ * (contiguous or scatter-gather) and the 29-bit "length" is
-+ * interpreted purely for congestion calculations, ie. a "congestion
-+ * weight". */
-+ qm_fd_contig = 0,
-+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
-+ qm_fd_sg = QM_FD_FORMAT_SG,
-+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
-+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
-+};
-+
-+/* Capitalised versions are un-typed but can be used in static expressions */
-+#define QM_FD_CONTIG 0
-+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
-+#define QM_FD_SG QM_FD_FORMAT_SG
-+#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
-+#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
-+
-+/* See 1.5.1.1: "Frame Descriptor (FD)" */
-+struct qm_fd {
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 dd:2; /* dynamic debug */
-+ u8 liodn_offset:6;
-+ u8 bpid:8; /* Buffer Pool ID */
-+ u8 eliodn_offset:4;
-+ u8 __reserved:4;
-+ u8 addr_hi; /* high 8-bits of 40-bit address */
-+ u32 addr_lo; /* low 32-bits of 40-bit address */
-+#else
-+ u32 addr_lo; /* low 32-bits of 40-bit address */
-+ u8 addr_hi; /* high 8-bits of 40-bit address */
-+ u8 __reserved:4;
-+ u8 eliodn_offset:4;
-+ u8 bpid:8; /* Buffer Pool ID */
-+ u8 liodn_offset:6;
-+ u8 dd:2; /* dynamic debug */
-+#endif
-+ };
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u64 __notaddress:24;
-+ u64 addr:40;
-+#else
-+ u64 addr:40;
-+ u64 __notaddress:24;
-+#endif
-+ };
-+ u64 opaque_addr;
-+ };
-+ /* The 'format' field indicates the interpretation of the remaining 29
-+ * bits of the 32-bit word. For packing reasons, it is duplicated in the
-+ * other union elements. Note, union'd structs are difficult to use with
-+ * static initialisation under gcc, in which case use the "opaque" form
-+ * with one of the macros. */
-+ union {
-+ /* For easier/faster copying of this part of the fd (eg. from a
-+ * DQRR entry to an EQCR entry) copy 'opaque' */
-+ u32 opaque;
-+ /* If 'format' is _contig or _sg, 20b length and 9b offset */
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ enum qm_fd_format format:3;
-+ u16 offset:9;
-+ u32 length20:20;
-+#else
-+ u32 length20:20;
-+ u16 offset:9;
-+ enum qm_fd_format format:3;
-+#endif
-+ };
-+ /* If 'format' is _contig_big or _sg_big, 29b length */
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ enum qm_fd_format _format1:3;
-+ u32 length29:29;
-+#else
-+ u32 length29:29;
-+ enum qm_fd_format _format1:3;
-+#endif
-+ };
-+ /* If 'format' is _compound, 29b "congestion weight" */
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ enum qm_fd_format _format2:3;
-+ u32 cong_weight:29;
-+#else
-+ u32 cong_weight:29;
-+ enum qm_fd_format _format2:3;
-+#endif
-+ };
-+ };
-+ union {
-+ u32 cmd;
-+ u32 status;
-+ };
-+} __aligned(8);
-+#define QM_FD_DD_NULL 0x00
-+#define QM_FD_PID_MASK 0x3f
-+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
-+{
-+ return fd->addr;
-+}
-+
-+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
-+{
-+ return (dma_addr_t)fd->addr;
-+}
-+/* Macro, so we compile better if 'v' isn't always 64-bit */
-+#define qm_fd_addr_set64(fd, v) \
-+ do { \
-+ struct qm_fd *__fd931 = (fd); \
-+ __fd931->addr = v; \
-+ } while (0)
-+
-+/* For static initialisation of FDs (which is complicated by the use of unions
-+ * in "struct qm_fd"), use the following macros. Note that;
-+ * - 'dd', 'pid' and 'bpid' are ignored because there's no static initialisation
-+ * use-case,
-+ * - use capitalised QM_FD_*** formats for static initialisation.
-+ */
-+#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
-+ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
-+ { (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
-+ { cmd } }
-+#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
-+ { 0, 0, 0, 0, 0, addr_hi, addr_lo, \
-+ { (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
-+ { cmd } }
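/* A minimal sketch of describing a contiguous 'big' frame of 'len' bytes at
 * 'addr': zero the FD, set the 40-bit address, then the format and 29-bit
 * length. The static initialiser below it exercises the QM_FD_FMT_29 macro
 * above with all-zero placeholder values; both helper names are
 * hypothetical. */
#include <linux/string.h>

static void fd_sketch(struct qm_fd *fd, dma_addr_t addr, u32 len)
{
	memset(fd, 0, sizeof(*fd));
	qm_fd_addr_set64(fd, addr);
	fd->format = qm_fd_contig_big;	/* 29-bit length, no offset field */
	fd->length29 = len;
}

static const struct qm_fd empty_fd = QM_FD_FMT_29(0, 0, 0, QM_FD_CONTIG_BIG, 0);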
-+
-+/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
-+#define QM_SG_OFFSET_MASK 0x1FFF
-+struct qm_sg_entry {
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 __reserved1[3];
-+ u8 addr_hi; /* high 8-bits of 40-bit address */
-+ u32 addr_lo; /* low 32-bits of 40-bit address */
-+#else
-+ u32 addr_lo; /* low 32-bits of 40-bit address */
-+ u8 addr_hi; /* high 8-bits of 40-bit address */
-+ u8 __reserved1[3];
-+#endif
-+ };
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u64 __notaddress:24;
-+ u64 addr:40;
-+#else
-+ u64 addr:40;
-+ u64 __notaddress:24;
-+#endif
-+ };
-+ u64 opaque;
-+ };
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 extension:1; /* Extension bit */
-+ u32 final:1; /* Final bit */
-+ u32 length:30;
-+#else
-+ u32 length:30;
-+ u32 final:1; /* Final bit */
-+ u32 extension:1; /* Extension bit */
-+#endif
-+ };
-+ u32 sgt_efl;
-+ };
-+ u8 __reserved2;
-+ u8 bpid;
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 __reserved3:3;
-+ u16 offset:13;
-+#else
-+ u16 offset:13;
-+ u16 __reserved3:3;
-+#endif
-+ };
-+ u16 opaque_offset;
-+ };
-+} __packed;
-+union qm_sg_efl {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 extension:1; /* Extension bit */
-+ u32 final:1; /* Final bit */
-+ u32 length:30;
-+#else
-+ u32 length:30;
-+ u32 final:1; /* Final bit */
-+ u32 extension:1; /* Extension bit */
-+#endif
-+ };
-+ u32 efl;
-+};
-+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
-+{
-+ return (dma_addr_t)be64_to_cpu(sg->opaque) & 0xffffffffffULL;
-+}
-+static inline u8 qm_sg_entry_get_ext(const struct qm_sg_entry *sg)
-+{
-+ union qm_sg_efl u;
-+
-+ u.efl = be32_to_cpu(sg->sgt_efl);
-+ return u.extension;
-+}
-+static inline u8 qm_sg_entry_get_final(const struct qm_sg_entry *sg)
-+{
-+ union qm_sg_efl u;
-+
-+ u.efl = be32_to_cpu(sg->sgt_efl);
-+ return u.final;
-+}
-+static inline u32 qm_sg_entry_get_len(const struct qm_sg_entry *sg)
-+{
-+ union qm_sg_efl u;
-+
-+ u.efl = be32_to_cpu(sg->sgt_efl);
-+ return u.length;
-+}
-+static inline u8 qm_sg_entry_get_bpid(const struct qm_sg_entry *sg)
-+{
-+ return sg->bpid;
-+}
-+static inline u16 qm_sg_entry_get_offset(const struct qm_sg_entry *sg)
-+{
-+ u32 opaque_offset = be16_to_cpu(sg->opaque_offset);
-+
-+ return opaque_offset & 0x1fff;
-+}
-+
-+/* Macro, so we compile better if 'v' isn't always 64-bit */
-+#define qm_sg_entry_set64(sg, v) \
-+ do { \
-+ struct qm_sg_entry *__sg931 = (sg); \
-+ __sg931->opaque = cpu_to_be64(v); \
-+ } while (0)
-+#define qm_sg_entry_set_ext(sg, v) \
-+ do { \
-+ union qm_sg_efl __u932; \
-+ __u932.efl = be32_to_cpu((sg)->sgt_efl); \
-+ __u932.extension = v; \
-+ (sg)->sgt_efl = cpu_to_be32(__u932.efl); \
-+ } while (0)
-+#define qm_sg_entry_set_final(sg, v) \
-+ do { \
-+ union qm_sg_efl __u933; \
-+ __u933.efl = be32_to_cpu((sg)->sgt_efl); \
-+ __u933.final = v; \
-+ (sg)->sgt_efl = cpu_to_be32(__u933.efl); \
-+ } while (0)
-+#define qm_sg_entry_set_len(sg, v) \
-+ do { \
-+ union qm_sg_efl __u934; \
-+ __u934.efl = be32_to_cpu((sg)->sgt_efl); \
-+ __u934.length = v; \
-+ (sg)->sgt_efl = cpu_to_be32(__u934.efl); \
-+ } while (0)
-+#define qm_sg_entry_set_bpid(sg, v) \
-+ do { \
-+ struct qm_sg_entry *__u935 = (sg); \
-+ __u935->bpid = v; \
-+ } while (0)
-+#define qm_sg_entry_set_offset(sg, v) \
-+ do { \
-+ struct qm_sg_entry *__u936 = (sg); \
-+ __u936->opaque_offset = cpu_to_be16(v); \
-+ } while (0)
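/* A minimal sketch of filling in a single, final scatter-gather entry for a
 * buffer of 'len' bytes at 'addr' using the setters above; the BPID of 16 is
 * an arbitrary illustrative value. */
#include <linux/string.h>

static void sg_sketch(struct qm_sg_entry *sg, dma_addr_t addr, u32 len)
{
	memset(sg, 0, sizeof(*sg));
	qm_sg_entry_set64(sg, addr);	/* stored big-endian */
	qm_sg_entry_set_offset(sg, 0);
	qm_sg_entry_set_len(sg, len);
	qm_sg_entry_set_final(sg, 1);	/* last entry of the table */
	qm_sg_entry_set_ext(sg, 0);	/* no extension entry follows */
	qm_sg_entry_set_bpid(sg, 16);
}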
-+
-+/* See 1.5.8.1: "Enqueue Command" */
-+struct qm_eqcr_entry {
-+ u8 __dont_write_directly__verb;
-+ u8 dca;
-+ u16 seqnum;
-+ u32 orp; /* 24-bit */
-+ u32 fqid; /* 24-bit */
-+ u32 tag;
-+ struct qm_fd fd;
-+ u8 __reserved3[32];
-+} __packed;
-+#define QM_EQCR_VERB_VBIT 0x80
-+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
-+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
-+#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
-+#define QM_EQCR_VERB_COLOUR_GREEN 0x00
-+#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
-+#define QM_EQCR_VERB_COLOUR_RED 0x10
-+#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
-+#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
-+#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
-+#define QM_EQCR_DCA_ENABLE 0x80
-+#define QM_EQCR_DCA_PARK 0x40
-+#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
-+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
-+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
-+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
-+#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
-+
-+/* See 1.5.8.2: "Frame Dequeue Response" */
-+struct qm_dqrr_entry {
-+ u8 verb;
-+ u8 stat;
-+ u16 seqnum; /* 15-bit */
-+ u8 tok;
-+ u8 __reserved2[3];
-+ u32 fqid; /* 24-bit */
-+ u32 contextB;
-+ struct qm_fd fd;
-+ u8 __reserved4[32];
-+};
-+#define QM_DQRR_VERB_VBIT 0x80
-+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
-+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
-+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
-+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
-+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
-+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
-+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
-+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
-+
-+/* See 1.5.8.3: "ERN Message Response" */
-+/* See 1.5.8.4: "FQ State Change Notification" */
-+struct qm_mr_entry {
-+ u8 verb;
-+ union {
-+ struct {
-+ u8 dca;
-+ u16 seqnum;
-+ u8 rc; /* Rejection Code */
-+ u32 orp:24;
-+ u32 fqid; /* 24-bit */
-+ u32 tag;
-+ struct qm_fd fd;
-+ } __packed ern;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
-+ u8 __reserved1:3;
-+ enum qm_dc_portal portal:3;
-+#else
-+ enum qm_dc_portal portal:3;
-+ u8 __reserved1:3;
-+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
-+#endif
-+ u16 __reserved2;
-+ u8 rc; /* Rejection Code */
-+ u32 __reserved3:24;
-+ u32 fqid; /* 24-bit */
-+ u32 tag;
-+ struct qm_fd fd;
-+ } __packed dcern;
-+ struct {
-+ u8 fqs; /* Frame Queue Status */
-+ u8 __reserved1[6];
-+ u32 fqid; /* 24-bit */
-+ u32 contextB;
-+ u8 __reserved2[16];
-+ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
-+ };
-+ u8 __reserved2[32];
-+} __packed;
-+#define QM_MR_VERB_VBIT 0x80
-+/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
-+ * originating from direct-connect portals ("dcern") use 0x20 as a verb which
-+ * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
-+ * the other MR types by noting if the 0x20 bit is unset. */
-+#define QM_MR_VERB_TYPE_MASK 0x27
-+#define QM_MR_VERB_DC_ERN 0x20
-+#define QM_MR_VERB_FQRN 0x21
-+#define QM_MR_VERB_FQRNI 0x22
-+#define QM_MR_VERB_FQRL 0x23
-+#define QM_MR_VERB_FQPN 0x24
-+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
-+#define QM_MR_RC_CGR_TAILDROP 0x00
-+#define QM_MR_RC_WRED 0x10
-+#define QM_MR_RC_ERROR 0x20
-+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
-+#define QM_MR_RC_ORPWINDOW_LATE 0x40
-+#define QM_MR_RC_FQ_TAILDROP 0x50
-+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
-+#define QM_MR_RC_ORP_ZERO 0x70
-+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
-+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
-+#define QM_MR_DCERN_COLOUR_GREEN 0x00
-+#define QM_MR_DCERN_COLOUR_YELLOW 0x01
-+#define QM_MR_DCERN_COLOUR_RED 0x02
-+#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
-+
-+/* An identical structure of FQD fields is present in the "Init FQ" command and
-+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
-+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
-+ * latter has two inlines to assist with converting to/from the mant+exp
-+ * representation. */
-+struct qm_fqd_stashing {
-+ /* See QM_STASHING_EXCL_<...> */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 exclusive;
-+ u8 __reserved1:2;
-+ /* Numbers of cachelines */
-+ u8 annotation_cl:2;
-+ u8 data_cl:2;
-+ u8 context_cl:2;
-+#else
-+ u8 context_cl:2;
-+ u8 data_cl:2;
-+ u8 annotation_cl:2;
-+ u8 __reserved1:2;
-+ u8 exclusive;
-+#endif
-+} __packed;
-+struct qm_fqd_taildrop {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 __reserved1:3;
-+ u16 mant:8;
-+ u16 exp:5;
-+#else
-+ u16 exp:5;
-+ u16 mant:8;
-+ u16 __reserved1:3;
-+#endif
-+} __packed;
-+struct qm_fqd_oac {
-+ /* See QM_OAC_<...> */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 oac:2; /* "Overhead Accounting Control" */
-+ u8 __reserved1:6;
-+#else
-+ u8 __reserved1:6;
-+ u8 oac:2; /* "Overhead Accounting Control" */
-+#endif
-+ /* Two's-complement value (-128 to +127) */
-+ signed char oal; /* "Overhead Accounting Length" */
-+} __packed;
-+struct qm_fqd {
-+ union {
-+ u8 orpc;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 __reserved1:2;
-+ u8 orprws:3;
-+ u8 oa:1;
-+ u8 olws:2;
-+#else
-+ u8 olws:2;
-+ u8 oa:1;
-+ u8 orprws:3;
-+ u8 __reserved1:2;
-+#endif
-+ } __packed;
-+ };
-+ u8 cgid;
-+ u16 fq_ctrl; /* See QM_FQCTRL_<...> */
-+ union {
-+ u16 dest_wq;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 channel:13; /* qm_channel */
-+ u16 wq:3;
-+#else
-+ u16 wq:3;
-+ u16 channel:13; /* qm_channel */
-+#endif
-+ } __packed dest;
-+ };
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 __reserved2:1;
-+ u16 ics_cred:15;
-+#else
-+ u16 __reserved2:1;
-+ u16 ics_cred:15;
-+#endif
-+ /* For "Initialize Frame Queue" commands, the write-enable mask
-+ * determines whether 'td' or 'oac_init' is observed. For query
-+ * commands, this field is always 'td', and 'oac_query' (below) reflects
-+ * the Overhead ACcounting values. */
-+ union {
-+ struct qm_fqd_taildrop td;
-+ struct qm_fqd_oac oac_init;
-+ };
-+ u32 context_b;
-+ union {
-+ /* Treat it as 64-bit opaque */
-+ u64 opaque;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 hi;
-+ u32 lo;
-+#else
-+ u32 lo;
-+ u32 hi;
-+#endif
-+ };
-+ /* Treat it as s/w portal stashing config */
-+ /* See 1.5.6.7.1: "FQD Context_A field used for [...] */
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ struct qm_fqd_stashing stashing;
-+ /* 48-bit address of FQ context to
-+ * stash, must be cacheline-aligned */
-+ u16 context_hi;
-+ u32 context_lo;
-+#else
-+ u32 context_lo;
-+ u16 context_hi;
-+ struct qm_fqd_stashing stashing;
-+#endif
-+ } __packed;
-+ } context_a;
-+ struct qm_fqd_oac oac_query;
-+} __packed;
-+/* 64-bit converters for context_hi/lo */
-+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
-+{
-+ return ((u64)fqd->context_a.context_hi << 32) |
-+ (u64)fqd->context_a.context_lo;
-+}
-+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
-+{
-+ return (dma_addr_t)qm_fqd_stashing_get64(fqd);
-+}
-+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
-+{
-+ return ((u64)fqd->context_a.hi << 32) |
-+ (u64)fqd->context_a.lo;
-+}
-+/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
-+#define qm_fqd_stashing_set64(fqd, v) \
-+ do { \
-+ struct qm_fqd *__fqd931 = (fqd); \
-+ __fqd931->context_a.context_hi = upper_32_bits(v); \
-+ __fqd931->context_a.context_lo = lower_32_bits(v); \
-+ } while (0)
-+#define qm_fqd_context_a_set64(fqd, v) \
-+ do { \
-+ struct qm_fqd *__fqd931 = (fqd); \
-+ __fqd931->context_a.hi = upper_32_bits(v); \
-+ __fqd931->context_a.lo = lower_32_bits(v); \
-+ } while (0)
-+/* convert a threshold value into mant+exp representation */
-+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
-+ int roundup)
-+{
-+ u32 e = 0;
-+ int oddbit = 0;
-+ if (val > 0xe0000000)
-+ return -ERANGE;
-+ while (val > 0xff) {
-+ oddbit = val & 1;
-+ val >>= 1;
-+ e++;
-+ if (roundup && oddbit)
-+ val++;
-+ }
-+ td->exp = e;
-+ td->mant = val;
-+ return 0;
-+}
-+/* and the other direction */
-+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
-+{
-+ return (u32)td->mant << td->exp;
-+}
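/* A worked example of the mant+exp conversion above (the threshold value of
 * 300000 is illustrative): requesting 300000 with rounding up yields
 * mant = 147, exp = 11, so the programmed threshold reported by
 * qm_fqd_taildrop_get() becomes 147 * 2^11 = 301056. */
static void taildrop_sketch(struct qm_fqd *fqd)
{
	qm_fqd_taildrop_set(&fqd->td, 300000, 1);
	/* qm_fqd_taildrop_get(&fqd->td) == 301056 */
}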
-+
-+/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
-+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
-+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
-+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
-+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
-+#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
-+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
-+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
-+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
-+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
-+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
-+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
-+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
-+
-+/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
-+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
-+#define QM_STASHING_EXCL_ANNOTATION 0x04
-+#define QM_STASHING_EXCL_DATA 0x02
-+#define QM_STASHING_EXCL_CTX 0x01
-+
-+/* See 1.5.5.3: "Intra Class Scheduling" */
-+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
-+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
-+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
-+
-+/* See 1.5.8.4: "FQ State Change Notification" */
-+/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
-+ * and associated commands/responses. The WRED parameters are calculated from
-+ * these fields as follows;
-+ * MaxTH = MA * (2 ^ Mn)
-+ * Slope = SA / (2 ^ Sn)
-+ * MaxP = 4 * (Pn + 1)
-+ */
-+struct qm_cgr_wr_parm {
-+ union {
-+ u32 word;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 MA:8;
-+ u32 Mn:5;
-+ u32 SA:7; /* must be between 64-127 */
-+ u32 Sn:6;
-+ u32 Pn:6;
-+#else
-+ u32 Pn:6;
-+ u32 Sn:6;
-+ u32 SA:7; /* must be between 64-127 */
-+ u32 Mn:5;
-+ u32 MA:8;
-+#endif
-+ } __packed;
-+ };
-+} __packed;
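/* A worked example of the WRED formulas above (field values are illustrative,
 * not recommendations): MA = 64, Mn = 4 gives MaxTH = 64 * 2^4 = 1024;
 * SA = 96, Sn = 8 gives Slope = 96 / 2^8 = 0.375; Pn = 15 gives
 * MaxP = 4 * (15 + 1) = 64. */
static void wr_parm_sketch(struct qm_cgr_wr_parm *p)
{
	p->MA = 64;
	p->Mn = 4;
	p->SA = 96;	/* within the documented 64-127 range */
	p->Sn = 8;
	p->Pn = 15;
}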
-+/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
-+ * management commands, this is padded to a 16-bit structure field, so that's
-+ * how we represent it here. The congestion state threshold is calculated from
-+ * these fields as follows;
-+ * CS threshold = TA * (2 ^ Tn)
-+ */
-+struct qm_cgr_cs_thres {
-+ union {
-+ u16 hword;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 __reserved:3;
-+ u16 TA:8;
-+ u16 Tn:5;
-+#else
-+ u16 Tn:5;
-+ u16 TA:8;
-+ u16 __reserved:3;
-+#endif
-+ } __packed;
-+ };
-+} __packed;
-+/* This identical structure of CGR fields is present in the "Init/Modify CGR"
-+ * commands and the "Query CGR" result. It's suctioned out here into its own
-+ * struct. */
-+struct __qm_mc_cgr {
-+ struct qm_cgr_wr_parm wr_parm_g;
-+ struct qm_cgr_wr_parm wr_parm_y;
-+ struct qm_cgr_wr_parm wr_parm_r;
-+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
-+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
-+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
-+ u8 cscn_en; /* boolean, use QM_CGR_EN */
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
-+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
-+#else
-+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
-+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
-+#endif
-+ };
-+ u32 cscn_targ; /* use QM_CGR_TARG_* */
-+ };
-+ u8 cstd_en; /* boolean, use QM_CGR_EN */
-+ u8 cs; /* boolean, only used in query response */
-+ union {
-+ /* use qm_cgr_cs_thres_set64() */
-+ struct qm_cgr_cs_thres cs_thres;
-+ u16 __cs_thres;
-+ };
-+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
-+} __packed;
-+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
-+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
-+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
-+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
-+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
-+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
-+/* Convert CGR thresholds to/from "cs_thres" format */
-+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
-+{
-+ return (u64)th->TA << th->Tn;
-+}
-+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
-+ int roundup)
-+{
-+ u32 e = 0;
-+ int oddbit = 0;
-+ while (val > 0xff) {
-+ oddbit = val & 1;
-+ val >>= 1;
-+ e++;
-+ if (roundup && oddbit)
-+ val++;
-+ }
-+ th->Tn = e;
-+ th->TA = val;
-+ return 0;
-+}
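-+
-+/*
-+ * Illustrative sketch (not part of the original driver): encode a ~1MB
-+ * byte-count threshold into the mantissa/exponent "cs_thres" format above,
-+ * then read back the (rounded-up) value the hardware will actually use.
-+ * The 1MB figure is an arbitrary example value.
-+ */
-+static inline u64 example_cs_thres_roundtrip(void)
-+{
-+  struct qm_cgr_cs_thres th;
-+
-+  qm_cgr_cs_thres_set64(&th, 1024 * 1024, 1); /* roundup */
-+  return qm_cgr_cs_thres_get64(&th); /* TA * (2 ^ Tn), >= 1MB */
-+}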
-+
-+/* See 1.5.8.5.1: "Initialize FQ" */
-+/* See 1.5.8.5.2: "Query FQ" */
-+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
-+/* See 1.5.8.5.4: "Alter FQ State Commands " */
-+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
-+/* See 1.5.8.6.2: "CGR Test Write" */
-+/* See 1.5.8.6.3: "Query CGR" */
-+/* See 1.5.8.6.4: "Query Congestion Group State" */
-+struct qm_mcc_initfq {
-+ u8 __reserved1;
-+ u16 we_mask; /* Write Enable Mask */
-+ u32 fqid; /* 24-bit */
-+ u16 count; /* Initialises 'count+1' FQDs */
-+ struct qm_fqd fqd; /* the FQD fields go here */
-+ u8 __reserved3[30];
-+} __packed;
-+struct qm_mcc_queryfq {
-+ u8 __reserved1[3];
-+ u32 fqid; /* 24-bit */
-+ u8 __reserved2[56];
-+} __packed;
-+struct qm_mcc_queryfq_np {
-+ u8 __reserved1[3];
-+ u32 fqid; /* 24-bit */
-+ u8 __reserved2[56];
-+} __packed;
-+struct qm_mcc_alterfq {
-+ u8 __reserved1[3];
-+ u32 fqid; /* 24-bit */
-+ u8 __reserved2;
-+ u8 count; /* number of consecutive FQID */
-+ u8 __reserved3[10];
-+ u32 context_b; /* frame queue context b */
-+ u8 __reserved4[40];
-+} __packed;
-+struct qm_mcc_initcgr {
-+ u8 __reserved1;
-+ u16 we_mask; /* Write Enable Mask */
-+ struct __qm_mc_cgr cgr; /* CGR fields */
-+ u8 __reserved2[2];
-+ u8 cgid;
-+ u8 __reserved4[32];
-+} __packed;
-+struct qm_mcc_cgrtestwrite {
-+ u8 __reserved1[2];
-+ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
-+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
-+ u8 __reserved2[23];
-+ u8 cgid;
-+ u8 __reserved3[32];
-+} __packed;
-+struct qm_mcc_querycgr {
-+ u8 __reserved1[30];
-+ u8 cgid;
-+ u8 __reserved2[32];
-+} __packed;
-+struct qm_mcc_querycongestion {
-+ u8 __reserved[63];
-+} __packed;
-+struct qm_mcc_querywq {
-+ u8 __reserved;
-+ /* select channel if verb != QUERYWQ_DEDICATED */
-+ union {
-+ u16 channel_wq; /* ignores wq (3 lsbits) */
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 id:13; /* qm_channel */
-+ u16 __reserved1:3;
-+#else
-+ u16 __reserved1:3;
-+ u16 id:13; /* qm_channel */
-+#endif
-+ } __packed channel;
-+ };
-+ u8 __reserved2[60];
-+} __packed;
-+
-+struct qm_mcc_ceetm_lfqmt_config {
-+ u8 __reserved1[4];
-+ u32 lfqid:24;
-+ u8 __reserved2[2];
-+ u16 cqid;
-+ u8 __reserved3[2];
-+ u16 dctidx;
-+ u8 __reserved4[48];
-+} __packed;
-+
-+struct qm_mcc_ceetm_lfqmt_query {
-+ u8 __reserved1[4];
-+ u32 lfqid:24;
-+ u8 __reserved2[56];
-+} __packed;
-+
-+struct qm_mcc_ceetm_cq_config {
-+ u8 __reserved1;
-+ u16 cqid;
-+ u8 dcpid;
-+ u8 __reserved2;
-+ u16 ccgid;
-+ u8 __reserved3[56];
-+} __packed;
-+
-+struct qm_mcc_ceetm_cq_query {
-+ u8 __reserved1;
-+ u16 cqid;
-+ u8 dcpid;
-+ u8 __reserved2[59];
-+} __packed;
-+
-+struct qm_mcc_ceetm_dct_config {
-+ u8 __reserved1;
-+ u16 dctidx;
-+ u8 dcpid;
-+ u8 __reserved2[15];
-+ u32 context_b;
-+ u64 context_a;
-+ u8 __reserved3[32];
-+} __packed;
-+
-+struct qm_mcc_ceetm_dct_query {
-+ u8 __reserved1;
-+ u16 dctidx;
-+ u8 dcpid;
-+ u8 __reserved2[59];
-+} __packed;
-+
-+struct qm_mcc_ceetm_class_scheduler_config {
-+ u8 __reserved1;
-+ u16 cqcid;
-+ u8 dcpid;
-+ u8 __reserved2[6];
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 gpc_reserved:1;
-+ u8 gpc_combine_flag:1;
-+ u8 gpc_prio_b:3;
-+ u8 gpc_prio_a:3;
-+#else
-+ u8 gpc_prio_a:3;
-+ u8 gpc_prio_b:3;
-+ u8 gpc_combine_flag:1;
-+ u8 gpc_reserved:1;
-+#endif
-+ u16 crem;
-+ u16 erem;
-+ u8 w[8];
-+ u8 __reserved3[40];
-+} __packed;
-+
-+struct qm_mcc_ceetm_class_scheduler_query {
-+ u8 __reserved1;
-+ u16 cqcid;
-+ u8 dcpid;
-+ u8 __reserved2[59];
-+} __packed;
-+
-+#define CEETM_COMMAND_CHANNEL_MAPPING (0 << 12)
-+#define CEETM_COMMAND_SP_MAPPING (1 << 12)
-+#define CEETM_COMMAND_CHANNEL_SHAPER (2 << 12)
-+#define CEETM_COMMAND_LNI_SHAPER (3 << 12)
-+#define CEETM_COMMAND_TCFC (4 << 12)
-+
-+#define CEETM_CCGRID_MASK 0x01FF
-+#define CEETM_CCGR_CM_CONFIGURE (0 << 14)
-+#define CEETM_CCGR_DN_CONFIGURE (1 << 14)
-+#define CEETM_CCGR_TEST_WRITE (2 << 14)
-+#define CEETM_CCGR_CM_QUERY (0 << 14)
-+#define CEETM_CCGR_DN_QUERY (1 << 14)
-+#define CEETM_CCGR_DN_QUERY_FLUSH (2 << 14)
-+#define CEETM_QUERY_CONGESTION_STATE (3 << 14)
-+
-+struct qm_mcc_ceetm_mapping_shaper_tcfc_config {
-+ u8 __reserved1;
-+ u16 cid;
-+ u8 dcpid;
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 map_shaped:1;
-+ u8 map_reserved:4;
-+ u8 map_lni_id:3;
-+#else
-+ u8 map_lni_id:3;
-+ u8 map_reserved:4;
-+ u8 map_shaped:1;
-+#endif
-+ u8 __reserved2[58];
-+ } __packed channel_mapping;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 map_reserved:5;
-+ u8 map_lni_id:3;
-+#else
-+ u8 map_lni_id:3;
-+ u8 map_reserved:5;
-+#endif
-+ u8 __reserved2[58];
-+ } __packed sp_mapping;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 cpl:1;
-+ u8 cpl_reserved:2;
-+ u8 oal:5;
-+#else
-+ u8 oal:5;
-+ u8 cpl_reserved:2;
-+ u8 cpl:1;
-+#endif
-+ u32 crtcr:24;
-+ u32 ertcr:24;
-+ u16 crtbl;
-+ u16 ertbl;
-+ u8 mps; /* This will be hardcoded by the driver to 60 */
-+ u8 __reserved2[47];
-+ } __packed shaper_config;
-+ struct {
-+ u8 __reserved2[11];
-+ u64 lnitcfcc;
-+ u8 __reserved3[40];
-+ } __packed tcfc_config;
-+ };
-+} __packed;
-+
-+struct qm_mcc_ceetm_mapping_shaper_tcfc_query {
-+ u8 __reserved1;
-+ u16 cid;
-+ u8 dcpid;
-+ u8 __reserved2[59];
-+} __packed;
-+
-+struct qm_mcc_ceetm_ccgr_config {
-+ u8 __reserved1;
-+ u16 ccgrid;
-+ u8 dcpid;
-+ u8 __reserved2;
-+ u16 we_mask;
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 ctl_reserved:1;
-+ u8 ctl_wr_en_g:1;
-+ u8 ctl_wr_en_y:1;
-+ u8 ctl_wr_en_r:1;
-+ u8 ctl_td_en:1;
-+ u8 ctl_td_mode:1;
-+ u8 ctl_cscn_en:1;
-+ u8 ctl_mode:1;
-+#else
-+ u8 ctl_mode:1;
-+ u8 ctl_cscn_en:1;
-+ u8 ctl_td_mode:1;
-+ u8 ctl_td_en:1;
-+ u8 ctl_wr_en_r:1;
-+ u8 ctl_wr_en_y:1;
-+ u8 ctl_wr_en_g:1;
-+ u8 ctl_reserved:1;
-+#endif
-+ u8 cdv;
-+ u16 cscn_tupd;
-+ u8 oal;
-+ u8 __reserved3;
-+ struct qm_cgr_cs_thres cs_thres;
-+ struct qm_cgr_cs_thres cs_thres_x;
-+ struct qm_cgr_cs_thres td_thres;
-+ struct qm_cgr_wr_parm wr_parm_g;
-+ struct qm_cgr_wr_parm wr_parm_y;
-+ struct qm_cgr_wr_parm wr_parm_r;
-+ } __packed cm_config;
-+ struct {
-+ u8 dnc;
-+ u8 dn0;
-+ u8 dn1;
-+ u64 dnba:40;
-+ u8 __reserved3[2];
-+ u16 dnth_0;
-+ u8 __reserved4[2];
-+ u16 dnth_1;
-+ u8 __reserved5[8];
-+ } __packed dn_config;
-+ struct {
-+ u8 __reserved3[3];
-+ u64 i_cnt:40;
-+ u8 __reserved4[16];
-+ } __packed test_write;
-+ };
-+ u8 __reserved5[32];
-+} __packed;
-+
-+struct qm_mcc_ceetm_ccgr_query {
-+ u8 __reserved1;
-+ u16 ccgrid;
-+ u8 dcpid;
-+ u8 __reserved2[59];
-+} __packed;
-+
-+struct qm_mcc_ceetm_cq_peek_pop_xsfdrread {
-+ u8 __reserved1;
-+ u16 cqid;
-+ u8 dcpid;
-+ u8 ct;
-+ u16 xsfdr;
-+ u8 __reserved2[56];
-+} __packed;
-+
-+#define CEETM_QUERY_DEQUEUE_STATISTICS 0x00
-+#define CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS 0x01
-+#define CEETM_WRITE_DEQUEUE_STATISTICS 0x02
-+#define CEETM_QUERY_REJECT_STATISTICS 0x03
-+#define CEETM_QUERY_REJECT_CLEAR_STATISTICS 0x04
-+#define CEETM_WRITE_REJECT_STATISTICS 0x05
-+struct qm_mcc_ceetm_statistics_query_write {
-+ u8 __reserved1;
-+ u16 cid;
-+ u8 dcpid;
-+ u8 ct;
-+ u8 __reserved2[13];
-+ u64 frm_cnt:40;
-+ u8 __reserved3[2];
-+ u64 byte_cnt:48;
-+ u8 __reserved[32];
-+} __packed;
-+
-+struct qm_mc_command {
-+ u8 __dont_write_directly__verb;
-+ union {
-+ struct qm_mcc_initfq initfq;
-+ struct qm_mcc_queryfq queryfq;
-+ struct qm_mcc_queryfq_np queryfq_np;
-+ struct qm_mcc_alterfq alterfq;
-+ struct qm_mcc_initcgr initcgr;
-+ struct qm_mcc_cgrtestwrite cgrtestwrite;
-+ struct qm_mcc_querycgr querycgr;
-+ struct qm_mcc_querycongestion querycongestion;
-+ struct qm_mcc_querywq querywq;
-+ struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
-+ struct qm_mcc_ceetm_lfqmt_query lfqmt_query;
-+ struct qm_mcc_ceetm_cq_config cq_config;
-+ struct qm_mcc_ceetm_cq_query cq_query;
-+ struct qm_mcc_ceetm_dct_config dct_config;
-+ struct qm_mcc_ceetm_dct_query dct_query;
-+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
-+ struct qm_mcc_ceetm_class_scheduler_query csch_query;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config mst_config;
-+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query mst_query;
-+ struct qm_mcc_ceetm_ccgr_config ccgr_config;
-+ struct qm_mcc_ceetm_ccgr_query ccgr_query;
-+ struct qm_mcc_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
-+ struct qm_mcc_ceetm_statistics_query_write stats_query_write;
-+ };
-+} __packed;
-+#define QM_MCC_VERB_VBIT 0x80
-+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
-+#define QM_MCC_VERB_INITFQ_PARKED 0x40
-+#define QM_MCC_VERB_INITFQ_SCHED 0x41
-+#define QM_MCC_VERB_QUERYFQ 0x44
-+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
-+#define QM_MCC_VERB_QUERYWQ 0x46
-+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
-+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
-+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
-+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
-+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
-+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
-+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
-+#define QM_MCC_VERB_INITCGR 0x50
-+#define QM_MCC_VERB_MODIFYCGR 0x51
-+#define QM_MCC_VERB_CGRTESTWRITE 0x52
-+#define QM_MCC_VERB_QUERYCGR 0x58
-+#define QM_MCC_VERB_QUERYCONGESTION 0x59
-+/* INITFQ-specific flags */
-+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
-+#define QM_INITFQ_WE_OAC 0x0100
-+#define QM_INITFQ_WE_ORPC 0x0080
-+#define QM_INITFQ_WE_CGID 0x0040
-+#define QM_INITFQ_WE_FQCTRL 0x0020
-+#define QM_INITFQ_WE_DESTWQ 0x0010
-+#define QM_INITFQ_WE_ICSCRED 0x0008
-+#define QM_INITFQ_WE_TDTHRESH 0x0004
-+#define QM_INITFQ_WE_CONTEXTB 0x0002
-+#define QM_INITFQ_WE_CONTEXTA 0x0001
-+/* INITCGR/MODIFYCGR-specific flags */
-+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
-+#define QM_CGR_WE_WR_PARM_G 0x0400
-+#define QM_CGR_WE_WR_PARM_Y 0x0200
-+#define QM_CGR_WE_WR_PARM_R 0x0100
-+#define QM_CGR_WE_WR_EN_G 0x0080
-+#define QM_CGR_WE_WR_EN_Y 0x0040
-+#define QM_CGR_WE_WR_EN_R 0x0020
-+#define QM_CGR_WE_CSCN_EN 0x0010
-+#define QM_CGR_WE_CSCN_TARG 0x0008
-+#define QM_CGR_WE_CSTD_EN 0x0004
-+#define QM_CGR_WE_CS_THRES 0x0002
-+#define QM_CGR_WE_MODE 0x0001
-+
-+/* See 1.5.9.7 CEETM Management Commands */
-+#define QM_CEETM_VERB_LFQMT_CONFIG 0x70
-+#define QM_CEETM_VERB_LFQMT_QUERY 0x71
-+#define QM_CEETM_VERB_CQ_CONFIG 0x72
-+#define QM_CEETM_VERB_CQ_QUERY 0x73
-+#define QM_CEETM_VERB_DCT_CONFIG 0x74
-+#define QM_CEETM_VERB_DCT_QUERY 0x75
-+#define QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG 0x76
-+#define QM_CEETM_VERB_CLASS_SCHEDULER_QUERY 0x77
-+#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG 0x78
-+#define QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY 0x79
-+#define QM_CEETM_VERB_CCGR_CONFIG 0x7A
-+#define QM_CEETM_VERB_CCGR_QUERY 0x7B
-+#define QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD 0x7C
-+#define QM_CEETM_VERB_STATISTICS_QUERY_WRITE 0x7D
-+
-+/* See 1.5.8.5.1: "Initialize FQ" */
-+/* See 1.5.8.5.2: "Query FQ" */
-+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
-+/* See 1.5.8.5.4: "Alter FQ State Commands " */
-+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
-+/* See 1.5.8.6.2: "CGR Test Write" */
-+/* See 1.5.8.6.3: "Query CGR" */
-+/* See 1.5.8.6.4: "Query Congestion Group State" */
-+struct qm_mcr_initfq {
-+ u8 __reserved1[62];
-+} __packed;
-+struct qm_mcr_queryfq {
-+ u8 __reserved1[8];
-+ struct qm_fqd fqd; /* the FQD fields are here */
-+ u8 __reserved2[30];
-+} __packed;
-+struct qm_mcr_queryfq_np {
-+ u8 __reserved1;
-+ u8 state; /* QM_MCR_NP_STATE_*** */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 __reserved2;
-+ u32 fqd_link:24;
-+ u16 __reserved3:2;
-+ u16 odp_seq:14;
-+ u16 __reserved4:2;
-+ u16 orp_nesn:14;
-+ u16 __reserved5:1;
-+ u16 orp_ea_hseq:15;
-+ u16 __reserved6:1;
-+ u16 orp_ea_tseq:15;
-+ u8 __reserved7;
-+ u32 orp_ea_hptr:24;
-+ u8 __reserved8;
-+ u32 orp_ea_tptr:24;
-+ u8 __reserved9;
-+ u32 pfdr_hptr:24;
-+ u8 __reserved10;
-+ u32 pfdr_tptr:24;
-+ u8 __reserved11[5];
-+ u8 __reserved12:7;
-+ u8 is:1;
-+ u16 ics_surp;
-+ u32 byte_cnt;
-+ u8 __reserved13;
-+ u32 frm_cnt:24;
-+ u32 __reserved14;
-+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
-+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
-+ u16 __reserved15;
-+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
-+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
-+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
-+#else
-+ u8 __reserved2;
-+ u32 fqd_link:24;
-+
-+ u16 odp_seq:14;
-+ u16 __reserved3:2;
-+
-+ u16 orp_nesn:14;
-+ u16 __reserved4:2;
-+
-+ u16 orp_ea_hseq:15;
-+ u16 __reserved5:1;
-+
-+ u16 orp_ea_tseq:15;
-+ u16 __reserved6:1;
-+
-+ u8 __reserved7;
-+ u32 orp_ea_hptr:24;
-+
-+ u8 __reserved8;
-+ u32 orp_ea_tptr:24;
-+
-+ u8 __reserved9;
-+ u32 pfdr_hptr:24;
-+
-+ u8 __reserved10;
-+ u32 pfdr_tptr:24;
-+
-+ u8 __reserved11[5];
-+ u8 is:1;
-+ u8 __reserved12:7;
-+ u16 ics_surp;
-+ u32 byte_cnt;
-+ u8 __reserved13;
-+ u32 frm_cnt:24;
-+ u32 __reserved14;
-+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
-+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
-+ u16 __reserved15;
-+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
-+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
-+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
-+#endif
-+} __packed;
-+
-+
-+struct qm_mcr_alterfq {
-+ u8 fqs; /* Frame Queue Status */
-+ u8 __reserved1[61];
-+} __packed;
-+struct qm_mcr_initcgr {
-+ u8 __reserved1[62];
-+} __packed;
-+struct qm_mcr_cgrtestwrite {
-+ u16 __reserved1;
-+ struct __qm_mc_cgr cgr; /* CGR fields */
-+ u8 __reserved2[3];
-+ u32 __reserved3:24;
-+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
-+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
-+ u32 __reserved4:24;
-+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
-+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
-+ u16 lgt; /* Last Group Tick */
-+ u16 wr_prob_g;
-+ u16 wr_prob_y;
-+ u16 wr_prob_r;
-+ u8 __reserved5[8];
-+} __packed;
-+struct qm_mcr_querycgr {
-+ u16 __reserved1;
-+ struct __qm_mc_cgr cgr; /* CGR fields */
-+ u8 __reserved2[3];
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 __reserved3:24;
-+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
-+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
-+#else
-+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
-+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
-+ u32 __reserved3:24;
-+#endif
-+ };
-+ u64 i_bcnt;
-+ };
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u32 __reserved4:24;
-+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
-+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
-+#else
-+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
-+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
-+ u32 __reserved4:24;
-+#endif
-+ };
-+ u64 a_bcnt;
-+ };
-+ union {
-+ u32 cscn_targ_swp[4];
-+ u8 __reserved5[16];
-+ };
-+} __packed;
-+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
-+{
-+ return be64_to_cpu(q->i_bcnt);
-+}
-+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
-+{
-+ return be64_to_cpu(q->a_bcnt);
-+}
-+static inline u64 qm_mcr_cgrtestwrite_i_get64(
-+ const struct qm_mcr_cgrtestwrite *q)
-+{
-+ return be64_to_cpu(((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo);
-+}
-+static inline u64 qm_mcr_cgrtestwrite_a_get64(
-+ const struct qm_mcr_cgrtestwrite *q)
-+{
-+ return be64_to_cpu(((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo);
-+}
-+/* Macro, so we compile better if 'v' isn't always 64-bit */
-+#define qm_mcr_querycgr_i_set64(q, v) \
-+ do { \
-+ struct qm_mcr_querycgr *__q931 = (q); \
-+ __q931->i_bcnt_hi = upper_32_bits(v); \
-+ __q931->i_bcnt_lo = lower_32_bits(v); \
-+ } while (0)
-+#define qm_mcr_querycgr_a_set64(q, v) \
-+ do { \
-+ struct qm_mcr_querycgr *__q931 = (q); \
-+ __q931->a_bcnt_hi = upper_32_bits(v); \
-+ __q931->a_bcnt_lo = lower_32_bits(v); \
-+ } while (0)
-+struct __qm_mcr_querycongestion {
-+ u32 __state[8];
-+};
-+struct qm_mcr_querycongestion {
-+ u8 __reserved[30];
-+ /* Access this struct using QM_MCR_QUERYCONGESTION() */
-+ struct __qm_mcr_querycongestion state;
-+} __packed;
-+struct qm_mcr_querywq {
-+ union {
-+ u16 channel_wq; /* ignores wq (3 lsbits) */
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u16 id:13; /* qm_channel */
-+ u16 __reserved:3;
-+#else
-+ u16 __reserved:3;
-+ u16 id:13; /* qm_channel */
-+#endif
-+ } __packed channel;
-+ };
-+ u8 __reserved[28];
-+ u32 wq_len[8];
-+} __packed;
-+
-+/* QMAN CEETM Management Command Response */
-+struct qm_mcr_ceetm_lfqmt_config {
-+ u8 __reserved1[62];
-+} __packed;
-+struct qm_mcr_ceetm_lfqmt_query {
-+ u8 __reserved1[8];
-+ u16 cqid;
-+ u8 __reserved2[2];
-+ u16 dctidx;
-+ u8 __reserved3[2];
-+ u16 ccgid;
-+ u8 __reserved4[44];
-+} __packed;
-+
-+struct qm_mcr_ceetm_cq_config {
-+ u8 __reserved1[62];
-+} __packed;
-+
-+struct qm_mcr_ceetm_cq_query {
-+ u8 __reserved1[4];
-+ u16 ccgid;
-+ u16 state;
-+ u32 pfdr_hptr:24;
-+ u32 pfdr_tptr:24;
-+ u16 od1_xsfdr;
-+ u16 od2_xsfdr;
-+ u16 od3_xsfdr;
-+ u16 od4_xsfdr;
-+ u16 od5_xsfdr;
-+ u16 od6_xsfdr;
-+ u16 ra1_xsfdr;
-+ u16 ra2_xsfdr;
-+ u8 __reserved2;
-+ u32 frm_cnt:24;
-+ u8 __reserved333[28];
-+} __packed;
-+
-+struct qm_mcr_ceetm_dct_config {
-+ u8 __reserved1[62];
-+} __packed;
-+
-+struct qm_mcr_ceetm_dct_query {
-+ u8 __reserved1[18];
-+ u32 context_b;
-+ u64 context_a;
-+ u8 __reserved2[32];
-+} __packed;
-+
-+struct qm_mcr_ceetm_class_scheduler_config {
-+ u8 __reserved1[62];
-+} __packed;
-+
-+struct qm_mcr_ceetm_class_scheduler_query {
-+ u8 __reserved1[9];
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 gpc_reserved:1;
-+ u8 gpc_combine_flag:1;
-+ u8 gpc_prio_b:3;
-+ u8 gpc_prio_a:3;
-+#else
-+ u8 gpc_prio_a:3;
-+ u8 gpc_prio_b:3;
-+ u8 gpc_combine_flag:1;
-+ u8 gpc_reserved:1;
-+#endif
-+ u16 crem;
-+ u16 erem;
-+ u8 w[8];
-+ u8 __reserved2[5];
-+ u32 wbfslist:24;
-+ u32 d8;
-+ u32 d9;
-+ u32 d10;
-+ u32 d11;
-+ u32 d12;
-+ u32 d13;
-+ u32 d14;
-+ u32 d15;
-+} __packed;
-+
-+struct qm_mcr_ceetm_mapping_shaper_tcfc_config {
-+ u16 cid;
-+ u8 __reserved2[60];
-+} __packed;
-+
-+struct qm_mcr_ceetm_mapping_shaper_tcfc_query {
-+ u16 cid;
-+ u8 __reserved1;
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 map_shaped:1;
-+ u8 map_reserved:4;
-+ u8 map_lni_id:3;
-+#else
-+ u8 map_lni_id:3;
-+ u8 map_reserved:4;
-+ u8 map_shaped:1;
-+#endif
-+ u8 __reserved2[58];
-+ } __packed channel_mapping_query;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 map_reserved:5;
-+ u8 map_lni_id:3;
-+#else
-+ u8 map_lni_id:3;
-+ u8 map_reserved:5;
-+#endif
-+ u8 __reserved2[58];
-+ } __packed sp_mapping_query;
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 cpl:1;
-+ u8 cpl_reserved:2;
-+ u8 oal:5;
-+#else
-+ u8 oal:5;
-+ u8 cpl_reserved:2;
-+ u8 cpl:1;
-+#endif
-+ u32 crtcr:24;
-+ u32 ertcr:24;
-+ u16 crtbl;
-+ u16 ertbl;
-+ u8 mps;
-+ u8 __reserved2[15];
-+ u32 crat;
-+ u32 erat;
-+ u8 __reserved3[24];
-+ } __packed shaper_query;
-+ struct {
-+ u8 __reserved1[11];
-+ u64 lnitcfcc;
-+ u8 __reserved3[40];
-+ } __packed tcfc_query;
-+ };
-+} __packed;
-+
-+struct qm_mcr_ceetm_ccgr_config {
-+ u8 __reserved1[46];
-+ union {
-+ u8 __reserved2[8];
-+ struct {
-+ u16 timestamp;
-+ u16 wr_porb_g;
-+ u16 wr_prob_y;
-+ u16 wr_prob_r;
-+ } __packed test_write;
-+ };
-+ u8 __reserved3[8];
-+} __packed;
-+
-+struct qm_mcr_ceetm_ccgr_query {
-+ u8 __reserved1[6];
-+ union {
-+ struct {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ u8 ctl_reserved:1;
-+ u8 ctl_wr_en_g:1;
-+ u8 ctl_wr_en_y:1;
-+ u8 ctl_wr_en_r:1;
-+ u8 ctl_td_en:1;
-+ u8 ctl_td_mode:1;
-+ u8 ctl_cscn_en:1;
-+ u8 ctl_mode:1;
-+#else
-+ u8 ctl_mode:1;
-+ u8 ctl_cscn_en:1;
-+ u8 ctl_td_mode:1;
-+ u8 ctl_td_en:1;
-+ u8 ctl_wr_en_r:1;
-+ u8 ctl_wr_en_y:1;
-+ u8 ctl_wr_en_g:1;
-+ u8 ctl_reserved:1;
-+#endif
-+ u8 cdv;
-+ u8 __reserved2[2];
-+ u8 oal;
-+ u8 __reserved3;
-+ struct qm_cgr_cs_thres cs_thres;
-+ struct qm_cgr_cs_thres cs_thres_x;
-+ struct qm_cgr_cs_thres td_thres;
-+ struct qm_cgr_wr_parm wr_parm_g;
-+ struct qm_cgr_wr_parm wr_parm_y;
-+ struct qm_cgr_wr_parm wr_parm_r;
-+ u16 cscn_targ_dcp;
-+ u8 dcp_lsn;
-+ u64 i_cnt:40;
-+ u8 __reserved4[3];
-+ u64 a_cnt:40;
-+ u32 cscn_targ_swp[4];
-+ } __packed cm_query;
-+ struct {
-+ u8 dnc;
-+ u8 dn0;
-+ u8 dn1;
-+ u64 dnba:40;
-+ u8 __reserved2[2];
-+ u16 dnth_0;
-+ u8 __reserved3[2];
-+ u16 dnth_1;
-+ u8 __reserved4[10];
-+ u16 dnacc_0;
-+ u8 __reserved5[2];
-+ u16 dnacc_1;
-+ u8 __reserved6[24];
-+ } __packed dn_query;
-+ struct {
-+ u8 __reserved2[24];
-+ struct __qm_mcr_querycongestion state;
-+ } __packed congestion_state;
-+
-+ };
-+} __packed;
-+
-+struct qm_mcr_ceetm_cq_peek_pop_xsfdrread {
-+ u8 stat;
-+ u8 __reserved1[11];
-+ u16 dctidx;
-+ struct qm_fd fd;
-+ u8 __reserved2[32];
-+} __packed;
-+
-+struct qm_mcr_ceetm_statistics_query {
-+ u8 __reserved1[17];
-+ u64 frm_cnt:40;
-+ u8 __reserved2[2];
-+ u64 byte_cnt:48;
-+ u8 __reserved3[32];
-+} __packed;
-+
-+struct qm_mc_result {
-+ u8 verb;
-+ u8 result;
-+ union {
-+ struct qm_mcr_initfq initfq;
-+ struct qm_mcr_queryfq queryfq;
-+ struct qm_mcr_queryfq_np queryfq_np;
-+ struct qm_mcr_alterfq alterfq;
-+ struct qm_mcr_initcgr initcgr;
-+ struct qm_mcr_cgrtestwrite cgrtestwrite;
-+ struct qm_mcr_querycgr querycgr;
-+ struct qm_mcr_querycongestion querycongestion;
-+ struct qm_mcr_querywq querywq;
-+ struct qm_mcr_ceetm_lfqmt_config lfqmt_config;
-+ struct qm_mcr_ceetm_lfqmt_query lfqmt_query;
-+ struct qm_mcr_ceetm_cq_config cq_config;
-+ struct qm_mcr_ceetm_cq_query cq_query;
-+ struct qm_mcr_ceetm_dct_config dct_config;
-+ struct qm_mcr_ceetm_dct_query dct_query;
-+ struct qm_mcr_ceetm_class_scheduler_config csch_config;
-+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_config mst_config;
-+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query mst_query;
-+ struct qm_mcr_ceetm_ccgr_config ccgr_config;
-+ struct qm_mcr_ceetm_ccgr_query ccgr_query;
-+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread cq_ppxr;
-+ struct qm_mcr_ceetm_statistics_query stats_query;
-+ };
-+} __packed;
-+
-+#define QM_MCR_VERB_RRID 0x80
-+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
-+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
-+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
-+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
-+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
-+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
-+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
-+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
-+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
-+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
-+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
-+#define QM_MCR_RESULT_NULL 0x00
-+#define QM_MCR_RESULT_OK 0xf0
-+#define QM_MCR_RESULT_ERR_FQID 0xf1
-+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
-+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
-+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
-+#define QM_MCR_RESULT_PENDING 0xf8
-+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
-+#define QM_MCR_NP_STATE_FE 0x10
-+#define QM_MCR_NP_STATE_R 0x08
-+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
-+#define QM_MCR_NP_STATE_OOS 0x00
-+#define QM_MCR_NP_STATE_RETIRED 0x01
-+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
-+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
-+#define QM_MCR_NP_STATE_PARKED 0x04
-+#define QM_MCR_NP_STATE_ACTIVE 0x05
-+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
-+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
-+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
-+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
-+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
-+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
-+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
-+/* This extracts the state for congestion group 'n' from a query response.
-+ * Eg.
-+ * u8 cgr = [...];
-+ * struct qm_mc_result *res = [...];
-+ * printf("congestion group %d congestion state: %d\n", cgr,
-+ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
-+ */
-+#define __CGR_WORD(num) (num >> 5)
-+#define __CGR_SHIFT(num) (num & 0x1f)
-+#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
-+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
-+ u8 cgr)
-+{
-+ return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
-+}
-+
-+
-+/*********************/
-+/* Utility interface */
-+/*********************/
-+
-+/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
-+ * spinlock them yourself if needed. */
-+struct qman_fqid_pool;
-+
-+/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
-+ * always succeeds, but returns non-zero if there were "leaked" FQID
-+ * allocations. */
-+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
-+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
-+/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
-+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
-+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
-+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
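-+
-+/*
-+ * Illustrative sketch only: carve a 32-entry FQID range (base 0x100 is an
-+ * arbitrary example value) into a pool, allocate one FQID and release
-+ * everything again. As noted above, the pool is not locked internally, and
-+ * -ENOMEM is assumed available via the usual kernel errno definitions.
-+ */
-+static inline int example_fqid_pool_usage(void)
-+{
-+  struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 32);
-+  u32 fqid;
-+
-+  if (!pool)
-+    return -ENOMEM;
-+  if (!qman_fqid_pool_alloc(pool, &fqid)) /* zero means success */
-+    qman_fqid_pool_free(pool, fqid);
-+  return qman_fqid_pool_destroy(pool);
-+}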
-+
-+/*******************************************************************/
-+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
-+/*******************************************************************/
-+
-+ /* Portal and Frame Queues */
-+ /* ----------------------- */
-+/* Represents a managed portal */
-+struct qman_portal;
-+
-+/* This object type represents Qman frame queue descriptors (FQD), it is
-+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
-+ * defined further down. */
-+struct qman_fq;
-+
-+/* This object type represents a Qman congestion group, it is defined further
-+ * down. */
-+struct qman_cgr;
-+
-+struct qman_portal_config {
-+ /* If the caller enables DQRR stashing (and thus wishes to operate the
-+ * portal from only one cpu), this is the logical CPU that the portal
-+ * will stash to. Whether stashing is enabled or not, this setting is
-+ * also used for any "core-affine" portals, ie. default portals
-+ * associated to the corresponding cpu. -1 implies that there is no core
-+ * affinity configured. */
-+ int cpu;
-+ /* portal interrupt line */
-+ int irq;
-+ /* the unique index of this portal */
-+ u32 index;
-+ /* Is this portal shared? (If so, it has coarser locking and demuxes
-+ * processing on behalf of other CPUs.) */
-+ int is_shared;
-+ /* The portal's dedicated channel id, use this value for initialising
-+ * frame queues to target this portal when scheduled. */
-+ u16 channel;
-+ /* A mask of which pool channels this portal has dequeue access to
-+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
-+ u32 pools;
-+};
-+
-+/* This enum, and the callback type that returns it, are used when handling
-+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
-+ * portal object (for handling dequeues that do not demux because contextB is
-+ * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
-+enum qman_cb_dqrr_result {
-+ /* DQRR entry can be consumed */
-+ qman_cb_dqrr_consume,
-+ /* Like _consume, but requests parking - FQ must be held-active */
-+ qman_cb_dqrr_park,
-+ /* Does not consume, for DCA mode only. This allows out-of-order
-+ * consumes by explicit calls to qman_dca() and/or the use of implicit
-+ * DCA via EQCR entries. */
-+ qman_cb_dqrr_defer,
-+ /* Stop processing without consuming this ring entry. Exits the current
-+ * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
-+ * interrupt handler, the callback would typically call
-+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
-+ * otherwise the interrupt will reassert immediately. */
-+ qman_cb_dqrr_stop,
-+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
-+ qman_cb_dqrr_consume_stop
-+};
-+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
-+ struct qman_fq *fq,
-+ const struct qm_dqrr_entry *dqrr);
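-+
-+/*
-+ * Illustrative sketch of a callback matching the qman_cb_dqrr type above;
-+ * a real handler would inspect the dequeued frame descriptor before
-+ * returning. The return value tells the driver how to treat the ring entry.
-+ */
-+static inline enum qman_cb_dqrr_result example_dqrr_cb(struct qman_portal *qm,
-+      struct qman_fq *fq,
-+      const struct qm_dqrr_entry *dqrr)
-+{
-+  /* ... process the dequeued frame carried by 'dqrr' here ... */
-+  return qman_cb_dqrr_consume; /* entry can be consumed/released */
-+}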
-+
-+/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
-+ * are always consumed after the callback returns. */
-+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
-+ const struct qm_mr_entry *msg);
-+
-+/* This callback type is used when handling DCP ERNs */
-+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
-+ const struct qm_mr_entry *msg);
-+
-+/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
-+ * held-active + held-suspended are just "sched". Things like "retired" will not
-+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
-+ * then, to indicate it's completing and to gate attempts to retry the retire
-+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
-+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
-+ * index rather than the FQ that ring entry corresponds to), so repeated park
-+ * commands are allowed (if you're silly enough to try) but won't change FQ
-+ * state, and the resulting park notifications move FQs from "sched" to
-+ * "parked". */
-+enum qman_fq_state {
-+ qman_fq_state_oos,
-+ qman_fq_state_parked,
-+ qman_fq_state_sched,
-+ qman_fq_state_retired
-+};
-+
-+/* Frame queue objects (struct qman_fq) are stored within memory passed to
-+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
-+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
-+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
-+ * they should;
-+ *
-+ * (a) extend the qman_fq structure with their state; eg.
-+ *
-+ * // myfq is allocated and driver_fq callbacks filled in;
-+ * struct my_fq {
-+ * struct qman_fq base;
-+ * int an_extra_field;
-+ * [ ... add other fields to be associated with each FQ ...]
-+ * } *myfq = some_my_fq_allocator();
-+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
-+ *
-+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
-+ * struct my_fq *myfq = (struct my_fq *)fq;
-+ * do_something_with(myfq->an_extra_field);
-+ * [...]
-+ *
-+ * (b) when and if configuring the FQ for context stashing, specify however
-+ * many cachelines are required to stash 'struct my_fq', to accelerate not
-+ * only the Qman driver but the callback as well.
-+ */
-+
-+struct qman_fq_cb {
-+ qman_cb_dqrr dqrr; /* for dequeued frames */
-+ qman_cb_mr ern; /* for s/w ERNs */
-+ qman_cb_mr fqs; /* frame-queue state changes*/
-+};
-+
-+struct qman_fq {
-+ /* Caller of qman_create_fq() provides these demux callbacks */
-+ struct qman_fq_cb cb;
-+ /* These are internal to the driver, don't touch. In particular, they
-+ * may change, be removed, or extended (so you shouldn't rely on
-+ * sizeof(qman_fq) being a constant). */
-+ spinlock_t fqlock;
-+ u32 fqid;
-+ volatile unsigned long flags;
-+ enum qman_fq_state state;
-+ int cgr_groupid;
-+ struct rb_node node;
-+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-+ u32 key;
-+#endif
-+};
-+
-+/* This callback type is used when handling congestion group entry/exit.
-+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
-+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
-+ struct qman_cgr *cgr, int congested);
-+
-+struct qman_cgr {
-+ /* Set these prior to qman_create_cgr() */
-+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
-+ qman_cb_cgr cb;
-+ /* These are private to the driver */
-+ u16 chan; /* portal channel this object is created on */
-+ struct list_head node;
-+};
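-+
-+/*
-+ * Illustrative sketch of a congestion-group callback matching the
-+ * qman_cb_cgr type above. The 'cgrid' and 'cb' fields of struct qman_cgr
-+ * are what a caller fills in before registering the object through the
-+ * driver's CGR creation API (not shown in this excerpt).
-+ */
-+static inline void example_cgr_cb(struct qman_portal *qm,
-+      struct qman_cgr *cgr, int congested)
-+{
-+  /* e.g. pause enqueuing to this group's FQs while 'congested' != 0 */
-+}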
-+
-+/* Flags to qman_create_fq() */
-+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
-+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
-+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
-+#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
-+#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
-+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
-+
-+/* Flags to qman_destroy_fq() */
-+#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
-+
-+/* Flags from qman_fq_state() */
-+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
-+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
-+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
-+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
-+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
-+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
-+
-+/* Flags to qman_init_fq() */
-+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
-+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
-+
-+/* Flags to qman_volatile_dequeue() */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
-+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
-+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
-+#endif
-+
-+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
-+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
-+ * any change here should be audited in PME.) */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT
-+#define QMAN_ENQUEUE_FLAG_WAIT 0x00010000 /* wait if EQCR is full */
-+#define QMAN_ENQUEUE_FLAG_WAIT_INT 0x00020000 /* if wait, interruptible? */
-+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
-+#define QMAN_ENQUEUE_FLAG_WAIT_SYNC 0x00000004 /* if wait, until consumed? */
-+#endif
-+#endif
-+#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
-+#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
-+#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
-+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
-+ (((u32)(p) << 2) & 0x00000f00)
-+#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
-+#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
-+#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
-+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
-+/* For the ORP-specific qman_enqueue_orp() variant;
-+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
-+ * of a frame. */
-+#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
-+/* - this flag performs no enqueue but fills in an ORP sequence number that
-+ * would otherwise block it (eg. if a frame has been dropped). */
-+#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
-+/* - this flag performs no enqueue but advances NESN to the given sequence
-+ * number. */
-+#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
-+
-+/* Flags to qman_modify_cgr() */
-+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
-+#define QMAN_CGR_MODE_FRAME 0x00000001
-+
-+ /* Portal Management */
-+ /* ----------------- */
-+/**
-+ * qman_get_portal_config - get portal configuration settings
-+ *
-+ * This returns a read-only view of the current cpu's affine portal settings.
-+ */
-+const struct qman_portal_config *qman_get_portal_config(void);
-+
-+/**
-+ * qman_irqsource_get - return the portal work that is interrupt-driven
-+ *
-+ * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
-+ * enabled for interrupt handling on the current cpu's affine portal. These
-+ * sources will trigger the portal interrupt and the interrupt handler (or a
-+ * tasklet/bottom-half it defers to) will perform the corresponding processing
-+ * work. The qman_poll_***() functions will only process sources that are not in
-+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
-+ * this always returns zero.
-+ */
-+u32 qman_irqsource_get(void);
-+
-+/**
-+ * qman_irqsource_add - add processing sources to be interrupt-driven
-+ * @bits: bitmask of QM_PIRQ_**I processing sources
-+ *
-+ * Adds processing sources that should be interrupt-driven (rather than
-+ * processed via qman_poll_***() functions). Returns zero for success, or
-+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
-+ */
-+int qman_irqsource_add(u32 bits);
-+
-+/**
-+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
-+ * @bits: bitmask of QM_PIRQ_**I processing sources
-+ *
-+ * Removes processing sources from being interrupt-driven, so that they will
-+ * instead be processed via qman_poll_***() functions. Returns zero for success,
-+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
-+ */
-+int qman_irqsource_remove(u32 bits);
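-+
-+/*
-+ * Illustrative sketch: switch DQRR processing of the local affine portal
-+ * from polling to interrupt-driven, assuming QM_PIRQ_DQRI (the DQRR
-+ * processing source bit referred to below) is defined earlier in this header.
-+ */
-+static inline int example_make_dqrr_irq_driven(void)
-+{
-+  return qman_irqsource_add(QM_PIRQ_DQRI);
-+}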
-+
-+/**
-+ * qman_affine_cpus - return a mask of cpus that have affine portals
-+ */
-+const cpumask_t *qman_affine_cpus(void);
-+
-+/**
-+ * qman_affine_channel - return the channel ID of a portal
-+ * @cpu: the cpu whose affine portal is the subject of the query
-+ *
-+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
-+ * bug to call this function for any value of @cpu (other than -1) that is not a
-+ * member of the mask returned from qman_affine_cpus().
-+ */
-+u16 qman_affine_channel(int cpu);
-+
-+/**
-+ * qman_get_affine_portal - return the portal pointer affine to cpu
-+ * @cpu: the cpu whose affine portal is the subject of the query
-+ *
-+ */
-+void *qman_get_affine_portal(int cpu);
-+
-+/**
-+ * qman_poll_dqrr - process DQRR (fast-path) entries
-+ * @limit: the maximum number of DQRR entries to process
-+ *
-+ * Use of this function requires that DQRR processing not be interrupt-driven.
-+ * Ie. the value returned by qman_irqsource_get() should not include
-+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
-+ * this function will return -EINVAL, otherwise the return value is >=0 and
-+ * represents the number of DQRR entries processed.
-+ */
-+int qman_poll_dqrr(unsigned int limit);
-+
-+/**
-+ * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
-+ *
-+ * This function does any portal processing that isn't interrupt-driven. If the
-+ * current CPU is sharing a portal hosted on another CPU, this function will
-+ * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
-+ * indicating what interrupt sources were actually processed by the call.
-+ */
-+u32 qman_poll_slow(void);
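-+
-+/*
-+ * Illustrative sketch: a run-to-completion polling loop for a cpu whose
-+ * affine portal is not interrupt-driven. The per-iteration DQRR budget of
-+ * 16 entries and the caller-owned 'stop' flag are arbitrary example choices.
-+ */
-+static inline void example_poll_loop(const int *stop)
-+{
-+  while (!*stop) {
-+    qman_poll_dqrr(16); /* fast-path: DQRR entries */
-+    qman_poll_slow();   /* slow-path: EQCR, state changes, etc */
-+  }
-+}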
-+
-+/**
-+ * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
-+ *
-+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
-+ * affine portal. There are two classes of portal processing in question;
-+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
-+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
-+ * thresholds, congestion state changes, etc). This function does whatever
-+ * processing is not triggered by interrupts.
-+ *
-+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
-+ * interrupt-driven) then this function uses a heuristic to determine how often
-+ * to run slow-path processing - as slow-path processing introduces at least a
-+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
-+ * close to zero-cost if there is no work to be done. Applications can tune this
-+ * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
-+ * rather than going via this wrapper.
-+ */
-+void qman_poll(void);
-+
-+/**
-+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
-+ *
-+ * Disables DQRR processing of the portal. This is reference-counted, so
-+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
-+ * truly re-enable dequeuing.
-+ */
-+void qman_stop_dequeues(void);
-+
-+/**
-+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
-+ *
-+ * Enables DQRR processing of the portal. This is reference-counted, so
-+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
-+ * truly re-enable dequeuing.
-+ */
-+void qman_start_dequeues(void);
-+
-+/**
-+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
-+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
-+ *
-+ * Adds a set of pool channels to the portal's static dequeue command register
-+ * (SDQCR). The requested pools are limited to those the portal has dequeue
-+ * access to.
-+ */
-+void qman_static_dequeue_add(u32 pools);
-+
-+/**
-+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
-+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
-+ *
-+ * Removes a set of pool channels from the portal's static dequeue command
-+ * register (SDQCR). The requested pools are limited to those the portal has
-+ * dequeue access to.
-+ */
-+void qman_static_dequeue_del(u32 pools);
-+
-+/**
-+ * qman_static_dequeue_get - return the portal's current SDQCR
-+ *
-+ * Returns the portal's current static dequeue command register (SDQCR). The
-+ * entire register is returned, so if only the currently-enabled pool channels
-+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
-+ */
-+u32 qman_static_dequeue_get(void);
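-+
-+/*
-+ * Illustrative sketch: grant the local portal static dequeue access to pool
-+ * channels 2 and 3 and read back the resulting SDQCR, assuming the
-+ * QM_SDQCR_CHANNELS_POOL() helper referred to above is defined earlier in
-+ * this header.
-+ */
-+static inline u32 example_sdqcr_setup(void)
-+{
-+  qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(2) |
-+        QM_SDQCR_CHANNELS_POOL(3));
-+  return qman_static_dequeue_get();
-+}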
-+
-+/**
-+ * qman_dca - Perform a Discrete Consumption Acknowledgement
-+ * @dq: the DQRR entry to be consumed
-+ * @park_request: indicates whether the held-active @fq should be parked
-+ *
-+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
-+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
-+ * does not take a 'portal' argument but implies the core affine portal from the
-+ * cpu that is currently executing the function. For reasons of locking, this
-+ * function must be called from the same CPU as that which processed the DQRR
-+ * entry in the first place.
-+ */
-+void qman_dca(struct qm_dqrr_entry *dq, int park_request);
-+
-+/**
-+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
-+ *
-+ * For use in situations where a cpu-affine caller needs to determine when all
-+ * enqueues for the local portal have been processed by Qman but can't use the
-+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
-+ * The function forces tracking of EQCR consumption (which normally doesn't
-+ * happen until enqueue processing needs to find space to put new enqueue
-+ * commands), and returns zero if the ring still has unprocessed entries,
-+ * non-zero if it is empty.
-+ */
-+int qman_eqcr_is_empty(void);
-+
-+/**
-+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
-+ * @handler: callback for processing DCP ERNs
-+ * @affine: whether this handler is specific to the locally affine portal
-+ *
-+ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
-+ * DCP) is configured not to receive enqueue rejections, then any enqueues
-+ * through that DCP that are rejected will be sent to a given software portal.
-+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
-+ * received on the portal affine to the current CPU. If multiple CPUs share a
-+ * portal and they all call this function, they will be setting the handler for
-+ * the same portal! If @affine is zero, then this handler will be global to all
-+ * portals handled by this instance of the driver. Only those portals that do
-+ * not have their own affine handler will use the global handler.
-+ */
-+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
-+
-+ /* FQ management */
-+ /* ------------- */
-+/**
-+ * qman_create_fq - Allocates a FQ
-+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
-+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
-+ * @fq: memory for storing the 'fq', with callbacks filled in
-+ *
-+ * Creates a frame queue object for the given @fqid, unless the
-+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
-+ * dynamically allocated (or the function fails if none are available). Once
-+ * created, the caller should not touch the memory at 'fq' except as extended to
-+ * adjacent memory for user-defined fields (see the definition of "struct
-+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
-+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
-+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
-+ * causes the driver to honour any contextB modifications requested in the
-+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
-+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
-+ * software portals, the contextB field is controlled by the driver and can't be
-+ * modified by the caller. If the AS_IS flag is specified, management commands
-+ * will be used on portal @p to query state for frame queue @fqid and construct
-+ * a frame queue object based on that, rather than assuming/requiring that it be
-+ * Out of Service.
-+ */
-+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
-+
-+/**
-+ * qman_destroy_fq - Deallocates a FQ
-+ * @fq: the frame queue object to release
-+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
-+ *
-+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
-+ * not deallocated but the caller regains ownership, to do with as desired. The
-+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
-+ * is specified, in which case it may also be in the 'parked' state.
-+ */
-+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
-+
-+/**
-+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
-+ * @fq: the frame queue object to query
-+ */
-+u32 qman_fq_fqid(struct qman_fq *fq);
-+
-+/**
-+ * qman_fq_state - Queries the state of a FQ object
-+ * @fq: the frame queue object to query
-+ * @state: pointer to state enum to return the FQ scheduling state
-+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
-+ *
-+ * Queries the state of the FQ object, without performing any h/w commands.
-+ * This captures the state, as seen by the driver, at the time the function
-+ * executes.
-+ */
-+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
-+
-+/**
-+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
-+ * @fq: the frame queue object to modify, must be 'parked' or new.
-+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
-+ * @opts: the FQ-modification settings, as defined in the low-level API
-+ *
-+ * The @opts parameter comes from the low-level portal API. Select
-+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
-+ * rather than parked. NB, @opts can be NULL.
-+ *
-+ * Note that some fields and options within @opts may be ignored or overwritten
-+ * by the driver;
-+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
-+ * affects one frame queue: @fq).
-+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
-+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
-+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
-+ * initialised to a value used by the driver for demux.
-+ * - if context_b is initialised for demux, so is context_a in case stashing
-+ * is requested (see item 4).
-+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
-+ * objects.)
-+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
-+ * 'dest::channel' field will be overwritten to match the portal used to issue
-+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
-+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
-+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
-+ * isn't set, the destination channel/workqueue fields and the write-enable bit
-+ * are left as-is.
-+ * 4. if the driver overwrites context_a/b for demux, then if
-+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
-+ * context_a.address fields and will leave the stashing fields provided by the
-+ * user alone, otherwise it will zero out the context_a.stashing fields.
-+ */
-+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
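-+
-+/*
-+ * Illustrative sketch: allocate a dynamic FQID, create an FQ object around
-+ * caller-filled callbacks, and schedule it on the local portal's dedicated
-+ * channel. 'fq' is assumed to have its cb fields already populated; the
-+ * FQCTRL write-enable/flag pair and the 'fqd.fq_ctrl' member (the FQD field
-+ * named by the constants near the top of this header) are example choices,
-+ * and memset() is assumed available via the usual kernel string helpers.
-+ */
-+static inline int example_fq_setup(struct qman_fq *fq)
-+{
-+  struct qm_mcc_initfq opts;
-+  int err;
-+
-+  err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
-+  if (err)
-+    return err;
-+  memset(&opts, 0, sizeof(opts));
-+  opts.we_mask = QM_INITFQ_WE_FQCTRL;
-+  opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
-+  return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
-+        &opts);
-+}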
-+
-+/**
-+ * qman_schedule_fq - Schedules a FQ
-+ * @fq: the frame queue object to schedule, must be 'parked'
-+ *
-+ * Schedules the frame queue, which must be Parked, which takes it to
-+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
-+ */
-+int qman_schedule_fq(struct qman_fq *fq);
-+
-+/**
-+ * qman_retire_fq - Retires a FQ
-+ * @fq: the frame queue object to retire
-+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
-+ *
-+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
-+ * the retirement was started asynchronously, otherwise it returns negative for
-+ * failure. When this function returns zero, @flags is set to indicate whether
-+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
-+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
-+ * FQRN message shows up on the portal's message ring.
-+ *
-+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
-+ * Active state), the completion will be via the message ring as a FQRN - but
-+ * the corresponding callback may occur before this function returns!! Ie. the
-+ * caller should be prepared to accept the callback as the function is called,
-+ * not only once it has returned.
-+ */
-+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
-+
-+/**
-+ * qman_oos_fq - Puts a FQ "out of service"
-+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
-+ *
-+ * The frame queue must be retired and empty, and if any order restoration list
-+ * was released as ERNs at the time of retirement, they must all be consumed.
-+ */
-+int qman_oos_fq(struct qman_fq *fq);
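-+
-+/*
-+ * Illustrative sketch of the usual teardown order for an FQ that is known
-+ * to be empty: retire, wait for any asynchronous FQRN completion (not shown
-+ * here), take the FQ out of service, then release the object to the caller.
-+ */
-+static inline void example_fq_teardown(struct qman_fq *fq)
-+{
-+  u32 flags;
-+  int ret = qman_retire_fq(fq, &flags);
-+
-+  if (ret > 0)
-+    return; /* completion arrives later as an FQRN message */
-+  if (ret < 0)
-+    return; /* retirement failed */
-+  qman_oos_fq(fq);
-+  qman_destroy_fq(fq, 0);
-+}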
-+
-+/**
-+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
-+ * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
-+ * or 'retired' or 'parked' state
-+ * @xon: boolean to set fq in XON or XOFF state
-+ *
-+ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
-+ * state, otherwise the IFSI interrupt will be asserted.
-+ */
-+int qman_fq_flow_control(struct qman_fq *fq, int xon);
-+
-+/**
-+ * qman_query_fq - Queries FQD fields (via h/w query command)
-+ * @fq: the frame queue object to be queried
-+ * @fqd: storage for the queried FQD fields
-+ */
-+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-+
-+/**
-+ * qman_query_fq_np - Queries non-programmable FQD fields
-+ * @fq: the frame queue object to be queried
-+ * @np: storage for the queried FQD fields
-+ */
-+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
-+
-+/**
-+ * qman_query_wq - Queries work queue lengths
-+ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
-+ * to this software portal. Otherwise, query length of WQs in a
-+ * channel specified in wq.
-+ * @wq: storage for the queried WQ lengths. Also specifies the channel to
-+ * query if query_dedicated is zero.
-+ */
-+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
-+
-+/**
-+ * qman_volatile_dequeue - Issue a volatile dequeue command
-+ * @fq: the frame queue object to dequeue from
-+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
-+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
-+ *
-+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
-+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
-+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
-+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
-+ * the VDQCR command has finished executing (ie. once the callback for the last
-+ * DQRR entry resulting from the VDQCR command has been called). If not using
-+ * the FINISH flag, completion can be determined either by detecting the
-+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
-+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
-+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
-+ * "flags" retrieved from qman_fq_state().
-+ */
-+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
-+
-+/**
-+ * qman_enqueue - Enqueue a frame to a frame queue
-+ * @fq: the frame queue object to enqueue to
-+ * @fd: a descriptor of the frame to be enqueued
-+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
-+ *
-+ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
-+ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
-+ * field is ignored. The return value is non-zero on error, such as ring full
-+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
-+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
-+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
-+ * interrupt will assert when Qman consumes the EQCR entry (subject to "status
-+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
-+ * perform an implied "discrete consumption acknowledgement" on the dequeue
-+ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
-+ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
-+ * this implicit DCA can delay the release of a "held active" frame queue
-+ * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
-+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
-+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
-+ * acknowledgement should "park request" the "held active" frame queue. Ie.
-+ * when the portal eventually releases that frame queue, it will be left in the
-+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
-+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
-+ * is requested, and the FQ is a member of a congestion group, then this
-+ * function returns -EAGAIN if the congestion group is currently congested.
-+ * Note, this does not eliminate ERNs, as the async interface means we can be
-+ * sending enqueue commands to an un-congested FQ that becomes congested before
-+ * the enqueue commands are processed, but it does minimise needless thrashing
-+ * of an already busy hardware resource by throttling many of the to-be-dropped
-+ * enqueues "at the source".
-+ */
-+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
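-+
-+/*
-+ * Illustrative sketch: enqueue a caller-prepared frame descriptor with
-+ * congestion watching, retrying a bounded number of times instead of using
-+ * the FLAG_WAIT options. The retry limit of 100 is an arbitrary example and
-+ * cpu_relax() is assumed available from the usual kernel headers.
-+ */
-+static inline int example_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
-+{
-+  int retries = 100;
-+  int err;
-+
-+  do {
-+    err = qman_enqueue(fq, fd, QMAN_ENQUEUE_FLAG_WATCH_CGR);
-+    if (err)
-+      cpu_relax(); /* EQCR full or congestion group congested */
-+  } while (err && --retries);
-+  return err;
-+}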
-+
-+typedef int (*qman_cb_precommit) (void *arg);
-+/**
-+ * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
-+ * @fq: the frame queue object to enqueue to
-+ * @fd: a descriptor of the frame to be enqueued
-+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
-+ * @cb: user supplied callback function to invoke before writing commit verb.
-+ * @cb_arg: callback function argument
-+ *
-+ * This is similar to qman_enqueue except that it will invoke a user supplied
-+ * callback function just before writing the commit verb. This is useful
-+ * when the user wants to do something *just before* enqueuing the request and
-+ * the enqueue can't fail.
-+ */
-+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
-+ u32 flags, qman_cb_precommit cb, void *cb_arg);
-+
-+/**
-+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
-+ * @fq: the frame queue object to enqueue to
-+ * @fd: a descriptor of the frame to be enqueued
-+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
-+ * @orp: the frame queue object used as an order restoration point.
-+ * @orp_seqnum: the sequence number of this frame in the order restoration path
-+ *
-+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
-+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
-+ * enqueue operation to employ order restoration. Each frame queue object acts
-+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
-+ * with an incrementing sequence number, this value is generally ignored unless
-+ * that sequence of dequeued frames will need order restoration later. Each
-+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
-+ * is a re-assembly context for re-ordering frames relative to their sequence
-+ * numbers as they are enqueued. The ORP does not have to be within the frame
-+ * queue that receives the enqueued frame, in fact it is usually the frame
-+ * queue from which the frames were originally dequeued. For the purposes of
-+ * order restoration, multiple frames (or "fragments") can be enqueued for a
-+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
-+ * enqueues except the final fragment of a given sequence number. Ordering
-+ * between sequence numbers is guaranteed, even if fragments of different
-+ * sequence numbers are interlaced with one another. Fragments of the same
-+ * sequence number will retain the order in which they are enqueued. If no
-+ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
-+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
-+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
-+ * sequence number should become the ORP's "Next Expected Sequence Number".
-+ *
-+ * Side note: a frame queue object can be used purely as an ORP, without
-+ * carrying any frames at all. Care should be taken not to deallocate a frame
-+ * queue object that is being actively used as an ORP, as a future allocation
-+ * of the frame queue object may start using the internal ORP before the
-+ * previous use has finished.
-+ */
-+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
-+ struct qman_fq *orp, u16 orp_seqnum);
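A minimal sketch of the fragment handling described above, assuming an array of frame descriptors that all share one ORP sequence number; only the final fragment omits QMAN_ENQUEUE_FLAG_NLIS.

static int example_send_fragments(struct qman_fq *fq, struct qman_fq *orp,
                                  const struct qm_fd *frags, int n, u16 seqnum)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                /* NLIS ("not last in sequence") on all but the last fragment */
                u32 flags = (i < n - 1) ? QMAN_ENQUEUE_FLAG_NLIS : 0;

                ret = qman_enqueue_orp(fq, &frags[i], flags, orp, seqnum);
                if (ret)
                        return ret;
        }
        return 0;
}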
-+
-+/**
-+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
-+ * @result: is set by the API to the base FQID of the allocated range
-+ * @count: the number of FQIDs required
-+ * @align: required alignment of the allocated range
-+ * @partial: non-zero if the API can return fewer than @count FQIDs
-+ *
-+ * Returns the number of frame queues allocated, or a negative error code. If
-+ * @partial is non-zero, the allocation request may return a smaller range of
-+ * FQs than requested (though alignment will be as requested). If @partial is
-+ * zero, the return value will either be 'count' or negative.
-+ */
-+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
-+static inline int qman_alloc_fqid(u32 *result)
-+{
-+ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+
-+/**
-+ * qman_release_fqid_range - Release the specified range of frame queue IDs
-+ * @fqid: the base FQID of the range to deallocate
-+ * @count: the number of FQIDs in the range
-+ *
-+ * This function can also be used to seed the allocator with ranges of FQIDs
-+ * that it can subsequently allocate from.
-+ */
-+void qman_release_fqid_range(u32 fqid, unsigned int count);
-+static inline void qman_release_fqid(u32 fqid)
-+{
-+ qman_release_fqid_range(fqid, 1);
-+}
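For illustration, a block of eight consecutive, 8-aligned FQIDs could be obtained and returned as sketched below; the FQs themselves would still have to be created and initialised separately.

static int example_fqid_block(void)
{
        u32 base;
        int ret;

        /* Ask for exactly 8 FQIDs, aligned to 8, refusing a partial result. */
        ret = qman_alloc_fqid_range(&base, 8, 8, 0);
        if (ret < 0)
                return ret;
        /* ... create and use FQs with IDs base .. base + 7 ... */
        qman_release_fqid_range(base, 8);
        return 0;
}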
-+
-+void qman_seed_fqid_range(u32 fqid, unsigned int count);
-+
-+
-+int qman_shutdown_fq(u32 fqid);
-+
-+/**
-+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
-+ * @fqid: the base FQID of the range to reserve
-+ * @count: the number of FQIDs in the range
-+ */
-+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
-+static inline int qman_reserve_fqid(u32 fqid)
-+{
-+ return qman_reserve_fqid_range(fqid, 1);
-+}
-+
-+ /* Pool-channel management */
-+ /* ----------------------- */
-+/**
-+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
-+ * @result: is set by the API to the base pool-channel ID of the allocated range
-+ * @count: the number of pool-channel IDs required
-+ * @align: required alignment of the allocated range
-+ * @partial: non-zero if the API can return fewer than @count
-+ *
-+ * Returns the number of pool-channel IDs allocated, or a negative error code.
-+ * If @partial is non-zero, the allocation request may return a smaller range
-+ * than requested (though alignment will be as requested). If @partial is zero,
-+ * the return value will either be 'count' or negative.
-+ */
-+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
-+static inline int qman_alloc_pool(u32 *result)
-+{
-+ int ret = qman_alloc_pool_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+
-+/**
-+ * qman_release_pool_range - Release the specified range of pool-channel IDs
-+ * @id: the base pool-channel ID of the range to deallocate
-+ * @count: the number of pool-channel IDs in the range
-+ */
-+void qman_release_pool_range(u32 id, unsigned int count);
-+static inline void qman_release_pool(u32 id)
-+{
-+ qman_release_pool_range(id, 1);
-+}
-+
-+/**
-+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
-+ * @id: the base pool-channel ID of the range to reserve
-+ * @count: the number of pool-channel IDs in the range
-+ */
-+int qman_reserve_pool_range(u32 id, unsigned int count);
-+static inline int qman_reserve_pool(u32 id)
-+{
-+ return qman_reserve_pool_range(id, 1);
-+}
-+
-+void qman_seed_pool_range(u32 id, unsigned int count);
-+
-+ /* CGR management */
-+ /* -------------- */
-+/**
-+ * qman_create_cgr - Register a congestion group object
-+ * @cgr: the 'cgr' object, with fields filled in
-+ * @flags: QMAN_CGR_FLAG_* values
-+ * @opts: optional state of CGR settings
-+ *
-+ * Registers this object to receive congestion entry/exit callbacks on the
-+ * portal affine to the cpu on which this API is executed. If opts is
-+ * NULL then only the callback (cgr->cb) function is registered. If @flags
-+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
-+ * any unspecified parameters) will be used rather than a modify hw command
-+ * (which only modifies the specified parameters).
-+ */
-+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
-+ struct qm_mcc_initcgr *opts);
-+
-+/**
-+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
-+ * @cgr: the 'cgr' object, with fields filled in
-+ * @flags: QMAN_CGR_FLAG_* values
-+ * @dcp_portal: the DCP portal to which the cgr object is registered.
-+ * @opts: optional state of CGR settings
-+ *
-+ */
-+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
-+ struct qm_mcc_initcgr *opts);
-+
-+/**
-+ * qman_delete_cgr - Deregisters a congestion group object
-+ * @cgr: the 'cgr' object to deregister
-+ *
-+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
-+ * is executed. This must be executed on the same affine portal on which it was
-+ * created.
-+ */
-+int qman_delete_cgr(struct qman_cgr *cgr);
-+
-+/**
-+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
-+ * @cgr: the 'cgr' object to deregister
-+ *
-+ * This will select the proper CPU and run qman_delete_cgr() there.
-+ */
-+void qman_delete_cgr_safe(struct qman_cgr *cgr);
-+
-+/**
-+ * qman_modify_cgr - Modify CGR fields
-+ * @cgr: the 'cgr' object to modify
-+ * @flags: QMAN_CGR_FLAG_* values
-+ * @opts: the CGR-modification settings
-+ *
-+ * The @opts parameter comes from the low-level portal API, and can be NULL.
-+ * Note that some fields and options within @opts may be ignored or overwritten
-+ * by the driver, in particular the 'cgrid' field is ignored (this operation
-+ * only affects the given CGR object). If @flags contains
-+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
-+ * unspecified parameters) will be used rather than a modify hw command (which
-+ * only modifies the specified parameters).
-+ */
-+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
-+ struct qm_mcc_initcgr *opts);
-+
-+/**
-+* qman_query_cgr - Queries CGR fields
-+* @cgr: the 'cgr' object to query
-+* @result: storage for the queried congestion group record
-+*/
-+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
-+
-+/**
-+ * qman_query_congestion - Queries the state of all congestion groups
-+ * @congestion: storage for the queried state of all congestion groups
-+ */
-+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
-+
-+/**
-+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
-+ * @result: is set by the API to the base CGR ID of the allocated range
-+ * @count: the number of CGR IDs required
-+ * @align: required alignment of the allocated range
-+ * @partial: non-zero if the API can return fewer than @count
-+ *
-+ * Returns the number of CGR IDs allocated, or a negative error code.
-+ * If @partial is non-zero, the allocation request may return a smaller range
-+ * than requested (though alignment will be as requested). If @partial is zero,
-+ * the return value will either be 'count' or negative.
-+ */
-+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
-+static inline int qman_alloc_cgrid(u32 *result)
-+{
-+ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
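A sketch tying the CGR pieces together: allocate a CGR ID and register the object on the current CPU's affine portal. The 'cgrid' and 'cb' fields of struct qman_cgr and the qman_cb_cgr callback type are assumed to exist as in the stock QMan API; passing a NULL @opts registers only the callback, as described for qman_create_cgr() above.

static int example_cgr_setup(struct qman_cgr *cgr, qman_cb_cgr cb)
{
        u32 cgrid;
        int ret = qman_alloc_cgrid(&cgrid);

        if (ret)
                return ret;
        cgr->cgrid = cgrid;
        cgr->cb = cb;
        /* NULL opts: only the congestion entry/exit callback is registered */
        return qman_create_cgr(cgr, 0, NULL);
}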
-+
-+/**
-+ * qman_release_cgrid_range - Release the specified range of CGR IDs
-+ * @id: the base CGR ID of the range to deallocate
-+ * @count: the number of CGR IDs in the range
-+ */
-+void qman_release_cgrid_range(u32 id, unsigned int count);
-+static inline void qman_release_cgrid(u32 id)
-+{
-+ qman_release_cgrid_range(id, 1);
-+}
-+
-+/**
-+ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
-+ * @id: the base CGR ID of the range to reserve
-+ * @count: the number of CGR IDs in the range
-+ */
-+int qman_reserve_cgrid_range(u32 id, unsigned int count);
-+static inline int qman_reserve_cgrid(u32 id)
-+{
-+ return qman_reserve_cgrid_range(id, 1);
-+}
-+
-+void qman_seed_cgrid_range(u32 id, unsigned int count);
-+
-+
-+ /* Helpers */
-+ /* ------- */
-+/**
-+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
-+ * @fqid: the FQID that will be initialised by other s/w
-+ *
-+ * In many situations, an FQID is provided for communication between s/w
-+ * entities, and whilst the consumer is responsible for initialising and
-+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object
-+ * and only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
-+ * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
-+ * However, data can not be enqueued to the FQ until it is initialised out of
-+ * the OOS state - this function polls for that condition. It is particularly
-+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
-+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
-+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
-+ * synchronise. The function returns zero for success, +1 if the FQ is still in
-+ * the OOS state, or negative if there was an error.
-+ */
-+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
-+{
-+ struct qm_mcr_queryfq_np np;
-+ int err;
-+ err = qman_query_fq_np(fq, &np);
-+ if (err)
-+ return err;
-+ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
-+ return 1;
-+ return 0;
-+}
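The IPC synchronisation pattern described above might look roughly like the following sketch, where the caller spins (politely) until the peer has taken the Tx FQ out of the OOS state.

static int example_wait_for_peer(struct qman_fq *tx_fq)
{
        int ret;

        do {
                ret = qman_poll_fq_for_init(tx_fq);
                if (ret < 0)
                        return ret;     /* query failed */
                if (ret)
                        cpu_relax();    /* still OOS, keep polling */
        } while (ret);
        return 0;
}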
-+
-+ /* -------------- */
-+ /* CEETM :: types */
-+ /* -------------- */
-+/**
-+ * Token Rate Structure
-+ * Shaping rates are based on a "credit" system and a pre-configured h/w
-+ * internal timer. The following type represents a shaper "rate" parameter as a
-+ * fractional number of "tokens". Here's how it works. This (fractional) number
-+ * of tokens is added to the shaper's "credit" every time the h/w timer elapses
-+ * (up to a limit which is set by another shaper parameter). Every time a frame
-+ * is enqueued through a shaper, the shaper deducts as many tokens as there are
-+ * bytes of data in the enqueued frame. A shaper will not allow itself to
-+ * enqueue any frames if its token count is negative. As such;
-+ *
-+ * The rate at which data is enqueued is limited by the
-+ * rate at which tokens are added.
-+ *
-+ * Therefore if the user knows the period between these h/w timer updates in
-+ * seconds, they can calculate the maximum traffic rate of the shaper (in
-+ * bytes-per-second) from the token rate. And vice versa, they can calculate
-+ * the token rate to use in order to achieve a given traffic rate.
-+ */
-+struct qm_ceetm_rate {
-+ /* The token rate is; whole + (fraction/8192) */
-+ u32 whole:11; /* 0..2047 */
-+ u32 fraction:13; /* 0..8191 */
-+};
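As a worked example of this fixed-point encoding, a rate of 2.5 tokens per timer tick would be stored as whole = 2, fraction = 4096 (4096/8192 = 0.5). The helper below is illustrative only and simply returns the rate scaled up by 8192.

static inline u64 example_tokens_x8192(const struct qm_ceetm_rate *rate)
{
        /* whole + fraction/8192, scaled by 8192 (i.e. 1 << 13) */
        return ((u64)rate->whole << 13) + rate->fraction;
}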
-+
-+struct qm_ceetm_weight_code {
-+ /* The weight code is; 5 msbits + 3 lsbits */
-+ u8 y:5;
-+ u8 x:3;
-+};
-+
-+struct qm_ceetm {
-+ unsigned int idx;
-+ struct list_head sub_portals;
-+ struct list_head lnis;
-+ unsigned int sp_range[2];
-+ unsigned int lni_range[2];
-+};
-+
-+struct qm_ceetm_sp {
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int dcp_idx;
-+ int is_claimed;
-+ struct qm_ceetm_lni *lni;
-+};
-+
-+/* Logical Network Interface */
-+struct qm_ceetm_lni {
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int dcp_idx;
-+ int is_claimed;
-+ struct qm_ceetm_sp *sp;
-+ struct list_head channels;
-+ int shaper_enable;
-+ int shaper_couple;
-+ int oal;
-+ struct qm_ceetm_rate cr_token_rate;
-+ struct qm_ceetm_rate er_token_rate;
-+ u16 cr_token_bucket_limit;
-+ u16 er_token_bucket_limit;
-+};
-+
-+/* Class Queue Channel */
-+struct qm_ceetm_channel {
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int lni_idx;
-+ unsigned int dcp_idx;
-+ struct list_head class_queues;
-+ struct list_head ccgs;
-+ u8 shaper_enable;
-+ u8 shaper_couple;
-+ struct qm_ceetm_rate cr_token_rate;
-+ struct qm_ceetm_rate er_token_rate;
-+ u16 cr_token_bucket_limit;
-+ u16 er_token_bucket_limit;
-+};
-+
-+struct qm_ceetm_ccg;
-+
-+/* This callback type is used when handling congestion entry/exit. The
-+ * 'cb_ctx' value is the opaque value associated with ccg object.
-+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
-+ */
-+typedef void (*qman_cb_ccgr)(struct qm_ceetm_ccg *ccg, void *cb_ctx,
-+ int congested);
-+
-+/* Class Congestion Group */
-+struct qm_ceetm_ccg {
-+ struct qm_ceetm_channel *parent;
-+ struct list_head node;
-+ struct list_head cb_node;
-+ qman_cb_ccgr cb;
-+ void *cb_ctx;
-+ unsigned int idx;
-+};
-+
-+/* Class Queue */
-+struct qm_ceetm_cq {
-+ struct qm_ceetm_channel *parent;
-+ struct qm_ceetm_ccg *ccg;
-+ struct list_head node;
-+ unsigned int idx;
-+ int is_claimed;
-+ struct list_head bound_lfqids;
-+ struct list_head binding_node;
-+};
-+
-+/* Logical Frame Queue */
-+struct qm_ceetm_lfq {
-+ struct qm_ceetm_channel *parent;
-+ struct list_head node;
-+ unsigned int idx;
-+ unsigned int dctidx;
-+ u64 context_a;
-+ u32 context_b;
-+ qman_cb_mr ern;
-+};
-+
-+/**
-+ * qman_ceetm_bps2tokenrate - Given a desired rate 'bps' measured in bps
-+ * (ie. bits-per-second), compute the 'token_rate' fraction that best
-+ * approximates that rate.
-+ * @bps: the desired shaper rate in bps.
-+ * @token_rate: the output token rate computed with the given bps.
-+ * @rounding: dictates how to round if an exact conversion is not possible; if
-+ * it is negative then 'token_rate' will round down to the highest value that
-+ * does not exceed the desired rate, if it is positive then 'token_rate' will
-+ * round up to the lowest value that is greater than or equal to the desired
-+ * rate, and if it is zero then it will round to the nearest approximation,
-+ * whether that be up or down.
-+ *
-+ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
-+ */
-+int qman_ceetm_bps2tokenrate(u64 bps,
-+ struct qm_ceetm_rate *token_rate,
-+ int rounding);
-+
-+/**
-+ * qman_ceetm_tokenrate2bps - Given a 'token_rate', compute the
-+ * corresponding number of 'bps'.
-+ * @token_rate: the input desired token_rate fraction.
-+ * @bps: the output shaper rate in bps computed with the given token rate.
-+ * @rounding: has the same semantics as the previous function.
-+ *
-+ * Return 0 for success, or -EINVAL if prescaler or qman clock is not available.
-+ */
-+int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate,
-+ u64 *bps,
-+ int rounding);
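For example, to program a shaper for a 1 Gbit/s target without ever exceeding it, the conversion can be asked to round down. This is a sketch; the resulting token rate would then be fed to one of the set_*_rate() calls further below.

static int example_rate_for_1gbps(struct qm_ceetm_rate *rate)
{
        /* rounding < 0: round down so the shaped rate never exceeds 1 Gbit/s */
        return qman_ceetm_bps2tokenrate(1000000000ULL, rate, -1);
}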
-+
-+int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm0_channel(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm0_channel_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm0_channel_range(u32 channelid, u32 count);
-+static inline void qman_release_ceetm0_channelid(u32 channelid)
-+{
-+ qman_release_ceetm0_channel_range(channelid, 1);
-+}
-+
-+int qman_reserve_ceetm0_channel_range(u32 channelid, u32 count);
-+static inline int qman_reserve_ceetm0_channelid(u32 channelid)
-+{
-+ return qman_reserve_ceetm0_channel_range(channelid, 1);
-+}
-+
-+void qman_seed_ceetm0_channel_range(u32 channelid, u32 count);
-+
-+
-+int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm1_channel(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm1_channel_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm1_channel_range(u32 channelid, u32 count);
-+static inline void qman_release_ceetm1_channelid(u32 channelid)
-+{
-+ qman_release_ceetm1_channel_range(channelid, 1);
-+}
-+int qman_reserve_ceetm1_channel_range(u32 channelid, u32 count);
-+static inline int qman_reserve_ceetm1_channelid(u32 channelid)
-+{
-+ return qman_reserve_ceetm1_channel_range(channelid, 1);
-+}
-+
-+void qman_seed_ceetm1_channel_range(u32 channelid, u32 count);
-+
-+
-+int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm0_lfqid(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm0_lfqid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count);
-+static inline void qman_release_ceetm0_lfqid(u32 lfqid)
-+{
-+ qman_release_ceetm0_lfqid_range(lfqid, 1);
-+}
-+int qman_reserve_ceetm0_lfqid_range(u32 lfqid, u32 count);
-+static inline int qman_reserve_ceetm0_lfqid(u32 lfqid)
-+{
-+ return qman_reserve_ceetm0_lfqid_range(lfqid, 1);
-+}
-+
-+void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count);
-+
-+
-+int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
-+ int partial);
-+static inline int qman_alloc_ceetm1_lfqid(u32 *result)
-+{
-+ int ret = qman_alloc_ceetm1_lfqid_range(result, 1, 0, 0);
-+ return (ret > 0) ? 0 : ret;
-+}
-+void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count);
-+static inline void qman_release_ceetm1_lfqid(u32 lfqid)
-+{
-+ qman_release_ceetm1_lfqid_range(lfqid, 1);
-+}
-+int qman_reserve_ceetm1_lfqid_range(u32 lfqid, u32 count);
-+static inline int qman_reserve_ceetm1_lfqid(u32 lfqid)
-+{
-+ return qman_reserve_ceetm1_lfqid_range(lfqid, 1);
-+}
-+
-+void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count);
-+
-+
-+ /* ----------------------------- */
-+ /* CEETM :: sub-portals */
-+ /* ----------------------------- */
-+
-+/**
-+ * qman_ceetm_sp_claim - Claims the given sub-portal, provided it is available
-+ * to us and configured for traffic-management.
-+ * @sp: the returned sub-portal object, if successful.
-+ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
-+ * instance).
-+ * @sp_idx: the desired sub-portal index, from 0 to 15.
-+ *
-+ * Returns zero for success, or -ENODEV if the sub-portal is in use, or -EINVAL
-+ * if the sp_idx is out of range.
-+ *
-+ * Note that if there are multiple driver domains (eg. a linux kernel versus
-+ * user-space drivers in USDPAA, or multiple guests running under a hypervisor)
-+ * then a sub-portal may be accessible by more than one instance of a qman
-+ * driver and so it may be claimed multiple times. If this is the case, it is
-+ * up to the system architect to prevent conflicting configuration actions
-+ * coming from the different driver domains. The qman drivers do not have any
-+ * behind-the-scenes coordination to prevent this from happening.
-+ */
-+int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp,
-+ enum qm_dc_portal dcp_idx,
-+ unsigned int sp_idx);
-+
-+/**
-+ * qman_ceetm_sp_release - Releases a previously claimed sub-portal.
-+ * @sp: the sub-portal to be released.
-+ *
-+ * Returns 0 for success, or -EBUSY for failure if the dependencies are not
-+ * released.
-+ */
-+int qman_ceetm_sp_release(struct qm_ceetm_sp *sp);
-+
-+ /* ----------------------------------- */
-+ /* CEETM :: logical network interfaces */
-+ /* ----------------------------------- */
-+
-+/**
-+ * qman_ceetm_lni_claim - Claims an unclaimed LNI.
-+ * @lni: the returned LNI object, if successful.
-+ * @dcp_id: specifies the desired Fman block (and thus the relevant CEETM
-+ * instance)
-+ * @lni_idx: is the desired LNI index.
-+ *
-+ * Returns zero for success, or -EINVAL on failure, which will happen if the LNI
-+ * is not available or has already been claimed (and not yet successfully
-+ * released), or lni_idx is out of range.
-+ *
-+ * Note that there may be multiple driver domains (or instances) that need to
-+ * transmit out the same LNI, so this claim is only guaranteeing exclusivity
-+ * within the domain of the driver being called. See qman_ceetm_sp_claim() and
-+ * qman_ceetm_sp_get_lni() for more information.
-+ */
-+int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni,
-+ enum qm_dc_portal dcp_id,
-+ unsigned int lni_idx);
-+
-+/**
-+ * qman_ceetm_lni_release - Releases a previously claimed LNI.
-+ * @lni: the LNI to be released.
-+ *
-+ * This will only succeed if all dependent objects have been released.
-+ * Returns zero for success, or -EBUSY if the dependencies are not released.
-+ */
-+int qman_ceetm_lni_release(struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_sp_set_lni
-+ * qman_ceetm_sp_get_lni - Set/get the LNI that the sub-portal is currently
-+ * mapped to.
-+ * @sp: the given sub-portal.
-+ * @lni (in "set" function): the LNI object which the sp will be mapped to.
-+ * @lni_idx(in "get" function): the LNI index which the sp is mapped to.
-+ *
-+ * Returns zero for success. The "set" function returns -EINVAL if this sp-lni
-+ * mapping has already been set or the configure mapping command returns error;
-+ * the "get" function returns -EINVAL if this sp-lni mapping is not set or the
-+ * query mapping command returns error.
-+ *
-+ * This may be useful in situations where multiple driver domains have access
-+ * to the same sub-portals in order to all be able to transmit out the same
-+ * physical interface (perhaps they're on different IP addresses or VPNs, so
-+ * Fman is splitting Rx traffic and here we need to converge Tx traffic). In
-+ * that case, a control-plane is likely to use qman_ceetm_lni_claim() followed
-+ * by qman_ceetm_sp_set_lni() to configure the sub-portal, and other domains
-+ * are likely to use qman_ceetm_sp_get_lni() followed by qman_ceetm_lni_claim()
-+ * in order to determine the LNI that the control-plane had assigned. This is
-+ * why the "get" returns an index, whereas the "set" takes an (already claimed)
-+ * LNI object.
-+ */
-+int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp,
-+ struct qm_ceetm_lni *lni);
-+int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp,
-+ unsigned int *lni_idx);
-+
-+/**
-+ * qman_ceetm_lni_enable_shaper
-+ * qman_ceetm_lni_disable_shaper - Enables/disables shaping on the LNI.
-+ * @lni: the given LNI.
-+ * @coupled: indicates whether CR and ER are coupled.
-+ * @oal: the overhead accounting length which is added to the actual length of
-+ * each frame when performing shaper calculations.
-+ *
-+ * When the number of (unused) committed-rate tokens reach the committed-rate
-+ * token limit, 'coupled' indicates whether surplus tokens should be added to
-+ * the excess-rate token count (up to the excess-rate token limit).
-+ * When LNI is claimed, the shaper is disabled by default. The enable function
-+ * will turn on the shaper for this LNI.
-+ * Whenever a claimed LNI is first enabled for shaping, its committed and
-+ * excess token rates and limits are zero, so will need to be changed to do
-+ * anything useful. The shaper can subsequently be enabled/disabled without
-+ * resetting the shaping parameters, but the shaping parameters will be reset
-+ * when the LNI is released.
-+ *
-+ * Returns zero for success, or an errno for the "enable" function as follows:
-+ * a) -EINVAL if the shaper is already enabled,
-+ * b) -EIO if the configure shaper command returns error.
-+ * For the "disable" function, returns:
-+ * a) -EINVAL if the shaper has already been disabled,
-+ * b) -EIO if the configure shaper command returns error.
-+ */
-+int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
-+ int oal);
-+int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_lni_is_shaper_enabled - Check LNI shaper status
-+ * @lni: the given LNI
-+ */
-+int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_lni_set_commit_rate
-+ * qman_ceetm_lni_get_commit_rate
-+ * qman_ceetm_lni_set_excess_rate
-+ * qman_ceetm_lni_get_excess_rate - Set/get the shaper CR/ER token rate and
-+ * token limit for the given LNI.
-+ * @lni: the given LNI.
-+ * @token_rate: the desired token rate for "set" function, or the token rate of
-+ * the LNI queried by "get" function.
-+ * @token_limit: the desired token bucket limit for "set" function, or the token
-+ * limit of the given LNI queried by "get" function.
-+ *
-+ * Returns zero for success. The "set" function returns -EINVAL if the given
-+ * LNI is unshaped or -EIO if the configure shaper command returns error.
-+ * The "get" function returns -EINVAL if the token rate or the token limit is
-+ * not set or the query command returns error.
-+ */
-+int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit);
-+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit);
-+/**
-+ * qman_ceetm_lni_set_commit_rate_bps
-+ * qman_ceetm_lni_get_commit_rate_bps
-+ * qman_ceetm_lni_set_excess_rate_bps
-+ * qman_ceetm_lni_get_excess_rate_bps - Set/get the shaper CR/ER rate
-+ * and token limit for the given LNI.
-+ * @lni: the given LNI.
-+ * @bps: the desired shaping rate in bps for "set" function, or the shaping rate
-+ * of the LNI queried by "get" function.
-+ * @token_limit: the desired token bucket limit for "set" function, or the token
-+ * limit of the given LNI queried by "get" function.
-+ *
-+ * Returns zero for success. The "set" function returns -EINVAL if the given
-+ * LNI is unshaped or -EIO if the configure shaper command returns error.
-+ * The "get" function returns -EINVAL if the token rate or the token limit is
-+ * not set or the query command returns error.
-+ */
-+int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 bps,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 *bps, u16 *token_limit);
-+int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 bps,
-+ u16 token_limit);
-+int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
-+ u64 *bps, u16 *token_limit);
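Putting the two calls together, a claimed LNI could be shaped to an illustrative 500 Mbit/s committed rate as sketched below; the token bucket limit value is arbitrary.

static int example_shape_lni(struct qm_ceetm_lni *lni)
{
        int ret;

        /* CR/ER not coupled, no overhead accounting length */
        ret = qman_ceetm_lni_enable_shaper(lni, 0, 0);
        if (ret)
                return ret;
        return qman_ceetm_lni_set_commit_rate_bps(lni, 500000000ULL, 0x1000);
}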
-+
-+/**
-+ * qman_ceetm_lni_set_tcfcc
-+ * qman_ceetm_lni_get_tcfcc - Configure/query "Traffic Class Flow Control".
-+ * @lni: the given LNI.
-+ * @cq_level: is between 0 and 15, representing individual class queue levels
-+ * (CQ0 to CQ7 for every channel) and grouped class queue levels (CQ8 to CQ15
-+ * for every channel).
-+ * @traffic_class: is between 0 and 7 when associating a given class queue level
-+ * to a traffic class, or -1 when disabling traffic class flow control for this
-+ * class queue level.
-+ *
-+ * Return zero for success, or -EINVAL if the cq_level or traffic_class is out
-+ * of range as indicated above, or -EIO if the configure/query tcfcc command
-+ * returns error.
-+ *
-+ * Refer to the section of QMan CEETM traffic class flow control in the
-+ * Reference Manual.
-+ */
-+int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
-+ unsigned int cq_level,
-+ int traffic_class);
-+int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni,
-+ unsigned int cq_level,
-+ int *traffic_class);
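A short sketch of the two uses described above: associate an individual class queue level with a traffic class, then disable flow control for a grouped level. The level and class numbers are purely illustrative.

static int example_tcfcc(struct qm_ceetm_lni *lni)
{
        int ret;

        /* Associate CQ level 3 with traffic class 5 ... */
        ret = qman_ceetm_lni_set_tcfcc(lni, 3, 5);
        if (ret)
                return ret;
        /* ... and disable traffic class flow control for grouped level 8. */
        return qman_ceetm_lni_set_tcfcc(lni, 8, -1);
}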
-+
-+ /* ----------------------------- */
-+ /* CEETM :: class queue channels */
-+ /* ----------------------------- */
-+
-+/**
-+ * qman_ceetm_channel_claim - Claims an unclaimed CQ channel that is mapped to
-+ * the given LNI.
-+ * @channel: the returned class queue channel object, if successful.
-+ * @lni: the LNI that the channel belongs to.
-+ *
-+ * Channels are always initially "unshaped".
-+ *
-+ * Return zero for success, or -ENODEV if there is no channel available (all 32
-+ * channels are claimed) or -EINVAL if the channel mapping command returns
-+ * error.
-+ */
-+int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
-+ struct qm_ceetm_lni *lni);
-+
-+/**
-+ * qman_ceetm_channel_release - Releases a previously claimed CQ channel.
-+ * @channel: the channel needs to be released.
-+ *
-+ * Returns zero for success, or -EBUSY if the dependencies are still in use.
-+ *
-+ * Note any shaping of the channel will be cleared to leave it in an unshaped
-+ * state.
-+ */
-+int qman_ceetm_channel_release(struct qm_ceetm_channel *channel);
-+
-+/**
-+ * qman_ceetm_channel_enable_shaper
-+ * qman_ceetm_channel_disable_shaper - Enables/disables shaping on the channel.
-+ * @channel: the given channel.
-+ * @coupled: indicates whether surplus CR tokens should be added to the
-+ * excess-rate token count (up to the excess-rate token limit) when the number
-+ * of (unused) committed-rate tokens reach the committed_rate token limit.
-+ *
-+ * Whenever a claimed channel is first enabled for shaping, its committed and
-+ * excess token rates and limits are zero, so will need to be changed to do
-+ * anything useful. The shaper can subsequently be enabled/disabled without
-+ * resetting the shaping parameters, but the shaping parameters will be reset
-+ * when the channel is released.
-+ *
-+ * Return 0 for success, or -EINVAL for failure, in the case that the channel
-+ * shaper has been enabled/disabled or the management command returns error.
-+ */
-+int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
-+ int coupled);
-+int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel);
-+
-+/**
-+ * qman_ceetm_channel_is_shaper_enabled - Check channel shaper status.
-+ * @channel: the given channel.
-+ */
-+int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel);
-+
-+/**
-+ * qman_ceetm_channel_set_commit_rate
-+ * qman_ceetm_channel_get_commit_rate
-+ * qman_ceetm_channel_set_excess_rate
-+ * qman_ceetm_channel_get_excess_rate - Set/get channel CR/ER shaper parameters.
-+ * @channel: the given channel.
-+ * @token_rate: the desired token rate for "set" function, or the queried token
-+ * rate for "get" function.
-+ * @token_limit: the desired token limit for "set" function, or the queried
-+ * token limit for "get" function.
-+ *
-+ * Return zero for success. The "set" function returns -EINVAL if the channel
-+ * is unshaped, or -EIO if the configure shaper command returns error. The
-+ * "get" function returns -EINVAL if the token rate or token limit is not set,
-+ * or the query shaper command returns error.
-+ */
-+int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit);
-+int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit);
-+int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
-+ const struct qm_ceetm_rate *token_rate,
-+ u16 token_limit);
-+int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
-+ struct qm_ceetm_rate *token_rate,
-+ u16 *token_limit);
-+/**
-+ * qman_ceetm_channel_set_commit_rate_bps
-+ * qman_ceetm_channel_get_commit_rate_bps
-+ * qman_ceetm_channel_set_excess_rate_bps
-+ * qman_ceetm_channel_get_excess_rate_bps - Set/get channel CR/ER shaper
-+ * parameters.
-+ * @channel: the given channel.
-+ * @bps: the desired shaper rate in bps for "set" function, or the queried
-+ * shaper rate in bps for "get" function.
-+ * @token_limit: the desired token limit for "set" function, or the queried
-+ * token limit for "get" function.
-+ *
-+ * Return zero for success. The "set" function returns -EINVAL if the channel
-+ * is unshaped, or -EIO if the configure shaper command returns error. The
-+ * "get" function returns -EINVAL if the token rate or token limit is not set,
-+ * or the query shaper command returns error.
-+ */
-+int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 bps, u16 token_limit);
-+int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 *bps, u16 *token_limit);
-+int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 bps, u16 token_limit);
-+int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
-+ u64 *bps, u16 *token_limit);
-+
-+/**
-+ * qman_ceetm_channel_set_weight
-+ * qman_ceetm_channel_get_weight - Set/get the weight for unshaped channel
-+ * @channel: the given channel.
-+ * @token_limit: the desired token limit as the weight of the unshaped channel
-+ * for "set" function, or the queried token limit for "get" function.
-+ *
-+ * The algorithm of unshaped fair queuing (uFQ) is used for unshaped channel.
-+ * It allows the unshaped channels to be included in the CR time eligible list,
-+ * and thus use the configured CR token limit value as their fair queuing
-+ * weight.
-+ *
-+ * Return zero for success, or -EINVAL if the channel is a shaped channel or
-+ * the management command returns error.
-+ */
-+int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
-+ u16 token_limit);
-+int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
-+ u16 *token_limit);
-+
-+/**
-+ * qman_ceetm_channel_set_group
-+ * qman_ceetm_channel_get_group - Set/get the grouping of the class scheduler.
-+ * @channel: the given channel.
-+ * @group_b: indicates whether there is group B in this channel.
-+ * @prio_a: the priority of group A.
-+ * @prio_b: the priority of group B.
-+ *
-+ * There are 8 individual class queues (CQ0-CQ7), and 8 grouped class queues
-+ * (CQ8-CQ15). If 'group_b' is zero, then all the grouped class queues are in
-+ * group A, otherwise they are split into group A (CQ8-11) and group B
-+ * (CQ12-CQ15). The individual class queues and the group(s) are in strict
-+ * priority order relative to each other. Within the group(s), the scheduling
-+ * is not strict priority order, but the result of scheduling within a group
-+ * is in strict priority order relative to the other class queues in the
-+ * channel. 'prio_a' and 'prio_b' control the priority order of the groups
-+ * relative to the individual class queues, and take values from 0-7. Eg. if
-+ * 'group_b' is non-zero, 'prio_a' is 2 and 'prio_b' is 6, then the strict
-+ * priority order would be;
-+ * CQ0, CQ1, CQ2, GROUPA, CQ3, CQ4, CQ5, CQ6, GROUPB, CQ7
-+ *
-+ * Return 0 for success. For "set" function, returns -EINVAL if prio_a or
-+ * prio_b are out of the range 0 - 7 (the priority of group A or group B cannot
-+ * be 0; CQ0 is always the highest class queue in this channel), or -EIO if
-+ * the configure scheduler command returns error. For "get" function, return
-+ * -EINVAL if the query scheduler command returns error.
-+ */
-+int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel,
-+ int group_b,
-+ unsigned int prio_a,
-+ unsigned int prio_b);
-+int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel,
-+ int *group_b,
-+ unsigned int *prio_a,
-+ unsigned int *prio_b);
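The strict-priority example given above (group B present, prio_a = 2, prio_b = 6) corresponds to a single call, sketched here.

static int example_group_layout(struct qm_ceetm_channel *channel)
{
        /* Resulting order: CQ0, CQ1, CQ2, GROUPA, CQ3..CQ6, GROUPB, CQ7 */
        return qman_ceetm_channel_set_group(channel, 1, 2, 6);
}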
-+
-+/**
-+ * qman_ceetm_channel_set_group_cr_eligibility
-+ * qman_ceetm_channel_set_group_er_eligibility - Set channel group eligibility
-+ * @channel: the given channel object
-+ * @group_b: indicates whether there is group B in this channel.
-+ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
-+ *
-+ * Return zero for success, or -EINVAL if eligibility setting fails.
-+*/
-+int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
-+ *channel, int group_b, int cre);
-+int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
-+ *channel, int group_b, int ere);
-+
-+/**
-+ * qman_ceetm_channel_set_cq_cr_eligibility
-+ * qman_ceetm_channel_set_cq_er_eligibility - Set channel cq eligibility
-+ * @channel: the given channel object
-+ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
-+ * @cre: the commit rate eligibility, 1 for enable, 0 for disable.
-+ *
-+ * Return zero for success, or -EINVAL if eligibility setting fails.
-+*/
-+int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
-+ unsigned int idx, int cre);
-+int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
-+ unsigned int idx, int ere);
-+
-+ /* --------------------- */
-+ /* CEETM :: class queues */
-+ /* --------------------- */
-+
-+/**
-+ * qman_ceetm_cq_claim - Claims an individual class queue.
-+ * @cq: the returned class queue object, if successful.
-+ * @channel: the class queue channel.
-+ * @idx: is from 0 to 7 (representing CQ0 to CQ7).
-+ * @ccg: represents the class congestion group that this class queue should be
-+ * subscribed to, or NULL if no congestion group membership is desired.
-+ *
-+ * Returns zero for success, or -EINVAL if @idx is out of range 0 - 7 or
-+ * if this class queue has been claimed, or configure class queue command
-+ * returns error, or returns -ENOMEM if allocating CQ memory fails.
-+ */
-+int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
-+ struct qm_ceetm_channel *channel,
-+ unsigned int idx,
-+ struct qm_ceetm_ccg *ccg);
-+
-+/**
-+ * qman_ceetm_cq_claim_A - Claims a class queue group A.
-+ * @cq: the returned class queue object, if successful.
-+ * @channel: the class queue channel.
-+ * @idx: is from 8 to 15 if only group A exists, otherwise it is from 8 to 11.
-+ * @ccg: represents the class congestion group that this class queue should be
-+ * subscribed to, or NULL if no congestion group membership is desired.
-+ *
-+ * Return zero for success, or -EINVAL if @idx is out the range or if
-+ * this class queue has been claimed or configure class queue command returns
-+ * error, or returns -ENOMEM if allocating CQ memory fails.
-+ */
-+int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
-+ struct qm_ceetm_channel *channel,
-+ unsigned int idx,
-+ struct qm_ceetm_ccg *ccg);
-+
-+/**
-+ * qman_ceetm_cq_claim_B - Claims a class queue group B.
-+ * @cq: the returned class queue object, if successful.
-+ * @channel: the class queue channel.
-+ * @idx: is from 0 to 3 (CQ12 to CQ15).
-+ * @ccg: represents the class congestion group that this class queue should be
-+ * subscribed to, or NULL if no congestion group membership is desired.
-+ *
-+ * Return zero for success, or -EINVAL if @idx is out the range or if
-+ * this class queue has been claimed or configure class queue command returns
-+ * error, or returns -ENOMEM if allocating CQ memory fails.
-+ */
-+int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
-+ struct qm_ceetm_channel *channel,
-+ unsigned int idx,
-+ struct qm_ceetm_ccg *ccg);
-+
-+/**
-+ * qman_ceetm_cq_release - Releases a previously claimed class queue.
-+ * @cq: The class queue to be released.
-+ *
-+ * Return zero for success, or -EBUSY if the dependent objects (eg. logical
-+ * FQIDs) have not been released.
-+ */
-+int qman_ceetm_cq_release(struct qm_ceetm_cq *cq);
-+
-+/**
-+ * qman_ceetm_set_queue_weight
-+ * qman_ceetm_get_queue_weight - Configure/query the weight of a grouped class
-+ * queue.
-+ * @cq: the given class queue.
-+ * @weight_code: the desired weight code to set for the given class queue for
-+ * "set" function, or the queried weight code for "get" function.
-+ *
-+ * Grouped class queues have a default weight code of zero, which corresponds to
-+ * a scheduler weighting of 1. This function can be used to modify a grouped
-+ * class queue to another weight. (Use the helpers qman_ceetm_wbfs2ratio()
-+ * and qman_ceetm_ratio2wbfs() to convert between these 'weight_code' values
-+ * and the corresponding sharing weight.)
-+ *
-+ * Returns zero for success, or -EIO if the configure weight command returns
-+ * error for "set" function, or -EINVAL if the query command returns
-+ * error for "get" function.
-+ * See section "CEETM Weighted Scheduling among Grouped Classes" in Reference
-+ * Manual for weight and weight code.
-+ */
-+int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
-+ struct qm_ceetm_weight_code *weight_code);
-+int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
-+ struct qm_ceetm_weight_code *weight_code);
-+
-+/**
-+ * qman_ceetm_set_queue_weight_in_ratio
-+ * qman_ceetm_get_queue_weight_in_ratio - Configure/query the weight of a
-+ * grouped class queue.
-+ * @cq: the given class queue.
-+ * @ratio: the weight in ratio. It should be the real ratio number multiplied
-+ * by 100 to get rid of the fractional part.
-+ *
-+ * Returns zero for success, or -EIO if the configure weight command returns
-+ * error for "set" function, or -EINVAL if the query command returns
-+ * error for "get" function.
-+ */
-+int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio);
-+int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio);
-+
-+/* Weights are encoded using a pseudo-exponential scheme. The weight codes 0,
-+ * 32, 64, [...] correspond to weights of 1, 2, 4, [...]. The weights
-+ * corresponding to intermediate weight codes are calculated using linear
-+ * interpolation on the inverted values. Or put another way, the inverse weights
-+ * for each 32nd weight code are 1, 1/2, 1/4, [...], and so the intervals
-+ * between these are divided linearly into 32 intermediate values, the inverses
-+ * of which form the remaining weight codes.
-+ *
-+ * The Weighted Bandwidth Fair Scheduling (WBFS) algorithm provides a form of
-+ * scheduling within a group of class queues (group A or B). Weights are used to
-+ * normalise the class queues to an underlying BFS algorithm where all class
-+ * queues are assumed to require "equal bandwidth". So the weights referred to
-+ * by the weight codes act as divisors on the size of frames being enqueued. Ie.
-+ * if one class queue in a group is assigned a weight of 2 whilst the other class
-+ * queues in the group keep the default weight of 1, then the WBFS scheduler
-+ * will effectively treat all frames enqueued on the weight-2 class queue as
-+ * having half the number of bytes they really have. Ie. if all other things are
-+ * equal, that class queue would get twice as much bytes-per-second bandwidth as
-+ * the others. So weights should be chosen to provide bandwidth ratios between
-+ * members of the same class queue group. These weights have no bearing on
-+ * behaviour outside that group's WBFS mechanism though.
-+ */
-+
-+/**
-+ * qman_ceetm_wbfs2ratio - Given a weight code ('wbfs'), an accurate fractional
-+ * representation of the corresponding weight is given (in order to not lose
-+ * any precision).
-+ * @weight_code: The given weight code in WBFS.
-+ * @numerator: the numerator part of the weight computed by the weight code.
-+ * @denominator: the denominator part of the weight computed by the weight code
-+ *
-+ * Returns zero for success or -EINVAL if the given weight code is illegal.
-+ */
-+int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
-+ u32 *numerator,
-+ u32 *denominator);
-+/**
-+ * qman_ceetm_ratio2wbfs - Given a weight, find the nearest possible weight code.
-+ * If the user needs to know how close this is, convert the resulting weight
-+ * code back to a weight and compare.
-+ * @numerator: numerator part of the given weight.
-+ * @denominator: denominator part of the given weight.
-+ * @weight_code: the weight code computed from the given weight.
-+ *
-+ * Returns zero for success, or -ERANGE if "numerator/denominator" is outside
-+ * the range of weights.
-+ */
-+int qman_ceetm_ratio2wbfs(u32 numerator,
-+ u32 denominator,
-+ struct qm_ceetm_weight_code *weight_code,
-+ int rounding);
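Combining the two helpers with qman_ceetm_set_queue_weight(), a 3:2 weighting could be requested and the fraction actually programmed read back, as sketched below.

static int example_weighting(struct qm_ceetm_cq *cq, u32 *num, u32 *den)
{
        struct qm_ceetm_weight_code wc;
        int ret;

        /* Nearest weight code to a 3:2 weighting, rounding to the closest */
        ret = qman_ceetm_ratio2wbfs(3, 2, &wc, 0);
        if (ret)
                return ret;
        ret = qman_ceetm_set_queue_weight(cq, &wc);
        if (ret)
                return ret;
        /* Report the exact fraction the programmed weight code represents */
        return qman_ceetm_wbfs2ratio(&wc, num, den);
}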
-+
-+#define QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER 0x1
-+/**
-+ * qman_ceetm_cq_get_dequeue_statistics - Get the statistics provided by CEETM
-+ * CQ counters.
-+ * @cq: the given CQ object.
-+ * @flags: indicates whether the statistics counter will be cleared after query.
-+ * @frame_count: The number of the frames that have been counted since the
-+ * counter was cleared last time.
-+ * @byte_count: the number of bytes in all frames that have been counted.
-+ *
-+ * Return zero for success or -EINVAL if query statistics command returns error.
-+ *
-+ */
-+int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
-+ u64 *frame_count, u64 *byte_count);
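A trivial wrapper, shown only as a sketch, that reads the dequeue counters and clears them in the same query.

static int example_cq_stats(struct qm_ceetm_cq *cq, u64 *frames, u64 *bytes)
{
        return qman_ceetm_cq_get_dequeue_statistics(cq,
                        QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER,
                        frames, bytes);
}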
-+
-+/**
-+ * qman_ceetm_drain_cq - drain the CQ till it is empty.
-+ * @cq: the given CQ object.
-+ * Return 0 for success or -EINVAL for unsuccessful command to empty CQ.
-+ */
-+int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq);
-+
-+ /* ---------------------- */
-+ /* CEETM :: logical FQIDs */
-+ /* ---------------------- */
-+/**
-+ * qman_ceetm_lfq_claim - Claims an unused logical FQID, associates it with
-+ * the given class queue.
-+ * @lfq: the returned lfq object, if successful.
-+ * @cq: the class queue which needs to claim a LFQID.
-+ *
-+ * Return zero for success, or -ENODEV if no LFQID is available or -ENOMEM if
-+ * allocating memory for lfq fails, or -EINVAL if configuring LFQMT fails.
-+ */
-+int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
-+ struct qm_ceetm_cq *cq);
-+
-+/**
-+ * qman_ceetm_lfq_release - Releases a previously claimed logical FQID.
-+ * @lfq: the lfq to be released.
-+ *
-+ * Return zero for success.
-+ */
-+int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq);
-+
-+/**
-+ * qman_ceetm_lfq_set_context
-+ * qman_ceetm_lfq_get_context - Set/get the context_a/context_b pair to the
-+ * "dequeue context table" associated with the logical FQID.
-+ * @lfq: the given logical FQ object.
-+ * @context_a: contextA of the dequeue context.
-+ * @context_b: contextB of the dequeue context.
-+ *
-+ * Returns zero for success, or -EINVAL if there is error to set/get the
-+ * context pair.
-+ */
-+int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq,
-+ u64 context_a,
-+ u32 context_b);
-+int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq,
-+ u64 *context_a,
-+ u32 *context_b);
-+
-+/**
-+ * qman_ceetm_create_fq - Initialise a FQ object for the LFQ.
-+ * @lfq: the given logical FQ.
-+ * @fq: the FQ object created for the given logical FQ.
-+ *
-+ * The FQ object can be used in qman_enqueue() and qman_enqueue_orp() APIs to
-+ * target a logical FQID (and the class queue it is associated with).
-+ * Note that this FQ object can only be used for enqueues, and
-+ * in the case of qman_enqueue_orp() it can not be used as the 'orp' parameter,
-+ * only as 'fq'. This FQ object can not (and shouldn't) be destroyed, it is only
-+ * valid as long as the underlying 'lfq' remains claimed. It is the user's
-+ * responsibility to ensure that the underlying 'lfq' is not released until any
-+ * enqueues to this FQ object have completed. The only field the user needs to
-+ * fill in is fq->cb.ern, as that enqueue rejection handler is the callback that
-+ * could conceivably be called on this FQ object. This API can be called
-+ * multiple times to create multiple FQ objects referring to the same logical
-+ * FQID, and any enqueue rejections will respect the callback of the object that
-+ * issued the enqueue (and will identify the object via the parameter passed to
-+ * the callback too). There is no 'flags' parameter to this API as there is for
-+ * qman_create_fq() - the created FQ object behaves as though qman_create_fq()
-+ * had been called with the single flag QMAN_FQ_FLAG_NO_MODIFY.
-+ *
-+ * Returns 0 for success.
-+ */
-+int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq);
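To tie the CEETM sections together, the claim chain runs sub-portal -> LNI -> channel -> class queue -> logical FQID -> enqueuable FQ object. The sketch below walks that chain with illustrative indices; the enumerator qm_dc_portal_fman0 is assumed to name the first Fman DCP, and error unwinding (releasing earlier claims) is omitted for brevity.

static int example_ceetm_path(struct qman_fq *fq)
{
        struct qm_ceetm_sp *sp;
        struct qm_ceetm_lni *lni;
        struct qm_ceetm_channel *channel;
        struct qm_ceetm_cq *cq;
        struct qm_ceetm_lfq *lfq;
        int ret;

        ret = qman_ceetm_sp_claim(&sp, qm_dc_portal_fman0, 0);
        if (ret)
                return ret;
        ret = qman_ceetm_lni_claim(&lni, qm_dc_portal_fman0, 0);
        if (ret)
                return ret;
        ret = qman_ceetm_sp_set_lni(sp, lni);
        if (ret)
                return ret;
        ret = qman_ceetm_channel_claim(&channel, lni);
        if (ret)
                return ret;
        ret = qman_ceetm_cq_claim(&cq, channel, 0, NULL);       /* CQ0, no CCG */
        if (ret)
                return ret;
        ret = qman_ceetm_lfq_claim(&lfq, cq);
        if (ret)
                return ret;
        /* fq->cb.ern should be set by the caller before enqueuing */
        return qman_ceetm_create_fq(lfq, fq);
}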
-+
-+ /* -------------------------------- */
-+ /* CEETM :: class congestion groups */
-+ /* -------------------------------- */
-+
-+/**
-+ * qman_ceetm_ccg_claim - Claims an unused CCG.
-+ * @ccg: the returned CCG object, if successful.
-+ * @channel: the given class queue channel
-+ * @cscn: the callback function of this CCG.
-+ * @cb_ctx: the corresponding context to be used if state change
-+ * notifications are later enabled for this CCG.
-+ *
-+ * The congestion group is local to the given class queue channel, so only
-+ * class queues within the channel can be associated with that congestion group.
-+ * The association of class queues to congestion groups occurs when the class
-+ * queues are claimed, see qman_ceetm_cq_claim() and related functions.
-+ * Congestion groups are in a "zero" state when initially claimed, and they are
-+ * returned to that state when released.
-+ *
-+ * Return zero for success, or -EINVAL if no CCG in the channel is available.
-+ */
-+int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
-+ struct qm_ceetm_channel *channel,
-+ unsigned int idx,
-+ void (*cscn)(struct qm_ceetm_ccg *,
-+ void *cb_ctx,
-+ int congested),
-+ void *cb_ctx);
-+
-+/**
-+ * qman_ceetm_ccg_release - Releases a previously claimed CCG.
-+ * @ccg: the given ccg.
-+ *
-+ * Returns zero for success, or -EBUSY if the given ccg's dependent objects
-+ * (class queues that are associated with the CCG) have not been released.
-+ */
-+int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg);
-+
-+/* This struct is used to specify attributes for a CCG. The 'we_mask' field
-+ * controls which CCG attributes are to be updated, and the remainder specify
-+ * the values for those attributes. A CCG counts either frames or the bytes
-+ * within those frames, but not both ('mode'). A CCG can optionally cause
-+ * enqueues to be rejected, due to tail-drop or WRED, or both (they are
-+ * independent options, 'td_en' and 'wr_en_g,wr_en_y,wr_en_r'). Tail-drop can be
-+ * level-triggered due to a single threshold ('td_thres') or edge-triggered due
-+ * to a "congestion state", but not both ('td_mode'). Congestion state has
-+ * distinct entry and exit thresholds ('cs_thres_in' and 'cs_thres_out'), and
-+ * notifications can be sent to software when the CCG goes into and out of this
-+ * congested state ('cscn_en'). */
-+struct qm_ceetm_ccg_params {
-+ /* Boolean fields together in a single bitfield struct */
-+ struct {
-+ /* Whether to count bytes or frames. 1==frames */
-+ u8 mode:1;
-+ /* En/disable tail-drop. 1==enable */
-+ u8 td_en:1;
-+ /* Tail-drop on congestion-state or threshold. 1=threshold */
-+ u8 td_mode:1;
-+ /* Generate congestion state change notifications. 1==enable */
-+ u8 cscn_en:1;
-+ /* Enable WRED rejections (per colour). 1==enable */
-+ u8 wr_en_g:1;
-+ u8 wr_en_y:1;
-+ u8 wr_en_r:1;
-+ } __packed;
-+ /* Tail-drop threshold. See qm_cgr_thres_[gs]et64(). */
-+ struct qm_cgr_cs_thres td_thres;
-+ /* Congestion state thresholds, for entry and exit. */
-+ struct qm_cgr_cs_thres cs_thres_in;
-+ struct qm_cgr_cs_thres cs_thres_out;
-+ /* Overhead accounting length. Per-packet "tax", from -128 to +127 */
-+ signed char oal;
-+ /* Congestion state change notification for DCP portal, virtual CCGID*/
-+ /* WRED parameters. */
-+ struct qm_cgr_wr_parm wr_parm_g;
-+ struct qm_cgr_wr_parm wr_parm_y;
-+ struct qm_cgr_wr_parm wr_parm_r;
-+};
-+/* Bits used in 'we_mask' to qman_ceetm_ccg_set(), controls which attributes of
-+ * the CCGR are to be updated. */
-+#define QM_CCGR_WE_MODE 0x0001 /* mode (bytes/frames) */
-+#define QM_CCGR_WE_CS_THRES_IN 0x0002 /* congestion state entry threshold */
-+#define QM_CCGR_WE_TD_EN 0x0004 /* congestion state tail-drop enable */
-+#define QM_CCGR_WE_CSCN_TUPD 0x0008 /* CSCN target update */
-+#define QM_CCGR_WE_CSCN_EN 0x0010 /* congestion notification enable */
-+#define QM_CCGR_WE_WR_EN_R 0x0020 /* WRED enable - red */
-+#define QM_CCGR_WE_WR_EN_Y 0x0040 /* WRED enable - yellow */
-+#define QM_CCGR_WE_WR_EN_G 0x0080 /* WRED enable - green */
-+#define QM_CCGR_WE_WR_PARM_R 0x0100 /* WRED parameters - red */
-+#define QM_CCGR_WE_WR_PARM_Y 0x0200 /* WRED parameters - yellow */
-+#define QM_CCGR_WE_WR_PARM_G 0x0400 /* WRED parameters - green */
-+#define QM_CCGR_WE_OAL 0x0800 /* overhead accounting length */
-+#define QM_CCGR_WE_CS_THRES_OUT 0x1000 /* congestion state exit threshold */
-+#define QM_CCGR_WE_TD_THRES 0x2000 /* tail-drop threshold */
-+#define QM_CCGR_WE_TD_MODE 0x4000 /* tail-drop mode (state/threshold) */
-+#define QM_CCGR_WE_CDV 0x8000 /* cdv */
-+
-+/**
-+ * qman_ceetm_ccg_set
-+ * qman_ceetm_ccg_get - Configure/query a subset of CCG attributes.
-+ * @ccg: the given CCG object.
-+ * @we_mask: the write enable mask.
-+ * @params: the parameters setting for this ccg
-+ *
-+ * Return 0 for success, or -EIO if configure ccg command returns error for
-+ * "set" function, or -EINVAL if query ccg command returns error for "get"
-+ * function.
-+ */
-+int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg,
-+ u16 we_mask,
-+ const struct qm_ceetm_ccg_params *params);
-+int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
-+ struct qm_ceetm_ccg_params *params);
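A sketch of a partial update using the write-enable mask: switch the CCG to frame counting and enable congestion state change notifications, leaving every other attribute untouched. Threshold fields would additionally need their QM_CCGR_WE_* bits and the threshold helper mentioned above.

static int example_ccg_config(struct qm_ceetm_ccg *ccg)
{
        struct qm_ceetm_ccg_params params = {};

        params.mode = 1;        /* count frames rather than bytes */
        params.cscn_en = 1;     /* generate congestion state notifications */

        return qman_ceetm_ccg_set(ccg, QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN,
                                  &params);
}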
-+
-+/** qman_ceetm_cscn_swp_set - Add or remove a software portal from the target
-+ * mask.
-+ * qman_ceetm_cscn_swp_get - Query whether a given software portal index is
-+ * in the cscn target mask.
-+ * @ccg: the given CCG object.
-+ * @swp_idx: the index of the software portal.
-+ * @cscn_enabled: 1: Set the swp to be cscn target. 0: remove the swp from
-+ * the target mask.
-+ * @we_mask: the write enable mask.
-+ * @params: the parameters setting for this ccg
-+ *
-+ * Return 0 for success, or -EINVAL if command in set/get function fails.
-+ */
-+int qman_ceetm_cscn_swp_set(struct qm_ceetm_ccg *ccg,
-+ u16 swp_idx,
-+ unsigned int cscn_enabled,
-+ u16 we_mask,
-+ const struct qm_ceetm_ccg_params *params);
-+int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
-+ u16 swp_idx,
-+ unsigned int *cscn_enabled);
-+
-+/** qman_ceetm_cscn_dcp_set - Add or remove a direct connect portal from the
-+ * target mask.
-+ * qman_ceetm_cscn_dcp_get - Query whether a given direct connect portal index
-+ * is in the cscn target mask.
-+ * @ccg: the given CCG object.
-+ * @dcp_idx: the index of the direct connect portal.
-+ * @vcgid: congestion state change notification for dcp portal, virtual CGID.
-+ * @cscn_enabled: 1: Set the dcp to be cscn target. 0: remove the dcp from
-+ * the target mask.
-+ * @we_mask: the write enable mask.
-+ * @params: the parameters setting for this ccg
-+ *
-+ * Return 0 for success, or -EINVAL if command in set/get function fails.
-+ */
-+int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
-+ u16 dcp_idx,
-+ u8 vcgid,
-+ unsigned int cscn_enabled,
-+ u16 we_mask,
-+ const struct qm_ceetm_ccg_params *params);
-+int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
-+ u16 dcp_idx,
-+ u8 *vcgid,
-+ unsigned int *cscn_enabled);
-+
-+/**
-+ * qman_ceetm_ccg_get_reject_statistics - Get the statistics provided by
-+ * CEETM CCG counters.
-+ * @ccg: the given CCG object.
-+ * @flags: indicates whether the statistics counter will be cleared after query.
-+ * @frame_count: The number of the frames that have been counted since the
-+ * counter was cleared last time.
-+ * @byte_count: the number of bytes in all frames that have been counted.
-+ *
-+ * Return zero for success or -EINVAL if query statistics command returns error.
-+ *
-+ */
-+int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
-+ u64 *frame_count, u64 *byte_count);
-+
-+/**
-+ * qman_ceetm_query_lfqmt - Query the logical frame queue mapping table
-+ * @lfqid: Logical Frame Queue ID
-+ * @lfqmt_query: Results of the query command
-+ *
-+ * Returns zero for success or -EIO if the query command returns error.
-+ *
-+ */
-+int qman_ceetm_query_lfqmt(int lfqid,
-+ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query);
-+
-+/**
-+ * qman_ceetm_query_cq - Queries a CEETM CQ
-+ * @cqid: the channel ID (first byte) followed by the CQ idx
-+ * @dcpid: CEETM portal ID
-+ * @cq_query: storage for the queried CQ fields
-+ *
-+ * Returns zero for success or -EIO if the query command returns error.
-+ *
-+*/
-+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
-+ struct qm_mcr_ceetm_cq_query *cq_query);
-+
-+/**
-+ * qman_ceetm_query_write_statistics - Query (and optionally write) statistics
-+ * @cid: Target ID (CQID or CCGRID)
-+ * @dcp_idx: CEETM portal ID
-+ * @command_type: One of the following:
-+ * 0 = Query dequeue statistics. CID carries the CQID to be queried.
-+ * 1 = Query and clear dequeue statistics. CID carries the CQID to be queried
-+ * 2 = Write dequeue statistics. CID carries the CQID to be written.
-+ * 3 = Query reject statistics. CID carries the CCGRID to be queried.
-+ * 4 = Query and clear reject statistics. CID carries the CCGRID to be queried
-+ * 5 = Write reject statistics. CID carries the CCGRID to be written
-+ * @frame_count: Frame count value to be written if this is a write command
-+ * @byte_count: Bytes count value to be written if this is a write command
-+ *
-+ * Returns zero for success or -EIO if the query command returns error.
-+ */
-+int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
-+ u16 command_type, u64 frame_count,
-+ u64 byte_count);
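/*
 * Illustrative sketch, not part of the original patch: issue command_type 1
 * ("query and clear dequeue statistics") for one CQID, as documented above.
 * The DCP portal is passed as 0 purely as a placeholder, and the frame/byte
 * counts are only used by the write commands, so zeros are supplied.
 */
static int example_clear_cq_dequeue_stats(u16 cqid)
{
	return qman_ceetm_query_write_statistics(cqid, (enum qm_dc_portal)0,
						 1, 0, 0);
}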
-+
-+/**
-+ * qman_set_wpm - Set waterfall power management
-+ *
-+ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
-+ *
-+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
-+ * accessible.
-+ */
-+int qman_set_wpm(int wpm_enable);
-+
-+/**
-+ * qman_get_wpm - Query the waterfall power management setting
-+ *
-+ * @wpm_enable: output boolean, 1 = wpm enabled, 0 = wpm disabled.
-+ *
-+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
-+ * accessible.
-+ */
-+int qman_get_wpm(int *wpm_enable);
-+
-+/* The below qman_p_***() variants might be called in a migration situation
-+ * (e.g. cpu hotplug). They are used to continue accessing the portal that
-+ * execution was affine to prior to migration.
-+ * @qman_portal specifies which portal the APIs will use.
-+*/
-+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
-+ *p);
-+int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
-+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
-+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
-+u32 qman_p_poll_slow(struct qman_portal *p);
-+void qman_p_poll(struct qman_portal *p);
-+void qman_p_stop_dequeues(struct qman_portal *p);
-+void qman_p_start_dequeues(struct qman_portal *p);
-+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
-+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
-+u32 qman_p_static_dequeue_get(struct qman_portal *p);
-+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
-+ int park_request);
-+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
-+ u32 flags __maybe_unused, u32 vdqcr);
-+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_fd *fd, u32 flags);
-+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_fd *fd, u32 flags,
-+ struct qman_fq *orp, u16 orp_seqnum);
-+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
-+ const struct qm_fd *fd, u32 flags,
-+ qman_cb_precommit cb, void *cb_arg);
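/*
 * Illustrative sketch, not part of the original patch: after a CPU migration,
 * keep servicing the portal that execution used to be affine to by naming it
 * explicitly through the qman_p_*() variants above. Both the portal pointer
 * and the pool-channel mask are assumed to be obtained elsewhere.
 */
static void example_service_migrated_portal(struct qman_portal *p, u32 pools)
{
	qman_p_static_dequeue_add(p, pools);
	qman_p_start_dequeues(p);
	/* Process up to 16 DQRR entries on this specific portal. */
	qman_p_poll_dqrr(p, 16);
}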
-+
-+static inline int qman_is_probed(void) {
-+ return 1;
-+}
-+
-+
-+static inline int qman_portals_probed(void) {
-+ return 1;
-+}
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif /* FSL_QMAN_H */
---- /dev/null
-+++ b/include/linux/fsl_usdpaa.h
-@@ -0,0 +1,372 @@
-+/* Copyright 2011-2012 Freescale Semiconductor, Inc.
-+ *
-+ * This file is licensed under the terms of the GNU General Public License
-+ * version 2. This program is licensed "as is" without any warranty of any
-+ * kind, whether express or implied.
-+ */
-+
-+#ifndef FSL_USDPAA_H
-+#define FSL_USDPAA_H
-+
-+#ifdef __cplusplus
-+extern "C" {
-+#endif
-+
-+#include <linux/uaccess.h>
-+#include <linux/ioctl.h>
-+#include <linux/fsl_qman.h> /* For "enum qm_channel" */
-+#include <linux/compat.h>
-+
-+#ifdef CONFIG_FSL_USDPAA
-+
-+/******************************/
-+/* Allocation of resource IDs */
-+/******************************/
-+
-+/* This enum is used to distinguish between the type of underlying object being
-+ * manipulated. */
-+enum usdpaa_id_type {
-+ usdpaa_id_fqid,
-+ usdpaa_id_bpid,
-+ usdpaa_id_qpool,
-+ usdpaa_id_cgrid,
-+ usdpaa_id_ceetm0_lfqid,
-+ usdpaa_id_ceetm0_channelid,
-+ usdpaa_id_ceetm1_lfqid,
-+ usdpaa_id_ceetm1_channelid,
-+ usdpaa_id_max /* <-- not a valid type, represents the number of types */
-+};
-+#define USDPAA_IOCTL_MAGIC 'u'
-+struct usdpaa_ioctl_id_alloc {
-+ uint32_t base; /* Return value, the start of the allocated range */
-+ enum usdpaa_id_type id_type; /* what kind of resource(s) to allocate */
-+ uint32_t num; /* how many IDs to allocate (and return value) */
-+ uint32_t align; /* must be a power of 2, 0 is treated like 1 */
-+ int partial; /* whether to allow less than 'num' */
-+};
-+struct usdpaa_ioctl_id_release {
-+ /* Input; */
-+ enum usdpaa_id_type id_type;
-+ uint32_t base;
-+ uint32_t num;
-+};
-+struct usdpaa_ioctl_id_reserve {
-+ enum usdpaa_id_type id_type;
-+ uint32_t base;
-+ uint32_t num;
-+};
-+
-+
-+/* ioctl() commands */
-+#define USDPAA_IOCTL_ID_ALLOC \
-+ _IOWR(USDPAA_IOCTL_MAGIC, 0x01, struct usdpaa_ioctl_id_alloc)
-+#define USDPAA_IOCTL_ID_RELEASE \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x02, struct usdpaa_ioctl_id_release)
-+#define USDPAA_IOCTL_ID_RESERVE \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x0A, struct usdpaa_ioctl_id_reserve)
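/*
 * Illustrative user-space sketch, not part of the original patch: allocate a
 * naturally aligned block of eight FQIDs through USDPAA_IOCTL_ID_ALLOC. The
 * device node path "/dev/fsl-usdpaa" is an assumption of this sketch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsl_usdpaa.h>

static int example_alloc_fqids(void)
{
	struct usdpaa_ioctl_id_alloc req = {
		.id_type = usdpaa_id_fqid,
		.num = 8,	/* how many IDs to allocate */
		.align = 8,	/* base will be a multiple of 8 */
		.partial = 0,	/* all-or-nothing */
	};
	int fd = open("/dev/fsl-usdpaa", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, USDPAA_IOCTL_ID_ALLOC, &req) < 0) {
		close(fd);
		return -1;
	}
	printf("allocated FQIDs %u..%u\n", req.base, req.base + req.num - 1);
	close(fd);
	return 0;
}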
-+
-+/**********************/
-+/* Mapping DMA memory */
-+/**********************/
-+
-+/* Maximum length for a map name, including NULL-terminator */
-+#define USDPAA_DMA_NAME_MAX 16
-+/* Flags for requesting DMA maps. Maps are private+unnamed or sharable+named.
-+ * For a sharable and named map, specify _SHARED (whether creating one or
-+ * binding to an existing one). If _SHARED is specified and _CREATE is not, then
-+ * the mapping must already exist. If _SHARED and _CREATE are specified and the
-+ * mapping doesn't already exist, it will be created. If _SHARED and _CREATE are
-+ * specified and the mapping already exists, the mapping will fail unless _LAZY
-+ * is specified. When mapping to a pre-existing sharable map, the length must be
-+ * an exact match. Lengths must be a power-of-4 multiple of page size.
-+ *
-+ * Note that this does not actually map the memory to user-space, that is done
-+ * by a subsequent mmap() using the page offset returned from this ioctl(). The
-+ * ioctl() is what gives the process permission to do this, and a page-offset
-+ * with which to do so.
-+ */
-+#define USDPAA_DMA_FLAG_SHARE 0x01
-+#define USDPAA_DMA_FLAG_CREATE 0x02
-+#define USDPAA_DMA_FLAG_LAZY 0x04
-+#define USDPAA_DMA_FLAG_RDONLY 0x08
-+struct usdpaa_ioctl_dma_map {
-+ /* Output parameters - virtual and physical addresses */
-+ void *ptr;
-+ uint64_t phys_addr;
-+ /* Input parameter, the length of the region to be created (or if
-+ * mapping an existing region, this must match it). Must be a power-of-4
-+ * multiple of page size. */
-+ uint64_t len;
-+ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
-+ uint32_t flags;
-+ /* If _FLAG_SHARE is specified, the name of the region to be created (or
-+ * of the existing mapping to use). */
-+ char name[USDPAA_DMA_NAME_MAX];
-+ /* If this ioctl() creates the mapping, this is an input parameter
-+ * stating whether the region supports locking. If mapping an existing
-+ * region, this is a return value indicating the same thing. */
-+ int has_locking;
-+ /* In the case of a successful map with _CREATE and _LAZY, this return
-+ * value indicates whether we created the mapped region or whether it
-+ * already existed. */
-+ int did_create;
-+};
-+
-+#ifdef CONFIG_COMPAT
-+struct usdpaa_ioctl_dma_map_compat {
-+ /* Output parameters - virtual and physical addresses */
-+ compat_uptr_t ptr;
-+ uint64_t phys_addr;
-+ /* Input parameter, the length of the region to be created (or if
-+ * mapping an existing region, this must match it). Must be a power-of-4
-+ * multiple of page size. */
-+ uint64_t len;
-+ /* Input parameter, the USDPAA_DMA_FLAG_* settings. */
-+ uint32_t flags;
-+ /* If _FLAG_SHARE is specified, the name of the region to be created (or
-+ * of the existing mapping to use). */
-+ char name[USDPAA_DMA_NAME_MAX];
-+ /* If this ioctl() creates the mapping, this is an input parameter
-+ * stating whether the region supports locking. If mapping an existing
-+ * region, this is a return value indicating the same thing. */
-+ int has_locking;
-+ /* In the case of a successful map with _CREATE and _LAZY, this return
-+ * value indicates whether we created the mapped region or whether it
-+ * already existed. */
-+ int did_create;
-+};
-+
-+#define USDPAA_IOCTL_DMA_MAP_COMPAT \
-+ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map_compat)
-+#endif
-+
-+
-+#define USDPAA_IOCTL_DMA_MAP \
-+ _IOWR(USDPAA_IOCTL_MAGIC, 0x03, struct usdpaa_ioctl_dma_map)
-+/* munmap() does not remove the DMA map, just the user-space mapping to it.
-+ * This ioctl will do both (though you can munmap() before calling the ioctl
-+ * too). */
-+#define USDPAA_IOCTL_DMA_UNMAP \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x04, unsigned char)
-+/* We implement a cross-process locking scheme per DMA map. Call this ioctl()
-+ * with a mmap()'d address, and the process will (interruptible) sleep if the
-+ * lock is already held by another process. Process destruction will
-+ * automatically clean up any held locks. */
-+#define USDPAA_IOCTL_DMA_LOCK \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x05, unsigned char)
-+#define USDPAA_IOCTL_DMA_UNLOCK \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x06, unsigned char)
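/*
 * Illustrative user-space sketch, not part of the original patch (same
 * includes as the ID-allocation sketch above): create a shared, named DMA
 * region with USDPAA_IOCTL_DMA_MAP. As noted above, the ioctl only grants
 * permission and selects the region; making it visible in user space still
 * requires a subsequent mmap() on the same descriptor, whose exact arguments
 * are driver-specific and not shown here. The name and length are placeholders.
 */
static int example_dma_map(int usdpaa_fd)
{
	struct usdpaa_ioctl_dma_map map = {
		.len = 4 * 1024 * 1024,	/* power-of-4 multiple of page size */
		.flags = USDPAA_DMA_FLAG_SHARE | USDPAA_DMA_FLAG_CREATE |
			 USDPAA_DMA_FLAG_LAZY,
		.name = "example-bufs",
	};

	if (ioctl(usdpaa_fd, USDPAA_IOCTL_DMA_MAP, &map) < 0)
		return -1;
	/* On success the driver has filled in map.ptr and map.phys_addr. */
	return 0;
}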
-+
-+/***************************************/
-+/* Mapping and using QMan/BMan portals */
-+/***************************************/
-+enum usdpaa_portal_type {
-+ usdpaa_portal_qman,
-+ usdpaa_portal_bman,
-+};
-+
-+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
-+
-+struct usdpaa_ioctl_portal_map {
-+ /* Input parameter, is a qman or bman portal required. */
-+
-+ enum usdpaa_portal_type type;
-+	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
-+ for don't care. The portal index will be populated by the
-+ driver when the ioctl() successfully completes */
-+ uint32_t index;
-+
-+ /* Return value if the map succeeds, this gives the mapped
-+ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
-+ struct usdpaa_portal_map {
-+ void *cinh;
-+ void *cena;
-+ } addr;
-+ /* Qman-specific return values */
-+ uint16_t channel;
-+ uint32_t pools;
-+};
-+
-+#ifdef CONFIG_COMPAT
-+struct compat_usdpaa_ioctl_portal_map {
-+ /* Input parameter, is a qman or bman portal required. */
-+ enum usdpaa_portal_type type;
-+	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
-+ for don't care. The portal index will be populated by the
-+ driver when the ioctl() successfully completes */
-+ uint32_t index;
-+ /* Return value if the map succeeds, this gives the mapped
-+ * cache-inhibited (cinh) and cache-enabled (cena) addresses. */
-+ struct usdpaa_portal_map_compat {
-+ compat_uptr_t cinh;
-+ compat_uptr_t cena;
-+ } addr;
-+ /* Qman-specific return values */
-+ uint16_t channel;
-+ uint32_t pools;
-+};
-+#define USDPAA_IOCTL_PORTAL_MAP_COMPAT \
-+ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct compat_usdpaa_ioctl_portal_map)
-+#define USDPAA_IOCTL_PORTAL_UNMAP_COMPAT \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map_compat)
-+#endif
-+
-+#define USDPAA_IOCTL_PORTAL_MAP \
-+ _IOWR(USDPAA_IOCTL_MAGIC, 0x07, struct usdpaa_ioctl_portal_map)
-+#define USDPAA_IOCTL_PORTAL_UNMAP \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x08, struct usdpaa_portal_map)
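/*
 * Illustrative user-space sketch, not part of the original patch: map any
 * free QMan portal with USDPAA_IOCTL_PORTAL_MAP, letting the driver choose
 * the index. All outputs land back in the same structure.
 */
static int example_map_qman_portal(int usdpaa_fd,
				   struct usdpaa_ioctl_portal_map *map)
{
	map->type = usdpaa_portal_qman;
	map->index = QBMAN_ANY_PORTAL_IDX;	/* "don't care" */

	if (ioctl(usdpaa_fd, USDPAA_IOCTL_PORTAL_MAP, map) < 0)
		return -1;
	/* map->index, map->addr.cinh/cena, map->channel and map->pools are
	 * now populated by the driver. */
	return 0;
}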
-+
-+struct usdpaa_ioctl_irq_map {
-+ enum usdpaa_portal_type type; /* Type of portal to map */
-+ int fd; /* File descriptor that contains the portal */
-+ void *portal_cinh; /* Cache inhibited area to identify the portal */
-+};
-+
-+#define USDPAA_IOCTL_PORTAL_IRQ_MAP \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct usdpaa_ioctl_irq_map)
-+
-+#ifdef CONFIG_COMPAT
-+
-+struct compat_ioctl_irq_map {
-+ enum usdpaa_portal_type type; /* Type of portal to map */
-+ compat_int_t fd; /* File descriptor that contains the portal */
-+	compat_uptr_t portal_cinh;	/* Used to identify the portal */
-+};
-+
-+#define USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT \
-+ _IOW(USDPAA_IOCTL_MAGIC, 0x09, struct compat_ioctl_irq_map)
-+#endif
-+
-+/* ioctl to query the amount of DMA memory used in the system */
-+struct usdpaa_ioctl_dma_used {
-+ uint64_t free_bytes;
-+ uint64_t total_bytes;
-+};
-+#define USDPAA_IOCTL_DMA_USED \
-+ _IOR(USDPAA_IOCTL_MAGIC, 0x0B, struct usdpaa_ioctl_dma_used)
-+
-+/* ioctl to allocate a raw portal */
-+struct usdpaa_ioctl_raw_portal {
-+ /* inputs */
-+ enum usdpaa_portal_type type; /* Type of portal to allocate */
-+
-+ /* set to non zero to turn on stashing */
-+ uint8_t enable_stash;
-+ /* Stashing attributes for the portal */
-+ uint32_t cpu;
-+ uint32_t cache;
-+ uint32_t window;
-+
-+ /* Specifies the stash request queue this portal should use */
-+ uint8_t sdest;
-+
-+	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
-+ * for don't care. The portal index will be populated by the
-+ * driver when the ioctl() successfully completes */
-+ uint32_t index;
-+
-+ /* outputs */
-+ uint64_t cinh;
-+ uint64_t cena;
-+};
-+
-+#define USDPAA_IOCTL_ALLOC_RAW_PORTAL \
-+ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct usdpaa_ioctl_raw_portal)
-+
-+#define USDPAA_IOCTL_FREE_RAW_PORTAL \
-+ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct usdpaa_ioctl_raw_portal)
-+
-+#ifdef CONFIG_COMPAT
-+
-+struct compat_ioctl_raw_portal {
-+ /* inputs */
-+ enum usdpaa_portal_type type; /* Type of portal to allocate */
-+
-+ /* set to non zero to turn on stashing */
-+ uint8_t enable_stash;
-+ /* Stashing attributes for the portal */
-+ uint32_t cpu;
-+ uint32_t cache;
-+ uint32_t window;
-+ /* Specifies the stash request queue this portal should use */
-+ uint8_t sdest;
-+
-+	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
-+ * for don't care. The portal index will be populated by the
-+ * driver when the ioctl() successfully completes */
-+ uint32_t index;
-+
-+ /* outputs */
-+ uint64_t cinh;
-+ uint64_t cena;
-+};
-+
-+#define USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT \
-+ _IOWR(USDPAA_IOCTL_MAGIC, 0x0C, struct compat_ioctl_raw_portal)
-+
-+#define USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT \
-+ _IOR(USDPAA_IOCTL_MAGIC, 0x0D, struct compat_ioctl_raw_portal)
-+
-+#endif
-+
-+#ifdef __KERNEL__
-+
-+/* Early-boot hook */
-+int __init fsl_usdpaa_init_early(void);
-+
-+/* Fault-handling in arch/powerpc/mm/mem.c gives USDPAA an opportunity to detect
-+ * faults within its ranges via this hook. */
-+int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size);
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* CONFIG_FSL_USDPAA */
-+
-+#ifdef __KERNEL__
-+/* This interface is needed in a few places and though it's not specific to
-+ * USDPAA as such, creating a new header for it doesn't make any sense. The
-+ * qbman kernel driver implements this interface and uses it as the backend for
-+ * both the FQID and BPID allocators. The fsl_usdpaa driver also uses this
-+ * interface for tracking per-process allocations handed out to user-space. */
-+struct dpa_alloc {
-+ struct list_head free;
-+ spinlock_t lock;
-+ struct list_head used;
-+};
-+#define DECLARE_DPA_ALLOC(name) \
-+ struct dpa_alloc name = { \
-+ .free = { \
-+ .prev = &name.free, \
-+ .next = &name.free \
-+ }, \
-+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
-+ .used = { \
-+ .prev = &name.used, \
-+ .next = &name.used \
-+ } \
-+ }
-+static inline void dpa_alloc_init(struct dpa_alloc *alloc)
-+{
-+ INIT_LIST_HEAD(&alloc->free);
-+ INIT_LIST_HEAD(&alloc->used);
-+ spin_lock_init(&alloc->lock);
-+}
-+int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
-+ int partial);
-+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count);
-+void dpa_alloc_seed(struct dpa_alloc *alloc, u32 fqid, u32 count);
-+
-+/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
-+ * desired range is not available, or 0 for success. */
-+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base_id, u32 count);
-+/* Pops and returns contiguous ranges from the allocator. Returns -ENOMEM when
-+ * 'alloc' is empty. */
-+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count);
-+/* Returns 1 if the specified id is alloced, 0 otherwise */
-+int dpa_alloc_check(struct dpa_alloc *list, u32 id);
-+#endif /* __KERNEL__ */
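/*
 * Illustrative kernel-side sketch, not part of the original patch: seed a
 * dpa_alloc instance with a range of IDs and carve an aligned sub-range out
 * of it using the interface declared above. The ID range is arbitrary, and a
 * negative return from dpa_alloc_new() is treated as failure here (the exact
 * return convention is not documented in this header).
 */
static DECLARE_DPA_ALLOC(example_ids);

static int example_use_dpa_alloc(void)
{
	u32 base;

	/* Make IDs 0x100..0x1ff available to the allocator. */
	dpa_alloc_seed(&example_ids, 0x100, 0x100);
	/* Ask for 16 contiguous IDs with a 16-aligned base, no partial. */
	if (dpa_alloc_new(&example_ids, &base, 16, 16, 0) < 0)
		return -ENOMEM;
	/* ... use base..base+15 ... */
	dpa_alloc_free(&example_ids, base, 16);
	return 0;
}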
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif /* FSL_USDPAA_H */
---- a/include/linux/netdev_features.h
-+++ b/include/linux/netdev_features.h
-@@ -79,6 +79,7 @@ enum {
- NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
- NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
- NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
-+ NETIF_F_HW_ACCEL_MQ_BIT, /* Hardware-accelerated multiqueue */
-
- /*
- * Add your fresh new feature above and remember to update
-@@ -144,6 +145,7 @@ enum {
- #define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
- #define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
- #define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
-+#define NETIF_F_HW_ACCEL_MQ __NETIF_F(HW_ACCEL_MQ)
-
- /* Finds the next feature with the highest number of the range of start till 0.
- */
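/*
 * Illustrative sketch, not part of the original patch: a driver with
 * hardware-accelerated multiqueue support would advertise the new bit on its
 * net_device during setup (struct net_device from <linux/netdevice.h>).
 * Whether the bit should also appear in hw_features is a driver decision.
 */
static void example_advertise_hw_accel_mq(struct net_device *net_dev)
{
	net_dev->features |= NETIF_F_HW_ACCEL_MQ;
}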
---- /dev/null
-+++ b/include/uapi/linux/fmd/Kbuild
-@@ -0,0 +1,5 @@
-+header-y += integrations/
-+header-y += Peripherals/
-+
-+header-y += ioctls.h
-+header-y += net_ioctls.h
---- /dev/null
-+++ b/include/uapi/linux/fmd/Peripherals/Kbuild
-@@ -0,0 +1,4 @@
-+header-y += fm_ioctls.h
-+header-y += fm_port_ioctls.h
-+header-y += fm_pcd_ioctls.h
-+header-y += fm_test_ioctls.h
---- /dev/null
-+++ b/include/uapi/linux/fmd/Peripherals/fm_ioctls.h
-@@ -0,0 +1,628 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File fm_ioctls.h
-+
-+ @Description FM Char device ioctls
-+*//***************************************************************************/
-+#ifndef __FM_IOCTLS_H
-+#define __FM_IOCTLS_H
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_grp Frame Manager Linux IOCTL API
-+
-+ @Description FM Linux ioctls definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection FM IOCTL device ('/dev') definitions
-+*//***************************************************************************/
-+#define DEV_FM_NAME "fm" /**< Name of the FM chardev */
-+
-+#define DEV_FM_MINOR_BASE 0
-+#define DEV_FM_PCD_MINOR_BASE (DEV_FM_MINOR_BASE + 1) /*/dev/fmx-pcd */
-+#define DEV_FM_OH_PORTS_MINOR_BASE (DEV_FM_PCD_MINOR_BASE + 1) /*/dev/fmx-port-ohy */
-+#define DEV_FM_RX_PORTS_MINOR_BASE (DEV_FM_OH_PORTS_MINOR_BASE + FM_MAX_NUM_OF_OH_PORTS) /*/dev/fmx-port-rxy */
-+#define DEV_FM_TX_PORTS_MINOR_BASE (DEV_FM_RX_PORTS_MINOR_BASE + FM_MAX_NUM_OF_RX_PORTS) /*/dev/fmx-port-txy */
-+#define DEV_FM_MAX_MINORS (DEV_FM_TX_PORTS_MINOR_BASE + FM_MAX_NUM_OF_TX_PORTS)
-+
-+#define FM_IOC_NUM(n) (n)
-+#define FM_PCD_IOC_NUM(n) (n+20)
-+#define FM_PORT_IOC_NUM(n) (n+70)
-+/* @} */
-+
-+#define IOC_FM_MAX_NUM_OF_PORTS 64
-+
-+
-+/**************************************************************************//**
-+ @Description Enum for defining port types
-+ (must match enum e_FmPortType defined in fm_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_port_type {
-+ e_IOC_FM_PORT_TYPE_OH_OFFLINE_PARSING = 0, /**< Offline parsing port */
-+ e_IOC_FM_PORT_TYPE_RX, /**< 1G Rx port */
-+ e_IOC_FM_PORT_TYPE_RX_10G, /**< 10G Rx port */
-+ e_IOC_FM_PORT_TYPE_TX, /**< 1G Tx port */
-+ e_IOC_FM_PORT_TYPE_TX_10G, /**< 10G Tx port */
-+ e_IOC_FM_PORT_TYPE_DUMMY
-+} ioc_fm_port_type;
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_lib_grp FM library
-+
-+ @Description FM API functions, definitions and enums
-+ The FM module is the main driver module and is a mandatory module
-+ for FM driver users. Before any further module initialization,
-+ this module must be initialized.
-+ The FM is a singleton module. It is responsible for the common
-+ HW modules: FPM, DMA, common QMI and common BMI initialization, and
-+ for the run-time control routines. This module must always be
-+ initialized when working with any of the FM modules.
-+ NOTE - It is assumed that the FM library is initialized only by core No. 0!
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description FM Exceptions
-+*//***************************************************************************/
-+typedef enum ioc_fm_exceptions {
-+ e_IOC_FM_EX_DMA_BUS_ERROR, /**< DMA bus error. */
-+ e_IOC_EX_DMA_READ_ECC, /**< Read Buffer ECC error (Valid for FM rev < 6)*/
-+ e_IOC_EX_DMA_SYSTEM_WRITE_ECC, /**< Write Buffer ECC error on system side (Valid for FM rev < 6)*/
-+ e_IOC_EX_DMA_FM_WRITE_ECC, /**< Write Buffer ECC error on FM side (Valid for FM rev < 6)*/
-+ e_IOC_EX_DMA_SINGLE_PORT_ECC, /**< Single Port ECC error on FM side (Valid for FM rev > 6)*/
-+ e_IOC_EX_FPM_STALL_ON_TASKS, /**< Stall of tasks on FPM */
-+ e_IOC_EX_FPM_SINGLE_ECC, /**< Single ECC on FPM. */
-+ e_IOC_EX_FPM_DOUBLE_ECC, /**< Double ECC error on FPM ram access */
-+ e_IOC_EX_QMI_SINGLE_ECC, /**< Single ECC on QMI. */
-+ e_IOC_EX_QMI_DOUBLE_ECC, /**< Double bit ECC occurred on QMI */
-+ e_IOC_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/**< Dequeue from unknown port id */
-+ e_IOC_EX_BMI_LIST_RAM_ECC, /**< Linked List RAM ECC error */
-+ e_IOC_EX_BMI_STORAGE_PROFILE_ECC, /**< Storage Profile ECC Error */
-+ e_IOC_EX_BMI_STATISTICS_RAM_ECC, /**< Statistics Count RAM ECC Error Enable */
-+ e_IOC_EX_BMI_DISPATCH_RAM_ECC, /**< Dispatch RAM ECC Error Enable */
-+ e_IOC_EX_IRAM_ECC, /**< Double bit ECC occurred on IRAM*/
-+ e_IOC_EX_MURAM_ECC /**< Double bit ECC occurred on MURAM*/
-+} ioc_fm_exceptions;
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_runtime_control_grp FM Runtime Control Unit
-+
-+ @Description FM Runtime control unit API functions, definitions and enums.
-+ The FM driver provides a set of control routines for each module.
-+ These routines may only be called after the module was fully
-+ initialized (both configuration and initialization routines were
-+ called). They are typically used to get information from hardware
-+ (status, counters/statistics, revision etc.), to modify a current
-+ state or to force/enable a required action. Run-time control may
-+ be called whenever necessary and as many times as needed.
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection General FM defines.
-+ *//***************************************************************************/
-+#define IOC_FM_MAX_NUM_OF_VALID_PORTS (FM_MAX_NUM_OF_OH_PORTS + \
-+ FM_MAX_NUM_OF_1G_RX_PORTS + \
-+ FM_MAX_NUM_OF_10G_RX_PORTS + \
-+ FM_MAX_NUM_OF_1G_TX_PORTS + \
-+ FM_MAX_NUM_OF_10G_TX_PORTS)
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description Structure for Port bandwidth requirement. Port is identified
-+ by type and relative id.
-+ (must be identical to t_FmPortBandwidth defined in fm_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_bandwidth_t {
-+ ioc_fm_port_type type; /**< FM port type */
-+ uint8_t relative_port_id; /**< Type relative port id */
-+ uint8_t bandwidth; /**< bandwidth - (in term of percents) */
-+} ioc_fm_port_bandwidth_t;
-+
-+/**************************************************************************//**
-+ @Description A Structure containing an array of Port bandwidth requirements.
-+ The user should state the ports requiring bandwidth in terms of
-+ percentage - i.e. all port's bandwidths in the array must add
-+ percentage - i.e. all ports' bandwidths in the array must add
-+ (must be identical to t_FmPortsBandwidthParams defined in fm_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_bandwidth_params {
-+ uint8_t num_of_ports;
-+ /**< num of ports listed in the array below */
-+ ioc_fm_port_bandwidth_t ports_bandwidths[IOC_FM_MAX_NUM_OF_VALID_PORTS];
-+ /**< for each port, its bandwidth (all ports'
-+ bandwidths must add up to 100). */
-+} ioc_fm_port_bandwidth_params;
-+
-+/**************************************************************************//**
-+ @Description enum for defining FM counters
-+*//***************************************************************************/
-+typedef enum ioc_fm_counters {
-+ e_IOC_FM_COUNTERS_ENQ_TOTAL_FRAME, /**< QMI total enqueued frames counter */
-+ e_IOC_FM_COUNTERS_DEQ_TOTAL_FRAME, /**< QMI total dequeued frames counter */
-+ e_IOC_FM_COUNTERS_DEQ_0, /**< QMI 0 frames from QMan counter */
-+ e_IOC_FM_COUNTERS_DEQ_1, /**< QMI 1 frames from QMan counter */
-+ e_IOC_FM_COUNTERS_DEQ_2, /**< QMI 2 frames from QMan counter */
-+ e_IOC_FM_COUNTERS_DEQ_3, /**< QMI 3 frames from QMan counter */
-+ e_IOC_FM_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI dequeue from default queue counter */
-+ e_IOC_FM_COUNTERS_DEQ_FROM_CONTEXT, /**< QMI dequeue from FQ context counter */
-+ e_IOC_FM_COUNTERS_DEQ_FROM_FD, /**< QMI dequeue from FD command field counter */
-+ e_IOC_FM_COUNTERS_DEQ_CONFIRM, /**< QMI dequeue confirm counter */
-+} ioc_fm_counters;
-+
-+typedef struct ioc_fm_obj_t {
-+ void *obj;
-+} ioc_fm_obj_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for returning revision information
-+ (must match struct t_FmRevisionInfo declared in fm_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_revision_info_t {
-+ uint8_t major; /**< Major revision */
-+ uint8_t minor; /**< Minor revision */
-+} ioc_fm_revision_info_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for FM counters
-+*//***************************************************************************/
-+typedef struct ioc_fm_counters_params_t {
-+ ioc_fm_counters cnt; /**< The requested counter */
-+ uint32_t val; /**< The requested value to get/set from/into the counter */
-+} ioc_fm_counters_params_t;
-+
-+typedef union ioc_fm_api_version_t {
-+ struct {
-+ uint8_t major;
-+ uint8_t minor;
-+ uint8_t respin;
-+ uint8_t reserved;
-+ } version;
-+ uint32_t ver;
-+} ioc_fm_api_version_t;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description A structure of information about each of the external
-+ buffer pools used by a port or storage-profile.
-+ (must be identical to t_FmExtPoolParams defined in fm_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_ext_pool_params {
-+ uint8_t id; /**< External buffer pool id */
-+ uint16_t size; /**< External buffer pool buffer size */
-+} ioc_fm_ext_pool_params;
-+
-+/**************************************************************************//**
-+ @Description A structure for informing the driver about the external
-+ buffer pools allocated in the BM and used by a port or a
-+ storage-profile.
-+ (must be identical to t_FmExtPools defined in fm_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_ext_pools {
-+ uint8_t num_of_pools_used; /**< Number of pools use by this port */
-+ ioc_fm_ext_pool_params ext_buf_pool[FM_PORT_MAX_NUM_OF_EXT_POOLS];
-+ /**< Parameters for each port */
-+} ioc_fm_ext_pools;
-+
-+typedef struct ioc_fm_vsp_params_t {
-+ void *p_fm; /**< A handle to the FM object this VSP related to */
-+ ioc_fm_ext_pools ext_buf_pools; /**< Which external buffer pools are used
-+ (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes.
-+ parameter associated with Rx / OP port */
-+ uint16_t liodn_offset; /**< VSP's LIODN offset */
-+ struct {
-+ ioc_fm_port_type port_type; /**< Port type */
-+ uint8_t port_id; /**< Port Id - relative to type */
-+ } port_params;
-+ uint8_t relative_profile_id; /**< VSP Id - relative to VSP's range
-+ defined in relevant FM object */
-+ void *id; /**< return value */
-+} ioc_fm_vsp_params_t;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description A structure for defining BM pool depletion criteria
-+*//***************************************************************************/
-+typedef struct ioc_fm_buf_pool_depletion_t {
-+ bool pools_grp_mode_enable; /**< select mode in which pause frames will be sent after
-+ a number of pools (all together!) are depleted */
-+ uint8_t num_of_pools; /**< the number of depleted pools that will invoke
-+ pause frames transmission. */
-+ bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
-+ /**< For each pool, TRUE if it should be considered for
-+ depletion (Note - this pool must be used by this port!). */
-+ bool single_pool_mode_enable; /**< select mode in which pause frames will be sent after
-+ a single-pool is depleted; */
-+ bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
-+ /**< For each pool, TRUE if it should be considered for
-+ depletion (Note - this pool must be used by this port!) */
-+#if (DPAA_VERSION >= 11)
-+ bool pfc_priorities_en[FM_MAX_NUM_OF_PFC_PRIORITIES];
-+ /**< This field is used by the MAC as the Priority Enable Vector in the PFC frame
-+ which is transmitted */
-+#endif /* (DPAA_VERSION >= 11) */
-+} ioc_fm_buf_pool_depletion_t;
-+
-+#if (DPAA_VERSION >= 11)
-+typedef struct ioc_fm_buf_pool_depletion_params_t {
-+ void *p_fm_vsp;
-+ ioc_fm_buf_pool_depletion_t fm_buf_pool_depletion;
-+} ioc_fm_buf_pool_depletion_params_t;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+typedef struct ioc_fm_buffer_prefix_content_t {
-+ uint16_t priv_data_size; /**< Number of bytes to be left at the beginning
-+ of the external buffer; Note that the private-area will
-+ start from the base of the buffer address. */
-+ bool pass_prs_result; /**< TRUE to pass the parse result to/from the FM;
-+ User may use FM_PORT_GetBufferPrsResult() in order to
-+ get the parser-result from a buffer. */
-+ bool pass_time_stamp; /**< TRUE to pass the timeStamp to/from the FM
-+ User may use FM_PORT_GetBufferTimeStamp() in order to
-+ get the time-stamp from a buffer. */
-+ bool pass_hash_result; /**< TRUE to pass the KG hash result to/from the FM
-+ User may use FM_PORT_GetBufferHashResult() in order to
-+ get the hash-result from a buffer. */
-+ bool pass_all_other_pcd_info; /**< Add all other Internal-Context information:
-+ AD, hash-result, key, etc. */
-+ uint16_t data_align; /**< 0 to use driver's default alignment [64],
-+ other value for selecting a data alignment (must be a power of 2);
-+ if write optimization is used, must be >= 16. */
-+ uint8_t manip_extra_space; /**< Maximum extra size needed (insertion-size minus removal-size);
-+ Note that this field impacts the size of the buffer-prefix
-+ (i.e. it pushes the data offset);
-+ This field is irrelevant if DPAA_VERSION==10 */
-+} ioc_fm_buffer_prefix_content_t;
-+
-+typedef struct ioc_fm_buffer_prefix_content_params_t {
-+ void *p_fm_vsp;
-+ ioc_fm_buffer_prefix_content_t fm_buffer_prefix_content;
-+} ioc_fm_buffer_prefix_content_params_t;
-+
-+#if (DPAA_VERSION >= 11)
-+typedef struct ioc_fm_vsp_config_no_sg_params_t {
-+ void *p_fm_vsp;
-+ bool no_sg;
-+} ioc_fm_vsp_config_no_sg_params_t;
-+
-+typedef struct ioc_fm_vsp_prs_result_params_t {
-+ void *p_fm_vsp;
-+ void *p_data;
-+} ioc_fm_vsp_prs_result_params_t;
-+#endif
-+
-+typedef struct fm_ctrl_mon_t {
-+ uint8_t percent_cnt[2];
-+} fm_ctrl_mon_t;
-+
-+typedef struct ioc_fm_ctrl_mon_counters_params_t {
-+ uint8_t fm_ctrl_index;
-+ fm_ctrl_mon_t *p_mon;
-+} ioc_fm_ctrl_mon_counters_params_t;
-+
-+/**************************************************************************//**
-+ @Function FM_IOC_SET_PORTS_BANDWIDTH
-+
-+ @Description Sets relative weights between ports when accessing common resources.
-+
-+ @Param[in] ioc_fm_port_bandwidth_params Port bandwidth percentages,
-+ their sum must equal 100.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_IOC_SET_PORTS_BANDWIDTH _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(2), ioc_fm_port_bandwidth_params)
-+
-+/**************************************************************************//**
-+ @Function FM_IOC_GET_REVISION
-+
-+ @Description Returns the FM revision
-+
-+ @Param[out] ioc_fm_revision_info_t A structure of revision information parameters.
-+
-+ @Return None.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_IOC_GET_REVISION _IOR(FM_IOC_TYPE_BASE, FM_IOC_NUM(3), ioc_fm_revision_info_t)
-+
-+/**************************************************************************//**
-+ @Function FM_IOC_GET_COUNTER
-+
-+ @Description Reads one of the FM counters.
-+
-+ @Param[in,out] ioc_fm_counters_params_t The requested counter parameters.
-+
-+ @Return Counter's current value.
-+
-+ @Cautions Allowed only following FM_Init().
-+ Note that it is the user's responsibility to call this routine only
-+ for enabled counters, and there will be no indication if a
-+ disabled counter is accessed.
-+*//***************************************************************************/
-+#define FM_IOC_GET_COUNTER _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(4), ioc_fm_counters_params_t)
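/*
 * Illustrative user-space sketch, not part of the original patch: read one FM
 * counter through FM_IOC_GET_COUNTER. The device node "/dev/fm0" (DEV_FM_NAME
 * plus an instance index) is an assumption of this sketch, and the usual
 * <fcntl.h>, <unistd.h> and <sys/ioctl.h> includes are assumed.
 */
static int example_read_enq_total(uint32_t *val)
{
	ioc_fm_counters_params_t cnt = {
		.cnt = e_IOC_FM_COUNTERS_ENQ_TOTAL_FRAME,
	};
	int fd = open("/dev/fm0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FM_IOC_GET_COUNTER, &cnt) < 0) {
		close(fd);
		return -1;
	}
	*val = cnt.val;	/* the counter's current value */
	close(fd);
	return 0;
}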
-+
-+/**************************************************************************//**
-+ @Function FM_IOC_SET_COUNTER
-+
-+ @Description Sets a value to an enabled counter. Use "0" to reset the counter.
-+
-+ @Param[in] ioc_fm_counters_params_t The requested counter parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_IOC_SET_COUNTER _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(5), ioc_fm_counters_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_IOC_FORCE_INTR
-+
-+ @Description Causes an interrupt event on the requested source.
-+
-+ @Param[in] ioc_fm_exceptions An exception to be forced.
-+
-+ @Return E_OK on success; Error code if the exception is not enabled,
-+ or is not able to create interrupt.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_IOC_FORCE_INTR _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(6), ioc_fm_exceptions)
-+
-+/**************************************************************************//**
-+ @Function FM_IOC_GET_API_VERSION
-+
-+ @Description Reads the FMD IOCTL API version.
-+
-+ @Param[in,out] ioc_fm_api_version_t The requested counter parameters.
-+
-+ @Return Version's value.
-+*//***************************************************************************/
-+#define FM_IOC_GET_API_VERSION _IOR(FM_IOC_TYPE_BASE, FM_IOC_NUM(7), ioc_fm_api_version_t)
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Function FM_VSP_Config
-+
-+ @Description Creates descriptor for the FM VSP module.
-+
-+ The routine returns a handle (descriptor) to the FM VSP object.
-+ This descriptor must be passed as first parameter to all other
-+ FM VSP function calls.
-+
-+ No actual initialization or configuration of FM hardware is
-+ done by this routine.
-+
-+ @Param[in] p_FmVspParams Pointer to data structure of parameters
-+
-+ @Retval Handle to FM VSP object, or NULL for Failure.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_VSP_CONFIG_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(8), ioc_compat_fm_vsp_params_t)
-+#endif
-+#define FM_IOC_VSP_CONFIG _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(8), ioc_fm_vsp_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_Init
-+
-+ @Description Initializes the FM VSP module
-+
-+ @Param[in] h_FmVsp - FM VSP module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_VSP_INIT_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(9), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_IOC_VSP_INIT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(9), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_Free
-+
-+ @Description Frees all resources that were assigned to FM VSP module.
-+
-+ Calling this routine invalidates the descriptor.
-+
-+ @Param[in] h_FmVsp - FM VSP module descriptor
-+
-+ @Return E_OK on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_VSP_FREE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(10), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_IOC_VSP_FREE _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(10), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigPoolDepletion
-+
-+ @Description Calling this routine enables pause frame generation depending on the
-+ depletion status of BM pools. It also defines the conditions to activate
-+ this functionality. By default, this functionality is disabled.
-+
-+ @Param[in] ioc_fm_buf_pool_depletion_params_t A structure holding the required parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_VSP_CONFIG_POOL_DEPLETION_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(11), ioc_compat_fm_buf_pool_depletion_params_t)
-+#endif
-+#define FM_IOC_VSP_CONFIG_POOL_DEPLETION _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(11), ioc_fm_buf_pool_depletion_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigBufferPrefixContent
-+
-+ @Description Defines the structure, size and content of the application buffer.
-+
-+ Within the buffer prefix, the FM will save the first 'privDataSize'
-+ bytes and then, depending on 'passPrsResult' and 'passTimeStamp',
-+ copy the parse result and timeStamp, followed by the packet itself
-+ (in this order), into the application buffer at their respective
-+ offsets. In VSPs defined for Tx ports, if 'passPrsResult' is set,
-+ the application should write the parse result at its offset within
-+ the prefix.
-+
-+ Calling this routine changes the buffer margins definitions
-+ in the internal driver database from its default
-+ configuration: Data size: [DEFAULT_FM_SP_bufferPrefixContent_privDataSize]
-+ Pass Parser result: [DEFAULT_FM_SP_bufferPrefixContent_passPrsResult].
-+ Pass timestamp: [DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp].
-+
-+ @Param[in] ioc_fm_buffer_prefix_content_params_t A structure holding the required parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(12), ioc_compat_fm_buffer_prefix_content_params_t)
-+#endif
-+#define FM_IOC_VSP_CONFIG_BUFFER_PREFIX_CONTENT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(12), ioc_fm_buffer_prefix_content_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_ConfigNoScatherGather
-+
-+ @Description Calling this routine changes whether scatter/gather (S/G) frames
-+ may be received, in the internal driver database,
-+ from its default configuration: optimize = [DEFAULT_FM_SP_noScatherGather]
-+
-+ @Param[in] ioc_fm_vsp_config_no_sg_params_t A structure holding the required parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_VSP_Config() and before FM_VSP_Init().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_VSP_CONFIG_NO_SG_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(13), ioc_compat_fm_vsp_config_no_sg_params_t)
-+#endif
-+#define FM_IOC_VSP_CONFIG_NO_SG _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(13), ioc_fm_vsp_config_no_sg_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_VSP_GetBufferPrsResult
-+
-+ @Description Returns the pointer to the parse result in the data buffer.
-+ In Rx ports this is relevant after reception, if parse
-+ result is configured to be part of the data passed to the
-+ application. For non Rx ports it may be used to get the pointer
-+ of the area in the buffer where parse result should be
-+ initialized - if so configured.
-+ See FM_VSP_ConfigBufferPrefixContent for data buffer prefix
-+ configuration.
-+
-+ @Param[in] ioc_fm_vsp_prs_result_params_t A structure holding the required parameters.
-+
-+ @Return Parse result pointer on success, NULL if parse result was not
-+ configured for this port.
-+
-+ @Cautions Allowed only following FM_VSP_Init().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_VSP_GET_BUFFER_PRS_RESULT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(14), ioc_compat_fm_vsp_prs_result_params_t)
-+#endif
-+#define FM_IOC_VSP_GET_BUFFER_PRS_RESULT _IOWR(FM_IOC_TYPE_BASE, FM_IOC_NUM(14), ioc_fm_vsp_prs_result_params_t)
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Function FM_CtrlMonStart
-+
-+ @Description Start monitoring utilization of all available FM controllers.
-+
-+ In order to obtain FM controllers utilization the following sequence
-+ should be used:
-+ -# FM_CtrlMonStart()
-+ -# FM_CtrlMonStop()
-+ -# FM_CtrlMonGetCounters() - issued for each FM controller
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_IOC_CTRL_MON_START _IO(FM_IOC_TYPE_BASE, FM_IOC_NUM(15))
-+
-+
-+/**************************************************************************//**
-+ @Function FM_CtrlMonStop
-+
-+ @Description Stop monitoring utilization of all available FM controllers.
-+
-+ In order to obtain FM controllers utilization the following sequence
-+ should be used:
-+ -# FM_CtrlMonStart()
-+ -# FM_CtrlMonStop()
-+ -# FM_CtrlMonGetCounters() - issued for each FM controller
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_IOC_CTRL_MON_STOP _IO(FM_IOC_TYPE_BASE, FM_IOC_NUM(16))
-+
-+/**************************************************************************//**
-+ @Function FM_CtrlMonGetCounters
-+
-+ @Description Obtain FM controller utilization parameters.
-+
-+ In order to obtain FM controllers utilization the following sequence
-+ should be used:
-+ -# FM_CtrlMonStart()
-+ -# FM_CtrlMonStop()
-+ -# FM_CtrlMonGetCounters() - issued for each FM controller
-+
-+ @Param[in] ioc_fm_ctrl_mon_counters_params_t A structure holding the required parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_IOC_CTRL_MON_GET_COUNTERS_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(17), ioc_compat_fm_ctrl_mon_counters_params_t)
-+#endif
-+#define FM_IOC_CTRL_MON_GET_COUNTERS _IOW(FM_IOC_TYPE_BASE, FM_IOC_NUM(17), ioc_fm_ctrl_mon_counters_params_t)
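/*
 * Illustrative user-space sketch, not part of the original patch: the
 * start / stop / get-counters sequence described above, expressed through the
 * corresponding ioctls. 'fm_fd' is an already-open FM device descriptor and
 * controller index 0 is a placeholder; in practice the last call is repeated
 * for each available FM controller.
 */
static int example_fm_ctrl_utilization(int fm_fd, fm_ctrl_mon_t *mon)
{
	ioc_fm_ctrl_mon_counters_params_t params = {
		.fm_ctrl_index = 0,
		.p_mon = mon,
	};

	if (ioctl(fm_fd, FM_IOC_CTRL_MON_START) < 0)
		return -1;
	/* ... let traffic run over the measurement window ... */
	if (ioctl(fm_fd, FM_IOC_CTRL_MON_STOP) < 0)
		return -1;
	return ioctl(fm_fd, FM_IOC_CTRL_MON_GET_COUNTERS, &params);
}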
-+
-+/** @} */ /* end of lnx_ioctl_FM_runtime_control_grp group */
-+/** @} */ /* end of lnx_ioctl_FM_lib_grp group */
-+/** @} */ /* end of lnx_ioctl_FM_grp */
-+
-+#define FMD_API_VERSION_MAJOR 21
-+#define FMD_API_VERSION_MINOR 1
-+#define FMD_API_VERSION_RESPIN 0
-+
-+#endif /* __FM_IOCTLS_H */
---- /dev/null
-+++ b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
-@@ -0,0 +1,3084 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/******************************************************************************
-+ @File fm_pcd_ioctls.h
-+
-+ @Description FM PCD ...
-+*//***************************************************************************/
-+#ifndef __FM_PCD_IOCTLS_H
-+#define __FM_PCD_IOCTLS_H
-+
-+#include "net_ioctls.h"
-+#include "fm_ioctls.h"
-+
-+
-+/**************************************************************************//**
-+
-+ @Group lnx_ioctl_FM_grp Frame Manager Linux IOCTL API
-+
-+ @Description Frame Manager Linux ioctls definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_PCD_grp FM PCD
-+
-+ @Description Frame Manager PCD API functions, definitions and enums
-+
-+ The FM PCD module is responsible for the initialization of all
-+ global classifying FM modules. This includes the parser general and
-+ common registers, the key generator global and common registers,
-+ and the policer global and common registers.
-+ In addition, the FM PCD SW module will initialize all required
-+ key generator schemes, coarse classification flows, and policer
-+ profiles. When an FM module is configured to work with one of these
-+ entities, it will register with it using the FM PORT API. The PCD
-+ module will manage the PCD resources - i.e. resource management of
-+ KeyGen schemes, etc.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Collection General PCD defines
-+*//***************************************************************************/
-+#define IOC_FM_PCD_MAX_NUM_OF_PRIVATE_HDRS 2 /**< Number of units/headers saved for user */
-+
-+#define IOC_FM_PCD_PRS_NUM_OF_HDRS 16 /**< Number of headers supported by HW parser */
-+#define IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS (32 - IOC_FM_PCD_MAX_NUM_OF_PRIVATE_HDRS)
-+ /**< Number of distinction units is limited by
-+ register size (32 bits) minus reserved bits
-+ for private headers. */
-+#define IOC_FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS 4 /**< Maximum number of interchangeable headers
-+ in a distinction unit */
-+#define IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS 8 /**< Total number of generic KeyGen registers */
-+#define IOC_FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY 35 /**< Max number allowed on any configuration;
-+ For HW implementation reasons, in most
-+ cases less than this will be allowed; The
-+ driver will return an initialization error
-+ if resource is unavailable. */
-+#define IOC_FM_PCD_KG_NUM_OF_EXTRACT_MASKS 4 /**< Total number of masks allowed on KeyGen extractions. */
-+#define IOC_FM_PCD_KG_NUM_OF_DEFAULT_GROUPS 16 /**< Number of default value logical groups */
-+
-+#define IOC_FM_PCD_PRS_NUM_OF_LABELS 32 /**< Maximum number of SW parser labels */
-+#define IOC_FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */
-+
-+#define IOC_FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE 128 /**< Maximum size of insertion template for
-+ insert manipulation */
-+
-+#if DPAA_VERSION >= 11
-+#define IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES 64 /**< Maximum possible entries for frame replicator group */
-+#endif /* DPAA_VERSION >= 11 */
-+/* @} */
-+
-+#ifdef FM_CAPWAP_SUPPORT
-+#error "FM_CAPWAP_SUPPORT not implemented!"
-+#endif
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_PCD_init_grp FM PCD Initialization Unit
-+
-+ @Description Frame Manager PCD Initialization Unit API
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description PCD counters
-+ (must match enum e_FmPcdCounters defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_counters {
-+ e_IOC_FM_PCD_KG_COUNTERS_TOTAL, /**< KeyGen counter */
-+ e_IOC_FM_PCD_PLCR_COUNTERS_RED, /**< Policer counter - counts the total number of RED packets that exit the Policer. */
-+ e_IOC_FM_PCD_PLCR_COUNTERS_YELLOW, /**< Policer counter - counts the total number of YELLOW packets that exit the Policer. */
-+ e_IOC_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED, /**< Policer counter - counts the number of packets that changed color to RED by the Policer;
-+ This is a subset of e_IOC_FM_PCD_PLCR_COUNTERS_RED packet count, indicating active color changes. */
-+ e_IOC_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW, /**< Policer counter - counts the number of packets that changed color to YELLOW by the Policer;
-+ This is a subset of e_IOC_FM_PCD_PLCR_COUNTERS_YELLOW packet count, indicating active color changes. */
-+ e_IOC_FM_PCD_PLCR_COUNTERS_TOTAL, /**< Policer counter - counts the total number of packets passed in the Policer. */
-+ e_IOC_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH, /**< Policer counter - counts the number of packets with length mismatch. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH, /**< Parser counter - counts the number of times the parser block is dispatched. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L2 parse result is returned (including errors). */
-+ e_IOC_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L3 parse result is returned (including errors). */
-+ e_IOC_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times L4 parse result is returned (including errors). */
-+ e_IOC_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED, /**< Parser counter - counts the number of times SHIM parse result is returned (including errors). */
-+ e_IOC_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L2 parse result is returned with errors. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L3 parse result is returned with errors. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times L4 parse result is returned with errors. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter - counts the number of times SHIM parse result is returned with errors. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES, /**< Parser counter - counts the number of cycles spent executing soft parser instruction (including stall cycles). */
-+ e_IOC_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES, /**< Parser counter - counts the number of cycles stalled waiting for parser internal memory reads while executing soft parser instruction. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES, /**< Parser counter - counts the number of cycles spent executing hard parser (including stall cycles). */
-+ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory read. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory read. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES, /**< MURAM counter - counts the number of cycles while performing FMan Memory write. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES, /**< MURAM counter - counts the number of cycles stalled while performing FMan Memory write. */
-+ e_IOC_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES /**< FPM counter - counts the number of cycles stalled while performing a FPM Command. */
-+} ioc_fm_pcd_counters;
-+
-+/**************************************************************************//**
-+ @Description PCD interrupts
-+ (must match enum e_FmPcdExceptions defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_exceptions {
-+ e_IOC_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, /**< KeyGen double-bit ECC error is detected on internal memory read access. */
-+ e_IOC_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, /**< KeyGen scheme configuration error indicating a key size larger than 56 bytes. */
-+ e_IOC_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC, /**< Policer double-bit ECC error has been detected on PRAM read access. */
-+ e_IOC_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR, /**< Policer access to a non-initialized profile has been detected. */
-+ e_IOC_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE, /**< Policer RAM self-initialization complete */
-+ e_IOC_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE, /**< Policer atomic action complete */
-+ e_IOC_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC, /**< Parser double-bit ECC error */
-+ e_IOC_FM_PCD_PRS_EXCEPTION_SINGLE_ECC /**< Parser single-bit ECC error */
-+} ioc_fm_pcd_exceptions;
-+
-+/** @} */ /* end of lnx_ioctl_FM_PCD_init_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_PCD_Runtime_grp FM PCD Runtime Unit
-+
-+ @Description Frame Manager PCD Runtime Unit
-+
-+ The runtime control allows creation of PCD infrastructure modules
-+ such as Network Environment Characteristics, Classification Plan
-+ Groups and Coarse Classification Trees.
-+ It also allows on-the-fly initialization, modification and removal
-+ of PCD modules such as KeyGen schemes, coarse classification nodes
-+ and Policer profiles.
-+
-+                 In order to explain the programming model of the PCD driver interface,
-+                 a few terms that are used below must first be defined.
-+ - Distinction Header - One of the 16 protocols supported by the FM parser,
-+ or one of the SHIM headers (1 or 2). May be a header with a special
-+ option (see below).
-+                 - Interchangeable Headers Group - A group of headers, any one of which
-+                   is accepted as a match for the group. For example, if in a specific
-+                   context the user chooses to treat IPv4 and IPv6 in the same way, they
-+                   may create an Interchangeable Headers Group consisting of these 2 headers.
-+ - A Distinction Unit - a Distinction Header or an Interchangeable Headers
-+ Group.
-+ - Header with special option - applies to Ethernet, MPLS, VLAN, IPv4 and
-+ IPv6, includes multicast, broadcast and other protocol specific options.
-+ In terms of hardware it relates to the options available in the classification
-+ plan.
-+ - Network Environment Characteristics - a set of Distinction Units that define
-+ the total recognizable header selection for a certain environment. This is
-+ NOT the list of all headers that will ever appear in a flow, but rather
-+ everything that needs distinction in a flow, where distinction is made by KeyGen
-+ schemes and coarse classification action descriptors.
-+
-+ The PCD runtime modules initialization is done in stages. The first stage after
-+ initializing the PCD module itself is to establish a Network Flows Environment
-+ Definition. The application may choose to establish one or more such environments.
-+                 Later, when needed, the application will have to state, for some of its modules,
-+                 to which single environment each of them belongs.
-+
-+ @{
-+*//***************************************************************************/
-+
-+
-+/**************************************************************************//**
-+ @Description structure for FM counters
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_counters_params_t {
-+ ioc_fm_pcd_counters cnt; /**< The requested counter */
-+ uint32_t val; /**< The requested value to get/set from/into the counter */
-+} ioc_fm_pcd_counters_params_t;
-+
-+/**************************************************************************//**
-+ @Description   structure for FM exception definitions
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_exception_params_t {
-+ ioc_fm_pcd_exceptions exception; /**< The requested exception */
-+ bool enable; /**< TRUE to enable interrupt, FALSE to mask it. */
-+} ioc_fm_pcd_exception_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for SW parser labels
-+ (must be identical to struct t_FmPcdPrsLabelParams defined in fm_pcd_ext.h)
-+ *//***************************************************************************/
-+typedef struct ioc_fm_pcd_prs_label_params_t {
-+ uint32_t instruction_offset; /**< SW parser label instruction offset (2 bytes
-+ resolution), relative to Parser RAM. */
-+ ioc_net_header_type hdr; /**< The existence of this header will invoke
-+ the SW parser code. */
-+    uint8_t                 index_per_hdr;      /**< Normally 0; if there is more than one SW parser
-+                                                 attachment for the same header, use this
-+                                                 index to distinguish between them. */
-+} ioc_fm_pcd_prs_label_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for SW parser
-+ (Must match struct t_FmPcdPrsSwParams defined in fm_pcd_ext.h)
-+ *//***************************************************************************/
-+typedef struct ioc_fm_pcd_prs_sw_params_t {
-+ bool override; /**< FALSE to invoke a check that nothing else
-+ was loaded to this address, including
-+ internal patches.
-+ TRUE to override any existing code.*/
-+ uint32_t size; /**< SW parser code size */
-+ uint16_t base; /**< SW parser base (in instruction counts!
-+ must be larger than 0x20)*/
-+ uint8_t *p_code; /**< SW parser code */
-+ uint32_t sw_prs_data_params[IOC_FM_PCD_PRS_NUM_OF_HDRS];
-+ /**< SW parser data (parameters) */
-+ uint8_t num_of_labels; /**< Number of labels for SW parser. */
-+ ioc_fm_pcd_prs_label_params_t labels_table[IOC_FM_PCD_PRS_NUM_OF_LABELS];
-+ /**< SW parser labels table,
-+ containing num_of_labels entries */
-+} ioc_fm_pcd_prs_sw_params_t;
-+
-+/**************************************************************************//**
-+ @Description   A structure for setting a KeyGen default value
-+ *//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_dflt_value_params_t {
-+ uint8_t valueId; /**< 0,1 - one of 2 global default values */
-+ uint32_t value; /**< The requested default value */
-+} ioc_fm_pcd_kg_dflt_value_params_t;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_Enable
-+
-+ @Description   This routine should be called after PCD is initialized, in order to enable
-+                all PCD engines according to their existing configuration.
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only when PCD is disabled.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_ENABLE _IO(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(1))
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_Disable
-+
-+ @Description This routine may be called when PCD is enabled in order to
-+ disable all PCD engines. It may be called
-+ only when none of the ports in the system are using the PCD.
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only when PCD is enabled.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_DISABLE _IO(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(2))
-+
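-+/* Editorial sketch (not part of the original header): a minimal user-space
-+ * wrapper showing how the enable/disable ioctls above might be issued against
-+ * an already-open PCD device file descriptor. The wrapper name and the idea of
-+ * passing the descriptor in are illustrative assumptions; the includes below
-+ * are also reused by the later sketches in this file.
-+ */
-+#include <sys/ioctl.h>
-+#include <string.h>
-+
-+static inline int example_fm_pcd_set_enabled(int pcd_fd, bool enable)
-+{
-+    /* Both requests are _IO (no argument payload), so no third argument is passed. */
-+    return ioctl(pcd_fd, enable ? FM_PCD_IOC_ENABLE : FM_PCD_IOC_DISABLE);
-+}
-+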
-+ /**************************************************************************//**
-+ @Function FM_PCD_PrsLoadSw
-+
-+ @Description   This routine loads the software parser code image described by
-+                the parameters structure (code, data and labels) into the FMan
-+                parser memory, so that the soft parser can be invoked for the
-+                configured headers.
-+
-+ @Param[in] ioc_fm_pcd_prs_sw_params_t A pointer to the image of the software parser code.
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only when PCD is disabled.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_PRS_LOAD_SW_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(3), ioc_compat_fm_pcd_prs_sw_params_t)
-+#endif
-+#define FM_PCD_IOC_PRS_LOAD_SW _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(3), ioc_fm_pcd_prs_sw_params_t)
-+
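-+/* Editorial sketch (not part of the original header): loading a soft parser
-+ * image through FM_PCD_IOC_PRS_LOAD_SW. The wrapper name and the example base
-+ * value are illustrative assumptions; only the fields needed for a minimal
-+ * load are filled and the rest are left zeroed.
-+ */
-+static inline int example_fm_pcd_prs_load_sw(int pcd_fd, uint8_t *code, uint32_t code_size)
-+{
-+    ioc_fm_pcd_prs_sw_params_t prs;
-+
-+    memset(&prs, 0, sizeof(prs));
-+    prs.override      = 0;         /* fail if code was already loaded at this address */
-+    prs.size          = code_size; /* size of the soft parser code image, in bytes */
-+    prs.base          = 0x40;      /* illustrative base, in instruction counts (above 0x20) */
-+    prs.p_code        = code;      /* pointer to the soft parser code image */
-+    prs.num_of_labels = 0;         /* no labels in this minimal sketch */
-+    return ioctl(pcd_fd, FM_PCD_IOC_PRS_LOAD_SW, &prs);
-+}
-+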
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSetDfltValue
-+
-+ @Description   Calling this routine sets a global default value to be used
-+                by the KeyGen when the parser does not recognize a required
-+                field/header.
-+                Both global default values are 0 by default.
-+
-+ @Param[in] ioc_fm_pcd_kg_dflt_value_params_t A pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only when PCD is disabled.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_KG_SET_DFLT_VALUE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(6), ioc_fm_pcd_kg_dflt_value_params_t)
-+
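-+/* Editorial sketch (not part of the original header): programming one of the
-+ * two KeyGen global default values via FM_PCD_IOC_KG_SET_DFLT_VALUE. The
-+ * wrapper name is an illustrative assumption.
-+ */
-+static inline int example_fm_pcd_kg_set_dflt(int pcd_fd, uint8_t value_id, uint32_t value)
-+{
-+    ioc_fm_pcd_kg_dflt_value_params_t params;
-+
-+    params.valueId = value_id;  /* 0 or 1 - selects one of the two global defaults */
-+    params.value   = value;     /* value used when the parser does not find a field */
-+    return ioctl(pcd_fd, FM_PCD_IOC_KG_SET_DFLT_VALUE, &params);
-+}
-+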
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSetAdditionalDataAfterParsing
-+
-+ @Description Calling this routine allows the keygen to access data past
-+ the parser finishing point.
-+
-+ @Param[in] uint8_t payload-offset; the number of bytes beyond the parser location.
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only when PCD is disabled.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_KG_SET_ADDITIONAL_DATA_AFTER_PARSING _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(7), uint8_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_SetException
-+
-+ @Description Calling this routine enables/disables PCD interrupts.
-+
-+ @Param[in] ioc_fm_pcd_exception_params_t Arguments struct with exception to be enabled/disabled.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_SET_EXCEPTION _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(8), ioc_fm_pcd_exception_params_t)
-+
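-+/* Editorial sketch (not part of the original header): enabling or masking a
-+ * single PCD exception interrupt via FM_PCD_IOC_SET_EXCEPTION. The wrapper
-+ * name is an illustrative assumption.
-+ */
-+static inline int example_fm_pcd_set_exception(int pcd_fd,
-+                                               ioc_fm_pcd_exceptions exception,
-+                                               bool enable)
-+{
-+    ioc_fm_pcd_exception_params_t params;
-+
-+    params.exception = exception;   /* e.g. e_IOC_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC */
-+    params.enable    = enable;      /* TRUE to enable the interrupt, FALSE to mask it */
-+    return ioctl(pcd_fd, FM_PCD_IOC_SET_EXCEPTION, &params);
-+}
-+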
-+/**************************************************************************//**
-+ @Function FM_PCD_GetCounter
-+
-+ @Description Reads one of the FM PCD counters.
-+
-+ @Param[in,out] ioc_fm_pcd_counters_params_t The requested counter parameters.
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions      Note that it is the user's responsibility to call this routine
-+                only for enabled counters; there will be no indication if a
-+                disabled counter is accessed.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_GET_COUNTER _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(9), ioc_fm_pcd_counters_params_t)
-+
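-+/* Editorial sketch (not part of the original header): reading one of the PCD
-+ * counters via FM_PCD_IOC_GET_COUNTER. Since the request is _IOWR, the counter
-+ * selector is passed in and the value is returned in the same structure. The
-+ * wrapper name is an illustrative assumption.
-+ */
-+static inline int example_fm_pcd_get_counter(int pcd_fd,
-+                                             ioc_fm_pcd_counters counter,
-+                                             uint32_t *value)
-+{
-+    ioc_fm_pcd_counters_params_t params;
-+    int err;
-+
-+    params.cnt = counter;   /* e.g. e_IOC_FM_PCD_PLCR_COUNTERS_TOTAL */
-+    params.val = 0;
-+    err = ioctl(pcd_fd, FM_PCD_IOC_GET_COUNTER, &params);
-+    if (err == 0)
-+        *value = params.val;    /* counter value filled in by the driver */
-+    return err;
-+}
-+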
-+/**************************************************************************//**
-+
-+ @Function FM_PCD_KgSchemeGetCounter
-+
-+ @Description Reads scheme packet counter.
-+
-+ @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet().
-+
-+ @Return Counter's current value.
-+
-+ @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_KG_SCHEME_GET_CNTR_COMPAT _IOR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(4), ioc_compat_fm_pcd_kg_scheme_spc_t)
-+#endif
-+#define FM_PCD_IOC_KG_SCHEME_GET_CNTR _IOR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(4), ioc_fm_pcd_kg_scheme_spc_t)
-+
-+#if 0
-+TODO: unused IOCTL
-+/**************************************************************************//**
-+ @Function FM_PCD_ModifyCounter
-+
-+ @Description Writes a value to an enabled counter. Use "0" to reset the counter.
-+
-+ @Param[in] ioc_fm_pcd_counters_params_t - The requested counter parameters.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_MODIFY_COUNTER _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(10), ioc_fm_pcd_counters_params_t)
-+#define FM_PCD_IOC_SET_COUNTER FM_PCD_IOC_MODIFY_COUNTER
-+#endif
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ForceIntr
-+
-+ @Description Causes an interrupt event on the requested source.
-+
-+ @Param[in] ioc_fm_pcd_exceptions - An exception to be forced.
-+
-+ @Return        0 on success; error code if the exception is not enabled
-+                or cannot generate an interrupt.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_FORCE_INTR _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(11), ioc_fm_pcd_exceptions)
-+
-+/**************************************************************************//**
-+ @Collection Definitions of coarse classification parameters as required by KeyGen
-+ (when coarse classification is the next engine after this scheme).
-+*//***************************************************************************/
-+#define IOC_FM_PCD_MAX_NUM_OF_CC_TREES 8
-+#define IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS 16
-+#define IOC_FM_PCD_MAX_NUM_OF_CC_UNITS 4
-+#define IOC_FM_PCD_MAX_NUM_OF_KEYS 256
-+#define IOC_FM_PCD_MAX_NUM_OF_FLOWS (4*KILOBYTE)
-+#define IOC_FM_PCD_MAX_SIZE_OF_KEY 56
-+#define IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP 16
-+#define IOC_FM_PCD_LAST_KEY_INDEX 0xffff
-+#define IOC_FM_PCD_MANIP_DSCP_VALUES 64
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Collection    A set of definitions for describing protocol special options.
-+*//***************************************************************************/
-+typedef uint32_t ioc_protocol_opt_t; /**< A general type to define a protocol option. */
-+
-+typedef ioc_protocol_opt_t ioc_eth_protocol_opt_t; /**< Ethernet protocol options. */
-+#define IOC_ETH_BROADCAST 0x80000000 /**< Ethernet Broadcast. */
-+#define IOC_ETH_MULTICAST 0x40000000 /**< Ethernet Multicast. */
-+
-+typedef ioc_protocol_opt_t ioc_vlan_protocol_opt_t; /**< Vlan protocol options. */
-+#define IOC_VLAN_STACKED 0x20000000 /**< Stacked VLAN. */
-+
-+typedef ioc_protocol_opt_t ioc_mpls_protocol_opt_t; /**< MPLS protocol options. */
-+#define IOC_MPLS_STACKED 0x10000000 /**< Stacked MPLS. */
-+
-+typedef ioc_protocol_opt_t ioc_ipv4_protocol_opt_t; /**< IPv4 protocol options. */
-+#define IOC_IPV4_BROADCAST_1 0x08000000 /**< IPv4 Broadcast. */
-+#define IOC_IPV4_MULTICAST_1 0x04000000 /**< IPv4 Multicast. */
-+#define IOC_IPV4_UNICAST_2 0x02000000 /**< Tunneled IPv4 - Unicast. */
-+#define IOC_IPV4_MULTICAST_BROADCAST_2 0x01000000 /**< Tunneled IPv4 - Broadcast/Multicast. */
-+
-+#define IOC_IPV4_FRAG_1 0x00000008 /**< IPV4 reassembly option.
-+ IPV4 Reassembly manipulation requires network
-+ environment with IPV4 header and IPV4_FRAG_1 option */
-+
-+typedef ioc_protocol_opt_t ioc_ipv6_protocol_opt_t; /**< IPv6 protocol options. */
-+#define IOC_IPV6_MULTICAST_1 0x00800000 /**< IPv6 Multicast. */
-+#define IOC_IPV6_UNICAST_2 0x00400000 /**< Tunneled IPv6 - Unicast. */
-+#define IOC_IPV6_MULTICAST_2 0x00200000 /**< Tunneled IPv6 - Multicast. */
-+
-+#define IOC_IPV6_FRAG_1 0x00000004 /**< IPV6 reassembly option.
-+ IPV6 Reassembly manipulation requires network
-+ environment with IPV6 header and IPV6_FRAG_1 option */
-+#if (DPAA_VERSION >= 11)
-+typedef ioc_protocol_opt_t ioc_capwap_protocol_opt_t; /**< CAPWAP protocol options. */
-+#define CAPWAP_FRAG_1 0x00000008 /**< CAPWAP reassembly option.
-+ CAPWAP Reassembly manipulation requires network
-+ environment with CAPWAP header and CAPWAP_FRAG_1 option;
-+ in case where fragment found, the fragment-extension offset
-+ may be found at 'shim2' (in parser-result). */
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/* @} */
-+
-+#define IOC_FM_PCD_MANIP_MAX_HDR_SIZE 256
-+#define IOC_FM_PCD_MANIP_DSCP_TO_VLAN_TRANS 64
-+/**************************************************************************//**
-+ @Collection A set of definitions to support Header Manipulation selection.
-+*//***************************************************************************/
-+typedef uint32_t ioc_hdr_manip_flags_t; /**< A general type to define a HMan update command flags. */
-+
-+typedef ioc_hdr_manip_flags_t ioc_ipv4_hdr_manip_update_flags_t; /**< IPv4 protocol HMan update command flags. */
-+
-+#define IOC_HDR_MANIP_IPV4_TOS 0x80000000 /**< update TOS with the given value ('tos' field
-+ of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
-+#define IOC_HDR_MANIP_IPV4_ID 0x40000000 /**< update IP ID with the given value ('id' field
-+ of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
-+#define IOC_HDR_MANIP_IPV4_TTL 0x20000000 /**< Decrement TTL by 1 */
-+#define IOC_HDR_MANIP_IPV4_SRC 0x10000000 /**< update IP source address with the given value
-+ ('src' field of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
-+#define IOC_HDR_MANIP_IPV4_DST 0x08000000 /**< update IP destination address with the given value
-+ ('dst' field of ioc_fm_pcd_manip_hdr_field_update_ipv4_t) */
-+
-+typedef ioc_hdr_manip_flags_t ioc_ipv6_hdr_manip_update_flags_t; /**< IPv6 protocol HMan update command flags. */
-+
-+#define IOC_HDR_MANIP_IPV6_TC 0x80000000 /**< update Traffic Class address with the given value
-+ ('traffic_class' field of ioc_fm_pcd_manip_hdr_field_update_ipv6_t) */
-+#define IOC_HDR_MANIP_IPV6_HL 0x40000000 /**< Decrement Hop Limit by 1 */
-+#define IOC_HDR_MANIP_IPV6_SRC 0x20000000 /**< update IP source address with the given value
-+ ('src' field of ioc_fm_pcd_manip_hdr_field_update_ipv6_t) */
-+#define IOC_HDR_MANIP_IPV6_DST 0x10000000 /**< update IP destination address with the given value
-+ ('dst' field of ioc_fm_pcd_manip_hdr_field_update_ipv6_t) */
-+
-+typedef ioc_hdr_manip_flags_t ioc_tcp_udp_hdr_manip_update_flags_t;/**< TCP/UDP protocol HMan update command flags. */
-+
-+#define IOC_HDR_MANIP_TCP_UDP_SRC 0x80000000 /**< update TCP/UDP source address with the given value
-+ ('src' field of ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t) */
-+#define IOC_HDR_MANIP_TCP_UDP_DST 0x40000000 /**< update TCP/UDP destination address with the given value
-+ ('dst' field of ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t) */
-+#define IOC_HDR_MANIP_TCP_UDP_CHECKSUM 0x20000000 /**< update TCP/UDP checksum */
-+
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description   A type used for returning the order of the key extraction.
-+                Each value in this array represents the index of the extraction
-+                command as defined by the user in the initialization extraction array.
-+                The valid size of this array is the user-defined number of extractions
-+                required (also marked by the second '0' in this array).
-+*//***************************************************************************/
-+typedef uint8_t ioc_fm_pcd_kg_key_order_t [IOC_FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
-+
-+/**************************************************************************//**
-+ @Description All PCD engines
-+ (must match enum e_FmPcdEngine defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_engine {
-+ e_IOC_FM_PCD_INVALID = 0, /**< Invalid PCD engine */
-+ e_IOC_FM_PCD_DONE, /**< No PCD Engine indicated */
-+ e_IOC_FM_PCD_KG, /**< KeyGen */
-+ e_IOC_FM_PCD_CC, /**< Coarse Classifier */
-+ e_IOC_FM_PCD_PLCR, /**< Policer */
-+ e_IOC_FM_PCD_PRS, /**< Parser */
-+#if DPAA_VERSION >= 11
-+ e_IOC_FM_PCD_FR, /**< Frame Replicator */
-+#endif /* DPAA_VERSION >= 11 */
-+ e_IOC_FM_PCD_HASH /**< Hash Table */
-+} ioc_fm_pcd_engine;
-+
-+/**************************************************************************//**
-+ @Description An enum for selecting extraction by header types
-+ (Must match enum e_FmPcdExtractByHdrType defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_extract_by_hdr_type {
-+ e_IOC_FM_PCD_EXTRACT_FROM_HDR, /**< Extract bytes from header */
-+ e_IOC_FM_PCD_EXTRACT_FROM_FIELD, /**< Extract bytes from header field */
-+ e_IOC_FM_PCD_EXTRACT_FULL_FIELD /**< Extract a full field */
-+} ioc_fm_pcd_extract_by_hdr_type;
-+
-+/**************************************************************************//**
-+ @Description An enum for selecting extraction source (when it is not the header)
-+ (Must match enum e_FmPcdExtractFrom defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_extract_from {
-+ e_IOC_FM_PCD_EXTRACT_FROM_FRAME_START, /**< KG & CC: Extract from beginning of frame */
-+ e_IOC_FM_PCD_EXTRACT_FROM_DFLT_VALUE, /**< KG only: Extract from a default value */
-+ e_IOC_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE, /**< KG only: Extract from the point where parsing had finished */
-+ e_IOC_FM_PCD_EXTRACT_FROM_KEY, /**< CC only: Field where saved KEY */
-+ e_IOC_FM_PCD_EXTRACT_FROM_HASH, /**< CC only: Field where saved HASH */
-+ e_IOC_FM_PCD_EXTRACT_FROM_PARSE_RESULT, /**< KG & CC: Extract from the parser result */
-+ e_IOC_FM_PCD_EXTRACT_FROM_ENQ_FQID, /**< KG & CC: Extract from enqueue FQID */
-+ e_IOC_FM_PCD_EXTRACT_FROM_FLOW_ID /**< CC only: Field where saved Dequeue FQID */
-+} ioc_fm_pcd_extract_from;
-+
-+/**************************************************************************//**
-+ @Description An enum for selecting extraction type
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_extract_type {
-+ e_IOC_FM_PCD_EXTRACT_BY_HDR, /**< Extract according to header */
-+ e_IOC_FM_PCD_EXTRACT_NON_HDR, /**< Extract from data that is not the header */
-+ e_IOC_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO /**< Extract private info as specified by user */
-+} ioc_fm_pcd_extract_type;
-+
-+/**************************************************************************//**
-+ @Description An enum for selecting a default
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_kg_extract_dflt_select {
-+ e_IOC_FM_PCD_KG_DFLT_GBL_0, /**< Default selection is KG register 0 */
-+ e_IOC_FM_PCD_KG_DFLT_GBL_1, /**< Default selection is KG register 1 */
-+ e_IOC_FM_PCD_KG_DFLT_PRIVATE_0, /**< Default selection is a per scheme register 0 */
-+ e_IOC_FM_PCD_KG_DFLT_PRIVATE_1, /**< Default selection is a per scheme register 1 */
-+ e_IOC_FM_PCD_KG_DFLT_ILLEGAL /**< Illegal selection */
-+} ioc_fm_pcd_kg_extract_dflt_select;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type defining all default groups - each group shares
-+ a default value, one of four user-initialized values.
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_kg_known_fields_dflt_types {
-+ e_IOC_FM_PCD_KG_MAC_ADDR, /**< MAC Address */
-+ e_IOC_FM_PCD_KG_TCI, /**< TCI field */
-+ e_IOC_FM_PCD_KG_ENET_TYPE, /**< ENET Type */
-+ e_IOC_FM_PCD_KG_PPP_SESSION_ID, /**< PPP Session id */
-+ e_IOC_FM_PCD_KG_PPP_PROTOCOL_ID, /**< PPP Protocol id */
-+ e_IOC_FM_PCD_KG_MPLS_LABEL, /**< MPLS label */
-+ e_IOC_FM_PCD_KG_IP_ADDR, /**< IP addr */
-+ e_IOC_FM_PCD_KG_PROTOCOL_TYPE, /**< Protocol type */
-+ e_IOC_FM_PCD_KG_IP_TOS_TC, /**< TOS or TC */
-+ e_IOC_FM_PCD_KG_IPV6_FLOW_LABEL, /**< IPV6 flow label */
-+ e_IOC_FM_PCD_KG_IPSEC_SPI, /**< IPSEC SPI */
-+ e_IOC_FM_PCD_KG_L4_PORT, /**< L4 Port */
-+ e_IOC_FM_PCD_KG_TCP_FLAG, /**< TCP Flag */
-+ e_IOC_FM_PCD_KG_GENERIC_FROM_DATA, /**< grouping implemented by SW,
-+ any data extraction that is not the full
-+ field described above */
-+ e_IOC_FM_PCD_KG_GENERIC_FROM_DATA_NO_V, /**< grouping implemented by SW,
-+ any data extraction without validation */
-+ e_IOC_FM_PCD_KG_GENERIC_NOT_FROM_DATA /**< grouping implemented by SW,
-+ extraction from parser result or
-+ direct use of default value */
-+} ioc_fm_pcd_kg_known_fields_dflt_types;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for defining header index for scenarios with
-+ multiple (tunneled) headers
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_hdr_index {
-+ e_IOC_FM_PCD_HDR_INDEX_NONE = 0, /**< used when multiple headers not used, also
-+ to specify regular IP (not tunneled). */
-+ e_IOC_FM_PCD_HDR_INDEX_1, /**< may be used for VLAN, MPLS, tunneled IP */
-+ e_IOC_FM_PCD_HDR_INDEX_2, /**< may be used for MPLS, tunneled IP */
-+ e_IOC_FM_PCD_HDR_INDEX_3, /**< may be used for MPLS */
-+ e_IOC_FM_PCD_HDR_INDEX_LAST = 0xFF /**< may be used for VLAN, MPLS */
-+} ioc_fm_pcd_hdr_index;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile functional type
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_profile_type_selection {
-+ e_IOC_FM_PCD_PLCR_PORT_PRIVATE, /**< Port dedicated profile */
-+ e_IOC_FM_PCD_PLCR_SHARED /**< Shared profile (shared within partition) */
-+} ioc_fm_pcd_profile_type_selection;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile algorithm
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_plcr_algorithm_selection {
-+ e_IOC_FM_PCD_PLCR_PASS_THROUGH, /**< Policer pass through */
-+ e_IOC_FM_PCD_PLCR_RFC_2698, /**< Policer algorithm RFC 2698 */
-+ e_IOC_FM_PCD_PLCR_RFC_4115 /**< Policer algorithm RFC 4115 */
-+} ioc_fm_pcd_plcr_algorithm_selection;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting a policer profile color mode
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_plcr_color_mode {
-+ e_IOC_FM_PCD_PLCR_COLOR_BLIND, /**< Color blind */
-+ e_IOC_FM_PCD_PLCR_COLOR_AWARE /**< Color aware */
-+} ioc_fm_pcd_plcr_color_mode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting a policer profile color
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_plcr_color {
-+ e_IOC_FM_PCD_PLCR_GREEN, /**< Green */
-+ e_IOC_FM_PCD_PLCR_YELLOW, /**< Yellow */
-+ e_IOC_FM_PCD_PLCR_RED, /**< Red */
-+ e_IOC_FM_PCD_PLCR_OVERRIDE /**< Color override */
-+} ioc_fm_pcd_plcr_color;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile packet frame length selector
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_plcr_frame_length_select {
-+ e_IOC_FM_PCD_PLCR_L2_FRM_LEN, /**< L2 frame length */
-+ e_IOC_FM_PCD_PLCR_L3_FRM_LEN, /**< L3 frame length */
-+ e_IOC_FM_PCD_PLCR_L4_FRM_LEN, /**< L4 frame length */
-+ e_IOC_FM_PCD_PLCR_FULL_FRM_LEN /**< Full frame length */
-+} ioc_fm_pcd_plcr_frame_length_select;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting roll-back frame
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_plcr_roll_back_frame_select {
-+ e_IOC_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN, /**< Rollback L2 frame length */
-+ e_IOC_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN /**< Rollback Full frame length */
-+} ioc_fm_pcd_plcr_roll_back_frame_select;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer profile packet or byte mode
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_plcr_rate_mode {
-+ e_IOC_FM_PCD_PLCR_BYTE_MODE, /**< Byte mode */
-+ e_IOC_FM_PCD_PLCR_PACKET_MODE /**< Packet mode */
-+} ioc_fm_pcd_plcr_rate_mode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for defining action of frame
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_done_action {
-+ e_IOC_FM_PCD_ENQ_FRAME = 0, /**< Enqueue frame */
-+ e_IOC_FM_PCD_DROP_FRAME /**< Drop frame */
-+} ioc_fm_pcd_done_action;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the policer counter
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_plcr_profile_counters {
-+ e_IOC_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER, /**< Green packets counter */
-+ e_IOC_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER, /**< Yellow packets counter */
-+ e_IOC_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER, /**< Red packets counter */
-+ e_IOC_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER, /**< Recolored yellow packets counter */
-+ e_IOC_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER /**< Recolored red packets counter */
-+} ioc_fm_pcd_plcr_profile_counters;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting the PCD action after extraction
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_action {
-+ e_IOC_FM_PCD_ACTION_NONE, /**< NONE */
-+ e_IOC_FM_PCD_ACTION_EXACT_MATCH, /**< Exact match on the selected extraction*/
-+ e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP /**< Indexed lookup on the selected extraction*/
-+} ioc_fm_pcd_action;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of insert manipulation
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_insrt_type {
-+ e_IOC_FM_PCD_MANIP_INSRT_GENERIC, /**< Insert according to offset & size */
-+ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR, /**< Insert according to protocol */
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ e_IOC_FM_PCD_MANIP_INSRT_BY_TEMPLATE /**< Insert template to start of frame */
-+#endif /* FM_CAPWAP_SUPPORT */
-+} ioc_fm_pcd_manip_hdr_insrt_type;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of remove manipulation
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_rmv_type {
-+ e_IOC_FM_PCD_MANIP_RMV_GENERIC, /**< Remove according to offset & size */
-+    e_IOC_FM_PCD_MANIP_RMV_BY_HDR                   /**< Remove according to protocol */
-+} ioc_fm_pcd_manip_hdr_rmv_type;
-+
-+/**************************************************************************//**
-+ @Description An enum for selecting specific L2 fields removal
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_rmv_specific_l2 {
-+ e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET, /**< Ethernet/802.3 MAC */
-+ e_IOC_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS, /**< stacked QTags */
-+ e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS, /**< MPLS and Ethernet/802.3 MAC header until
-+ the header which follows the MPLS header */
-+ e_IOC_FM_PCD_MANIP_HDR_RMV_MPLS /**< Remove MPLS header (Unlimited MPLS labels) */
-+} ioc_fm_pcd_manip_hdr_rmv_specific_l2;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific fields updates
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_field_update_type {
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN, /**< VLAN updates */
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4, /**< IPV4 updates */
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6, /**< IPV6 updates */
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP, /**< TCP_UDP updates */
-+} ioc_fm_pcd_manip_hdr_field_update_type;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting VLAN updates
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_field_update_vlan {
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI, /**< Replace VPri of outer most VLAN tag. */
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN /**< DSCP to VLAN priority bits translation */
-+} ioc_fm_pcd_manip_hdr_field_update_vlan;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific L2 fields removal
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_insrt_specific_l2 {
-+ e_IOC_FM_PCD_MANIP_HDR_INSRT_MPLS /**< Insert MPLS header (Unlimited MPLS labels) */
-+} ioc_fm_pcd_manip_hdr_insrt_specific_l2;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting QoS mapping mode
-+
-+                Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE',
-+                the user should instruct the port to read the parser-result.
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_qos_mapping_mode {
-+ e_IOC_FM_PCD_MANIP_HDR_QOS_MAPPING_NONE = 0, /**< No mapping, QoS field will not be changed */
-+ e_IOC_FM_PCD_MANIP_HDR_QOS_MAPPING_AS_IS, /**< QoS field will be overwritten by the last byte in the parser-result. */
-+} ioc_fm_pcd_manip_hdr_qos_mapping_mode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting QoS source
-+
-+                Note: In all cases except 'e_FM_PCD_MANIP_HDR_QOS_SRC_NONE',
-+                the user should leave room for the parser-result in the input/output buffer
-+                and instruct the port to read/write the parser-result to the buffer (RPD should be set).
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_qos_src {
-+ e_IOC_FM_PCD_MANIP_HDR_QOS_SRC_NONE = 0, /**< TODO */
-+ e_IOC_FM_PCD_MANIP_HDR_QOS_SRC_USER_DEFINED, /**< QoS will be taken from the last byte in the parser-result. */
-+} ioc_fm_pcd_manip_hdr_qos_src;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of header insertion
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_insrt_by_hdr_type {
-+ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2, /**< Specific L2 fields insertion */
-+#if (DPAA_VERSION >= 11)
-+ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_IP, /**< IP insertion */
-+ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_UDP, /**< UDP insertion */
-+ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, /**< UDP lite insertion */
-+ e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP /**< CAPWAP insertion */
-+#endif /* (DPAA_VERSION >= 11) */
-+} ioc_fm_pcd_manip_hdr_insrt_by_hdr_type;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific custom command
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_custom_type {
-+ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE, /**< Replace IPv4/IPv6 */
-+} ioc_fm_pcd_manip_hdr_custom_type;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting specific custom command
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_custom_ip_replace {
-+ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV4_BY_IPV6, /**< Replace IPv4 by IPv6 */
-+ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 /**< Replace IPv6 by IPv4 */
-+} ioc_fm_pcd_manip_hdr_custom_ip_replace;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of header removal
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_hdr_rmv_by_hdr_type {
-+ e_IOC_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2 = 0, /**< Specific L2 fields removal */
-+#if (DPAA_VERSION >= 11)
-+ e_IOC_FM_PCD_MANIP_RMV_BY_HDR_CAPWAP, /**< CAPWAP removal */
-+#endif /* (DPAA_VERSION >= 11) */
-+#if (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ e_IOC_FM_PCD_MANIP_RMV_BY_HDR_FROM_START, /**< Locate from data that is not the header */
-+#endif /* (DPAA_VERSION >= 11) || ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT)) */
-+} ioc_fm_pcd_manip_hdr_rmv_by_hdr_type;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of timeout mode
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_reassem_time_out_mode {
-+ e_IOC_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES, /**< Limits the time of the reassembly process
-+ from the first fragment to the last */
-+    e_IOC_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG            /**< Limits the time allowed between receiving consecutive fragments */
-+} ioc_fm_pcd_manip_reassem_time_out_mode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of WaysNumber mode
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_reassem_ways_number {
-+ e_IOC_FM_PCD_MANIP_ONE_WAY_HASH = 1, /**< One way hash */
-+ e_IOC_FM_PCD_MANIP_TWO_WAYS_HASH, /**< Two ways hash */
-+ e_IOC_FM_PCD_MANIP_THREE_WAYS_HASH, /**< Three ways hash */
-+ e_IOC_FM_PCD_MANIP_FOUR_WAYS_HASH, /**< Four ways hash */
-+ e_IOC_FM_PCD_MANIP_FIVE_WAYS_HASH, /**< Five ways hash */
-+ e_IOC_FM_PCD_MANIP_SIX_WAYS_HASH, /**< Six ways hash */
-+ e_IOC_FM_PCD_MANIP_SEVEN_WAYS_HASH, /**< Seven ways hash */
-+ e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH /**< Eight ways hash */
-+} ioc_fm_pcd_manip_reassem_ways_number;
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of statistics mode
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_stats {
-+ e_IOC_FM_PCD_STATS_PER_FLOWID = 0 /**< Flow ID is used as index for getting statistics */
-+} ioc_fm_pcd_stats;
-+#endif
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting manipulation type
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_type {
-+ e_IOC_FM_PCD_MANIP_HDR = 0, /**< Header manipulation */
-+ e_IOC_FM_PCD_MANIP_REASSEM, /**< Reassembly */
-+ e_IOC_FM_PCD_MANIP_FRAG, /**< Fragmentation */
-+ e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD /**< Special Offloading */
-+} ioc_fm_pcd_manip_type;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of statistics mode
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_cc_stats_mode {
-+ e_IOC_FM_PCD_CC_STATS_MODE_NONE = 0, /**< No statistics support */
-+ e_IOC_FM_PCD_CC_STATS_MODE_FRAME, /**< Frame count statistics */
-+ e_IOC_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME, /**< Byte and frame count statistics */
-+#if (DPAA_VERSION >= 11)
-+ e_IOC_FM_PCD_CC_STATS_MODE_RMON, /**< Byte and frame length range count statistics */
-+#endif /* (DPAA_VERSION >= 11) */
-+} ioc_fm_pcd_cc_stats_mode;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for determining the action in case an IP packet
-+ is larger than MTU but its DF (Don't Fragment) bit is set.
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_dont_frag_action {
-+ e_IOC_FM_PCD_MANIP_DISCARD_PACKET = 0, /**< Discard packet */
-+ e_IOC_FM_PCD_MANIP_ENQ_TO_ERR_Q_OR_DISCARD_PACKET = e_IOC_FM_PCD_MANIP_DISCARD_PACKET,
-+ /**< Obsolete, cannot enqueue to error queue;
-+ In practice, selects to discard packets;
-+ Will be removed in the future */
-+ e_IOC_FM_PCD_MANIP_FRAGMENT_PACKECT, /**< Fragment packet and continue normal processing */
-+ e_IOC_FM_PCD_MANIP_CONTINUE_WITHOUT_FRAG /**< Continue normal processing without fragmenting the packet */
-+} ioc_fm_pcd_manip_dont_frag_action;
-+
-+/**************************************************************************//**
-+ @Description Enumeration type for selecting type of special offload manipulation
-+*//***************************************************************************/
-+typedef enum ioc_fm_pcd_manip_special_offload_type {
-+ e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC, /**< IPSec offload manipulation */
-+#if (DPAA_VERSION >= 11)
-+ e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP /**< CAPWAP offload manipulation */
-+#endif /* (DPAA_VERSION >= 11) */
-+} ioc_fm_pcd_manip_special_offload_type;
-+
-+/**************************************************************************//**
-+ @Description A union of protocol dependent special options
-+ (Must match union u_FmPcdHdrProtocolOpt defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef union ioc_fm_pcd_hdr_protocol_opt_u {
-+ ioc_eth_protocol_opt_t eth_opt; /**< Ethernet options */
-+ ioc_vlan_protocol_opt_t vlan_opt; /**< Vlan options */
-+ ioc_mpls_protocol_opt_t mpls_opt; /**< MPLS options */
-+ ioc_ipv4_protocol_opt_t ipv4_opt; /**< IPv4 options */
-+ ioc_ipv6_protocol_opt_t ipv6_opt; /**< IPv6 options */
-+#if (DPAA_VERSION >= 11)
-+ ioc_capwap_protocol_opt_t capwap_opt; /**< CAPWAP options */
-+#endif /* (DPAA_VERSION >= 11) */
-+} ioc_fm_pcd_hdr_protocol_opt_u;
-+
-+/**************************************************************************//**
-+ @Description A union holding all known protocol fields
-+*//***************************************************************************/
-+typedef union ioc_fm_pcd_fields_u {
-+ ioc_header_field_eth_t eth; /**< Ethernet */
-+ ioc_header_field_vlan_t vlan; /**< VLAN */
-+ ioc_header_field_llc_snap_t llc_snap; /**< LLC SNAP */
-+ ioc_header_field_pppoe_t pppoe; /**< PPPoE */
-+ ioc_header_field_mpls_t mpls; /**< MPLS */
-+ ioc_header_field_ip_t ip; /**< IP */
-+ ioc_header_field_ipv4_t ipv4; /**< IPv4 */
-+ ioc_header_field_ipv6_t ipv6; /**< IPv6 */
-+ ioc_header_field_udp_t udp; /**< UDP */
-+ ioc_header_field_udp_lite_t udp_lite; /**< UDP_Lite */
-+ ioc_header_field_tcp_t tcp; /**< TCP */
-+ ioc_header_field_sctp_t sctp; /**< SCTP */
-+ ioc_header_field_dccp_t dccp; /**< DCCP */
-+ ioc_header_field_gre_t gre; /**< GRE */
-+ ioc_header_field_minencap_t minencap; /**< Minimal Encapsulation */
-+ ioc_header_field_ipsec_ah_t ipsec_ah; /**< IPSec AH */
-+ ioc_header_field_ipsec_esp_t ipsec_esp; /**< IPSec ESP */
-+ ioc_header_field_udp_encap_esp_t udp_encap_esp; /**< UDP Encapsulation ESP */
-+} ioc_fm_pcd_fields_u;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header extraction for key generation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_from_hdr_t {
-+ uint8_t size; /**< Size in byte */
-+ uint8_t offset; /**< Byte offset */
-+} ioc_fm_pcd_from_hdr_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining field extraction for key generation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_from_field_t {
-+ ioc_fm_pcd_fields_u field; /**< Field selection */
-+ uint8_t size; /**< Size in byte */
-+ uint8_t offset; /**< Byte offset */
-+} ioc_fm_pcd_from_field_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a single network environment unit
-+ A distinction unit should be defined if it will later be used
-+ by one or more PCD engines to distinguish between flows.
-+ (Must match struct t_FmPcdDistinctionUnit defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_distinction_unit_t {
-+ struct {
-+ ioc_net_header_type hdr; /**< One of the headers supported by the FM */
-+ ioc_fm_pcd_hdr_protocol_opt_u opt; /**< Select only one option! */
-+ } hdrs[IOC_FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS];
-+} ioc_fm_pcd_distinction_unit_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining all different distinction units supported
-+ by a specific PCD Network Environment Characteristics module.
-+
-+                Each unit represents a protocol or a group of protocols that may
-+ be used later by the different PCD engines to distinguish between flows.
-+ (Must match struct t_FmPcdNetEnvParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_net_env_params_t {
-+ uint8_t num_of_distinction_units;/**< Number of different units to be identified */
-+ ioc_fm_pcd_distinction_unit_t units[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
-+ /**< An array of num_of_distinction_units of the
-+ different units to be identified */
-+ void *id; /**< Output parameter; Returns the net-env Id to be used */
-+} ioc_fm_pcd_net_env_params_t;
-+
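-+/* Editorial sketch (not part of the original header): building a minimal
-+ * network environment description with two single-header distinction units.
-+ * The wrapper name is an illustrative assumption; the header types are passed
-+ * in by the caller, and zero-filled entries are assumed to mean "unused".
-+ */
-+static inline void example_fill_net_env(ioc_fm_pcd_net_env_params_t *p,
-+                                        ioc_net_header_type hdr0,
-+                                        ioc_net_header_type hdr1)
-+{
-+    memset(p, 0, sizeof(*p));
-+    p->num_of_distinction_units = 2;    /* two units are valid in the array below */
-+    p->units[0].hdrs[0].hdr = hdr0;     /* first distinction unit: a single header */
-+    p->units[1].hdrs[0].hdr = hdr1;     /* second distinction unit: a single header */
-+    p->id = NULL;                       /* output: filled with the net-env id on success */
-+}
-+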
-+/**************************************************************************//**
-+ @Description Parameters for defining a single extraction action when
-+ creating a key
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_extract_entry_t {
-+ ioc_fm_pcd_extract_type type; /**< Extraction type select */
-+ union {
-+ struct {
-+ ioc_net_header_type hdr; /**< Header selection */
-+ bool ignore_protocol_validation;
-+ /**< Ignore protocol validation */
-+ ioc_fm_pcd_hdr_index hdr_index; /**< Relevant only for MPLS, VLAN and tunneled
-+ IP. Otherwise should be cleared.*/
-+ ioc_fm_pcd_extract_by_hdr_type type; /**< Header extraction type select */
-+ union {
-+ ioc_fm_pcd_from_hdr_t from_hdr; /**< Extract bytes from header parameters */
-+ ioc_fm_pcd_from_field_t from_field; /**< Extract bytes from field parameters */
-+ ioc_fm_pcd_fields_u full_field; /**< Extract full field parameters */
-+ } extract_by_hdr_type;
-+ } extract_by_hdr; /**< Used when type = e_IOC_FM_PCD_KG_EXTRACT_BY_HDR */
-+ struct {
-+ ioc_fm_pcd_extract_from src; /**< Non-header extraction source */
-+ ioc_fm_pcd_action action; /**< Relevant for CC Only */
-+ uint16_t ic_indx_mask; /**< Relevant only for CC when
-+ action = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP;
-+ Note that the number of bits that are set within
-+ this mask must be log2 of the CC-node 'num_of_keys'.
-+ Note that the mask cannot be set on the lower bits. */
-+ uint8_t offset; /**< Byte offset */
-+ uint8_t size; /**< Size in bytes */
-+ } extract_non_hdr; /**< Used when type = e_IOC_FM_PCD_KG_EXTRACT_NON_HDR */
-+ } extract_params;
-+} ioc_fm_pcd_extract_entry_t;
-+
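-+/* Editorial sketch (not part of the original header): filling one extraction
-+ * entry that takes bytes from the parser result (a non-header source). The
-+ * wrapper name is an illustrative assumption.
-+ */
-+static inline void example_fill_extract_from_parse_result(ioc_fm_pcd_extract_entry_t *e,
-+                                                          uint8_t offset, uint8_t size)
-+{
-+    memset(e, 0, sizeof(*e));
-+    e->type = e_IOC_FM_PCD_EXTRACT_NON_HDR;                 /* not extracted from a header */
-+    e->extract_params.extract_non_hdr.src    = e_IOC_FM_PCD_EXTRACT_FROM_PARSE_RESULT;
-+    e->extract_params.extract_non_hdr.action = e_IOC_FM_PCD_ACTION_NONE;   /* no CC action */
-+    e->extract_params.extract_non_hdr.offset = offset;      /* byte offset into the parse result */
-+    e->extract_params.extract_non_hdr.size   = size;        /* number of bytes to extract */
-+}
-+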
-+/**************************************************************************//**
-+ @Description A structure for defining masks for each extracted
-+ field in the key.
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_extract_mask_t {
-+ uint8_t extract_array_index; /**< Index in the extraction array, as initialized by user */
-+ uint8_t offset; /**< Byte offset */
-+ uint8_t mask; /**< A byte mask (selected bits will be ignored) */
-+} ioc_fm_pcd_kg_extract_mask_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining default selection per groups
-+ of fields
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_extract_dflt_t {
-+ ioc_fm_pcd_kg_known_fields_dflt_types type; /**< Default type select*/
-+ ioc_fm_pcd_kg_extract_dflt_select dflt_select; /**< Default register select */
-+} ioc_fm_pcd_kg_extract_dflt_t;
-+
-+
-+/**************************************************************************//**
-+ @Description A structure for defining all parameters needed for
-+ generation a key and using a hash function
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_key_extract_and_hash_params_t {
-+ uint32_t private_dflt0; /**< Scheme default register 0 */
-+ uint32_t private_dflt1; /**< Scheme default register 1 */
-+ uint8_t num_of_used_extracts; /**< defines the valid size of the following array */
-+ ioc_fm_pcd_extract_entry_t extract_array [IOC_FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY];
-+ /**< An array of extraction definitions. */
-+ uint8_t num_of_used_dflts; /**< defines the valid size of the following array */
-+ ioc_fm_pcd_kg_extract_dflt_t dflts[IOC_FM_PCD_KG_NUM_OF_DEFAULT_GROUPS];
-+                                                            /**< For each extraction used in this scheme, specify the required
-+                                                                 default register to be used when the header is not found.
-+                                                                 Types not specified in this array will get an undefined value. */
-+ uint8_t num_of_used_masks; /**< Defines the valid size of the following array */
-+ ioc_fm_pcd_kg_extract_mask_t masks[IOC_FM_PCD_KG_NUM_OF_EXTRACT_MASKS];
-+ uint8_t hash_shift; /**< Hash result right shift.
-+ Selects the 24 bits out of the 64 hash result.
-+ 0 means using the 24 LSB's, otherwise use the
-+ 24 LSB's after shifting right.*/
-+ uint32_t hash_distribution_num_of_fqids; /**< must be > 1 and a power of 2. Represents the range
-+ of queues for the key and hash functionality */
-+    uint8_t                                 hash_distribution_fqids_shift; /**< selects the FQID bits that will be affected by the hash */
-+ bool symmetric_hash; /**< TRUE to generate the same hash for frames with swapped source and
-+ destination fields on all layers; If TRUE, driver will check that for
-+ all layers, if SRC extraction is selected, DST extraction must also be
-+ selected, and vice versa. */
-+} ioc_fm_pcd_kg_key_extract_and_hash_params_t;
-+
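-+/* Editorial sketch (not part of the original header): configuring the hash
-+ * distribution part of the key-extract-and-hash parameters so that the hash
-+ * spreads frames over a power-of-two range of FQIDs. The wrapper name is an
-+ * illustrative assumption; the caller is expected to have zeroed the structure
-+ * and filled the extraction array already.
-+ */
-+static inline void example_cfg_hash_distribution(ioc_fm_pcd_kg_key_extract_and_hash_params_t *k,
-+                                                 uint32_t num_of_fqids)
-+{
-+    /* num_of_fqids must be greater than 1 and a power of 2 (see the field description above) */
-+    k->hash_shift                      = 0;             /* use the 24 LSBs of the hash result */
-+    k->hash_distribution_num_of_fqids  = num_of_fqids;  /* range of queues covered by the hash */
-+    k->hash_distribution_fqids_shift   = 0;             /* hash affects the lowest FQID bits */
-+    k->symmetric_hash                  = 0;             /* no symmetric (src/dst-swapped) hashing */
-+}
-+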
-+/**************************************************************************//**
-+ @Description A structure of parameters for defining a single
-+ Qid mask (extracted OR).
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_extracted_or_params_t {
-+ ioc_fm_pcd_extract_type type; /**< Extraction type select */
-+ union {
-+ struct { /**< used when type = e_IOC_FM_PCD_KG_EXTRACT_BY_HDR */
-+ ioc_net_header_type hdr;
-+ ioc_fm_pcd_hdr_index hdr_index; /**< Relevant only for MPLS, VLAN and tunneled
-+ IP. Otherwise should be cleared.*/
-+ bool ignore_protocol_validation;
-+
-+ } extract_by_hdr;
-+ ioc_fm_pcd_extract_from src; /**< used when type = e_IOC_FM_PCD_KG_EXTRACT_NON_HDR */
-+ } extract_params;
-+ uint8_t extraction_offset; /**< Offset for extraction */
-+ ioc_fm_pcd_kg_extract_dflt_select dflt_value; /**< Select register from which extraction is taken if
-+ field not found */
-+ uint8_t mask; /**< Mask LSB byte of extraction (specified bits are ignored) */
-+    uint8_t                             bit_offset_in_fqid;     /**< 0-31, Selects which bits of the 24 FQID bits to affect using
-+                                                                 the extracted byte; Assume the byte is placed as the 8 MSB's in
-+                                                                 a 32 bit word where the lower bits
-+                                                                 are the FQID; i.e. if bitOffsetInFqid=1 then its LSB
-+                                                                 will affect the FQID MSB, if bitOffsetInFqid=24 then the
-+                                                                 extracted byte will affect the 8 LSB's of the FQID,
-+                                                                 if bitOffsetInFqid=31 then the byte's MSB will affect
-+                                                                 the FQID's LSB; 0 means no effect on FQID;
-+                                                                 Note that one, and only one, of
-+                                                                 bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e.,
-+                                                                 the extracted byte must affect either FQID or Policer profile).*/
-+ uint8_t bit_offset_in_plcr_profile;
-+                                                            /**< 0-15, Selects which bits of the 8 policer profile id bits to
-+                                                                 affect using the extracted byte; Assume the byte is placed
-+                                                                 as the 8 MSB's in a 16 bit word where the lower bits
-+                                                                 are the policer profile id; i.e. if bitOffsetInPlcrProfile=1
-+                                                                 then its LSB will affect the profile MSB, if bitOffsetInPlcrProfile=8
-+                                                                 then the extracted byte will affect the whole policer profile id,
-+                                                                 if bitOffsetInPlcrProfile=15 then the byte's MSB will affect
-+                                                                 the Policer Profile id's LSB;
-+                                                                 0 means no effect on policer profile; Note that one, and only one, of
-+                                                                 bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e.,
-+                                                                 the extracted byte must affect either FQID or Policer profile).*/
-+} ioc_fm_pcd_kg_extracted_or_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for configuring scheme counter
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_scheme_counter_t {
-+ bool update; /**< FALSE to keep the current counter state
-+ and continue from that point, TRUE to update/reset
-+ the counter when the scheme is written. */
-+ uint32_t value; /**< If update=TRUE, this value will be written into the
-+ counter; clear this field to reset the counter. */
-+} ioc_fm_pcd_kg_scheme_counter_t;
-+
-+
-+/**************************************************************************//**
-+ @Description A structure for retrieving FMKG_SE_SPC
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_scheme_spc_t {
-+ uint32_t val; /**< return value */
-+ void *id; /**< scheme handle */
-+} ioc_fm_pcd_kg_scheme_spc_t;
-+
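-+/* Editorial sketch (not part of the original header): reading a KeyGen scheme
-+ * packet counter (FMKG_SE_SPC) via FM_PCD_IOC_KG_SCHEME_GET_CNTR, defined
-+ * earlier in this file. The wrapper name is an illustrative assumption.
-+ */
-+static inline int example_fm_pcd_kg_scheme_get_cntr(int pcd_fd, void *scheme_id, uint32_t *value)
-+{
-+    ioc_fm_pcd_kg_scheme_spc_t spc;
-+    int err;
-+
-+    spc.id  = scheme_id;    /* scheme handle obtained when the scheme was set */
-+    spc.val = 0;
-+    err = ioctl(pcd_fd, FM_PCD_IOC_KG_SCHEME_GET_CNTR, &spc);
-+    if (err == 0)
-+        *value = spc.val;   /* packet count returned by the driver */
-+    return err;
-+}
-+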
-+/**************************************************************************//**
-+ @Description A structure for defining policer profile parameters as required by keygen
-+ (when policer is the next engine after this scheme).
-+ (Must match struct t_FmPcdKgPlcrProfile defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_plcr_profile_t {
-+    bool        shared_profile;                 /**< TRUE if this profile is shared between ports
-+                                                 (i.e. managed by the master partition); may not be TRUE
-+                                                 if the profile is used after Coarse Classification. */
-+ bool direct; /**< If TRUE, direct_relative_profile_id only selects the profile
-+ id, if FALSE fqid_offset_relative_profile_id_base is used
-+ together with fqid_offset_shift and num_of_profiles
-+ parameters, to define a range of profiles from
-+ which the KeyGen result will determine the
-+ destination policer profile. */
-+ union {
-+ uint16_t direct_relative_profile_id; /**< Used if 'direct' is TRUE, to select policer profile.
-+ This parameter should indicate the policer profile offset within the port's
-+ policer profiles or SHARED window. */
-+ struct {
-+ uint8_t fqid_offset_shift; /**< Shift of KG results without the qid base */
-+ uint8_t fqid_offset_relative_profile_id_base;
-+                                            /**< OR of KG results without the qid base;
-+                                                 this parameter should indicate the policer profile
-+                                                 offset within the port's policer profiles window
-+                                                 or the SHARED window, depending on shared_profile */
-+ uint8_t num_of_profiles; /**< Range of profiles starting at base */
-+ } indirect_profile; /**< Indirect profile parameters */
-+ } profile_select; /**< Direct/indirect profile selection and parameters */
-+} ioc_fm_pcd_kg_plcr_profile_t;
-+
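-+/* Editorial sketch (not part of the original header): selecting a single,
-+ * port-private policer profile directly as the next engine after KeyGen. The
-+ * wrapper name is an illustrative assumption.
-+ */
-+static inline void example_kg_plcr_direct_profile(ioc_fm_pcd_kg_plcr_profile_t *p,
-+                                                  uint16_t relative_profile_id)
-+{
-+    memset(p, 0, sizeof(*p));
-+    p->shared_profile = 0;      /* port-private profile (not managed by the master partition) */
-+    p->direct         = 1;      /* select one profile directly, no FQID-based range */
-+    p->profile_select.direct_relative_profile_id = relative_profile_id;
-+}
-+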
-+#if DPAA_VERSION >= 11
-+/**************************************************************************//**
-+ @Description Parameters for configuring a storage profile for a KeyGen scheme.
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_storage_profile_t {
-+ bool direct; /**< If TRUE, directRelativeProfileId only selects the
-+ profile id;
-+ If FALSE, fqidOffsetRelativeProfileIdBase is used
-+ together with fqidOffsetShift and numOfProfiles
-+ parameters to define a range of profiles from which
-+ the KeyGen result will determine the destination
-+ storage profile. */
-+ union {
-+ uint16_t direct_relative_profileId; /**< Used when 'direct' is TRUE, to select a storage profile;
-+ should indicate the storage profile offset within the
-+ port's storage profiles window. */
-+ struct {
-+ uint8_t fqid_offset_shift; /**< Shift of KeyGen results without the FQID base */
-+ uint8_t fqid_offset_relative_profile_id_base;
-+ /**< OR of KeyGen results without the FQID base;
-+                                                            should indicate the storage profile offset within the
-+ port's storage profiles window. */
-+ uint8_t num_of_profiles; /**< Range of profiles starting at base. */
-+ } indirect_profile; /**< Indirect profile parameters. */
-+ } profile_select; /**< Direct/indirect profile selection and parameters. */
-+} ioc_fm_pcd_kg_storage_profile_t;
-+#endif /* DPAA_VERSION >= 11 */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CC as the next engine after KeyGen
-+ (Must match struct t_FmPcdKgCc defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_cc_t {
-+ void *tree_id; /**< CC Tree id */
-+ uint8_t grp_id; /**< CC group id within the CC tree */
-+ bool plcr_next; /**< TRUE if after CC, in case of data frame,
-+ policing is required. */
-+ bool bypass_plcr_profile_generation;
-+ /**< TRUE to bypass KeyGen policer profile generation;
-+ selected profile is the one set at port initialization. */
-+ ioc_fm_pcd_kg_plcr_profile_t plcr_profile; /**< Valid only if plcr_next = TRUE and
-+ bypass_plcr_profile_generation = FALSE */
-+} ioc_fm_pcd_kg_cc_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining initializing a KeyGen scheme
-+ (Must match struct t_FmPcdKgSchemeParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_scheme_params_t {
-+ bool modify; /**< TRUE to change an existing scheme */
-+ union {
-+ uint8_t relative_scheme_id;
-+ /**< if modify=FALSE: partition-relative scheme id */
-+ void *scheme_id; /**< if modify=TRUE: the id of an existing scheme */
-+ } scm_id;
-+ bool always_direct; /**< This scheme is reached only directly, i.e. no need
-+ for match vector; KeyGen will ignore it when matching */
-+ struct { /**< HL relevant only if always_direct=FALSE */
-+ void *net_env_id; /**< The id of the Network Environment as returned
-+ by FM_PCD_NetEnvCharacteristicsSet() */
-+ uint8_t num_of_distinction_units;
-+ /**< Number of NetEnv units listed in unit_ids array */
-+ uint8_t unit_ids[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS];
-+ /**< Indexes as passed to SetNetEnvCharacteristics (?) array */
-+ } net_env_params;
-+ bool use_hash; /**< use the KG Hash functionality */
-+ ioc_fm_pcd_kg_key_extract_and_hash_params_t key_extract_and_hash_params;
-+ /**< used only if useHash = TRUE */
-+ bool bypass_fqid_generation;
-+ /**< Normally - FALSE, TRUE to avoid FQID update in the IC;
-+ In such a case FQID after KG will be the default FQID
-+ defined for the relevant port, or the FQID defined by CC
-+ in cases where CC was the previous engine. */
-+ uint32_t base_fqid; /**< Base FQID; Relevant only if bypass_fqid_generation = FALSE;
-+ If hash is used and an even distribution is expected
-+ according to hash_distribution_num_of_fqids, base_fqid must be aligned to
-+ hash_distribution_num_of_fqids. */
-+ uint8_t num_of_used_extracted_ors;
-+ /**< Number of FQID masks listed in extracted_ors array*/
-+ ioc_fm_pcd_kg_extracted_or_params_t extracted_ors[IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS];
-+ /**< IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS
-+ registers are shared between qid_masks
-+ functionality and some of the extraction
-+ actions; Normally only some will be used
-+ for qid_mask. Driver will return error if
-+ resource is full at initialization time. */
-+#if DPAA_VERSION >= 11
-+ bool override_storage_profile;
-+                                            /**< TRUE if KeyGen overrides the previously decided storage profile */
-+ ioc_fm_pcd_kg_storage_profile_t storage_profile;/**< Used when override_storage_profile=TRUE */
-+#endif /* DPAA_VERSION >= 11 */
-+ ioc_fm_pcd_engine next_engine; /**< may be BMI, PLCR or CC */
-+ union { /**< depends on nextEngine */
-+ ioc_fm_pcd_done_action done_action; /**< Used when next engine is BMI (done) */
-+ ioc_fm_pcd_kg_plcr_profile_t plcr_profile; /**< Used when next engine is PLCR */
-+ ioc_fm_pcd_kg_cc_t cc; /**< Used when next engine is CC */
-+ } kg_next_engine_params;
-+ ioc_fm_pcd_kg_scheme_counter_t scheme_counter; /**< A structure of parameters for updating
-+ the scheme counter */
-+ void *id; /**< Returns the scheme Id to be used */
-+} ioc_fm_pcd_kg_scheme_params_t;
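-+
-+/* Usage sketch (illustrative only, not taken from the driver sources):
-+ * a minimal hash-based scheme definition. The surrounding setup (net-env
-+ * id, key extraction, the ioctl call itself) is assumed and omitted; only
-+ * fields of the structure above and enum values already referenced in
-+ * this header are used, and base_fqid/relative_scheme_id are placeholders.
-+ *
-+ *     ioc_fm_pcd_kg_scheme_params_t scheme;
-+ *
-+ *     memset(&scheme, 0, sizeof(scheme));
-+ *     scheme.modify                    = FALSE;       // create a new scheme
-+ *     scheme.scm_id.relative_scheme_id = 0;           // partition-relative id
-+ *     scheme.always_direct             = FALSE;
-+ *     scheme.use_hash                  = TRUE;        // distribute by hash
-+ *     scheme.bypass_fqid_generation    = FALSE;
-+ *     scheme.base_fqid                 = 0x400;       // must be aligned to the hash
-+ *                                                     // distribution size
-+ *     scheme.next_engine               = e_IOC_FM_PCD_DONE;
-+ *     scheme.kg_next_engine_params.done_action = e_IOC_FM_PCD_ENQ_FRAME;
-+ *     // scheme.id is returned by the driver after the scheme is written
-+ */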
-+
-+/**************************************************************************//**
-+ @Collection
-+*//***************************************************************************/
-+#if DPAA_VERSION >= 11
-+#define IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR 10 /* Maximal supported number of frame length ranges */
-+#define IOC_FM_PCD_CC_STATS_FLR_SIZE 2 /* Size in bytes of a frame length range limit */
-+#endif /* DPAA_VERSION >= 11 */
-+#define IOC_FM_PCD_CC_STATS_FLR_COUNT_SIZE 4 /* Size in bytes of a frame length range counter */
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CC as the next engine after a CC node.
-+ (Must match struct t_FmPcdCcNextCcParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_next_cc_params_t {
-+ void *cc_node_id; /**< Id of the next CC node */
-+} ioc_fm_pcd_cc_next_cc_params_t;
-+
-+#if DPAA_VERSION >= 11
-+/**************************************************************************//**
-+ @Description A structure for defining Frame Replicator as the next engine after a CC node.
-+ (Must match struct t_FmPcdCcNextFrParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_next_fr_params_t {
-+ void* frm_replic_id; /**< The id of the next frame replicator group */
-+} ioc_fm_pcd_cc_next_fr_params_t;
-+#endif /* DPAA_VERSION >= 11 */
-+
-+/**************************************************************************//**
-+ @Description A structure for defining PLCR params when PLCR is the
-+ next engine after a CC node
-+ (Must match struct t_FmPcdCcNextPlcrParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_next_plcr_params_t {
-+    bool        override_params;           /**< TRUE if CC overrides previously decided parameters*/
-+ bool shared_profile; /**< Relevant only if overrideParams=TRUE:
-+ TRUE if this profile is shared between ports */
-+ uint16_t new_relative_profile_id; /**< Relevant only if overrideParams=TRUE:
-+ (otherwise profile id is taken from keygen);
-+ This parameter should indicate the policer
-+ profile offset within the port's
-+ policer profiles or from SHARED window.*/
-+ uint32_t new_fqid; /**< Relevant only if overrideParams=TRUE:
-+                                                FQID for enqueuing the frame;
-+ In earlier chips if policer next engine is KEYGEN,
-+ this parameter can be 0, because the KEYGEN always decides
-+ the enqueue FQID.*/
-+#if DPAA_VERSION >= 11
-+ uint8_t new_relative_storage_profile_id;
-+ /**< Indicates the relative storage profile offset within
-+ the port's storage profiles window;
-+ Relevant only if the port was configured with VSP. */
-+#endif /* DPAA_VERSION >= 11 */
-+} ioc_fm_pcd_cc_next_plcr_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining enqueue params when BMI is the
-+ next engine after a CC node
-+ (Must match struct t_FmPcdCcNextEnqueueParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_next_enqueue_params_t {
-+ ioc_fm_pcd_done_action action; /**< Action - when next engine is BMI (done) */
-+    bool        override_fqid;         /**< TRUE if CC overrides the previously decided FQID and VSPID,
-+ relevant if action = e_IOC_FM_PCD_ENQ_FRAME */
-+ uint32_t new_fqid; /**< Valid if overrideFqid=TRUE, FQID for enqueuing the frame
-+ (otherwise FQID is taken from KeyGen),
-+ relevant if action = e_IOC_FM_PCD_ENQ_FRAME*/
-+#if DPAA_VERSION >= 11
-+ uint8_t new_relative_storage_profile_id;
-+ /**< Valid if override_fqid=TRUE, Indicates the relative virtual
-+ storage profile offset within the port's storage profiles
-+ window; Relevant only if the port was configured with VSP. */
-+#endif /* DPAA_VERSION >= 11 */
-+
-+} ioc_fm_pcd_cc_next_enqueue_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining KG params when KG is the next engine after a CC node
-+ (Must match struct t_FmPcdCcNextKgParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_next_kg_params_t {
-+    bool        override_fqid;         /**< TRUE if CC overrides the previously decided FQID and VSPID;
-+                                            Note - these parameters are irrelevant for earlier chips */
-+    uint32_t    new_fqid;              /**< Valid if override_fqid=TRUE, FQID for enqueuing the frame
-+                                            (otherwise FQID is taken from KeyGen);
-+                                            Note - these parameters are irrelevant for earlier chips */
-+#if DPAA_VERSION >= 11
-+ uint8_t new_relative_storage_profile_id;
-+ /**< Valid if override_fqid=TRUE, Indicates the relative virtual
-+ storage profile offset within the port's storage profiles
-+ window; Relevant only if the port was configured with VSP. */
-+#endif /* DPAA_VERSION >= 11 */
-+ void *p_direct_scheme; /**< Direct scheme id to go to. */
-+} ioc_fm_pcd_cc_next_kg_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the next engine after a CC node.
-+ (Must match struct t_FmPcdCcNextEngineParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_next_engine_params_t {
-+ ioc_fm_pcd_engine next_engine; /**< User has to initialize parameters
-+ according to nextEngine definition */
-+ union {
-+ ioc_fm_pcd_cc_next_cc_params_t cc_params; /**< Parameters in case next engine is CC */
-+ ioc_fm_pcd_cc_next_plcr_params_t plcr_params; /**< Parameters in case next engine is PLCR */
-+ ioc_fm_pcd_cc_next_enqueue_params_t enqueue_params; /**< Parameters in case next engine is BMI */
-+ ioc_fm_pcd_cc_next_kg_params_t kg_params; /**< Parameters in case next engine is KG */
-+#if DPAA_VERSION >= 11
-+ ioc_fm_pcd_cc_next_fr_params_t fr_params; /**< Parameters in case next engine is FR */
-+#endif /* DPAA_VERSION >= 11 */
-+ } params; /**< Union used for all the next-engine parameters options */
-+ void *manip_id; /**< Handle to Manipulation object.
-+ Relevant if next engine is of type result
-+ (e_IOC_FM_PCD_PLCR, e_IOC_FM_PCD_KG, e_IOC_FM_PCD_DONE) */
-+ bool statistics_en; /**< If TRUE, statistics counters are incremented
-+ for each frame passing through this
-+ Coarse Classification entry. */
-+} ioc_fm_pcd_cc_next_engine_params_t;
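-+
-+/* Usage sketch (illustrative only, not taken from the driver sources):
-+ * selecting plain enqueue (BMI/done) as the next engine of a CC entry,
-+ * overriding the KeyGen-decided FQID. The enum values are the ones already
-+ * referenced in the comments above; the FQID value is a placeholder.
-+ *
-+ *     ioc_fm_pcd_cc_next_engine_params_t next;
-+ *
-+ *     memset(&next, 0, sizeof(next));
-+ *     next.next_engine                         = e_IOC_FM_PCD_DONE;
-+ *     next.params.enqueue_params.action        = e_IOC_FM_PCD_ENQ_FRAME;
-+ *     next.params.enqueue_params.override_fqid = TRUE;
-+ *     next.params.enqueue_params.new_fqid      = 0x500;
-+ *     next.statistics_en                       = TRUE;  // count hits on this entry
-+ */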
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a single CC key
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_key_params_t {
-+ uint8_t *p_key; /**< pointer to the key of the size defined in key_size */
-+    uint8_t                    *p_mask;     /**< pointer to the mask per key, of the size defined
-+                                                 in key_size; p_key and p_mask (if defined) have to be
-+                                                 of the same size as defined in key_size */
-+    ioc_fm_pcd_cc_next_engine_params_t      cc_next_engine_params;
-+                                            /**< parameters for the next engine for the key defined in p_key */
-+
-+} ioc_fm_pcd_cc_key_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining CC keys parameters
-+ The driver supports two methods for CC node allocation: dynamic and static.
-+ Static mode was created in order to prevent runtime alloc/free
-+ of FMan memory (MURAM), which may cause fragmentation; in this mode,
-+ the driver automatically allocates the memory according to
-+ 'max_num_of_keys' parameter. The driver calculates the maximal memory
-+ size that may be used for this CC-Node taking into consideration
-+ 'mask_support' and 'statistics_mode' parameters.
-+ When 'action' = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP in the extraction
-+ parameters of this node, 'max_num_of_keys' must be equal to 'num_of_keys'.
-+ In dynamic mode, 'max_num_of_keys' must be zero. At initialization,
-+ all required structures are allocated according to 'num_of_keys'
-+ parameter. During runtime modification, these structures are
-+ re-allocated according to the updated number of keys.
-+
-+ Please note that 'action' and 'ic_indx_mask' mentioned in the
-+ specific parameter explanations are passed in the extraction
-+ parameters of the node (fields of extractccparams.extractnonhdr).
-+                   parameters of the node (fields of 'extract_cc_params.extract_non_hdr').
-+typedef struct ioc_keys_params_t {
-+ uint16_t max_num_of_keys;/**< Maximum number of keys that will (ever) be used in this CC-Node;
-+ A value of zero may be used for dynamic memory allocation. */
-+ bool mask_support; /**< This parameter is relevant only if a node is initialized with
-+ action = e_IOC_FM_PCD_ACTION_EXACT_MATCH and max_num_of_keys > 0;
-+ Should be TRUE to reserve table memory for key masks, even if
-+ initial keys do not contain masks, or if the node was initialized
-+ as 'empty' (without keys); this will allow user to add keys with
-+ masks at runtime. */
-+ ioc_fm_pcd_cc_stats_mode statistics_mode;/**< Determines the supported statistics mode for all node's keys.
-+ To enable statistics gathering, statistics should be enabled per
-+ every key, using 'statistics_en' in next engine parameters structure
-+ of that key;
-+ If 'max_num_of_keys' is set, all required structures will be
-+ preallocated for all keys. */
-+#if (DPAA_VERSION >= 11)
-+ uint16_t frame_length_ranges[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
-+ /**< Relevant only for 'RMON' statistics mode
-+ (this feature is supported only on B4860 device);
-+ Holds a list of programmable thresholds. For each received frame,
-+ its length in bytes is examined against these range thresholds and
-+ the appropriate counter is incremented by 1. For example, to belong
-+ to range i, the following should hold:
-+ range i-1 threshold < frame length <= range i threshold
-+                                            Each range threshold must be larger than its preceding range
-+ threshold. Last range threshold must be 0xFFFF. */
-+#endif /* (DPAA_VERSION >= 11) */
-+ uint16_t num_of_keys; /**< Number of initial keys;
-+ Note that in case of 'action' = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP,
-+                                            this field should be 2 to the power of the number of bits
-+                                            that are set in 'ic_indx_mask'. */
-+ uint8_t key_size; /**< Size of key - for extraction of type FULL_FIELD, 'key_size' has
-+ to be the standard size of the selected key; For other extraction
-+                                            types, 'key_size' has to be the size of the extraction; When 'action' =
-+                                            e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP, 'key_size' must be 2. */
-+ ioc_fm_pcd_cc_key_params_t key_params[IOC_FM_PCD_MAX_NUM_OF_KEYS];
-+ /**< An array with 'num_of_keys' entries, each entry specifies the
-+ corresponding key parameters;
-+ When 'action' = e_IOC_FM_PCD_ACTION_EXACT_MATCH, this value must not
-+ exceed 255 (IOC_FM_PCD_MAX_NUM_OF_KEYS-1) as the last entry is saved
-+ for the 'miss' entry. */
-+ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss;
-+ /**< Parameters for defining the next engine when a key is not matched;
-+ Not relevant if action = e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP. */
-+} ioc_keys_params_t;
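-+
-+/* Usage sketch (illustrative only, not taken from the driver sources):
-+ * static allocation for an exact-match node with room for 8 keys of 6
-+ * bytes each, with mask support reserved for later runtime additions.
-+ * 'dst_mac' is an assumed 6-byte buffer; the next-engine structures are
-+ * filled as shown in the sketch following ioc_fm_pcd_cc_next_engine_params_t.
-+ *
-+ *     ioc_keys_params_t keys;
-+ *
-+ *     memset(&keys, 0, sizeof(keys));
-+ *     keys.max_num_of_keys = 8;      // static MURAM allocation for 8 keys
-+ *     keys.mask_support    = TRUE;   // reserve room for per-key masks
-+ *     keys.statistics_mode = e_IOC_FM_PCD_CC_STATS_MODE_NONE;
-+ *     keys.num_of_keys     = 1;      // one initial key
-+ *     keys.key_size        = 6;
-+ *     keys.key_params[0].p_key  = dst_mac;  // 6-byte key buffer
-+ *     keys.key_params[0].p_mask = NULL;     // no mask for this key
-+ *     // key_params[0].cc_next_engine_params and
-+ *     // cc_next_engine_params_for_miss are filled separately
-+ */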
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a CC node
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_node_params_t {
-+ ioc_fm_pcd_extract_entry_t extract_cc_params; /**< Extraction parameters */
-+ ioc_keys_params_t keys_params; /**< Keys definition matching the selected extraction */
-+ void *id; /**< Output parameter; returns the CC node Id to be used */
-+} ioc_fm_pcd_cc_node_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a hash table
-+ (Must match struct t_FmPcdHashTableParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_hash_table_params_t {
-+ uint16_t max_num_of_keys; /**< Maximum Number Of Keys that will (ever) be used in this Hash-table */
-+ ioc_fm_pcd_cc_stats_mode statistics_mode; /**< If not e_IOC_FM_PCD_CC_STATS_MODE_NONE, the required structures for the
-+ requested statistics mode will be allocated according to max_num_of_keys. */
-+ uint8_t kg_hash_shift; /**< KG-Hash-shift as it was configured in the KG-scheme
-+ that leads to this hash-table. */
-+ uint16_t hash_res_mask; /**< Mask that will be used on the hash-result;
-+ The number-of-sets for this hash will be calculated
-+ as (2^(number of bits set in 'hash_res_mask'));
-+ The 4 lower bits must be cleared. */
-+ uint8_t hash_shift; /**< Byte offset from the beginning of the KeyGen hash result to the
-+ 2-bytes to be used as hash index. */
-+ uint8_t match_key_size; /**< Size of the exact match keys held by the hash buckets */
-+
-+ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss;
-+ /**< Parameters for defining the next engine when a key is not matched */
-+ void *id;
-+} ioc_fm_pcd_hash_table_params_t;
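-+
-+/* Usage sketch (illustrative only, not taken from the driver sources):
-+ * a 64-set hash table keyed on 6-byte exact-match keys. 0x03f0 has six
-+ * bits set (2^6 = 64 sets) and keeps the 4 lower bits cleared, as required
-+ * above; the KG shift values and table size are placeholders.
-+ *
-+ *     ioc_fm_pcd_hash_table_params_t tbl;
-+ *
-+ *     memset(&tbl, 0, sizeof(tbl));
-+ *     tbl.max_num_of_keys = 256;
-+ *     tbl.statistics_mode = e_IOC_FM_PCD_CC_STATS_MODE_NONE;
-+ *     tbl.kg_hash_shift   = 0;       // as configured in the KG scheme
-+ *     tbl.hash_res_mask   = 0x03f0;  // 6 bits set -> 64 sets
-+ *     tbl.hash_shift      = 0;
-+ *     tbl.match_key_size  = 6;
-+ *     // cc_next_engine_params_for_miss is filled separately; 'id' is returned
-+ */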
-+
-+/**************************************************************************//**
-+ @Description A structure with the arguments for the FM_PCD_HashTableAddKey ioctl() call
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_hash_table_add_key_params_t {
-+ void *p_hash_tbl;
-+ uint8_t key_size;
-+ ioc_fm_pcd_cc_key_params_t key_params;
-+} ioc_fm_pcd_hash_table_add_key_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a CC tree group.
-+
-+ This structure defines a CC group in terms of NetEnv units
-+ and the action to be taken in each case. The unit_ids list must
-+ be given in order from low to high indices.
-+
-+ ioc_fm_pcd_cc_next_engine_params_t is a list of 2^num_of_distinction_units
-+ structures where each defines the next action to be taken for
-+                   each unit combination. For example:
-+ num_of_distinction_units = 2
-+ unit_ids = {1,3}
-+ next_engine_per_entries_in_grp[0] = ioc_fm_pcd_cc_next_engine_params_t for the case that
-+ unit 1 - not found; unit 3 - not found;
-+ next_engine_per_entries_in_grp[1] = ioc_fm_pcd_cc_next_engine_params_t for the case that
-+ unit 1 - not found; unit 3 - found;
-+ next_engine_per_entries_in_grp[2] = ioc_fm_pcd_cc_next_engine_params_t for the case that
-+ unit 1 - found; unit 3 - not found;
-+ next_engine_per_entries_in_grp[3] = ioc_fm_pcd_cc_next_engine_params_t for the case that
-+ unit 1 - found; unit 3 - found;
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_grp_params_t {
-+ uint8_t num_of_distinction_units; /**< Up to 4 */
-+ uint8_t unit_ids [IOC_FM_PCD_MAX_NUM_OF_CC_UNITS];
-+ /**< Indexes of the units as defined in
-+ FM_PCD_NetEnvCharacteristicsSet() */
-+ ioc_fm_pcd_cc_next_engine_params_t next_engine_per_entries_in_grp[IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP];
-+ /**< Maximum entries per group is 16 */
-+} ioc_fm_pcd_cc_grp_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the CC tree groups
-+ (Must match struct t_FmPcdCcTreeParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_tree_params_t {
-+ void *net_env_id; /**< Id of the Network Environment as returned
-+ by FM_PCD_NetEnvCharacteristicsSet() */
-+ uint8_t num_of_groups; /**< Number of CC groups within the CC tree */
-+ ioc_fm_pcd_cc_grp_params_t fm_pcd_cc_group_params [IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS];
-+ /**< Parameters for each group. */
-+ void *id; /**< Output parameter; Returns the tree Id to be used */
-+} ioc_fm_pcd_cc_tree_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining policer byte rate
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_plcr_byte_rate_mode_param_t {
-+ ioc_fm_pcd_plcr_frame_length_select frame_length_selection; /**< Frame length selection */
-+    ioc_fm_pcd_plcr_roll_back_frame_select  roll_back_frame_selection;  /**< Relevant only for the options e_IOC_FM_PCD_PLCR_L2_FRM_LEN
-+                                                                             and e_IOC_FM_PCD_PLCR_FULL_FRM_LEN */
-+} ioc_fm_pcd_plcr_byte_rate_mode_param_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the policer profile (based on
-+ RFC-2698 or RFC-4115 attributes).
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_plcr_non_passthrough_alg_param_t {
-+ ioc_fm_pcd_plcr_rate_mode rate_mode; /**< Byte / Packet */
-+    ioc_fm_pcd_plcr_byte_rate_mode_param_t   byte_mode_param;       /**< Valid for Byte mode; NULL for Packet mode */
-+ uint32_t committed_info_rate; /**< KBits/Sec or Packets/Sec */
-+ uint32_t committed_burst_size; /**< KBits or Packets */
-+ uint32_t peak_or_excess_info_rate; /**< KBits/Sec or Packets/Sec */
-+ uint32_t peak_or_excess_burst_size; /**< KBits or Packets */
-+} ioc_fm_pcd_plcr_non_passthrough_alg_param_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the next engine after policer
-+*//***************************************************************************/
-+typedef union ioc_fm_pcd_plcr_next_engine_params_u {
-+ ioc_fm_pcd_done_action action; /**< Action - when next engine is BMI (done) */
-+ void *p_profile; /**< Policer profile handle - used when next engine
-+ is PLCR, must be a SHARED profile */
-+ void *p_direct_scheme; /**< Direct scheme select - when next engine is Keygen */
-+} ioc_fm_pcd_plcr_next_engine_params_u;
-+
-+typedef struct ioc_fm_pcd_port_params_t {
-+ ioc_fm_port_type port_type; /**< Type of port for this profile */
-+ uint8_t port_id; /**< FM-Port id of port for this profile */
-+} ioc_fm_pcd_port_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining the policer profile entry
-+ (Must match struct t_FmPcdPlcrProfileParams defined in fm_pcd_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_plcr_profile_params_t {
-+ bool modify; /**< TRUE to change an existing profile */
-+ union {
-+ struct {
-+ ioc_fm_pcd_profile_type_selection profile_type; /**< Type of policer profile */
-+ ioc_fm_pcd_port_params_t *p_fm_port; /**< Relevant for per-port profiles only */
-+ uint16_t relative_profile_id; /**< Profile id - relative to shared group or to port */
-+ } new_params; /**< Use it when modify = FALSE */
-+ void *p_profile; /**< A handle to a profile - use it when modify=TRUE */
-+ } profile_select;
-+ ioc_fm_pcd_plcr_algorithm_selection alg_selection; /**< Profile Algorithm PASS_THROUGH, RFC_2698, RFC_4115 */
-+ ioc_fm_pcd_plcr_color_mode color_mode; /**< COLOR_BLIND, COLOR_AWARE */
-+
-+ union {
-+ ioc_fm_pcd_plcr_color dflt_color; /**< For Color-Blind Pass-Through mode; the policer will re-color
-+ any incoming packet with the default value. */
-+ ioc_fm_pcd_plcr_color override; /**< For Color-Aware modes; the profile response to a
-+ pre-color value of 2'b11. */
-+ } color;
-+
-+ ioc_fm_pcd_plcr_non_passthrough_alg_param_t non_passthrough_alg_param; /**< RFC2698 or RFC4115 parameters */
-+
-+ ioc_fm_pcd_engine next_engine_on_green; /**< Next engine for green-colored frames */
-+ ioc_fm_pcd_plcr_next_engine_params_u params_on_green; /**< Next engine parameters for green-colored frames */
-+
-+ ioc_fm_pcd_engine next_engine_on_yellow; /**< Next engine for yellow-colored frames */
-+ ioc_fm_pcd_plcr_next_engine_params_u params_on_yellow; /**< Next engine parameters for yellow-colored frames */
-+
-+ ioc_fm_pcd_engine next_engine_on_red; /**< Next engine for red-colored frames */
-+ ioc_fm_pcd_plcr_next_engine_params_u params_on_red; /**< Next engine parameters for red-colored frames */
-+
-+ bool trap_profile_on_flow_A; /**< Obsolete - do not use */
-+ bool trap_profile_on_flow_B; /**< Obsolete - do not use */
-+ bool trap_profile_on_flow_C; /**< Obsolete - do not use */
-+
-+ void *id; /**< output parameter; Returns the profile Id to be used */
-+} ioc_fm_pcd_plcr_profile_params_t;
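-+
-+/* Usage sketch (illustrative only, not taken from the driver sources):
-+ * a byte-rate, dual-rate profile. The algorithm, colour-mode, rate-mode and
-+ * profile-type selector values are not shown in this header excerpt and are
-+ * therefore left out; the rates/bursts and relative id are placeholders.
-+ *
-+ *     ioc_fm_pcd_plcr_profile_params_t prof;
-+ *
-+ *     memset(&prof, 0, sizeof(prof));
-+ *     prof.modify = FALSE;
-+ *     prof.profile_select.new_params.relative_profile_id       = 0;
-+ *     prof.non_passthrough_alg_param.committed_info_rate       = 100000; // KBits/Sec
-+ *     prof.non_passthrough_alg_param.committed_burst_size      = 2000;   // KBits
-+ *     prof.non_passthrough_alg_param.peak_or_excess_info_rate  = 200000; // KBits/Sec
-+ *     prof.non_passthrough_alg_param.peak_or_excess_burst_size = 4000;   // KBits
-+ *     prof.next_engine_on_green  = e_IOC_FM_PCD_DONE;
-+ *     prof.next_engine_on_yellow = e_IOC_FM_PCD_DONE;
-+ *     prof.next_engine_on_red    = e_IOC_FM_PCD_DONE;
-+ *     // 'id' is returned once the profile is written
-+ */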
-+
-+/**************************************************************************//**
-+ @Description A structure for modifying CC tree next engine
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_tree_modify_next_engine_params_t {
-+ void *id; /**< CC tree Id to be used */
-+ uint8_t grp_indx; /**< A Group index in the tree */
-+ uint8_t indx; /**< Entry index in the group defined by grp_index */
-+ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
-+                                        /**< Parameters for the next engine for the specified tree entry */
-+} ioc_fm_pcd_cc_tree_modify_next_engine_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for modifying CC node next engine
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_node_modify_next_engine_params_t {
-+ void *id; /**< CC node Id to be used */
-+ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
-+ NOTE: This parameter is IGNORED for miss-key! */
-+ uint8_t key_size; /**< Key size of added key */
-+ ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params;
-+                                        /**< Parameters for the next engine for the key at 'key_indx' */
-+} ioc_fm_pcd_cc_node_modify_next_engine_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for remove CC node key
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_node_remove_key_params_t {
-+ void *id; /**< CC node Id to be used */
-+ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
-+ NOTE: This parameter is IGNORED for miss-key! */
-+} ioc_fm_pcd_cc_node_remove_key_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for modifying CC node key and next engine
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t {
-+ void *id; /**< CC node Id to be used */
-+ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
-+ NOTE: This parameter is IGNORED for miss-key! */
-+ uint8_t key_size; /**< Key size of added key */
-+    ioc_fm_pcd_cc_key_params_t  key_params; /**< New key and next-engine parameters (of type
-+                                                 ioc_fm_pcd_cc_key_params_t) for the entry at 'key_indx' */
-+} ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for modifying CC node key
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_cc_node_modify_key_params_t {
-+ void *id; /**< CC node Id to be used */
-+ uint16_t key_indx; /**< Key index for Next Engine Params modifications;
-+ NOTE: This parameter is IGNORED for miss-key! */
-+ uint8_t key_size; /**< Key size of added key */
-+ uint8_t *p_key; /**< Pointer to the key of the size defined in key_size */
-+    uint8_t                    *p_mask;     /**< Pointer to the mask per key, of the size defined
-+                                                 in key_size; p_key and p_mask (if defined) have to be
-+                                                 of the same size as defined in key_size */
-+} ioc_fm_pcd_cc_node_modify_key_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure with the arguments for the FM_PCD_HashTableRemoveKey ioctl() call
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_hash_table_remove_key_params_t {
-+ void *p_hash_tbl; /**< The id of the hash table */
-+ uint8_t key_size; /**< The size of the key to remove */
-+ uint8_t *p_key; /**< Pointer to the key to remove */
-+} ioc_fm_pcd_hash_table_remove_key_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for selecting a location for requested manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_manip_hdr_info_t {
-+ ioc_net_header_type hdr; /**< Header selection */
-+ ioc_fm_pcd_hdr_index hdr_index; /**< Relevant only for MPLS, VLAN and tunneled IP. Otherwise should be cleared. */
-+ bool by_field; /**< TRUE if the location of manipulation is according to some field in the specific header*/
-+ ioc_fm_pcd_fields_u full_field; /**< Relevant only when by_field = TRUE: Extract field */
-+} ioc_fm_manip_hdr_info_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header removal by header type
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t {
-+ ioc_fm_pcd_manip_hdr_rmv_by_hdr_type type; /**< Selection of header removal location */
-+ union {
-+#if ((DPAA_VERSION == 10) && defined(FM_CAPWAP_SUPPORT))
-+ struct {
-+ bool include;/**< If FALSE, remove until the specified header (not including the header);
-+ If TRUE, remove also the specified header. */
-+ ioc_fm_manip_hdr_info_t hdr_info;
-+ } from_start_by_hdr; /**< Relevant when type = e_IOC_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
-+#endif /* FM_CAPWAP_SUPPORT */
-+#if (DPAA_VERSION >= 11)
-+ ioc_fm_manip_hdr_info_t hdr_info; /**< Relevant when type = e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START */
-+#endif /* (DPAA_VERSION >= 11) */
-+ ioc_fm_pcd_manip_hdr_rmv_specific_l2 specific_l2;/**< Relevant when type = e_IOC_FM_PCD_MANIP_BY_HDR_SPECIFIC_L2;
-+ Defines which L2 headers to remove. */
-+ } u;
-+} ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring IP fragmentation manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_frag_ip_params_t {
-+ uint16_t size_for_fragmentation; /**< If length of the frame is greater than this value,
-+ IP fragmentation will be executed.*/
-+#if DPAA_VERSION == 10
-+ uint8_t scratch_bpid; /**< Absolute buffer pool id according to BM configuration.*/
-+#endif /* DPAA_VERSION == 10 */
-+ bool sg_bpid_en; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
-+ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
-+ received frame's buffer. */
-+ uint8_t sg_bpid; /**< Scatter/Gather buffer pool id;
-+ This parameter is relevant when 'sg_bpid_en=TRUE';
-+ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
-+ of this pool need to be allocated in the same memory area as the received buffers.
-+ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
-+ mutual to all these sources. */
-+ ioc_fm_pcd_manip_dont_frag_action dont_frag_action; /**< Dont Fragment Action - If an IP packet is larger
-+ than MTU and its DF bit is set, then this field will
-+ determine the action to be taken.*/
-+} ioc_fm_pcd_manip_frag_ip_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring IP reassembly manipulation.
-+
-+ This is a common structure for both IPv4 and IPv6 reassembly
-+ manipulation. For reassembly of both IPv4 and IPv6, make sure to
-+ set the 'hdr' field in ioc_fm_pcd_manip_reassem_params_t to IOC_HEADER_TYPE_IPv6.
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_reassem_ip_params_t {
-+ uint8_t relative_scheme_id[2]; /**< Partition relative scheme id:
-+ relativeSchemeId[0] - Relative scheme ID for IPV4 Reassembly manipulation;
-+ relativeSchemeId[1] - Relative scheme ID for IPV6 Reassembly manipulation;
-+ NOTE: The following comment is relevant only for FMAN v2 devices:
-+ Relative scheme ID for IPv4/IPv6 Reassembly manipulation must be smaller than
-+                                                the user scheme IDs to ensure that the reassembly schemes will be matched first.
-+                                                The remaining schemes, if defined, should have a higher relative scheme ID. */
-+#if DPAA_VERSION >= 11
-+    uint32_t                non_consistent_sp_fqid; /**< If other fragments of the frame correspond to a different storage
-+                                                         profile than the opening fragment (Non-Consistent-SP state),
-+                                                         one of two possible scenarios occurs:
-+                                                         if 'non_consistent_sp_fqid != 0', the reassembled frame will be enqueued to
-+                                                         this FQID; otherwise a 'Non Consistent SP' bit will be set in FD[status].*/
-+#else
-+ uint8_t sg_bpid; /**< Buffer pool id for the S/G frame created by the reassembly process */
-+#endif /* DPAA_VERSION >= 11 */
-+ uint8_t data_mem_id; /**< Memory partition ID for the IPR's external tables structure */
-+    uint16_t                data_liodn_offset;      /**< LIODN offset for accessing the IPR's external tables structure. */
-+ uint16_t min_frag_size[2]; /**< Minimum fragment size:
-+ minFragSize[0] - for ipv4, minFragSize[1] - for ipv6 */
-+ ioc_fm_pcd_manip_reassem_ways_number num_of_frames_per_hash_entry[2];
-+ /**< Number of frames per hash entry needed for reassembly process:
-+ numOfFramesPerHashEntry[0] - for ipv4 (max value is e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH);
-+ numOfFramesPerHashEntry[1] - for ipv6 (max value is e_IOC_FM_PCD_MANIP_SIX_WAYS_HASH). */
-+    uint16_t                max_num_frames_in_process;/**< Number of frames which can be processed by Reassembly at the same time;
-+                                                           Must be a power of 2;
-+ In the case numOfFramesPerHashEntry == e_IOC_FM_PCD_MANIP_FOUR_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 4 - 512;
-+ In the case numOfFramesPerHashEntry == e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 8 - 2048. */
-+ ioc_fm_pcd_manip_reassem_time_out_mode time_out_mode; /**< Expiration delay initialized by Reassembly process */
-+    uint32_t                fqid_for_time_out_frames;/**< FQID on which timed-out frames will be enqueued during the Time Out Process */
-+ uint32_t timeout_threshold_for_reassm_process;
-+                                                     /**< Represents the time interval in microseconds after which an open
-+                                                          frame (at least one fragment was processed but not all fragments arrived) is considered too old */
-+} ioc_fm_pcd_manip_reassem_ip_params_t;
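-+
-+/* Usage sketch (illustrative only, not taken from the driver sources):
-+ * IPv4+IPv6 reassembly sized for 64 frames in flight (a power of 2, inside
-+ * the 8 - 2048 range quoted above for eight-way hashing). Scheme ids,
-+ * FQIDs, memory ids and the timeout value are placeholders.
-+ *
-+ *     ioc_fm_pcd_manip_reassem_ip_params_t reasm;
-+ *
-+ *     memset(&reasm, 0, sizeof(reasm));
-+ *     reasm.relative_scheme_id[0]           = 0;   // IPv4 reassembly scheme
-+ *     reasm.relative_scheme_id[1]           = 1;   // IPv6 reassembly scheme
-+ *     reasm.min_frag_size[0]                = 0;
-+ *     reasm.min_frag_size[1]                = 0;
-+ *     reasm.num_of_frames_per_hash_entry[0] = e_IOC_FM_PCD_MANIP_EIGHT_WAYS_HASH;
-+ *     reasm.num_of_frames_per_hash_entry[1] = e_IOC_FM_PCD_MANIP_SIX_WAYS_HASH;
-+ *     reasm.max_num_frames_in_process       = 64;
-+ *     reasm.fqid_for_time_out_frames        = 0;        // FQID for timed-out frames (placeholder)
-+ *     reasm.timeout_threshold_for_reassm_process = 1000000;  // 1 second
-+ */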
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining IPSEC manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_special_offload_ipsec_params_t {
-+ bool decryption; /**< TRUE if being used in decryption direction;
-+ FALSE if being used in encryption direction. */
-+ bool ecn_copy; /**< TRUE to copy the ECN bits from inner/outer to outer/inner
-+ (direction depends on the 'decryption' field). */
-+ bool dscp_copy; /**< TRUE to copy the DSCP bits from inner/outer to outer/inner
-+ (direction depends on the 'decryption' field). */
-+ bool variable_ip_hdr_len; /**< TRUE for supporting variable IP header length in decryption. */
-+ bool variable_ip_version; /**< TRUE for supporting both IP version on the same SA in encryption */
-+    uint8_t   outer_ip_hdr_len;     /**< If 'variable_ip_version == TRUE' then this field must be set to a non-zero value;
-+                                         it specifies the length of the outer IP header that was configured in the
-+                                         corresponding SA. */
-+    uint16_t  arw_size;             /**< If non-zero, an ARW check will be performed for this SA;
-+                                         the value must be a multiple of 16 */
-+    void      *arw_addr;            /**< If arw_size is non-zero, this field must be set to a non-zero value;
-+                                         MUST be allocated from the FMan MURAM to which the post-SEC op-port belongs;
-+                                         must be 4B aligned. Required MURAM size is '(NEXT_POWER_OF_2(arw_size+32))/8+4' bytes */
-+} ioc_fm_pcd_manip_special_offload_ipsec_params_t;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Parameters for configuring CAPWAP fragmentation manipulation
-+
-+ Restrictions:
-+ - Maximum number of fragments per frame is 16.
-+ - Transmit confirmation is not supported.
-+ - Fragmentation nodes must be set as the last PCD action (i.e. the
-+ corresponding CC node key must have next engine set to e_FM_PCD_DONE).
-+ - Only BMan buffers shall be used for frames to be fragmented.
-+ - NOTE: The following comment is relevant only for FMAN v3 devices: IPF
-+ does not support VSP. Therefore, on the same port where we have IPF we
-+ cannot support VSP.
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_frag_capwap_params_t {
-+ uint16_t size_for_fragmentation; /**< If length of the frame is greater than this value,
-+ CAPWAP fragmentation will be executed.*/
-+ bool sg_bpid_en; /**< Enable a dedicated buffer pool id for the Scatter/Gather buffer allocation;
-+ If disabled, the Scatter/Gather buffer will be allocated from the same pool as the
-+ received frame's buffer. */
-+ uint8_t sg_bpid; /**< Scatter/Gather buffer pool id;
-+                                                          This parameter is relevant when 'sg_bpid_en=TRUE';
-+ Same LIODN number is used for these buffers as for the received frames buffers, so buffers
-+ of this pool need to be allocated in the same memory area as the received buffers.
-+ If the received buffers arrive from different sources, the Scatter/Gather BP id should be
-+ mutual to all these sources. */
-+ bool compress_mode_en; /**< CAPWAP Header Options Compress Enable mode;
-+                                                          When this mode is enabled, only the first fragment includes the CAPWAP header options
-+                                                          field (if the user provides it in the input frame) and all other fragments exclude the CAPWAP
-+                                                          options field (the CAPWAP header is updated accordingly).*/
-+} ioc_fm_pcd_manip_frag_capwap_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for configuring CAPWAP reassembly manipulation.
-+
-+ Restrictions:
-+ - Application must define one scheme to catch the reassembled frames.
-+ - Maximum number of fragments per frame is 16.
-+
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_reassem_capwap_params_t {
-+ uint8_t relative_scheme_id; /**< Partition relative scheme id;
-+                                                NOTE: this id must be smaller than the user scheme IDs to ensure that the reassembly scheme will be matched first;
-+                                                remaining schemes, if defined, should have a higher relative scheme ID. */
-+ uint8_t data_mem_id; /**< Memory partition ID for the IPR's external tables structure */
-+    uint16_t data_liodn_offset;                 /**< LIODN offset for accessing the IPR's external tables structure. */
-+ uint16_t max_reassembled_frame_length;/**< The maximum CAPWAP reassembled frame length in bytes;
-+                                                    If max_reassembled_frame_length == 0, any successfully reassembled frame length is
-+                                                    considered valid;
-+                                                    if max_reassembled_frame_length > 0, a successfully reassembled frame whose length
-+                                                    exceeds this value is considered an error frame (the FD status[CRE] bit is set). */
-+ ioc_fm_pcd_manip_reassem_ways_number num_of_frames_per_hash_entry;
-+ /**< Number of frames per hash entry needed for reassembly process */
-+    uint16_t max_num_frames_in_process;         /**< Number of frames which can be processed by reassembly at the same time;
-+                                                     Must be a power of 2;
-+ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 4 - 512;
-+ In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH,
-+ maxNumFramesInProcess has to be in the range of 8 - 2048. */
-+ ioc_fm_pcd_manip_reassem_time_out_mode time_out_mode; /**< Expiration delay initialized by Reassembly process */
-+    uint32_t fqid_for_time_out_frames;          /**< FQID on which timed-out frames will be enqueued during the Time Out Process;
-+ Recommended value for this field is 0; in this way timed-out frames will be discarded */
-+ uint32_t timeout_threshold_for_reassm_process;
-+                                                    /**< Represents the time interval in microseconds after which an open
-+                                                         frame (at least one fragment was processed but not all fragments arrived) is considered too old */
-+} ioc_fm_pcd_manip_reassem_capwap_params_t;
-+
-+/**************************************************************************//**
-+ @Description structure for defining CAPWAP manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_special_offload_capwap_params_t {
-+ bool dtls; /**< TRUE if continue to SEC DTLS encryption */
-+ ioc_fm_pcd_manip_hdr_qos_src qos_src; /**< TODO */
-+} ioc_fm_pcd_manip_special_offload_capwap_params_t;
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining special offload manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_special_offload_params_t {
-+ ioc_fm_pcd_manip_special_offload_type type; /**< Type of special offload manipulation */
-+ union
-+ {
-+ ioc_fm_pcd_manip_special_offload_ipsec_params_t ipsec; /**< Parameters for IPSec; Relevant when
-+ type = e_IOC_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC */
-+
-+#if (DPAA_VERSION >= 11)
-+ ioc_fm_pcd_manip_special_offload_capwap_params_t capwap; /**< Parameters for CAPWAP; Relevant when
-+ type = e_FM_PCD_MANIP_SPECIAL_OFFLOAD_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} ioc_fm_pcd_manip_special_offload_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining generic removal manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_rmv_generic_params_t {
-+ uint8_t offset; /**< Offset from beginning of header to the start
-+ location of the removal */
-+ uint8_t size; /**< Size of removed section */
-+} ioc_fm_pcd_manip_hdr_rmv_generic_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining insertion manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_insrt_t {
-+ uint8_t size; /**< size of inserted section */
-+ uint8_t *p_data; /**< data to be inserted */
-+} ioc_fm_pcd_manip_hdr_insrt_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining generic insertion manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_insrt_generic_params_t {
-+ uint8_t offset; /**< Offset from beginning of header to the start
-+ location of the insertion */
-+ uint8_t size; /**< Size of inserted section */
-+ bool replace; /**< TRUE to override (replace) existing data at
-+ 'offset', FALSE to insert */
-+ uint8_t *p_data; /**< Pointer to data to be inserted */
-+} ioc_fm_pcd_manip_hdr_insrt_generic_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation VLAN DSCP To Vpri translation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_field_update_vlan_dscp_to_vpri_t {
-+ uint8_t dscp_to_vpri_table[IOC_FM_PCD_MANIP_DSCP_TO_VLAN_TRANS];
-+ /**< A table of VPri values for each DSCP value;
-+                                            The index is the DSCP value (0-0x3F) and the
-+ value is the corresponding VPRI (0-15). */
-+    uint8_t         vpri_def_val;          /**< 0-7, Relevant only if update_type =
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN,
-+ this field is the Q Tag default value if the
-+ IP header is not found. */
-+} ioc_fm_pcd_manip_hdr_field_update_vlan_dscp_to_vpri_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation VLAN fields updates
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_field_update_vlan_t {
-+ ioc_fm_pcd_manip_hdr_field_update_vlan update_type; /**< Selects VLAN update type */
-+ union {
-+        uint8_t     vpri;                  /**< 0-7, Relevant only if update_type =
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_PRI, this
-+ is the new VLAN pri. */
-+ ioc_fm_pcd_manip_hdr_field_update_vlan_dscp_to_vpri_t dscp_to_vpri;
-+ /**< Parameters structure, Relevant only if update_type =
-+ e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN. */
-+ } u;
-+} ioc_fm_pcd_manip_hdr_field_update_vlan_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation IPV4 fields updates
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_field_update_ipv4_t {
-+ ioc_ipv4_hdr_manip_update_flags_t valid_updates; /**< ORed flag, selecting the required updates */
-+ uint8_t tos; /**< 8 bit New TOS; Relevant if valid_updates contains
-+ IOC_HDR_MANIP_IPV4_TOS */
-+ uint16_t id; /**< 16 bit New IP ID; Relevant only if valid_updates
-+ contains IOC_HDR_MANIP_IPV4_ID */
-+ uint32_t src; /**< 32 bit New IP SRC; Relevant only if valid_updates
-+ contains IOC_HDR_MANIP_IPV4_SRC */
-+ uint32_t dst; /**< 32 bit New IP DST; Relevant only if valid_updates
-+ contains IOC_HDR_MANIP_IPV4_DST */
-+} ioc_fm_pcd_manip_hdr_field_update_ipv4_t;
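-+
-+/* Usage sketch (illustrative only, not taken from the driver sources):
-+ * rewriting the TOS byte and destination address of an IPv4 header. Only
-+ * the flags named in the comments above are used; the TOS and address
-+ * values are placeholders.
-+ *
-+ *     ioc_fm_pcd_manip_hdr_field_update_ipv4_t upd;
-+ *
-+ *     memset(&upd, 0, sizeof(upd));
-+ *     upd.valid_updates = IOC_HDR_MANIP_IPV4_TOS | IOC_HDR_MANIP_IPV4_DST;
-+ *     upd.tos           = 0x10;        // new TOS byte
-+ *     upd.dst           = 0xc0a80101;  // 192.168.1.1
-+ */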
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation IPV6 fields updates
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_field_update_ipv6_t {
-+ ioc_ipv6_hdr_manip_update_flags_t valid_updates; /**< ORed flag, selecting the required updates */
-+ uint8_t traffic_class; /**< 8 bit New Traffic Class; Relevant if valid_updates contains
-+ IOC_HDR_MANIP_IPV6_TC */
-+ uint8_t src[IOC_NET_HEADER_FIELD_IPv6_ADDR_SIZE];
-+ /**< 16 byte new IP SRC; Relevant only if valid_updates
-+ contains IOC_HDR_MANIP_IPV6_SRC */
-+ uint8_t dst[IOC_NET_HEADER_FIELD_IPv6_ADDR_SIZE];
-+ /**< 16 byte new IP DST; Relevant only if valid_updates
-+ contains IOC_HDR_MANIP_IPV6_DST */
-+} ioc_fm_pcd_manip_hdr_field_update_ipv6_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation TCP/UDP fields updates
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t {
-+ ioc_tcp_udp_hdr_manip_update_flags_t valid_updates; /**< ORed flag, selecting the required updates */
-+ uint16_t src; /**< 16 bit New TCP/UDP SRC; Relevant only if valid_updates
-+ contains IOC_HDR_MANIP_TCP_UDP_SRC */
-+ uint16_t dst; /**< 16 bit New TCP/UDP DST; Relevant only if valid_updates
-+ contains IOC_HDR_MANIP_TCP_UDP_DST */
-+} ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation fields updates
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_field_update_params_t {
-+ ioc_fm_pcd_manip_hdr_field_update_type type; /**< Type of header field update manipulation */
-+ union {
-+ ioc_fm_pcd_manip_hdr_field_update_vlan_t vlan; /**< Parameters for VLAN update. Relevant when
-+ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN */
-+ ioc_fm_pcd_manip_hdr_field_update_ipv4_t ipv4; /**< Parameters for IPv4 update. Relevant when
-+ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4 */
-+ ioc_fm_pcd_manip_hdr_field_update_ipv6_t ipv6; /**< Parameters for IPv6 update. Relevant when
-+ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6 */
-+ ioc_fm_pcd_manip_hdr_field_update_tcp_udp_t tcp_udp;/**< Parameters for TCP/UDP update. Relevant when
-+ type = e_IOC_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP */
-+ } u;
-+} ioc_fm_pcd_manip_hdr_field_update_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining custom header manipulation for IP replacement
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_custom_ip_hdr_replace_t {
-+ ioc_fm_pcd_manip_hdr_custom_ip_replace replace_type; /**< Selects replace update type */
-+ bool dec_ttl_hl; /**< Decrement TTL (IPV4) or Hop limit (IPV6) by 1 */
-+ bool update_ipv4_id; /**< Relevant when replace_type =
-+ e_IOC_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4 */
-+ uint16_t id; /**< 16 bit New IP ID; Relevant only if
-+ update_ipv4_id = TRUE */
-+ uint8_t hdr_size; /**< The size of the new IP header */
-+ uint8_t hdr[IOC_FM_PCD_MANIP_MAX_HDR_SIZE];
-+ /**< The new IP header */
-+} ioc_fm_pcd_manip_hdr_custom_ip_hdr_replace_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining custom header manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_custom_params_t {
-+ ioc_fm_pcd_manip_hdr_custom_type type; /**< Type of header field update manipulation */
-+ union {
-+ ioc_fm_pcd_manip_hdr_custom_ip_hdr_replace_t ip_hdr_replace;
-+ /**< Parameters IP header replacement */
-+ } u;
-+} ioc_fm_pcd_manip_hdr_custom_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining specific L2 insertion manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_insrt_specific_l2_params_t {
-+ ioc_fm_pcd_manip_hdr_insrt_specific_l2 specific_l2; /**< Selects which L2 headers to insert */
-+ bool update; /**< TRUE to update MPLS header */
-+ uint8_t size; /**< size of inserted section */
-+ uint8_t *p_data; /**< data to be inserted */
-+} ioc_fm_pcd_manip_hdr_insrt_specific_l2_params_t;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Parameters for defining IP insertion manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_insrt_ip_params_t {
-+ bool calc_l4_checksum; /**< Calculate L4 checksum. */
-+ ioc_fm_pcd_manip_hdr_qos_mapping_mode mapping_mode; /**< TODO */
-+ uint8_t last_pid_offset; /**< the offset of the last Protocol within
-+ the inserted header */
-+ uint16_t id; /**< 16 bit New IP ID */
-+ bool dont_frag_overwrite;
-+ /**< IPv4 only. DF is overwritten with the hash-result next-to-last byte.
-+ * This byte is configured to be overwritten when RPD is set. */
-+ uint8_t last_dst_offset;
-+                                        /**< IPv6 only. If a routing extension exists, the user should set the offset of the destination address
-+                                         * in order to calculate the UDP checksum pseudo-header;
-+ * Otherwise set it to '0'. */
-+ ioc_fm_pcd_manip_hdr_insrt_t insrt; /**< size and data to be inserted. */
-+} ioc_fm_pcd_manip_hdr_insrt_ip_params_t;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header insertion manipulation by header type
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t {
-+ ioc_fm_pcd_manip_hdr_insrt_by_hdr_type type; /**< Selects manipulation type */
-+ union {
-+ ioc_fm_pcd_manip_hdr_insrt_specific_l2_params_t specific_l2_params;
-+ /**< Used when type = e_IOC_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2:
-+                                                     Selects which L2 headers to insert */
-+#if (DPAA_VERSION >= 11)
-+ ioc_fm_pcd_manip_hdr_insrt_ip_params_t ip_params; /**< Used when type = e_FM_PCD_MANIP_INSRT_BY_HDR_IP */
-+ ioc_fm_pcd_manip_hdr_insrt_t insrt; /**< Used when type is one of e_FM_PCD_MANIP_INSRT_BY_HDR_UDP,
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_UDP_LITE, or
-+ e_FM_PCD_MANIP_INSRT_BY_HDR_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header insertion manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_insrt_params_t {
-+ ioc_fm_pcd_manip_hdr_insrt_type type; /**< Type of insertion manipulation */
-+ union {
-+ ioc_fm_pcd_manip_hdr_insrt_by_hdr_params_t by_hdr; /**< Parameters for defining header insertion manipulation by header type,
-+ relevant if 'type' = e_IOC_FM_PCD_MANIP_INSRT_BY_HDR */
-+ ioc_fm_pcd_manip_hdr_insrt_generic_params_t generic;/**< Parameters for defining generic header insertion manipulation,
-+ relevant if type = e_IOC_FM_PCD_MANIP_INSRT_GENERIC */
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ ioc_fm_pcd_manip_hdr_insrt_by_template_params_t by_template;
-+ /**< Parameters for defining header insertion manipulation by template,
-+ relevant if 'type' = e_IOC_FM_PCD_MANIP_INSRT_BY_TEMPLATE */
-+#endif /* FM_CAPWAP_SUPPORT */
-+ } u;
-+} ioc_fm_pcd_manip_hdr_insrt_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header removal manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_rmv_params_t {
-+ ioc_fm_pcd_manip_hdr_rmv_type type; /**< Type of header removal manipulation */
-+ union {
-+ ioc_fm_pcd_manip_hdr_rmv_by_hdr_params_t by_hdr; /**< Parameters for defining header removal manipulation by header type,
-+ relevant if type = e_IOC_FM_PCD_MANIP_RMV_BY_HDR */
-+ ioc_fm_pcd_manip_hdr_rmv_generic_params_t generic; /**< Parameters for defining generic header removal manipulation,
-+ relevant if type = e_IOC_FM_PCD_MANIP_RMV_GENERIC */
-+ } u;
-+} ioc_fm_pcd_manip_hdr_rmv_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining header manipulation node
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_hdr_params_t {
-+ bool rmv; /**< TRUE, to define removal manipulation */
-+ ioc_fm_pcd_manip_hdr_rmv_params_t rmv_params; /**< Parameters for removal manipulation, relevant if 'rmv' = TRUE */
-+
-+ bool insrt; /**< TRUE, to define insertion manipulation */
-+ ioc_fm_pcd_manip_hdr_insrt_params_t insrt_params; /**< Parameters for insertion manipulation, relevant if 'insrt' = TRUE */
-+
-+ bool field_update; /**< TRUE, to define field update manipulation */
-+ ioc_fm_pcd_manip_hdr_field_update_params_t field_update_params; /**< Parameters for field update manipulation, relevant if 'fieldUpdate' = TRUE */
-+
-+ bool custom; /**< TRUE, to define custom manipulation */
-+ ioc_fm_pcd_manip_hdr_custom_params_t custom_params; /**< Parameters for custom manipulation, relevant if 'custom' = TRUE */
-+
-+ bool dont_parse_after_manip;/**< FALSE to activate the parser a second time after
-+ completing the manipulation on the frame */
-+} ioc_fm_pcd_manip_hdr_params_t;
-+
-+
-+/**************************************************************************//**
-+ @Description structure for defining fragmentation manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_frag_params_t {
-+ ioc_net_header_type hdr; /**< Header selection */
-+ union {
-+#if (DPAA_VERSION >= 11)
-+ ioc_fm_pcd_manip_frag_capwap_params_t capwap_frag; /**< Parameters for defining CAPWAP fragmentation,
-+ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+ ioc_fm_pcd_manip_frag_ip_params_t ip_frag; /**< Parameters for defining IP fragmentation,
-+ relevant if 'hdr' = HEADER_TYPE_Ipv4 or HEADER_TYPE_Ipv6 */
-+ } u;
-+} ioc_fm_pcd_manip_frag_params_t;
-+
-+/**************************************************************************//**
-+ @Description structure for defining reassemble manipulation
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_reassem_params_t {
-+ ioc_net_header_type hdr; /**< Header selection */
-+ union {
-+#if (DPAA_VERSION >= 11)
-+ ioc_fm_pcd_manip_reassem_capwap_params_t capwap_reassem; /**< Parameters for defining CAPWAP reassembly,
-+ relevant if 'hdr' = HEADER_TYPE_CAPWAP */
-+#endif /* (DPAA_VERSION >= 11) */
-+ ioc_fm_pcd_manip_reassem_ip_params_t ip_reassem; /**< Parameters for defining IP reassembly,
-+ relevant if 'hdr' = HEADER_TYPE_Ipv4 or HEADER_TYPE_Ipv6 */
-+ } u;
-+} ioc_fm_pcd_manip_reassem_params_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for defining a manipulation node
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_params_t {
-+ ioc_fm_pcd_manip_type type; /**< Selects type of manipulation node */
-+ union {
-+ ioc_fm_pcd_manip_hdr_params_t hdr; /**< Parameters for defining header manipulation node */
-+ ioc_fm_pcd_manip_reassem_params_t reassem;/**< Parameters for defining reassembly manipulation node */
-+ ioc_fm_pcd_manip_frag_params_t frag; /**< Parameters for defining fragmentation manipulation node */
-+ ioc_fm_pcd_manip_special_offload_params_t special_offload;/**< Parameters for defining special offload manipulation node */
-+ } u;
-+ void *p_next_manip;/**< Handle to another (previously defined) manipulation node;
-+ Allows concatenation of manipulation actions
-+ This parameter is optional and may be NULL. */
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ bool frag_or_reasm;/**< TRUE, if defined fragmentation/reassembly manipulation */
-+ ioc_fm_pcd_manip_frag_or_reasm_params_t frag_or_reasm_params;/**< Parameters for fragmentation/reassembly manipulation,
-+ relevant if frag_or_reasm = TRUE */
-+#endif /* FM_CAPWAP_SUPPORT */
-+ void *id;
-+} ioc_fm_pcd_manip_params_t;
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving IP reassembly statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_reassem_ip_stats_t {
-+ /* common counters for both IPv4 and IPv6 */
-+ uint32_t timeout; /**< Counts the number of TimeOut occurrences */
-+ uint32_t rfd_pool_busy; /**< Counts the number of failed attempts to allocate
-+ a Reassembly Frame Descriptor */
-+ uint32_t internal_buffer_busy; /**< Counts the number of times an internal buffer busy occurred */
-+ uint32_t external_buffer_busy; /**< Counts the number of times external buffer busy occurred */
-+ uint32_t sg_fragments; /**< Counts the number of Scatter/Gather fragments */
-+ uint32_t dma_semaphore_depletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
-+#if (DPAA_VERSION >= 11)
-+ uint32_t non_consistent_sp; /**< Counts the number of Non Consistent Storage Profile events for
-+ successfully reassembled frames */
-+#endif /* (DPAA_VERSION >= 11) */
-+struct {
-+ uint32_t successfully_reassembled; /**< Counts the number of successfully reassembled frames */
-+ uint32_t valid_fragments; /**< Counts the total number of valid fragments that
-+ have been processed for all frames */
-+ uint32_t processed_fragments; /**< Counts the number of processed fragments
-+ (valid and error fragments) for all frames */
-+ uint32_t malformed_fragments; /**< Counts the number of malformed fragments processed for all frames */
-+ uint32_t discarded_fragments; /**< Counts the number of fragments discarded by the reassembly process */
-+ uint32_t auto_learn_busy; /**< Counts the number of times a busy condition occurs when attempting
-+ to access an IP-Reassembly Automatic Learning Hash set */
-+ uint32_t more_than16fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
-+ exceeds 16 */
-+ } specific_hdr_statistics[2]; /**< slot '0' is for IPv4, slot '1' is for IPv6 */
-+} ioc_fm_pcd_manip_reassem_ip_stats_t;
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving IP fragmentation statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_frag_ip_stats_t {
-+ uint32_t total_frames; /**< Number of frames that passed through the manipulation node */
-+ uint32_t fragmented_frames; /**< Number of frames that were fragmented */
-+ uint32_t generated_fragments; /**< Number of fragments that were generated */
-+} ioc_fm_pcd_manip_frag_ip_stats_t;
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Description Structure for retrieving CAPWAP reassembly statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_reassem_capwap_stats_t {
-+ uint32_t timeout; /**< Counts the number of timeout occurrences */
-+ uint32_t rfd_pool_busy; /**< Counts the number of failed attempts to allocate
-+ a Reassembly Frame Descriptor */
-+ uint32_t internal_buffer_busy; /**< Counts the number of times an internal buffer busy occurred */
-+ uint32_t external_buffer_busy; /**< Counts the number of times external buffer busy occurred */
-+ uint32_t sg_fragments; /**< Counts the number of Scatter/Gather fragments */
-+ uint32_t dma_semaphore_depletion; /**< Counts the number of failed attempts to allocate a DMA semaphore */
-+ uint32_t successfully_reassembled; /**< Counts the number of successfully reassembled frames */
-+ uint32_t valid_fragments; /**< Counts the total number of valid fragments that
-+ have been processed for all frames */
-+ uint32_t processed_fragments; /**< Counts the number of processed fragments
-+ (valid and error fragments) for all frames */
-+ uint32_t malformed_fragments; /**< Counts the number of malformed fragments processed for all frames */
-+ uint32_t autoLearn_busy; /**< Counts the number of times a busy condition occurs when attempting
-+ to access a Reassembly Automatic Learning Hash set */
-+ uint32_t discarded_fragments; /**< Counts the number of fragments discarded by the reassembly process */
-+ uint32_t more_than16fragments; /**< Counts the fragment occurrences in which the number of fragments-per-frame
-+ exceeds 16 */
-+ uint32_t exceed_max_reassembly_frame_len;/**< Counts the number of times that a successfully reassembled frame
-+ length exceeds the MaxReassembledFrameLength value */
-+} ioc_fm_pcd_manip_reassem_capwap_stats_t;
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving CAPWAP fragmentation statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_frag_capwap_stats_t {
-+ uint32_t total_frames; /**< Number of frames that passed through the manipulation node */
-+ uint32_t fragmented_frames; /**< Number of frames that were fragmented */
-+ uint32_t generated_fragments; /**< Number of fragments that were generated */
-+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
-+ uint8_t sg_allocation_failure; /**< Number of allocation failure of s/g buffers */
-+#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
-+} ioc_fm_pcd_manip_frag_capwap_stats_t;
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Description Structure for retrieving reassembly statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_reassem_stats_t {
-+ union {
-+ ioc_fm_pcd_manip_reassem_ip_stats_t ip_reassem; /**< Structure for IP reassembly statistics */
-+#if (DPAA_VERSION >= 11)
-+ ioc_fm_pcd_manip_reassem_capwap_stats_t capwap_reassem; /**< Structure for CAPWAP reassembly statistics */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} ioc_fm_pcd_manip_reassem_stats_t;
-+
-+/**************************************************************************//**
-+ @Description structure for retrieving fragmentation statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_frag_stats_t {
-+ union {
-+ ioc_fm_pcd_manip_frag_ip_stats_t ip_frag; /**< Structure for IP fragmentation statistics */
-+#if (DPAA_VERSION >= 11)
-+ ioc_fm_pcd_manip_frag_capwap_stats_t capwap_frag; /**< Structure for CAPWAP fragmentation statistics */
-+#endif /* (DPAA_VERSION >= 11) */
-+ } u;
-+} ioc_fm_pcd_manip_frag_stats_t;
-+
-+/**************************************************************************//**
-+ @Description structure for defining manipulation statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_stats_t {
-+ union {
-+ ioc_fm_pcd_manip_reassem_stats_t reassem; /**< Structure for reassembly statistics */
-+ ioc_fm_pcd_manip_frag_stats_t frag; /**< Structure for fragmentation statistics */
-+ } u;
-+} ioc_fm_pcd_manip_stats_t;
-+
-+/**************************************************************************//**
-+ @Description Parameters for acquiring manipulation statistics
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_manip_get_stats_t {
-+ void *id;
-+ ioc_fm_pcd_manip_stats_t stats;
-+} ioc_fm_pcd_manip_get_stats_t;
-+
-+#if DPAA_VERSION >= 11
-+/**************************************************************************//**
-+ @Description Parameters for defining frame replicator group and its members
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_frm_replic_group_params_t {
-+ uint8_t max_num_of_entries; /**< Maximal number of members in the group - must be at least two */
-+ uint8_t num_of_entries; /**< Number of members in the group - must be at least 1 */
-+ ioc_fm_pcd_cc_next_engine_params_t next_engine_params[IOC_FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES];
-+ /**< Array of members' parameters */
-+ void *id;
-+} ioc_fm_pcd_frm_replic_group_params_t;
-+
-+typedef struct ioc_fm_pcd_frm_replic_member_t {
-+ void *h_replic_group;
-+ uint16_t member_index;
-+} ioc_fm_pcd_frm_replic_member_t;
-+
-+typedef struct ioc_fm_pcd_frm_replic_member_params_t {
-+ ioc_fm_pcd_frm_replic_member_t member;
-+ ioc_fm_pcd_cc_next_engine_params_t next_engine_params;
-+} ioc_fm_pcd_frm_replic_member_params_t;
-+#endif /* DPAA_VERSION >= 11 */
-+
-+
-+typedef struct ioc_fm_pcd_cc_key_statistics_t {
-+ uint32_t byte_count; /**< This counter reflects byte count of frames that
-+ were matched by this key. */
-+ uint32_t frame_count; /**< This counter reflects count of frames that
-+ were matched by this key. */
-+#if (DPAA_VERSION >= 11)
-+ uint32_t frame_length_range_count[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
-+ /**< These counters reflect how many frames matched
-+ this key in 'RMON' statistics mode:
-+ Each counter holds the number of frames of a
-+ specific frames length range, according to the
-+ ranges provided at initialization. */
-+#endif /* (DPAA_VERSION >= 11) */
-+} ioc_fm_pcd_cc_key_statistics_t;
-+
-+
-+typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {
-+ void *id;
-+ uint16_t key_index;
-+ ioc_fm_pcd_cc_key_statistics_t statistics;
-+} ioc_fm_pcd_cc_tbl_get_stats_t;
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableGetKeyStatistics
-+
-+ @Description This routine may be used to get the statistics counters of a specific key
-+ in a CC Node.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' or
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' was set for this node,
-+ these counters reflect how many frames that matched this key
-+ have passed; the total frame count will be returned in the counter
-+ of the first range (as only one frame length range was defined).
-+ If 'e_FM_PCD_CC_STATS_MODE_RMON' was set for this node, the total
-+ frame count will be split into frame-length counters, based on the
-+ provided frame length ranges.
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[in] keyIndex Key index for adding
-+ @Param[out] p_KeyStatistics Key statistics counters
-+
-+ @Return The specific key statistics.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_compat_fm_pcd_cc_tbl_get_stats_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_fm_pcd_cc_tbl_get_stats_t)
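-+/* Editor's illustrative sketch (not part of the original header): querying per-key
-+ * statistics through FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT from user space.
-+ * 'pcd_fd' (an open FM-PCD device file descriptor) and 'cc_node_id' (the handle
-+ * obtained when the match table was set) are assumed names; <sys/ioctl.h>,
-+ * <string.h> and <stdio.h> are assumed to be included.
-+ *
-+ *   ioc_fm_pcd_cc_tbl_get_stats_t req;
-+ *   memset(&req, 0, sizeof(req));
-+ *   req.id = cc_node_id;              // CC node to query
-+ *   req.key_index = 0;                // key index within that node
-+ *   if (ioctl(pcd_fd, FM_PCD_IOC_MATCH_TABLE_GET_KEY_STAT, &req) == 0)
-+ *       printf("key 0: %u frames, %u bytes\n",
-+ *              req.statistics.frame_count, req.statistics.byte_count);
-+ */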
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableGetMissStatistics
-+
-+ @Description This routine may be used to get the statistics counters of the miss entry
-+ in a CC Node.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' or
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' was set for this node,
-+ these counters reflect how many frames were not matched to any
-+ existing key and therefore passed through the miss entry; the
-+ total frame count will be returned in the counter of the
-+ first range (as only one frame length range was defined).
-+
-+ @Param[in] h_CcNode A handle to the node
-+ @Param[out] p_MissStatistics Statistics counters for 'miss'
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_compat_fm_pcd_cc_tbl_get_stats_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_fm_pcd_cc_tbl_get_stats_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableGetMissStatistics
-+
-+ @Description This routine may be used to get statistics counters of the 'miss'
-+ entry of a hash table.
-+
-+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' or
-+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' was set for this node,
-+ these counters reflect how many frames were not matched to any
-+ existing key and therefore passed through the miss entry;
-+
-+ @Param[in] h_HashTbl A handle to a hash table
-+ @Param[out] p_MissStatistics Statistics counters for 'miss'
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(14), ioc_compat_fm_pcd_cc_tbl_get_stats_t)
-+#endif
-+#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(14), ioc_fm_pcd_cc_tbl_get_stats_t)
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_NetEnvCharacteristicsSet
-+
-+ @Description Define a set of Network Environment Characteristics.
-+
-+ When setting an environment it is important to understand its
-+ application. It is not meant to describe the flows that will run
-+ on the ports using this environment, but what the user means TO DO
-+ with the PCD mechanisms in order to parse-classify-distribute those
-+ frames.
-+ By specifying a distinction unit, the user means it would use that option
-+ for distinction between frames at either a KeyGen scheme or a coarse
-+ classification action descriptor. Using interchangeable headers to define a
-+ unit means that the user is indifferent to which of the interchangeable
-+ headers is present in the frame, and wants the distinction to be based
-+ on the presence of either one of them.
-+
-+ Depending on context, there are limitations to the use of environments. A
-+ port using the PCD functionality is bound to an environment. Some or even
-+ all ports may share an environment, but an environment per port is also
-+ possible. When initializing a scheme, a classification plan group (see below),
-+ or a coarse classification tree, one of the initialized environments must be
-+ stated and related to. When a port is bound to a scheme, a classification
-+ plan group, or a coarse classification tree, it MUST be bound to the same
-+ environment.
-+
-+ The different PCD modules may relate (for flow definition) ONLY to
-+ distinction units as defined by their environment. When initializing a
-+ scheme, for example, it may not select IPV4 as a match for
-+ recognizing flows unless IPV4 was defined in the related environment. In
-+ fact, to guide the user through the configuration of the PCD, each module's
-+ characterization in terms of flows is done not with protocol names, but with
-+ environment indexes.
-+
-+ In terms of HW implementation, the list of distinction units sets the LCV
-+ vectors, which are later used for the match vector, classification plan
-+ vectors and coarse classification indexing.
-+
-+ @Param[in,out] ioc_fm_pcd_net_env_params_t A structure defining the distinction units for this configuration.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(20), ioc_compat_fm_pcd_net_env_params_t)
-+#endif
-+#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(20), ioc_fm_pcd_net_env_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_NetEnvCharacteristicsDelete
-+
-+ @Description Deletes a set of Network Environment Characteristics.
-+
-+ @Param[in] ioc_fm_obj_t - The id of a Network Environment object.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(21), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(21), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSchemeSet
-+
-+ @Description Initializing or modifying and enabling a scheme for the KeyGen.
-+ This routine should be called for adding or modifying a scheme.
-+ When a scheme needs modifying, the API requires that it be
-+ rewritten. In such a case 'modify' should be TRUE. If the
-+ routine is called for a valid scheme and 'modify' is FALSE,
-+ it will return an error.
-+
-+ @Param[in,out] ioc_fm_pcd_kg_scheme_params_t A structure of parameters for defining the scheme
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_KG_SCHEME_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(24), ioc_compat_fm_pcd_kg_scheme_params_t)
-+#endif
-+#define FM_PCD_IOC_KG_SCHEME_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(24), ioc_fm_pcd_kg_scheme_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_KgSchemeDelete
-+
-+ @Description Deleting an initialized scheme.
-+
-+ @Param[in] ioc_fm_obj_t scheme id as initialized by application at FM_PCD_IOC_KG_SET_SCHEME
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(25), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_KG_SCHEME_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(25), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_CcRootBuild
-+
-+ @Description This routine must be called to define a complete coarse
-+ classification tree. This is the way to define coarse
-+ classification to a certain flow - the KeyGen schemes
-+ may point only to trees defined in this way.
-+
-+ @Param[in,out] ioc_fm_pcd_cc_tree_params_t A structure of parameters to define the tree.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_CC_ROOT_BUILD_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(26), compat_uptr_t)
-+#endif
-+#define FM_PCD_IOC_CC_ROOT_BUILD _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(26), void *) /* workaround ...*/
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_CcRootDelete
-+
-+ @Description Deleting a built tree.
-+
-+ @Param[in] ioc_fm_obj_t - The id of a CC tree.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_CC_ROOT_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(27), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_CC_ROOT_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(27), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableSet
-+
-+ @Description This routine should be called for each CC (coarse classification)
-+ node. The whole CC tree should be built bottom up so that each
-+ node points to already defined nodes. p_NodeId returns the node
-+ Id to be used by other nodes.
-+
-+ @Param[in,out] ioc_fm_pcd_cc_node_params_t A structure for defining the CC node params
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(28), compat_uptr_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(28), void *) /* workaround ...*/
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableDelete
-+
-+ @Description Deleting a built node.
-+
-+ @Param[in] ioc_fm_obj_t - The id of a CC node.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(29), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(29), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_CcRootModifyNextEngine
-+
-+ @Description Modify the Next Engine Parameters in the entry of the tree.
-+
-+ @Param[in] ioc_fm_pcd_cc_tree_modify_next_engine_params_t - Pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_CcRootBuild().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(30), ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t)
-+#endif
-+#define FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(30), ioc_fm_pcd_cc_tree_modify_next_engine_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyNextEngine
-+
-+ @Description Modify the Next Engine Parameters in the relevant key entry of the node.
-+
-+ @Param[in] ioc_fm_pcd_cc_node_modify_next_engine_params_t A pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(31), ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(31), ioc_fm_pcd_cc_node_modify_next_engine_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyMissNextEngine
-+
-+ @Description Modify the Next Engine Parameters of the Miss key case of the node.
-+
-+ @Param[in] ioc_fm_pcd_cc_node_modify_next_engine_params_t - Pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(32), ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(32), ioc_fm_pcd_cc_node_modify_next_engine_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableRemoveKey
-+
-+ @Description Remove the key (including next engine parameters of this key)
-+ defined by the index of the relevant node.
-+
-+ @Param[in] ioc_fm_pcd_cc_node_remove_key_params_t A pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_PCD_MatchTableSet() has been called for this
-+ node and for all of the nodes that lead to it.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(33), ioc_compat_fm_pcd_cc_node_remove_key_params_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(33), ioc_fm_pcd_cc_node_remove_key_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableAddKey
-+
-+ @Description Add the key (including next engine parameters of this key) at the
-+ index defined by keyIndex. Note that 'FM_PCD_LAST_KEY_INDEX'
-+ may be used when the user doesn't care about the position of the
-+ key in the table - in that case, the key will be automatically
-+ added by the driver in the last available entry.
-+
-+ @Param[in] ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t A pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_PCD_MatchTableSet() has been called for this
-+ node and for all of the nodes that lead to it.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_ADD_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(34), ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_ADD_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(34), ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyKeyAndNextEngine
-+
-+ @Description Modify the key and Next Engine Parameters of this key in the index defined by key_index.
-+
-+ @Param[in] ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t A pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_MatchTableSet(), not only for the relevant node but also
-+ for the node that points to this node.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_MatchTableModifyKey
-+
-+ @Description Modify the key at the index defined by key_index.
-+
-+ @Param[in] ioc_fm_pcd_cc_node_modify_key_params_t - Pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only after FM_PCD_MatchTableSet() has been called for this
-+ node and for all of the nodes that lead to it.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(36), ioc_compat_fm_pcd_cc_node_modify_key_params_t)
-+#endif
-+#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(36), ioc_fm_pcd_cc_node_modify_key_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableSet
-+
-+ @Description This routine initializes a hash table structure.
-+ KeyGen hash result determines the hash bucket.
-+ Next, KeyGen key is compared against all keys of this
-+ bucket (exact match).
-+ The number of sets (number of buckets) of the hash equals the
-+ number of 1-s in 'hash_res_mask' in the provided parameters.
-+ The number of hash table ways is then calculated by dividing
-+ 'max_num_of_keys' equally between the hash sets; this is the maximal
-+ number of keys that a hash bucket may hold.
-+ The hash table is initialized empty and keys may be
-+ added to it following the initialization. Key masks are not
-+ supported in the current hash table implementation.
-+ The initialized hash table can be integrated as a node in a
-+ CC tree.
-+
-+ @Param[in,out] ioc_fm_pcd_hash_table_params_t - Pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_HASH_TABLE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_compat_fm_pcd_hash_table_params_t)
-+#endif
-+#define FM_PCD_IOC_HASH_TABLE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_fm_pcd_hash_table_params_t)
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableDelete
-+
-+ @Description This routine deletes the provided hash table and releases all
-+ its allocated resources.
-+
-+ @Param[in] ioc_fm_obj_t - The ID of a hash table.
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_HASH_TABLE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_HASH_TABLE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(37), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableAddKey
-+
-+ @Description This routine adds the provided key (including next engine
-+ parameters of this key) to the hash table.
-+ The key is added as the last key of the bucket that it is
-+ mapped to.
-+
-+ @Param[in] ioc_fm_pcd_hash_table_add_key_params_t - Pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_HASH_TABLE_ADD_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(39), ioc_compat_fm_pcd_hash_table_add_key_params_t)
-+#endif
-+#define FM_PCD_IOC_HASH_TABLE_ADD_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(39), ioc_fm_pcd_hash_table_add_key_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_HashTableRemoveKey
-+
-+ @Description This routine removes the requested key (including next engine
-+ parameters of this key) from the hash table.
-+
-+ @Param[in] ioc_fm_pcd_hash_table_remove_key_params_t - Pointer to a structure with the relevant parameters
-+
-+ @Return 0 on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_HashTableSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_HASH_TABLE_REMOVE_KEY_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(40), ioc_compat_fm_pcd_hash_table_remove_key_params_t)
-+#endif
-+#define FM_PCD_IOC_HASH_TABLE_REMOVE_KEY _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(40), ioc_fm_pcd_hash_table_remove_key_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrProfileSet
-+
-+ @Description Sets a profile entry in the policer profile table.
-+ The routine overrides any existing value.
-+
-+ @Param[in,out] ioc_fm_pcd_plcr_profile_params_t A structure of parameters for defining a
-+ policer profile entry.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_PLCR_PROFILE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_compat_fm_pcd_plcr_profile_params_t)
-+#endif
-+#define FM_PCD_IOC_PLCR_PROFILE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_fm_pcd_plcr_profile_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_PlcrProfileDelete
-+
-+ @Description Delete a profile entry in the policer profile table.
-+ The routine sets the entry to invalid.
-+
-+ @Param[in] ioc_fm_obj_t The id of a policer profile.
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_PLCR_PROFILE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_PLCR_PROFILE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(41), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipNodeSet
-+
-+ @Description This routine should be called for defining a manipulation
-+ node. A manipulation node must be defined before the CC node
-+ that precedes it.
-+
-+ @Param[in] ioc_fm_pcd_manip_params_t - A structure of parameters defining the manipulation
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MANIP_NODE_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(43), ioc_compat_fm_pcd_manip_params_t)
-+#endif
-+#define FM_PCD_IOC_MANIP_NODE_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(43), ioc_fm_pcd_manip_params_t)
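-+/* Editor's illustrative sketch (not part of the original header): creating a
-+ * header-manipulation node via FM_PCD_IOC_MANIP_NODE_SET. 'pcd_fd' is an
-+ * assumed open FM-PCD device descriptor and 'e_IOC_FM_PCD_MANIP_HDR' is an
-+ * assumed enumerator name; on success the driver is expected to return the
-+ * node handle in the 'id' member (the _IOWR encoding implies the structure
-+ * is read back).
-+ *
-+ *   ioc_fm_pcd_manip_params_t manip;
-+ *   memset(&manip, 0, sizeof(manip));
-+ *   manip.type = e_IOC_FM_PCD_MANIP_HDR;        // assumed: header manipulation node
-+ *   manip.u.hdr.rmv = TRUE;                     // define a removal manipulation
-+ *   // ... fill manip.u.hdr.rmv_params per ioc_fm_pcd_manip_hdr_rmv_params_t ...
-+ *   manip.u.hdr.dont_parse_after_manip = FALSE; // run the parser again after manipulation
-+ *   manip.p_next_manip = NULL;                  // no chained manipulation
-+ *   if (ioctl(pcd_fd, FM_PCD_IOC_MANIP_NODE_SET, &manip) != 0)
-+ *       return -1;
-+ *   // manip.id now holds the handle of the new manipulation node
-+ */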
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipNodeReplace
-+
-+ @Description Change an existing manipulation node to match a new requirement.
-+ (Here, it is implemented as a variant of the same IOCTL as for
-+ FM_PCD_ManipNodeSet(), one for which, when called, the 'id' member
-+ in its 'ioc_fm_pcd_manip_params_t' argument is set to contain
-+ the manip node's handle.)
-+
-+ @Param[in] ioc_fm_pcd_manip_params_t - A structure of parameters defining the manipulation
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_ManipNodeSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MANIP_NODE_REPLACE_COMPAT FM_PCD_IOC_MANIP_NODE_SET_COMPAT
-+#endif
-+#define FM_PCD_IOC_MANIP_NODE_REPLACE FM_PCD_IOC_MANIP_NODE_SET
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipNodeDelete
-+
-+ @Description Delete an existing manipulation node.
-+
-+ @Param[in] ioc_fm_obj_t The id of the manipulation node to delete.
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_ManipNodeSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MANIP_NODE_DELETE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(44), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_MANIP_NODE_DELETE _IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(44), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_ManipGetStatistics
-+
-+ @Description Retrieve the manipulation statistics.
-+
-+ @Param[in] h_ManipNode A handle to a manipulation node.
-+ @Param[out] p_FmPcdManipStats A structure for retrieving the manipulation statistics
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_ManipNodeSet().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_MANIP_GET_STATS_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(50), ioc_compat_fm_pcd_manip_get_stats_t)
-+#endif
-+#define FM_PCD_IOC_MANIP_GET_STATS _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(50), ioc_fm_pcd_manip_get_stats_t)
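-+/* Editor's illustrative sketch (not part of the original header): reading IP
-+ * fragmentation statistics for a previously created manipulation node.
-+ * 'pcd_fd' and 'manip_handle' (the handle returned when the node was set) are
-+ * assumed names; the field paths come from the statistics structures above.
-+ *
-+ *   ioc_fm_pcd_manip_get_stats_t get;
-+ *   memset(&get, 0, sizeof(get));
-+ *   get.id = manip_handle;            // handle from FM_PCD_IOC_MANIP_NODE_SET
-+ *   if (ioctl(pcd_fd, FM_PCD_IOC_MANIP_GET_STATS, &get) == 0)
-+ *       printf("IP frag: %u frames in, %u fragmented, %u fragments out\n",
-+ *              get.stats.u.frag.u.ip_frag.total_frames,
-+ *              get.stats.u.frag.u.ip_frag.fragmented_frames,
-+ *              get.stats.u.frag.u.ip_frag.generated_fragments);
-+ */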
-+
-+/**************************************************************************//**
-+@Function FM_PCD_SetAdvancedOffloadSupport
-+
-+@Description This routine must be called in order to support the following features:
-+ IP-fragmentation, IP-reassembly, IPsec, Header-manipulation, frame-replicator.
-+
-+@Param[in] h_FmPcd FM PCD module descriptor.
-+
-+@Return 0 on success; error code otherwise.
-+
-+@Cautions Allowed only when PCD is disabled.
-+*//***************************************************************************/
-+#define FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT _IO(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(45))
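-+/* Editor's illustrative sketch (not part of the original header): this ioctl
-+ * takes no argument structure and, per the caution above, must be issued
-+ * while the PCD is still disabled. 'pcd_fd' is an assumed open FM-PCD
-+ * device descriptor.
-+ *
-+ *   if (ioctl(pcd_fd, FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT) != 0)
-+ *       perror("FM_PCD_IOC_SET_ADVANCED_OFFLOAD_SUPPORT");
-+ */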
-+
-+#if (DPAA_VERSION >= 11)
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicSetGroup
-+
-+ @Description Initialize a Frame Replicator group.
-+
-+ @Param[in] h_FmPcd FM PCD module descriptor.
-+ @Param[in] p_FrmReplicGroupParam A structure of parameters for the initialization of
-+ the frame replicator group.
-+
-+ @Return A handle to the initialized object on success; NULL code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_Init().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_FRM_REPLIC_GROUP_SET_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(46), ioc_compat_fm_pcd_frm_replic_group_params_t)
-+#endif
-+#define FM_PCD_IOC_FRM_REPLIC_GROUP_SET _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(46), ioc_fm_pcd_frm_replic_group_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicDeleteGroup
-+
-+ @Description Delete a Frame Replicator group.
-+
-+ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(47), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PCD_IOC_FRM_REPLIC_GROUP_DELETE _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(47), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicAddMember
-+
-+ @Description Add the member in the index defined by the memberIndex.
-+
-+ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
-+ @Param[in] memberIndex member index for adding.
-+ @Param[in] p_MemberParams A pointer to the new member parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(48), ioc_compat_fm_pcd_frm_replic_member_params_t)
-+#endif
-+#define FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(48), ioc_fm_pcd_frm_replic_member_params_t)
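-+/* Editor's illustrative sketch (not part of the original header): building a
-+ * frame replicator group and then adding a member. 'pcd_fd' is an assumed
-+ * open FM-PCD device descriptor; the per-member next-engine parameters are
-+ * left to be filled per ioc_fm_pcd_cc_next_engine_params_t (defined elsewhere
-+ * in these headers).
-+ *
-+ *   ioc_fm_pcd_frm_replic_group_params_t grp;
-+ *   memset(&grp, 0, sizeof(grp));
-+ *   grp.max_num_of_entries = 4;       // room for up to four members (must be >= 2)
-+ *   grp.num_of_entries = 2;           // two members defined at creation time
-+ *   // ... fill grp.next_engine_params[0..1] ...
-+ *   if (ioctl(pcd_fd, FM_PCD_IOC_FRM_REPLIC_GROUP_SET, &grp) != 0)
-+ *       return -1;
-+ *   // on success, grp.id holds the handle of the new group
-+ *
-+ *   ioc_fm_pcd_frm_replic_member_params_t member;
-+ *   memset(&member, 0, sizeof(member));
-+ *   member.member.h_replic_group = grp.id;
-+ *   member.member.member_index = 2;   // append as the third member
-+ *   // ... fill member.next_engine_params ...
-+ *   ioctl(pcd_fd, FM_PCD_IOC_FRM_REPLIC_MEMBER_ADD, &member);
-+ */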
-+
-+/**************************************************************************//**
-+ @Function FM_PCD_FrmReplicRemoveMember
-+
-+ @Description Remove the member defined by the index from the relevant group.
-+
-+ @Param[in] h_FrmReplicGroup A handle to the frame replicator group.
-+ @Param[in] memberIndex member index for removing.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PCD_FrmReplicSetGroup() of this group.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(49), ioc_compat_fm_pcd_frm_replic_member_t)
-+#endif
-+#define FM_PCD_IOC_FRM_REPLIC_MEMBER_REMOVE _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(49), ioc_fm_pcd_frm_replic_member_t)
-+
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+/**************************************************************************//**
-+ @Function FM_PCD_StatisticsSetNode
-+
-+ @Description This routine should be called for defining a statistics node.
-+
-+ @Param[in,out] ioc_fm_pcd_stats_params_t A structure of parameters defining the statistics
-+
-+ @Return 0 on success; Error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_STATISTICS_SET_NODE_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(45), void *)
-+#endif
-+#define FM_PCD_IOC_STATISTICS_SET_NODE _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(45), void *)
-+
-+#endif /* FM_CAPWAP_SUPPORT */
-+
-+#ifdef NCSW_BACKWARD_COMPATIBLE_API
-+#if defined(CONFIG_COMPAT)
-+#define FM_PCD_IOC_SET_NET_ENV_CHARACTERISTICS_COMPAT \
-+ FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET_COMPAT
-+#define FM_PCD_IOC_DELETE_NET_ENV_CHARACTERISTICS_COMPAT \
-+ FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE_COMPAT
-+#define FM_PCD_IOC_KG_SET_SCHEME_COMPAT FM_PCD_IOC_KG_SCHEME_SET_COMPAT
-+#define FM_PCD_IOC_KG_DEL_SCHEME_COMPAT FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT
-+#define FM_PCD_IOC_CC_BUILD_TREE_COMPAT FM_PCD_IOC_CC_ROOT_BUILD_COMPAT
-+#define FM_PCD_IOC_CC_DELETE_TREE_COMPAT FM_PCD_IOC_CC_ROOT_DELETE_COMPAT
-+#define FM_PCD_IOC_CC_DELETE_NODE_COMPAT FM_PCD_IOC_MATCH_TABLE_DELETE_COMPAT
-+#define FM_PCD_IOC_CC_TREE_MODIFY_NEXT_ENGINE_COMPAT \
-+ FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE_COMPAT
-+#define FM_PCD_IOC_CC_NODE_MODIFY_NEXT_ENGINE_COMPAT \
-+ FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE_COMPAT
-+#define FM_PCD_IOC_CC_NODE_MODIFY_MISS_NEXT_ENGINE_COMPAT \
-+ FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE_COMPAT
-+#define FM_PCD_IOC_CC_NODE_REMOVE_KEY_COMPAT FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY_COMPAT
-+#define FM_PCD_IOC_CC_NODE_ADD_KEY_COMPAT FM_PCD_IOC_MATCH_TABLE_ADD_KEY_COMPAT
-+#define FM_PCD_IOC_CC_NODE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT \
-+ FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT
-+#define FM_PCD_IOC_CC_NODE_MODIFY_KEY_COMPAT FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_COMPAT
-+#define FM_PCD_IOC_PLCR_SET_PROFILE_COMPAT FM_PCD_IOC_PLCR_PROFILE_SET_COMPAT
-+#define FM_PCD_IOC_PLCR_DEL_PROFILE_COMPAT FM_PCD_IOC_PLCR_PROFILE_DELETE_COMPAT
-+#define FM_PCD_IOC_MANIP_SET_NODE_COMPAT FM_PCD_IOC_MANIP_NODE_SET_COMPAT
-+#define FM_PCD_IOC_MANIP_DELETE_NODE_COMPAT FM_PCD_IOC_MANIP_NODE_DELETE_COMPAT
-+#endif
-+#define FM_PCD_IOC_SET_NET_ENV_CHARACTERISTICS FM_PCD_IOC_NET_ENV_CHARACTERISTICS_SET
-+#define FM_PCD_IOC_DELETE_NET_ENV_CHARACTERISTICS \
-+ FM_PCD_IOC_NET_ENV_CHARACTERISTICS_DELETE
-+#define FM_PCD_IOC_KG_SET_SCHEME FM_PCD_IOC_KG_SCHEME_SET
-+#define FM_PCD_IOC_KG_DEL_SCHEME FM_PCD_IOC_KG_SCHEME_DELETE
-+#define FM_PCD_IOC_CC_BUILD_TREE FM_PCD_IOC_CC_ROOT_BUILD
-+#define FM_PCD_IOC_CC_DELETE_TREE FM_PCD_IOC_CC_ROOT_DELETE
-+#define FM_PCD_IOC_CC_DELETE_NODE FM_PCD_IOC_MATCH_TABLE_DELETE
-+#define FM_PCD_IOC_CC_TREE_MODIFY_NEXT_ENGINE FM_PCD_IOC_CC_ROOT_MODIFY_NEXT_ENGINE
-+#define FM_PCD_IOC_CC_NODE_MODIFY_NEXT_ENGINE FM_PCD_IOC_MATCH_TABLE_MODIFY_NEXT_ENGINE
-+#define FM_PCD_IOC_CC_NODE_MODIFY_MISS_NEXT_ENGINE \
-+ FM_PCD_IOC_MATCH_TABLE_MODIFY_MISS_NEXT_ENGINE
-+#define FM_PCD_IOC_CC_NODE_REMOVE_KEY FM_PCD_IOC_MATCH_TABLE_REMOVE_KEY
-+#define FM_PCD_IOC_CC_NODE_ADD_KEY FM_PCD_IOC_MATCH_TABLE_ADD_KEY
-+#define FM_PCD_IOC_CC_NODE_MODIFY_KEY_AND_NEXT_ENGINE \
-+ FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE
-+#define FM_PCD_IOC_CC_NODE_MODIFY_KEY FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY
-+#define FM_PCD_IOC_PLCR_SET_PROFILE FM_PCD_IOC_PLCR_PROFILE_SET
-+#define FM_PCD_IOC_PLCR_DEL_PROFILE FM_PCD_IOC_PLCR_PROFILE_DELETE
-+#define FM_PCD_IOC_MANIP_SET_NODE FM_PCD_IOC_MANIP_NODE_SET
-+#define FM_PCD_IOC_MANIP_DELETE_NODE FM_PCD_IOC_MANIP_NODE_DELETE
-+#endif /* NCSW_BACKWARD_COMPATIBLE_API */
-+
-+#endif /* __FM_PCD_IOCTLS_H */
-+/** @} */ /* end of lnx_ioctl_FM_PCD_Runtime_grp group */
-+/** @} */ /* end of lnx_ioctl_FM_PCD_grp group */
-+/** @} */ /* end of lnx_ioctl_FM_grp group */
---- /dev/null
-+++ b/include/uapi/linux/fmd/Peripherals/fm_port_ioctls.h
-@@ -0,0 +1,973 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/******************************************************************************
-+ @File fm_port_ioctls.h
-+
-+ @Description FM Port routines
-+*//***************************************************************************/
-+#ifndef __FM_PORT_IOCTLS_H
-+#define __FM_PORT_IOCTLS_H
-+
-+#include "enet_ext.h"
-+#include "net_ioctls.h"
-+#include "fm_ioctls.h"
-+#include "fm_pcd_ioctls.h"
-+
-+
-+/**************************************************************************//**
-+
-+ @Group lnx_ioctl_FM_grp Frame Manager Linux IOCTL API
-+
-+ @Description FM Linux ioctls definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_PORT_grp FM Port
-+
-+ @Description FM Port API
-+
-+ The FM uses a general module called "port" to represent a Tx port
-+ (MAC), an Rx port (MAC), offline parsing flow or host command
-+ flow. There may be up to 17 (may change) ports in an FM - 5 Tx
-+ ports (4 for the 1G MACs, 1 for the 10G MAC), 5 Rx Ports, and 7
-+ Host command/Offline parsing ports. The SW driver manages these
-+ ports as sub-modules of the FM, i.e. after an FM is initialized,
-+ its ports may be initialized and operated upon.
-+
-+ The port is initialized aware of its type, but other functions on
-+ a port may be indifferent to its type. When necessary, the driver
-+ verifies coherency and returns an error if applicable.
-+
-+ On initialization, the user specifies the port type and its index
-+ (relative to the port's type). Host command and Offline parsing
-+ ports share the same id range, i.e. the user may not initialize both
-+ host command port 0 and offline parsing port 0.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description An enum for defining port PCD modes.
-+ (Must match enum e_FmPortPcdSupport defined in fm_port_ext.h)
-+
-+ This enum defines the superset of PCD engines support - i.e. not
-+ all engines have to be used, but all have to be enabled. The real
-+ flow of a specific frame depends on the PCD configuration and the
-+ frame headers and payload.
-+ Note: the first engine and the first engine after the parser (if
-+ it exists) should be listed in order; the order is important as it
-+ defines the flow of the port. However, for the rest of the engines
-+ (the ones that follow), the order no longer matters, as it is
-+ defined by the PCD graph itself.
-+*//***************************************************************************/
-+typedef enum ioc_fm_port_pcd_support {
-+ e_IOC_FM_PORT_PCD_SUPPORT_NONE = 0 /**< BMI to BMI, PCD is not used */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_ONLY /**< Use only Parser */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PLCR_ONLY /**< Use only Policer */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR /**< Use Parser and Policer */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG /**< Use Parser and Keygen */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC /**< Use Parser, Keygen and Coarse Classification */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR
-+ /**< Use all PCD engines */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR /**< Use Parser, Keygen and Policer */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_CC /**< Use Parser and Coarse Classification */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_CC_AND_PLCR /**< Use Parser and Coarse Classification and Policer */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_CC_ONLY /**< Use only Coarse Classification */
-+#if (defined(FM_CAPWAP_SUPPORT) && (DPAA_VERSION == 10))
-+ , e_IOC_FM_PORT_PCD_SUPPORT_CC_AND_KG /**< Use Coarse Classification,and Keygen */
-+ , e_IOC_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR /**< Use Coarse Classification, Keygen and Policer */
-+#endif /* FM_CAPWAP_SUPPORT */
-+} ioc_fm_port_pcd_support;
-+
-+
-+/**************************************************************************//**
-+ @Collection FM Frame error
-+*//***************************************************************************/
-+typedef uint32_t ioc_fm_port_frame_err_select_t; /**< typedef for defining Frame Descriptor errors */
-+
-+/* @} */
-+
-+
-+/**************************************************************************//**
-+ @Description An enum for defining Dual Tx rate limiting scale.
-+ (Must match e_FmPortDualRateLimiterScaleDown defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_port_dual_rate_limiter_scale_down {
-+ e_IOC_FM_PORT_DUAL_RATE_LIMITER_NONE = 0, /**< Use only single rate limiter */
-+ e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_2, /**< Divide high rate limiter by 2 */
-+ e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_4, /**< Divide high rate limiter by 4 */
-+ e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 /**< Divide high rate limiter by 8 */
-+} ioc_fm_port_dual_rate_limiter_scale_down;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining Tx rate limiting
-+ (Must match struct t_FmPortRateLimit defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_rate_limit_t {
-+ uint16_t max_burst_size; /**< in KBytes for Tx ports, in frames
-+ for offline parsing ports. (note that
-+ for early chips burst size is
-+ rounded up to a multiple of 1000 frames).*/
-+ uint32_t rate_limit; /**< in Kb/sec for Tx ports, in frame/sec for
-+ offline parsing ports. Rate limit refers to
-+ data rate (rather than line rate). */
-+ ioc_fm_port_dual_rate_limiter_scale_down rate_limit_divider; /**< For offline parsing ports only. Not valid
-+ for some earlier chip revisions */
-+} ioc_fm_port_rate_limit_t;
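-+/* Editor's illustrative sketch (not part of the original header): a Tx-port
-+ * rate limit of 100 Mb/s with a 64 KByte burst, using the structure defined
-+ * above. The rate-limit ioctl itself (see FM_PORT_SetRateLimit further below)
-+ * and the device descriptor are not shown here and are assumed.
-+ *
-+ *   ioc_fm_port_rate_limit_t limit;
-+ *   memset(&limit, 0, sizeof(limit));
-+ *   limit.max_burst_size = 64;        // KBytes for Tx ports
-+ *   limit.rate_limit = 100000;        // Kb/sec for Tx ports (data rate)
-+ *   limit.rate_limit_divider = e_IOC_FM_PORT_DUAL_RATE_LIMITER_NONE; // offline parsing only
-+ */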
-+
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_PORT_runtime_control_grp FM Port Runtime Control Unit
-+
-+ @Description FM Port Runtime control unit API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description An enum for defining FM Port counters.
-+ (Must match enum e_FmPortCounters defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef enum ioc_fm_port_counters {
-+ e_IOC_FM_PORT_COUNTERS_CYCLE, /**< BMI performance counter */
-+ e_IOC_FM_PORT_COUNTERS_TASK_UTIL, /**< BMI performance counter */
-+ e_IOC_FM_PORT_COUNTERS_QUEUE_UTIL, /**< BMI performance counter */
-+ e_IOC_FM_PORT_COUNTERS_DMA_UTIL, /**< BMI performance counter */
-+ e_IOC_FM_PORT_COUNTERS_FIFO_UTIL, /**< BMI performance counter */
-+ e_IOC_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION, /**< BMI Rx only performance counter */
-+ e_IOC_FM_PORT_COUNTERS_FRAME, /**< BMI statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_DISCARD_FRAME, /**< BMI statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_DEALLOC_BUF, /**< BMI deallocate buffer statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_RX_BAD_FRAME, /**< BMI Rx only statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_RX_LARGE_FRAME, /**< BMI Rx only statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_RX_FILTER_FRAME, /**< BMI Rx & OP only statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_RX_LIST_DMA_ERR, /**< BMI Rx, OP & HC only statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD, /**< BMI Rx, OP & HC statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_PREPARE_TO_ENQUEUE_COUNTER, /**< BMI Rx, OP & HC only statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_WRED_DISCARD, /**< BMI OP & HC only statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_LENGTH_ERR, /**< BMI non-Rx statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT, /**< BMI non-Rx statistics counter */
-+ e_IOC_FM_PORT_COUNTERS_DEQ_TOTAL, /**< QMI total QM dequeues counter */
-+ e_IOC_FM_PORT_COUNTERS_ENQ_TOTAL, /**< QMI total QM enqueues counter */
-+ e_IOC_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI counter */
-+ e_IOC_FM_PORT_COUNTERS_DEQ_CONFIRM /**< QMI counter */
-+} ioc_fm_port_counters;
-+
-+typedef struct ioc_fm_port_bmi_stats_t {
-+ uint32_t cnt_cycle;
-+ uint32_t cnt_task_util;
-+ uint32_t cnt_queue_util;
-+ uint32_t cnt_dma_util;
-+ uint32_t cnt_fifo_util;
-+ uint32_t cnt_rx_pause_activation;
-+ uint32_t cnt_frame;
-+ uint32_t cnt_discard_frame;
-+ uint32_t cnt_dealloc_buf;
-+ uint32_t cnt_rx_bad_frame;
-+ uint32_t cnt_rx_large_frame;
-+ uint32_t cnt_rx_filter_frame;
-+ uint32_t cnt_rx_list_dma_err;
-+ uint32_t cnt_rx_out_of_buffers_discard;
-+ uint32_t cnt_wred_discard;
-+ uint32_t cnt_length_err;
-+ uint32_t cnt_unsupported_format;
-+} ioc_fm_port_bmi_stats_t;
-+
-+/**************************************************************************//**
-+ @Description   A structure for defining the congestion groups relevant to a port.
-+                (Must match struct t_FmPortCongestionGrps defined in fm_port_ext.h)
-+
-+ Fields commented 'IN' are passed by the port module to be used
-+ by the FM module.
-+ Fields commented 'OUT' will be filled by FM before returning to port.
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_congestion_groups_t {
-+ uint16_t num_of_congestion_grps_to_consider; /**< The number of required congestion groups
-+ to define the size of the following array */
-+ uint8_t congestion_grps_to_consider [FM_PORT_NUM_OF_CONGESTION_GRPS];
-+ /**< An array of CG indexes;
-+ Note that the size of the array should be
-+ 'num_of_congestion_grps_to_consider'. */
-+#if DPAA_VERSION >= 11
-+ bool pfc_priorities_enable[FM_PORT_NUM_OF_CONGESTION_GRPS][FM_MAX_NUM_OF_PFC_PRIORITIES];
-+ /**< A matrix that represents the map between the CG ids
-+ defined in 'congestion_grps_to_consider' to the priorities
-+ mapping array. */
-+#endif /* DPAA_VERSION >= 11 */
-+} ioc_fm_port_congestion_groups_t;
-+
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Disable
-+
-+ @Description   Gracefully disables an FM port. The port stops accepting new
-+                tasks, and all tasks already associated with the port are
-+                allowed to run to completion.
-+
-+ @Return        0 on success; error code otherwise.
-+
-+ @Cautions      This is a blocking routine; it returns only after the port has
-+                been gracefully stopped, i.e. the port will not accept new
-+                frames, but it will finish all frames and tasks that were
-+                already begun.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_DISABLE _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(1))
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_Enable
-+
-+ @Description A runtime routine provided to allow disable/enable of port.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_ENABLE _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(2))
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetRateLimit
-+
-+ @Description   Calling this routine enables the rate-limit algorithm.
-+                By default, this functionality is disabled.
-+                Note that the rate-limit mechanism uses the FM time stamp.
-+ The selected rate limit specified here would be
-+ rounded DOWN to the nearest 16M.
-+
-+ May be used for Tx and offline parsing ports only
-+
-+ @Param[in] ioc_fm_port_rate_limit A structure of rate limit parameters
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_SET_RATE_LIMIT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(3), ioc_fm_port_rate_limit_t)
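-+/* Illustrative usage sketch (not part of the original API documentation).
-+ * It shows how a user-space application might issue FM_PORT_IOC_SET_RATE_LIMIT
-+ * on an FM Tx port's character device. The device node path and the chosen
-+ * numeric values are assumptions for the example only.
-+ *
-+ *   #include <fcntl.h>
-+ *   #include <stdio.h>
-+ *   #include <sys/ioctl.h>
-+ *
-+ *   ioc_fm_port_rate_limit_t limit = {
-+ *       .max_burst_size     = 64,      // KBytes for Tx ports
-+ *       .rate_limit         = 100000,  // Kb/sec, data rate (not line rate)
-+ *       .rate_limit_divider = e_IOC_FM_PORT_DUAL_RATE_LIMITER_NONE,
-+ *   };
-+ *   int fd = open("/dev/fm0-port-tx2", O_RDWR);   // assumed node name
-+ *   if (fd < 0 || ioctl(fd, FM_PORT_IOC_SET_RATE_LIMIT, &limit) < 0)
-+ *       perror("FM_PORT_IOC_SET_RATE_LIMIT");
-+ */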
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_DeleteRateLimit
-+
-+ @Description Calling this routine disables the previously enabled rate limit.
-+
-+ May be used for Tx and offline parsing ports only
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_DELETE_RATE_LIMIT _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(5))
-+#define FM_PORT_IOC_REMOVE_RATE_LIMIT FM_PORT_IOC_DELETE_RATE_LIMIT
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_AddCongestionGrps
-+
-+ @Description   This routine affects the corresponding Tx port.
-+ It should be called in order to enable pause
-+ frame transmission in case of congestion in one or more
-+ of the congestion groups relevant to this port.
-+ Each call to this routine may add one or more congestion
-+ groups to be considered relevant to this port.
-+
-+ May be used for Rx, or RX+OP ports only (depending on chip)
-+
-+ @Param[in] ioc_fm_port_congestion_groups_t - A pointer to an array of
-+ congestion group ids to consider.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_ADD_CONGESTION_GRPS _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(34), ioc_fm_port_congestion_groups_t)
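-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * registering two congestion groups as relevant for a port. The CG indexes
-+ * and the device file descriptor 'port_fd' are assumptions for the example.
-+ *
-+ *   ioc_fm_port_congestion_groups_t cgs = {
-+ *       .num_of_congestion_grps_to_consider = 2,
-+ *   };
-+ *   cgs.congestion_grps_to_consider[0] = 4;   // CG indexes chosen arbitrarily
-+ *   cgs.congestion_grps_to_consider[1] = 7;
-+ *   if (ioctl(port_fd, FM_PORT_IOC_ADD_CONGESTION_GRPS, &cgs) < 0)
-+ *       perror("FM_PORT_IOC_ADD_CONGESTION_GRPS");
-+ *   // FM_PORT_IOC_REMOVE_CONGESTION_GRPS (below) takes the same structure.
-+ */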
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_RemoveCongestionGrps
-+
-+ @Description   This routine affects the corresponding Tx port. It should be
-+                called when congestion groups that were defined for this port
-+                are no longer relevant, or when pause-frame transmission is no
-+                longer required on their behalf.
-+                Each call to this routine may remove one or more congestion
-+                groups from the set considered relevant to this port.
-+
-+ May be used for Rx, or RX+OP ports only (depending on chip)
-+
-+ @Param[in] ioc_fm_port_congestion_groups_t - A pointer to an array of
-+ congestion group ids to consider.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_REMOVE_CONGESTION_GRPS _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(35), ioc_fm_port_congestion_groups_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetErrorsRoute
-+
-+ @Description Errors selected for this routine will cause a frame with that error
-+ to be enqueued to error queue.
-+ Errors not selected for this routine will cause a frame with that error
-+                to be enqueued to one of the other port queues.
-+ By default all errors are defined to be enqueued to error queue.
-+ Errors that were configured to be discarded (at initialization)
-+ may not be selected here.
-+
-+ May be used for Rx and offline parsing ports only
-+
-+ @Param[in] ioc_fm_port_frame_err_select_t A list of errors to enqueue to error queue
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+                (Note that the underlying routine is documented as callable only
-+                before FM_PORT_Init(), while an ioctl on the port device can in
-+                practice only be issued after FM_PORT_Init() has already run for
-+                that port.)
-+*//***************************************************************************/
-+#define FM_PORT_IOC_SET_ERRORS_ROUTE _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(4), ioc_fm_port_frame_err_select_t)
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FM_PORT_pcd_runtime_control_grp FM Port PCD Runtime Control Unit
-+
-+ @Description FM Port PCD Runtime control unit API functions, definitions and enums.
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description A structure defining the KG scheme after the parser.
-+ (Must match struct t_FmPcdKgSchemeSelect defined in fm_port_ext.h)
-+
-+                This is relevant only for changing the scheme selection mode -
-+                from direct to indirect and vice versa - or, when the scheme is
-+                selected directly, for selecting the scheme id.
-+
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_kg_scheme_select_t {
-+ bool direct; /**< TRUE to use 'scheme_id' directly, FALSE to use LCV.*/
-+ void *scheme_id; /**< Relevant for 'direct'=TRUE only.
-+ 'scheme_id' selects the scheme after parser. */
-+} ioc_fm_pcd_kg_scheme_select_t;
-+
-+/**************************************************************************//**
-+ @Description Scheme IDs structure
-+ (Must match struct t_FmPcdPortSchemesParams defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_port_schemes_params_t {
-+ uint8_t num_of_schemes; /**< Number of schemes for port to be bound to. */
-+ void *scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES]; /**< Array of 'num_of_schemes' schemes for the
-+ port to be bound to */
-+} ioc_fm_pcd_port_schemes_params_t;
-+
-+/**************************************************************************//**
-+ @Description A union for defining port protocol parameters for parser
-+ (Must match union u_FmPcdHdrPrsOpts defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef union ioc_fm_pcd_hdr_prs_opts_u {
-+ /* MPLS */
-+ struct {
-+        bool                  label_interpretation_enable;/**< When set, the last MPLS label is
-+                                                  interpreted as described in the hardware spec
-+                                                  table; when cleared, the parser advances to the
-+                                                  header selected in 'next_parse'. */
-+        ioc_net_header_type   next_parse;      /**< Must be equal to or higher than IPv4. */
-+ } mpls_prs_options;
-+
-+ /* VLAN */
-+ struct {
-+ uint16_t tag_protocol_id1; /**< User defined Tag Protocol Identifier, to be recognized
-+ on VLAN TAG on top of 0x8100 and 0x88A8 */
-+ uint16_t tag_protocol_id2; /**< User defined Tag Protocol Identifier, to be recognized
-+ on VLAN TAG on top of 0x8100 and 0x88A8 */
-+ } vlan_prs_options;
-+
-+ /* PPP */
-+ struct{
-+ bool enable_mtu_check; /**< Check validity of MTU according to RFC2516 */
-+ } pppoe_prs_options;
-+
-+ /* IPV6 */
-+ struct {
-+ bool routing_hdr_disable; /**< Disable routing header */
-+ } ipv6_prs_options;
-+
-+ /* UDP */
-+ struct {
-+ bool pad_ignore_checksum; /**< TRUE to ignore pad in checksum */
-+ } udp_prs_options;
-+
-+ /* TCP */
-+ struct {
-+ bool pad_ignore_checksum; /**< TRUE to ignore pad in checksum */
-+ } tcp_prs_options;
-+} ioc_fm_pcd_hdr_prs_opts_u;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining each header for the parser
-+ (must match struct t_FmPcdPrsAdditionalHdrParams defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_prs_additional_hdr_params_t {
-+ ioc_net_header_type hdr; /**< Selected header */
-+ bool err_disable; /**< TRUE to disable error indication */
-+ bool soft_prs_enable; /**< Enable jump to SW parser when this
-+ header is recognized by the HW parser. */
-+    uint8_t                     index_per_hdr;   /**< Normally 0; if more than one SW parser
-+                                                      attachment exists for the same header
-+                                                      (in the main SW parser code), use this
-+                                                      index to distinguish between them. */
-+ bool use_prs_opts; /**< TRUE to use parser options. */
-+    ioc_fm_pcd_hdr_prs_opts_u   prs_opts;        /**< A union, selected according to the header
-+                                                      type, defining the parser options. */
-+} ioc_fm_pcd_prs_additional_hdr_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining port PCD parameters
-+ (Must match t_FmPortPcdPrsParams defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_pcd_prs_params_t {
-+ uint8_t prs_res_priv_info; /**< The private info provides a method of inserting
-+ port information into the parser result. This information
-+ may be extracted by KeyGen and be used for frames
-+ distribution when a per-port distinction is required,
-+ it may also be used as a port logical id for analyzing
-+ incoming frames. */
-+    uint8_t                     parsing_offset;  /**< Number of bytes from the beginning of the packet at which to start parsing */
-+    ioc_net_header_type         first_prs_hdr;   /**< The type of the first header expected at 'parsing_offset' */
-+ bool include_in_prs_statistics; /**< TRUE to include this port in the parser statistics */
-+ uint8_t num_of_hdrs_with_additional_params;
-+ /**< Normally 0, some headers may get special parameters */
-+ ioc_fm_pcd_prs_additional_hdr_params_t additional_params[IOC_FM_PCD_PRS_NUM_OF_HDRS];
-+ /**< 'num_of_hdrs_with_additional_params' structures
-+ additional parameters for each header that requires them */
-+ bool set_vlan_tpid1; /**< TRUE to configure user selection of Ethertype to
-+ indicate a VLAN tag (in addition to the TPID values
-+ 0x8100 and 0x88A8). */
-+ uint16_t vlan_tpid1; /**< extra tag to use if set_vlan_tpid1=TRUE. */
-+ bool set_vlan_tpid2; /**< TRUE to configure user selection of Ethertype to
-+ indicate a VLAN tag (in addition to the TPID values
-+ 0x8100 and 0x88A8). */
-+    uint16_t                    vlan_tpid2;      /**< Extra tag to use if set_vlan_tpid2=TRUE. */
-+} ioc_fm_port_pcd_prs_params_t;
-+
-+/**************************************************************************//**
-+ @Description   A structure for defining coarse classification parameters
-+ (Must match t_FmPortPcdCcParams defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_pcd_cc_params_t {
-+ void *cc_tree_id; /**< CC tree id */
-+} ioc_fm_port_pcd_cc_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining keygen parameters
-+ (Must match t_FmPortPcdKgParams defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_pcd_kg_params_t {
-+ uint8_t num_of_schemes; /**< Number of schemes for port to be bound to. */
-+ void *scheme_ids[FM_PCD_KG_NUM_OF_SCHEMES];
-+ /**< Array of 'num_of_schemes' schemes for the
-+ port to be bound to */
-+ bool direct_scheme; /**< TRUE for going from parser to a specific scheme,
-+ regardless of parser result */
-+ void *direct_scheme_id; /**< Scheme id, as returned by FM_PCD_KgSetScheme;
-+ relevant only if direct=TRUE. */
-+} ioc_fm_port_pcd_kg_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining policer parameters
-+ (Must match t_FmPortPcdPlcrParams defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_pcd_plcr_params_t {
-+ void *plcr_profile_id; /**< Selected profile handle;
-+ relevant in one of the following cases:
-+ e_IOC_FM_PORT_PCD_SUPPORT_PLCR_ONLY or
-+ e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR were selected,
-+ or if any flow uses a KG scheme where policer
-+ profile is not generated (bypass_plcr_profile_generation selected) */
-+} ioc_fm_port_pcd_plcr_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining port PCD parameters
-+ (Must match struct t_FmPortPcdParams defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_pcd_params_t {
-+ ioc_fm_port_pcd_support pcd_support; /**< Relevant for Rx and offline ports only.
-+ Describes the active PCD engines for this port. */
-+ void *net_env_id; /**< HL Unused in PLCR only mode */
-+ ioc_fm_port_pcd_prs_params_t *p_prs_params; /**< Parser parameters for this port */
-+ ioc_fm_port_pcd_cc_params_t *p_cc_params; /**< Coarse classification parameters for this port */
-+ ioc_fm_port_pcd_kg_params_t *p_kg_params; /**< Keygen parameters for this port */
-+ ioc_fm_port_pcd_plcr_params_t *p_plcr_params; /**< Policer parameters for this port */
-+ void *p_ip_reassembly_manip;/**< IP Reassembly manipulation */
-+#if (DPAA_VERSION >= 11)
-+ void *p_capwap_reassembly_manip;/**< CAPWAP Reassembly manipulation */
-+#endif /* (DPAA_VERSION >= 11) */
-+} ioc_fm_port_pcd_params_t;
-+
-+/**************************************************************************//**
-+ @Description A structure for defining the Parser starting point
-+ (Must match struct t_FmPcdPrsStart defined in fm_port_ext.h)
-+*//***************************************************************************/
-+typedef struct ioc_fm_pcd_prs_start_t {
-+    uint8_t             parsing_offset;  /**< Number of bytes from the beginning of the packet
-+                                              at which to start parsing */
-+    ioc_net_header_type first_prs_hdr;   /**< The type of the first header expected at
-+                                              'parsing_offset' */
-+} ioc_fm_pcd_prs_start_t;
-+
-+
-+/**************************************************************************//**
-+ @Description FQID parameters structure
-+*//***************************************************************************/
-+typedef struct ioc_fm_port_pcd_fqids_params_t {
-+ uint32_t num_fqids; /**< Number of fqids to be allocated for the port */
-+ uint8_t alignment; /**< Alignment required for this port */
-+ uint32_t base_fqid; /**< output parameter - the base fqid */
-+} ioc_fm_port_pcd_fqids_params_t;
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_IOC_ALLOC_PCD_FQIDS
-+
-+ @Description   Allocates FQIDs.
-+
-+ May be used for Rx and offline parsing ports only
-+
-+ @Param[in,out] ioc_fm_port_pcd_fqids_params_t  Parameters for allocating FQIDs.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_ALLOC_PCD_FQIDS _IOWR(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(19), ioc_fm_port_pcd_fqids_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_IOC_FREE_PCD_FQIDS
-+
-+ @Description Frees previously-allocated FQIDs
-+
-+ May be used for Rx and offline parsing ports only
-+
-+ @Param[in] uint32_t Base FQID of previously allocated range.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_FREE_PCD_FQIDS _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(19), uint32_t)
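-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * allocating a block of PCD FQIDs and releasing it again. 'port_fd' and the
-+ * requested count/alignment are assumptions for the example.
-+ *
-+ *   ioc_fm_port_pcd_fqids_params_t fqids = {
-+ *       .num_fqids = 8,
-+ *       .alignment = 8,
-+ *   };
-+ *   if (ioctl(port_fd, FM_PORT_IOC_ALLOC_PCD_FQIDS, &fqids) == 0) {
-+ *       // fqids.base_fqid now holds the base of the allocated range
-+ *       ioctl(port_fd, FM_PORT_IOC_FREE_PCD_FQIDS, &fqids.base_fqid);
-+ *   }
-+ */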
-+
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_SetPCD
-+
-+ @Description Calling this routine defines the port's PCD configuration.
-+ It changes it from its default configuration which is PCD
-+ disabled (BMI to BMI) and configures it according to the passed
-+ parameters.
-+
-+ May be used for Rx and offline parsing ports only
-+
-+ @Param[in] ioc_fm_port_pcd_params_t A Structure of parameters defining the port's PCD
-+ configuration.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PORT_IOC_SET_PCD_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(20), ioc_compat_fm_port_pcd_params_t)
-+#endif
-+#define FM_PORT_IOC_SET_PCD _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(20), ioc_fm_port_pcd_params_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_DeletePCD
-+
-+ @Description Calling this routine releases the port's PCD configuration.
-+ The port returns to its default configuration which is PCD
-+ disabled (BMI to BMI) and all PCD configuration is removed.
-+
-+ May be used for Rx and offline parsing ports which are
-+ in PCD mode only
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_DELETE_PCD _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(21))
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_AttachPCD
-+
-+ @Description This routine may be called after FM_PORT_DetachPCD was called,
-+ to return to the originally configured PCD support flow.
-+                This pair of routines allows PCD configuration changes that
-+                require the PCD not to be in use while the changes take place.
-+
-+ May be used for Rx and offline parsing ports which are
-+ in PCD mode only
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_ATTACH_PCD _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(23))
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_DetachPCD
-+
-+ @Description Calling this routine detaches the port from its PCD functionality.
-+ The port returns to its default flow which is BMI to BMI.
-+
-+ May be used for Rx and offline parsing ports which are
-+ in PCD mode only
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_DETACH_PCD _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(22))
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdPlcrAllocProfiles
-+
-+ @Description This routine may be called only for ports that use the Policer in
-+ order to allocate private policer profiles.
-+
-+ @Param[in] uint16_t The number of required policer profiles
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed before FM_PORT_SetPCD() only.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_PCD_PLCR_ALLOC_PROFILES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(24), uint16_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdPlcrFreeProfiles
-+
-+ @Description This routine should be called for freeing private policer profiles.
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed before FM_PORT_SetPCD() only.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_PCD_PLCR_FREE_PROFILES _IO(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(25))
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdKgModifyInitialScheme
-+
-+ @Description This routine may be called only for ports that use the keygen in
-+                order to change the initial scheme a frame should be routed to.
-+ The change may be of a scheme id (in case of direct mode),
-+ from direct to indirect, or from indirect to direct - specifying the scheme id.
-+
-+ @Param[in] ioc_fm_pcd_kg_scheme_select_t A structure of parameters for defining whether
-+ a scheme is direct/indirect, and if direct - scheme id.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(26), ioc_compat_fm_pcd_kg_scheme_select_t)
-+#endif
-+#define FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(26), ioc_fm_pcd_kg_scheme_select_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdPlcrModifyInitialProfile
-+
-+ @Description This routine may be called for ports with flows
-+ e_IOC_FM_PCD_SUPPORT_PLCR_ONLY or e_IOC_FM_PCD_SUPPORT_PRS_AND_PLCR only,
-+                to change the initial Policer profile a frame should be routed to.
-+ The change may be of a profile and/or absolute/direct mode selection.
-+
-+ @Param[in] ioc_fm_obj_t Policer profile Id as returned from FM_PCD_PlcrSetProfile.
-+
-+ @Return 0 on success; error code otherwise.
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(27), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(27), ioc_fm_obj_t)
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdCcModifyTree
-+
-+ @Description   This routine may be called to change this port's connection to
-+                a pre-initialized coarse classification tree.
-+
-+ @Param[in] ioc_fm_obj_t Id of new coarse classification tree selected for this port.
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_SetPCD() and FM_PORT_DetachPCD()
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PORT_IOC_PCD_CC_MODIFY_TREE_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(28), ioc_compat_fm_obj_t)
-+#endif
-+#define FM_PORT_IOC_PCD_CC_MODIFY_TREE _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(28), ioc_fm_obj_t)
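-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * switching a running port to a different, pre-built CC tree. Per the notes
-+ * above, the tree may only be changed between FM_PORT_IOC_DETACH_PCD and
-+ * FM_PORT_IOC_ATTACH_PCD. 'port_fd' and 'new_tree' (an ioc_fm_obj_t carrying
-+ * the handle of the new tree, obtained via the PCD ioctls) are assumptions.
-+ *
-+ *   ioctl(port_fd, FM_PORT_IOC_DETACH_PCD);
-+ *   ioctl(port_fd, FM_PORT_IOC_PCD_CC_MODIFY_TREE, &new_tree);
-+ *   ioctl(port_fd, FM_PORT_IOC_ATTACH_PCD);
-+ */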
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdKgBindSchemes
-+
-+ @Description These routines may be called for modifying the binding of ports
-+ to schemes. The scheme itself is not added,
-+ just this specific port starts using it.
-+
-+ @Param[in]     ioc_fm_pcd_port_schemes_params_t  Scheme parameters structure
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_SetPCD().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PORT_IOC_PCD_KG_BIND_SCHEMES_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(30), ioc_compat_fm_pcd_port_schemes_params_t)
-+#endif
-+#define FM_PORT_IOC_PCD_KG_BIND_SCHEMES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(30), ioc_fm_pcd_port_schemes_params_t)
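-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * binding one additional KeyGen scheme to a port that already has its PCD
-+ * set. 'port_fd' and 'scheme_handle' (a scheme id previously returned by the
-+ * FM_PCD KgSetScheme ioctl path) are assumptions for the example.
-+ *
-+ *   ioc_fm_pcd_port_schemes_params_t sp = { .num_of_schemes = 1 };
-+ *   sp.scheme_ids[0] = scheme_handle;
-+ *   if (ioctl(port_fd, FM_PORT_IOC_PCD_KG_BIND_SCHEMES, &sp) < 0)
-+ *       perror("FM_PORT_IOC_PCD_KG_BIND_SCHEMES");
-+ */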
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_PcdKgUnbindSchemes
-+
-+ @Description These routines may be called for modifying the binding of ports
-+ to schemes. The scheme itself is not removed or invalidated,
-+ just this specific port stops using it.
-+
-+ @Param[in]     ioc_fm_pcd_port_schemes_params_t  Scheme parameters structure
-+
-+ @Return 0 on success; error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_SetPCD().
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(31), ioc_compat_fm_pcd_port_schemes_params_t)
-+#endif
-+#define FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(31), ioc_fm_pcd_port_schemes_params_t)
-+
-+typedef struct ioc_fm_port_mac_addr_params_t {
-+ uint8_t addr[ENET_NUM_OCTETS_PER_ADDRESS];
-+} ioc_fm_port_mac_addr_params_t;
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_AddHashMacAddr
-+
-+ @Description   Adds an address to the hash table. This is for filtering purposes only.
-+
-+ @Param[in] ioc_fm_port_mac_addr_params_t - Ethernet Mac address
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions      Allowed only following FM_MAC_Init(). It is a filter-only address.
-+ @Cautions      Some addresses need to be filtered out in upper FM blocks.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(36), ioc_fm_port_mac_addr_params_t)
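-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * adding a multicast address to the Rx hash filter. 'port_fd' and the address
-+ * value are assumptions for the example.
-+ *
-+ *   ioc_fm_port_mac_addr_params_t mac = {
-+ *       .addr = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 },
-+ *   };
-+ *   if (ioctl(port_fd, FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR, &mac) < 0)
-+ *       perror("FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR");
-+ *   // FM_PORT_IOC_REMOVE_RX_HASH_MAC_ADDR (below) takes the same structure.
-+ */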
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_RemoveHashMacAddr
-+
-+ @Description   Deletes an address from the hash table. This is for filtering purposes only.
-+
-+ @Param[in] ioc_fm_port_mac_addr_params_t - Ethernet Mac address
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+*//***************************************************************************/
-+#define FM_PORT_IOC_REMOVE_RX_HASH_MAC_ADDR _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(37), ioc_fm_port_mac_addr_params_t)
-+
-+typedef struct ioc_fm_port_tx_pause_frames_params_t {
-+ uint8_t priority;
-+ uint16_t pause_time;
-+ uint16_t thresh_time;
-+} ioc_fm_port_tx_pause_frames_params_t;
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_SetTxPauseFrames
-+
-+ @Description Enable/Disable transmission of Pause-Frames.
-+ The routine changes the default configuration:
-+ pause-time - [0xf000]
-+ threshold-time - [0]
-+
-+ @Param[in] ioc_fm_port_tx_pause_frames_params_t A structure holding the required parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_MAC_Init().
-+                PFC is supported only on the newer mEMAC; on MACs that do not
-+                have PFC support (10G-MAC and dTSEC), the user should pass
-+                'FM_MAC_NO_PFC' in the 'priority' field.
-+*//***************************************************************************/
-+#define FM_PORT_IOC_SET_TX_PAUSE_FRAMES _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(40), ioc_fm_port_tx_pause_frames_params_t)
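-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * enabling pause-frame transmission with the default pause/threshold times.
-+ * 'port_fd' is an assumption; on MACs without PFC support the 'priority'
-+ * field should carry FM_MAC_NO_PFC, as noted above.
-+ *
-+ *   ioc_fm_port_tx_pause_frames_params_t pause = {
-+ *       .priority    = 0,        // PFC priority (mEMAC); see caution above
-+ *       .pause_time  = 0xf000,
-+ *       .thresh_time = 0,
-+ *   };
-+ *   if (ioctl(port_fd, FM_PORT_IOC_SET_TX_PAUSE_FRAMES, &pause) < 0)
-+ *       perror("FM_PORT_IOC_SET_TX_PAUSE_FRAMES");
-+ */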
-+
-+typedef struct ioc_fm_port_mac_statistics_t {
-+ /* RMON */
-+ uint64_t e_stat_pkts_64; /**< r-10G tr-DT 64 byte frame counter */
-+ uint64_t e_stat_pkts_65_to_127; /**< r-10G 65 to 127 byte frame counter */
-+ uint64_t e_stat_pkts_128_to_255; /**< r-10G 128 to 255 byte frame counter */
-+ uint64_t e_stat_pkts_256_to_511; /**< r-10G 256 to 511 byte frame counter */
-+ uint64_t e_stat_pkts_512_to_1023; /**< r-10G 512 to 1023 byte frame counter */
-+ uint64_t e_stat_pkts_1024_to_1518; /**< r-10G 1024 to 1518 byte frame counter */
-+ uint64_t e_stat_pkts_1519_to_1522; /**< r-10G 1519 to 1522 byte good frame count */
-+ /* */
-+ uint64_t e_stat_fragments; /**< Total number of packets that were less than 64 octets long with a wrong CRC.*/
-+ uint64_t e_stat_jabbers; /**< Total number of packets longer than valid maximum length octets */
-+    uint64_t  e_stat_drop_events;          /**< Number of dropped packets due to internal errors of the MAC Client (during receive). */
-+ uint64_t e_stat_CRC_align_errors; /**< Incremented when frames of correct length but with CRC error are received.*/
-+ uint64_t e_stat_undersize_pkts; /**< Incremented for frames under 64 bytes with a valid FCS and otherwise well formed;
-+ This count does not include range length errors */
-+    uint64_t  e_stat_oversize_pkts;        /**< Incremented for frames which exceed 1518 (non-VLAN) or 1522 (VLAN) and contain
-+                                                a valid FCS and are otherwise well formed */
-+ /* Pause */
-+ uint64_t te_stat_pause; /**< Pause MAC Control received */
-+ uint64_t re_stat_pause; /**< Pause MAC Control sent */
-+ /* MIB II */
-+    uint64_t  if_in_octets;                /**< Total number of bytes received. */
-+    uint64_t  if_in_pkts;                  /**< Total number of packets received. */
-+    uint64_t  if_in_ucast_pkts;            /**< Total number of unicast frames received;
-+                                                NOTE: this counter is not supported on dTSEC MAC */
-+    uint64_t  if_in_mcast_pkts;            /**< Total number of multicast frames received */
-+    uint64_t  if_in_bcast_pkts;            /**< Total number of broadcast frames received */
-+    uint64_t  if_in_discards;              /**< Frames received, but discarded due to problems within the MAC RX. */
-+    uint64_t  if_in_errors;                /**< Number of frames received with error:
-+                                                - FIFO Overflow Error
-+                                                - CRC Error
-+                                                - Frame Too Long Error
-+                                                - Alignment Error
-+                                                - The dedicated Error Code (0xfe, not a code error) was received */
-+    uint64_t  if_out_octets;               /**< Total number of bytes sent. */
-+    uint64_t  if_out_pkts;                 /**< Total number of packets sent. */
-+    uint64_t  if_out_ucast_pkts;           /**< Total number of unicast frames sent;
-+                                                NOTE: this counter is not supported on dTSEC MAC */
-+    uint64_t  if_out_mcast_pkts;           /**< Total number of multicast frames sent */
-+    uint64_t  if_out_bcast_pkts;           /**< Total number of broadcast frames sent */
-+    uint64_t  if_out_discards;             /**< Frames discarded due to problems within the MAC TX (N/A). */
-+ uint64_t if_out_errors; /**< Number of frames transmitted with error:
-+ - FIFO Overflow Error
-+ - FIFO Underflow Error
-+ - Other */
-+} ioc_fm_port_mac_statistics_t;
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_GetStatistics
-+
-+ @Description   Gets all MAC statistics counters.
-+
-+ @Param[out] ioc_fm_port_mac_statistics_t A structure holding the statistics
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_PORT_IOC_GET_MAC_STATISTICS _IOR(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(41), ioc_fm_port_mac_statistics_t)
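-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * reading the MAC statistics into a local structure. 'port_fd' is an
-+ * assumption for the example.
-+ *
-+ *   ioc_fm_port_mac_statistics_t stats;
-+ *   if (ioctl(port_fd, FM_PORT_IOC_GET_MAC_STATISTICS, &stats) == 0)
-+ *       printf("rx pkts %llu, tx pkts %llu\n",
-+ *              (unsigned long long)stats.if_in_pkts,
-+ *              (unsigned long long)stats.if_out_pkts);
-+ */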
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_ConfigBufferPrefixContent
-+
-+ @Description   Defines the structure, size and content of the application buffer.
-+                In Tx ports, if 'passPrsResult' is set, the application should
-+                place the corresponding values at their offsets within the
-+                buffer prefix. The FM will save the first 'privDataSize' bytes
-+                and then, depending on 'passPrsResult' and 'passTimeStamp',
-+                copy the parse result and the timestamp, and the packet itself
-+                (in this order), to the application buffer at the corresponding
-+                offsets.
-+                Calling this routine changes the buffer margins definitions
-+                in the internal driver database from its default
-+                configuration: Data size: [DEFAULT_FM_SP_bufferPrefixContent_privDataSize]
-+                Pass Parser result: [DEFAULT_FM_SP_bufferPrefixContent_passPrsResult].
-+                Pass timestamp: [DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp].
-+
-+ May be used for all ports
-+
-+ @Param[in] ioc_fm_buffer_prefix_content_t A structure holding the required parameters.
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init().
-+*//***************************************************************************/
-+#define FM_PORT_IOC_CONFIG_BUFFER_PREFIX_CONTENT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(39), ioc_fm_buffer_prefix_content_t)
-+
-+#if (DPAA_VERSION >= 11)
-+typedef struct ioc_fm_port_vsp_alloc_params_t {
-+ uint8_t num_of_profiles; /**< Number of Virtual Storage Profiles */
-+    uint8_t     dflt_relative_id;   /**< The default Virtual-Storage-Profile id dedicated to the Rx/OP port.
-+                                         The same default id will be used for the coupled Tx port
-+                                         if this function is called for an Rx port. */
-+ void *p_fm_tx_port; /**< Handle to coupled Tx Port; not relevant for OP port. */
-+} ioc_fm_port_vsp_alloc_params_t;
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_VSPAlloc
-+
-+ @Description   This routine allocates VSPs per port and forces the port to work
-+ in VSP mode. Note that the port is initialized by default with the
-+ physical-storage-profile only.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[in] p_Params A structure of parameters for allocation VSP's per port
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init(), and before FM_PORT_SetPCD()
-+ and also before FM_PORT_Enable() (i.e. the port should be disabled).
-+*//***************************************************************************/
-+#if defined(CONFIG_COMPAT)
-+#define FM_PORT_IOC_VSP_ALLOC_COMPAT _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(38), ioc_compat_fm_port_vsp_alloc_params_t)
-+#endif
-+#define FM_PORT_IOC_VSP_ALLOC _IOW(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(38), ioc_fm_port_vsp_alloc_params_t)
-+#endif /* (DPAA_VERSION >= 11) */
-+
-+/**************************************************************************//**
-+ @Function FM_PORT_GetBmiCounters
-+
-+ @Description   Reads the port's BMI statistics counters and places them into
-+                a designated counters structure.
-+
-+ @Param[in] h_FmPort A handle to a FM Port module.
-+ @Param[out] p_BmiStats counters structure
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_PORT_Init().
-+*//***************************************************************************/
-+
-+#define FM_PORT_IOC_GET_BMI_COUNTERS _IOR(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(42), ioc_fm_port_bmi_stats_t)
-+
-+typedef struct ioc_fm_port_mac_frame_size_counters_t {
-+
-+ e_CommMode type;
-+ uint64_t count_pkts_64; /**< 64 byte frame counter */
-+ uint64_t count_pkts_65_to_127; /**< 65 to 127 byte frame counter */
-+ uint64_t count_pkts_128_to_255; /**< 128 to 255 byte frame counter */
-+ uint64_t count_pkts_256_to_511; /**< 256 to 511 byte frame counter */
-+ uint64_t count_pkts_512_to_1023; /**< 512 to 1023 byte frame counter */
-+ uint64_t count_pkts_1024_to_1518; /**< 1024 to 1518 byte frame counter */
-+ uint64_t count_pkts_1519_to_1522; /**< 1519 to 1522 byte good frame count */
-+} ioc_fm_port_mac_frame_size_counters_t;
-+
-+/**************************************************************************//**
-+ @Function FM_MAC_GetFrameSizeCounters
-+
-+ @Description   Gets MAC statistics counters for the different frame sizes.
-+
-+ @Param[out] ioc_fm_port_mac_frame_size_counters_t A structure holding the counters
-+
-+ @Return E_OK on success; Error code otherwise.
-+
-+ @Cautions Allowed only following FM_Init().
-+*//***************************************************************************/
-+#define FM_PORT_IOC_GET_MAC_FRAME_SIZE_COUNTERS _IOR(FM_IOC_TYPE_BASE, FM_PORT_IOC_NUM(43), ioc_fm_port_mac_frame_size_counters_t)
-+
-+
-+/** @} */ /* end of lnx_ioctl_FM_PORT_pcd_runtime_control_grp group */
-+/** @} */ /* end of lnx_ioctl_FM_PORT_runtime_control_grp group */
-+
-+/** @} */ /* end of lnx_ioctl_FM_PORT_grp group */
-+/** @} */ /* end of lnx_ioctl_FM_grp group */
-+#endif /* __FM_PORT_IOCTLS_H */
---- /dev/null
-+++ b/include/uapi/linux/fmd/Peripherals/fm_test_ioctls.h
-@@ -0,0 +1,208 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File fm_test_ioctls.h
-+
-+ @Description FM Char device ioctls
-+*//***************************************************************************/
-+#ifndef __FM_TEST_IOCTLS_H
-+#define __FM_TEST_IOCTLS_H
-+
-+#include "ioctls.h"
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FMT_grp Frame Manager Test Linux IOCTL API
-+
-+ @Description FM-Test Linux ioctls definitions and enums
-+
-+ @{
-+*//***************************************************************************/
-+
-+#define IOC_FMT_MAX_NUM_OF_PORTS 26
-+
-+/**************************************************************************//**
-+ @Collection TEST Parameters
-+*//***************************************************************************/
-+/**************************************************************************//**
-+ @Description: Name of the FM-Test chardev
-+*//***************************************************************************/
-+#define DEV_FM_TEST_NAME "fm-test-port"
-+
-+#define DEV_FM_TEST_PORTS_MINOR_BASE 0
-+#define DEV_FM_TEST_MAX_MINORS (DEV_FM_TEST_PORTS_MINOR_BASE + IOC_FMT_MAX_NUM_OF_PORTS)
-+
-+#define FMT_PORT_IOC_NUM(n) n
-+/* @} */
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FMT_lib_grp FM-Test library
-+
-+ @Description TODO
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description TODO
-+*//***************************************************************************/
-+typedef uint8_t ioc_fmt_xxx_t;
-+
-+#define FM_PRS_MAX 32
-+#define FM_TIME_STAMP_MAX 8
-+
-+/**************************************************************************//**
-+ @Description FM Port buffer content description
-+*//***************************************************************************/
-+typedef struct ioc_fmt_buff_context_t {
-+ void *p_user_priv;
-+ uint8_t fm_prs_res[FM_PRS_MAX];
-+ uint8_t fm_time_stamp[FM_TIME_STAMP_MAX];
-+} ioc_fmt_buff_context_t;
-+
-+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
-+typedef struct ioc_fmt_compat_buff_context_t {
-+ compat_uptr_t p_user_priv;
-+ uint8_t fm_prs_res[FM_PRS_MAX];
-+ uint8_t fm_time_stamp[FM_TIME_STAMP_MAX];
-+} ioc_fmt_compat_buff_context_t;
-+#endif
-+
-+/**************************************************************************//**
-+ @Description Buffer descriptor
-+*//***************************************************************************/
-+typedef struct ioc_fmt_buff_desc_t {
-+ uint32_t qid;
-+ void *p_data;
-+ uint32_t size;
-+ uint32_t status;
-+ ioc_fmt_buff_context_t buff_context;
-+} ioc_fmt_buff_desc_t;
-+
-+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
-+typedef struct ioc_fmt_compat_buff_desc_t {
-+ uint32_t qid;
-+ compat_uptr_t p_data;
-+ uint32_t size;
-+ uint32_t status;
-+ ioc_fmt_compat_buff_context_t buff_context;
-+} ioc_fmt_compat_buff_desc_t;
-+#endif
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FMT_runtime_control_grp FM-Test Runtime Control Unit
-+
-+ @Description TODO
-+ @{
-+*//***************************************************************************/
-+
-+/** @} */ /* end of lnx_ioctl_FMT_runtime_control_grp group */
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_FMTP_lib_grp FM-Port-Test library
-+
-+ @Description TODO
-+
-+ @{
-+*//***************************************************************************/
-+
-+/**************************************************************************//**
-+ @Description FM-Test FM port type
-+*//***************************************************************************/
-+typedef enum ioc_fmt_port_type {
-+ e_IOC_FMT_PORT_T_RXTX, /**< Standard port */
-+ e_IOC_FMT_PORT_T_OP, /**< Offline-parsing port */
-+} ioc_fmt_port_type;
-+
-+/**************************************************************************//**
-+ @Description TODO
-+*//***************************************************************************/
-+typedef struct ioc_fmt_port_param_t {
-+ uint8_t fm_id;
-+ ioc_fmt_port_type fm_port_type;
-+ uint8_t fm_port_id;
-+ uint32_t num_tx_queues;
-+} ioc_fmt_port_param_t;
-+
-+
-+/**************************************************************************//**
-+ @Function FMT_PORT_IOC_INIT
-+
-+ @Description TODO
-+
-+ @Param[in] ioc_fmt_port_param_t TODO
-+
-+ @Cautions Allowed only after the FM equivalent port is already initialized.
-+*//***************************************************************************/
-+#define FMT_PORT_IOC_INIT _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(0), ioc_fmt_port_param_t)
-+
-+/**************************************************************************//**
-+ @Function FMT_PORT_IOC_SET_DIAG_MODE
-+
-+ @Description TODO
-+
-+ @Param[in] ioc_diag_mode TODO
-+
-+ @Cautions Allowed only following FMT_PORT_IOC_INIT().
-+*//***************************************************************************/
-+#define FMT_PORT_IOC_SET_DIAG_MODE _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(1), ioc_diag_mode)
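-+/* Illustrative usage sketch (not part of the original API documentation):
-+ * initializing an FM-test port and putting its controller into loopback.
-+ * The device node name and the FM/port ids are assumptions for the example;
-+ * e_IOC_DIAG_MODE_CTRL_LOOPBACK is one of the ioc_diag_mode values defined
-+ * in ioctls.h.
-+ *
-+ *   ioc_fmt_port_param_t prm = {
-+ *       .fm_id         = 0,
-+ *       .fm_port_type  = e_IOC_FMT_PORT_T_RXTX,
-+ *       .fm_port_id    = 1,
-+ *       .num_tx_queues = 1,
-+ *   };
-+ *   int fd = open("/dev/" DEV_FM_TEST_NAME "0", O_RDWR);   // assumed node name
-+ *   if (fd >= 0 && ioctl(fd, FMT_PORT_IOC_INIT, &prm) == 0) {
-+ *       ioc_diag_mode diag = e_IOC_DIAG_MODE_CTRL_LOOPBACK;
-+ *       ioctl(fd, FMT_PORT_IOC_SET_DIAG_MODE, &diag);
-+ *   }
-+ */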
-+
-+/**************************************************************************//**
-+ @Function FMT_PORT_IOC_SET_IP_HEADER_MANIP
-+
-+ @Description Set IP header manipulations for this port.
-+
-+ @Param[in] int 1 to enable; 0 to disable
-+
-+ @Cautions Allowed only following FMT_PORT_IOC_INIT().
-+*//***************************************************************************/
-+#define FMT_PORT_IOC_SET_IP_HEADER_MANIP _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(2), int)
-+
-+/**************************************************************************//**
-+ @Function FMT_PORT_IOC_SET_DPAECHO_MODE
-+
-+ @Description   Set DPA in echo mode - all frames are sent back.
-+
-+ @Param[in] int 1 to enable; 0 to disable
-+
-+ @Cautions Allowed only following FMT_PORT_IOC_INIT().
-+*//***************************************************************************/
-+#define FMT_PORT_IOC_SET_DPAECHO_MODE _IOW(FMT_IOC_TYPE_BASE, FMT_PORT_IOC_NUM(3), int)
-+
-+/** @} */ /* end of lnx_ioctl_FMTP_lib_grp group */
-+/** @} */ /* end of lnx_ioctl_FMT_lib_grp group */
-+/** @} */ /* end of lnx_ioctl_FMT_grp */
-+
-+
-+#endif /* __FM_TEST_IOCTLS_H */
---- /dev/null
-+++ b/include/uapi/linux/fmd/integrations/Kbuild
-@@ -0,0 +1 @@
-+header-y += integration_ioctls.h
---- /dev/null
-+++ b/include/uapi/linux/fmd/integrations/integration_ioctls.h
-@@ -0,0 +1,56 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File integration_ioctls.h
-+
-+ @Description External header file for Integration unit routines.
-+*//***************************************************************************/
-+
-+#ifndef __INTG_IOCTLS_H
-+#define __INTG_IOCTLS_H
-+
-+
-+#define FM_IOC_TYPE_BASE (NCSW_IOC_TYPE_BASE+1)
-+#define FMT_IOC_TYPE_BASE (NCSW_IOC_TYPE_BASE+3)
-+
-+/*#define FM_IOCTL_DBG*/
-+
-+#if defined(FM_IOCTL_DBG)
-+ #define _fm_ioctl_dbg(format, arg...) \
-+ printk("fm ioctl [%s:%u](cpu:%u) - " format, \
-+ __func__, __LINE__, smp_processor_id(), ##arg)
-+#else
-+# define _fm_ioctl_dbg(arg...)
-+#endif
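-+
-+/* Illustrative note (not part of the original header): with FM_IOCTL_DBG
-+ * defined, driver code can emit a trace such as
-+ *
-+ *   _fm_ioctl_dbg("cmd = 0x%08x\n", cmd);
-+ *
-+ * which prints the function name, line number and CPU id via printk;
-+ * with FM_IOCTL_DBG undefined the call expands to nothing.
-+ */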
-+
-+#endif /* __INTG_IOCTLS_H */
---- /dev/null
-+++ b/include/uapi/linux/fmd/ioctls.h
-@@ -0,0 +1,96 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**************************************************************************//**
-+ @File ioctls.h
-+
-+ @Description Structures and definitions for Command Relay Ioctls
-+*//***************************************************************************/
-+
-+#ifndef __IOCTLS_H__
-+#define __IOCTLS_H__
-+
-+#include <asm/ioctl.h>
-+
-+#include "integration_ioctls.h"
-+
-+
-+/**************************************************************************//**
-+ @Group lnx_ioctl_ncsw_grp NetCommSw Linux User-Space (IOCTL) API
-+ @{
-+*//***************************************************************************/
-+
-+#define NCSW_IOC_TYPE_BASE 0xe0 /**< defines the IOCTL type for all
-+ the NCSW Linux module commands */
-+
-+
-+/**************************************************************************//**
-+ @Description IOCTL Memory allocation types.
-+*//***************************************************************************/
-+typedef enum ioc_mem_type {
-+ e_IOC_MEM_INVALID = 0x00000000, /**< Invalid memory type (error) */
-+ e_IOC_MEM_CACHABLE_SYS = 0x00000001, /**< Primary DDR, cacheable segment */
-+ e_IOC_MEM_NOCACHE_SYS = 0x00000004, /**< Primary DDR, non-cacheable segment */
-+ e_IOC_MEM_SECONDARY = 0x00000002, /**< Either secondary DDR or SDRAM */
-+ e_IOC_MEM_PRAM = 0x00000008 /**< Multi-user RAM identifier */
-+} ioc_mem_type;
-+
-+/**************************************************************************//**
-+ @Description Enumeration (bit flags) of communication modes (Transmit,
-+ receive or both).
-+*//***************************************************************************/
-+typedef enum ioc_comm_mode {
-+ e_IOC_COMM_MODE_NONE = 0 /**< No transmit/receive communication */
-+ , e_IOC_COMM_MODE_RX = 1 /**< Only receive communication */
-+ , e_IOC_COMM_MODE_TX = 2 /**< Only transmit communication */
-+ , e_IOC_COMM_MODE_RX_AND_TX = 3 /**< Both transmit and receive communication */
-+} ioc_comm_mode;
-+
-+/**************************************************************************//**
-+ @Description General Diagnostic Mode
-+*//***************************************************************************/
-+typedef enum ioc_diag_mode
-+{
-+ e_IOC_DIAG_MODE_NONE = 0,
-+ e_IOC_DIAG_MODE_CTRL_LOOPBACK, /**< loopback in the controller; E.g. MAC, TDM, etc. */
-+ e_IOC_DIAG_MODE_CHIP_LOOPBACK, /**< loopback in the chip but not in controller;
-+ E.g. IO-pins, SerDes, etc. */
-+ e_IOC_DIAG_MODE_PHY_LOOPBACK, /**< loopback in the external PHY */
-+ e_IOC_DIAG_MODE_LINE_LOOPBACK, /**< loopback in the external line */
-+ e_IOC_DIAG_MODE_CTRL_ECHO, /**< */
-+ e_IOC_DIAG_MODE_PHY_ECHO /**< */
-+} ioc_diag_mode;
-+
-+/** @} */ /* end of lnx_ioctl_ncsw_grp */
-+
-+
-+#endif /* __IOCTLS_H__ */
---- /dev/null
-+++ b/include/uapi/linux/fmd/net_ioctls.h
-@@ -0,0 +1,430 @@
-+/* Copyright (c) 2008-2012 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+/**************************************************************************//**
-+ @File net_ioctls.h
-+
-+ @Description This file contains common and general netcomm headers definitions.
-+*//***************************************************************************/
-+#ifndef __NET_IOCTLS_H
-+#define __NET_IOCTLS_H
-+
-+#include "ioctls.h"
-+
-+
-+typedef uint8_t ioc_header_field_ppp_t;
-+
-+#define IOC_NET_HEADER_FIELD_PPP_PID (1)
-+#define IOC_NET_HEADER_FIELD_PPP_COMPRESSED (IOC_NET_HEADER_FIELD_PPP_PID << 1)
-+#define IOC_NET_HEADER_FIELD_PPP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPP_PID << 2) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_pppoe_t;
-+
-+#define IOC_NET_HEADER_FIELD_PPPoE_VER (1)
-+#define IOC_NET_HEADER_FIELD_PPPoE_TYPE (IOC_NET_HEADER_FIELD_PPPoE_VER << 1)
-+#define IOC_NET_HEADER_FIELD_PPPoE_CODE (IOC_NET_HEADER_FIELD_PPPoE_VER << 2)
-+#define IOC_NET_HEADER_FIELD_PPPoE_SID (IOC_NET_HEADER_FIELD_PPPoE_VER << 3)
-+#define IOC_NET_HEADER_FIELD_PPPoE_LEN (IOC_NET_HEADER_FIELD_PPPoE_VER << 4)
-+#define IOC_NET_HEADER_FIELD_PPPoE_SESSION (IOC_NET_HEADER_FIELD_PPPoE_VER << 5)
-+#define IOC_NET_HEADER_FIELD_PPPoE_PID (IOC_NET_HEADER_FIELD_PPPoE_VER << 6)
-+#define IOC_NET_HEADER_FIELD_PPPoE_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPPoE_VER << 7) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_PPPMUX_PID (1)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_CKSUM (IOC_NET_HEADER_FIELD_PPPMUX_PID << 1)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_COMPRESSED (IOC_NET_HEADER_FIELD_PPPMUX_PID << 2)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPPMUX_PID << 3) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF (1)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_LXT (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 1)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_LEN (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 2)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PID (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 3)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_USE_PID (IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 4)
-+#define IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 5) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_eth_t;
-+
-+#define IOC_NET_HEADER_FIELD_ETH_DA (1)
-+#define IOC_NET_HEADER_FIELD_ETH_SA (IOC_NET_HEADER_FIELD_ETH_DA << 1)
-+#define IOC_NET_HEADER_FIELD_ETH_LENGTH (IOC_NET_HEADER_FIELD_ETH_DA << 2)
-+#define IOC_NET_HEADER_FIELD_ETH_TYPE (IOC_NET_HEADER_FIELD_ETH_DA << 3)
-+#define IOC_NET_HEADER_FIELD_ETH_FINAL_CKSUM (IOC_NET_HEADER_FIELD_ETH_DA << 4)
-+#define IOC_NET_HEADER_FIELD_ETH_PADDING (IOC_NET_HEADER_FIELD_ETH_DA << 5)
-+#define IOC_NET_HEADER_FIELD_ETH_ALL_FIELDS ((IOC_NET_HEADER_FIELD_ETH_DA << 6) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_ETH_ADDR_SIZE 6
-+
-+typedef uint16_t ioc_header_field_ip_t;
-+
-+#define IOC_NET_HEADER_FIELD_IP_VER (1)
-+#define IOC_NET_HEADER_FIELD_IP_DSCP (IOC_NET_HEADER_FIELD_IP_VER << 2)
-+#define IOC_NET_HEADER_FIELD_IP_ECN (IOC_NET_HEADER_FIELD_IP_VER << 3)
-+#define IOC_NET_HEADER_FIELD_IP_PROTO (IOC_NET_HEADER_FIELD_IP_VER << 4)
-+
-+#define IOC_NET_HEADER_FIELD_IP_PROTO_SIZE 1
-+
-+typedef uint16_t ioc_header_field_ipv4_t;
-+
-+#define IOC_NET_HEADER_FIELD_IPv4_VER (1)
-+#define IOC_NET_HEADER_FIELD_IPv4_HDR_LEN (IOC_NET_HEADER_FIELD_IPv4_VER << 1)
-+#define IOC_NET_HEADER_FIELD_IPv4_TOS (IOC_NET_HEADER_FIELD_IPv4_VER << 2)
-+#define IOC_NET_HEADER_FIELD_IPv4_TOTAL_LEN (IOC_NET_HEADER_FIELD_IPv4_VER << 3)
-+#define IOC_NET_HEADER_FIELD_IPv4_ID (IOC_NET_HEADER_FIELD_IPv4_VER << 4)
-+#define IOC_NET_HEADER_FIELD_IPv4_FLAG_D (IOC_NET_HEADER_FIELD_IPv4_VER << 5)
-+#define IOC_NET_HEADER_FIELD_IPv4_FLAG_M (IOC_NET_HEADER_FIELD_IPv4_VER << 6)
-+#define IOC_NET_HEADER_FIELD_IPv4_OFFSET (IOC_NET_HEADER_FIELD_IPv4_VER << 7)
-+#define IOC_NET_HEADER_FIELD_IPv4_TTL (IOC_NET_HEADER_FIELD_IPv4_VER << 8)
-+#define IOC_NET_HEADER_FIELD_IPv4_PROTO (IOC_NET_HEADER_FIELD_IPv4_VER << 9)
-+#define IOC_NET_HEADER_FIELD_IPv4_CKSUM (IOC_NET_HEADER_FIELD_IPv4_VER << 10)
-+#define IOC_NET_HEADER_FIELD_IPv4_SRC_IP (IOC_NET_HEADER_FIELD_IPv4_VER << 11)
-+#define IOC_NET_HEADER_FIELD_IPv4_DST_IP (IOC_NET_HEADER_FIELD_IPv4_VER << 12)
-+#define IOC_NET_HEADER_FIELD_IPv4_OPTS (IOC_NET_HEADER_FIELD_IPv4_VER << 13)
-+#define IOC_NET_HEADER_FIELD_IPv4_OPTS_COUNT (IOC_NET_HEADER_FIELD_IPv4_VER << 14)
-+#define IOC_NET_HEADER_FIELD_IPv4_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPv4_VER << 15) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_IPv4_ADDR_SIZE 4
-+#define IOC_NET_HEADER_FIELD_IPv4_PROTO_SIZE 1
-+
-+
-+typedef uint8_t ioc_header_field_ipv6_t;
-+
-+#define IOC_NET_HEADER_FIELD_IPv6_VER (1)
-+#define IOC_NET_HEADER_FIELD_IPv6_TC (IOC_NET_HEADER_FIELD_IPv6_VER << 1)
-+#define IOC_NET_HEADER_FIELD_IPv6_SRC_IP (IOC_NET_HEADER_FIELD_IPv6_VER << 2)
-+#define IOC_NET_HEADER_FIELD_IPv6_DST_IP (IOC_NET_HEADER_FIELD_IPv6_VER << 3)
-+#define IOC_NET_HEADER_FIELD_IPv6_NEXT_HDR (IOC_NET_HEADER_FIELD_IPv6_VER << 4)
-+#define IOC_NET_HEADER_FIELD_IPv6_FL (IOC_NET_HEADER_FIELD_IPv6_VER << 5)
-+#define IOC_NET_HEADER_FIELD_IPv6_HOP_LIMIT (IOC_NET_HEADER_FIELD_IPv6_VER << 6)
-+#define IOC_NET_HEADER_FIELD_IPv6_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPv6_VER << 7) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_IPv6_ADDR_SIZE 16
-+#define IOC_NET_HEADER_FIELD_IPv6_NEXT_HDR_SIZE 1
-+
-+#define IOC_NET_HEADER_FIELD_ICMP_TYPE (1)
-+#define IOC_NET_HEADER_FIELD_ICMP_CODE (IOC_NET_HEADER_FIELD_ICMP_TYPE << 1)
-+#define IOC_NET_HEADER_FIELD_ICMP_CKSUM (IOC_NET_HEADER_FIELD_ICMP_TYPE << 2)
-+#define IOC_NET_HEADER_FIELD_ICMP_ID (IOC_NET_HEADER_FIELD_ICMP_TYPE << 3)
-+#define IOC_NET_HEADER_FIELD_ICMP_SQ_NUM (IOC_NET_HEADER_FIELD_ICMP_TYPE << 4)
-+#define IOC_NET_HEADER_FIELD_ICMP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_ICMP_TYPE << 5) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_ICMP_CODE_SIZE 1
-+#define IOC_NET_HEADER_FIELD_ICMP_TYPE_SIZE 1
-+
-+#define IOC_NET_HEADER_FIELD_IGMP_VERSION (1)
-+#define IOC_NET_HEADER_FIELD_IGMP_TYPE (IOC_NET_HEADER_FIELD_IGMP_VERSION << 1)
-+#define IOC_NET_HEADER_FIELD_IGMP_CKSUM (IOC_NET_HEADER_FIELD_IGMP_VERSION << 2)
-+#define IOC_NET_HEADER_FIELD_IGMP_DATA (IOC_NET_HEADER_FIELD_IGMP_VERSION << 3)
-+#define IOC_NET_HEADER_FIELD_IGMP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IGMP_VERSION << 4) - 1)
-+
-+
-+typedef uint16_t ioc_header_field_tcp_t;
-+
-+#define IOC_NET_HEADER_FIELD_TCP_PORT_SRC (1)
-+#define IOC_NET_HEADER_FIELD_TCP_PORT_DST (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 1)
-+#define IOC_NET_HEADER_FIELD_TCP_SEQ (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 2)
-+#define IOC_NET_HEADER_FIELD_TCP_ACK (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 3)
-+#define IOC_NET_HEADER_FIELD_TCP_OFFSET (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 4)
-+#define IOC_NET_HEADER_FIELD_TCP_FLAGS (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 5)
-+#define IOC_NET_HEADER_FIELD_TCP_WINDOW (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 6)
-+#define IOC_NET_HEADER_FIELD_TCP_CKSUM (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 7)
-+#define IOC_NET_HEADER_FIELD_TCP_URGPTR (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 8)
-+#define IOC_NET_HEADER_FIELD_TCP_OPTS (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 9)
-+#define IOC_NET_HEADER_FIELD_TCP_OPTS_COUNT (IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 10)
-+#define IOC_NET_HEADER_FIELD_TCP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_TCP_PORT_SRC << 11) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_TCP_PORT_SIZE 2
-+
-+
-+typedef uint8_t ioc_header_field_sctp_t;
-+
-+#define IOC_NET_HEADER_FIELD_SCTP_PORT_SRC (1)
-+#define IOC_NET_HEADER_FIELD_SCTP_PORT_DST (IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 1)
-+#define IOC_NET_HEADER_FIELD_SCTP_VER_TAG (IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 2)
-+#define IOC_NET_HEADER_FIELD_SCTP_CKSUM (IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 3)
-+#define IOC_NET_HEADER_FIELD_SCTP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_SCTP_PORT_SRC << 4) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_SCTP_PORT_SIZE 2
-+
-+typedef uint8_t ioc_header_field_dccp_t;
-+
-+#define IOC_NET_HEADER_FIELD_DCCP_PORT_SRC (1)
-+#define IOC_NET_HEADER_FIELD_DCCP_PORT_DST (IOC_NET_HEADER_FIELD_DCCP_PORT_SRC << 1)
-+#define IOC_NET_HEADER_FIELD_DCCP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_DCCP_PORT_SRC << 2) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_DCCP_PORT_SIZE 2
-+
-+
-+typedef uint8_t ioc_header_field_udp_t;
-+
-+#define IOC_NET_HEADER_FIELD_UDP_PORT_SRC (1)
-+#define IOC_NET_HEADER_FIELD_UDP_PORT_DST (IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 1)
-+#define IOC_NET_HEADER_FIELD_UDP_LEN (IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 2)
-+#define IOC_NET_HEADER_FIELD_UDP_CKSUM (IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 3)
-+#define IOC_NET_HEADER_FIELD_UDP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_UDP_PORT_SRC << 4) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_UDP_PORT_SIZE 2
-+
-+typedef uint8_t ioc_header_field_udp_lite_t;
-+
-+#define IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SRC (1)
-+#define IOC_NET_HEADER_FIELD_UDP_LITE_PORT_DST (IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 1)
-+#define IOC_NET_HEADER_FIELD_UDP_LITE_ALL_FIELDS ((IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SRC << 2) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_UDP_LITE_PORT_SIZE 2
-+
-+typedef uint8_t ioc_header_field_udp_encap_esp_t;
-+
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC (1)
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 1)
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 2)
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 3)
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 4)
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM (IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 5)
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 6) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SIZE 2
-+#define IOC_NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI_SIZE 4
-+
-+#define IOC_NET_HEADER_FIELD_IPHC_CID (1)
-+#define IOC_NET_HEADER_FIELD_IPHC_CID_TYPE (IOC_NET_HEADER_FIELD_IPHC_CID << 1)
-+#define IOC_NET_HEADER_FIELD_IPHC_HCINDEX (IOC_NET_HEADER_FIELD_IPHC_CID << 2)
-+#define IOC_NET_HEADER_FIELD_IPHC_GEN (IOC_NET_HEADER_FIELD_IPHC_CID << 3)
-+#define IOC_NET_HEADER_FIELD_IPHC_D_BIT (IOC_NET_HEADER_FIELD_IPHC_CID << 4)
-+#define IOC_NET_HEADER_FIELD_IPHC_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPHC_CID << 5) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE (1)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_FLAGS (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 1)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_LENGTH (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 2)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TSN (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 3)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_ID (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 4)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_SQN (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 5)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_PAYLOAD_PID (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 6)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_UNORDERED (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 7)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_BEGGINING (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 8)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_END (IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 9)
-+#define IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS ((IOC_NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT (1)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_LENGTH_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 1)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_SEQUENCE_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 2)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_OFFSET_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 3)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_PRIORITY_BIT (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 4)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_VERSION (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 5)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_LEN (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 6)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_TUNNEL_ID (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 7)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_SESSION_ID (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 8)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_NS (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 9)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_NR (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 10)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_OFFSET_SIZE (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 11)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_FIRST_BYTE (IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 12)
-+#define IOC_NET_HEADER_FIELD_L2TPv2_ALL_FIELDS ((IOC_NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 13) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT (1)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH_BIT (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 1)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_SEQUENCE_BIT (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 2)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_VERSION (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 3)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 4)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_CONTROL (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 5)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_SENT (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 6)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_RECV (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 7)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_FIRST_BYTE (IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 8)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS ((IOC_NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 9) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT (1)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_VERSION (IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 1)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_ID (IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 2)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_COOKIE (IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 3)
-+#define IOC_NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS ((IOC_NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 4) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_vlan_t;
-+
-+#define IOC_NET_HEADER_FIELD_VLAN_VPRI (1)
-+#define IOC_NET_HEADER_FIELD_VLAN_CFI (IOC_NET_HEADER_FIELD_VLAN_VPRI << 1)
-+#define IOC_NET_HEADER_FIELD_VLAN_VID (IOC_NET_HEADER_FIELD_VLAN_VPRI << 2)
-+#define IOC_NET_HEADER_FIELD_VLAN_LENGTH (IOC_NET_HEADER_FIELD_VLAN_VPRI << 3)
-+#define IOC_NET_HEADER_FIELD_VLAN_TYPE (IOC_NET_HEADER_FIELD_VLAN_VPRI << 4)
-+#define IOC_NET_HEADER_FIELD_VLAN_ALL_FIELDS ((IOC_NET_HEADER_FIELD_VLAN_VPRI << 5) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_VLAN_TCI (IOC_NET_HEADER_FIELD_VLAN_VPRI | \
-+ IOC_NET_HEADER_FIELD_VLAN_CFI | \
-+ IOC_NET_HEADER_FIELD_VLAN_VID)
-+
-+
-+typedef uint8_t ioc_header_field_llc_t;
-+
-+#define IOC_NET_HEADER_FIELD_LLC_DSAP (1)
-+#define IOC_NET_HEADER_FIELD_LLC_SSAP (IOC_NET_HEADER_FIELD_LLC_DSAP << 1)
-+#define IOC_NET_HEADER_FIELD_LLC_CTRL (IOC_NET_HEADER_FIELD_LLC_DSAP << 2)
-+#define IOC_NET_HEADER_FIELD_LLC_ALL_FIELDS ((IOC_NET_HEADER_FIELD_LLC_DSAP << 3) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_NLPID_NLPID (1)
-+#define IOC_NET_HEADER_FIELD_NLPID_ALL_FIELDS ((IOC_NET_HEADER_FIELD_NLPID_NLPID << 1) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_snap_t;
-+
-+#define IOC_NET_HEADER_FIELD_SNAP_OUI (1)
-+#define IOC_NET_HEADER_FIELD_SNAP_PID (IOC_NET_HEADER_FIELD_SNAP_OUI << 1)
-+#define IOC_NET_HEADER_FIELD_SNAP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_SNAP_OUI << 2) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_llc_snap_t;
-+
-+#define IOC_NET_HEADER_FIELD_LLC_SNAP_TYPE (1)
-+#define IOC_NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_LLC_SNAP_TYPE << 1) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_ARP_HTYPE (1)
-+#define IOC_NET_HEADER_FIELD_ARP_PTYPE (IOC_NET_HEADER_FIELD_ARP_HTYPE << 1)
-+#define IOC_NET_HEADER_FIELD_ARP_HLEN (IOC_NET_HEADER_FIELD_ARP_HTYPE << 2)
-+#define IOC_NET_HEADER_FIELD_ARP_PLEN (IOC_NET_HEADER_FIELD_ARP_HTYPE << 3)
-+#define IOC_NET_HEADER_FIELD_ARP_OPER (IOC_NET_HEADER_FIELD_ARP_HTYPE << 4)
-+#define IOC_NET_HEADER_FIELD_ARP_SHA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 5)
-+#define IOC_NET_HEADER_FIELD_ARP_SPA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 6)
-+#define IOC_NET_HEADER_FIELD_ARP_THA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 7)
-+#define IOC_NET_HEADER_FIELD_ARP_TPA (IOC_NET_HEADER_FIELD_ARP_HTYPE << 8)
-+#define IOC_NET_HEADER_FIELD_ARP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_ARP_HTYPE << 9) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_RFC2684_LLC (1)
-+#define IOC_NET_HEADER_FIELD_RFC2684_NLPID (IOC_NET_HEADER_FIELD_RFC2684_LLC << 1)
-+#define IOC_NET_HEADER_FIELD_RFC2684_OUI (IOC_NET_HEADER_FIELD_RFC2684_LLC << 2)
-+#define IOC_NET_HEADER_FIELD_RFC2684_PID (IOC_NET_HEADER_FIELD_RFC2684_LLC << 3)
-+#define IOC_NET_HEADER_FIELD_RFC2684_VPN_OUI (IOC_NET_HEADER_FIELD_RFC2684_LLC << 4)
-+#define IOC_NET_HEADER_FIELD_RFC2684_VPN_IDX (IOC_NET_HEADER_FIELD_RFC2684_LLC << 5)
-+#define IOC_NET_HEADER_FIELD_RFC2684_ALL_FIELDS ((IOC_NET_HEADER_FIELD_RFC2684_LLC << 6) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_USER_DEFINED_SRCPORT (1)
-+#define IOC_NET_HEADER_FIELD_USER_DEFINED_PCDID (IOC_NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 1)
-+#define IOC_NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS ((IOC_NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 2) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER (1)
-+#define IOC_NET_HEADER_FIELD_PAYLOAD_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 1)
-+#define IOC_NET_HEADER_FIELD_MAX_FRM_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 2)
-+#define IOC_NET_HEADER_FIELD_MIN_FRM_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 3)
-+#define IOC_NET_HEADER_FIELD_PAYLOAD_TYPE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 4)
-+#define IOC_NET_HEADER_FIELD_FRAME_SIZE (IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 5)
-+#define IOC_NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS ((IOC_NET_HEADER_FIELD_PAYLOAD_BUFFER << 6) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_gre_t;
-+
-+#define IOC_NET_HEADER_FIELD_GRE_TYPE (1)
-+#define IOC_NET_HEADER_FIELD_GRE_ALL_FIELDS ((IOC_NET_HEADER_FIELD_GRE_TYPE << 1) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_minencap_t;
-+
-+#define IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP (1)
-+#define IOC_NET_HEADER_FIELD_MINENCAP_DST_IP (IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP << 1)
-+#define IOC_NET_HEADER_FIELD_MINENCAP_TYPE (IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP << 2)
-+#define IOC_NET_HEADER_FIELD_MINENCAP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_MINENCAP_SRC_IP << 3) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_ipsec_ah_t;
-+
-+#define IOC_NET_HEADER_FIELD_IPSEC_AH_SPI (1)
-+#define IOC_NET_HEADER_FIELD_IPSEC_AH_NH (IOC_NET_HEADER_FIELD_IPSEC_AH_SPI << 1)
-+#define IOC_NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPSEC_AH_SPI << 2) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_ipsec_esp_t;
-+
-+#define IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI (1)
-+#define IOC_NET_HEADER_FIELD_IPSEC_ESP_SEQUENCE_NUM (IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI << 1)
-+#define IOC_NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS ((IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI << 2) - 1)
-+
-+#define IOC_NET_HEADER_FIELD_IPSEC_ESP_SPI_SIZE 4
-+
-+
-+typedef uint8_t ioc_header_field_mpls_t;
-+
-+#define IOC_NET_HEADER_FIELD_MPLS_LABEL_STACK (1)
-+#define IOC_NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS ((IOC_NET_HEADER_FIELD_MPLS_LABEL_STACK << 1) - 1)
-+
-+
-+typedef uint8_t ioc_header_field_macsec_t;
-+
-+#define IOC_NET_HEADER_FIELD_MACSEC_SECTAG (1)
-+#define IOC_NET_HEADER_FIELD_MACSEC_ALL_FIELDS ((IOC_NET_HEADER_FIELD_MACSEC_SECTAG << 1) - 1)
-+
-+
-+typedef enum {
-+ e_IOC_NET_HEADER_TYPE_NONE = 0,
-+ e_IOC_NET_HEADER_TYPE_PAYLOAD,
-+ e_IOC_NET_HEADER_TYPE_ETH,
-+ e_IOC_NET_HEADER_TYPE_VLAN,
-+ e_IOC_NET_HEADER_TYPE_IPv4,
-+ e_IOC_NET_HEADER_TYPE_IPv6,
-+ e_IOC_NET_HEADER_TYPE_IP,
-+ e_IOC_NET_HEADER_TYPE_TCP,
-+ e_IOC_NET_HEADER_TYPE_UDP,
-+ e_IOC_NET_HEADER_TYPE_UDP_LITE,
-+ e_IOC_NET_HEADER_TYPE_IPHC,
-+ e_IOC_NET_HEADER_TYPE_SCTP,
-+ e_IOC_NET_HEADER_TYPE_SCTP_CHUNK_DATA,
-+ e_IOC_NET_HEADER_TYPE_PPPoE,
-+ e_IOC_NET_HEADER_TYPE_PPP,
-+ e_IOC_NET_HEADER_TYPE_PPPMUX,
-+ e_IOC_NET_HEADER_TYPE_PPPMUX_SUBFRAME,
-+ e_IOC_NET_HEADER_TYPE_L2TPv2,
-+ e_IOC_NET_HEADER_TYPE_L2TPv3_CTRL,
-+ e_IOC_NET_HEADER_TYPE_L2TPv3_SESS,
-+ e_IOC_NET_HEADER_TYPE_LLC,
-+ e_IOC_NET_HEADER_TYPE_LLC_SNAP,
-+ e_IOC_NET_HEADER_TYPE_NLPID,
-+ e_IOC_NET_HEADER_TYPE_SNAP,
-+ e_IOC_NET_HEADER_TYPE_MPLS,
-+ e_IOC_NET_HEADER_TYPE_IPSEC_AH,
-+ e_IOC_NET_HEADER_TYPE_IPSEC_ESP,
-+ e_IOC_NET_HEADER_TYPE_UDP_ENCAP_ESP, /* RFC 3948 */
-+ e_IOC_NET_HEADER_TYPE_MACSEC,
-+ e_IOC_NET_HEADER_TYPE_GRE,
-+ e_IOC_NET_HEADER_TYPE_MINENCAP,
-+ e_IOC_NET_HEADER_TYPE_DCCP,
-+ e_IOC_NET_HEADER_TYPE_ICMP,
-+ e_IOC_NET_HEADER_TYPE_IGMP,
-+ e_IOC_NET_HEADER_TYPE_ARP,
-+ e_IOC_NET_HEADER_TYPE_CAPWAP,
-+ e_IOC_NET_HEADER_TYPE_CAPWAP_DTLS,
-+ e_IOC_NET_HEADER_TYPE_RFC2684,
-+ e_IOC_NET_HEADER_TYPE_USER_DEFINED_L2,
-+ e_IOC_NET_HEADER_TYPE_USER_DEFINED_L3,
-+ e_IOC_NET_HEADER_TYPE_USER_DEFINED_L4,
-+ e_IOC_NET_HEADER_TYPE_USER_DEFINED_SHIM1,
-+ e_IOC_NET_HEADER_TYPE_USER_DEFINED_SHIM2,
-+ e_IOC_NET_MAX_HEADER_TYPE_COUNT
-+} ioc_net_header_type;
-+
-+
-+#endif /* __NET_IOCTLS_H */
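The IOC_NET_HEADER_FIELD_* macros above all follow one pattern: each field is a one-hot bit shifted from the first field of its protocol, and the corresponding *_ALL_FIELDS macro is the mask with every defined bit set. A short hedged sketch, assuming the header is installed as <linux/fmd/net_ioctls.h>, showing how an IPv4 field selection would typically be composed and checked against the full mask:

#include <stdio.h>
#include <linux/fmd/net_ioctls.h>   /* assumed install path for the header above */

int main(void)
{
        /* Select the IPv4 fields of interest by OR-ing their one-hot bits. */
        ioc_header_field_ipv4_t mask = IOC_NET_HEADER_FIELD_IPv4_SRC_IP |
                                       IOC_NET_HEADER_FIELD_IPv4_DST_IP |
                                       IOC_NET_HEADER_FIELD_IPv4_PROTO;

        /* Any valid selection is a subset of the ALL_FIELDS mask. */
        if (mask & ~IOC_NET_HEADER_FIELD_IPv4_ALL_FIELDS)
                return 1;

        printf("IPv4 field mask: 0x%04x\n", (unsigned int)mask);
        return 0;
}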
---- a/net/sched/sch_generic.c
-+++ b/net/sched/sch_generic.c
-@@ -313,6 +313,13 @@ static void dev_watchdog(unsigned long a
- txq->trans_timeout++;
- break;
- }
-+
-+ /* Devices with HW_ACCEL_MQ have multiple txqs
-+ * but only update the first one's transmission
-+ * timestamp, so avoid checking the rest.
-+ */
-+ if (dev->features & NETIF_F_HW_ACCEL_MQ)
-+ break;
- }
-
- if (some_queue_timedout) {
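The hunk above makes dev_watchdog() stop after the first tx queue when a device advertises NETIF_F_HW_ACCEL_MQ, a vendor feature bit assumed to be defined by a companion patch in this series. A minimal driver-side sketch (not taken from the patch set) of the behaviour this accounts for: the driver sets the flag and only refreshes queue 0's trans_start on transmit.

/* Illustrative driver fragment, assuming NETIF_F_HW_ACCEL_MQ exists. */
#include <linux/netdevice.h>

static netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
                                           struct net_device *net_dev)
{
        /* Hardware selects the egress queue, so only queue 0's timestamp
         * is refreshed; the watchdog change above skips the other queues.
         */
        netif_trans_update(net_dev);

        /* ... hand the frame to the hardware here ... */
        return NETDEV_TX_OK;
}

static void example_setup_features(struct net_device *net_dev)
{
        /* Vendor flag, assumed to be added elsewhere in this patch series. */
        net_dev->features |= NETIF_F_HW_ACCEL_MQ;
        net_dev->hw_features |= NETIF_F_HW_ACCEL_MQ;
}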
diff --git a/target/linux/layerscape/patches-4.14/708-mc-bus-support-layerscape.patch b/target/linux/layerscape/patches-4.14/708-mc-bus-support-layerscape.patch
deleted file mode 100644
index 129b8ecc64..0000000000
--- a/target/linux/layerscape/patches-4.14/708-mc-bus-support-layerscape.patch
+++ /dev/null
@@ -1,12074 +0,0 @@
-From ab58c737bc723f52e787e1767bbbf0fcbe39a27b Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:43 +0800
-Subject: [PATCH] mc-bus: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch adding mc-bus support for layerscape
-
-Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
-Signed-off-by: Cristian Sovaiala <cristian.sovaiala@freescale.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: J. German Rivera <German.Rivera@freescale.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Lijun Pan <Lijun.Pan@freescale.com>
-Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
-Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
-Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
----
- drivers/bus/Kconfig | 2 +
- drivers/bus/Makefile | 4 +
- drivers/bus/fsl-mc/Kconfig | 23 +
- drivers/bus/fsl-mc/Makefile | 21 +
- .../{staging/fsl-mc/bus => bus/fsl-mc}/dpbp.c | 97 +-
- .../fsl-mc/bus => bus/fsl-mc}/dpcon.c | 103 +-
- drivers/bus/fsl-mc/dpmcp.c | 99 ++
- .../fsl-mc/bus => bus/fsl-mc}/dprc-driver.c | 96 +-
- .../{staging/fsl-mc/bus => bus/fsl-mc}/dprc.c | 289 +----
- .../bus => bus/fsl-mc}/fsl-mc-allocator.c | 123 +-
- .../fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c | 322 +++++-
- .../fsl-mc/bus => bus/fsl-mc}/fsl-mc-msi.c | 16 +-
- drivers/bus/fsl-mc/fsl-mc-private.h | 223 ++++
- drivers/bus/fsl-mc/fsl-mc-restool.c | 219 ++++
- .../fsl-mc/bus => bus/fsl-mc}/mc-io.c | 51 +-
- .../fsl-mc/bus => bus/fsl-mc}/mc-sys.c | 33 +-
- drivers/irqchip/Kconfig | 6 +
- drivers/irqchip/Makefile | 1 +
- drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c | 98 ++
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2 +-
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 3 +-
- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 2 +-
- drivers/staging/fsl-mc/bus/Kconfig | 15 +-
- drivers/staging/fsl-mc/bus/Makefile | 13 -
- drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 2 +-
- .../staging/fsl-mc/bus/dpio/dpio-service.c | 2 +-
- drivers/staging/fsl-mc/bus/dpio/dpio.c | 14 +-
- drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 56 -
- drivers/staging/fsl-mc/bus/dpmcp.h | 60 -
- drivers/staging/fsl-mc/bus/dpmng-cmd.h | 58 -
- drivers/staging/fsl-mc/bus/dprc-cmd.h | 451 --------
- drivers/staging/fsl-mc/bus/dprc.h | 268 -----
- .../fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 1 +
- include/linux/fsl/mc.h | 1029 +++++++++++++++++
- include/uapi/linux/fsl_mc.h | 31 +
- 35 files changed, 2302 insertions(+), 1531 deletions(-)
- create mode 100644 drivers/bus/fsl-mc/Kconfig
- create mode 100644 drivers/bus/fsl-mc/Makefile
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dpbp.c (67%)
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dpcon.c (70%)
- create mode 100644 drivers/bus/fsl-mc/dpmcp.c
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dprc-driver.c (93%)
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dprc.c (68%)
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-allocator.c (84%)
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c (75%)
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-msi.c (96%)
- create mode 100644 drivers/bus/fsl-mc/fsl-mc-private.h
- create mode 100644 drivers/bus/fsl-mc/fsl-mc-restool.c
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/mc-io.c (89%)
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/mc-sys.c (90%)
- create mode 100644 drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
- delete mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h
- delete mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h
- delete mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h
- delete mode 100644 drivers/staging/fsl-mc/bus/dprc-cmd.h
- delete mode 100644 drivers/staging/fsl-mc/bus/dprc.h
- create mode 100644 include/linux/fsl/mc.h
- create mode 100644 include/uapi/linux/fsl_mc.h
-
---- a/drivers/bus/Kconfig
-+++ b/drivers/bus/Kconfig
-@@ -184,4 +184,6 @@ config DA8XX_MSTPRI
- configuration. Allows to adjust the priorities of all master
- peripherals.
-
-+source "drivers/bus/fsl-mc/Kconfig"
-+
- endmenu
---- a/drivers/bus/Makefile
-+++ b/drivers/bus/Makefile
-@@ -8,6 +8,10 @@ obj-$(CONFIG_ARM_CCI) += arm-cci.o
- obj-$(CONFIG_ARM_CCN) += arm-ccn.o
-
- obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
-+
-+# DPAA2 fsl-mc bus
-+obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
-+
- obj-$(CONFIG_IMX_WEIM) += imx-weim.o
- obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
- obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
---- /dev/null
-+++ b/drivers/bus/fsl-mc/Kconfig
-@@ -0,0 +1,23 @@
-+# SPDX-License-Identifier: GPL-2.0
-+#
-+# DPAA2 fsl-mc bus
-+#
-+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-+#
-+
-+config FSL_MC_BUS
-+ bool "QorIQ DPAA2 fsl-mc bus driver"
-+ depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC)))
-+ select GENERIC_MSI_IRQ_DOMAIN
-+ help
-+ Driver to enable the bus infrastructure for the QorIQ DPAA2
-+ architecture. The fsl-mc bus driver handles discovery of
-+ DPAA2 objects (which are represented as Linux devices) and
-+ binding objects to drivers.
-+
-+config FSL_MC_RESTOOL
-+ bool "Management Complex (MC) restool support"
-+ depends on FSL_MC_BUS
-+ help
-+ Provides kernel support for the Management Complex resource
-+ manager user-space tool - restool.
---- /dev/null
-+++ b/drivers/bus/fsl-mc/Makefile
-@@ -0,0 +1,21 @@
-+# SPDX-License-Identifier: GPL-2.0
-+#
-+# Freescale Management Complex (MC) bus drivers
-+#
-+# Copyright (C) 2014 Freescale Semiconductor, Inc.
-+#
-+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
-+
-+mc-bus-driver-objs := fsl-mc-bus.o \
-+ mc-sys.o \
-+ mc-io.o \
-+ dpbp.o \
-+ dpcon.o \
-+ dprc.o \
-+ dprc-driver.o \
-+ fsl-mc-allocator.o \
-+ fsl-mc-msi.o \
-+ dpmcp.o
-+
-+# MC restool kernel support
-+obj-$(CONFIG_FSL_MC_RESTOOL) += fsl-mc-restool.o
---- a/drivers/staging/fsl-mc/bus/dpbp.c
-+++ /dev/null
-@@ -1,253 +0,0 @@
--// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- */
--#include <linux/kernel.h>
--#include "../include/mc.h"
--#include "../include/dpbp.h"
--
--#include "dpbp-cmd.h"
--
--/**
-- * dpbp_open() - Open a control session for the specified object.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @dpbp_id: DPBP unique ID
-- * @token: Returned token; use in subsequent API calls
-- *
-- * This function can be used to open a control session for an
-- * already created object; an object may have been declared in
-- * the DPL or by calling the dpbp_create function.
-- * This function returns a unique authentication token,
-- * associated with the specific object ID and the specific MC
-- * portal; this token must be used in all subsequent commands for
-- * this specific object
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_open(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int dpbp_id,
-- u16 *token)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_open *cmd_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
-- cmd_flags, 0);
-- cmd_params = (struct dpbp_cmd_open *)cmd.params;
-- cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- *token = mc_cmd_hdr_read_token(&cmd);
--
-- return err;
--}
--EXPORT_SYMBOL(dpbp_open);
--
--/**
-- * dpbp_close() - Close the control session of the object
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- *
-- * After this function is called, no further operations are
-- * allowed on the object without opening a new control session.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_close(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
-- token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpbp_close);
--
--/**
-- * dpbp_enable() - Enable the DPBP.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
-- token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpbp_enable);
--
--/**
-- * dpbp_disable() - Disable the DPBP.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_disable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
-- cmd_flags, token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpbp_disable);
--
--/**
-- * dpbp_is_enabled() - Check if the DPBP is enabled.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @en: Returns '1' if object is enabled; '0' otherwise
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int *en)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_rsp_is_enabled *rsp_params;
-- int err;
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
-- token);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
-- *en = rsp_params->enabled & DPBP_ENABLE;
--
-- return 0;
--}
--EXPORT_SYMBOL(dpbp_is_enabled);
--
--/**
-- * dpbp_reset() - Reset the DPBP, returns the object to initial state.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_reset(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
-- cmd_flags, token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpbp_reset);
--
--/**
-- * dpbp_get_attributes - Retrieve DPBP attributes.
-- *
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @attr: Returned object's attributes
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_get_attributes(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpbp_attr *attr)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_rsp_get_attributes *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
-- cmd_flags, token);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
-- attr->bpid = le16_to_cpu(rsp_params->bpid);
-- attr->id = le32_to_cpu(rsp_params->id);
--
-- return 0;
--}
--EXPORT_SYMBOL(dpbp_get_attributes);
--
--/**
-- * dpbp_get_api_version - Get Data Path Buffer Pool API version
-- * @mc_io: Pointer to Mc portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @major_ver: Major version of Buffer Pool API
-- * @minor_ver: Minor version of Buffer Pool API
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 *major_ver,
-- u16 *minor_ver)
--{
-- struct mc_command cmd = { 0 };
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
-- cmd_flags, 0);
--
-- /* send command to mc */
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
--
-- return 0;
--}
--EXPORT_SYMBOL(dpbp_get_api_version);
---- /dev/null
-+++ b/drivers/bus/fsl-mc/dpbp.c
-@@ -0,0 +1,186 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ */
-+#include <linux/kernel.h>
-+#include <linux/fsl/mc.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+/**
-+ * dpbp_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpbp_id: DPBP unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpbp_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpbp_id,
-+ u16 *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpbp_cmd_open *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
-+ cmd_flags, 0);
-+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
-+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(dpbp_open);
-+
-+/**
-+ * dpbp_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpbp_close);
-+
-+/**
-+ * dpbp_enable() - Enable the DPBP.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpbp_enable);
-+
-+/**
-+ * dpbp_disable() - Disable the DPBP.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpbp_disable);
-+
-+/**
-+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpbp_reset);
-+
-+/**
-+ * dpbp_get_attributes - Retrieve DPBP attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpbp_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpbp_rsp_get_attributes *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
-+ attr->bpid = le16_to_cpu(rsp_params->bpid);
-+ attr->id = le32_to_cpu(rsp_params->id);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dpbp_get_attributes);
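Taken together, the DPBP commands above are normally used in a short open/enable/query sequence by a driver that owns a buffer pool. A hedged sketch using only the calls shown above; the mc_io handle comes from the calling driver, and the pool is left enabled for data-path use while the control session is closed.

#include <linux/fsl/mc.h>

/* Illustrative only: query the buffer pool id of an already-created DPBP. */
static int example_dpbp_setup(struct fsl_mc_io *mc_io, int dpbp_id, u16 *bpid)
{
        struct dpbp_attr attr;
        u16 token;
        int err;

        err = dpbp_open(mc_io, 0, dpbp_id, &token);
        if (err)
                return err;

        err = dpbp_enable(mc_io, 0, token);
        if (err)
                goto close;

        err = dpbp_get_attributes(mc_io, 0, token, &attr);
        if (err)
                goto disable;

        *bpid = attr.bpid;      /* hardware buffer pool id used on the data path */

        dpbp_close(mc_io, 0, token);
        return 0;

disable:
        dpbp_disable(mc_io, 0, token);
close:
        dpbp_close(mc_io, 0, token);
        return err;
}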
---- a/drivers/staging/fsl-mc/bus/dpcon.c
-+++ /dev/null
-@@ -1,291 +0,0 @@
--// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- */
--#include <linux/kernel.h>
--#include "../include/mc.h"
--#include "../include/dpcon.h"
--
--#include "dpcon-cmd.h"
--
--/**
-- * dpcon_open() - Open a control session for the specified object
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @dpcon_id: DPCON unique ID
-- * @token: Returned token; use in subsequent API calls
-- *
-- * This function can be used to open a control session for an
-- * already created object; an object may have been declared in
-- * the DPL or by calling the dpcon_create() function.
-- * This function returns a unique authentication token,
-- * associated with the specific object ID and the specific MC
-- * portal; this token must be used in all subsequent commands for
-- * this specific object.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpcon_open(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int dpcon_id,
-- u16 *token)
--{
-- struct mc_command cmd = { 0 };
-- struct dpcon_cmd_open *dpcon_cmd;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
-- cmd_flags,
-- 0);
-- dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
-- dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- *token = mc_cmd_hdr_read_token(&cmd);
--
-- return 0;
--}
--EXPORT_SYMBOL(dpcon_open);
--
--/**
-- * dpcon_close() - Close the control session of the object
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPCON object
-- *
-- * After this function is called, no further operations are
-- * allowed on the object without opening a new control session.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpcon_close(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
-- cmd_flags,
-- token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpcon_close);
--
--/**
-- * dpcon_enable() - Enable the DPCON
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPCON object
-- *
-- * Return: '0' on Success; Error code otherwise
-- */
--int dpcon_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
-- cmd_flags,
-- token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpcon_enable);
--
--/**
-- * dpcon_disable() - Disable the DPCON
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPCON object
-- *
-- * Return: '0' on Success; Error code otherwise
-- */
--int dpcon_disable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
-- cmd_flags,
-- token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpcon_disable);
--
--/**
-- * dpcon_is_enabled() - Check if the DPCON is enabled.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPCON object
-- * @en: Returns '1' if object is enabled; '0' otherwise
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int *en)
--{
-- struct mc_command cmd = { 0 };
-- struct dpcon_rsp_is_enabled *dpcon_rsp;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
-- cmd_flags,
-- token);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
-- *en = dpcon_rsp->enabled & DPCON_ENABLE;
--
-- return 0;
--}
--EXPORT_SYMBOL(dpcon_is_enabled);
--
--/**
-- * dpcon_reset() - Reset the DPCON, returns the object to initial state.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPCON object
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpcon_reset(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
-- cmd_flags, token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpcon_reset);
--
--/**
-- * dpcon_get_attributes() - Retrieve DPCON attributes.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPCON object
-- * @attr: Object's attributes
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpcon_get_attributes(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpcon_attr *attr)
--{
-- struct mc_command cmd = { 0 };
-- struct dpcon_rsp_get_attr *dpcon_rsp;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
-- cmd_flags,
-- token);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
-- attr->id = le32_to_cpu(dpcon_rsp->id);
-- attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
-- attr->num_priorities = dpcon_rsp->num_priorities;
--
-- return 0;
--}
--EXPORT_SYMBOL(dpcon_get_attributes);
--
--/**
-- * dpcon_set_notification() - Set DPCON notification destination
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPCON object
-- * @cfg: Notification parameters
-- *
-- * Return: '0' on Success; Error code otherwise
-- */
--int dpcon_set_notification(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpcon_notification_cfg *cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dpcon_cmd_set_notification *dpcon_cmd;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
-- cmd_flags,
-- token);
-- dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
-- dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
-- dpcon_cmd->priority = cfg->priority;
-- dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dpcon_set_notification);
--
--/**
-- * dpcon_get_api_version - Get Data Path Concentrator API version
-- * @mc_io: Pointer to MC portal's DPCON object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @major_ver: Major version of DPCON API
-- * @minor_ver: Minor version of DPCON API
-- *
-- * Return: '0' on Success; Error code otherwise
-- */
--int dpcon_get_api_version(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 *major_ver,
-- u16 *minor_ver)
--{
-- struct mc_command cmd = { 0 };
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
-- cmd_flags, 0);
--
-- /* send command to mc */
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
--
-- return 0;
--}
--EXPORT_SYMBOL(dpcon_get_api_version);
---- /dev/null
-+++ b/drivers/bus/fsl-mc/dpcon.c
-@@ -0,0 +1,222 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ */
-+#include <linux/kernel.h>
-+#include <linux/fsl/mc.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+/**
-+ * dpcon_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpcon_id: DPCON unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpcon_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpcon_id,
-+ u16 *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpcon_cmd_open *dpcon_cmd;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
-+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dpcon_open);
-+
-+/**
-+ * dpcon_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpcon_close);
-+
-+/**
-+ * dpcon_enable() - Enable the DPCON
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpcon_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpcon_enable);
-+
-+/**
-+ * dpcon_disable() - Disable the DPCON
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpcon_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpcon_disable);
-+
-+/**
-+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpcon_reset);
-+
-+/**
-+ * dpcon_get_attributes() - Retrieve DPCON attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @attr: Object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpcon_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpcon_rsp_get_attr *dpcon_rsp;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
-+ attr->id = le32_to_cpu(dpcon_rsp->id);
-+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
-+ attr->num_priorities = dpcon_rsp->num_priorities;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dpcon_get_attributes);
-+
-+/**
-+ * dpcon_set_notification() - Set DPCON notification destination
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @cfg: Notification parameters
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpcon_set_notification(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpcon_notification_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpcon_cmd_set_notification *dpcon_cmd;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
-+ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
-+ dpcon_cmd->priority = cfg->priority;
-+ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dpcon_set_notification);
---- /dev/null
-+++ b/drivers/bus/fsl-mc/dpmcp.c
-@@ -0,0 +1,99 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ */
-+#include <linux/kernel.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+/**
-+ * dpmcp_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpmcp_id: DPMCP unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpmcp_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpmcp_id,
-+ u16 *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpmcp_cmd_open *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
-+ cmd_flags, 0);
-+ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
-+ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return err;
-+}
-+
-+/**
-+ * dpmcp_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
---- a/drivers/staging/fsl-mc/bus/dprc-driver.c
-+++ /dev/null
-@@ -1,813 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/*
-- * Freescale data path resource container (DPRC) driver
-- *
-- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-- * Author: German Rivera <German.Rivera@freescale.com>
-- *
-- */
--
--#include <linux/module.h>
--#include <linux/slab.h>
--#include <linux/interrupt.h>
--#include <linux/msi.h>
--#include "../include/mc.h"
--
--#include "dprc-cmd.h"
--#include "fsl-mc-private.h"
--
--#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
--
--struct fsl_mc_child_objs {
-- int child_count;
-- struct fsl_mc_obj_desc *child_array;
--};
--
--static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
-- struct fsl_mc_obj_desc *obj_desc)
--{
-- return mc_dev->obj_desc.id == obj_desc->id &&
-- strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
--
--}
--
--static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
--{
-- int i;
-- struct fsl_mc_child_objs *objs;
-- struct fsl_mc_device *mc_dev;
--
-- WARN_ON(!dev);
-- WARN_ON(!data);
-- mc_dev = to_fsl_mc_device(dev);
-- objs = data;
--
-- for (i = 0; i < objs->child_count; i++) {
-- struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
--
-- if (strlen(obj_desc->type) != 0 &&
-- fsl_mc_device_match(mc_dev, obj_desc))
-- break;
-- }
--
-- if (i == objs->child_count)
-- fsl_mc_device_remove(mc_dev);
--
-- return 0;
--}
--
--static int __fsl_mc_device_remove(struct device *dev, void *data)
--{
-- WARN_ON(!dev);
-- WARN_ON(data);
-- fsl_mc_device_remove(to_fsl_mc_device(dev));
-- return 0;
--}
--
--/**
-- * dprc_remove_devices - Removes devices for objects removed from a DPRC
-- *
-- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-- * @obj_desc_array: array of object descriptors for child objects currently
-- * present in the DPRC in the MC.
-- * @num_child_objects_in_mc: number of entries in obj_desc_array
-- *
-- * Synchronizes the state of the Linux bus driver with the actual state of
-- * the MC by removing devices that represent MC objects that have
-- * been dynamically removed in the physical DPRC.
-- */
--static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
-- struct fsl_mc_obj_desc *obj_desc_array,
-- int num_child_objects_in_mc)
--{
-- if (num_child_objects_in_mc != 0) {
-- /*
-- * Remove child objects that are in the DPRC in Linux,
-- * but not in the MC:
-- */
-- struct fsl_mc_child_objs objs;
--
-- objs.child_count = num_child_objects_in_mc;
-- objs.child_array = obj_desc_array;
-- device_for_each_child(&mc_bus_dev->dev, &objs,
-- __fsl_mc_device_remove_if_not_in_mc);
-- } else {
-- /*
-- * There are no child objects for this DPRC in the MC.
-- * So, remove all the child devices from Linux:
-- */
-- device_for_each_child(&mc_bus_dev->dev, NULL,
-- __fsl_mc_device_remove);
-- }
--}
--
--static int __fsl_mc_device_match(struct device *dev, void *data)
--{
-- struct fsl_mc_obj_desc *obj_desc = data;
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
--
-- return fsl_mc_device_match(mc_dev, obj_desc);
--}
--
--static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
-- *obj_desc,
-- struct fsl_mc_device
-- *mc_bus_dev)
--{
-- struct device *dev;
--
-- dev = device_find_child(&mc_bus_dev->dev, obj_desc,
-- __fsl_mc_device_match);
--
-- return dev ? to_fsl_mc_device(dev) : NULL;
--}
--
--/**
-- * check_plugged_state_change - Check change in an MC object's plugged state
-- *
-- * @mc_dev: pointer to the fsl-mc device for a given MC object
-- * @obj_desc: pointer to the MC object's descriptor in the MC
-- *
-- * If the plugged state has changed from unplugged to plugged, the fsl-mc
-- * device is bound to the corresponding device driver.
-- * If the plugged state has changed from plugged to unplugged, the fsl-mc
-- * device is unbound from the corresponding device driver.
-- */
--static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
-- struct fsl_mc_obj_desc *obj_desc)
--{
-- int error;
-- u32 plugged_flag_at_mc =
-- obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
--
-- if (plugged_flag_at_mc !=
-- (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
-- if (plugged_flag_at_mc) {
-- mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
-- error = device_attach(&mc_dev->dev);
-- if (error < 0) {
-- dev_err(&mc_dev->dev,
-- "device_attach() failed: %d\n",
-- error);
-- }
-- } else {
-- mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
-- device_release_driver(&mc_dev->dev);
-- }
-- }
--}
--
--/**
-- * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
-- *
-- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-- * @obj_desc_array: array of device descriptors for child devices currently
-- * present in the physical DPRC.
-- * @num_child_objects_in_mc: number of entries in obj_desc_array
-- *
-- * Synchronizes the state of the Linux bus driver with the actual
-- * state of the MC by adding objects that have been newly discovered
-- * in the physical DPRC.
-- */
--static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
-- struct fsl_mc_obj_desc *obj_desc_array,
-- int num_child_objects_in_mc)
--{
-- int error;
-- int i;
--
-- for (i = 0; i < num_child_objects_in_mc; i++) {
-- struct fsl_mc_device *child_dev;
-- struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
--
-- if (strlen(obj_desc->type) == 0)
-- continue;
--
-- /*
-- * Check if device is already known to Linux:
-- */
-- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
-- if (child_dev) {
-- check_plugged_state_change(child_dev, obj_desc);
-- put_device(&child_dev->dev);
-- continue;
-- }
--
-- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
-- &child_dev);
-- if (error < 0)
-- continue;
-- }
--}
--
--/**
-- * dprc_scan_objects - Discover objects in a DPRC
-- *
-- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-- * @total_irq_count: total number of IRQs needed by objects in the DPRC.
-- *
-- * Detects objects added and removed from a DPRC and synchronizes the
-- * state of the Linux bus driver with the MC by adding and removing
-- * devices accordingly.
-- * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
-- * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
-- * All allocatable devices need to be probed before all non-allocatable
-- * devices, to ensure that device drivers for non-allocatable
-- * devices can allocate any type of allocatable devices.
-- * That is, we need to ensure that the corresponding resource pools are
-- * populated before they can get allocation requests from probe callbacks
-- * of the device drivers for the non-allocatable devices.
-- */
--static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
-- unsigned int *total_irq_count)
--{
-- int num_child_objects;
-- int dprc_get_obj_failures;
-- int error;
-- unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
-- struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
--
-- error = dprc_get_obj_count(mc_bus_dev->mc_io,
-- 0,
-- mc_bus_dev->mc_handle,
-- &num_child_objects);
-- if (error < 0) {
-- dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
-- error);
-- return error;
-- }
--
-- if (num_child_objects != 0) {
-- int i;
--
-- child_obj_desc_array =
-- devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
-- sizeof(*child_obj_desc_array),
-- GFP_KERNEL);
-- if (!child_obj_desc_array)
-- return -ENOMEM;
--
-- /*
-- * Discover objects currently present in the physical DPRC:
-- */
-- dprc_get_obj_failures = 0;
-- for (i = 0; i < num_child_objects; i++) {
-- struct fsl_mc_obj_desc *obj_desc =
-- &child_obj_desc_array[i];
--
-- error = dprc_get_obj(mc_bus_dev->mc_io,
-- 0,
-- mc_bus_dev->mc_handle,
-- i, obj_desc);
-- if (error < 0) {
-- dev_err(&mc_bus_dev->dev,
-- "dprc_get_obj(i=%d) failed: %d\n",
-- i, error);
-- /*
-- * Mark the obj entry as "invalid", by using the
-- * empty string as obj type:
-- */
-- obj_desc->type[0] = '\0';
-- obj_desc->id = error;
-- dprc_get_obj_failures++;
-- continue;
-- }
--
-- /*
-- * add a quirk for all versions of dpseci < 4.0...none
-- * are coherent regardless of what the MC reports.
-- */
-- if ((strcmp(obj_desc->type, "dpseci") == 0) &&
-- (obj_desc->ver_major < 4))
-- obj_desc->flags |=
-- FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
--
-- irq_count += obj_desc->irq_count;
-- dev_dbg(&mc_bus_dev->dev,
-- "Discovered object: type %s, id %d\n",
-- obj_desc->type, obj_desc->id);
-- }
--
-- if (dprc_get_obj_failures != 0) {
-- dev_err(&mc_bus_dev->dev,
-- "%d out of %d devices could not be retrieved\n",
-- dprc_get_obj_failures, num_child_objects);
-- }
-- }
--
-- *total_irq_count = irq_count;
-- dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
-- num_child_objects);
--
-- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
-- num_child_objects);
--
-- if (child_obj_desc_array)
-- devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
--
-- return 0;
--}
--
--/**
-- * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
-- *
-- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-- *
-- * Scans the physical DPRC and synchronizes the state of the Linux
-- * bus driver with the actual state of the MC by adding and removing
-- * devices as appropriate.
-- */
--static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
--{
-- int error;
-- unsigned int irq_count;
-- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
--
-- fsl_mc_init_all_resource_pools(mc_bus_dev);
--
-- /*
-- * Discover objects in the DPRC:
-- */
-- mutex_lock(&mc_bus->scan_mutex);
-- error = dprc_scan_objects(mc_bus_dev, &irq_count);
-- mutex_unlock(&mc_bus->scan_mutex);
-- if (error < 0)
-- goto error;
--
-- if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
-- if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
-- dev_warn(&mc_bus_dev->dev,
-- "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
-- irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
-- }
--
-- error = fsl_mc_populate_irq_pool(
-- mc_bus,
-- FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
-- if (error < 0)
-- goto error;
-- }
--
-- return 0;
--error:
-- fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
-- return error;
--}
--
--/**
-- * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
-- *
-- * @irq: IRQ number of the interrupt being handled
-- * @arg: Pointer to device structure
-- */
--static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
--{
-- return IRQ_WAKE_THREAD;
--}
--
--/**
-- * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
-- *
-- * @irq: IRQ number of the interrupt being handled
-- * @arg: Pointer to device structure
-- */
--static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
--{
-- int error;
-- u32 status;
-- struct device *dev = arg;
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
-- struct fsl_mc_io *mc_io = mc_dev->mc_io;
-- struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
--
-- dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
-- irq_num, smp_processor_id());
--
-- if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC)))
-- return IRQ_HANDLED;
--
-- mutex_lock(&mc_bus->scan_mutex);
-- if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
-- goto out;
--
-- status = 0;
-- error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
-- &status);
-- if (error < 0) {
-- dev_err(dev,
-- "dprc_get_irq_status() failed: %d\n", error);
-- goto out;
-- }
--
-- error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
-- status);
-- if (error < 0) {
-- dev_err(dev,
-- "dprc_clear_irq_status() failed: %d\n", error);
-- goto out;
-- }
--
-- if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
-- DPRC_IRQ_EVENT_OBJ_REMOVED |
-- DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
-- DPRC_IRQ_EVENT_OBJ_DESTROYED |
-- DPRC_IRQ_EVENT_OBJ_CREATED)) {
-- unsigned int irq_count;
--
-- error = dprc_scan_objects(mc_dev, &irq_count);
-- if (error < 0) {
-- /*
-- * If the error is -ENXIO, we ignore it, as it indicates
-- * that the object scan was aborted, as we detected that
-- * an object was removed from the DPRC in the MC, while
-- * we were scanning the DPRC.
-- */
-- if (error != -ENXIO) {
-- dev_err(dev, "dprc_scan_objects() failed: %d\n",
-- error);
-- }
--
-- goto out;
-- }
--
-- if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
-- dev_warn(dev,
-- "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
-- irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
-- }
-- }
--
--out:
-- mutex_unlock(&mc_bus->scan_mutex);
-- return IRQ_HANDLED;
--}
--
--/*
-- * Disable and clear interrupt for a given DPRC object
-- */
--static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
--{
-- int error;
-- struct fsl_mc_io *mc_io = mc_dev->mc_io;
--
-- WARN_ON(mc_dev->obj_desc.irq_count != 1);
--
-- /*
-- * Disable generation of interrupt, while we configure it:
-- */
-- error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
-- if (error < 0) {
-- dev_err(&mc_dev->dev,
-- "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
-- error);
-- return error;
-- }
--
-- /*
-- * Disable all interrupt causes for the interrupt:
-- */
-- error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
-- if (error < 0) {
-- dev_err(&mc_dev->dev,
-- "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
-- error);
-- return error;
-- }
--
-- /*
-- * Clear any leftover interrupts:
-- */
-- error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
-- if (error < 0) {
-- dev_err(&mc_dev->dev,
-- "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
-- error);
-- return error;
-- }
--
-- return 0;
--}
--
--static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
--{
-- int error;
-- struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
--
-- WARN_ON(mc_dev->obj_desc.irq_count != 1);
--
-- /*
-- * NOTE: devm_request_threaded_irq() invokes the device-specific
-- * function that programs the MSI physically in the device
-- */
-- error = devm_request_threaded_irq(&mc_dev->dev,
-- irq->msi_desc->irq,
-- dprc_irq0_handler,
-- dprc_irq0_handler_thread,
-- IRQF_NO_SUSPEND | IRQF_ONESHOT,
-- dev_name(&mc_dev->dev),
-- &mc_dev->dev);
-- if (error < 0) {
-- dev_err(&mc_dev->dev,
-- "devm_request_threaded_irq() failed: %d\n",
-- error);
-- return error;
-- }
--
-- return 0;
--}
--
--static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
--{
-- int error;
--
-- /*
-- * Enable all interrupt causes for the interrupt:
-- */
-- error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
-- ~0x0u);
-- if (error < 0) {
-- dev_err(&mc_dev->dev,
-- "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
-- error);
--
-- return error;
-- }
--
-- /*
-- * Enable generation of the interrupt:
-- */
-- error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
-- if (error < 0) {
-- dev_err(&mc_dev->dev,
-- "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
-- error);
--
-- return error;
-- }
--
-- return 0;
--}
--
--/*
-- * Setup interrupt for a given DPRC device
-- */
--static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
--{
-- int error;
--
-- error = fsl_mc_allocate_irqs(mc_dev);
-- if (error < 0)
-- return error;
--
-- error = disable_dprc_irq(mc_dev);
-- if (error < 0)
-- goto error_free_irqs;
--
-- error = register_dprc_irq_handler(mc_dev);
-- if (error < 0)
-- goto error_free_irqs;
--
-- error = enable_dprc_irq(mc_dev);
-- if (error < 0)
-- goto error_free_irqs;
--
-- return 0;
--
--error_free_irqs:
-- fsl_mc_free_irqs(mc_dev);
-- return error;
--}
--
--/**
-- * dprc_probe - callback invoked when a DPRC is being bound to this driver
-- *
-- * @mc_dev: Pointer to fsl-mc device representing a DPRC
-- *
-- * It opens the physical DPRC in the MC.
-- * It scans the DPRC to discover the MC objects contained in it.
-- * It creates the interrupt pool for the MC bus associated with the DPRC.
-- * It configures the interrupts for the DPRC device itself.
-- */
--static int dprc_probe(struct fsl_mc_device *mc_dev)
--{
-- int error;
-- size_t region_size;
-- struct device *parent_dev = mc_dev->dev.parent;
-- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
-- bool mc_io_created = false;
-- bool msi_domain_set = false;
-- u16 major_ver, minor_ver;
--
-- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
-- return -EINVAL;
--
-- if (WARN_ON(dev_get_msi_domain(&mc_dev->dev)))
-- return -EINVAL;
--
-- if (!mc_dev->mc_io) {
-- /*
-- * This is a child DPRC:
-- */
-- if (WARN_ON(!dev_is_fsl_mc(parent_dev)))
-- return -EINVAL;
--
-- if (WARN_ON(mc_dev->obj_desc.region_count == 0))
-- return -EINVAL;
--
-- region_size = resource_size(mc_dev->regions);
--
-- error = fsl_create_mc_io(&mc_dev->dev,
-- mc_dev->regions[0].start,
-- region_size,
-- NULL,
-- FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
-- &mc_dev->mc_io);
-- if (error < 0)
-- return error;
--
-- mc_io_created = true;
--
-- /*
-- * Inherit parent MSI domain:
-- */
-- dev_set_msi_domain(&mc_dev->dev,
-- dev_get_msi_domain(parent_dev));
-- msi_domain_set = true;
-- } else {
-- /*
-- * This is a root DPRC
-- */
-- struct irq_domain *mc_msi_domain;
--
-- if (WARN_ON(dev_is_fsl_mc(parent_dev)))
-- return -EINVAL;
--
-- error = fsl_mc_find_msi_domain(parent_dev,
-- &mc_msi_domain);
-- if (error < 0) {
-- dev_warn(&mc_dev->dev,
-- "WARNING: MC bus without interrupt support\n");
-- } else {
-- dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
-- msi_domain_set = true;
-- }
-- }
--
-- error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
-- &mc_dev->mc_handle);
-- if (error < 0) {
-- dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
-- goto error_cleanup_msi_domain;
-- }
--
-- error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
-- &mc_bus->dprc_attr);
-- if (error < 0) {
-- dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
-- error);
-- goto error_cleanup_open;
-- }
--
-- error = dprc_get_api_version(mc_dev->mc_io, 0,
-- &major_ver,
-- &minor_ver);
-- if (error < 0) {
-- dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
-- error);
-- goto error_cleanup_open;
-- }
--
-- if (major_ver < DPRC_MIN_VER_MAJOR ||
-- (major_ver == DPRC_MIN_VER_MAJOR &&
-- minor_ver < DPRC_MIN_VER_MINOR)) {
-- dev_err(&mc_dev->dev,
-- "ERROR: DPRC version %d.%d not supported\n",
-- major_ver, minor_ver);
-- error = -ENOTSUPP;
-- goto error_cleanup_open;
-- }
--
-- mutex_init(&mc_bus->scan_mutex);
--
-- /*
-- * Discover MC objects in DPRC object:
-- */
-- error = dprc_scan_container(mc_dev);
-- if (error < 0)
-- goto error_cleanup_open;
--
-- /*
-- * Configure interrupt for the DPRC object associated with this MC bus:
-- */
-- error = dprc_setup_irq(mc_dev);
-- if (error < 0)
-- goto error_cleanup_open;
--
-- dev_info(&mc_dev->dev, "DPRC device bound to driver");
-- return 0;
--
--error_cleanup_open:
-- (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
--
--error_cleanup_msi_domain:
-- if (msi_domain_set)
-- dev_set_msi_domain(&mc_dev->dev, NULL);
--
-- if (mc_io_created) {
-- fsl_destroy_mc_io(mc_dev->mc_io);
-- mc_dev->mc_io = NULL;
-- }
--
-- return error;
--}
--
--/*
-- * Tear down interrupt for a given DPRC object
-- */
--static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
--{
-- struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
--
-- (void)disable_dprc_irq(mc_dev);
--
-- devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
--
-- fsl_mc_free_irqs(mc_dev);
--}
--
--/**
-- * dprc_remove - callback invoked when a DPRC is being unbound from this driver
-- *
-- * @mc_dev: Pointer to fsl-mc device representing the DPRC
-- *
-- * It removes the DPRC's child objects from Linux (not from the MC) and
-- * closes the DPRC device in the MC.
-- * It tears down the interrupts that were configured for the DPRC device.
-- * It destroys the interrupt pool associated with this MC bus.
-- */
--static int dprc_remove(struct fsl_mc_device *mc_dev)
--{
-- int error;
-- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
--
-- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
-- return -EINVAL;
-- if (WARN_ON(!mc_dev->mc_io))
-- return -EINVAL;
--
-- if (WARN_ON(!mc_bus->irq_resources))
-- return -EINVAL;
--
-- if (dev_get_msi_domain(&mc_dev->dev))
-- dprc_teardown_irq(mc_dev);
--
-- device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
--
-- if (dev_get_msi_domain(&mc_dev->dev)) {
-- fsl_mc_cleanup_irq_pool(mc_bus);
-- dev_set_msi_domain(&mc_dev->dev, NULL);
-- }
--
-- fsl_mc_cleanup_all_resource_pools(mc_dev);
--
-- error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-- if (error < 0)
-- dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
--
-- if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
-- fsl_destroy_mc_io(mc_dev->mc_io);
-- mc_dev->mc_io = NULL;
-- }
--
-- dev_info(&mc_dev->dev, "DPRC device unbound from driver");
-- return 0;
--}
--
--static const struct fsl_mc_device_id match_id_table[] = {
-- {
-- .vendor = FSL_MC_VENDOR_FREESCALE,
-- .obj_type = "dprc"},
-- {.vendor = 0x0},
--};
--
--static struct fsl_mc_driver dprc_driver = {
-- .driver = {
-- .name = FSL_MC_DPRC_DRIVER_NAME,
-- .owner = THIS_MODULE,
-- .pm = NULL,
-- },
-- .match_id_table = match_id_table,
-- .probe = dprc_probe,
-- .remove = dprc_remove,
--};
--
--int __init dprc_driver_init(void)
--{
-- return fsl_mc_driver_register(&dprc_driver);
--}
--
--void dprc_driver_exit(void)
--{
-- fsl_mc_driver_unregister(&dprc_driver);
--}
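
Between the removal of the staging dprc-driver.c above and its re-addition under drivers/bus/fsl-mc below, the most visible API change is that dprc_scan_objects() is no longer static and gains a driver_override argument, with total_irq_count becoming optional. As an illustrative sketch (not part of the diff), a caller that only wants to rescan a container could mirror the new dprc_scan_container(); example_rescan_dprc is a hypothetical name, and the types and locking follow the driver code below.

/* Hypothetical caller of the new, non-static dprc_scan_objects(). */
static int example_rescan_dprc(struct fsl_mc_device *mc_bus_dev)
{
	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
	int err;

	mutex_lock(&mc_bus->scan_mutex);
	/*
	 * NULL driver_override keeps normal driver matching;
	 * NULL total_irq_count means the caller does not need the IRQ tally.
	 */
	err = dprc_scan_objects(mc_bus_dev, NULL, NULL);
	mutex_unlock(&mc_bus->scan_mutex);

	return err;
}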
---- /dev/null
-+++ b/drivers/bus/fsl-mc/dprc-driver.c
-@@ -0,0 +1,815 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Freescale data path resource container (DPRC) driver
-+ *
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-+ * Author: German Rivera <German.Rivera@freescale.com>
-+ *
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/msi.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
-+
-+struct fsl_mc_child_objs {
-+ int child_count;
-+ struct fsl_mc_obj_desc *child_array;
-+};
-+
-+static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
-+ struct fsl_mc_obj_desc *obj_desc)
-+{
-+ return mc_dev->obj_desc.id == obj_desc->id &&
-+ strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
-+
-+}
-+
-+static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
-+{
-+ int i;
-+ struct fsl_mc_child_objs *objs;
-+ struct fsl_mc_device *mc_dev;
-+
-+ mc_dev = to_fsl_mc_device(dev);
-+ objs = data;
-+
-+ for (i = 0; i < objs->child_count; i++) {
-+ struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
-+
-+ if (strlen(obj_desc->type) != 0 &&
-+ fsl_mc_device_match(mc_dev, obj_desc))
-+ break;
-+ }
-+
-+ if (i == objs->child_count)
-+ fsl_mc_device_remove(mc_dev);
-+
-+ return 0;
-+}
-+
-+static int __fsl_mc_device_remove(struct device *dev, void *data)
-+{
-+ fsl_mc_device_remove(to_fsl_mc_device(dev));
-+ return 0;
-+}
-+
-+/**
-+ * dprc_remove_devices - Removes devices for objects removed from a DPRC
-+ *
-+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-+ * @obj_desc_array: array of object descriptors for child objects currently
-+ * present in the DPRC in the MC.
-+ * @num_child_objects_in_mc: number of entries in obj_desc_array
-+ *
-+ * Synchronizes the state of the Linux bus driver with the actual state of
-+ * the MC by removing devices that represent MC objects that have
-+ * been dynamically removed in the physical DPRC.
-+ */
-+static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
-+ struct fsl_mc_obj_desc *obj_desc_array,
-+ int num_child_objects_in_mc)
-+{
-+ if (num_child_objects_in_mc != 0) {
-+ /*
-+ * Remove child objects that are in the DPRC in Linux,
-+ * but not in the MC:
-+ */
-+ struct fsl_mc_child_objs objs;
-+
-+ objs.child_count = num_child_objects_in_mc;
-+ objs.child_array = obj_desc_array;
-+ device_for_each_child(&mc_bus_dev->dev, &objs,
-+ __fsl_mc_device_remove_if_not_in_mc);
-+ } else {
-+ /*
-+ * There are no child objects for this DPRC in the MC.
-+ * So, remove all the child devices from Linux:
-+ */
-+ device_for_each_child(&mc_bus_dev->dev, NULL,
-+ __fsl_mc_device_remove);
-+ }
-+}
-+
-+static int __fsl_mc_device_match(struct device *dev, void *data)
-+{
-+ struct fsl_mc_obj_desc *obj_desc = data;
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+
-+ return fsl_mc_device_match(mc_dev, obj_desc);
-+}
-+
-+static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
-+ *obj_desc,
-+ struct fsl_mc_device
-+ *mc_bus_dev)
-+{
-+ struct device *dev;
-+
-+ dev = device_find_child(&mc_bus_dev->dev, obj_desc,
-+ __fsl_mc_device_match);
-+
-+ return dev ? to_fsl_mc_device(dev) : NULL;
-+}
-+
-+/**
-+ * check_plugged_state_change - Check change in an MC object's plugged state
-+ *
-+ * @mc_dev: pointer to the fsl-mc device for a given MC object
-+ * @obj_desc: pointer to the MC object's descriptor in the MC
-+ *
-+ * If the plugged state has changed from unplugged to plugged, the fsl-mc
-+ * device is bound to the corresponding device driver.
-+ * If the plugged state has changed from plugged to unplugged, the fsl-mc
-+ * device is unbound from the corresponding device driver.
-+ */
-+static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
-+ struct fsl_mc_obj_desc *obj_desc)
-+{
-+ int error;
-+ u32 plugged_flag_at_mc =
-+ obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
-+
-+ if (plugged_flag_at_mc !=
-+ (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
-+ if (plugged_flag_at_mc) {
-+ mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
-+ error = device_attach(&mc_dev->dev);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev,
-+ "device_attach() failed: %d\n",
-+ error);
-+ }
-+ } else {
-+ mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
-+ device_release_driver(&mc_dev->dev);
-+ }
-+ }
-+}
-+
-+/**
-+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
-+ *
-+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-+ * @driver_override: driver override to apply to new objects found in the
-+ * DPRC, or NULL, if none.
-+ * @obj_desc_array: array of device descriptors for child devices currently
-+ * present in the physical DPRC.
-+ * @num_child_objects_in_mc: number of entries in obj_desc_array
-+ *
-+ * Synchronizes the state of the Linux bus driver with the actual
-+ * state of the MC by adding objects that have been newly discovered
-+ * in the physical DPRC.
-+ */
-+static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
-+ const char *driver_override,
-+ struct fsl_mc_obj_desc *obj_desc_array,
-+ int num_child_objects_in_mc)
-+{
-+ int error;
-+ int i;
-+
-+ for (i = 0; i < num_child_objects_in_mc; i++) {
-+ struct fsl_mc_device *child_dev;
-+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
-+
-+ if (strlen(obj_desc->type) == 0)
-+ continue;
-+
-+ /*
-+ * Check if device is already known to Linux:
-+ */
-+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
-+ if (child_dev) {
-+ check_plugged_state_change(child_dev, obj_desc);
-+ put_device(&child_dev->dev);
-+ continue;
-+ }
-+
-+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
-+ driver_override, &child_dev);
-+ if (error < 0)
-+ continue;
-+ }
-+}
-+
-+/**
-+ * dprc_scan_objects - Discover objects in a DPRC
-+ *
-+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-+ * @driver_override: driver override to apply to new objects found in the
-+ * DPRC, or NULL, if none.
-+ * @total_irq_count: If the argument is provided, the function populates it with
-+ * the total number of IRQs created by objects in the DPRC.
-+ *
-+ * Detects objects added and removed from a DPRC and synchronizes the
-+ * state of the Linux bus driver with the MC by adding and removing
-+ * devices accordingly.
-+ * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
-+ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
-+ * All allocatable devices need to be probed before all non-allocatable
-+ * devices, to ensure that device drivers for non-allocatable
-+ * devices can allocate any type of allocatable devices.
-+ * That is, we need to ensure that the corresponding resource pools are
-+ * populated before they can get allocation requests from probe callbacks
-+ * of the device drivers for the non-allocatable devices.
-+ */
-+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
-+ const char *driver_override,
-+ unsigned int *total_irq_count)
-+{
-+ int num_child_objects;
-+ int dprc_get_obj_failures;
-+ int error;
-+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
-+ struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+
-+ error = dprc_get_obj_count(mc_bus_dev->mc_io,
-+ 0,
-+ mc_bus_dev->mc_handle,
-+ &num_child_objects);
-+ if (error < 0) {
-+ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
-+ error);
-+ return error;
-+ }
-+
-+ if (num_child_objects != 0) {
-+ int i;
-+
-+ child_obj_desc_array =
-+ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
-+ sizeof(*child_obj_desc_array),
-+ GFP_KERNEL);
-+ if (!child_obj_desc_array)
-+ return -ENOMEM;
-+
-+ /*
-+ * Discover objects currently present in the physical DPRC:
-+ */
-+ dprc_get_obj_failures = 0;
-+ for (i = 0; i < num_child_objects; i++) {
-+ struct fsl_mc_obj_desc *obj_desc =
-+ &child_obj_desc_array[i];
-+
-+ error = dprc_get_obj(mc_bus_dev->mc_io,
-+ 0,
-+ mc_bus_dev->mc_handle,
-+ i, obj_desc);
-+ if (error < 0) {
-+ dev_err(&mc_bus_dev->dev,
-+ "dprc_get_obj(i=%d) failed: %d\n",
-+ i, error);
-+ /*
-+ * Mark the obj entry as "invalid", by using the
-+ * empty string as obj type:
-+ */
-+ obj_desc->type[0] = '\0';
-+ obj_desc->id = error;
-+ dprc_get_obj_failures++;
-+ continue;
-+ }
-+
-+ /*
-+ * add a quirk for all versions of dpseci < 4.0...none
-+ * are coherent regardless of what the MC reports.
-+ */
-+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
-+ (obj_desc->ver_major < 4))
-+ obj_desc->flags |=
-+ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
-+
-+ irq_count += obj_desc->irq_count;
-+ dev_dbg(&mc_bus_dev->dev,
-+ "Discovered object: type %s, id %d\n",
-+ obj_desc->type, obj_desc->id);
-+ }
-+
-+ if (dprc_get_obj_failures != 0) {
-+ dev_err(&mc_bus_dev->dev,
-+ "%d out of %d devices could not be retrieved\n",
-+ dprc_get_obj_failures, num_child_objects);
-+ }
-+ }
-+
-+ /*
-+ * Allocate IRQ's before binding the scanned devices with their
-+ * respective drivers.
-+ */
-+ if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
-+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
-+ dev_warn(&mc_bus_dev->dev,
-+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
-+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
-+ }
-+
-+ error = fsl_mc_populate_irq_pool(mc_bus,
-+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
-+ if (error < 0)
-+ return error;
-+ }
-+
-+ if (total_irq_count)
-+ *total_irq_count = irq_count;
-+
-+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
-+ num_child_objects);
-+
-+ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array,
-+ num_child_objects);
-+
-+ if (child_obj_desc_array)
-+ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
-+ *
-+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-+ *
-+ * Scans the physical DPRC and synchronizes the state of the Linux
-+ * bus driver with the actual state of the MC by adding and removing
-+ * devices as appropriate.
-+ */
-+static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
-+{
-+ int error;
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+
-+ fsl_mc_init_all_resource_pools(mc_bus_dev);
-+
-+ /*
-+ * Discover objects in the DPRC:
-+ */
-+ mutex_lock(&mc_bus->scan_mutex);
-+ error = dprc_scan_objects(mc_bus_dev, NULL, NULL);
-+ mutex_unlock(&mc_bus->scan_mutex);
-+ if (error < 0) {
-+ fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
-+ *
-+ * @irq: IRQ number of the interrupt being handled
-+ * @arg: Pointer to device structure
-+ */
-+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
-+{
-+ return IRQ_WAKE_THREAD;
-+}
-+
-+/**
-+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
-+ *
-+ * @irq: IRQ number of the interrupt being handled
-+ * @arg: Pointer to device structure
-+ */
-+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
-+{
-+ int error;
-+ u32 status;
-+ struct device *dev = arg;
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
-+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
-+ struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
-+
-+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
-+ irq_num, smp_processor_id());
-+
-+ if (!(mc_dev->flags & FSL_MC_IS_DPRC))
-+ return IRQ_HANDLED;
-+
-+ mutex_lock(&mc_bus->scan_mutex);
-+ if (!msi_desc || msi_desc->irq != (u32)irq_num)
-+ goto out;
-+
-+ status = 0;
-+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
-+ &status);
-+ if (error < 0) {
-+ dev_err(dev,
-+ "dprc_get_irq_status() failed: %d\n", error);
-+ goto out;
-+ }
-+
-+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
-+ status);
-+ if (error < 0) {
-+ dev_err(dev,
-+ "dprc_clear_irq_status() failed: %d\n", error);
-+ goto out;
-+ }
-+
-+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
-+ DPRC_IRQ_EVENT_OBJ_REMOVED |
-+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
-+ DPRC_IRQ_EVENT_OBJ_DESTROYED |
-+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
-+ unsigned int irq_count;
-+
-+ error = dprc_scan_objects(mc_dev, NULL, &irq_count);
-+ if (error < 0) {
-+ /*
-+ * If the error is -ENXIO, we ignore it, as it indicates
-+ * that the object scan was aborted, as we detected that
-+ * an object was removed from the DPRC in the MC, while
-+ * we were scanning the DPRC.
-+ */
-+ if (error != -ENXIO) {
-+ dev_err(dev, "dprc_scan_objects() failed: %d\n",
-+ error);
-+ }
-+
-+ goto out;
-+ }
-+
-+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
-+ dev_warn(dev,
-+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
-+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
-+ }
-+ }
-+
-+out:
-+ mutex_unlock(&mc_bus->scan_mutex);
-+ return IRQ_HANDLED;
-+}
-+
-+/*
-+ * Disable and clear interrupt for a given DPRC object
-+ */
-+static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
-+{
-+ int error;
-+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
-+
-+ /*
-+ * Disable generation of interrupt, while we configure it:
-+ */
-+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev,
-+ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
-+ error);
-+ return error;
-+ }
-+
-+ /*
-+ * Disable all interrupt causes for the interrupt:
-+ */
-+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev,
-+ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
-+ error);
-+ return error;
-+ }
-+
-+ /*
-+ * Clear any leftover interrupts:
-+ */
-+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev,
-+ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
-+ error);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+
-+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
-+{
-+ int error;
-+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
-+
-+ /*
-+ * NOTE: devm_request_threaded_irq() invokes the device-specific
-+ * function that programs the MSI physically in the device
-+ */
-+ error = devm_request_threaded_irq(&mc_dev->dev,
-+ irq->msi_desc->irq,
-+ dprc_irq0_handler,
-+ dprc_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(&mc_dev->dev),
-+ &mc_dev->dev);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev,
-+ "devm_request_threaded_irq() failed: %d\n",
-+ error);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+
-+static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
-+{
-+ int error;
-+
-+ /*
-+ * Enable all interrupt causes for the interrupt:
-+ */
-+ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
-+ ~0x0u);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev,
-+ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
-+ error);
-+
-+ return error;
-+ }
-+
-+ /*
-+ * Enable generation of the interrupt:
-+ */
-+ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev,
-+ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
-+ error);
-+
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Setup interrupt for a given DPRC device
-+ */
-+static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
-+{
-+ int error;
-+
-+ error = fsl_mc_allocate_irqs(mc_dev);
-+ if (error < 0)
-+ return error;
-+
-+ error = disable_dprc_irq(mc_dev);
-+ if (error < 0)
-+ goto error_free_irqs;
-+
-+ error = register_dprc_irq_handler(mc_dev);
-+ if (error < 0)
-+ goto error_free_irqs;
-+
-+ error = enable_dprc_irq(mc_dev);
-+ if (error < 0)
-+ goto error_free_irqs;
-+
-+ return 0;
-+
-+error_free_irqs:
-+ fsl_mc_free_irqs(mc_dev);
-+ return error;
-+}
-+
-+/**
-+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
-+ *
-+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
-+ *
-+ * It opens the physical DPRC in the MC.
-+ * It scans the DPRC to discover the MC objects contained in it.
-+ * It creates the interrupt pool for the MC bus associated with the DPRC.
-+ * It configures the interrupts for the DPRC device itself.
-+ */
-+static int dprc_probe(struct fsl_mc_device *mc_dev)
-+{
-+ int error;
-+ size_t region_size;
-+ struct device *parent_dev = mc_dev->dev.parent;
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
-+ bool mc_io_created = false;
-+ bool msi_domain_set = false;
-+ u16 major_ver, minor_ver;
-+
-+ if (!is_fsl_mc_bus_dprc(mc_dev))
-+ return -EINVAL;
-+
-+ if (dev_get_msi_domain(&mc_dev->dev))
-+ return -EINVAL;
-+
-+ if (!mc_dev->mc_io) {
-+ /*
-+ * This is a child DPRC:
-+ */
-+ if (!dev_is_fsl_mc(parent_dev))
-+ return -EINVAL;
-+
-+ if (mc_dev->obj_desc.region_count == 0)
-+ return -EINVAL;
-+
-+ region_size = resource_size(mc_dev->regions);
-+
-+ error = fsl_create_mc_io(&mc_dev->dev,
-+ mc_dev->regions[0].start,
-+ region_size,
-+ NULL,
-+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
-+ &mc_dev->mc_io);
-+ if (error < 0)
-+ return error;
-+
-+ mc_io_created = true;
-+
-+ /*
-+ * Inherit parent MSI domain:
-+ */
-+ dev_set_msi_domain(&mc_dev->dev,
-+ dev_get_msi_domain(parent_dev));
-+ msi_domain_set = true;
-+ } else {
-+ /*
-+ * This is a root DPRC
-+ */
-+ struct irq_domain *mc_msi_domain;
-+
-+ if (dev_is_fsl_mc(parent_dev))
-+ return -EINVAL;
-+
-+ error = fsl_mc_find_msi_domain(parent_dev,
-+ &mc_msi_domain);
-+ if (error < 0) {
-+ dev_warn(&mc_dev->dev,
-+ "WARNING: MC bus without interrupt support\n");
-+ } else {
-+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
-+ msi_domain_set = true;
-+ }
-+ }
-+
-+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
-+ &mc_dev->mc_handle);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
-+ goto error_cleanup_msi_domain;
-+ }
-+
-+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ &mc_bus->dprc_attr);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
-+ error);
-+ goto error_cleanup_open;
-+ }
-+
-+ error = dprc_get_api_version(mc_dev->mc_io, 0,
-+ &major_ver,
-+ &minor_ver);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
-+ error);
-+ goto error_cleanup_open;
-+ }
-+
-+ if (major_ver < DPRC_MIN_VER_MAJOR ||
-+ (major_ver == DPRC_MIN_VER_MAJOR &&
-+ minor_ver < DPRC_MIN_VER_MINOR)) {
-+ dev_err(&mc_dev->dev,
-+ "ERROR: DPRC version %d.%d not supported\n",
-+ major_ver, minor_ver);
-+ error = -ENOTSUPP;
-+ goto error_cleanup_open;
-+ }
-+
-+ mutex_init(&mc_bus->scan_mutex);
-+
-+ /*
-+ * Discover MC objects in DPRC object:
-+ */
-+ error = dprc_scan_container(mc_dev);
-+ if (error < 0)
-+ goto error_cleanup_open;
-+
-+ /*
-+ * Configure interrupt for the DPRC object associated with this MC bus:
-+ */
-+ error = dprc_setup_irq(mc_dev);
-+ if (error < 0)
-+ goto error_cleanup_open;
-+
-+ dev_info(&mc_dev->dev, "DPRC device bound to driver");
-+ return 0;
-+
-+error_cleanup_open:
-+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-+
-+error_cleanup_msi_domain:
-+ if (msi_domain_set)
-+ dev_set_msi_domain(&mc_dev->dev, NULL);
-+
-+ if (mc_io_created) {
-+ fsl_destroy_mc_io(mc_dev->mc_io);
-+ mc_dev->mc_io = NULL;
-+ }
-+
-+ return error;
-+}
-+
-+/*
-+ * Tear down interrupt for a given DPRC object
-+ */
-+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
-+{
-+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
-+
-+ (void)disable_dprc_irq(mc_dev);
-+
-+ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
-+
-+ fsl_mc_free_irqs(mc_dev);
-+}
-+
-+/**
-+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
-+ *
-+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
-+ *
-+ * It removes the DPRC's child objects from Linux (not from the MC) and
-+ * closes the DPRC device in the MC.
-+ * It tears down the interrupts that were configured for the DPRC device.
-+ * It destroys the interrupt pool associated with this MC bus.
-+ */
-+static int dprc_remove(struct fsl_mc_device *mc_dev)
-+{
-+ int error;
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
-+
-+ if (!is_fsl_mc_bus_dprc(mc_dev))
-+ return -EINVAL;
-+ if (!mc_dev->mc_io)
-+ return -EINVAL;
-+
-+ if (!mc_bus->irq_resources)
-+ return -EINVAL;
-+
-+ if (dev_get_msi_domain(&mc_dev->dev))
-+ dprc_teardown_irq(mc_dev);
-+
-+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
-+
-+ if (dev_get_msi_domain(&mc_dev->dev)) {
-+ fsl_mc_cleanup_irq_pool(mc_bus);
-+ dev_set_msi_domain(&mc_dev->dev, NULL);
-+ }
-+
-+ fsl_mc_cleanup_all_resource_pools(mc_dev);
-+
-+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-+ if (error < 0)
-+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
-+
-+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
-+ fsl_destroy_mc_io(mc_dev->mc_io);
-+ mc_dev->mc_io = NULL;
-+ }
-+
-+ dev_info(&mc_dev->dev, "DPRC device unbound from driver");
-+ return 0;
-+}
-+
-+static const struct fsl_mc_device_id match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dprc"},
-+ {.vendor = 0x0},
-+};
-+
-+static struct fsl_mc_driver dprc_driver = {
-+ .driver = {
-+ .name = FSL_MC_DPRC_DRIVER_NAME,
-+ .owner = THIS_MODULE,
-+ .pm = NULL,
-+ },
-+ .match_id_table = match_id_table,
-+ .probe = dprc_probe,
-+ .remove = dprc_remove,
-+};
-+
-+int __init dprc_driver_init(void)
-+{
-+ return fsl_mc_driver_register(&dprc_driver);
-+}
-+
-+void dprc_driver_exit(void)
-+{
-+ fsl_mc_driver_unregister(&dprc_driver);
-+}
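
The dprc.c hunk that follows removes the staging copy of the DPRC command wrappers (dprc_open(), dprc_get_obj_count(), dprc_get_obj(), dprc_close(), ...). For orientation, they are normally used in the sequence sketched below, which mirrors what dprc_scan_objects() above does. This is an illustrative sketch only: example_list_dprc_objects is a hypothetical name, and it assumes the dprc_*() prototypes and struct fsl_mc_obj_desc are in scope, as they are inside the fsl-mc bus driver.

/* Hypothetical helper: open a DPRC and log its child objects. */
static int example_list_dprc_objects(struct fsl_mc_io *mc_io, int container_id)
{
	struct fsl_mc_obj_desc obj_desc;
	int obj_count, i, err;
	u16 token;

	err = dprc_open(mc_io, 0, container_id, &token);
	if (err)
		return err;

	err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
	if (!err) {
		for (i = 0; i < obj_count; i++) {
			/* Descriptors are fetched one at a time, index < obj_count. */
			if (dprc_get_obj(mc_io, 0, token, i, &obj_desc))
				continue;

			pr_info("dprc object: type %s, id %d\n",
				obj_desc.type, obj_desc.id);
		}
	}

	dprc_close(mc_io, 0, token);
	return err;
}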
---- a/drivers/staging/fsl-mc/bus/dprc.c
-+++ /dev/null
-@@ -1,757 +0,0 @@
--// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- */
--#include <linux/kernel.h>
--#include "../include/mc.h"
--#include "dprc.h"
--
--#include "dprc-cmd.h"
--
--/**
-- * dprc_open() - Open DPRC object for use
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @container_id: Container ID to open
-- * @token: Returned token of DPRC object
-- *
-- * Return: '0' on Success; Error code otherwise.
-- *
-- * @warning Required before any operation on the object.
-- */
--int dprc_open(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int container_id,
-- u16 *token)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_open *cmd_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
-- 0);
-- cmd_params = (struct dprc_cmd_open *)cmd.params;
-- cmd_params->container_id = cpu_to_le32(container_id);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- *token = mc_cmd_hdr_read_token(&cmd);
--
-- return 0;
--}
--EXPORT_SYMBOL(dprc_open);
--
--/**
-- * dprc_close() - Close the control session of the object
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- *
-- * After this function is called, no further operations are
-- * allowed on the object without opening a new control session.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_close(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
-- token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dprc_close);
--
--/**
-- * dprc_get_irq() - Get IRQ information from the DPRC.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: The interrupt index to configure
-- * @type: Interrupt type: 0 represents message interrupt
-- * type (both irq_addr and irq_val are valid)
-- * @irq_cfg: IRQ attributes
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- int *type,
-- struct dprc_irq_cfg *irq_cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_irq *cmd_params;
-- struct dprc_rsp_get_irq *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_get_irq *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_irq *)cmd.params;
-- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
-- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
-- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
-- *type = le32_to_cpu(rsp_params->type);
--
-- return 0;
--}
--
--/**
-- * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: Identifies the interrupt index to configure
-- * @irq_cfg: IRQ configuration
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_set_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- struct dprc_irq_cfg *irq_cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_set_irq *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
-- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-- cmd_params->irq_index = irq_index;
-- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
-- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dprc_get_irq_enable() - Get overall interrupt state.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: The interrupt index to configure
-- * @en: Returned interrupt state - enable = 1, disable = 0
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 *en)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_irq_enable *cmd_params;
-- struct dprc_rsp_get_irq_enable *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_get_irq_enable *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_irq_enable *)cmd.params;
-- *en = rsp_params->enabled & DPRC_ENABLE;
--
-- return 0;
--}
--
--/**
-- * dprc_set_irq_enable() - Set overall interrupt state.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: The interrupt index to configure
-- * @en: Interrupt state - enable = 1, disable = 0
-- *
-- * Allows GPP software to control when interrupts are generated.
-- * Each interrupt can have up to 32 causes. The enable/disable controls the
-- * overall interrupt state. If the interrupt is disabled, no causes will cause
-- * an interrupt.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 en)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_set_irq_enable *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
-- cmd_params->enable = en & DPRC_ENABLE;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dprc_get_irq_mask() - Get interrupt mask.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: The interrupt index to configure
-- * @mask: Returned event mask to trigger interrupt
-- *
-- * Every interrupt can have up to 32 causes and the interrupt model supports
-- * masking/unmasking each cause independently
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *mask)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_irq_mask *cmd_params;
-- struct dprc_rsp_get_irq_mask *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_get_irq_mask *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_irq_mask *)cmd.params;
-- *mask = le32_to_cpu(rsp_params->mask);
--
-- return 0;
--}
--
--/**
-- * dprc_set_irq_mask() - Set interrupt mask.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: The interrupt index to configure
-- * @mask: event mask to trigger interrupt;
-- * each bit:
-- * 0 = ignore event
-- * 1 = consider event for asserting irq
-- *
-- * Every interrupt can have up to 32 causes and the interrupt model supports
-- * masking/unmasking each cause independently
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 mask)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_set_irq_mask *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
-- cmd_params->mask = cpu_to_le32(mask);
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dprc_get_irq_status() - Get the current status of any pending interrupts.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: The interrupt index to configure
-- * @status: Returned interrupts status - one bit per cause:
-- * 0 = no interrupt pending
-- * 1 = interrupt pending
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *status)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_irq_status *cmd_params;
-- struct dprc_rsp_get_irq_status *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
-- cmd_params->status = cpu_to_le32(*status);
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
-- *status = le32_to_cpu(rsp_params->status);
--
-- return 0;
--}
--
--/**
-- * dprc_clear_irq_status() - Clear a pending interrupt's status
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @irq_index: The interrupt index to configure
-- * @status: bits to clear (W1C) - one bit per cause:
-- * 0 = don't change
-- * 1 = clear status bit
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 status)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_clear_irq_status *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
-- cmd_params->status = cpu_to_le32(status);
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dprc_get_attributes() - Obtains container attributes
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @attr: Returned container attributes
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_attributes(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dprc_attributes *attr)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_rsp_get_attributes *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
-- cmd_flags,
-- token);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
-- attr->container_id = le32_to_cpu(rsp_params->container_id);
-- attr->icid = le16_to_cpu(rsp_params->icid);
-- attr->options = le32_to_cpu(rsp_params->options);
-- attr->portal_id = le32_to_cpu(rsp_params->portal_id);
--
-- return 0;
--}
--
--/**
-- * dprc_get_obj_count() - Obtains the number of objects in the DPRC
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @obj_count: Number of objects assigned to the DPRC
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int *obj_count)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_rsp_get_obj_count *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
-- cmd_flags, token);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
-- *obj_count = le32_to_cpu(rsp_params->obj_count);
--
-- return 0;
--}
--EXPORT_SYMBOL(dprc_get_obj_count);
--
--/**
-- * dprc_get_obj() - Get general information on an object
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @obj_index: Index of the object to be queried (< obj_count)
-- * @obj_desc: Returns the requested object descriptor
-- *
-- * The object descriptors are retrieved one by one by incrementing
-- * obj_index up to (not including) the value of obj_count returned
-- * from dprc_get_obj_count(). dprc_get_obj_count() must
-- * be called prior to dprc_get_obj().
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_obj(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int obj_index,
-- struct fsl_mc_obj_desc *obj_desc)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_obj *cmd_params;
-- struct dprc_rsp_get_obj *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
-- cmd_params->obj_index = cpu_to_le32(obj_index);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
-- obj_desc->id = le32_to_cpu(rsp_params->id);
-- obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
-- obj_desc->irq_count = rsp_params->irq_count;
-- obj_desc->region_count = rsp_params->region_count;
-- obj_desc->state = le32_to_cpu(rsp_params->state);
-- obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
-- obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
-- obj_desc->flags = le16_to_cpu(rsp_params->flags);
-- strncpy(obj_desc->type, rsp_params->type, 16);
-- obj_desc->type[15] = '\0';
-- strncpy(obj_desc->label, rsp_params->label, 16);
-- obj_desc->label[15] = '\0';
-- return 0;
--}
--EXPORT_SYMBOL(dprc_get_obj);
--
--/**
-- * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @obj_type: Type of the object to set its IRQ
-- * @obj_id: ID of the object to set its IRQ
-- * @irq_index: The interrupt index to configure
-- * @irq_cfg: IRQ configuration
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 irq_index,
-- struct dprc_irq_cfg *irq_cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_set_obj_irq *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
-- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-- cmd_params->irq_index = irq_index;
-- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
-- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
-- cmd_params->obj_id = cpu_to_le32(obj_id);
-- strncpy(cmd_params->obj_type, obj_type, 16);
-- cmd_params->obj_type[15] = '\0';
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dprc_set_obj_irq);
--
--/**
-- * dprc_get_obj_irq() - Get IRQ information from object.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @obj_type:	Type of the object to get its IRQ
-- * @obj_id: ID of the object to get its IRQ
-- * @irq_index: The interrupt index to configure
-- * @type: Interrupt type: 0 represents message interrupt
-- * type (both irq_addr and irq_val are valid)
-- * @irq_cfg: The returned IRQ attributes
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 irq_index,
-- int *type,
-- struct dprc_irq_cfg *irq_cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_obj_irq *cmd_params;
-- struct dprc_rsp_get_obj_irq *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_get_obj_irq *)cmd.params;
-- cmd_params->obj_id = cpu_to_le32(obj_id);
-- cmd_params->irq_index = irq_index;
-- strncpy(cmd_params->obj_type, obj_type, 16);
-- cmd_params->obj_type[15] = '\0';
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_obj_irq *)cmd.params;
-- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
-- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
-- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
-- *type = le32_to_cpu(rsp_params->type);
--
-- return 0;
--}
--EXPORT_SYMBOL(dprc_get_obj_irq);
--
--/**
-- * dprc_get_res_count() - Obtains the number of free resources that are assigned
-- * to this container, by pool type
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @type: pool type
-- * @res_count: Returned number of free resources of the given
-- * resource type that are assigned to this DPRC
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_res_count(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *type,
-- int *res_count)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_res_count *cmd_params;
-- struct dprc_rsp_get_res_count *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_get_res_count *)cmd.params;
-- strncpy(cmd_params->type, type, 16);
-- cmd_params->type[15] = '\0';
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_res_count *)cmd.params;
-- *res_count = le32_to_cpu(rsp_params->res_count);
--
-- return 0;
--}
--EXPORT_SYMBOL(dprc_get_res_count);
--
--/**
-- * dprc_get_obj_region() - Get region information for a specified object.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @obj_type:	Object type as returned in dprc_get_obj()
-- * @obj_id: Unique object instance as returned in dprc_get_obj()
-- * @region_index: The specific region to query
-- * @region_desc: Returns the requested region descriptor
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 region_index,
-- struct dprc_region_desc *region_desc)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_obj_region *cmd_params;
-- struct dprc_rsp_get_obj_region *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
-- cmd_params->obj_id = cpu_to_le32(obj_id);
-- cmd_params->region_index = region_index;
-- strncpy(cmd_params->obj_type, obj_type, 16);
-- cmd_params->obj_type[15] = '\0';
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
-- region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
-- region_desc->size = le32_to_cpu(rsp_params->size);
--
-- return 0;
--}
--EXPORT_SYMBOL(dprc_get_obj_region);
--
--/**
-- * dprc_get_api_version - Get Data Path Resource Container API version
-- * @mc_io:	Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @major_ver: Major version of Data Path Resource Container API
-- * @minor_ver: Minor version of Data Path Resource Container API
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_api_version(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 *major_ver,
-- u16 *minor_ver)
--{
-- struct mc_command cmd = { 0 };
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
-- cmd_flags, 0);
--
-- /* send command to mc */
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
--
-- return 0;
--}
--
--/**
-- * dprc_get_container_id - Get container ID associated with a given portal.
-- * @mc_io:	Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @container_id: Requested container id
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_get_container_id(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int *container_id)
--{
-- struct mc_command cmd = { 0 };
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
-- cmd_flags,
-- 0);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- *container_id = (int)mc_cmd_read_object_id(&cmd);
--
-- return 0;
--}
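The removed staging copy of dprc.c ends here; the hunk below re-adds the same command wrappers under drivers/bus/fsl-mc/. Both copies follow one prepare/send/parse pattern around mc_encode_cmd_header() and mc_send_command(); the only naming difference is that the staging copy uses struct mc_command while the re-added copy uses struct fsl_mc_command. A minimal sketch of that pattern follows; the command ID and response layout are hypothetical placeholders, not identifiers from this patch.

/* Pattern sketch only: DPRC_CMDID_EXAMPLE and struct dprc_rsp_example are
 * hypothetical; real wrappers take their command IDs and layouts from
 * fsl-mc-private.h, as the functions above and below do.
 */
#define DPRC_CMDID_EXAMPLE	0x000

struct dprc_rsp_example {
	__le32 val;
};

static int dprc_example_query(struct fsl_mc_io *mc_io, u32 cmd_flags,
			      u16 token, u32 *out_val)
{
	struct fsl_mc_command cmd = { 0 };
	struct dprc_rsp_example *rsp_params;
	int err;

	/* prepare command: encode command ID, flags and object token */
	cmd.header = mc_encode_cmd_header(DPRC_CMDID_EXAMPLE, cmd_flags, token);

	/* send command to the MC firmware over the portal */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* retrieve response parameters from the shared params area */
	rsp_params = (struct dprc_rsp_example *)cmd.params;
	*out_val = le32_to_cpu(rsp_params->val);

	return 0;
}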
---- /dev/null
-+++ b/drivers/bus/fsl-mc/dprc.c
-@@ -0,0 +1,576 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ */
-+#include <linux/kernel.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+/**
-+ * dprc_open() - Open DPRC object for use
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @container_id: Container ID to open
-+ * @token: Returned token of DPRC object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Required before any operation on the object.
-+ */
-+int dprc_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int container_id,
-+ u16 *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_open *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
-+ 0);
-+ cmd_params = (struct dprc_cmd_open *)cmd.params;
-+ cmd_params->container_id = cpu_to_le32(container_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dprc_open);
-+
-+/**
-+ * dprc_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dprc_close);
-+
-+/**
-+ * dprc_reset_container - Reset child container.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @child_container_id: ID of the container to reset
-+ *
-+ * In case a software context crashes or becomes non-responsive, the parent
-+ * may wish to reset its resources container before the software context is
-+ * restarted.
-+ *
-+ * This routine informs all objects assigned to the child container that the
-+ * container is being reset, so they may perform any cleanup operations that are
-+ * needed. All objects handles that were owned by the child container shall be
-+ * closed.
-+ *
-+ * Note that such request may be submitted even if the child software context
-+ * has not crashed, but the resulting object cleanup operations will not be
-+ * aware of that.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_reset_container(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int child_container_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_reset_container *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
-+ cmd_flags, token);
-+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
-+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dprc_reset_container);
-+
-+/**
-+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dprc_irq_cfg *irq_cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_set_irq *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
-+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
-+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprc_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no causes will cause
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_set_irq_enable *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags, token);
-+ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
-+ cmd_params->enable = en & DPRC_ENABLE;
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprc_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting irq
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_set_irq_mask *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
-+ cmd_flags, token);
-+ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprc_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_get_irq_status *cmd_params;
-+ struct dprc_rsp_get_irq_status *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
-+ cmd_flags, token);
-+ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprc_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_clear_irq_status *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags, token);
-+ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(status);
-+ cmd_params->irq_index = irq_index;
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dprc_get_attributes() - Obtains container attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @attr:	Returned container attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dprc_attributes *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_rsp_get_attributes *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
-+ attr->container_id = le32_to_cpu(rsp_params->container_id);
-+ attr->icid = le32_to_cpu(rsp_params->icid);
-+ attr->options = le32_to_cpu(rsp_params->options);
-+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_count: Number of objects assigned to the DPRC
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *obj_count)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_rsp_get_obj_count *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
-+ *obj_count = le32_to_cpu(rsp_params->obj_count);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dprc_get_obj_count);
-+
-+/**
-+ * dprc_get_obj() - Get general information on an object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_index: Index of the object to be queried (< obj_count)
-+ * @obj_desc: Returns the requested object descriptor
-+ *
-+ * The object descriptors are retrieved one by one by incrementing
-+ * obj_index up to (not including) the value of obj_count returned
-+ * from dprc_get_obj_count(). dprc_get_obj_count() must
-+ * be called prior to dprc_get_obj().
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_obj(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int obj_index,
-+ struct fsl_mc_obj_desc *obj_desc)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_get_obj *cmd_params;
-+ struct dprc_rsp_get_obj *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
-+ cmd_params->obj_index = cpu_to_le32(obj_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
-+ obj_desc->id = le32_to_cpu(rsp_params->id);
-+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
-+ obj_desc->irq_count = rsp_params->irq_count;
-+ obj_desc->region_count = rsp_params->region_count;
-+ obj_desc->state = le32_to_cpu(rsp_params->state);
-+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
-+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
-+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
-+ strncpy(obj_desc->type, rsp_params->type, 16);
-+ obj_desc->type[15] = '\0';
-+ strncpy(obj_desc->label, rsp_params->label, 16);
-+ obj_desc->label[15] = '\0';
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dprc_get_obj);
-+
-+/**
-+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_type: Type of the object to set its IRQ
-+ * @obj_id: ID of the object to set its IRQ
-+ * @irq_index: The interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ char *obj_type,
-+ int obj_id,
-+ u8 irq_index,
-+ struct dprc_irq_cfg *irq_cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_set_obj_irq *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
-+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
-+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
-+ cmd_params->obj_id = cpu_to_le32(obj_id);
-+ strncpy(cmd_params->obj_type, obj_type, 16);
-+ cmd_params->obj_type[15] = '\0';
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
-+
-+/**
-+ * dprc_get_obj_region() - Get region information for a specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_type:	Object type as returned in dprc_get_obj()
-+ * @obj_id: Unique object instance as returned in dprc_get_obj()
-+ * @region_index: The specific region to query
-+ * @region_desc: Returns the requested region descriptor
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ char *obj_type,
-+ int obj_id,
-+ u8 region_index,
-+ struct dprc_region_desc *region_desc)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dprc_cmd_get_obj_region *cmd_params;
-+ struct dprc_rsp_get_obj_region *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
-+ cmd_flags, token);
-+ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
-+ cmd_params->obj_id = cpu_to_le32(obj_id);
-+ cmd_params->region_index = region_index;
-+ strncpy(cmd_params->obj_type, obj_type, 16);
-+ cmd_params->obj_type[15] = '\0';
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
-+ region_desc->base_offset = le32_to_cpu(rsp_params->base_offset);
-+ region_desc->size = le32_to_cpu(rsp_params->size);
-+ region_desc->type = rsp_params->type;
-+ region_desc->flags = le32_to_cpu(rsp_params->flags);
-+ region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(dprc_get_obj_region);
-+
-+/**
-+ * dprc_get_api_version - Get Data Path Resource Container API version
-+ * @mc_io:	Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of Data Path Resource Container API
-+ * @minor_ver: Minor version of Data Path Resource Container API
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
-+ cmd_flags, 0);
-+
-+ /* send command to mc */
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dprc_get_container_id - Get container ID associated with a given portal.
-+ * @mc_io:	Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @container_id: Requested container id
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_container_id(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int *container_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
-+ cmd_flags,
-+ 0);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *container_id = (int)mc_cmd_read_object_id(&cmd);
-+
-+ return 0;
-+}
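Taken together, the wrappers re-added above give the bus driver its container enumeration path. A minimal usage sketch follows, assuming a valid struct fsl_mc_io portal and container ID supplied by the caller; it only chains calls whose signatures appear in this hunk and is not the driver's actual scan routine.

/* Sketch: list the objects in a container via the wrappers above.
 * 'mc_io' and 'container_id' are assumed inputs; error handling is
 * reduced to the minimum needed to release the token.
 */
static int example_list_objects(struct fsl_mc_io *mc_io, int container_id)
{
	struct fsl_mc_obj_desc obj_desc;
	int obj_count, i, err;
	u16 token;

	err = dprc_open(mc_io, 0, container_id, &token);
	if (err)
		return err;

	err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
	if (err)
		goto out_close;

	for (i = 0; i < obj_count; i++) {
		if (dprc_get_obj(mc_io, 0, token, i, &obj_desc))
			continue;
		pr_info("obj %d: %s.%d\n", i, obj_desc.type, obj_desc.id);
	}

out_close:
	dprc_close(mc_io, 0, token);
	return err;
}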
---- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
-+++ /dev/null
-@@ -1,663 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/*
-- * fsl-mc object allocator driver
-- *
-- * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
-- *
-- */
--
--#include <linux/module.h>
--#include <linux/msi.h>
--#include "../include/mc.h"
--
--#include "fsl-mc-private.h"
--
--static bool __must_check fsl_mc_is_allocatable(const char *obj_type)
--{
-- return strcmp(obj_type, "dpbp") == 0 ||
-- strcmp(obj_type, "dpmcp") == 0 ||
-- strcmp(obj_type, "dpcon") == 0;
--}
--
--/**
-- * fsl_mc_resource_pool_add_device - add allocatable object to a resource
-- * pool of a given fsl-mc bus
-- *
-- * @mc_bus: pointer to the fsl-mc bus
-- * @pool_type: pool type
-- * @mc_dev: pointer to allocatable fsl-mc device
-- */
--static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
-- *mc_bus,
-- enum fsl_mc_pool_type
-- pool_type,
-- struct fsl_mc_device
-- *mc_dev)
--{
-- struct fsl_mc_resource_pool *res_pool;
-- struct fsl_mc_resource *resource;
-- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-- int error = -EINVAL;
--
-- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
-- goto out;
-- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
-- goto out;
-- if (WARN_ON(mc_dev->resource))
-- goto out;
--
-- res_pool = &mc_bus->resource_pools[pool_type];
-- if (WARN_ON(res_pool->type != pool_type))
-- goto out;
-- if (WARN_ON(res_pool->mc_bus != mc_bus))
-- goto out;
--
-- mutex_lock(&res_pool->mutex);
--
-- if (WARN_ON(res_pool->max_count < 0))
-- goto out_unlock;
-- if (WARN_ON(res_pool->free_count < 0 ||
-- res_pool->free_count > res_pool->max_count))
-- goto out_unlock;
--
-- resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
-- GFP_KERNEL);
-- if (!resource) {
-- error = -ENOMEM;
-- dev_err(&mc_bus_dev->dev,
-- "Failed to allocate memory for fsl_mc_resource\n");
-- goto out_unlock;
-- }
--
-- resource->type = pool_type;
-- resource->id = mc_dev->obj_desc.id;
-- resource->data = mc_dev;
-- resource->parent_pool = res_pool;
-- INIT_LIST_HEAD(&resource->node);
-- list_add_tail(&resource->node, &res_pool->free_list);
-- mc_dev->resource = resource;
-- res_pool->free_count++;
-- res_pool->max_count++;
-- error = 0;
--out_unlock:
-- mutex_unlock(&res_pool->mutex);
--out:
-- return error;
--}
--
--/**
-- * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
-- * resource pool
-- *
-- * @mc_dev: pointer to allocatable fsl-mc device
-- *
-- * It permanently removes an allocatable fsl-mc device from the resource
-- * pool. It's an error if the device is in use.
-- */
--static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
-- *mc_dev)
--{
-- struct fsl_mc_device *mc_bus_dev;
-- struct fsl_mc_bus *mc_bus;
-- struct fsl_mc_resource_pool *res_pool;
-- struct fsl_mc_resource *resource;
-- int error = -EINVAL;
--
-- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
-- goto out;
--
-- resource = mc_dev->resource;
-- if (WARN_ON(!resource || resource->data != mc_dev))
-- goto out;
--
-- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-- mc_bus = to_fsl_mc_bus(mc_bus_dev);
-- res_pool = resource->parent_pool;
-- if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type]))
-- goto out;
--
-- mutex_lock(&res_pool->mutex);
--
-- if (WARN_ON(res_pool->max_count <= 0))
-- goto out_unlock;
-- if (WARN_ON(res_pool->free_count <= 0 ||
-- res_pool->free_count > res_pool->max_count))
-- goto out_unlock;
--
-- /*
-- * If the device is currently allocated, its resource is not
-- * in the free list and thus, the device cannot be removed.
-- */
-- if (list_empty(&resource->node)) {
-- error = -EBUSY;
-- dev_err(&mc_bus_dev->dev,
-- "Device %s cannot be removed from resource pool\n",
-- dev_name(&mc_dev->dev));
-- goto out_unlock;
-- }
--
-- list_del_init(&resource->node);
-- res_pool->free_count--;
-- res_pool->max_count--;
--
-- devm_kfree(&mc_bus_dev->dev, resource);
-- mc_dev->resource = NULL;
-- error = 0;
--out_unlock:
-- mutex_unlock(&res_pool->mutex);
--out:
-- return error;
--}
--
--static const char *const fsl_mc_pool_type_strings[] = {
-- [FSL_MC_POOL_DPMCP] = "dpmcp",
-- [FSL_MC_POOL_DPBP] = "dpbp",
-- [FSL_MC_POOL_DPCON] = "dpcon",
-- [FSL_MC_POOL_IRQ] = "irq",
--};
--
--static int __must_check object_type_to_pool_type(const char *object_type,
-- enum fsl_mc_pool_type
-- *pool_type)
--{
-- unsigned int i;
--
-- for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
-- if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
-- *pool_type = i;
-- return 0;
-- }
-- }
--
-- return -EINVAL;
--}
--
--int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
-- enum fsl_mc_pool_type pool_type,
-- struct fsl_mc_resource **new_resource)
--{
-- struct fsl_mc_resource_pool *res_pool;
-- struct fsl_mc_resource *resource;
-- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-- int error = -EINVAL;
--
-- BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
-- FSL_MC_NUM_POOL_TYPES);
--
-- *new_resource = NULL;
-- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
-- goto out;
--
-- res_pool = &mc_bus->resource_pools[pool_type];
-- if (WARN_ON(res_pool->mc_bus != mc_bus))
-- goto out;
--
-- mutex_lock(&res_pool->mutex);
-- resource = list_first_entry_or_null(&res_pool->free_list,
-- struct fsl_mc_resource, node);
--
-- if (!resource) {
-- WARN_ON(res_pool->free_count != 0);
-- error = -ENXIO;
-- dev_err(&mc_bus_dev->dev,
-- "No more resources of type %s left\n",
-- fsl_mc_pool_type_strings[pool_type]);
-- goto out_unlock;
-- }
--
-- if (WARN_ON(resource->type != pool_type))
-- goto out_unlock;
-- if (WARN_ON(resource->parent_pool != res_pool))
-- goto out_unlock;
-- if (WARN_ON(res_pool->free_count <= 0 ||
-- res_pool->free_count > res_pool->max_count))
-- goto out_unlock;
--
-- list_del_init(&resource->node);
--
-- res_pool->free_count--;
-- error = 0;
--out_unlock:
-- mutex_unlock(&res_pool->mutex);
-- *new_resource = resource;
--out:
-- return error;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
--
--void fsl_mc_resource_free(struct fsl_mc_resource *resource)
--{
-- struct fsl_mc_resource_pool *res_pool;
--
-- res_pool = resource->parent_pool;
-- if (WARN_ON(resource->type != res_pool->type))
-- return;
--
-- mutex_lock(&res_pool->mutex);
-- if (WARN_ON(res_pool->free_count < 0 ||
-- res_pool->free_count >= res_pool->max_count))
-- goto out_unlock;
--
-- if (WARN_ON(!list_empty(&resource->node)))
-- goto out_unlock;
--
-- list_add_tail(&resource->node, &res_pool->free_list);
-- res_pool->free_count++;
--out_unlock:
-- mutex_unlock(&res_pool->mutex);
--}
--EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
--
--/**
-- * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
-- * pool type from a given fsl-mc bus instance
-- *
-- * @mc_dev: fsl-mc device which is used in conjunction with the
-- * allocated object
-- * @pool_type: pool type
-- * @new_mc_dev: pointer to area where the pointer to the allocated device
-- * is to be returned
-- *
-- * Allocatable objects are always used in conjunction with some functional
-- * device. This function allocates an object of the specified type from
-- * the DPRC containing the functional device.
-- *
-- * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
-- * portals are allocated using fsl_mc_portal_allocate(), instead of
-- * this function.
-- */
--int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
-- enum fsl_mc_pool_type pool_type,
-- struct fsl_mc_device **new_mc_adev)
--{
-- struct fsl_mc_device *mc_bus_dev;
-- struct fsl_mc_bus *mc_bus;
-- struct fsl_mc_device *mc_adev;
-- int error = -EINVAL;
-- struct fsl_mc_resource *resource = NULL;
--
-- *new_mc_adev = NULL;
-- if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC))
-- goto error;
--
-- if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
-- goto error;
--
-- if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP))
-- goto error;
--
-- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-- mc_bus = to_fsl_mc_bus(mc_bus_dev);
-- error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
-- if (error < 0)
-- goto error;
--
-- mc_adev = resource->data;
-- if (WARN_ON(!mc_adev))
-- goto error;
--
-- *new_mc_adev = mc_adev;
-- return 0;
--error:
-- if (resource)
-- fsl_mc_resource_free(resource);
--
-- return error;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
--
--/**
-- * fsl_mc_object_free - Returns an fsl-mc object to the resource
-- * pool where it came from.
-- * @mc_adev: Pointer to the fsl-mc device
-- */
--void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
--{
-- struct fsl_mc_resource *resource;
--
-- resource = mc_adev->resource;
-- if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP))
-- return;
-- if (WARN_ON(resource->data != mc_adev))
-- return;
--
-- fsl_mc_resource_free(resource);
--}
--EXPORT_SYMBOL_GPL(fsl_mc_object_free);
--
--/*
-- * A DPRC and the devices in the DPRC all share the same GIC-ITS device
-- * ID. A block of IRQs is pre-allocated and maintained in a pool
-- * from which devices can allocate them when needed.
-- */
--
--/*
-- * Initialize the interrupt pool associated with an fsl-mc bus.
-- * It allocates a block of IRQs from the GIC-ITS.
-- */
--int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
-- unsigned int irq_count)
--{
-- unsigned int i;
-- struct msi_desc *msi_desc;
-- struct fsl_mc_device_irq *irq_resources;
-- struct fsl_mc_device_irq *mc_dev_irq;
-- int error;
-- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-- struct fsl_mc_resource_pool *res_pool =
-- &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
--
-- if (WARN_ON(irq_count == 0 ||
-- irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS))
-- return -EINVAL;
--
-- error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
-- if (error < 0)
-- return error;
--
-- irq_resources = devm_kzalloc(&mc_bus_dev->dev,
-- sizeof(*irq_resources) * irq_count,
-- GFP_KERNEL);
-- if (!irq_resources) {
-- error = -ENOMEM;
-- goto cleanup_msi_irqs;
-- }
--
-- for (i = 0; i < irq_count; i++) {
-- mc_dev_irq = &irq_resources[i];
--
-- /*
-- * NOTE: This mc_dev_irq's MSI addr/value pair will be set
-- * by the fsl_mc_msi_write_msg() callback
-- */
-- mc_dev_irq->resource.type = res_pool->type;
-- mc_dev_irq->resource.data = mc_dev_irq;
-- mc_dev_irq->resource.parent_pool = res_pool;
-- INIT_LIST_HEAD(&mc_dev_irq->resource.node);
-- list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
-- }
--
-- for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
-- mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
-- mc_dev_irq->msi_desc = msi_desc;
-- mc_dev_irq->resource.id = msi_desc->irq;
-- }
--
-- res_pool->max_count = irq_count;
-- res_pool->free_count = irq_count;
-- mc_bus->irq_resources = irq_resources;
-- return 0;
--
--cleanup_msi_irqs:
-- fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
-- return error;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
--
--/**
-- * Teardown the interrupt pool associated with an fsl-mc bus.
-- * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
-- */
--void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
--{
-- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-- struct fsl_mc_resource_pool *res_pool =
-- &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
--
-- if (WARN_ON(!mc_bus->irq_resources))
-- return;
--
-- if (WARN_ON(res_pool->max_count == 0))
-- return;
--
-- if (WARN_ON(res_pool->free_count != res_pool->max_count))
-- return;
--
-- INIT_LIST_HEAD(&res_pool->free_list);
-- res_pool->max_count = 0;
-- res_pool->free_count = 0;
-- mc_bus->irq_resources = NULL;
-- fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
--}
--EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
--
--/**
-- * Allocate the IRQs required by a given fsl-mc device.
-- */
--int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
--{
-- int i;
-- int irq_count;
-- int res_allocated_count = 0;
-- int error = -EINVAL;
-- struct fsl_mc_device_irq **irqs = NULL;
-- struct fsl_mc_bus *mc_bus;
-- struct fsl_mc_resource_pool *res_pool;
--
-- if (WARN_ON(mc_dev->irqs))
-- return -EINVAL;
--
-- irq_count = mc_dev->obj_desc.irq_count;
-- if (WARN_ON(irq_count == 0))
-- return -EINVAL;
--
-- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
-- mc_bus = to_fsl_mc_bus(mc_dev);
-- else
-- mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
--
-- if (WARN_ON(!mc_bus->irq_resources))
-- return -EINVAL;
--
-- res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
-- if (res_pool->free_count < irq_count) {
-- dev_err(&mc_dev->dev,
-- "Not able to allocate %u irqs for device\n", irq_count);
-- return -ENOSPC;
-- }
--
-- irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]),
-- GFP_KERNEL);
-- if (!irqs)
-- return -ENOMEM;
--
-- for (i = 0; i < irq_count; i++) {
-- struct fsl_mc_resource *resource;
--
-- error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
-- &resource);
-- if (error < 0)
-- goto error_resource_alloc;
--
-- irqs[i] = to_fsl_mc_irq(resource);
-- res_allocated_count++;
--
-- WARN_ON(irqs[i]->mc_dev);
-- irqs[i]->mc_dev = mc_dev;
-- irqs[i]->dev_irq_index = i;
-- }
--
-- mc_dev->irqs = irqs;
-- return 0;
--
--error_resource_alloc:
-- for (i = 0; i < res_allocated_count; i++) {
-- irqs[i]->mc_dev = NULL;
-- fsl_mc_resource_free(&irqs[i]->resource);
-- }
--
-- return error;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
--
--/*
-- * Frees the IRQs that were allocated for an fsl-mc device.
-- */
--void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
--{
-- int i;
-- int irq_count;
-- struct fsl_mc_bus *mc_bus;
-- struct fsl_mc_device_irq **irqs = mc_dev->irqs;
--
-- if (WARN_ON(!irqs))
-- return;
--
-- irq_count = mc_dev->obj_desc.irq_count;
--
-- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
-- mc_bus = to_fsl_mc_bus(mc_dev);
-- else
-- mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
--
-- if (WARN_ON(!mc_bus->irq_resources))
-- return;
--
-- for (i = 0; i < irq_count; i++) {
-- WARN_ON(!irqs[i]->mc_dev);
-- irqs[i]->mc_dev = NULL;
-- fsl_mc_resource_free(&irqs[i]->resource);
-- }
--
-- mc_dev->irqs = NULL;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
--
--void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
--{
-- int pool_type;
-- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
--
-- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
-- struct fsl_mc_resource_pool *res_pool =
-- &mc_bus->resource_pools[pool_type];
--
-- res_pool->type = pool_type;
-- res_pool->max_count = 0;
-- res_pool->free_count = 0;
-- res_pool->mc_bus = mc_bus;
-- INIT_LIST_HEAD(&res_pool->free_list);
-- mutex_init(&res_pool->mutex);
-- }
--}
--
--static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
-- enum fsl_mc_pool_type pool_type)
--{
-- struct fsl_mc_resource *resource;
-- struct fsl_mc_resource *next;
-- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-- struct fsl_mc_resource_pool *res_pool =
-- &mc_bus->resource_pools[pool_type];
-- int free_count = 0;
--
-- WARN_ON(res_pool->type != pool_type);
-- WARN_ON(res_pool->free_count != res_pool->max_count);
--
-- list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
-- free_count++;
-- WARN_ON(resource->type != res_pool->type);
-- WARN_ON(resource->parent_pool != res_pool);
-- devm_kfree(&mc_bus_dev->dev, resource);
-- }
--
-- WARN_ON(free_count != res_pool->free_count);
--}
--
--void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
--{
-- int pool_type;
--
-- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
-- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
--}
--
--/**
-- * fsl_mc_allocator_probe - callback invoked when an allocatable device is
-- * being added to the system
-- */
--static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
--{
-- enum fsl_mc_pool_type pool_type;
-- struct fsl_mc_device *mc_bus_dev;
-- struct fsl_mc_bus *mc_bus;
-- int error;
--
-- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
-- return -EINVAL;
--
-- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-- if (WARN_ON(!dev_is_fsl_mc(&mc_bus_dev->dev)))
-- return -EINVAL;
--
-- mc_bus = to_fsl_mc_bus(mc_bus_dev);
-- error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
-- if (error < 0)
-- return error;
--
-- error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
-- if (error < 0)
-- return error;
--
-- dev_dbg(&mc_dev->dev,
-- "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
-- return 0;
--}
--
--/**
-- * fsl_mc_allocator_remove - callback invoked when an allocatable device is
-- * being removed from the system
-- */
--static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
--{
-- int error;
--
-- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
-- return -EINVAL;
--
-- if (mc_dev->resource) {
-- error = fsl_mc_resource_pool_remove_device(mc_dev);
-- if (error < 0)
-- return error;
-- }
--
-- dev_dbg(&mc_dev->dev,
-- "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
-- return 0;
--}
--
--static const struct fsl_mc_device_id match_id_table[] = {
-- {
-- .vendor = FSL_MC_VENDOR_FREESCALE,
-- .obj_type = "dpbp",
-- },
-- {
-- .vendor = FSL_MC_VENDOR_FREESCALE,
-- .obj_type = "dpmcp",
-- },
-- {
-- .vendor = FSL_MC_VENDOR_FREESCALE,
-- .obj_type = "dpcon",
-- },
-- {.vendor = 0x0},
--};
--
--static struct fsl_mc_driver fsl_mc_allocator_driver = {
-- .driver = {
-- .name = "fsl_mc_allocator",
-- .pm = NULL,
-- },
-- .match_id_table = match_id_table,
-- .probe = fsl_mc_allocator_probe,
-- .remove = fsl_mc_allocator_remove,
--};
--
--int __init fsl_mc_allocator_driver_init(void)
--{
-- return fsl_mc_driver_register(&fsl_mc_allocator_driver);
--}
--
--void fsl_mc_allocator_driver_exit(void)
--{
-- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
--}
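The allocator keeps the same consumer-facing entry points in its new location below. A minimal usage sketch for a hypothetical consumer driver follows, assuming a bound struct fsl_mc_device (named ls_dev here) whose parent DPRC has free dpbp objects and IRQs in its pools; it only uses fsl_mc_object_allocate()/fsl_mc_object_free() and fsl_mc_allocate_irqs()/fsl_mc_free_irqs() as defined in this file.

/* Sketch: borrow a DPBP object and the device's IRQs from the pools
 * managed by this allocator, then hand them back. 'ls_dev' is a
 * hypothetical device passed in by its probe() callback.
 */
static int example_use_pools(struct fsl_mc_device *ls_dev)
{
	struct fsl_mc_device *dpbp_dev;
	int err;

	err = fsl_mc_object_allocate(ls_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
	if (err)
		return err;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		fsl_mc_object_free(dpbp_dev);
		return err;
	}

	/* ... use dpbp_dev and ls_dev->irqs[i] here ... */

	fsl_mc_free_irqs(ls_dev);
	fsl_mc_object_free(dpbp_dev);
	return 0;
}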
---- /dev/null
-+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
-@@ -0,0 +1,666 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * fsl-mc object allocator driver
-+ *
-+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
-+ *
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/msi.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
-+{
-+ return is_fsl_mc_bus_dpbp(mc_dev) ||
-+ is_fsl_mc_bus_dpmcp(mc_dev) ||
-+ is_fsl_mc_bus_dpcon(mc_dev);
-+}
-+
-+/**
-+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
-+ * pool of a given fsl-mc bus
-+ *
-+ * @mc_bus: pointer to the fsl-mc bus
-+ * @pool_type: pool type
-+ * @mc_dev: pointer to allocatable fsl-mc device
-+ */
-+static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
-+ *mc_bus,
-+ enum fsl_mc_pool_type
-+ pool_type,
-+ struct fsl_mc_device
-+ *mc_dev)
-+{
-+ struct fsl_mc_resource_pool *res_pool;
-+ struct fsl_mc_resource *resource;
-+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-+ int error = -EINVAL;
-+
-+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
-+ goto out;
-+ if (!fsl_mc_is_allocatable(mc_dev))
-+ goto out;
-+ if (mc_dev->resource)
-+ goto out;
-+
-+ res_pool = &mc_bus->resource_pools[pool_type];
-+ if (res_pool->type != pool_type)
-+ goto out;
-+ if (res_pool->mc_bus != mc_bus)
-+ goto out;
-+
-+ mutex_lock(&res_pool->mutex);
-+
-+ if (res_pool->max_count < 0)
-+ goto out_unlock;
-+ if (res_pool->free_count < 0 ||
-+ res_pool->free_count > res_pool->max_count)
-+ goto out_unlock;
-+
-+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
-+ GFP_KERNEL);
-+ if (!resource) {
-+ error = -ENOMEM;
-+ dev_err(&mc_bus_dev->dev,
-+ "Failed to allocate memory for fsl_mc_resource\n");
-+ goto out_unlock;
-+ }
-+
-+ resource->type = pool_type;
-+ resource->id = mc_dev->obj_desc.id;
-+ resource->data = mc_dev;
-+ resource->parent_pool = res_pool;
-+ INIT_LIST_HEAD(&resource->node);
-+ list_add_tail(&resource->node, &res_pool->free_list);
-+ mc_dev->resource = resource;
-+ res_pool->free_count++;
-+ res_pool->max_count++;
-+ error = 0;
-+out_unlock:
-+ mutex_unlock(&res_pool->mutex);
-+out:
-+ return error;
-+}
-+
-+/**
-+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
-+ * resource pool
-+ *
-+ * @mc_dev: pointer to allocatable fsl-mc device
-+ *
-+ * It permanently removes an allocatable fsl-mc device from the resource
-+ * pool. It's an error if the device is in use.
-+ */
-+static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
-+ *mc_dev)
-+{
-+ struct fsl_mc_device *mc_bus_dev;
-+ struct fsl_mc_bus *mc_bus;
-+ struct fsl_mc_resource_pool *res_pool;
-+ struct fsl_mc_resource *resource;
-+ int error = -EINVAL;
-+
-+ if (!fsl_mc_is_allocatable(mc_dev))
-+ goto out;
-+
-+ resource = mc_dev->resource;
-+ if (!resource || resource->data != mc_dev)
-+ goto out;
-+
-+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+ res_pool = resource->parent_pool;
-+ if (res_pool != &mc_bus->resource_pools[resource->type])
-+ goto out;
-+
-+ mutex_lock(&res_pool->mutex);
-+
-+ if (res_pool->max_count <= 0)
-+ goto out_unlock;
-+ if (res_pool->free_count <= 0 ||
-+ res_pool->free_count > res_pool->max_count)
-+ goto out_unlock;
-+
-+ /*
-+ * If the device is currently allocated, its resource is not
-+ * in the free list and thus, the device cannot be removed.
-+ */
-+ if (list_empty(&resource->node)) {
-+ error = -EBUSY;
-+ dev_err(&mc_bus_dev->dev,
-+ "Device %s cannot be removed from resource pool\n",
-+ dev_name(&mc_dev->dev));
-+ goto out_unlock;
-+ }
-+
-+ list_del_init(&resource->node);
-+ res_pool->free_count--;
-+ res_pool->max_count--;
-+
-+ devm_kfree(&mc_bus_dev->dev, resource);
-+ mc_dev->resource = NULL;
-+ error = 0;
-+out_unlock:
-+ mutex_unlock(&res_pool->mutex);
-+out:
-+ return error;
-+}
-+
-+static const char *const fsl_mc_pool_type_strings[] = {
-+ [FSL_MC_POOL_DPMCP] = "dpmcp",
-+ [FSL_MC_POOL_DPBP] = "dpbp",
-+ [FSL_MC_POOL_DPCON] = "dpcon",
-+ [FSL_MC_POOL_IRQ] = "irq",
-+};
-+
-+static int __must_check object_type_to_pool_type(const char *object_type,
-+ enum fsl_mc_pool_type
-+ *pool_type)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
-+ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
-+ *pool_type = i;
-+ return 0;
-+ }
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
-+ enum fsl_mc_pool_type pool_type,
-+ struct fsl_mc_resource **new_resource)
-+{
-+ struct fsl_mc_resource_pool *res_pool;
-+ struct fsl_mc_resource *resource;
-+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-+ int error = -EINVAL;
-+
-+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
-+ FSL_MC_NUM_POOL_TYPES);
-+
-+ *new_resource = NULL;
-+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
-+ goto out;
-+
-+ res_pool = &mc_bus->resource_pools[pool_type];
-+ if (res_pool->mc_bus != mc_bus)
-+ goto out;
-+
-+ mutex_lock(&res_pool->mutex);
-+ resource = list_first_entry_or_null(&res_pool->free_list,
-+ struct fsl_mc_resource, node);
-+
-+ if (!resource) {
-+ error = -ENXIO;
-+ dev_err(&mc_bus_dev->dev,
-+ "No more resources of type %s left\n",
-+ fsl_mc_pool_type_strings[pool_type]);
-+ goto out_unlock;
-+ }
-+
-+ if (resource->type != pool_type)
-+ goto out_unlock;
-+ if (resource->parent_pool != res_pool)
-+ goto out_unlock;
-+ if (res_pool->free_count <= 0 ||
-+ res_pool->free_count > res_pool->max_count)
-+ goto out_unlock;
-+
-+ list_del_init(&resource->node);
-+
-+ res_pool->free_count--;
-+ error = 0;
-+out_unlock:
-+ mutex_unlock(&res_pool->mutex);
-+ *new_resource = resource;
-+out:
-+ return error;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
-+
-+void fsl_mc_resource_free(struct fsl_mc_resource *resource)
-+{
-+ struct fsl_mc_resource_pool *res_pool;
-+
-+ res_pool = resource->parent_pool;
-+ if (resource->type != res_pool->type)
-+ return;
-+
-+ mutex_lock(&res_pool->mutex);
-+ if (res_pool->free_count < 0 ||
-+ res_pool->free_count >= res_pool->max_count)
-+ goto out_unlock;
-+
-+ if (!list_empty(&resource->node))
-+ goto out_unlock;
-+
-+ list_add_tail(&resource->node, &res_pool->free_list);
-+ res_pool->free_count++;
-+out_unlock:
-+ mutex_unlock(&res_pool->mutex);
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
-+
-+/**
-+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
-+ * pool type from a given fsl-mc bus instance
-+ *
-+ * @mc_dev: fsl-mc device which is used in conjunction with the
-+ * allocated object
-+ * @pool_type: pool type
-+ * @new_mc_adev: pointer to area where the pointer to the allocated device
-+ * is to be returned
-+ *
-+ * Allocatable objects are always used in conjunction with some functional
-+ * device. This function allocates an object of the specified type from
-+ * the DPRC containing the functional device.
-+ *
-+ * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
-+ * portals are allocated using fsl_mc_portal_allocate(), instead of
-+ * this function.
-+ */
-+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
-+ enum fsl_mc_pool_type pool_type,
-+ struct fsl_mc_device **new_mc_adev)
-+{
-+ struct fsl_mc_device *mc_bus_dev;
-+ struct fsl_mc_bus *mc_bus;
-+ struct fsl_mc_device *mc_adev;
-+ int error = -EINVAL;
-+ struct fsl_mc_resource *resource = NULL;
-+
-+ *new_mc_adev = NULL;
-+ if (mc_dev->flags & FSL_MC_IS_DPRC)
-+ goto error;
-+
-+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
-+ goto error;
-+
-+ if (pool_type == FSL_MC_POOL_DPMCP)
-+ goto error;
-+
-+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
-+ if (error < 0)
-+ goto error;
-+
-+ mc_adev = resource->data;
-+ if (!mc_adev)
-+ goto error;
-+
-+ mc_adev->consumer_link = device_link_add(&mc_dev->dev,
-+ &mc_adev->dev,
-+ DL_FLAG_AUTOREMOVE_CONSUMER);
-+ if (!mc_adev->consumer_link) {
-+ error = -EINVAL;
-+ goto error;
-+ }
-+
-+ *new_mc_adev = mc_adev;
-+ return 0;
-+error:
-+ if (resource)
-+ fsl_mc_resource_free(resource);
-+
-+ return error;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
-+
-+/**
-+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
-+ * pool where it came from.
-+ * @mc_adev: Pointer to the fsl-mc device
-+ */
-+void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
-+{
-+ struct fsl_mc_resource *resource;
-+
-+ resource = mc_adev->resource;
-+ if (resource->type == FSL_MC_POOL_DPMCP)
-+ return;
-+ if (resource->data != mc_adev)
-+ return;
-+
-+ fsl_mc_resource_free(resource);
-+
-+ device_link_del(mc_adev->consumer_link);
-+ mc_adev->consumer_link = NULL;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_object_free);
-+
-+/*
-+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
-+ * ID. A block of IRQs is pre-allocated and maintained in a pool
-+ * from which devices can allocate them when needed.
-+ */
-+
-+/*
-+ * Initialize the interrupt pool associated with an fsl-mc bus.
-+ * It allocates a block of IRQs from the GIC-ITS.
-+ */
-+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
-+ unsigned int irq_count)
-+{
-+ unsigned int i;
-+ struct msi_desc *msi_desc;
-+ struct fsl_mc_device_irq *irq_resources;
-+ struct fsl_mc_device_irq *mc_dev_irq;
-+ int error;
-+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-+ struct fsl_mc_resource_pool *res_pool =
-+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
-+
-+ if (irq_count == 0 ||
-+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
-+ return -EINVAL;
-+
-+ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
-+ if (error < 0)
-+ return error;
-+
-+ irq_resources = devm_kzalloc(&mc_bus_dev->dev,
-+ sizeof(*irq_resources) * irq_count,
-+ GFP_KERNEL);
-+ if (!irq_resources) {
-+ error = -ENOMEM;
-+ goto cleanup_msi_irqs;
-+ }
-+
-+ for (i = 0; i < irq_count; i++) {
-+ mc_dev_irq = &irq_resources[i];
-+
-+ /*
-+ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
-+ * by the fsl_mc_msi_write_msg() callback
-+ */
-+ mc_dev_irq->resource.type = res_pool->type;
-+ mc_dev_irq->resource.data = mc_dev_irq;
-+ mc_dev_irq->resource.parent_pool = res_pool;
-+ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
-+ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
-+ }
-+
-+ for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
-+ mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
-+ mc_dev_irq->msi_desc = msi_desc;
-+ mc_dev_irq->resource.id = msi_desc->irq;
-+ }
-+
-+ res_pool->max_count = irq_count;
-+ res_pool->free_count = irq_count;
-+ mc_bus->irq_resources = irq_resources;
-+ return 0;
-+
-+cleanup_msi_irqs:
-+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
-+ return error;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
-+
-+/**
-+ * Teardown the interrupt pool associated with an fsl-mc bus.
-+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
-+ */
-+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
-+{
-+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
-+ struct fsl_mc_resource_pool *res_pool =
-+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
-+
-+ if (!mc_bus->irq_resources)
-+ return;
-+
-+ if (res_pool->max_count == 0)
-+ return;
-+
-+ if (res_pool->free_count != res_pool->max_count)
-+ return;
-+
-+ INIT_LIST_HEAD(&res_pool->free_list);
-+ res_pool->max_count = 0;
-+ res_pool->free_count = 0;
-+ mc_bus->irq_resources = NULL;
-+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
-+
-+/**
-+ * Allocate the IRQs required by a given fsl-mc device.
-+ */
-+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
-+{
-+ int i;
-+ int irq_count;
-+ int res_allocated_count = 0;
-+ int error = -EINVAL;
-+ struct fsl_mc_device_irq **irqs = NULL;
-+ struct fsl_mc_bus *mc_bus;
-+ struct fsl_mc_resource_pool *res_pool;
-+
-+ if (mc_dev->irqs)
-+ return -EINVAL;
-+
-+ irq_count = mc_dev->obj_desc.irq_count;
-+ if (irq_count == 0)
-+ return -EINVAL;
-+
-+ if (is_fsl_mc_bus_dprc(mc_dev))
-+ mc_bus = to_fsl_mc_bus(mc_dev);
-+ else
-+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
-+
-+ if (!mc_bus->irq_resources)
-+ return -EINVAL;
-+
-+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
-+ if (res_pool->free_count < irq_count) {
-+ dev_err(&mc_dev->dev,
-+ "Not able to allocate %u irqs for device\n", irq_count);
-+ return -ENOSPC;
-+ }
-+
-+ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]),
-+ GFP_KERNEL);
-+ if (!irqs)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < irq_count; i++) {
-+ struct fsl_mc_resource *resource;
-+
-+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
-+ &resource);
-+ if (error < 0)
-+ goto error_resource_alloc;
-+
-+ irqs[i] = to_fsl_mc_irq(resource);
-+ res_allocated_count++;
-+
-+ irqs[i]->mc_dev = mc_dev;
-+ irqs[i]->dev_irq_index = i;
-+ }
-+
-+ mc_dev->irqs = irqs;
-+ return 0;
-+
-+error_resource_alloc:
-+ for (i = 0; i < res_allocated_count; i++) {
-+ irqs[i]->mc_dev = NULL;
-+ fsl_mc_resource_free(&irqs[i]->resource);
-+ }
-+
-+ return error;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
-+
-+/*
-+ * Frees the IRQs that were allocated for an fsl-mc device.
-+ */
-+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
-+{
-+ int i;
-+ int irq_count;
-+ struct fsl_mc_bus *mc_bus;
-+ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
-+
-+ if (!irqs)
-+ return;
-+
-+ irq_count = mc_dev->obj_desc.irq_count;
-+
-+ if (is_fsl_mc_bus_dprc(mc_dev))
-+ mc_bus = to_fsl_mc_bus(mc_dev);
-+ else
-+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
-+
-+ if (!mc_bus->irq_resources)
-+ return;
-+
-+ for (i = 0; i < irq_count; i++) {
-+ irqs[i]->mc_dev = NULL;
-+ fsl_mc_resource_free(&irqs[i]->resource);
-+ }
-+
-+ mc_dev->irqs = NULL;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
-+
-+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-+{
-+ int pool_type;
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+
-+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
-+ struct fsl_mc_resource_pool *res_pool =
-+ &mc_bus->resource_pools[pool_type];
-+
-+ res_pool->type = pool_type;
-+ res_pool->max_count = 0;
-+ res_pool->free_count = 0;
-+ res_pool->mc_bus = mc_bus;
-+ INIT_LIST_HEAD(&res_pool->free_list);
-+ mutex_init(&res_pool->mutex);
-+ }
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_init_all_resource_pools);
-+
-+static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
-+ enum fsl_mc_pool_type pool_type)
-+{
-+ struct fsl_mc_resource *resource;
-+ struct fsl_mc_resource *next;
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+ struct fsl_mc_resource_pool *res_pool =
-+ &mc_bus->resource_pools[pool_type];
-+ int free_count = 0;
-+
-+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
-+ free_count++;
-+ devm_kfree(&mc_bus_dev->dev, resource);
-+ }
-+}
-+
-+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-+{
-+ int pool_type;
-+
-+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
-+ fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_all_resource_pools);
-+
-+/**
-+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is
-+ * being added to the system
-+ */
-+static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
-+{
-+ enum fsl_mc_pool_type pool_type;
-+ struct fsl_mc_device *mc_bus_dev;
-+ struct fsl_mc_bus *mc_bus;
-+ int error;
-+
-+ if (!fsl_mc_is_allocatable(mc_dev))
-+ return -EINVAL;
-+
-+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-+ if (!dev_is_fsl_mc(&mc_bus_dev->dev))
-+ return -EINVAL;
-+
-+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+ error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
-+ if (error < 0)
-+ return error;
-+
-+ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
-+ if (error < 0)
-+ return error;
-+
-+ dev_dbg(&mc_dev->dev,
-+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
-+ return 0;
-+}
-+
-+/**
-+ * fsl_mc_allocator_remove - callback invoked when an allocatable device is
-+ * being removed from the system
-+ */
-+static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
-+{
-+ int error;
-+
-+ if (!fsl_mc_is_allocatable(mc_dev))
-+ return -EINVAL;
-+
-+ if (mc_dev->resource) {
-+ error = fsl_mc_resource_pool_remove_device(mc_dev);
-+ if (error < 0)
-+ return error;
-+ }
-+
-+ dev_dbg(&mc_dev->dev,
-+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
-+ return 0;
-+}
-+
-+static const struct fsl_mc_device_id match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpbp",
-+ },
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpmcp",
-+ },
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpcon",
-+ },
-+ {.vendor = 0x0},
-+};
-+
-+static struct fsl_mc_driver fsl_mc_allocator_driver = {
-+ .driver = {
-+ .name = "fsl_mc_allocator",
-+ .pm = NULL,
-+ },
-+ .match_id_table = match_id_table,
-+ .probe = fsl_mc_allocator_probe,
-+ .remove = fsl_mc_allocator_remove,
-+};
-+
-+int __init fsl_mc_allocator_driver_init(void)
-+{
-+ return fsl_mc_driver_register(&fsl_mc_allocator_driver);
-+}
-+
-+void fsl_mc_allocator_driver_exit(void)
-+{
-+ fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-+}
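For reference, a consumer of the allocator above draws its interrupts from the parent DPRC's pool with fsl_mc_allocate_irqs() and returns them with fsl_mc_free_irqs(); the Linux IRQ numbers come from the msi_desc entries filled in by fsl_mc_populate_irq_pool(). A minimal sketch follows, assuming a hypothetical object driver: my_irq_handler, my_probe and my_remove are illustrative names, and the <linux/fsl/mc.h> include path is assumed for this tree rather than taken from the patch.

#include <linux/interrupt.h>
#include <linux/fsl/mc.h>	/* assumed public fsl-mc header in this tree */

static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct fsl_mc_device *mc_dev = arg;

	/* clear/handle the object's interrupt status here */
	dev_dbg(&mc_dev->dev, "IRQ %d fired\n", irq);
	return IRQ_HANDLED;
}

static int my_probe(struct fsl_mc_device *mc_dev)
{
	int error;

	/* draw obj_desc.irq_count IRQs from the parent DPRC's pool */
	error = fsl_mc_allocate_irqs(mc_dev);
	if (error)
		return error;

	/* irqs[0]->msi_desc->irq is the Linux IRQ number to request */
	error = request_irq(mc_dev->irqs[0]->msi_desc->irq, my_irq_handler,
			    0, dev_name(&mc_dev->dev), mc_dev);
	if (error)
		fsl_mc_free_irqs(mc_dev);

	return error;
}

static int my_remove(struct fsl_mc_device *mc_dev)
{
	free_irq(mc_dev->irqs[0]->msi_desc->irq, mc_dev);
	/* return the IRQs to the DPRC's pool */
	fsl_mc_free_irqs(mc_dev);
	return 0;
}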
---- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
-+++ /dev/null
-@@ -1,900 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/*
-- * Freescale Management Complex (MC) bus driver
-- *
-- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-- * Author: German Rivera <German.Rivera@freescale.com>
-- *
-- */
--
--#define pr_fmt(fmt) "fsl-mc: " fmt
--
--#include <linux/module.h>
--#include <linux/of_device.h>
--#include <linux/of_address.h>
--#include <linux/ioport.h>
--#include <linux/slab.h>
--#include <linux/limits.h>
--#include <linux/bitops.h>
--#include <linux/msi.h>
--#include <linux/dma-mapping.h>
--
--#include "fsl-mc-private.h"
--#include "dprc-cmd.h"
--#include "dpmng-cmd.h"
--
--/**
-- * Default DMA mask for devices on a fsl-mc bus
-- */
--#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
--
--/**
-- * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
-- * @root_mc_bus_dev: fsl-mc device representing the root DPRC
-- * @num_translation_ranges: number of entries in translation_ranges
-- * @translation_ranges: array of bus to system address translation ranges
-- */
--struct fsl_mc {
-- struct fsl_mc_device *root_mc_bus_dev;
-- u8 num_translation_ranges;
-- struct fsl_mc_addr_translation_range *translation_ranges;
--};
--
--/**
-- * struct fsl_mc_addr_translation_range - bus to system address translation
-- * range
-- * @mc_region_type: Type of MC region for the range being translated
-- * @start_mc_offset: Start MC offset of the range being translated
-- * @end_mc_offset: MC offset of the first byte after the range (last MC
-- * offset of the range is end_mc_offset - 1)
-- * @start_phys_addr: system physical address corresponding to start_mc_offset
-- */
--struct fsl_mc_addr_translation_range {
-- enum dprc_region_type mc_region_type;
-- u64 start_mc_offset;
-- u64 end_mc_offset;
-- phys_addr_t start_phys_addr;
--};
--
--/**
-- * struct mc_version
-- * @major: Major version number: incremented on API compatibility changes
-- * @minor: Minor version number: incremented on API additions (that are
-- * backward compatible); reset when major version is incremented
-- * @revision: Internal revision number: incremented on implementation changes
-- * and/or bug fixes that have no impact on API
-- */
--struct mc_version {
-- u32 major;
-- u32 minor;
-- u32 revision;
--};
--
--/**
-- * fsl_mc_bus_match - device to driver matching callback
-- * @dev: the fsl-mc device to match against
-- * @drv: the device driver to search for matching fsl-mc object type
-- * structures
-- *
-- * Returns 1 on success, 0 otherwise.
-- */
--static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
--{
-- const struct fsl_mc_device_id *id;
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
-- bool found = false;
--
-- if (!mc_drv->match_id_table)
-- goto out;
--
-- /*
-- * If the object is not 'plugged' don't match.
-- * Only exception is the root DPRC, which is a special case.
-- */
-- if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
-- !fsl_mc_is_root_dprc(&mc_dev->dev))
-- goto out;
--
-- /*
-- * Traverse the match_id table of the given driver, trying to find
-- * a match for the given device.
-- */
-- for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
-- if (id->vendor == mc_dev->obj_desc.vendor &&
-- strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
-- found = true;
--
-- break;
-- }
-- }
--
--out:
-- dev_dbg(dev, "%smatched\n", found ? "" : "not ");
-- return found;
--}
--
--/**
-- * fsl_mc_bus_uevent - callback invoked when a device is added
-- */
--static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
--{
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
--
-- if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
-- mc_dev->obj_desc.vendor,
-- mc_dev->obj_desc.type))
-- return -ENOMEM;
--
-- return 0;
--}
--
--static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
-- char *buf)
--{
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
--
-- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
-- mc_dev->obj_desc.type);
--}
--static DEVICE_ATTR_RO(modalias);
--
--static struct attribute *fsl_mc_dev_attrs[] = {
-- &dev_attr_modalias.attr,
-- NULL,
--};
--
--ATTRIBUTE_GROUPS(fsl_mc_dev);
--
--struct bus_type fsl_mc_bus_type = {
-- .name = "fsl-mc",
-- .match = fsl_mc_bus_match,
-- .uevent = fsl_mc_bus_uevent,
-- .dev_groups = fsl_mc_dev_groups,
--};
--EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
--
--static int fsl_mc_driver_probe(struct device *dev)
--{
-- struct fsl_mc_driver *mc_drv;
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-- int error;
--
-- if (WARN_ON(!dev->driver))
-- return -EINVAL;
--
-- mc_drv = to_fsl_mc_driver(dev->driver);
-- if (WARN_ON(!mc_drv->probe))
-- return -EINVAL;
--
-- error = mc_drv->probe(mc_dev);
-- if (error < 0) {
-- dev_err(dev, "%s failed: %d\n", __func__, error);
-- return error;
-- }
--
-- return 0;
--}
--
--static int fsl_mc_driver_remove(struct device *dev)
--{
-- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-- int error;
--
-- if (WARN_ON(!dev->driver))
-- return -EINVAL;
--
-- error = mc_drv->remove(mc_dev);
-- if (error < 0) {
-- dev_err(dev, "%s failed: %d\n", __func__, error);
-- return error;
-- }
--
-- return 0;
--}
--
--static void fsl_mc_driver_shutdown(struct device *dev)
--{
-- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
--
-- mc_drv->shutdown(mc_dev);
--}
--
--/**
-- * __fsl_mc_driver_register - registers a child device driver with the
-- * MC bus
-- *
-- * This function is implicitly invoked from the registration function of
-- * fsl_mc device drivers, which is generated by the
-- * module_fsl_mc_driver() macro.
-- */
--int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
-- struct module *owner)
--{
-- int error;
--
-- mc_driver->driver.owner = owner;
-- mc_driver->driver.bus = &fsl_mc_bus_type;
--
-- if (mc_driver->probe)
-- mc_driver->driver.probe = fsl_mc_driver_probe;
--
-- if (mc_driver->remove)
-- mc_driver->driver.remove = fsl_mc_driver_remove;
--
-- if (mc_driver->shutdown)
-- mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
--
-- error = driver_register(&mc_driver->driver);
-- if (error < 0) {
-- pr_err("driver_register() failed for %s: %d\n",
-- mc_driver->driver.name, error);
-- return error;
-- }
--
-- return 0;
--}
--EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
--
--/**
-- * fsl_mc_driver_unregister - unregisters a device driver from the
-- * MC bus
-- */
--void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
--{
-- driver_unregister(&mc_driver->driver);
--}
--EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
--
--/**
-- * mc_get_version() - Retrieves the Management Complex firmware
-- * version information
-- * @mc_io: Pointer to opaque I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @mc_ver_info: Returned version information structure
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--static int mc_get_version(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- struct mc_version *mc_ver_info)
--{
-- struct mc_command cmd = { 0 };
-- struct dpmng_rsp_get_version *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
-- cmd_flags,
-- 0);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
-- mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
-- mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
-- mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
--
-- return 0;
--}
--
--/**
-- * fsl_mc_get_root_dprc - function to traverse to the root dprc
-- */
--static void fsl_mc_get_root_dprc(struct device *dev,
-- struct device **root_dprc_dev)
--{
-- if (WARN_ON(!dev)) {
-- *root_dprc_dev = NULL;
-- } else if (WARN_ON(!dev_is_fsl_mc(dev))) {
-- *root_dprc_dev = NULL;
-- } else {
-- *root_dprc_dev = dev;
-- while (dev_is_fsl_mc((*root_dprc_dev)->parent))
-- *root_dprc_dev = (*root_dprc_dev)->parent;
-- }
--}
--
--static int get_dprc_attr(struct fsl_mc_io *mc_io,
-- int container_id, struct dprc_attributes *attr)
--{
-- u16 dprc_handle;
-- int error;
--
-- error = dprc_open(mc_io, 0, container_id, &dprc_handle);
-- if (error < 0) {
-- dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
-- return error;
-- }
--
-- memset(attr, 0, sizeof(struct dprc_attributes));
-- error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
-- if (error < 0) {
-- dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
-- error);
-- goto common_cleanup;
-- }
--
-- error = 0;
--
--common_cleanup:
-- (void)dprc_close(mc_io, 0, dprc_handle);
-- return error;
--}
--
--static int get_dprc_icid(struct fsl_mc_io *mc_io,
-- int container_id, u16 *icid)
--{
-- struct dprc_attributes attr;
-- int error;
--
-- error = get_dprc_attr(mc_io, container_id, &attr);
-- if (error == 0)
-- *icid = attr.icid;
--
-- return error;
--}
--
--static int translate_mc_addr(struct fsl_mc_device *mc_dev,
-- enum dprc_region_type mc_region_type,
-- u64 mc_offset, phys_addr_t *phys_addr)
--{
-- int i;
-- struct device *root_dprc_dev;
-- struct fsl_mc *mc;
--
-- fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
-- if (WARN_ON(!root_dprc_dev))
-- return -EINVAL;
-- mc = dev_get_drvdata(root_dprc_dev->parent);
--
-- if (mc->num_translation_ranges == 0) {
-- /*
-- * Do identity mapping:
-- */
-- *phys_addr = mc_offset;
-- return 0;
-- }
--
-- for (i = 0; i < mc->num_translation_ranges; i++) {
-- struct fsl_mc_addr_translation_range *range =
-- &mc->translation_ranges[i];
--
-- if (mc_region_type == range->mc_region_type &&
-- mc_offset >= range->start_mc_offset &&
-- mc_offset < range->end_mc_offset) {
-- *phys_addr = range->start_phys_addr +
-- (mc_offset - range->start_mc_offset);
-- return 0;
-- }
-- }
--
-- return -EFAULT;
--}
--
--static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
-- struct fsl_mc_device *mc_bus_dev)
--{
-- int i;
-- int error;
-- struct resource *regions;
-- struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
-- struct device *parent_dev = mc_dev->dev.parent;
-- enum dprc_region_type mc_region_type;
--
-- if (strcmp(obj_desc->type, "dprc") == 0 ||
-- strcmp(obj_desc->type, "dpmcp") == 0) {
-- mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
-- } else if (strcmp(obj_desc->type, "dpio") == 0) {
-- mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
-- } else {
-- /*
-- * This function should not have been called for this MC object
-- * type, as this object type is not supposed to have MMIO
-- * regions
-- */
-- WARN_ON(true);
-- return -EINVAL;
-- }
--
-- regions = kmalloc_array(obj_desc->region_count,
-- sizeof(regions[0]), GFP_KERNEL);
-- if (!regions)
-- return -ENOMEM;
--
-- for (i = 0; i < obj_desc->region_count; i++) {
-- struct dprc_region_desc region_desc;
--
-- error = dprc_get_obj_region(mc_bus_dev->mc_io,
-- 0,
-- mc_bus_dev->mc_handle,
-- obj_desc->type,
-- obj_desc->id, i, &region_desc);
-- if (error < 0) {
-- dev_err(parent_dev,
-- "dprc_get_obj_region() failed: %d\n", error);
-- goto error_cleanup_regions;
-- }
--
-- WARN_ON(region_desc.size == 0);
-- error = translate_mc_addr(mc_dev, mc_region_type,
-- region_desc.base_offset,
-- &regions[i].start);
-- if (error < 0) {
-- dev_err(parent_dev,
-- "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
-- region_desc.base_offset,
-- obj_desc->type, obj_desc->id, i);
-- goto error_cleanup_regions;
-- }
--
-- regions[i].end = regions[i].start + region_desc.size - 1;
-- regions[i].name = "fsl-mc object MMIO region";
-- regions[i].flags = IORESOURCE_IO;
-- if (region_desc.flags & DPRC_REGION_CACHEABLE)
-- regions[i].flags |= IORESOURCE_CACHEABLE;
-- }
--
-- mc_dev->regions = regions;
-- return 0;
--
--error_cleanup_regions:
-- kfree(regions);
-- return error;
--}
--
--/**
-- * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
-- */
--bool fsl_mc_is_root_dprc(struct device *dev)
--{
-- struct device *root_dprc_dev;
--
-- fsl_mc_get_root_dprc(dev, &root_dprc_dev);
-- if (!root_dprc_dev)
-- return false;
-- return dev == root_dprc_dev;
--}
--
--static void fsl_mc_device_release(struct device *dev)
--{
-- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
--
-- kfree(mc_dev->regions);
--
-- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
-- kfree(to_fsl_mc_bus(mc_dev));
-- else
-- kfree(mc_dev);
--}
--
--/**
-- * Add a newly discovered fsl-mc device to be visible in Linux
-- */
--int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
-- struct fsl_mc_io *mc_io,
-- struct device *parent_dev,
-- struct fsl_mc_device **new_mc_dev)
--{
-- int error;
-- struct fsl_mc_device *mc_dev = NULL;
-- struct fsl_mc_bus *mc_bus = NULL;
-- struct fsl_mc_device *parent_mc_dev;
--
-- if (dev_is_fsl_mc(parent_dev))
-- parent_mc_dev = to_fsl_mc_device(parent_dev);
-- else
-- parent_mc_dev = NULL;
--
-- if (strcmp(obj_desc->type, "dprc") == 0) {
-- /*
-- * Allocate an MC bus device object:
-- */
-- mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
-- if (!mc_bus)
-- return -ENOMEM;
--
-- mc_dev = &mc_bus->mc_dev;
-- } else {
-- /*
-- * Allocate a regular fsl_mc_device object:
-- */
-- mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
-- if (!mc_dev)
-- return -ENOMEM;
-- }
--
-- mc_dev->obj_desc = *obj_desc;
-- mc_dev->mc_io = mc_io;
-- device_initialize(&mc_dev->dev);
-- mc_dev->dev.parent = parent_dev;
-- mc_dev->dev.bus = &fsl_mc_bus_type;
-- mc_dev->dev.release = fsl_mc_device_release;
-- dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
--
-- if (strcmp(obj_desc->type, "dprc") == 0) {
-- struct fsl_mc_io *mc_io2;
--
-- mc_dev->flags |= FSL_MC_IS_DPRC;
--
-- /*
-- * To get the DPRC's ICID, we need to open the DPRC
-- * in get_dprc_icid(). For child DPRCs, we do so using the
-- * parent DPRC's MC portal instead of the child DPRC's MC
-- * portal, in case the child DPRC is already opened with
-- * its own portal (e.g., the DPRC used by AIOP).
-- *
-- * NOTE: There cannot be more than one active open for a
-- * given MC object, using the same MC portal.
-- */
-- if (parent_mc_dev) {
-- /*
-- * device being added is a child DPRC device
-- */
-- mc_io2 = parent_mc_dev->mc_io;
-- } else {
-- /*
-- * device being added is the root DPRC device
-- */
-- if (WARN_ON(!mc_io)) {
-- error = -EINVAL;
-- goto error_cleanup_dev;
-- }
--
-- mc_io2 = mc_io;
-- }
--
-- error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
-- if (error < 0)
-- goto error_cleanup_dev;
-- } else {
-- /*
-- * A non-DPRC object has to be a child of a DPRC, use the
-- * parent's ICID and interrupt domain.
-- */
-- mc_dev->icid = parent_mc_dev->icid;
-- mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
-- mc_dev->dev.dma_mask = &mc_dev->dma_mask;
-- dev_set_msi_domain(&mc_dev->dev,
-- dev_get_msi_domain(&parent_mc_dev->dev));
-- }
--
-- /*
-- * Get MMIO regions for the device from the MC:
-- *
-- * NOTE: the root DPRC is a special case as its MMIO region is
-- * obtained from the device tree
-- */
-- if (parent_mc_dev && obj_desc->region_count != 0) {
-- error = fsl_mc_device_get_mmio_regions(mc_dev,
-- parent_mc_dev);
-- if (error < 0)
-- goto error_cleanup_dev;
-- }
--
-- /* Objects are coherent, unless 'no shareability' flag set. */
-- if (!(obj_desc->flags & FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
-- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
--
-- /*
-- * The device-specific probe callback will get invoked by device_add()
-- */
-- error = device_add(&mc_dev->dev);
-- if (error < 0) {
-- dev_err(parent_dev,
-- "device_add() failed for device %s: %d\n",
-- dev_name(&mc_dev->dev), error);
-- goto error_cleanup_dev;
-- }
--
-- dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
--
-- *new_mc_dev = mc_dev;
-- return 0;
--
--error_cleanup_dev:
-- kfree(mc_dev->regions);
-- kfree(mc_bus);
-- kfree(mc_dev);
--
-- return error;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_device_add);
--
--/**
-- * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
-- * Linux
-- *
-- * @mc_dev: Pointer to an fsl-mc device
-- */
--void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
--{
-- /*
-- * The device-specific remove callback will get invoked by device_del()
-- */
-- device_del(&mc_dev->dev);
-- put_device(&mc_dev->dev);
--}
--EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
--
--static int parse_mc_ranges(struct device *dev,
-- int *paddr_cells,
-- int *mc_addr_cells,
-- int *mc_size_cells,
-- const __be32 **ranges_start)
--{
-- const __be32 *prop;
-- int range_tuple_cell_count;
-- int ranges_len;
-- int tuple_len;
-- struct device_node *mc_node = dev->of_node;
--
-- *ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
-- if (!(*ranges_start) || !ranges_len) {
-- dev_warn(dev,
-- "missing or empty ranges property for device tree node '%s'\n",
-- mc_node->name);
-- return 0;
-- }
--
-- *paddr_cells = of_n_addr_cells(mc_node);
--
-- prop = of_get_property(mc_node, "#address-cells", NULL);
-- if (prop)
-- *mc_addr_cells = be32_to_cpup(prop);
-- else
-- *mc_addr_cells = *paddr_cells;
--
-- prop = of_get_property(mc_node, "#size-cells", NULL);
-- if (prop)
-- *mc_size_cells = be32_to_cpup(prop);
-- else
-- *mc_size_cells = of_n_size_cells(mc_node);
--
-- range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
-- *mc_size_cells;
--
-- tuple_len = range_tuple_cell_count * sizeof(__be32);
-- if (ranges_len % tuple_len != 0) {
-- dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
-- return -EINVAL;
-- }
--
-- return ranges_len / tuple_len;
--}
--
--static int get_mc_addr_translation_ranges(struct device *dev,
-- struct fsl_mc_addr_translation_range
-- **ranges,
-- u8 *num_ranges)
--{
-- int ret;
-- int paddr_cells;
-- int mc_addr_cells;
-- int mc_size_cells;
-- int i;
-- const __be32 *ranges_start;
-- const __be32 *cell;
--
-- ret = parse_mc_ranges(dev,
-- &paddr_cells,
-- &mc_addr_cells,
-- &mc_size_cells,
-- &ranges_start);
-- if (ret < 0)
-- return ret;
--
-- *num_ranges = ret;
-- if (!ret) {
-- /*
-- * Missing or empty ranges property ("ranges;") for the
-- * 'fsl,qoriq-mc' node. In this case, identity mapping
-- * will be used.
-- */
-- *ranges = NULL;
-- return 0;
-- }
--
-- *ranges = devm_kcalloc(dev, *num_ranges,
-- sizeof(struct fsl_mc_addr_translation_range),
-- GFP_KERNEL);
-- if (!(*ranges))
-- return -ENOMEM;
--
-- cell = ranges_start;
-- for (i = 0; i < *num_ranges; ++i) {
-- struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
--
-- range->mc_region_type = of_read_number(cell, 1);
-- range->start_mc_offset = of_read_number(cell + 1,
-- mc_addr_cells - 1);
-- cell += mc_addr_cells;
-- range->start_phys_addr = of_read_number(cell, paddr_cells);
-- cell += paddr_cells;
-- range->end_mc_offset = range->start_mc_offset +
-- of_read_number(cell, mc_size_cells);
--
-- cell += mc_size_cells;
-- }
--
-- return 0;
--}
--
--/**
-- * fsl_mc_bus_probe - callback invoked when the root MC bus is being
-- * added
-- */
--static int fsl_mc_bus_probe(struct platform_device *pdev)
--{
-- struct fsl_mc_obj_desc obj_desc;
-- int error;
-- struct fsl_mc *mc;
-- struct fsl_mc_device *mc_bus_dev = NULL;
-- struct fsl_mc_io *mc_io = NULL;
-- int container_id;
-- phys_addr_t mc_portal_phys_addr;
-- u32 mc_portal_size;
-- struct mc_version mc_version;
-- struct resource res;
--
-- mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
-- if (!mc)
-- return -ENOMEM;
--
-- platform_set_drvdata(pdev, mc);
--
-- /*
-- * Get physical address of MC portal for the root DPRC:
-- */
-- error = of_address_to_resource(pdev->dev.of_node, 0, &res);
-- if (error < 0) {
-- dev_err(&pdev->dev,
-- "of_address_to_resource() failed for %pOF\n",
-- pdev->dev.of_node);
-- return error;
-- }
--
-- mc_portal_phys_addr = res.start;
-- mc_portal_size = resource_size(&res);
-- error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
-- mc_portal_size, NULL,
-- FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
-- if (error < 0)
-- return error;
--
-- error = mc_get_version(mc_io, 0, &mc_version);
-- if (error != 0) {
-- dev_err(&pdev->dev,
-- "mc_get_version() failed with error %d\n", error);
-- goto error_cleanup_mc_io;
-- }
--
-- dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
-- mc_version.major, mc_version.minor, mc_version.revision);
--
-- error = get_mc_addr_translation_ranges(&pdev->dev,
-- &mc->translation_ranges,
-- &mc->num_translation_ranges);
-- if (error < 0)
-- goto error_cleanup_mc_io;
--
-- error = dprc_get_container_id(mc_io, 0, &container_id);
-- if (error < 0) {
-- dev_err(&pdev->dev,
-- "dprc_get_container_id() failed: %d\n", error);
-- goto error_cleanup_mc_io;
-- }
--
-- memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
-- error = dprc_get_api_version(mc_io, 0,
-- &obj_desc.ver_major,
-- &obj_desc.ver_minor);
-- if (error < 0)
-- goto error_cleanup_mc_io;
--
-- obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
-- strcpy(obj_desc.type, "dprc");
-- obj_desc.id = container_id;
-- obj_desc.irq_count = 1;
-- obj_desc.region_count = 0;
--
-- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
-- if (error < 0)
-- goto error_cleanup_mc_io;
--
-- mc->root_mc_bus_dev = mc_bus_dev;
-- return 0;
--
--error_cleanup_mc_io:
-- fsl_destroy_mc_io(mc_io);
-- return error;
--}
--
--/**
-- * fsl_mc_bus_remove - callback invoked when the root MC bus is being
-- * removed
-- */
--static int fsl_mc_bus_remove(struct platform_device *pdev)
--{
-- struct fsl_mc *mc = platform_get_drvdata(pdev);
--
-- if (WARN_ON(!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)))
-- return -EINVAL;
--
-- fsl_mc_device_remove(mc->root_mc_bus_dev);
--
-- fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
-- mc->root_mc_bus_dev->mc_io = NULL;
--
-- return 0;
--}
--
--static const struct of_device_id fsl_mc_bus_match_table[] = {
-- {.compatible = "fsl,qoriq-mc",},
-- {},
--};
--
--MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
--
--static struct platform_driver fsl_mc_bus_driver = {
-- .driver = {
-- .name = "fsl_mc_bus",
-- .pm = NULL,
-- .of_match_table = fsl_mc_bus_match_table,
-- },
-- .probe = fsl_mc_bus_probe,
-- .remove = fsl_mc_bus_remove,
--};
--
--static int __init fsl_mc_bus_driver_init(void)
--{
-- int error;
--
-- error = bus_register(&fsl_mc_bus_type);
-- if (error < 0) {
-- pr_err("bus type registration failed: %d\n", error);
-- goto error_cleanup_cache;
-- }
--
-- error = platform_driver_register(&fsl_mc_bus_driver);
-- if (error < 0) {
-- pr_err("platform_driver_register() failed: %d\n", error);
-- goto error_cleanup_bus;
-- }
--
-- error = dprc_driver_init();
-- if (error < 0)
-- goto error_cleanup_driver;
--
-- error = fsl_mc_allocator_driver_init();
-- if (error < 0)
-- goto error_cleanup_dprc_driver;
--
-- error = its_fsl_mc_msi_init();
-- if (error < 0)
-- goto error_cleanup_mc_allocator;
--
-- return 0;
--
--error_cleanup_mc_allocator:
-- fsl_mc_allocator_driver_exit();
--
--error_cleanup_dprc_driver:
-- dprc_driver_exit();
--
--error_cleanup_driver:
-- platform_driver_unregister(&fsl_mc_bus_driver);
--
--error_cleanup_bus:
-- bus_unregister(&fsl_mc_bus_type);
--
--error_cleanup_cache:
-- return error;
--}
--postcore_initcall(fsl_mc_bus_driver_init);
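The match and probe paths above are exercised by object drivers that register through the module_fsl_mc_driver() macro mentioned in the comments. A minimal skeleton is sketched below; "dpdemo" is a placeholder object type, the my_* names are illustrative, and the MODULE_DEVICE_TABLE(fslmc, ...) line assumes the usual fslmc alias support so userspace can act on the MODALIAS string emitted by fsl_mc_bus_uevent().

#include <linux/module.h>
#include <linux/fsl/mc.h>	/* assumed public fsl-mc header in this tree */

static int my_probe(struct fsl_mc_device *mc_dev)
{
	dev_info(&mc_dev->dev, "bound to %s.%d\n",
		 mc_dev->obj_desc.type, mc_dev->obj_desc.id);
	return 0;
}

static int my_remove(struct fsl_mc_device *mc_dev)
{
	return 0;
}

static const struct fsl_mc_device_id my_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdemo",	/* placeholder object type */
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, my_match_id_table);

static struct fsl_mc_driver my_driver = {
	.driver = {
		.name = "my_fsl_mc_driver",
	},
	.match_id_table = my_match_id_table,
	.probe = my_probe,
	.remove = my_remove,
};
module_fsl_mc_driver(my_driver);

MODULE_LICENSE("GPL");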
---- /dev/null
-+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
-@@ -0,0 +1,1148 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Freescale Management Complex (MC) bus driver
-+ *
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-+ * Author: German Rivera <German.Rivera@freescale.com>
-+ *
-+ */
-+
-+#define pr_fmt(fmt) "fsl-mc: " fmt
-+
-+#include <linux/module.h>
-+#include <linux/of_device.h>
-+#include <linux/of_address.h>
-+#include <linux/ioport.h>
-+#include <linux/slab.h>
-+#include <linux/limits.h>
-+#include <linux/bitops.h>
-+#include <linux/msi.h>
-+#include <linux/dma-mapping.h>
-+
-+#include "fsl-mc-private.h"
-+
-+/**
-+ * Default DMA mask for devices on a fsl-mc bus
-+ */
-+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
-+
-+/**
-+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
-+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
-+ * @num_translation_ranges: number of entries in translation_ranges
-+ * @translation_ranges: array of bus to system address translation ranges
-+ */
-+struct fsl_mc {
-+ struct fsl_mc_device *root_mc_bus_dev;
-+ u8 num_translation_ranges;
-+ struct fsl_mc_addr_translation_range *translation_ranges;
-+};
-+
-+/**
-+ * struct fsl_mc_addr_translation_range - bus to system address translation
-+ * range
-+ * @mc_region_type: Type of MC region for the range being translated
-+ * @start_mc_offset: Start MC offset of the range being translated
-+ * @end_mc_offset: MC offset of the first byte after the range (last MC
-+ * offset of the range is end_mc_offset - 1)
-+ * @start_phys_addr: system physical address corresponding to start_mc_offset
-+ */
-+struct fsl_mc_addr_translation_range {
-+ enum dprc_region_type mc_region_type;
-+ u64 start_mc_offset;
-+ u64 end_mc_offset;
-+ phys_addr_t start_phys_addr;
-+};
-+
-+/**
-+ * struct mc_version
-+ * @major: Major version number: incremented on API compatibility changes
-+ * @minor: Minor version number: incremented on API additions (that are
-+ * backward compatible); reset when major version is incremented
-+ * @revision: Internal revision number: incremented on implementation changes
-+ * and/or bug fixes that have no impact on API
-+ */
-+struct mc_version {
-+ u32 major;
-+ u32 minor;
-+ u32 revision;
-+};
-+
-+/**
-+ * fsl_mc_bus_match - device to driver matching callback
-+ * @dev: the fsl-mc device to match against
-+ * @drv: the device driver to search for matching fsl-mc object type
-+ * structures
-+ *
-+ * Returns 1 on success, 0 otherwise.
-+ */
-+static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
-+{
-+ const struct fsl_mc_device_id *id;
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
-+ bool found = false;
-+
-+ /* When driver_override is set, only bind to the matching driver */
-+ if (mc_dev->driver_override) {
-+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
-+ goto out;
-+ }
-+
-+ if (!mc_drv->match_id_table)
-+ goto out;
-+
-+ /*
-+ * If the object is not 'plugged' don't match.
-+ * Only exception is the root DPRC, which is a special case.
-+ */
-+ if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
-+ !fsl_mc_is_root_dprc(&mc_dev->dev))
-+ goto out;
-+
-+ /*
-+ * Traverse the match_id table of the given driver, trying to find
-+ * a match for the given device.
-+ */
-+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
-+ if (id->vendor == mc_dev->obj_desc.vendor &&
-+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
-+ found = true;
-+
-+ break;
-+ }
-+ }
-+
-+out:
-+ dev_dbg(dev, "%smatched\n", found ? "" : "not ");
-+ return found;
-+}
-+
-+/**
-+ * fsl_mc_bus_uevent - callback invoked when a device is added
-+ */
-+static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
-+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+
-+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
-+ mc_dev->obj_desc.vendor,
-+ mc_dev->obj_desc.type))
-+ return -ENOMEM;
-+
-+ return 0;
-+}
-+
-+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+
-+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
-+ mc_dev->obj_desc.type);
-+}
-+static DEVICE_ATTR_RO(modalias);
-+
-+static ssize_t rescan_store(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct fsl_mc_device *root_mc_dev;
-+ struct fsl_mc_bus *root_mc_bus;
-+ unsigned long val;
-+
-+ if (!fsl_mc_is_root_dprc(dev))
-+ return -EINVAL;
-+
-+ root_mc_dev = to_fsl_mc_device(dev);
-+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
-+
-+ if (kstrtoul(buf, 0, &val) < 0)
-+ return -EINVAL;
-+
-+ if (val) {
-+ mutex_lock(&root_mc_bus->scan_mutex);
-+ dprc_scan_objects(root_mc_dev, NULL, NULL);
-+ mutex_unlock(&root_mc_bus->scan_mutex);
-+ }
-+
-+ return count;
-+}
-+static DEVICE_ATTR_WO(rescan);
-+
-+static ssize_t driver_override_store(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ const char *driver_override, *old = mc_dev->driver_override;
-+ char *cp;
-+
-+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
-+ return -EINVAL;
-+
-+ if (count >= (PAGE_SIZE - 1))
-+ return -EINVAL;
-+
-+ driver_override = kstrndup(buf, count, GFP_KERNEL);
-+ if (!driver_override)
-+ return -ENOMEM;
-+
-+ cp = strchr(driver_override, '\n');
-+ if (cp)
-+ *cp = '\0';
-+
-+ if (strlen(driver_override)) {
-+ mc_dev->driver_override = driver_override;
-+ } else {
-+ kfree(driver_override);
-+ mc_dev->driver_override = NULL;
-+ }
-+
-+ kfree(old);
-+
-+ return count;
-+}
-+
-+static ssize_t driver_override_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+
-+ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
-+}
-+static DEVICE_ATTR_RW(driver_override);
-+
-+static struct attribute *fsl_mc_dev_attrs[] = {
-+ &dev_attr_modalias.attr,
-+ &dev_attr_rescan.attr,
-+ &dev_attr_driver_override.attr,
-+ NULL,
-+};
-+
-+ATTRIBUTE_GROUPS(fsl_mc_dev);
-+
-+static int scan_fsl_mc_bus(struct device *dev, void *data)
-+{
-+ struct fsl_mc_device *root_mc_dev;
-+ struct fsl_mc_bus *root_mc_bus;
-+
-+ if (!fsl_mc_is_root_dprc(dev))
-+ goto exit;
-+
-+ root_mc_dev = to_fsl_mc_device(dev);
-+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
-+ mutex_lock(&root_mc_bus->scan_mutex);
-+ dprc_scan_objects(root_mc_dev, NULL, NULL);
-+ mutex_unlock(&root_mc_bus->scan_mutex);
-+
-+exit:
-+ return 0;
-+}
-+
-+static ssize_t bus_rescan_store(struct bus_type *bus,
-+ const char *buf, size_t count)
-+{
-+ unsigned long val;
-+
-+ if (kstrtoul(buf, 0, &val) < 0)
-+ return -EINVAL;
-+
-+ if (val)
-+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
-+
-+ return count;
-+}
-+static BUS_ATTR(rescan, 0220, NULL, bus_rescan_store);
-+
-+static struct attribute *fsl_mc_bus_attrs[] = {
-+ &bus_attr_rescan.attr,
-+ NULL,
-+};
-+
-+static const struct attribute_group fsl_mc_bus_group = {
-+ .attrs = fsl_mc_bus_attrs,
-+};
-+
-+static const struct attribute_group *fsl_mc_bus_groups[] = {
-+ &fsl_mc_bus_group,
-+ NULL,
-+};
-+
-+struct bus_type fsl_mc_bus_type = {
-+ .name = "fsl-mc",
-+ .match = fsl_mc_bus_match,
-+ .uevent = fsl_mc_bus_uevent,
-+ .dev_groups = fsl_mc_dev_groups,
-+ .bus_groups = fsl_mc_bus_groups,
-+};
-+EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-+
-+struct device_type fsl_mc_bus_dprc_type = {
-+ .name = "fsl_mc_bus_dprc"
-+};
-+
-+struct device_type fsl_mc_bus_dpni_type = {
-+ .name = "fsl_mc_bus_dpni"
-+};
-+
-+struct device_type fsl_mc_bus_dpio_type = {
-+ .name = "fsl_mc_bus_dpio"
-+};
-+
-+struct device_type fsl_mc_bus_dpsw_type = {
-+ .name = "fsl_mc_bus_dpsw"
-+};
-+
-+struct device_type fsl_mc_bus_dpdmux_type = {
-+ .name = "fsl_mc_bus_dpdmux"
-+};
-+
-+struct device_type fsl_mc_bus_dpbp_type = {
-+ .name = "fsl_mc_bus_dpbp"
-+};
-+
-+struct device_type fsl_mc_bus_dpcon_type = {
-+ .name = "fsl_mc_bus_dpcon"
-+};
-+
-+struct device_type fsl_mc_bus_dpmcp_type = {
-+ .name = "fsl_mc_bus_dpmcp"
-+};
-+
-+struct device_type fsl_mc_bus_dpmac_type = {
-+ .name = "fsl_mc_bus_dpmac"
-+};
-+
-+struct device_type fsl_mc_bus_dprtc_type = {
-+ .name = "fsl_mc_bus_dprtc"
-+};
-+
-+struct device_type fsl_mc_bus_dpseci_type = {
-+ .name = "fsl_mc_bus_dpseci"
-+};
-+
-+struct device_type fsl_mc_bus_dpdcei_type = {
-+ .name = "fsl_mc_bus_dpdcei"
-+};
-+
-+struct device_type fsl_mc_bus_dpaiop_type = {
-+ .name = "fsl_mc_bus_dpaiop"
-+};
-+
-+struct device_type fsl_mc_bus_dpci_type = {
-+ .name = "fsl_mc_bus_dpci"
-+};
-+
-+struct device_type fsl_mc_bus_dpdmai_type = {
-+ .name = "fsl_mc_bus_dpdmai"
-+};
-+
-+static struct device_type *fsl_mc_get_device_type(const char *type)
-+{
-+ static const struct {
-+ struct device_type *dev_type;
-+ const char *type;
-+ } dev_types[] = {
-+ { &fsl_mc_bus_dprc_type, "dprc" },
-+ { &fsl_mc_bus_dpni_type, "dpni" },
-+ { &fsl_mc_bus_dpio_type, "dpio" },
-+ { &fsl_mc_bus_dpsw_type, "dpsw" },
-+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
-+ { &fsl_mc_bus_dpbp_type, "dpbp" },
-+ { &fsl_mc_bus_dpcon_type, "dpcon" },
-+ { &fsl_mc_bus_dpmcp_type, "dpmcp" },
-+ { &fsl_mc_bus_dpmac_type, "dpmac" },
-+ { &fsl_mc_bus_dprtc_type, "dprtc" },
-+ { &fsl_mc_bus_dpseci_type, "dpseci" },
-+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
-+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
-+ { &fsl_mc_bus_dpci_type, "dpci" },
-+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
-+ { NULL, NULL }
-+ };
-+ int i;
-+
-+ for (i = 0; dev_types[i].dev_type; i++)
-+ if (!strcmp(dev_types[i].type, type))
-+ return dev_types[i].dev_type;
-+
-+ return NULL;
-+}
-+
-+static int fsl_mc_driver_probe(struct device *dev)
-+{
-+ struct fsl_mc_driver *mc_drv;
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ int error;
-+
-+ mc_drv = to_fsl_mc_driver(dev->driver);
-+
-+ error = mc_drv->probe(mc_dev);
-+ if (error < 0) {
-+ if (error != -EPROBE_DEFER)
-+ dev_err(dev, "%s failed: %d\n", __func__, error);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+
-+static int fsl_mc_driver_remove(struct device *dev)
-+{
-+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ int error;
-+
-+ error = mc_drv->remove(mc_dev);
-+ if (error < 0) {
-+ dev_err(dev, "%s failed: %d\n", __func__, error);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+
-+static void fsl_mc_driver_shutdown(struct device *dev)
-+{
-+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+
-+ mc_drv->shutdown(mc_dev);
-+}
-+
-+/**
-+ * __fsl_mc_driver_register - registers a child device driver with the
-+ * MC bus
-+ *
-+ * This function is implicitly invoked from the registration function of
-+ * fsl_mc device drivers, which is generated by the
-+ * module_fsl_mc_driver() macro.
-+ */
-+int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
-+ struct module *owner)
-+{
-+ int error;
-+
-+ mc_driver->driver.owner = owner;
-+ mc_driver->driver.bus = &fsl_mc_bus_type;
-+
-+ if (mc_driver->probe)
-+ mc_driver->driver.probe = fsl_mc_driver_probe;
-+
-+ if (mc_driver->remove)
-+ mc_driver->driver.remove = fsl_mc_driver_remove;
-+
-+ if (mc_driver->shutdown)
-+ mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
-+
-+ error = driver_register(&mc_driver->driver);
-+ if (error < 0) {
-+ pr_err("driver_register() failed for %s: %d\n",
-+ mc_driver->driver.name, error);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
-+
-+/**
-+ * fsl_mc_driver_unregister - unregisters a device driver from the
-+ * MC bus
-+ */
-+void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
-+{
-+ driver_unregister(&mc_driver->driver);
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
-+
-+/**
-+ * mc_get_version() - Retrieves the Management Complex firmware
-+ * version information
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @mc_ver_info: Returned version information structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+static int mc_get_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ struct mc_version *mc_ver_info)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpmng_rsp_get_version *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
-+ cmd_flags,
-+ 0);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
-+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
-+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
-+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
-+
-+ return 0;
-+}
-+
-+/**
-+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
-+ */
-+void fsl_mc_get_root_dprc(struct device *dev,
-+ struct device **root_dprc_dev)
-+{
-+ if (!dev) {
-+ *root_dprc_dev = NULL;
-+ } else if (!dev_is_fsl_mc(dev)) {
-+ *root_dprc_dev = NULL;
-+ } else {
-+ *root_dprc_dev = dev;
-+ while (dev_is_fsl_mc((*root_dprc_dev)->parent))
-+ *root_dprc_dev = (*root_dprc_dev)->parent;
-+ }
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_get_root_dprc);
-+
-+static int get_dprc_attr(struct fsl_mc_io *mc_io,
-+ int container_id, struct dprc_attributes *attr)
-+{
-+ u16 dprc_handle;
-+ int error;
-+
-+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
-+ if (error < 0) {
-+ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
-+ return error;
-+ }
-+
-+ memset(attr, 0, sizeof(struct dprc_attributes));
-+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
-+ if (error < 0) {
-+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
-+ error);
-+ goto common_cleanup;
-+ }
-+
-+ error = 0;
-+
-+common_cleanup:
-+ (void)dprc_close(mc_io, 0, dprc_handle);
-+ return error;
-+}
-+
-+static int get_dprc_icid(struct fsl_mc_io *mc_io,
-+ int container_id, u32 *icid)
-+{
-+ struct dprc_attributes attr;
-+ int error;
-+
-+ error = get_dprc_attr(mc_io, container_id, &attr);
-+ if (error == 0)
-+ *icid = attr.icid;
-+
-+ return error;
-+}
-+
-+static int translate_mc_addr(struct fsl_mc_device *mc_dev,
-+ enum dprc_region_type mc_region_type,
-+ u64 mc_offset, phys_addr_t *phys_addr)
-+{
-+ int i;
-+ struct device *root_dprc_dev;
-+ struct fsl_mc *mc;
-+
-+ fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
-+ mc = dev_get_drvdata(root_dprc_dev->parent);
-+
-+ if (mc->num_translation_ranges == 0) {
-+ /*
-+ * Do identity mapping:
-+ */
-+ *phys_addr = mc_offset;
-+ return 0;
-+ }
-+
-+ for (i = 0; i < mc->num_translation_ranges; i++) {
-+ struct fsl_mc_addr_translation_range *range =
-+ &mc->translation_ranges[i];
-+
-+ if (mc_region_type == range->mc_region_type &&
-+ mc_offset >= range->start_mc_offset &&
-+ mc_offset < range->end_mc_offset) {
-+ *phys_addr = range->start_phys_addr +
-+ (mc_offset - range->start_mc_offset);
-+ return 0;
-+ }
-+ }
-+
-+ return -EFAULT;
-+}
-+
-+static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
-+ struct fsl_mc_device *mc_bus_dev)
-+{
-+ int i;
-+ int error;
-+ struct resource *regions;
-+ struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
-+ struct device *parent_dev = mc_dev->dev.parent;
-+ enum dprc_region_type mc_region_type;
-+
-+ if (is_fsl_mc_bus_dprc(mc_dev) ||
-+ is_fsl_mc_bus_dpmcp(mc_dev)) {
-+ mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
-+ } else if (is_fsl_mc_bus_dpio(mc_dev)) {
-+ mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
-+ } else {
-+ /*
-+ * This function should not have been called for this MC object
-+ * type, as this object type is not supposed to have MMIO
-+ * regions
-+ */
-+ return -EINVAL;
-+ }
-+
-+ regions = kmalloc_array(obj_desc->region_count,
-+ sizeof(regions[0]), GFP_KERNEL);
-+ if (!regions)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < obj_desc->region_count; i++) {
-+ struct dprc_region_desc region_desc;
-+
-+ error = dprc_get_obj_region(mc_bus_dev->mc_io,
-+ 0,
-+ mc_bus_dev->mc_handle,
-+ obj_desc->type,
-+ obj_desc->id, i, &region_desc);
-+ if (error < 0) {
-+ dev_err(parent_dev,
-+ "dprc_get_obj_region() failed: %d\n", error);
-+ goto error_cleanup_regions;
-+ }
-+ /* Older MC firmware only returned a region offset and no base
-+ * address. If region_desc provides a base address, use it;
-+ * otherwise fall back to the old translation mechanism.
-+ */
-+ if (region_desc.base_address)
-+ regions[i].start = region_desc.base_address +
-+ region_desc.base_offset;
-+ else
-+ error = translate_mc_addr(mc_dev, mc_region_type,
-+ region_desc.base_offset,
-+ &regions[i].start);
-+ if (error < 0) {
-+ dev_err(parent_dev,
-+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
-+ region_desc.base_offset,
-+ obj_desc->type, obj_desc->id, i);
-+ goto error_cleanup_regions;
-+ }
-+
-+ regions[i].end = regions[i].start + region_desc.size - 1;
-+ regions[i].name = "fsl-mc object MMIO region";
-+ regions[i].flags = IORESOURCE_IO;
-+ if (region_desc.flags & DPRC_REGION_CACHEABLE)
-+ regions[i].flags |= IORESOURCE_CACHEABLE;
-+ if (region_desc.flags & DPRC_REGION_SHAREABLE)
-+ regions[i].flags |= IORESOURCE_MEM;
-+ }
-+
-+ mc_dev->regions = regions;
-+ return 0;
-+
-+error_cleanup_regions:
-+ kfree(regions);
-+ return error;
-+}
-+
-+/**
-+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
-+ */
-+bool fsl_mc_is_root_dprc(struct device *dev)
-+{
-+ struct device *root_dprc_dev;
-+
-+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
-+ if (!root_dprc_dev)
-+ return false;
-+ return dev == root_dprc_dev;
-+}
-+
-+static void fsl_mc_device_release(struct device *dev)
-+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+
-+ kfree(mc_dev->regions);
-+
-+ if (is_fsl_mc_bus_dprc(mc_dev))
-+ kfree(to_fsl_mc_bus(mc_dev));
-+ else
-+ kfree(mc_dev);
-+}
-+
-+/**
-+ * Add a newly discovered fsl-mc device to be visible in Linux
-+ */
-+int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
-+ struct fsl_mc_io *mc_io,
-+ struct device *parent_dev,
-+ const char *driver_override,
-+ struct fsl_mc_device **new_mc_dev)
-+{
-+ int error;
-+ struct fsl_mc_device *mc_dev = NULL;
-+ struct fsl_mc_bus *mc_bus = NULL;
-+ struct fsl_mc_device *parent_mc_dev;
-+
-+ if (dev_is_fsl_mc(parent_dev))
-+ parent_mc_dev = to_fsl_mc_device(parent_dev);
-+ else
-+ parent_mc_dev = NULL;
-+
-+ if (strcmp(obj_desc->type, "dprc") == 0) {
-+ /*
-+ * Allocate an MC bus device object:
-+ */
-+ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
-+ if (!mc_bus)
-+ return -ENOMEM;
-+
-+ mc_dev = &mc_bus->mc_dev;
-+ } else {
-+ /*
-+ * Allocate a regular fsl_mc_device object:
-+ */
-+ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
-+ if (!mc_dev)
-+ return -ENOMEM;
-+ }
-+
-+ mc_dev->obj_desc = *obj_desc;
-+ mc_dev->mc_io = mc_io;
-+
-+ if (driver_override) {
-+ /*
-+ * We trust driver_override, so we don't need to use
-+ * kstrndup() here
-+ */
-+ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL);
-+ if (!mc_dev->driver_override) {
-+ error = -ENOMEM;
-+ goto error_cleanup_dev;
-+ }
-+ }
-+
-+ device_initialize(&mc_dev->dev);
-+ mc_dev->dev.parent = parent_dev;
-+ mc_dev->dev.bus = &fsl_mc_bus_type;
-+ mc_dev->dev.release = fsl_mc_device_release;
-+ mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
-+ if (!mc_dev->dev.type) {
-+ error = -ENODEV;
-+ dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
-+ goto error_cleanup_dev;
-+ }
-+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
-+
-+ if (strcmp(obj_desc->type, "dprc") == 0) {
-+ struct fsl_mc_io *mc_io2;
-+
-+ mc_dev->flags |= FSL_MC_IS_DPRC;
-+
-+ /*
-+ * To get the DPRC's ICID, we need to open the DPRC
-+ * in get_dprc_icid(). For child DPRCs, we do so using the
-+ * parent DPRC's MC portal instead of the child DPRC's MC
-+ * portal, in case the child DPRC is already opened with
-+ * its own portal (e.g., the DPRC used by AIOP).
-+ *
-+ * NOTE: There cannot be more than one active open for a
-+ * given MC object, using the same MC portal.
-+ */
-+ if (parent_mc_dev) {
-+ /*
-+ * device being added is a child DPRC device
-+ */
-+ mc_io2 = parent_mc_dev->mc_io;
-+ } else {
-+ /*
-+ * device being added is the root DPRC device
-+ */
-+ if (!mc_io) {
-+ error = -EINVAL;
-+ goto error_cleanup_dev;
-+ }
-+
-+ mc_io2 = mc_io;
-+ }
-+
-+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
-+ if (error < 0)
-+ goto error_cleanup_dev;
-+ } else {
-+ /*
-+ * A non-DPRC object has to be a child of a DPRC, use the
-+ * parent's ICID and interrupt domain.
-+ */
-+ mc_dev->icid = parent_mc_dev->icid;
-+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
-+ mc_dev->dev.dma_mask = &mc_dev->dma_mask;
-+ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
-+ dev_set_msi_domain(&mc_dev->dev,
-+ dev_get_msi_domain(&parent_mc_dev->dev));
-+ }
-+
-+ /*
-+ * Get MMIO regions for the device from the MC:
-+ *
-+ * NOTE: the root DPRC is a special case as its MMIO region is
-+ * obtained from the device tree
-+ */
-+ if (parent_mc_dev && obj_desc->region_count != 0) {
-+ error = fsl_mc_device_get_mmio_regions(mc_dev,
-+ parent_mc_dev);
-+ if (error < 0)
-+ goto error_cleanup_dev;
-+ }
-+
-+ /*
-+ * The device-specific probe callback will get invoked by device_add()
-+ */
-+ error = device_add(&mc_dev->dev);
-+ if (error < 0) {
-+ dev_err(parent_dev,
-+ "device_add() failed for device %s: %d\n",
-+ dev_name(&mc_dev->dev), error);
-+ goto error_cleanup_dev;
-+ }
-+
-+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
-+
-+ *new_mc_dev = mc_dev;
-+ return 0;
-+
-+error_cleanup_dev:
-+ kfree(mc_dev->regions);
-+ kfree(mc_bus);
-+ kfree(mc_dev);
-+
-+ return error;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_device_add);
-+
-+/**
-+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
-+ * Linux
-+ *
-+ * @mc_dev: Pointer to an fsl-mc device
-+ */
-+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
-+{
-+ kfree(mc_dev->driver_override);
-+ mc_dev->driver_override = NULL;
-+
-+ /*
-+ * The device-specific remove callback will get invoked by device_del()
-+ */
-+ device_del(&mc_dev->dev);
-+ put_device(&mc_dev->dev);
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
-+
-+static int parse_mc_ranges(struct device *dev,
-+ int *paddr_cells,
-+ int *mc_addr_cells,
-+ int *mc_size_cells,
-+ const __be32 **ranges_start)
-+{
-+ const __be32 *prop;
-+ int range_tuple_cell_count;
-+ int ranges_len;
-+ int tuple_len;
-+ struct device_node *mc_node = dev->of_node;
-+
-+ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
-+ if (!(*ranges_start) || !ranges_len) {
-+ dev_warn(dev,
-+ "missing or empty ranges property for device tree node '%s'\n",
-+ mc_node->name);
-+ return 0;
-+ }
-+
-+ *paddr_cells = of_n_addr_cells(mc_node);
-+
-+ prop = of_get_property(mc_node, "#address-cells", NULL);
-+ if (prop)
-+ *mc_addr_cells = be32_to_cpup(prop);
-+ else
-+ *mc_addr_cells = *paddr_cells;
-+
-+ prop = of_get_property(mc_node, "#size-cells", NULL);
-+ if (prop)
-+ *mc_size_cells = be32_to_cpup(prop);
-+ else
-+ *mc_size_cells = of_n_size_cells(mc_node);
-+
-+ range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
-+ *mc_size_cells;
-+
-+ tuple_len = range_tuple_cell_count * sizeof(__be32);
-+ if (ranges_len % tuple_len != 0) {
-+ dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
-+ return -EINVAL;
-+ }
-+
-+ return ranges_len / tuple_len;
-+}
-+
-+static int get_mc_addr_translation_ranges(struct device *dev,
-+ struct fsl_mc_addr_translation_range
-+ **ranges,
-+ u8 *num_ranges)
-+{
-+ int ret;
-+ int paddr_cells;
-+ int mc_addr_cells;
-+ int mc_size_cells;
-+ int i;
-+ const __be32 *ranges_start;
-+ const __be32 *cell;
-+
-+ ret = parse_mc_ranges(dev,
-+ &paddr_cells,
-+ &mc_addr_cells,
-+ &mc_size_cells,
-+ &ranges_start);
-+ if (ret < 0)
-+ return ret;
-+
-+ *num_ranges = ret;
-+ if (!ret) {
-+ /*
-+ * Missing or empty ranges property ("ranges;") for the
-+ * 'fsl,qoriq-mc' node. In this case, identity mapping
-+ * will be used.
-+ */
-+ *ranges = NULL;
-+ return 0;
-+ }
-+
-+ *ranges = devm_kcalloc(dev, *num_ranges,
-+ sizeof(struct fsl_mc_addr_translation_range),
-+ GFP_KERNEL);
-+ if (!(*ranges))
-+ return -ENOMEM;
-+
-+ cell = ranges_start;
-+ for (i = 0; i < *num_ranges; ++i) {
-+ struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
-+
-+ range->mc_region_type = of_read_number(cell, 1);
-+ range->start_mc_offset = of_read_number(cell + 1,
-+ mc_addr_cells - 1);
-+ cell += mc_addr_cells;
-+ range->start_phys_addr = of_read_number(cell, paddr_cells);
-+ cell += paddr_cells;
-+ range->end_mc_offset = range->start_mc_offset +
-+ of_read_number(cell, mc_size_cells);
-+
-+ cell += mc_size_cells;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * fsl_mc_bus_probe - callback invoked when the root MC bus is being
-+ * added
-+ */
-+static int fsl_mc_bus_probe(struct platform_device *pdev)
-+{
-+ struct fsl_mc_obj_desc obj_desc;
-+ int error;
-+ struct fsl_mc *mc;
-+ struct fsl_mc_device *mc_bus_dev = NULL;
-+ struct fsl_mc_io *mc_io = NULL;
-+ struct fsl_mc_bus *mc_bus = NULL;
-+ int container_id;
-+ phys_addr_t mc_portal_phys_addr;
-+ u32 mc_portal_size;
-+ struct mc_version mc_version;
-+ struct resource res;
-+
-+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
-+ if (!mc)
-+ return -ENOMEM;
-+
-+ platform_set_drvdata(pdev, mc);
-+
-+ /*
-+ * Get physical address of MC portal for the root DPRC:
-+ */
-+ error = of_address_to_resource(pdev->dev.of_node, 0, &res);
-+ if (error < 0) {
-+ dev_err(&pdev->dev,
-+ "of_address_to_resource() failed for %pOF\n",
-+ pdev->dev.of_node);
-+ return error;
-+ }
-+
-+ mc_portal_phys_addr = res.start;
-+ mc_portal_size = resource_size(&res);
-+ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
-+ mc_portal_size, NULL,
-+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
-+ if (error < 0)
-+ return error;
-+
-+ error = mc_get_version(mc_io, 0, &mc_version);
-+ if (error != 0) {
-+ dev_err(&pdev->dev,
-+ "mc_get_version() failed with error %d\n", error);
-+ goto error_cleanup_mc_io;
-+ }
-+
-+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
-+ mc_version.major, mc_version.minor, mc_version.revision);
-+
-+ error = get_mc_addr_translation_ranges(&pdev->dev,
-+ &mc->translation_ranges,
-+ &mc->num_translation_ranges);
-+ if (error < 0)
-+ goto error_cleanup_mc_io;
-+
-+ error = dprc_get_container_id(mc_io, 0, &container_id);
-+ if (error < 0) {
-+ dev_err(&pdev->dev,
-+ "dprc_get_container_id() failed: %d\n", error);
-+ goto error_cleanup_mc_io;
-+ }
-+
-+ memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
-+ error = dprc_get_api_version(mc_io, 0,
-+ &obj_desc.ver_major,
-+ &obj_desc.ver_minor);
-+ if (error < 0)
-+ goto error_cleanup_mc_io;
-+
-+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
-+ strcpy(obj_desc.type, "dprc");
-+ obj_desc.id = container_id;
-+ obj_desc.irq_count = 1;
-+ obj_desc.region_count = 0;
-+
-+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL,
-+ &mc_bus_dev);
-+ if (error < 0)
-+ goto error_cleanup_mc_io;
-+
-+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+ error = fsl_mc_restool_create_device_file(mc_bus);
-+ if (error < 0)
-+ goto error_cleanup_device;
-+
-+ mc->root_mc_bus_dev = mc_bus_dev;
-+
-+ return 0;
-+
-+error_cleanup_device:
-+ fsl_mc_device_remove(mc_bus_dev);
-+
-+error_cleanup_mc_io:
-+ fsl_destroy_mc_io(mc_io);
-+ return error;
-+}
-+
-+/**
-+ * fsl_mc_bus_remove - callback invoked when the root MC bus is being
-+ * removed
-+ */
-+static int fsl_mc_bus_remove(struct platform_device *pdev)
-+{
-+ struct fsl_mc *mc = platform_get_drvdata(pdev);
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc->root_mc_bus_dev);
-+
-+ if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
-+ return -EINVAL;
-+
-+ fsl_mc_restool_remove_device_file(mc_bus);
-+ fsl_mc_device_remove(mc->root_mc_bus_dev);
-+
-+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
-+ mc->root_mc_bus_dev->mc_io = NULL;
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id fsl_mc_bus_match_table[] = {
-+ {.compatible = "fsl,qoriq-mc",},
-+ {},
-+};
-+
-+MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
-+
-+static struct platform_driver fsl_mc_bus_driver = {
-+ .driver = {
-+ .name = "fsl_mc_bus",
-+ .pm = NULL,
-+ .of_match_table = fsl_mc_bus_match_table,
-+ },
-+ .probe = fsl_mc_bus_probe,
-+ .remove = fsl_mc_bus_remove,
-+};
-+
-+static int __init fsl_mc_bus_driver_init(void)
-+{
-+ int error;
-+
-+ error = bus_register(&fsl_mc_bus_type);
-+ if (error < 0) {
-+ pr_err("bus type registration failed: %d\n", error);
-+ goto error_cleanup_cache;
-+ }
-+
-+ error = platform_driver_register(&fsl_mc_bus_driver);
-+ if (error < 0) {
-+ pr_err("platform_driver_register() failed: %d\n", error);
-+ goto error_cleanup_bus;
-+ }
-+
-+ error = dprc_driver_init();
-+ if (error < 0)
-+ goto error_cleanup_driver;
-+
-+ error = fsl_mc_allocator_driver_init();
-+ if (error < 0)
-+ goto error_cleanup_dprc_driver;
-+
-+ error = fsl_mc_restool_init();
-+ if (error < 0)
-+ goto error_cleanup_mc_allocator;
-+
-+ return 0;
-+
-+error_cleanup_mc_allocator:
-+ fsl_mc_allocator_driver_exit();
-+
-+error_cleanup_dprc_driver:
-+ dprc_driver_exit();
-+
-+error_cleanup_driver:
-+ platform_driver_unregister(&fsl_mc_bus_driver);
-+
-+error_cleanup_bus:
-+ bus_unregister(&fsl_mc_bus_type);
-+
-+error_cleanup_cache:
-+ return error;
-+}
-+postcore_initcall(fsl_mc_bus_driver_init);
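
For orientation, fsl_mc_bus_driver_init() above brings up the bus type, the platform driver, the DPRC driver, the allocator and the restool support in that order, and unwinds them in reverse on any failure. Below is a minimal, standalone sketch of that goto-unwind idiom with hypothetical register_a/b/c steps (not part of the patch), just to spell out why each error label undoes exactly the steps that already succeeded:

/* Standalone sketch (hypothetical subsystems a/b/c) of the goto-unwind
 * registration pattern used by fsl_mc_bus_driver_init() above. */
#include <stdio.h>

static int register_a(void) { puts("a up"); return 0; }
static int register_b(void) { puts("b up"); return 0; }
static int register_c(void) { puts("c up"); return -1; /* simulate failure */ }
static void unregister_b(void) { puts("b down"); }
static void unregister_a(void) { puts("a down"); }

static int example_init(void)
{
    int error;

    error = register_a();
    if (error)
        return error;           /* nothing to undo yet */

    error = register_b();
    if (error)
        goto err_unregister_a;  /* undo step 1 only */

    error = register_c();
    if (error)
        goto err_unregister_b;  /* undo steps 2 and 1, in reverse order */

    return 0;

err_unregister_b:
    unregister_b();
err_unregister_a:
    unregister_a();
    return error;
}

int main(void)
{
    return example_init() ? 1 : 0;
}
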
---- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
-+++ /dev/null
-@@ -1,285 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/*
-- * Freescale Management Complex (MC) bus driver MSI support
-- *
-- * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
-- * Author: German Rivera <German.Rivera@freescale.com>
-- *
-- */
--
--#include <linux/of_device.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/irq.h>
--#include <linux/irqdomain.h>
--#include <linux/msi.h>
--#include "fsl-mc-private.h"
--
--#ifdef GENERIC_MSI_DOMAIN_OPS
--/*
-- * Generate a unique ID identifying the interrupt (only used within the MSI
-- * irqdomain). Combine the icid with the interrupt index.
-- */
--static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
-- struct msi_desc *desc)
--{
-- /*
-- * Make the base hwirq value ICID*10000 so it is readable
-- * as a decimal value in /proc/interrupts.
-- */
-- return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
--}
--
--static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
-- struct msi_desc *desc)
--{
-- arg->desc = desc;
-- arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
-- desc);
--}
--#else
--#define fsl_mc_msi_set_desc NULL
--#endif
--
--static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
--{
-- struct msi_domain_ops *ops = info->ops;
--
-- if (WARN_ON(!ops))
-- return;
--
-- /*
-- * set_desc should not be set by the caller
-- */
-- if (!ops->set_desc)
-- ops->set_desc = fsl_mc_msi_set_desc;
--}
--
--static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
-- struct fsl_mc_device_irq *mc_dev_irq)
--{
-- int error;
-- struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
-- struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
-- struct dprc_irq_cfg irq_cfg;
--
-- /*
-- * msi_desc->msg.address is 0x0 when this function is invoked in
-- * the free_irq() code path. In this case, for the MC, we don't
-- * really need to "unprogram" the MSI, so we just return.
-- */
-- if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
-- return;
--
-- if (WARN_ON(!owner_mc_dev))
-- return;
--
-- irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
-- msi_desc->msg.address_lo;
-- irq_cfg.val = msi_desc->msg.data;
-- irq_cfg.irq_num = msi_desc->irq;
--
-- if (owner_mc_dev == mc_bus_dev) {
-- /*
-- * IRQ is for the mc_bus_dev's DPRC itself
-- */
-- error = dprc_set_irq(mc_bus_dev->mc_io,
-- MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
-- mc_bus_dev->mc_handle,
-- mc_dev_irq->dev_irq_index,
-- &irq_cfg);
-- if (error < 0) {
-- dev_err(&owner_mc_dev->dev,
-- "dprc_set_irq() failed: %d\n", error);
-- }
-- } else {
-- /*
-- * IRQ is for a child device of mc_bus_dev
-- */
-- error = dprc_set_obj_irq(mc_bus_dev->mc_io,
-- MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
-- mc_bus_dev->mc_handle,
-- owner_mc_dev->obj_desc.type,
-- owner_mc_dev->obj_desc.id,
-- mc_dev_irq->dev_irq_index,
-- &irq_cfg);
-- if (error < 0) {
-- dev_err(&owner_mc_dev->dev,
-- "dprc_obj_set_irq() failed: %d\n", error);
-- }
-- }
--}
--
--/*
-- * NOTE: This function is invoked with interrupts disabled
-- */
--static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
-- struct msi_msg *msg)
--{
-- struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
-- struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
-- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-- struct fsl_mc_device_irq *mc_dev_irq =
-- &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
--
-- WARN_ON(mc_dev_irq->msi_desc != msi_desc);
-- msi_desc->msg = *msg;
--
-- /*
-- * Program the MSI (paddr, value) pair in the device:
-- */
-- __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
--}
--
--static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
--{
-- struct irq_chip *chip = info->chip;
--
-- if (WARN_ON((!chip)))
-- return;
--
-- /*
-- * irq_write_msi_msg should not be set by the caller
-- */
-- if (!chip->irq_write_msi_msg)
-- chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
--}
--
--/**
-- * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
-- * @np: Optional device-tree node of the interrupt controller
-- * @info: MSI domain info
-- * @parent: Parent irq domain
-- *
-- * Updates the domain and chip ops and creates a fsl-mc MSI
-- * interrupt domain.
-- *
-- * Returns:
-- * A domain pointer or NULL in case of failure.
-- */
--struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
-- struct msi_domain_info *info,
-- struct irq_domain *parent)
--{
-- struct irq_domain *domain;
--
-- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
-- fsl_mc_msi_update_dom_ops(info);
-- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
-- fsl_mc_msi_update_chip_ops(info);
--
-- domain = msi_create_irq_domain(fwnode, info, parent);
-- if (domain)
-- irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
--
-- return domain;
--}
--
--int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
-- struct irq_domain **mc_msi_domain)
--{
-- struct irq_domain *msi_domain;
-- struct device_node *mc_of_node = mc_platform_dev->of_node;
--
-- msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
-- DOMAIN_BUS_FSL_MC_MSI);
-- if (!msi_domain) {
-- pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
-- mc_of_node);
--
-- return -ENOENT;
-- }
--
-- *mc_msi_domain = msi_domain;
-- return 0;
--}
--
--static void fsl_mc_msi_free_descs(struct device *dev)
--{
-- struct msi_desc *desc, *tmp;
--
-- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
-- list_del(&desc->list);
-- free_msi_entry(desc);
-- }
--}
--
--static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
--
--{
-- unsigned int i;
-- int error;
-- struct msi_desc *msi_desc;
--
-- for (i = 0; i < irq_count; i++) {
-- msi_desc = alloc_msi_entry(dev, 1, NULL);
-- if (!msi_desc) {
-- dev_err(dev, "Failed to allocate msi entry\n");
-- error = -ENOMEM;
-- goto cleanup_msi_descs;
-- }
--
-- msi_desc->fsl_mc.msi_index = i;
-- INIT_LIST_HEAD(&msi_desc->list);
-- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
-- }
--
-- return 0;
--
--cleanup_msi_descs:
-- fsl_mc_msi_free_descs(dev);
-- return error;
--}
--
--int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
-- unsigned int irq_count)
--{
-- struct irq_domain *msi_domain;
-- int error;
--
-- if (WARN_ON(!list_empty(dev_to_msi_list(dev))))
-- return -EINVAL;
--
-- error = fsl_mc_msi_alloc_descs(dev, irq_count);
-- if (error < 0)
-- return error;
--
-- msi_domain = dev_get_msi_domain(dev);
-- if (WARN_ON(!msi_domain)) {
-- error = -EINVAL;
-- goto cleanup_msi_descs;
-- }
--
-- /*
-- * NOTE: Calling this function will trigger the invocation of the
-- * its_fsl_mc_msi_prepare() callback
-- */
-- error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
--
-- if (error) {
-- dev_err(dev, "Failed to allocate IRQs\n");
-- goto cleanup_msi_descs;
-- }
--
-- return 0;
--
--cleanup_msi_descs:
-- fsl_mc_msi_free_descs(dev);
-- return error;
--}
--
--void fsl_mc_msi_domain_free_irqs(struct device *dev)
--{
-- struct irq_domain *msi_domain;
--
-- msi_domain = dev_get_msi_domain(dev);
-- if (WARN_ON(!msi_domain))
-- return;
--
-- msi_domain_free_irqs(msi_domain, dev);
--
-- if (WARN_ON(list_empty(dev_to_msi_list(dev))))
-- return;
--
-- fsl_mc_msi_free_descs(dev);
--}
---- /dev/null
-+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
-@@ -0,0 +1,285 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Freescale Management Complex (MC) bus driver MSI support
-+ *
-+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
-+ * Author: German Rivera <German.Rivera@freescale.com>
-+ *
-+ */
-+
-+#include <linux/of_device.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/irq.h>
-+#include <linux/irqdomain.h>
-+#include <linux/msi.h>
-+
-+#include "fsl-mc-private.h"
-+
-+#ifdef GENERIC_MSI_DOMAIN_OPS
-+/*
-+ * Generate a unique ID identifying the interrupt (only used within the MSI
-+ * irqdomain). Combine the icid with the interrupt index.
-+ */
-+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
-+ struct msi_desc *desc)
-+{
-+ /*
-+ * Make the base hwirq value ICID*10000 so it is readable
-+ * as a decimal value in /proc/interrupts.
-+ */
-+ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
-+}
-+
-+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
-+ struct msi_desc *desc)
-+{
-+ arg->desc = desc;
-+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
-+ desc);
-+}
-+#else
-+#define fsl_mc_msi_set_desc NULL
-+#endif
-+
-+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
-+{
-+ struct msi_domain_ops *ops = info->ops;
-+
-+ if (!ops)
-+ return;
-+
-+ /*
-+ * set_desc should not be set by the caller
-+ */
-+ if (!ops->set_desc)
-+ ops->set_desc = fsl_mc_msi_set_desc;
-+}
-+
-+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
-+ struct fsl_mc_device_irq *mc_dev_irq)
-+{
-+ int error;
-+ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
-+ struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
-+ struct dprc_irq_cfg irq_cfg;
-+
-+ /*
-+ * msi_desc->msg.address is 0x0 when this function is invoked in
-+ * the free_irq() code path. In this case, for the MC, we don't
-+ * really need to "unprogram" the MSI, so we just return.
-+ */
-+ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
-+ return;
-+
-+ if (!owner_mc_dev)
-+ return;
-+
-+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
-+ msi_desc->msg.address_lo;
-+ irq_cfg.val = msi_desc->msg.data;
-+ irq_cfg.irq_num = msi_desc->irq;
-+
-+ if (owner_mc_dev == mc_bus_dev) {
-+ /*
-+ * IRQ is for the mc_bus_dev's DPRC itself
-+ */
-+ error = dprc_set_irq(mc_bus_dev->mc_io,
-+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
-+ mc_bus_dev->mc_handle,
-+ mc_dev_irq->dev_irq_index,
-+ &irq_cfg);
-+ if (error < 0) {
-+ dev_err(&owner_mc_dev->dev,
-+ "dprc_set_irq() failed: %d\n", error);
-+ }
-+ } else {
-+ /*
-+ * IRQ is for a child device of mc_bus_dev
-+ */
-+ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
-+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
-+ mc_bus_dev->mc_handle,
-+ owner_mc_dev->obj_desc.type,
-+ owner_mc_dev->obj_desc.id,
-+ mc_dev_irq->dev_irq_index,
-+ &irq_cfg);
-+ if (error < 0) {
-+ dev_err(&owner_mc_dev->dev,
-+ "dprc_obj_set_irq() failed: %d\n", error);
-+ }
-+ }
-+}
-+
-+/*
-+ * NOTE: This function is invoked with interrupts disabled
-+ */
-+static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
-+ struct msi_msg *msg)
-+{
-+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
-+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
-+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+ struct fsl_mc_device_irq *mc_dev_irq =
-+ &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
-+
-+ msi_desc->msg = *msg;
-+
-+ /*
-+ * Program the MSI (paddr, value) pair in the device:
-+ */
-+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
-+}
-+
-+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
-+{
-+ struct irq_chip *chip = info->chip;
-+
-+ if (!chip)
-+ return;
-+
-+ /*
-+ * irq_write_msi_msg should not be set by the caller
-+ */
-+ if (!chip->irq_write_msi_msg)
-+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
-+}
-+
-+/**
-+ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
-+ * @np: Optional device-tree node of the interrupt controller
-+ * @info: MSI domain info
-+ * @parent: Parent irq domain
-+ *
-+ * Updates the domain and chip ops and creates a fsl-mc MSI
-+ * interrupt domain.
-+ *
-+ * Returns:
-+ * A domain pointer or NULL in case of failure.
-+ */
-+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
-+ struct msi_domain_info *info,
-+ struct irq_domain *parent)
-+{
-+ struct irq_domain *domain;
-+
-+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
-+ fsl_mc_msi_update_dom_ops(info);
-+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
-+ fsl_mc_msi_update_chip_ops(info);
-+
-+ domain = msi_create_irq_domain(fwnode, info, parent);
-+ if (domain)
-+ irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
-+
-+ return domain;
-+}
-+
-+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
-+ struct irq_domain **mc_msi_domain)
-+{
-+ struct irq_domain *msi_domain;
-+ struct device_node *mc_of_node = mc_platform_dev->of_node;
-+
-+ msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
-+ DOMAIN_BUS_FSL_MC_MSI);
-+ if (!msi_domain) {
-+ pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
-+ mc_of_node);
-+
-+ return -ENOENT;
-+ }
-+
-+ *mc_msi_domain = msi_domain;
-+ return 0;
-+}
-+
-+static void fsl_mc_msi_free_descs(struct device *dev)
-+{
-+ struct msi_desc *desc, *tmp;
-+
-+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
-+ list_del(&desc->list);
-+ free_msi_entry(desc);
-+ }
-+}
-+
-+static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
-+
-+{
-+ unsigned int i;
-+ int error;
-+ struct msi_desc *msi_desc;
-+
-+ for (i = 0; i < irq_count; i++) {
-+ msi_desc = alloc_msi_entry(dev, 1, NULL);
-+ if (!msi_desc) {
-+ dev_err(dev, "Failed to allocate msi entry\n");
-+ error = -ENOMEM;
-+ goto cleanup_msi_descs;
-+ }
-+
-+ msi_desc->fsl_mc.msi_index = i;
-+ INIT_LIST_HEAD(&msi_desc->list);
-+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
-+ }
-+
-+ return 0;
-+
-+cleanup_msi_descs:
-+ fsl_mc_msi_free_descs(dev);
-+ return error;
-+}
-+
-+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
-+ unsigned int irq_count)
-+{
-+ struct irq_domain *msi_domain;
-+ int error;
-+
-+ if (!list_empty(dev_to_msi_list(dev)))
-+ return -EINVAL;
-+
-+ error = fsl_mc_msi_alloc_descs(dev, irq_count);
-+ if (error < 0)
-+ return error;
-+
-+ msi_domain = dev_get_msi_domain(dev);
-+ if (!msi_domain) {
-+ error = -EINVAL;
-+ goto cleanup_msi_descs;
-+ }
-+
-+ /*
-+ * NOTE: Calling this function will trigger the invocation of the
-+ * its_fsl_mc_msi_prepare() callback
-+ */
-+ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
-+
-+ if (error) {
-+ dev_err(dev, "Failed to allocate IRQs\n");
-+ goto cleanup_msi_descs;
-+ }
-+
-+ return 0;
-+
-+cleanup_msi_descs:
-+ fsl_mc_msi_free_descs(dev);
-+ return error;
-+}
-+
-+void fsl_mc_msi_domain_free_irqs(struct device *dev)
-+{
-+ struct irq_domain *msi_domain;
-+
-+ msi_domain = dev_get_msi_domain(dev);
-+ if (!msi_domain)
-+ return;
-+
-+ msi_domain_free_irqs(msi_domain, dev);
-+
-+ if (list_empty(dev_to_msi_list(dev)))
-+ return;
-+
-+ fsl_mc_msi_free_descs(dev);
-+}
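
The helpers in fsl-mc-msi.c are consumed by an irqchip glue layer (the GIC-v3 ITS glue added further down in this patch): the glue builds a struct msi_domain_info and lets fsl_mc_msi_create_irq_domain() install the default set_desc and irq_write_msi_msg hooks. A rough sketch of such a caller, with hypothetical example_* names and simplified error handling (not the in-tree implementation):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/fsl/mc.h>

/* Hypothetical glue; mirrors the shape of the ITS-based caller. */
static struct irq_chip example_fsl_mc_msi_chip = {
    .name = "example-fMSI",
    .irq_mask = irq_chip_mask_parent,
    .irq_unmask = irq_chip_unmask_parent,
    .irq_eoi = irq_chip_eoi_parent,
    .irq_set_affinity = msi_domain_set_affinity,
};

/* Left mostly empty on purpose: with MSI_FLAG_USE_DEF_DOM_OPS/CHIP_OPS the
 * MSI core and the helpers above fill in set_desc and irq_write_msi_msg. */
static struct msi_domain_ops example_fsl_mc_msi_ops;

static struct msi_domain_info example_fsl_mc_msi_info = {
    .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
    .ops = &example_fsl_mc_msi_ops,
    .chip = &example_fsl_mc_msi_chip,
};

static int example_create_fsl_mc_msi_domain(struct device_node *np,
                                            struct irq_domain *parent)
{
    struct irq_domain *d;

    /* Installs fsl_mc_msi_set_desc/fsl_mc_msi_write_msg, then creates the
     * domain with the DOMAIN_BUS_FSL_MC_MSI bus token. */
    d = fsl_mc_msi_create_irq_domain(of_node_to_fwnode(np),
                                     &example_fsl_mc_msi_info, parent);
    return d ? 0 : -ENOMEM;
}
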
---- /dev/null
-+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
-@@ -0,0 +1,223 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * Freescale Management Complex (MC) bus private declarations
-+ *
-+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
-+ *
-+ */
-+#ifndef _FSL_MC_PRIVATE_H_
-+#define _FSL_MC_PRIVATE_H_
-+
-+#include <linux/fsl/mc.h>
-+#include <linux/mutex.h>
-+#include <linux/cdev.h>
-+#include <linux/ioctl.h>
-+
-+/*
-+ * Data Path Management Complex (DPMNG) General API
-+ */
-+
-+/* DPMNG command versioning */
-+#define DPMNG_CMD_BASE_VERSION 1
-+#define DPMNG_CMD_ID_OFFSET 4
-+
-+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
-+
-+/* DPMNG command IDs */
-+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
-+
-+struct dpmng_rsp_get_version {
-+ __le32 revision;
-+ __le32 version_major;
-+ __le32 version_minor;
-+};
-+
-+/*
-+ * Data Path Management Command Portal (DPMCP) API
-+ */
-+
-+/* Minimal supported DPMCP Version */
-+#define DPMCP_MIN_VER_MAJOR 3
-+#define DPMCP_MIN_VER_MINOR 0
-+
-+/* DPMCP command versioning */
-+#define DPMCP_CMD_BASE_VERSION 1
-+#define DPMCP_CMD_ID_OFFSET 4
-+
-+#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
-+
-+/* DPMCP command IDs */
-+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
-+#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
-+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
-+
-+struct dpmcp_cmd_open {
-+ __le32 dpmcp_id;
-+};
-+
-+/*
-+ * Initialization and runtime control APIs for DPMCP
-+ */
-+int dpmcp_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpmcp_id,
-+ u16 *token);
-+
-+int dpmcp_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpmcp_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/*
-+ * Data Path Buffer Pool (DPBP) API
-+ */
-+
-+/* DPBP Version */
-+#define DPBP_VER_MAJOR 3
-+#define DPBP_VER_MINOR 2
-+
-+/* Command versioning */
-+#define DPBP_CMD_BASE_VERSION 1
-+#define DPBP_CMD_ID_OFFSET 4
-+
-+#define DPBP_CMD(id) (((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
-+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
-+
-+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
-+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
-+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
-+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
-+
-+struct dpbp_cmd_open {
-+ __le32 dpbp_id;
-+};
-+
-+#define DPBP_ENABLE 0x1
-+
-+struct dpbp_rsp_get_attributes {
-+ /* response word 0 */
-+ __le16 pad;
-+ __le16 bpid;
-+ __le32 id;
-+ /* response word 1 */
-+ __le16 version_major;
-+ __le16 version_minor;
-+};
-+
-+/*
-+ * Data Path Concentrator (DPCON) API
-+ */
-+
-+/* DPCON Version */
-+#define DPCON_VER_MAJOR 3
-+#define DPCON_VER_MINOR 2
-+
-+/* Command versioning */
-+#define DPCON_CMD_BASE_VERSION 1
-+#define DPCON_CMD_ID_OFFSET 4
-+
-+#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
-+#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
-+
-+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
-+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
-+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
-+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
-+
-+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
-+
-+struct dpcon_cmd_open {
-+ __le32 dpcon_id;
-+};
-+
-+#define DPCON_ENABLE 1
-+
-+struct dpcon_rsp_get_attr {
-+ /* response word 0 */
-+ __le32 id;
-+ __le16 qbman_ch_id;
-+ u8 num_priorities;
-+ u8 pad;
-+};
-+
-+struct dpcon_cmd_set_notification {
-+ /* cmd word 0 */
-+ __le32 dpio_id;
-+ u8 priority;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ __le64 user_ctx;
-+};
-+
-+int __must_check fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
-+ struct fsl_mc_io *mc_io,
-+ struct device *parent_dev,
-+ const char *driver_override,
-+ struct fsl_mc_device **new_mc_dev);
-+
-+int __init dprc_driver_init(void);
-+
-+void dprc_driver_exit(void);
-+
-+int __init fsl_mc_allocator_driver_init(void);
-+
-+void fsl_mc_allocator_driver_exit(void);
-+
-+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
-+ enum fsl_mc_pool_type pool_type,
-+ struct fsl_mc_resource
-+ **new_resource);
-+
-+void fsl_mc_resource_free(struct fsl_mc_resource *resource);
-+
-+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
-+ unsigned int irq_count);
-+
-+void fsl_mc_msi_domain_free_irqs(struct device *dev);
-+
-+int __must_check fsl_create_mc_io(struct device *dev,
-+ phys_addr_t mc_portal_phys_addr,
-+ u32 mc_portal_size,
-+ struct fsl_mc_device *dpmcp_dev,
-+ u32 flags, struct fsl_mc_io **new_mc_io);
-+
-+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
-+
-+bool fsl_mc_is_root_dprc(struct device *dev);
-+
-+#ifdef CONFIG_FSL_MC_RESTOOL
-+
-+int fsl_mc_restool_create_device_file(struct fsl_mc_bus *mc_bus);
-+
-+void fsl_mc_restool_remove_device_file(struct fsl_mc_bus *mc_bus);
-+
-+int fsl_mc_restool_init(void);
-+
-+#else
-+
-+static inline int fsl_mc_restool_create_device_file(struct fsl_mc_bus *mc_bus)
-+{
-+ return 0;
-+}
-+
-+static inline void fsl_mc_restool_remove_device_file(struct fsl_mc_bus *mc_bus)
-+{
-+}
-+
-+static inline int fsl_mc_restool_init(void)
-+{
-+ return 0;
-+}
-+
-+#endif
-+
-+#endif /* _FSL_MC_PRIVATE_H_ */
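
As an illustration of the command-ID encoding used throughout this header (only the arithmetic implied by the macros, nothing new):

/*
 * DPMCP_CMDID_OPEN == DPMCP_CMD(0x80b)
 *                  == (0x80b << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION
 *                  == (0x80b << 4) | 1
 *                  == 0x80b1
 *
 * The low 4 bits of every command ID carry the command version; the
 * remaining bits carry the raw command number. The DPBP_CMD() and
 * DPCON_CMD() macros encode their IDs the same way.
 */
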
---- /dev/null
-+++ b/drivers/bus/fsl-mc/fsl-mc-restool.c
-@@ -0,0 +1,219 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Management Complex (MC) restool support
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ */
-+
-+#include <linux/slab.h>
-+#include <linux/cdev.h>
-+#include <linux/fs.h>
-+#include <linux/uaccess.h>
-+
-+#include "fsl-mc-private.h"
-+
-+#define FSL_MC_BUS_MAX_MINORS 1
-+
-+static struct class *fsl_mc_bus_class;
-+static int fsl_mc_bus_major;
-+
-+static int fsl_mc_restool_send_command(unsigned long arg,
-+ struct fsl_mc_io *mc_io)
-+{
-+ struct fsl_mc_command mc_cmd;
-+ int error;
-+
-+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
-+ if (error)
-+ return -EFAULT;
-+
-+ error = mc_send_command(mc_io, &mc_cmd);
-+ if (error)
-+ return error;
-+
-+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
-+ if (error)
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+int fsl_mc_restool_init(void)
-+{
-+ dev_t dev;
-+ int error;
-+
-+ fsl_mc_bus_class = class_create(THIS_MODULE, "fsl_mc_bus");
-+ if (IS_ERR(fsl_mc_bus_class)) {
-+ error = PTR_ERR(fsl_mc_bus_class);
-+ return error;
-+ }
-+
-+ error = alloc_chrdev_region(&dev, 0,
-+ FSL_MC_BUS_MAX_MINORS,
-+ "fsl_mc_bus");
-+ if (error < 0)
-+ return error;
-+
-+ fsl_mc_bus_major = MAJOR(dev);
-+
-+ return 0;
-+}
-+
-+static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep)
-+{
-+ struct fsl_mc_device *root_mc_device;
-+ struct fsl_mc_restool *mc_restool;
-+ struct fsl_mc_bus *mc_bus;
-+ struct fsl_mc_io *dynamic_mc_io;
-+ int error;
-+
-+ mc_restool = container_of(inode->i_cdev, struct fsl_mc_restool, cdev);
-+ mc_bus = container_of(mc_restool, struct fsl_mc_bus, restool_misc);
-+ root_mc_device = &mc_bus->mc_dev;
-+
-+ mutex_lock(&mc_restool->mutex);
-+
-+ if (!mc_restool->local_instance_in_use) {
-+ filep->private_data = root_mc_device->mc_io;
-+ mc_restool->local_instance_in_use = true;
-+ } else {
-+ dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL);
-+ if (!dynamic_mc_io) {
-+ error = -ENOMEM;
-+ goto error_alloc_mc_io;
-+ }
-+
-+ error = fsl_mc_portal_allocate(root_mc_device, 0,
-+ &dynamic_mc_io);
-+ if (error) {
-+ pr_err("Could not allocate MC portal\n");
-+ goto error_portal_allocate;
-+ }
-+
-+ mc_restool->dynamic_instance_count++;
-+ filep->private_data = dynamic_mc_io;
-+ }
-+
-+ mutex_unlock(&mc_restool->mutex);
-+
-+ return 0;
-+
-+error_portal_allocate:
-+ kfree(dynamic_mc_io);
-+
-+error_alloc_mc_io:
-+ mutex_unlock(&mc_restool->mutex);
-+
-+ return error;
-+}
-+
-+static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep)
-+{
-+ struct fsl_mc_device *root_mc_device;
-+ struct fsl_mc_restool *mc_restool;
-+ struct fsl_mc_bus *mc_bus;
-+ struct fsl_mc_io *mc_io;
-+
-+ mc_restool = container_of(inode->i_cdev, struct fsl_mc_restool, cdev);
-+ mc_bus = container_of(mc_restool, struct fsl_mc_bus, restool_misc);
-+ root_mc_device = &mc_bus->mc_dev;
-+ mc_io = filep->private_data;
-+
-+ mutex_lock(&mc_restool->mutex);
-+
-+ if (WARN_ON(!mc_restool->local_instance_in_use &&
-+ mc_restool->dynamic_instance_count == 0)) {
-+ mutex_unlock(&mc_restool->mutex);
-+ return -EINVAL;
-+ }
-+
-+ if (filep->private_data == root_mc_device->mc_io) {
-+ mc_restool->local_instance_in_use = false;
-+ } else {
-+ fsl_mc_portal_free(mc_io);
-+ kfree(mc_io);
-+ mc_restool->dynamic_instance_count--;
-+ }
-+
-+ filep->private_data = NULL;
-+ mutex_unlock(&mc_restool->mutex);
-+
-+ return 0;
-+}
-+
-+static long fsl_mc_restool_dev_ioctl(struct file *file,
-+ unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int error;
-+
-+ switch (cmd) {
-+ case RESTOOL_SEND_MC_COMMAND:
-+ error = fsl_mc_restool_send_command(arg, file->private_data);
-+ break;
-+ default:
-+ pr_err("%s: unexpected ioctl call number\n", __func__);
-+ error = -EINVAL;
-+ }
-+
-+ return error;
-+}
-+
-+static const struct file_operations fsl_mc_restool_dev_fops = {
-+ .owner = THIS_MODULE,
-+ .open = fsl_mc_restool_dev_open,
-+ .release = fsl_mc_restool_dev_release,
-+ .unlocked_ioctl = fsl_mc_restool_dev_ioctl,
-+};
-+
-+int fsl_mc_restool_create_device_file(struct fsl_mc_bus *mc_bus)
-+{
-+ struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
-+ struct fsl_mc_restool *mc_restool = &mc_bus->restool_misc;
-+ int error;
-+
-+ mc_restool = &mc_bus->restool_misc;
-+ mc_restool->dev = MKDEV(fsl_mc_bus_major, 0);
-+ cdev_init(&mc_restool->cdev, &fsl_mc_restool_dev_fops);
-+
-+ error = cdev_add(&mc_restool->cdev,
-+ mc_restool->dev,
-+ FSL_MC_BUS_MAX_MINORS);
-+ if (error)
-+ return error;
-+
-+ mc_restool->device = device_create(fsl_mc_bus_class,
-+ NULL,
-+ mc_restool->dev,
-+ NULL,
-+ "%s",
-+ dev_name(&mc_dev->dev));
-+ if (IS_ERR(mc_restool->device)) {
-+ error = PTR_ERR(mc_restool->device);
-+ goto error_device_create;
-+ }
-+
-+ mutex_init(&mc_restool->mutex);
-+
-+ return 0;
-+
-+error_device_create:
-+ cdev_del(&mc_restool->cdev);
-+
-+ return error;
-+}
-+
-+void fsl_mc_restool_remove_device_file(struct fsl_mc_bus *mc_bus)
-+{
-+ struct fsl_mc_restool *mc_restool = &mc_bus->restool_misc;
-+
-+ if (WARN_ON(mc_restool->local_instance_in_use))
-+ return;
-+
-+ if (WARN_ON(mc_restool->dynamic_instance_count != 0))
-+ return;
-+
-+ cdev_del(&mc_restool->cdev);
-+}
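
For context, a rough sketch of how a userspace tool might exercise this character device. The device node name, the UAPI header name and the availability of struct fsl_mc_command in userspace are assumptions for illustration only, not something defined by this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "fsl_mc_restool.h" /* hypothetical header providing RESTOOL_SEND_MC_COMMAND
                               and a userspace copy of struct fsl_mc_command */

int main(void)
{
    struct fsl_mc_command cmd;
    int fd;

    memset(&cmd, 0, sizeof(cmd));       /* tool fills cmd.header and cmd.params[] */

    fd = open("/dev/dprc.1", O_RDWR);   /* assumed node name from device_create() above */
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* The driver copies the command in, forwards it via mc_send_command(),
     * and copies the MC's response back into the same buffer. */
    if (ioctl(fd, RESTOOL_SEND_MC_COMMAND, &cmd) < 0)
        perror("ioctl");

    close(fd);
    return 0;
}
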
---- a/drivers/staging/fsl-mc/bus/mc-io.c
-+++ /dev/null
-@@ -1,292 +0,0 @@
--// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- */
--
--#include <linux/io.h>
--#include "../include/mc.h"
--
--#include "fsl-mc-private.h"
--#include "dpmcp.h"
--#include "dpmcp-cmd.h"
--
--static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
-- struct fsl_mc_device *dpmcp_dev)
--{
-- int error;
--
-- if (WARN_ON(!dpmcp_dev))
-- return -EINVAL;
--
-- if (WARN_ON(mc_io->dpmcp_dev))
-- return -EINVAL;
--
-- if (WARN_ON(dpmcp_dev->mc_io))
-- return -EINVAL;
--
-- error = dpmcp_open(mc_io,
-- 0,
-- dpmcp_dev->obj_desc.id,
-- &dpmcp_dev->mc_handle);
-- if (error < 0)
-- return error;
--
-- mc_io->dpmcp_dev = dpmcp_dev;
-- dpmcp_dev->mc_io = mc_io;
-- return 0;
--}
--
--static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
--{
-- int error;
-- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
--
-- if (WARN_ON(!dpmcp_dev))
-- return;
--
-- if (WARN_ON(dpmcp_dev->mc_io != mc_io))
-- return;
--
-- error = dpmcp_close(mc_io,
-- 0,
-- dpmcp_dev->mc_handle);
-- if (error < 0) {
-- dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
-- error);
-- }
--
-- mc_io->dpmcp_dev = NULL;
-- dpmcp_dev->mc_io = NULL;
--}
--
--/**
-- * Creates an MC I/O object
-- *
-- * @dev: device to be associated with the MC I/O object
-- * @mc_portal_phys_addr: physical address of the MC portal to use
-- * @mc_portal_size: size in bytes of the MC portal
-- * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
-- * object or NULL if none.
-- * @flags: flags for the new MC I/O object
-- * @new_mc_io: Area to return pointer to newly created MC I/O object
-- *
-- * Returns '0' on Success; Error code otherwise.
-- */
--int __must_check fsl_create_mc_io(struct device *dev,
-- phys_addr_t mc_portal_phys_addr,
-- u32 mc_portal_size,
-- struct fsl_mc_device *dpmcp_dev,
-- u32 flags, struct fsl_mc_io **new_mc_io)
--{
-- int error;
-- struct fsl_mc_io *mc_io;
-- void __iomem *mc_portal_virt_addr;
-- struct resource *res;
--
-- mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
-- if (!mc_io)
-- return -ENOMEM;
--
-- mc_io->dev = dev;
-- mc_io->flags = flags;
-- mc_io->portal_phys_addr = mc_portal_phys_addr;
-- mc_io->portal_size = mc_portal_size;
-- if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
-- spin_lock_init(&mc_io->spinlock);
-- else
-- mutex_init(&mc_io->mutex);
--
-- res = devm_request_mem_region(dev,
-- mc_portal_phys_addr,
-- mc_portal_size,
-- "mc_portal");
-- if (!res) {
-- dev_err(dev,
-- "devm_request_mem_region failed for MC portal %pa\n",
-- &mc_portal_phys_addr);
-- return -EBUSY;
-- }
--
-- mc_portal_virt_addr = devm_ioremap_nocache(dev,
-- mc_portal_phys_addr,
-- mc_portal_size);
-- if (!mc_portal_virt_addr) {
-- dev_err(dev,
-- "devm_ioremap_nocache failed for MC portal %pa\n",
-- &mc_portal_phys_addr);
-- return -ENXIO;
-- }
--
-- mc_io->portal_virt_addr = mc_portal_virt_addr;
-- if (dpmcp_dev) {
-- error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
-- if (error < 0)
-- goto error_destroy_mc_io;
-- }
--
-- *new_mc_io = mc_io;
-- return 0;
--
--error_destroy_mc_io:
-- fsl_destroy_mc_io(mc_io);
-- return error;
--}
--
--/**
-- * Destroys an MC I/O object
-- *
-- * @mc_io: MC I/O object to destroy
-- */
--void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
--{
-- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
--
-- if (dpmcp_dev)
-- fsl_mc_io_unset_dpmcp(mc_io);
--
-- devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
-- devm_release_mem_region(mc_io->dev,
-- mc_io->portal_phys_addr,
-- mc_io->portal_size);
--
-- mc_io->portal_virt_addr = NULL;
-- devm_kfree(mc_io->dev, mc_io);
--}
--
--/**
-- * fsl_mc_portal_allocate - Allocates an MC portal
-- *
-- * @mc_dev: MC device for which the MC portal is to be allocated
-- * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
-- * MC portal.
-- * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
-- * that wraps the allocated MC portal is to be returned
-- *
-- * This function allocates an MC portal from the device's parent DPRC,
-- * from the corresponding MC bus' pool of MC portals and wraps
-- * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
-- * portal is allocated from its own MC bus.
-- */
--int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
-- u16 mc_io_flags,
-- struct fsl_mc_io **new_mc_io)
--{
-- struct fsl_mc_device *mc_bus_dev;
-- struct fsl_mc_bus *mc_bus;
-- phys_addr_t mc_portal_phys_addr;
-- size_t mc_portal_size;
-- struct fsl_mc_device *dpmcp_dev;
-- int error = -EINVAL;
-- struct fsl_mc_resource *resource = NULL;
-- struct fsl_mc_io *mc_io = NULL;
--
-- if (mc_dev->flags & FSL_MC_IS_DPRC) {
-- mc_bus_dev = mc_dev;
-- } else {
-- if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
-- return error;
--
-- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-- }
--
-- mc_bus = to_fsl_mc_bus(mc_bus_dev);
-- *new_mc_io = NULL;
-- error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
-- if (error < 0)
-- return error;
--
-- error = -EINVAL;
-- dpmcp_dev = resource->data;
-- if (WARN_ON(!dpmcp_dev))
-- goto error_cleanup_resource;
--
-- if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
-- (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
-- dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
-- dev_err(&dpmcp_dev->dev,
-- "ERROR: Version %d.%d of DPMCP not supported.\n",
-- dpmcp_dev->obj_desc.ver_major,
-- dpmcp_dev->obj_desc.ver_minor);
-- error = -ENOTSUPP;
-- goto error_cleanup_resource;
-- }
--
-- if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
-- goto error_cleanup_resource;
--
-- mc_portal_phys_addr = dpmcp_dev->regions[0].start;
-- mc_portal_size = resource_size(dpmcp_dev->regions);
--
-- if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size))
-- goto error_cleanup_resource;
--
-- error = fsl_create_mc_io(&mc_bus_dev->dev,
-- mc_portal_phys_addr,
-- mc_portal_size, dpmcp_dev,
-- mc_io_flags, &mc_io);
-- if (error < 0)
-- goto error_cleanup_resource;
--
-- *new_mc_io = mc_io;
-- return 0;
--
--error_cleanup_resource:
-- fsl_mc_resource_free(resource);
-- return error;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
--
--/**
-- * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
-- * of a given MC bus
-- *
-- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
-- */
--void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
--{
-- struct fsl_mc_device *dpmcp_dev;
-- struct fsl_mc_resource *resource;
--
-- /*
-- * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
-- * to have a DPMCP object associated with it.
-- */
-- dpmcp_dev = mc_io->dpmcp_dev;
-- if (WARN_ON(!dpmcp_dev))
-- return;
--
-- resource = dpmcp_dev->resource;
-- if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP))
-- return;
--
-- if (WARN_ON(resource->data != dpmcp_dev))
-- return;
--
-- fsl_destroy_mc_io(mc_io);
-- fsl_mc_resource_free(resource);
--}
--EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
--
--/**
-- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
-- *
-- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
-- */
--int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
--{
-- int error;
-- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
--
-- if (WARN_ON(!dpmcp_dev))
-- return -EINVAL;
--
-- error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
-- if (error < 0) {
-- dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
-- return error;
-- }
--
-- return 0;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
---- /dev/null
-+++ b/drivers/bus/fsl-mc/mc-io.c
-@@ -0,0 +1,281 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ */
-+
-+#include <linux/io.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
-+ struct fsl_mc_device *dpmcp_dev)
-+{
-+ int error;
-+
-+ if (mc_io->dpmcp_dev)
-+ return -EINVAL;
-+
-+ if (dpmcp_dev->mc_io)
-+ return -EINVAL;
-+
-+ error = dpmcp_open(mc_io,
-+ 0,
-+ dpmcp_dev->obj_desc.id,
-+ &dpmcp_dev->mc_handle);
-+ if (error < 0)
-+ return error;
-+
-+ mc_io->dpmcp_dev = dpmcp_dev;
-+ dpmcp_dev->mc_io = mc_io;
-+ return 0;
-+}
-+
-+static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
-+{
-+ int error;
-+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-+
-+ error = dpmcp_close(mc_io,
-+ 0,
-+ dpmcp_dev->mc_handle);
-+ if (error < 0) {
-+ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
-+ error);
-+ }
-+
-+ mc_io->dpmcp_dev = NULL;
-+ dpmcp_dev->mc_io = NULL;
-+}
-+
-+/**
-+ * Creates an MC I/O object
-+ *
-+ * @dev: device to be associated with the MC I/O object
-+ * @mc_portal_phys_addr: physical address of the MC portal to use
-+ * @mc_portal_size: size in bytes of the MC portal
-+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
-+ * object or NULL if none.
-+ * @flags: flags for the new MC I/O object
-+ * @new_mc_io: Area to return pointer to newly created MC I/O object
-+ *
-+ * Returns '0' on Success; Error code otherwise.
-+ */
-+int __must_check fsl_create_mc_io(struct device *dev,
-+ phys_addr_t mc_portal_phys_addr,
-+ u32 mc_portal_size,
-+ struct fsl_mc_device *dpmcp_dev,
-+ u32 flags, struct fsl_mc_io **new_mc_io)
-+{
-+ int error;
-+ struct fsl_mc_io *mc_io;
-+ void __iomem *mc_portal_virt_addr;
-+ struct resource *res;
-+
-+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
-+ if (!mc_io)
-+ return -ENOMEM;
-+
-+ mc_io->dev = dev;
-+ mc_io->flags = flags;
-+ mc_io->portal_phys_addr = mc_portal_phys_addr;
-+ mc_io->portal_size = mc_portal_size;
-+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
-+ spin_lock_init(&mc_io->spinlock);
-+ else
-+ mutex_init(&mc_io->mutex);
-+
-+ res = devm_request_mem_region(dev,
-+ mc_portal_phys_addr,
-+ mc_portal_size,
-+ "mc_portal");
-+ if (!res) {
-+ dev_err(dev,
-+ "devm_request_mem_region failed for MC portal %pa\n",
-+ &mc_portal_phys_addr);
-+ return -EBUSY;
-+ }
-+
-+ mc_portal_virt_addr = devm_ioremap_nocache(dev,
-+ mc_portal_phys_addr,
-+ mc_portal_size);
-+ if (!mc_portal_virt_addr) {
-+ dev_err(dev,
-+ "devm_ioremap_nocache failed for MC portal %pa\n",
-+ &mc_portal_phys_addr);
-+ return -ENXIO;
-+ }
-+
-+ mc_io->portal_virt_addr = mc_portal_virt_addr;
-+ if (dpmcp_dev) {
-+ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
-+ if (error < 0)
-+ goto error_destroy_mc_io;
-+ }
-+
-+ *new_mc_io = mc_io;
-+ return 0;
-+
-+error_destroy_mc_io:
-+ fsl_destroy_mc_io(mc_io);
-+ return error;
-+}
-+
-+/**
-+ * Destroys an MC I/O object
-+ *
-+ * @mc_io: MC I/O object to destroy
-+ */
-+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
-+{
-+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-+
-+ if (dpmcp_dev)
-+ fsl_mc_io_unset_dpmcp(mc_io);
-+
-+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
-+ devm_release_mem_region(mc_io->dev,
-+ mc_io->portal_phys_addr,
-+ mc_io->portal_size);
-+
-+ mc_io->portal_virt_addr = NULL;
-+ devm_kfree(mc_io->dev, mc_io);
-+}
-+
-+/**
-+ * fsl_mc_portal_allocate - Allocates an MC portal
-+ *
-+ * @mc_dev: MC device for which the MC portal is to be allocated
-+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
-+ * MC portal.
-+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
-+ * that wraps the allocated MC portal is to be returned
-+ *
-+ * This function allocates an MC portal from the device's parent DPRC,
-+ * from the corresponding MC bus' pool of MC portals and wraps
-+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
-+ * portal is allocated from its own MC bus.
-+ */
-+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
-+ u16 mc_io_flags,
-+ struct fsl_mc_io **new_mc_io)
-+{
-+ struct fsl_mc_device *mc_bus_dev;
-+ struct fsl_mc_bus *mc_bus;
-+ phys_addr_t mc_portal_phys_addr;
-+ size_t mc_portal_size;
-+ struct fsl_mc_device *dpmcp_dev;
-+ int error = -EINVAL;
-+ struct fsl_mc_resource *resource = NULL;
-+ struct fsl_mc_io *mc_io = NULL;
-+
-+ if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
-+ mc_bus_dev = mc_dev;
-+ } else {
-+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
-+ return error;
-+
-+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-+ }
-+
-+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
-+ *new_mc_io = NULL;
-+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
-+ if (error < 0)
-+ return error;
-+
-+ error = -EINVAL;
-+ dpmcp_dev = resource->data;
-+
-+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
-+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
-+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
-+ dev_err(&dpmcp_dev->dev,
-+ "ERROR: Version %d.%d of DPMCP not supported.\n",
-+ dpmcp_dev->obj_desc.ver_major,
-+ dpmcp_dev->obj_desc.ver_minor);
-+ error = -ENOTSUPP;
-+ goto error_cleanup_resource;
-+ }
-+
-+ mc_portal_phys_addr = dpmcp_dev->regions[0].start;
-+ mc_portal_size = resource_size(dpmcp_dev->regions);
-+
-+ error = fsl_create_mc_io(&mc_bus_dev->dev,
-+ mc_portal_phys_addr,
-+ mc_portal_size, dpmcp_dev,
-+ mc_io_flags, &mc_io);
-+ if (error < 0)
-+ goto error_cleanup_resource;
-+
-+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
-+ &dpmcp_dev->dev,
-+ DL_FLAG_AUTOREMOVE_CONSUMER);
-+ if (!dpmcp_dev->consumer_link) {
-+ error = -EINVAL;
-+ goto error_cleanup_mc_io;
-+ }
-+
-+ *new_mc_io = mc_io;
-+ return 0;
-+
-+error_cleanup_mc_io:
-+ fsl_destroy_mc_io(mc_io);
-+error_cleanup_resource:
-+ fsl_mc_resource_free(resource);
-+ return error;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
-+
-+/**
-+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
-+ * of a given MC bus
-+ *
-+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
-+ */
-+void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
-+{
-+ struct fsl_mc_device *dpmcp_dev;
-+ struct fsl_mc_resource *resource;
-+
-+ /*
-+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
-+ * to have a DPMCP object associated with it.
-+ */
-+ dpmcp_dev = mc_io->dpmcp_dev;
-+
-+ resource = dpmcp_dev->resource;
-+ if (!resource || resource->type != FSL_MC_POOL_DPMCP)
-+ return;
-+
-+ if (resource->data != dpmcp_dev)
-+ return;
-+
-+ fsl_destroy_mc_io(mc_io);
-+ fsl_mc_resource_free(resource);
-+
-+ device_link_del(dpmcp_dev->consumer_link);
-+ dpmcp_dev->consumer_link = NULL;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
-+
-+/**
-+ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
-+ *
-+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
-+ */
-+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-+{
-+ int error;
-+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-+
-+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
-+ if (error < 0) {
-+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
-+ return error;
-+ }
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
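
A short sketch of how a child object driver typically consumes the portal allocator in its probe/remove path, using hypothetical example_* names; the DPAA2 drivers elsewhere in this patch set follow the same pattern:

#include <linux/fsl/mc.h>

static int example_probe(struct fsl_mc_device *mc_dev)
{
    int error;

    /* Borrow a DPMCP-backed portal from the parent DPRC's pool */
    error = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
    if (error) {
        dev_err(&mc_dev->dev, "fsl_mc_portal_allocate() failed: %d\n", error);
        return error;
    }

    /* ... object open and further MC commands go through mc_dev->mc_io ... */

    return 0;
}

static int example_remove(struct fsl_mc_device *mc_dev)
{
    /* Closes the DPMCP, unmaps the portal and returns it to the pool */
    fsl_mc_portal_free(mc_dev->mc_io);
    mc_dev->mc_io = NULL;
    return 0;
}
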
---- a/drivers/staging/fsl-mc/bus/mc-sys.c
-+++ /dev/null
-@@ -1,297 +0,0 @@
--// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- * I/O services to send MC commands to the MC hardware
-- *
-- */
--
--#include <linux/delay.h>
--#include <linux/slab.h>
--#include <linux/ioport.h>
--#include <linux/device.h>
--#include <linux/io.h>
--#include <linux/io-64-nonatomic-hi-lo.h>
--#include "../include/mc.h"
--
--#include "dpmcp.h"
--
--/**
-- * Timeout in milliseconds to wait for the completion of an MC command
-- */
--#define MC_CMD_COMPLETION_TIMEOUT_MS 500
--
--/*
-- * usleep_range() min and max values used to throttle down polling
-- * iterations while waiting for MC command completion
-- */
--#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
--#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
--
--static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
--{
-- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
--
-- return (enum mc_cmd_status)hdr->status;
--}
--
--static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
--{
-- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
-- u16 cmd_id = le16_to_cpu(hdr->cmd_id);
--
-- return cmd_id;
--}
--
--static int mc_status_to_error(enum mc_cmd_status status)
--{
-- static const int mc_status_to_error_map[] = {
-- [MC_CMD_STATUS_OK] = 0,
-- [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
-- [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
-- [MC_CMD_STATUS_DMA_ERR] = -EIO,
-- [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
-- [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
-- [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
-- [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
-- [MC_CMD_STATUS_BUSY] = -EBUSY,
-- [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
-- [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
-- };
--
-- if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map)))
-- return -EINVAL;
--
-- return mc_status_to_error_map[status];
--}
--
--static const char *mc_status_to_string(enum mc_cmd_status status)
--{
-- static const char *const status_strings[] = {
-- [MC_CMD_STATUS_OK] = "Command completed successfully",
-- [MC_CMD_STATUS_READY] = "Command ready to be processed",
-- [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
-- [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
-- [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
-- [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
-- [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
-- [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
-- [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
-- [MC_CMD_STATUS_BUSY] = "Device is busy",
-- [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
-- [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
-- };
--
-- if ((unsigned int)status >= ARRAY_SIZE(status_strings))
-- return "Unknown MC error";
--
-- return status_strings[status];
--}
--
--/**
-- * mc_write_command - writes a command to a Management Complex (MC) portal
-- *
-- * @portal: pointer to an MC portal
-- * @cmd: pointer to a filled command
-- */
--static inline void mc_write_command(struct mc_command __iomem *portal,
-- struct mc_command *cmd)
--{
-- int i;
--
-- /* copy command parameters into the portal */
-- for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-- /*
-- * Data is already in the expected LE byte-order. Do an
-- * extra LE -> CPU conversion so that the CPU -> LE done in
-- * the device io write api puts it back in the right order.
-- */
-- writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
--
-- /* submit the command by writing the header */
-- writeq(le64_to_cpu(cmd->header), &portal->header);
--}
--
--/**
-- * mc_read_response - reads the response for the last MC command from a
-- * Management Complex (MC) portal
-- *
-- * @portal: pointer to an MC portal
-- * @resp: pointer to command response buffer
-- *
-- * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
-- */
--static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
-- portal,
-- struct mc_command *resp)
--{
-- int i;
-- enum mc_cmd_status status;
--
-- /* Copy command response header from MC portal: */
-- resp->header = cpu_to_le64(readq_relaxed(&portal->header));
-- status = mc_cmd_hdr_read_status(resp);
-- if (status != MC_CMD_STATUS_OK)
-- return status;
--
-- /* Copy command response data from MC portal: */
-- for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-- /*
-- * Data is expected to be in LE byte-order. Do an
-- * extra CPU -> LE to revert the LE -> CPU done in
-- * the device io read api.
-- */
-- resp->params[i] =
-- cpu_to_le64(readq_relaxed(&portal->params[i]));
--
-- return status;
--}
--
--/**
-- * Waits for the completion of an MC command doing preemptible polling.
-- * usleep_range() is called between polling iterations.
-- *
-- * @mc_io: MC I/O object to be used
-- * @cmd: command buffer to receive MC response
-- * @mc_status: MC command completion status
-- */
--static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
-- struct mc_command *cmd,
-- enum mc_cmd_status *mc_status)
--{
-- enum mc_cmd_status status;
-- unsigned long jiffies_until_timeout =
-- jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
--
-- /*
-- * Wait for response from the MC hardware:
-- */
-- for (;;) {
-- status = mc_read_response(mc_io->portal_virt_addr, cmd);
-- if (status != MC_CMD_STATUS_READY)
-- break;
--
-- /*
-- * TODO: When MC command completion interrupts are supported
-- * call wait function here instead of usleep_range()
-- */
-- usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
-- MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
--
-- if (time_after_eq(jiffies, jiffies_until_timeout)) {
-- dev_dbg(mc_io->dev,
-- "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
-- &mc_io->portal_phys_addr,
-- (unsigned int)mc_cmd_hdr_read_token(cmd),
-- (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
--
-- return -ETIMEDOUT;
-- }
-- }
--
-- *mc_status = status;
-- return 0;
--}
--
--/**
-- * Waits for the completion of an MC command doing atomic polling.
-- * udelay() is called between polling iterations.
-- *
-- * @mc_io: MC I/O object to be used
-- * @cmd: command buffer to receive MC response
-- * @mc_status: MC command completion status
-- */
--static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
-- struct mc_command *cmd,
-- enum mc_cmd_status *mc_status)
--{
-- enum mc_cmd_status status;
-- unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
--
-- BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
-- MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
--
-- for (;;) {
-- status = mc_read_response(mc_io->portal_virt_addr, cmd);
-- if (status != MC_CMD_STATUS_READY)
-- break;
--
-- udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
-- timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
-- if (timeout_usecs == 0) {
-- dev_dbg(mc_io->dev,
-- "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
-- &mc_io->portal_phys_addr,
-- (unsigned int)mc_cmd_hdr_read_token(cmd),
-- (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
--
-- return -ETIMEDOUT;
-- }
-- }
--
-- *mc_status = status;
-- return 0;
--}
--
--/**
-- * Sends a command to the MC device using the given MC I/O object
-- *
-- * @mc_io: MC I/O object to be used
-- * @cmd: command to be sent
-- *
-- * Returns '0' on Success; Error code otherwise.
-- */
--int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
--{
-- int error;
-- enum mc_cmd_status status;
-- unsigned long irq_flags = 0;
--
-- if (WARN_ON(in_irq() &&
-- !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))
-- return -EINVAL;
--
-- if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
-- spin_lock_irqsave(&mc_io->spinlock, irq_flags);
-- else
-- mutex_lock(&mc_io->mutex);
--
-- /*
-- * Send command to the MC hardware:
-- */
-- mc_write_command(mc_io->portal_virt_addr, cmd);
--
-- /*
-- * Wait for response from the MC hardware:
-- */
-- if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
-- error = mc_polling_wait_preemptible(mc_io, cmd, &status);
-- else
-- error = mc_polling_wait_atomic(mc_io, cmd, &status);
--
-- if (error < 0)
-- goto common_exit;
--
-- if (status != MC_CMD_STATUS_OK) {
-- dev_dbg(mc_io->dev,
-- "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
-- &mc_io->portal_phys_addr,
-- (unsigned int)mc_cmd_hdr_read_token(cmd),
-- (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
-- mc_status_to_string(status),
-- (unsigned int)status);
--
-- error = mc_status_to_error(status);
-- goto common_exit;
-- }
--
-- error = 0;
--common_exit:
-- if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
-- spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
-- else
-- mutex_unlock(&mc_io->mutex);
--
-- return error;
--}
--EXPORT_SYMBOL(mc_send_command);
---- /dev/null
-+++ b/drivers/bus/fsl-mc/mc-sys.c
-@@ -0,0 +1,296 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * I/O services to send MC commands to the MC hardware
-+ *
-+ */
-+
-+#include <linux/delay.h>
-+#include <linux/slab.h>
-+#include <linux/ioport.h>
-+#include <linux/device.h>
-+#include <linux/io.h>
-+#include <linux/io-64-nonatomic-hi-lo.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "fsl-mc-private.h"
-+
-+/**
-+ * Timeout in milliseconds to wait for the completion of an MC command
-+ */
-+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
-+
-+/*
-+ * usleep_range() min and max values used to throttle down polling
-+ * iterations while waiting for MC command completion
-+ */
-+#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
-+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
-+
-+static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
-+{
-+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
-+
-+ return (enum mc_cmd_status)hdr->status;
-+}
-+
-+static u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
-+{
-+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
-+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
-+
-+ return cmd_id;
-+}
-+
-+static int mc_status_to_error(enum mc_cmd_status status)
-+{
-+ static const int mc_status_to_error_map[] = {
-+ [MC_CMD_STATUS_OK] = 0,
-+ [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
-+ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
-+ [MC_CMD_STATUS_DMA_ERR] = -EIO,
-+ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
-+ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
-+ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
-+ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
-+ [MC_CMD_STATUS_BUSY] = -EBUSY,
-+ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
-+ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
-+ };
-+
-+ if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
-+ return -EINVAL;
-+
-+ return mc_status_to_error_map[status];
-+}
-+
-+static const char *mc_status_to_string(enum mc_cmd_status status)
-+{
-+ static const char *const status_strings[] = {
-+ [MC_CMD_STATUS_OK] = "Command completed successfully",
-+ [MC_CMD_STATUS_READY] = "Command ready to be processed",
-+ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
-+ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
-+ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
-+ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
-+ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
-+ [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
-+ [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
-+ [MC_CMD_STATUS_BUSY] = "Device is busy",
-+ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
-+ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
-+ };
-+
-+ if ((unsigned int)status >= ARRAY_SIZE(status_strings))
-+ return "Unknown MC error";
-+
-+ return status_strings[status];
-+}
-+
-+/**
-+ * mc_write_command - writes a command to a Management Complex (MC) portal
-+ *
-+ * @portal: pointer to an MC portal
-+ * @cmd: pointer to a filled command
-+ */
-+static inline void mc_write_command(struct fsl_mc_command __iomem *portal,
-+ struct fsl_mc_command *cmd)
-+{
-+ int i;
-+
-+ /* copy command parameters into the portal */
-+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-+ /*
-+ * Data is already in the expected LE byte-order. Do an
-+ * extra LE -> CPU conversion so that the CPU -> LE done in
-+ * the device io write api puts it back in the right order.
-+ */
-+ writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
-+
-+ /* submit the command by writing the header */
-+ writeq(le64_to_cpu(cmd->header), &portal->header);
-+}
-+
-+/**
-+ * mc_read_response - reads the response for the last MC command from a
-+ * Management Complex (MC) portal
-+ *
-+ * @portal: pointer to an MC portal
-+ * @resp: pointer to command response buffer
-+ *
-+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
-+ */
-+static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
-+ *portal,
-+ struct fsl_mc_command *resp)
-+{
-+ int i;
-+ enum mc_cmd_status status;
-+
-+ /* Copy command response header from MC portal: */
-+ resp->header = cpu_to_le64(readq_relaxed(&portal->header));
-+ status = mc_cmd_hdr_read_status(resp);
-+ if (status != MC_CMD_STATUS_OK)
-+ return status;
-+
-+ /* Copy command response data from MC portal: */
-+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-+ /*
-+ * Data is expected to be in LE byte-order. Do an
-+ * extra CPU -> LE to revert the LE -> CPU done in
-+ * the device io read api.
-+ */
-+ resp->params[i] =
-+ cpu_to_le64(readq_relaxed(&portal->params[i]));
-+
-+ return status;
-+}
-+
-+/**
-+ * Waits for the completion of an MC command doing preemptible polling.
-+ * usleep_range() is called between polling iterations.
-+ *
-+ * @mc_io: MC I/O object to be used
-+ * @cmd: command buffer to receive MC response
-+ * @mc_status: MC command completion status
-+ */
-+static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
-+ struct fsl_mc_command *cmd,
-+ enum mc_cmd_status *mc_status)
-+{
-+ enum mc_cmd_status status;
-+ unsigned long jiffies_until_timeout =
-+ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
-+
-+ /*
-+ * Wait for response from the MC hardware:
-+ */
-+ for (;;) {
-+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
-+ if (status != MC_CMD_STATUS_READY)
-+ break;
-+
-+ /*
-+ * TODO: When MC command completion interrupts are supported
-+ * call wait function here instead of usleep_range()
-+ */
-+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
-+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
-+
-+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
-+ dev_dbg(mc_io->dev,
-+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
-+ &mc_io->portal_phys_addr,
-+ (unsigned int)mc_cmd_hdr_read_token(cmd),
-+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
-+
-+ return -ETIMEDOUT;
-+ }
-+ }
-+
-+ *mc_status = status;
-+ return 0;
-+}
-+
-+/**
-+ * Waits for the completion of an MC command doing atomic polling.
-+ * udelay() is called between polling iterations.
-+ *
-+ * @mc_io: MC I/O object to be used
-+ * @cmd: command buffer to receive MC response
-+ * @mc_status: MC command completion status
-+ */
-+static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
-+ struct fsl_mc_command *cmd,
-+ enum mc_cmd_status *mc_status)
-+{
-+ enum mc_cmd_status status;
-+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
-+
-+ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
-+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
-+
-+ for (;;) {
-+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
-+ if (status != MC_CMD_STATUS_READY)
-+ break;
-+
-+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
-+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
-+ if (timeout_usecs == 0) {
-+ dev_dbg(mc_io->dev,
-+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
-+ &mc_io->portal_phys_addr,
-+ (unsigned int)mc_cmd_hdr_read_token(cmd),
-+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
-+
-+ return -ETIMEDOUT;
-+ }
-+ }
-+
-+ *mc_status = status;
-+ return 0;
-+}
-+
-+/**
-+ * Sends a command to the MC device using the given MC I/O object
-+ *
-+ * @mc_io: MC I/O object to be used
-+ * @cmd: command to be sent
-+ *
-+ * Returns '0' on Success; Error code otherwise.
-+ */
-+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
-+{
-+ int error;
-+ enum mc_cmd_status status;
-+ unsigned long irq_flags = 0;
-+
-+ if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
-+ return -EINVAL;
-+
-+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
-+ spin_lock_irqsave(&mc_io->spinlock, irq_flags);
-+ else
-+ mutex_lock(&mc_io->mutex);
-+
-+ /*
-+ * Send command to the MC hardware:
-+ */
-+ mc_write_command(mc_io->portal_virt_addr, cmd);
-+
-+ /*
-+ * Wait for response from the MC hardware:
-+ */
-+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
-+ error = mc_polling_wait_preemptible(mc_io, cmd, &status);
-+ else
-+ error = mc_polling_wait_atomic(mc_io, cmd, &status);
-+
-+ if (error < 0)
-+ goto common_exit;
-+
-+ if (status != MC_CMD_STATUS_OK) {
-+ dev_dbg(mc_io->dev,
-+ "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
-+ &mc_io->portal_phys_addr,
-+ (unsigned int)mc_cmd_hdr_read_token(cmd),
-+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
-+ mc_status_to_string(status),
-+ (unsigned int)status);
-+
-+ error = mc_status_to_error(status);
-+ goto common_exit;
-+ }
-+
-+ error = 0;
-+common_exit:
-+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
-+ spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
-+ else
-+ mutex_unlock(&mc_io->mutex);
-+
-+ return error;
-+}
-+EXPORT_SYMBOL_GPL(mc_send_command);
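For reference, the flow implemented above (encode a header, write it to the portal, poll for completion, read fields back out of the same command buffer) is the pattern every object API wrapper in this patch follows. A minimal caller sketch, assuming a portal already obtained from fsl_mc_portal_allocate(); MY_CMDID_OPEN and struct my_cmd_open are placeholders for illustration, not real MC definitions:

#include <linux/fsl/mc.h>

#define MY_CMDID_OPEN	0	/* placeholder only; real ids live in the object's *-cmd.h */

struct my_cmd_open {
	__le32 obj_id;		/* placeholder command-specific layout */
};

static int my_obj_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
		       int obj_id, u16 *token)
{
	struct fsl_mc_command cmd = { 0 };
	struct my_cmd_open *cmd_params;
	int err;

	/* Build the 64-bit header: command id, flags, no token yet */
	cmd.header = mc_encode_cmd_header(MY_CMDID_OPEN, cmd_flags, 0);
	cmd_params = (struct my_cmd_open *)cmd.params;
	cmd_params->obj_id = cpu_to_le32(obj_id);

	/* Write to the portal and poll (preemptible or atomic) for completion */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* The MC hands back the object handle in the response header token */
	*token = mc_cmd_hdr_read_token(&cmd);
	return 0;
}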
---- a/drivers/irqchip/Kconfig
-+++ b/drivers/irqchip/Kconfig
-@@ -42,6 +42,12 @@ config ARM_GIC_V3_ITS
- depends on PCI
- depends on PCI_MSI
-
-+config ARM_GIC_V3_ITS_FSL_MC
-+ bool
-+ depends on ARM_GIC_V3_ITS
-+ depends on FSL_MC_BUS
-+ default ARM_GIC_V3_ITS
-+
- config ARM_NVIC
- bool
- select IRQ_DOMAIN
---- a/drivers/irqchip/Makefile
-+++ b/drivers/irqchip/Makefile
-@@ -30,6 +30,7 @@ obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-
- obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
- obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
- obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
-+obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
- obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
- obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
- obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
---- /dev/null
-+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
-@@ -0,0 +1,98 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Freescale Management Complex (MC) bus driver MSI support
-+ *
-+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
-+ * Author: German Rivera <German.Rivera@freescale.com>
-+ *
-+ */
-+
-+#include <linux/of_device.h>
-+#include <linux/of_address.h>
-+#include <linux/irq.h>
-+#include <linux/msi.h>
-+#include <linux/of.h>
-+#include <linux/of_irq.h>
-+#include <linux/fsl/mc.h>
-+
-+static struct irq_chip its_msi_irq_chip = {
-+ .name = "ITS-fMSI",
-+ .irq_mask = irq_chip_mask_parent,
-+ .irq_unmask = irq_chip_unmask_parent,
-+ .irq_eoi = irq_chip_eoi_parent,
-+ .irq_set_affinity = msi_domain_set_affinity
-+};
-+
-+static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
-+ struct device *dev,
-+ int nvec, msi_alloc_info_t *info)
-+{
-+ struct fsl_mc_device *mc_bus_dev;
-+ struct msi_domain_info *msi_info;
-+
-+ if (!dev_is_fsl_mc(dev))
-+ return -EINVAL;
-+
-+ mc_bus_dev = to_fsl_mc_device(dev);
-+ if (!(mc_bus_dev->flags & FSL_MC_IS_DPRC))
-+ return -EINVAL;
-+
-+ /*
-+ * Set the device Id to be passed to the GIC-ITS:
-+ *
-+ * NOTE: This device id corresponds to the IOMMU stream ID
-+ * associated with the DPRC object (ICID).
-+ */
-+ info->scratchpad[0].ul = mc_bus_dev->icid;
-+ msi_info = msi_get_domain_info(msi_domain->parent);
-+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
-+}
-+
-+static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
-+ .msi_prepare = its_fsl_mc_msi_prepare,
-+};
-+
-+static struct msi_domain_info its_fsl_mc_msi_domain_info = {
-+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
-+ .ops = &its_fsl_mc_msi_ops,
-+ .chip = &its_msi_irq_chip,
-+};
-+
-+static const struct of_device_id its_device_id[] = {
-+ { .compatible = "arm,gic-v3-its", },
-+ {},
-+};
-+
-+static int __init its_fsl_mc_msi_init(void)
-+{
-+ struct device_node *np;
-+ struct irq_domain *parent;
-+ struct irq_domain *mc_msi_domain;
-+
-+ for (np = of_find_matching_node(NULL, its_device_id); np;
-+ np = of_find_matching_node(np, its_device_id)) {
-+ if (!of_property_read_bool(np, "msi-controller"))
-+ continue;
-+
-+ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
-+ if (!parent || !msi_get_domain_info(parent)) {
-+ pr_err("%pOF: unable to locate ITS domain\n", np);
-+ continue;
-+ }
-+
-+ mc_msi_domain = fsl_mc_msi_create_irq_domain(
-+ of_node_to_fwnode(np),
-+ &its_fsl_mc_msi_domain_info,
-+ parent);
-+ if (!mc_msi_domain) {
-+ pr_err("%pOF: unable to create fsl-mc domain\n", np);
-+ continue;
-+ }
-+
-+ pr_info("fsl-mc MSI: %pOF domain created\n", np);
-+ }
-+
-+ return 0;
-+}
-+
-+early_initcall(its_fsl_mc_msi_init);
---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-@@ -16,7 +16,7 @@
- #include <linux/filter.h>
- #include <linux/atomic.h>
- #include <net/sock.h>
--#include "../../fsl-mc/include/mc.h"
-+#include <linux/fsl/mc.h>
- #include "dpaa2-eth.h"
- #include "dpaa2-eth-ceetm.h"
-
---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-@@ -9,12 +9,11 @@
- #include <linux/dcbnl.h>
- #include <linux/netdevice.h>
- #include <linux/if_vlan.h>
-+#include <linux/fsl/mc.h>
- #include <linux/filter.h>
-
- #include "../../fsl-mc/include/dpaa2-io.h"
- #include "../../fsl-mc/include/dpaa2-fd.h"
--#include "../../fsl-mc/include/dpbp.h"
--#include "../../fsl-mc/include/dpcon.h"
- #include "dpni.h"
- #include "dpni-cmd.h"
-
---- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-@@ -4,7 +4,7 @@
- */
- #include <linux/kernel.h>
- #include <linux/errno.h>
--#include "../../fsl-mc/include/mc.h"
-+#include <linux/fsl/mc.h>
- #include "dpni.h"
- #include "dpni-cmd.h"
-
---- a/drivers/staging/fsl-mc/bus/Kconfig
-+++ b/drivers/staging/fsl-mc/bus/Kconfig
-@@ -5,15 +5,6 @@
- # Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- #
-
--config FSL_MC_BUS
-- bool "QorIQ DPAA2 fsl-mc bus driver"
-- depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
-- select GENERIC_MSI_IRQ_DOMAIN
-- help
-- Driver to enable the bus infrastructure for the QorIQ DPAA2
-- architecture. The fsl-mc bus driver handles discovery of
-- DPAA2 objects (which are represented as Linux devices) and
-- binding objects to drivers.
-
- config FSL_MC_DPIO
- tristate "QorIQ DPAA2 DPIO driver"
-@@ -24,3 +15,9 @@ config FSL_MC_DPIO
- other DPAA2 objects. This driver does not expose the DPIO
- objects individually, but groups them under a service layer
- API.
-+
-+config FSL_QBMAN_DEBUG
-+ tristate "Freescale QBMAN Debug APIs"
-+ depends on FSL_MC_DPIO
-+ help
-+ QBMan debug assistant APIs.
---- a/drivers/staging/fsl-mc/bus/Makefile
-+++ b/drivers/staging/fsl-mc/bus/Makefile
-@@ -4,19 +4,6 @@
- #
- # Copyright (C) 2014 Freescale Semiconductor, Inc.
- #
--obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
--
--mc-bus-driver-objs := fsl-mc-bus.o \
-- mc-sys.o \
-- mc-io.o \
-- dprc.o \
-- dprc-driver.o \
-- fsl-mc-allocator.o \
-- fsl-mc-msi.o \
-- irq-gic-v3-its-fsl-mc-msi.o \
-- dpmcp.o \
-- dpbp.o \
-- dpcon.o
-
- # MC DPIO driver
- obj-$(CONFIG_FSL_MC_DPIO) += dpio/
---- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
-@@ -15,7 +15,7 @@
- #include <linux/delay.h>
- #include <linux/io.h>
-
--#include "../../include/mc.h"
-+#include <linux/fsl/mc.h>
- #include "../../include/dpaa2-io.h"
-
- #include "qbman-portal.h"
---- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
-@@ -5,7 +5,7 @@
- *
- */
- #include <linux/types.h>
--#include "../../include/mc.h"
-+#include <linux/fsl/mc.h>
- #include "../../include/dpaa2-io.h"
- #include <linux/init.h>
- #include <linux/module.h>
---- a/drivers/staging/fsl-mc/bus/dpio/dpio.c
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
-@@ -5,7 +5,7 @@
- *
- */
- #include <linux/kernel.h>
--#include "../../include/mc.h"
-+#include <linux/fsl/mc.h>
-
- #include "dpio.h"
- #include "dpio-cmd.h"
-@@ -37,7 +37,7 @@ int dpio_open(struct fsl_mc_io *mc_io,
- int dpio_id,
- u16 *token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpio_cmd_open *dpio_cmd;
- int err;
-
-@@ -70,7 +70,7 @@ int dpio_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
-@@ -92,7 +92,7 @@ int dpio_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
-@@ -114,7 +114,7 @@ int dpio_disable(struct fsl_mc_io *mc_io
- u32 cmd_flags,
- u16 token)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
-@@ -138,7 +138,7 @@ int dpio_get_attributes(struct fsl_mc_io
- u16 token,
- struct dpio_attr *attr)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- struct dpio_rsp_get_attr *dpio_rsp;
- int err;
-
-@@ -180,7 +180,7 @@ int dpio_get_api_version(struct fsl_mc_i
- u16 *major_ver,
- u16 *minor_ver)
- {
-- struct mc_command cmd = { 0 };
-+ struct fsl_mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
---- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
-+++ /dev/null
-@@ -1,56 +0,0 @@
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- */
--#ifndef _FSL_DPMCP_CMD_H
--#define _FSL_DPMCP_CMD_H
--
--/* Minimal supported DPMCP Version */
--#define DPMCP_MIN_VER_MAJOR 3
--#define DPMCP_MIN_VER_MINOR 0
--
--/* Command versioning */
--#define DPMCP_CMD_BASE_VERSION 1
--#define DPMCP_CMD_ID_OFFSET 4
--
--#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
--
--/* Command IDs */
--#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
--#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
--#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
--
--#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
--
--struct dpmcp_cmd_open {
-- __le32 dpmcp_id;
--};
--
--#endif /* _FSL_DPMCP_CMD_H */
---- a/drivers/staging/fsl-mc/bus/dpmcp.h
-+++ /dev/null
-@@ -1,60 +0,0 @@
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- */
--#ifndef __FSL_DPMCP_H
--#define __FSL_DPMCP_H
--
--/*
-- * Data Path Management Command Portal API
-- * Contains initialization APIs and runtime control APIs for DPMCP
-- */
--
--struct fsl_mc_io;
--
--int dpmcp_open(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int dpmcp_id,
-- u16 *token);
--
--int dpmcp_close(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token);
--
--int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 *major_ver,
-- u16 *minor_ver);
--
--int dpmcp_reset(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token);
--
--#endif /* __FSL_DPMCP_H */
---- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
-+++ /dev/null
-@@ -1,58 +0,0 @@
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- */
--
--/*
-- * dpmng-cmd.h
-- *
-- * defines portal commands
-- *
-- */
--
--#ifndef __FSL_DPMNG_CMD_H
--#define __FSL_DPMNG_CMD_H
--
--/* Command versioning */
--#define DPMNG_CMD_BASE_VERSION 1
--#define DPMNG_CMD_ID_OFFSET 4
--
--#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
--
--/* Command IDs */
--#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
--
--struct dpmng_rsp_get_version {
-- __le32 revision;
-- __le32 version_major;
-- __le32 version_minor;
--};
--
--#endif /* __FSL_DPMNG_CMD_H */
---- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
-+++ /dev/null
-@@ -1,451 +0,0 @@
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- */
--
--/*
-- * dprc-cmd.h
-- *
-- * defines dprc portal commands
-- *
-- */
--
--#ifndef _FSL_DPRC_CMD_H
--#define _FSL_DPRC_CMD_H
--
--/* Minimal supported DPRC Version */
--#define DPRC_MIN_VER_MAJOR 6
--#define DPRC_MIN_VER_MINOR 0
--
--/* Command versioning */
--#define DPRC_CMD_BASE_VERSION 1
--#define DPRC_CMD_ID_OFFSET 4
--
--#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
--
--/* Command IDs */
--#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
--#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
--#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
--
--#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
--
--#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
--#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
--#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
--#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
--#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
--#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
--#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
--#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
--
--#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
--#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
--#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
--#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
--#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
--#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
--#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
--
--struct dprc_cmd_open {
-- __le32 container_id;
--};
--
--struct dprc_cmd_create_container {
-- /* cmd word 0 */
-- __le32 options;
-- __le16 icid;
-- __le16 pad0;
-- /* cmd word 1 */
-- __le32 pad1;
-- __le32 portal_id;
-- /* cmd words 2-3 */
-- u8 label[16];
--};
--
--struct dprc_rsp_create_container {
-- /* response word 0 */
-- __le64 pad0;
-- /* response word 1 */
-- __le32 child_container_id;
-- __le32 pad1;
-- /* response word 2 */
-- __le64 child_portal_addr;
--};
--
--struct dprc_cmd_destroy_container {
-- __le32 child_container_id;
--};
--
--struct dprc_cmd_reset_container {
-- __le32 child_container_id;
--};
--
--struct dprc_cmd_set_irq {
-- /* cmd word 0 */
-- __le32 irq_val;
-- u8 irq_index;
-- u8 pad[3];
-- /* cmd word 1 */
-- __le64 irq_addr;
-- /* cmd word 2 */
-- __le32 irq_num;
--};
--
--struct dprc_cmd_get_irq {
-- __le32 pad;
-- u8 irq_index;
--};
--
--struct dprc_rsp_get_irq {
-- /* response word 0 */
-- __le32 irq_val;
-- __le32 pad;
-- /* response word 1 */
-- __le64 irq_addr;
-- /* response word 2 */
-- __le32 irq_num;
-- __le32 type;
--};
--
--#define DPRC_ENABLE 0x1
--
--struct dprc_cmd_set_irq_enable {
-- u8 enable;
-- u8 pad[3];
-- u8 irq_index;
--};
--
--struct dprc_cmd_get_irq_enable {
-- __le32 pad;
-- u8 irq_index;
--};
--
--struct dprc_rsp_get_irq_enable {
-- u8 enabled;
--};
--
--struct dprc_cmd_set_irq_mask {
-- __le32 mask;
-- u8 irq_index;
--};
--
--struct dprc_cmd_get_irq_mask {
-- __le32 pad;
-- u8 irq_index;
--};
--
--struct dprc_rsp_get_irq_mask {
-- __le32 mask;
--};
--
--struct dprc_cmd_get_irq_status {
-- __le32 status;
-- u8 irq_index;
--};
--
--struct dprc_rsp_get_irq_status {
-- __le32 status;
--};
--
--struct dprc_cmd_clear_irq_status {
-- __le32 status;
-- u8 irq_index;
--};
--
--struct dprc_rsp_get_attributes {
-- /* response word 0 */
-- __le32 container_id;
-- __le16 icid;
-- __le16 pad;
-- /* response word 1 */
-- __le32 options;
-- __le32 portal_id;
--};
--
--struct dprc_cmd_set_res_quota {
-- /* cmd word 0 */
-- __le32 child_container_id;
-- __le16 quota;
-- __le16 pad;
-- /* cmd words 1-2 */
-- u8 type[16];
--};
--
--struct dprc_cmd_get_res_quota {
-- /* cmd word 0 */
-- __le32 child_container_id;
-- __le32 pad;
-- /* cmd word 1-2 */
-- u8 type[16];
--};
--
--struct dprc_rsp_get_res_quota {
-- __le32 pad;
-- __le16 quota;
--};
--
--struct dprc_cmd_assign {
-- /* cmd word 0 */
-- __le32 container_id;
-- __le32 options;
-- /* cmd word 1 */
-- __le32 num;
-- __le32 id_base_align;
-- /* cmd word 2-3 */
-- u8 type[16];
--};
--
--struct dprc_cmd_unassign {
-- /* cmd word 0 */
-- __le32 child_container_id;
-- __le32 options;
-- /* cmd word 1 */
-- __le32 num;
-- __le32 id_base_align;
-- /* cmd word 2-3 */
-- u8 type[16];
--};
--
--struct dprc_rsp_get_pool_count {
-- __le32 pool_count;
--};
--
--struct dprc_cmd_get_pool {
-- __le32 pool_index;
--};
--
--struct dprc_rsp_get_pool {
-- /* response word 0 */
-- __le64 pad;
-- /* response word 1-2 */
-- u8 type[16];
--};
--
--struct dprc_rsp_get_obj_count {
-- __le32 pad;
-- __le32 obj_count;
--};
--
--struct dprc_cmd_get_obj {
-- __le32 obj_index;
--};
--
--struct dprc_rsp_get_obj {
-- /* response word 0 */
-- __le32 pad0;
-- __le32 id;
-- /* response word 1 */
-- __le16 vendor;
-- u8 irq_count;
-- u8 region_count;
-- __le32 state;
-- /* response word 2 */
-- __le16 version_major;
-- __le16 version_minor;
-- __le16 flags;
-- __le16 pad1;
-- /* response word 3-4 */
-- u8 type[16];
-- /* response word 5-6 */
-- u8 label[16];
--};
--
--struct dprc_cmd_get_obj_desc {
-- /* cmd word 0 */
-- __le32 obj_id;
-- __le32 pad;
-- /* cmd word 1-2 */
-- u8 type[16];
--};
--
--struct dprc_rsp_get_obj_desc {
-- /* response word 0 */
-- __le32 pad0;
-- __le32 id;
-- /* response word 1 */
-- __le16 vendor;
-- u8 irq_count;
-- u8 region_count;
-- __le32 state;
-- /* response word 2 */
-- __le16 version_major;
-- __le16 version_minor;
-- __le16 flags;
-- __le16 pad1;
-- /* response word 3-4 */
-- u8 type[16];
-- /* response word 5-6 */
-- u8 label[16];
--};
--
--struct dprc_cmd_get_res_count {
-- /* cmd word 0 */
-- __le64 pad;
-- /* cmd word 1-2 */
-- u8 type[16];
--};
--
--struct dprc_rsp_get_res_count {
-- __le32 res_count;
--};
--
--struct dprc_cmd_get_res_ids {
-- /* cmd word 0 */
-- u8 pad0[5];
-- u8 iter_status;
-- __le16 pad1;
-- /* cmd word 1 */
-- __le32 base_id;
-- __le32 last_id;
-- /* cmd word 2-3 */
-- u8 type[16];
--};
--
--struct dprc_rsp_get_res_ids {
-- /* response word 0 */
-- u8 pad0[5];
-- u8 iter_status;
-- __le16 pad1;
-- /* response word 1 */
-- __le32 base_id;
-- __le32 last_id;
--};
--
--struct dprc_cmd_get_obj_region {
-- /* cmd word 0 */
-- __le32 obj_id;
-- __le16 pad0;
-- u8 region_index;
-- u8 pad1;
-- /* cmd word 1-2 */
-- __le64 pad2[2];
-- /* cmd word 3-4 */
-- u8 obj_type[16];
--};
--
--struct dprc_rsp_get_obj_region {
-- /* response word 0 */
-- __le64 pad;
-- /* response word 1 */
-- __le64 base_addr;
-- /* response word 2 */
-- __le32 size;
--};
--
--struct dprc_cmd_set_obj_label {
-- /* cmd word 0 */
-- __le32 obj_id;
-- __le32 pad;
-- /* cmd word 1-2 */
-- u8 label[16];
-- /* cmd word 3-4 */
-- u8 obj_type[16];
--};
--
--struct dprc_cmd_set_obj_irq {
-- /* cmd word 0 */
-- __le32 irq_val;
-- u8 irq_index;
-- u8 pad[3];
-- /* cmd word 1 */
-- __le64 irq_addr;
-- /* cmd word 2 */
-- __le32 irq_num;
-- __le32 obj_id;
-- /* cmd word 3-4 */
-- u8 obj_type[16];
--};
--
--struct dprc_cmd_get_obj_irq {
-- /* cmd word 0 */
-- __le32 obj_id;
-- u8 irq_index;
-- u8 pad[3];
-- /* cmd word 1-2 */
-- u8 obj_type[16];
--};
--
--struct dprc_rsp_get_obj_irq {
-- /* response word 0 */
-- __le32 irq_val;
-- __le32 pad;
-- /* response word 1 */
-- __le64 irq_addr;
-- /* response word 2 */
-- __le32 irq_num;
-- __le32 type;
--};
--
--struct dprc_cmd_connect {
-- /* cmd word 0 */
-- __le32 ep1_id;
-- __le32 ep1_interface_id;
-- /* cmd word 1 */
-- __le32 ep2_id;
-- __le32 ep2_interface_id;
-- /* cmd word 2-3 */
-- u8 ep1_type[16];
-- /* cmd word 4 */
-- __le32 max_rate;
-- __le32 committed_rate;
-- /* cmd word 5-6 */
-- u8 ep2_type[16];
--};
--
--struct dprc_cmd_disconnect {
-- /* cmd word 0 */
-- __le32 id;
-- __le32 interface_id;
-- /* cmd word 1-2 */
-- u8 type[16];
--};
--
--struct dprc_cmd_get_connection {
-- /* cmd word 0 */
-- __le32 ep1_id;
-- __le32 ep1_interface_id;
-- /* cmd word 1-2 */
-- u8 ep1_type[16];
--};
--
--struct dprc_rsp_get_connection {
-- /* response word 0-2 */
-- __le64 pad[3];
-- /* response word 3 */
-- __le32 ep2_id;
-- __le32 ep2_interface_id;
-- /* response word 4-5 */
-- u8 ep2_type[16];
-- /* response word 6 */
-- __le32 state;
--};
--
--#endif /* _FSL_DPRC_CMD_H */
---- a/drivers/staging/fsl-mc/bus/dprc.h
-+++ /dev/null
-@@ -1,268 +0,0 @@
--/*
-- * Copyright 2013-2016 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- */
--#ifndef _FSL_DPRC_H
--#define _FSL_DPRC_H
--
--/*
-- * Data Path Resource Container API
-- * Contains DPRC API for managing and querying DPAA resources
-- */
--
--struct fsl_mc_io;
--struct fsl_mc_obj_desc;
--
--int dprc_open(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int container_id,
-- u16 *token);
--
--int dprc_close(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token);
--
--/* IRQ */
--
--/* IRQ index */
--#define DPRC_IRQ_INDEX 0
--
--/* Number of dprc's IRQs */
--#define DPRC_NUM_OF_IRQS 1
--
--/* DPRC IRQ events */
--
--/* IRQ event - Indicates that a new object was added to the container */
--#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
--/* IRQ event - Indicates that an object was removed from the container */
--#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
--/* IRQ event - Indicates that resources were added to the container */
--#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
--/* IRQ event - Indicates that resources were removed from the container */
--#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
--/*
-- * IRQ event - Indicates that one of the descendant containers opened by
-- * this container was destroyed
-- */
--#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
--
--/*
-- * IRQ event - Indicates that one of the container's opened objects was
-- * destroyed
-- */
--#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
--
--/* IRQ event - Indicates that an object was created in the container */
--#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
--
--/**
-- * struct dprc_irq_cfg - IRQ configuration
-- * @paddr: Address that must be written to signal a message-based interrupt
-- * @val: Value to write into irq_addr address
-- * @irq_num: A user defined number associated with this IRQ
-- */
--struct dprc_irq_cfg {
-- phys_addr_t paddr;
-- u32 val;
-- int irq_num;
--};
--
--int dprc_set_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_get_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- int *type,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 en);
--
--int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 *en);
--
--int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 mask);
--
--int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *mask);
--
--int dprc_get_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *status);
--
--int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 status);
--
--/**
-- * struct dprc_attributes - Container attributes
-- * @container_id: Container's ID
-- * @icid: Container's ICID
-- * @portal_id: Container's portal ID
-- * @options: Container's options as set at container's creation
-- */
--struct dprc_attributes {
-- int container_id;
-- u16 icid;
-- int portal_id;
-- u64 options;
--};
--
--int dprc_get_attributes(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dprc_attributes *attributes);
--
--int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int *obj_count);
--
--int dprc_get_obj(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int obj_index,
-- struct fsl_mc_obj_desc *obj_desc);
--
--int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- struct fsl_mc_obj_desc *obj_desc);
--
--int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 irq_index,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 irq_index,
-- int *type,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_get_res_count(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *type,
-- int *res_count);
--
--/**
-- * enum dprc_iter_status - Iteration status
-- * @DPRC_ITER_STATUS_FIRST: Perform first iteration
-- * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed
-- * @DPRC_ITER_STATUS_LAST: Indicates last iteration
-- */
--enum dprc_iter_status {
-- DPRC_ITER_STATUS_FIRST = 0,
-- DPRC_ITER_STATUS_MORE = 1,
-- DPRC_ITER_STATUS_LAST = 2
--};
--
--/* Region flags */
--/* Cacheable - Indicates that region should be mapped as cacheable */
--#define DPRC_REGION_CACHEABLE 0x00000001
--
--/**
-- * enum dprc_region_type - Region type
-- * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
-- * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
-- */
--enum dprc_region_type {
-- DPRC_REGION_TYPE_MC_PORTAL,
-- DPRC_REGION_TYPE_QBMAN_PORTAL
--};
--
--/**
-- * struct dprc_region_desc - Mappable region descriptor
-- * @base_offset: Region offset from region's base address.
-- * For DPMCP and DPRC objects, region base is offset from SoC MC portals
-- * base address; For DPIO, region base is offset from SoC QMan portals
-- * base address
-- * @size: Region size (in bytes)
-- * @flags: Region attributes
-- * @type: Portal region type
-- */
--struct dprc_region_desc {
-- u32 base_offset;
-- u32 size;
-- u32 flags;
-- enum dprc_region_type type;
--};
--
--int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 region_index,
-- struct dprc_region_desc *region_desc);
--
--int dprc_get_api_version(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 *major_ver,
-- u16 *minor_ver);
--
--int dprc_get_container_id(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int *container_id);
--
--#endif /* _FSL_DPRC_H */
--
---- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
-+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
-@@ -13,6 +13,7 @@
- #include <linux/msi.h>
- #include <linux/of.h>
- #include <linux/of_irq.h>
-+#include <linux/fsl/mc.h>
- #include "fsl-mc-private.h"
-
- static struct irq_chip its_msi_irq_chip = {
---- /dev/null
-+++ b/include/linux/fsl/mc.h
-@@ -0,0 +1,1029 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * Freescale Management Complex (MC) bus public interface
-+ *
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-+ * Author: German Rivera <German.Rivera@freescale.com>
-+ *
-+ */
-+#ifndef _FSL_MC_H_
-+#define _FSL_MC_H_
-+
-+#include <linux/device.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/interrupt.h>
-+#include <linux/cdev.h>
-+#include <uapi/linux/fsl_mc.h>
-+
-+#define FSL_MC_VENDOR_FREESCALE 0x1957
-+
-+struct irq_domain;
-+struct msi_domain_info;
-+
-+struct fsl_mc_device;
-+struct fsl_mc_io;
-+
-+/**
-+ * struct fsl_mc_driver - MC object device driver object
-+ * @driver: Generic device driver
-+ * @match_id_table: table of supported device matching Ids
-+ * @probe: Function called when a device is added
-+ * @remove: Function called when a device is removed
-+ * @shutdown: Function called at shutdown time to quiesce the device
-+ * @suspend: Function called when a device is stopped
-+ * @resume: Function called when a device is resumed
-+ *
-+ * Generic DPAA device driver object for device drivers that are registered
-+ * with a DPRC bus. This structure is to be embedded in each device-specific
-+ * driver structure.
-+ */
-+struct fsl_mc_driver {
-+ struct device_driver driver;
-+ const struct fsl_mc_device_id *match_id_table;
-+ int (*probe)(struct fsl_mc_device *dev);
-+ int (*remove)(struct fsl_mc_device *dev);
-+ void (*shutdown)(struct fsl_mc_device *dev);
-+ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
-+ int (*resume)(struct fsl_mc_device *dev);
-+};
-+
-+#define to_fsl_mc_driver(_drv) \
-+ container_of(_drv, struct fsl_mc_driver, driver)
-+
-+#define to_fsl_mc_bus(_mc_dev) \
-+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
-+
-+/**
-+ * enum fsl_mc_pool_type - Types of allocatable MC bus resources
-+ *
-+ * Entries in these enum are used as indices in the array of resource
-+ * pools of an fsl_mc_bus object.
-+ */
-+enum fsl_mc_pool_type {
-+ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
-+ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
-+ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
-+ FSL_MC_POOL_IRQ,
-+
-+ /*
-+ * NOTE: New resource pool types must be added before this entry
-+ */
-+ FSL_MC_NUM_POOL_TYPES
-+};
-+
-+/**
-+ * struct fsl_mc_resource - MC generic resource
-+ * @type: type of resource
-+ * @id: unique MC resource Id within the resources of the same type
-+ * @data: pointer to resource-specific data if the resource is currently
-+ * allocated, or NULL if the resource is not currently allocated.
-+ * @parent_pool: pointer to the parent resource pool from which this
-+ * resource is allocated.
-+ * @node: Node in the free list of the corresponding resource pool
-+ *
-+ * NOTE: This structure is to be embedded as a field of specific
-+ * MC resource structures.
-+ */
-+struct fsl_mc_resource {
-+ enum fsl_mc_pool_type type;
-+ s32 id;
-+ void *data;
-+ struct fsl_mc_resource_pool *parent_pool;
-+ struct list_head node;
-+};
-+
-+/**
-+ * struct fsl_mc_device_irq - MC object device message-based interrupt
-+ * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
-+ * @mc_dev: MC object device that owns this interrupt
-+ * @dev_irq_index: device-relative IRQ index
-+ * @resource: MC generic resource associated with the interrupt
-+ */
-+struct fsl_mc_device_irq {
-+ struct msi_desc *msi_desc;
-+ struct fsl_mc_device *mc_dev;
-+ u8 dev_irq_index;
-+ struct fsl_mc_resource resource;
-+};
-+
-+#define to_fsl_mc_irq(_mc_resource) \
-+ container_of(_mc_resource, struct fsl_mc_device_irq, resource)
-+
-+/* Opened state - Indicates that an object is open by at least one owner */
-+#define FSL_MC_OBJ_STATE_OPEN 0x00000001
-+/* Plugged state - Indicates that the object is plugged */
-+#define FSL_MC_OBJ_STATE_PLUGGED 0x00000002
-+
-+/**
-+ * Shareability flag - Object flag indicating no memory shareability.
-+ * The object generates memory accesses that are non-coherent with other
-+ * masters; the user is responsible for proper memory handling through
-+ * IOMMU configuration.
-+ */
-+#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
-+
-+/**
-+ * struct fsl_mc_obj_desc - Object descriptor
-+ * @type: Type of object: NULL terminated string
-+ * @id: ID of logical object resource
-+ * @vendor: Object vendor identifier
-+ * @ver_major: Major version number
-+ * @ver_minor: Minor version number
-+ * @irq_count: Number of interrupts supported by the object
-+ * @region_count: Number of mappable regions supported by the object
-+ * @state: Object state: combination of FSL_MC_OBJ_STATE_ states
-+ * @label: Object label: NULL terminated string
-+ * @flags: Object's flags
-+ */
-+struct fsl_mc_obj_desc {
-+ char type[16];
-+ int id;
-+ u16 vendor;
-+ u16 ver_major;
-+ u16 ver_minor;
-+ u8 irq_count;
-+ u8 region_count;
-+ u32 state;
-+ char label[16];
-+ u16 flags;
-+};
-+
-+/**
-+ * Bit masks for an MC object device (struct fsl_mc_device) flags
-+ */
-+#define FSL_MC_IS_DPRC 0x0001
-+
-+/**
-+ * struct fsl_mc_device - MC object device object
-+ * @dev: Linux driver model device object
-+ * @dma_mask: Default DMA mask
-+ * @flags: MC object device flags
-+ * @icid: Isolation context ID for the device
-+ * @mc_handle: MC handle for the corresponding MC object opened
-+ * @mc_io: Pointer to MC IO object assigned to this device or
-+ * NULL if none.
-+ * @obj_desc: MC description of the DPAA device
-+ * @regions: pointer to array of MMIO region entries
-+ * @irqs: pointer to array of pointers to interrupts allocated to this device
-+ * @resource: generic resource associated with this MC object device, if any.
-+ * @driver_override: Driver name to force a match
-+ *
-+ * Generic device object for MC object devices that are "attached" to an
-+ * MC bus.
-+ *
-+ * NOTES:
-+ * - For a non-DPRC object its icid is the same as its parent DPRC's icid.
-+ * - The SMMU notifier callback gets invoked after device_add() has been
-+ * called for an MC object device, but before the device-specific probe
-+ * callback gets called.
-+ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC
-+ * portals. For all other MC objects, their device drivers are responsible for
-+ * allocating MC portals for them by calling fsl_mc_portal_allocate().
-+ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are
-+ * treated as resources that can be allocated/deallocated from the
-+ * corresponding resource pool in the object's parent DPRC, using the
-+ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects
-+ * are known as "allocatable" objects. For them, the corresponding
-+ * fsl_mc_device's 'resource' points to the associated resource object.
-+ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI),
-+ * 'resource' is NULL.
-+ */
-+struct fsl_mc_device {
-+ struct device dev;
-+ u64 dma_mask;
-+ u16 flags;
-+ u32 icid;
-+ u16 mc_handle;
-+ struct fsl_mc_io *mc_io;
-+ struct fsl_mc_obj_desc obj_desc;
-+ struct resource *regions;
-+ struct fsl_mc_device_irq **irqs;
-+ struct fsl_mc_resource *resource;
-+ const char *driver_override;
-+ struct device_link *consumer_link;
-+};
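The NOTES above draw the line between DPRC objects, which own a built-in portal, and "allocatable" objects such as DPBP or DPCON, which a driver borrows from its parent container's pools. A sketch of that borrow/return pattern, using the fsl_mc_object_allocate()/fsl_mc_object_free() pair declared later in this header:

#include <linux/fsl/mc.h>

/* Sketch: borrow a buffer pool ("dpbp") object from the parent DPRC. */
static int my_take_dpbp(struct fsl_mc_device *mc_dev,
			struct fsl_mc_device **dpbp_dev)
{
	return fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, dpbp_dev);
}

/* Sketch: hand the object back when the driver is done with it. */
static void my_put_dpbp(struct fsl_mc_device *dpbp_dev)
{
	fsl_mc_object_free(dpbp_dev);
}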
-+
-+#define to_fsl_mc_device(_dev) \
-+ container_of(_dev, struct fsl_mc_device, dev)
-+
-+struct mc_cmd_header {
-+ u8 src_id;
-+ u8 flags_hw;
-+ u8 status;
-+ u8 flags_sw;
-+ __le16 token;
-+ __le16 cmd_id;
-+};
-+
-+enum mc_cmd_status {
-+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
-+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
-+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
-+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
-+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
-+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
-+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
-+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
-+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
-+ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
-+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
-+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
-+};
-+
-+/*
-+ * MC command flags
-+ */
-+
-+/* High priority flag */
-+#define MC_CMD_FLAG_PRI 0x80
-+/* Command completion flag */
-+#define MC_CMD_FLAG_INTR_DIS 0x01
-+
-+static inline u64 mc_encode_cmd_header(u16 cmd_id,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ u64 header = 0;
-+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
-+
-+ hdr->cmd_id = cpu_to_le16(cmd_id);
-+ hdr->token = cpu_to_le16(token);
-+ hdr->status = MC_CMD_STATUS_READY;
-+ if (cmd_flags & MC_CMD_FLAG_PRI)
-+ hdr->flags_hw = MC_CMD_FLAG_PRI;
-+ if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
-+ hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
-+
-+ return header;
-+}
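As a worked illustration of what the helper above packs (field names follow struct mc_cmd_header; DPRC_CMDID_OPEN and MC_CMD_FLAG_PRI are defined elsewhere in this header):

/*
 *   u64 hdr = mc_encode_cmd_header(DPRC_CMDID_OPEN, MC_CMD_FLAG_PRI, 0);
 *
 * yields, field by field:
 *   cmd_id   = cpu_to_le16(DPRC_CMDID_OPEN)
 *   token    = 0                    (the MC fills it in when the open completes)
 *   status   = MC_CMD_STATUS_READY  (overwritten by the MC with the final status)
 *   flags_hw = MC_CMD_FLAG_PRI      (high-priority bit)
 *   flags_sw = 0                    (MC_CMD_FLAG_INTR_DIS not requested)
 */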
-+
-+static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd)
-+{
-+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
-+ u16 token = le16_to_cpu(hdr->token);
-+
-+ return token;
-+}
-+
-+struct mc_rsp_create {
-+ __le32 object_id;
-+};
-+
-+struct mc_rsp_api_ver {
-+ __le16 major_ver;
-+ __le16 minor_ver;
-+};
-+
-+static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd)
-+{
-+ struct mc_rsp_create *rsp_params;
-+
-+ rsp_params = (struct mc_rsp_create *)cmd->params;
-+ return le32_to_cpu(rsp_params->object_id);
-+}
-+
-+static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd,
-+ u16 *major_ver,
-+ u16 *minor_ver)
-+{
-+ struct mc_rsp_api_ver *rsp_params;
-+
-+ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
-+ *major_ver = le16_to_cpu(rsp_params->major_ver);
-+ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
-+}
-+
-+/**
-+ * Bit masks for an MC I/O object (struct fsl_mc_io) flags
-+ */
-+#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001
-+
-+/**
-+ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
-+ * @dev: device associated with this MC I/O object
-+ * @flags: flags for mc_send_command()
-+ * @portal_size: MC command portal size in bytes
-+ * @portal_phys_addr: MC command portal physical address
-+ * @portal_virt_addr: MC command portal virtual address
-+ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
-+ *
-+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
-+ * set:
-+ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
-+ * portal, if the fsl_mc_io object was created with the
-+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
-+ * fsl_mc_io object must be made only from non-atomic context.
-+ *
-+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
-+ * set:
-+ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
-+ * portal, if the fsl_mc_io object was created with the
-+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
-+ * fsl_mc_io object can be made from atomic or non-atomic context.
-+ */
-+struct fsl_mc_io {
-+ struct device *dev;
-+ u16 flags;
-+ u32 portal_size;
-+ phys_addr_t portal_phys_addr;
-+ void __iomem *portal_virt_addr;
-+ struct fsl_mc_device *dpmcp_dev;
-+ union {
-+ /*
-+ * This field is only meaningful if the
-+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
-+ */
-+ struct mutex mutex; /* serializes mc_send_command() */
-+
-+ /*
-+ * This field is only meaningful if the
-+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
-+ */
-+ spinlock_t spinlock; /* serializes mc_send_command() */
-+ };
-+};
-+
-+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
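The mutex/spinlock split documented above is decided when the portal is created, not per call. A sketch of both choices, using fsl_mc_portal_allocate()/fsl_mc_portal_free() declared further down in this header:

#include <linux/fsl/mc.h>

/*
 * Sketch: give a driver its own MC portal that is safe to use from atomic
 * context; mc_send_command() on it will spin (udelay-based polling) instead
 * of sleeping.
 */
static int my_get_atomic_portal(struct fsl_mc_device *mc_dev,
				struct fsl_mc_io **mc_io)
{
	return fsl_mc_portal_allocate(mc_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				      mc_io);
}

/* Sketch: pass 0 instead if all commands are issued from process context. */
static void my_put_portal(struct fsl_mc_io *mc_io)
{
	fsl_mc_portal_free(mc_io);
}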
-+
-+#ifdef CONFIG_FSL_MC_BUS
-+#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
-+#else
-+/* If fsl-mc bus is not present device cannot belong to fsl-mc bus */
-+#define dev_is_fsl_mc(_dev) (0)
-+#endif
-+
-+/* Macro to check if a device is a container device */
-+#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
-+ FSL_MC_IS_DPRC)
-+
-+/* Macro to get the container device of a MC device */
-+#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
-+ (_dev) : (_dev)->parent)
-+
-+#define fsl_mc_is_dev_coherent(_dev) \
-+ (!((to_fsl_mc_device(_dev))->obj_desc.flags & \
-+ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
-+
-+/*
-+ * module_fsl_mc_driver() - Helper macro for drivers that don't do
-+ * anything special in module init/exit. This eliminates a lot of
-+ * boilerplate. Each module may only use this macro once, and
-+ * calling it replaces module_init() and module_exit()
-+ */
-+#define module_fsl_mc_driver(__fsl_mc_driver) \
-+ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
-+ fsl_mc_driver_unregister)
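For illustration, a skeletal fsl-mc driver that ties struct fsl_mc_driver, the match table and the helper macro above together; the "dpbp" match string and the probe/remove bodies are placeholders, not part of this patch:

#include <linux/module.h>
#include <linux/fsl/mc.h>

static int my_probe(struct fsl_mc_device *mc_dev)
{
	dev_info(&mc_dev->dev, "probed object %s.%d\n",
		 mc_dev->obj_desc.type, mc_dev->obj_desc.id);
	return 0;
}

static int my_remove(struct fsl_mc_device *mc_dev)
{
	return 0;
}

static const struct fsl_mc_device_id my_match_id_table[] = {
	{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpbp" },
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver my_driver = {
	.driver = {
		.name = "my-fsl-mc-driver",
	},
	.match_id_table = my_match_id_table,
	.probe = my_probe,
	.remove = my_remove,
};

module_fsl_mc_driver(my_driver);
MODULE_LICENSE("GPL");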
-+
-+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
-+
-+/*
-+ * Macro to avoid include chaining to get THIS_MODULE
-+ */
-+#define fsl_mc_driver_register(drv) \
-+ __fsl_mc_driver_register(drv, THIS_MODULE)
-+
-+int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
-+ struct module *owner);
-+
-+void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
-+
-+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
-+ u16 mc_io_flags,
-+ struct fsl_mc_io **new_mc_io);
-+
-+void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
-+
-+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
-+
-+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
-+ enum fsl_mc_pool_type pool_type,
-+ struct fsl_mc_device **new_mc_adev);
-+
-+void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
-+
-+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
-+ struct msi_domain_info *info,
-+ struct irq_domain *parent);
-+
-+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
-+
-+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
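fsl_mc_allocate_irqs() draws from the FSL_MC_POOL_IRQ pool that the ITS glue in irq-gic-v3-its-fsl-mc-msi.c (added earlier in this patch) ultimately backs. A hedged sketch of how an object driver typically wires one of its MSIs; the msi_desc->irq hop mirrors the in-tree DPRC/DPIO drivers, error handling trimmed:

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/fsl/mc.h>

/* Sketch: allocate this object's MSIs and hook IRQ index 0 to a handler. */
static int my_setup_irq(struct fsl_mc_device *mc_dev, irq_handler_t handler)
{
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(mc_dev);
	if (err)
		return err;

	irq = mc_dev->irqs[0];
	err = devm_request_irq(&mc_dev->dev, irq->msi_desc->irq, handler,
			       IRQF_NO_SUSPEND, dev_name(&mc_dev->dev),
			       mc_dev);
	if (err)
		fsl_mc_free_irqs(mc_dev);

	return err;
}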
-+
-+extern struct bus_type fsl_mc_bus_type;
-+
-+extern struct device_type fsl_mc_bus_dprc_type;
-+extern struct device_type fsl_mc_bus_dpni_type;
-+extern struct device_type fsl_mc_bus_dpio_type;
-+extern struct device_type fsl_mc_bus_dpsw_type;
-+extern struct device_type fsl_mc_bus_dpdmux_type;
-+extern struct device_type fsl_mc_bus_dpbp_type;
-+extern struct device_type fsl_mc_bus_dpcon_type;
-+extern struct device_type fsl_mc_bus_dpmcp_type;
-+extern struct device_type fsl_mc_bus_dpmac_type;
-+extern struct device_type fsl_mc_bus_dprtc_type;
-+extern struct device_type fsl_mc_bus_dpseci_type;
-+extern struct device_type fsl_mc_bus_dpdcei_type;
-+extern struct device_type fsl_mc_bus_dpaiop_type;
-+extern struct device_type fsl_mc_bus_dpci_type;
-+extern struct device_type fsl_mc_bus_dpdmai_type;
-+
-+static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dprc_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpni_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpio_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpcon_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpmac_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpci_type;
-+}
-+
-+static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
-+{
-+ return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
-+}
-+
-+/*
-+ * Data Path Resource Container (DPRC) API
-+ */
-+
-+/* Minimal supported DPRC Version */
-+#define DPRC_MIN_VER_MAJOR 6
-+#define DPRC_MIN_VER_MINOR 0
-+
-+/* DPRC command versioning */
-+#define DPRC_CMD_BASE_VERSION 1
-+#define DPRC_CMD_2ND_VERSION 2
-+#define DPRC_CMD_ID_OFFSET 4
-+
-+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
-+#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
-+
-+/* DPRC command IDs */
-+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
-+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
-+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
-+
-+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
-+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
-+
-+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
-+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
-+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
-+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
-+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
-+
-+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
-+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
-+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
-+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD_V2(0x15E)
-+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
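A quick worked example of the encoding above; the low nibble carries the command version, so a bumped command keeps its id but advertises the newer layout to the MC firmware:

/*
 *   DPRC_CMDID_GET_OBJ     = DPRC_CMD(0x15A)    = (0x15A << 4) | 1 = 0x15A1
 *   DPRC_CMDID_GET_OBJ_REG = DPRC_CMD_V2(0x15E) = (0x15E << 4) | 2 = 0x15E2
 */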
-+
-+struct dprc_cmd_open {
-+ __le32 container_id;
-+};
-+
-+struct dprc_cmd_reset_container {
-+ __le32 child_container_id;
-+};
-+
-+struct dprc_cmd_set_irq {
-+ /* cmd word 0 */
-+ __le32 irq_val;
-+ u8 irq_index;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+};
-+
-+#define DPRC_ENABLE 0x1
-+
-+struct dprc_cmd_set_irq_enable {
-+ u8 enable;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
-+
-+struct dprc_cmd_set_irq_mask {
-+ __le32 mask;
-+ u8 irq_index;
-+};
-+
-+struct dprc_cmd_get_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
-+
-+struct dprc_rsp_get_irq_status {
-+ __le32 status;
-+};
-+
-+struct dprc_cmd_clear_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
-+
-+struct dprc_rsp_get_attributes {
-+ /* response word 0 */
-+ __le32 container_id;
-+ __le32 icid;
-+ /* response word 1 */
-+ __le32 options;
-+ __le32 portal_id;
-+};
-+
-+struct dprc_rsp_get_obj_count {
-+ __le32 pad;
-+ __le32 obj_count;
-+};
-+
-+struct dprc_cmd_get_obj {
-+ __le32 obj_index;
-+};
-+
-+struct dprc_rsp_get_obj {
-+ /* response word 0 */
-+ __le32 pad0;
-+ __le32 id;
-+ /* response word 1 */
-+ __le16 vendor;
-+ u8 irq_count;
-+ u8 region_count;
-+ __le32 state;
-+ /* response word 2 */
-+ __le16 version_major;
-+ __le16 version_minor;
-+ __le16 flags;
-+ __le16 pad1;
-+ /* response word 3-4 */
-+ u8 type[16];
-+ /* response word 5-6 */
-+ u8 label[16];
-+};
-+
-+struct dprc_cmd_get_obj_region {
-+ /* cmd word 0 */
-+ __le32 obj_id;
-+ __le16 pad0;
-+ u8 region_index;
-+ u8 pad1;
-+ /* cmd word 1-2 */
-+ __le64 pad2[2];
-+ /* cmd word 3-4 */
-+ u8 obj_type[16];
-+};
-+
-+struct dprc_rsp_get_obj_region {
-+ /* response word 0 */
-+ __le64 pad0;
-+ /* response word 1 */
-+ __le32 base_offset;
-+ __le32 pad1;
-+ /* response word 2 */
-+ __le32 size;
-+ u8 type;
-+ u8 pad2[3];
-+ /* response word 3 */
-+ __le32 flags;
-+ __le32 pad3;
-+ /* response word 4 */
-+ __le64 base_addr;
-+};
-+
-+struct dprc_cmd_set_obj_irq {
-+ /* cmd word 0 */
-+ __le32 irq_val;
-+ u8 irq_index;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+ __le32 obj_id;
-+ /* cmd word 3-4 */
-+ u8 obj_type[16];
-+};
-+
-+/*
-+ * DPRC API for managing and querying DPAA resources
-+ */
-+int dprc_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int container_id,
-+ u16 *token);
-+
-+int dprc_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/* DPRC IRQ events */
-+
-+/* IRQ event - Indicates that a new object was added to the container */
-+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
-+/* IRQ event - Indicates that an object was removed from the container */
-+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
-+/*
-+ * IRQ event - Indicates that one of the descendant containers opened by
-+ * this container was destroyed
-+ */
-+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
-+
-+/*
-+ * IRQ event - Indicates that one of the container's opened objects was
-+ * destroyed
-+ */
-+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
-+
-+/* IRQ event - Indicates that an object was created in the container */
-+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
-+
-+/**
-+ * struct dprc_irq_cfg - IRQ configuration
-+ * @paddr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dprc_irq_cfg {
-+ phys_addr_t paddr;
-+ u32 val;
-+ int irq_num;
-+};
-+
-+int dprc_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en);
-+
-+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask);
-+
-+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status);
-+
-+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status);
-+
-+/**
-+ * struct dprc_attributes - Container attributes
-+ * @container_id: Container's ID
-+ * @icid: Container's ICID
-+ * @portal_id: Container's portal ID
-+ * @options: Container's options as set at container's creation
-+ */
-+struct dprc_attributes {
-+ int container_id;
-+ u32 icid;
-+ int portal_id;
-+ u64 options;
-+};
-+
-+int dprc_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dprc_attributes *attributes);
-+
-+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *obj_count);
-+
-+int dprc_get_obj(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int obj_index,
-+ struct fsl_mc_obj_desc *obj_desc);
-+
-+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ char *obj_type,
-+ int obj_id,
-+ u8 irq_index,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+/* Region flags */
-+/* Cacheable - Indicates that region should be mapped as cacheable */
-+#define DPRC_REGION_CACHEABLE 0x00000001
-+#define DPRC_REGION_SHAREABLE 0x00000002
-+
-+/**
-+ * enum dprc_region_type - Region type
-+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
-+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
-+ */
-+enum dprc_region_type {
-+ DPRC_REGION_TYPE_MC_PORTAL,
-+ DPRC_REGION_TYPE_QBMAN_PORTAL,
-+ DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
-+};
-+
-+/**
-+ * struct dprc_region_desc - Mappable region descriptor
-+ * @base_offset: Region offset from region's base address.
-+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
-+ * base address; For DPIO, region base is offset from SoC QMan portals
-+ * base address
-+ * @size: Region size (in bytes)
-+ * @flags: Region attributes
-+ * @type: Portal region type
-+ */
-+struct dprc_region_desc {
-+ u32 base_offset;
-+ u32 size;
-+ u32 flags;
-+ enum dprc_region_type type;
-+ u64 base_address;
-+};
-+
-+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ char *obj_type,
-+ int obj_id,
-+ u8 region_index,
-+ struct dprc_region_desc *region_desc);
-+
-+int dprc_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
-+
-+int dprc_get_container_id(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int *container_id);
-+
-+int dprc_reset_container(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int child_container_id);
-+
-+/*
-+ * Data Path Buffer Pool (DPBP) API
-+ * Contains initialization APIs and runtime control APIs for DPBP
-+ */
-+
-+int dpbp_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpbp_id,
-+ u16 *token);
-+
-+int dpbp_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpbp_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpbp_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpbp_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * struct dpbp_attr - Structure representing DPBP attributes
-+ * @id: DPBP object ID
-+ * @bpid: Hardware buffer pool ID; should be used as an argument in
-+ * acquire/release operations on buffers
-+ */
-+struct dpbp_attr {
-+ int id;
-+ u16 bpid;
-+};
-+
-+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpbp_attr *attr);
-+
-+/* Data Path Concentrator (DPCON) API
-+ * Contains initialization APIs and runtime control APIs for DPCON
-+ */
-+
-+/**
-+ * Use it to disable notifications; see dpcon_set_notification()
-+ */
-+#define DPCON_INVALID_DPIO_ID (int)(-1)
-+
-+int dpcon_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpcon_id,
-+ u16 *token);
-+
-+int dpcon_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpcon_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpcon_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+int dpcon_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
-+
-+/**
-+ * struct dpcon_attr - Structure representing DPCON attributes
-+ * @id: DPCON object ID
-+ * @qbman_ch_id: Channel ID to be used by dequeue operation
-+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
-+ */
-+struct dpcon_attr {
-+ int id;
-+ u16 qbman_ch_id;
-+ u8 num_priorities;
-+};
-+
-+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpcon_attr *attr);
-+
-+/**
-+ * struct dpcon_notification_cfg - Structure representing notification params
-+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
-+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
-+ * @priority: Priority selection within the DPIO channel; valid values
-+ * are 0-7, depending on the number of priorities in that channel
-+ * @user_ctx: User context value provided with each CDAN message
-+ */
-+struct dpcon_notification_cfg {
-+ int dpio_id;
-+ u8 priority;
-+ u64 user_ctx;
-+};
-+
-+int dpcon_set_notification(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpcon_notification_cfg *cfg);
-+
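-+
(Editorial sketch, not taken from the patch above: a minimal example of how the DPCON calls declared here are intended to be driven when detaching a channel from its DPIO. The struct fsl_mc_io portal and the token from an earlier dpcon_open() are assumed to exist already, and cmd_flags is passed as 0 purely as a placeholder; only the function names and DPCON_INVALID_DPIO_ID come from the header itself.)

/* Hedged sketch: disable CDAN notifications, then disable the DPCON. */
static int dpcon_quiesce_example(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id = DPCON_INVALID_DPIO_ID,	/* detach from any DPIO */
		.priority = 0,
		.user_ctx = 0,
	};
	int err;

	err = dpcon_set_notification(mc_io, 0 /* cmd_flags placeholder */, token, &cfg);
	if (err)
		return err;

	return dpcon_disable(mc_io, 0 /* cmd_flags placeholder */, token);
}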
-+struct irq_domain;
-+struct msi_domain_info;
-+
-+/**
-+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
-+ * IRQ pool
-+ */
-+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
-+
-+/**
-+ * struct fsl_mc_resource_pool - Pool of MC resources of a given
-+ * type
-+ * @type: type of resources in the pool
-+ * @max_count: maximum number of resources in the pool
-+ * @free_count: number of free resources in the pool
-+ * @mutex: mutex to serialize access to the pool's free list
-+ * @free_list: anchor node of list of free resources in the pool
-+ * @mc_bus: pointer to the MC bus that owns this resource pool
-+ */
-+struct fsl_mc_resource_pool {
-+ enum fsl_mc_pool_type type;
-+ int max_count;
-+ int free_count;
-+ struct mutex mutex; /* serializes access to free_list */
-+ struct list_head free_list;
-+ struct fsl_mc_bus *mc_bus;
-+};
-+
-+/**
-+ * struct fsl_mc_restool - information associated with a restool device file
-+ * @cdev: struct char device linked to the root dprc
-+ * @dev: dev_t for the char device to be added
-+ * @device: newly created device in /dev
-+ * @mutex: mutex lock to serialize the open/release operations
-+ * @local_instance_in_use: local MC I/O instance in use or not
-+ * @dynamic_instance_count: number of dynamically created MC I/O instances
-+ */
-+struct fsl_mc_restool {
-+ struct cdev cdev;
-+ dev_t dev;
-+ struct device *device;
-+ struct mutex mutex; /* serialize open/release operations */
-+ bool local_instance_in_use;
-+ u32 dynamic_instance_count;
-+};
-+
-+/**
-+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
-+ * @mc_dev: fsl-mc device for the bus device itself.
-+ * @resource_pools: array of resource pools (one pool per resource type)
-+ * for this MC bus. These resources represent allocatable entities
-+ * from the physical DPRC.
-+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
-+ * @scan_mutex: Serializes bus scanning
-+ * @dprc_attr: DPRC attributes
-+ * @restool_misc: struct that abstracts the interaction with userspace restool
-+ */
-+struct fsl_mc_bus {
-+ struct fsl_mc_device mc_dev;
-+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
-+ struct fsl_mc_device_irq *irq_resources;
-+ struct mutex scan_mutex; /* serializes bus scanning */
-+ struct dprc_attributes dprc_attr;
-+ struct fsl_mc_restool restool_misc;
-+};
-+
-+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
-+ const char *driver_override,
-+ unsigned int *total_irq_count);
-+
-+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
-+ struct irq_domain **mc_msi_domain);
-+
-+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
-+ unsigned int irq_count);
-+
-+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
-+
-+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-+
-+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-+
-+void fsl_mc_get_root_dprc(struct device *dev, struct device **root_dprc_dev);
-+
-+#endif /* _FSL_MC_H_ */
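
(Editorial sketch, not part of the removed patch: the DPRC calls declared in the header above are meant to be used in an open/query/close sequence roughly as below. Obtaining the struct fsl_mc_io portal is handled elsewhere in the bus driver, and cmd_flags is 0 here only as a placeholder value.)

/* Hedged sketch of the DPRC query flow declared above. */
static int dprc_query_example(struct fsl_mc_io *mc_io)
{
	struct dprc_attributes attr;
	int container_id, obj_count;
	u16 token;
	int err;

	err = dprc_get_container_id(mc_io, 0, &container_id);
	if (err)
		return err;

	err = dprc_open(mc_io, 0, container_id, &token);
	if (err)
		return err;

	/* Container attributes: icid, portal_id, options */
	err = dprc_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		/* Number of child objects that dprc_get_obj() can iterate over */
		err = dprc_get_obj_count(mc_io, 0, token, &obj_count);

	dprc_close(mc_io, 0, token);
	return err;
}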
---- /dev/null
-+++ b/include/uapi/linux/fsl_mc.h
-@@ -0,0 +1,31 @@
-+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-+/*
-+ * Management Complex (MC) userspace public interface
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ */
-+#ifndef _UAPI_FSL_MC_H_
-+#define _UAPI_FSL_MC_H_
-+
-+#define MC_CMD_NUM_OF_PARAMS 7
-+
-+/**
-+ * struct fsl_mc_command - Management Complex (MC) command structure
-+ * @header: MC command header
-+ * @params: MC command parameters
-+ *
-+ * Used by RESTOOL_SEND_MC_COMMAND
-+ */
-+struct fsl_mc_command {
-+ __u64 header;
-+ __u64 params[MC_CMD_NUM_OF_PARAMS];
-+};
-+
-+#define RESTOOL_IOCTL_TYPE 'R'
-+#define RESTOOL_IOCTL_SEQ 0xE0
-+
-+#define RESTOOL_SEND_MC_COMMAND \
-+ _IOWR(RESTOOL_IOCTL_TYPE, RESTOOL_IOCTL_SEQ, struct fsl_mc_command)
-+
-+#endif /* _UAPI_FSL_MC_H_ */
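
(Editorial sketch: the UAPI header above gives userspace everything needed to push a raw command at the MC through the restool character device. The device path and the command header value below are placeholders, since the header encoding is defined elsewhere in the driver and is not shown in this patch; the ioctl request number and the struct layout are taken from the header itself.)

/* Hedged userspace sketch: send one raw MC command via RESTOOL_SEND_MC_COMMAND. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/fsl_mc.h>

int send_mc_command(const char *dev_path, __u64 header)
{
	struct fsl_mc_command cmd;
	int fd, ret;

	fd = open(dev_path, O_RDWR);	/* e.g. the root DPRC device node (path is hypothetical) */
	if (fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.header = header;		/* placeholder: header encoding not shown in this excerpt */

	ret = ioctl(fd, RESTOOL_SEND_MC_COMMAND, &cmd);
	if (ret == 0)
		printf("response param[0] = 0x%llx\n",
		       (unsigned long long)cmd.params[0]);

	close(fd);
	return ret;
}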
diff --git a/target/linux/layerscape/patches-4.14/709-mdio-phy-support-layerscape.patch b/target/linux/layerscape/patches-4.14/709-mdio-phy-support-layerscape.patch
deleted file mode 100644
index 17fe50cdd5..0000000000
--- a/target/linux/layerscape/patches-4.14/709-mdio-phy-support-layerscape.patch
+++ /dev/null
@@ -1,3729 +0,0 @@
-From c24cbb648c5bde8312dbd5498a4b8c12b2692205 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:45 +0800
-Subject: [PATCH] mdio-phy: support layerscape
-
-This is an integrated patch of mdio-phy for layerscape
-
-Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
-Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
-Signed-off-by: costi <constantin.tudor@freescale.com>
-Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
-Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
-Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
-Signed-off-by: Shaohui Xie <Shaohui.Xie@freescale.com>
-Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
-Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
----
- drivers/net/phy/Kconfig | 33 +
- drivers/net/phy/Makefile | 5 +
- drivers/net/phy/aquantia.c | 286 ++++-
- drivers/net/phy/at803x.c | 21 +
- drivers/net/phy/fsl_backplane.c | 1780 ++++++++++++++++++++++++++++
- drivers/net/phy/fsl_backplane.h | 41 +
- drivers/net/phy/fsl_backplane_serdes_10g.c | 281 +++++
- drivers/net/phy/fsl_backplane_serdes_28g.c | 336 ++++++
- drivers/net/phy/inphi.c | 594 ++++++++++
- drivers/net/phy/mdio-mux-multiplexer.c | 122 ++
- drivers/net/phy/swphy.c | 1 +
- include/linux/phy.h | 3 +
- 12 files changed, 3484 insertions(+), 19 deletions(-)
- create mode 100644 drivers/net/phy/fsl_backplane.c
- create mode 100644 drivers/net/phy/fsl_backplane.h
- create mode 100644 drivers/net/phy/fsl_backplane_serdes_10g.c
- create mode 100644 drivers/net/phy/fsl_backplane_serdes_28g.c
- create mode 100644 drivers/net/phy/inphi.c
- create mode 100644 drivers/net/phy/mdio-mux-multiplexer.c
-
---- a/drivers/net/phy/Kconfig
-+++ b/drivers/net/phy/Kconfig
-@@ -87,9 +87,27 @@ config MDIO_BUS_MUX_MMIOREG
-
- Currently, only 8/16/32 bits registers are supported.
-
-+config MDIO_BUS_MUX_MULTIPLEXER
-+ tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
-+ depends on OF
-+ select MULTIPLEXER
-+ select MDIO_BUS_MUX
-+ help
-+ This module provides a driver for MDIO bus multiplexer
-+ that is controlled via the kernel multiplexer subsystem. The
-+ bus multiplexer connects one of several child MDIO busses to
-+ a parent bus. Child bus selection is under the control of
-+ the kernel multiplexer subsystem.
-+
- config MDIO_CAVIUM
- tristate
-
-+config MDIO_FSL_BACKPLANE
-+ tristate "Support for backplane on Freescale XFI interface"
-+ depends on OF_MDIO
-+ help
-+ This module provides a driver for Freescale XFI's backplane.
-+
- config MDIO_GPIO
- tristate "GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG && GPIOLIB
-@@ -303,6 +321,16 @@ config AT803X_PHY
- ---help---
- Currently supports the AT8030 and AT8035 model
-
-+config AT803X_PHY_SMART_EEE
-+ depends on AT803X_PHY
-+ default n
-+ tristate "SmartEEE feature for AT803X PHYs"
-+ ---help---
-+ Enables the Atheros SmartEEE feature (not IEEE 802.3az). When 2 PHYs
-+ which support this feature are connected back-to-back, they may
-+ negotiate a low-power sleep mode autonomously, without the Ethernet
-+ controller's knowledge. May cause packet loss.
-+
- config BCM63XX_PHY
- tristate "Broadcom 63xx SOCs internal PHY"
- depends on BCM63XX
-@@ -385,6 +413,11 @@ config ICPLUS_PHY
- ---help---
- Currently supports the IP175C and IP1001 PHYs.
-
-+config INPHI_PHY
-+ tristate "Inphi CDR 10G/25G Ethernet PHY"
-+ ---help---
-+ Currently supports the IN112525_S03 part @ 25G
-+
- config INTEL_XWAY_PHY
- tristate "Intel XWAY PHYs"
- ---help---
---- a/drivers/net/phy/Makefile
-+++ b/drivers/net/phy/Makefile
-@@ -44,7 +44,11 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
- obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
- obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
- obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
-+obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
- obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
-+obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
-+obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_10g.o
-+obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_28g.o
- obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
- obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
- obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
-@@ -75,6 +79,7 @@ obj-$(CONFIG_DP83848_PHY) += dp83848.o
- obj-$(CONFIG_DP83867_PHY) += dp83867.o
- obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
- obj-$(CONFIG_ICPLUS_PHY) += icplus.o
-+obj-$(CONFIG_INPHI_PHY) += inphi.o
- obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
- obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
- obj-$(CONFIG_LXT_PHY) += lxt.o
---- a/drivers/net/phy/aquantia.c
-+++ b/drivers/net/phy/aquantia.c
-@@ -4,6 +4,7 @@
- * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
- *
- * Copyright 2015 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
-@@ -27,15 +28,174 @@
-
- #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
- SUPPORTED_1000baseT_Full | \
-+ SUPPORTED_2500baseX_Full | \
- SUPPORTED_100baseT_Full | \
- PHY_DEFAULT_FEATURES)
-
-+#define MDIO_PMA_CTRL1_AQ_SPEED10 0
-+#define MDIO_PMA_CTRL1_AQ_SPEED2500 0x2058
-+#define MDIO_PMA_CTRL1_AQ_SPEED5000 0x205c
-+#define MDIO_PMA_CTRL2_AQ_2500BT 0x30
-+#define MDIO_PMA_CTRL2_AQ_5000BT 0x31
-+#define MDIO_PMA_CTRL2_AQ_TYPE_MASK 0x3F
-+
-+#define MDIO_AN_VENDOR_PROV_CTRL 0xc400
-+#define MDIO_AN_RECV_LP_STATUS 0xe820
-+
-+static int aquantia_write_reg(struct phy_device *phydev, int devad,
-+ u32 regnum, u16 val)
-+{
-+ u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
-+
-+ return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, addr, val);
-+}
-+
-+static int aquantia_read_reg(struct phy_device *phydev, int devad, u32 regnum)
-+{
-+ u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
-+
-+ return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
-+}
-+
-+static int aquantia_pma_setup_forced(struct phy_device *phydev)
-+{
-+ int ctrl1, ctrl2, ret;
-+
-+ /* Half duplex is not supported */
-+ if (phydev->duplex != DUPLEX_FULL)
-+ return -EINVAL;
-+
-+ ctrl1 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
-+ if (ctrl1 < 0)
-+ return ctrl1;
-+
-+ ctrl2 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2);
-+ if (ctrl2 < 0)
-+ return ctrl2;
-+
-+ ctrl1 &= ~MDIO_CTRL1_SPEEDSEL;
-+ ctrl2 &= ~(MDIO_PMA_CTRL2_AQ_TYPE_MASK);
-+
-+ switch (phydev->speed) {
-+ case SPEED_10:
-+ ctrl2 |= MDIO_PMA_CTRL2_10BT;
-+ break;
-+ case SPEED_100:
-+ ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
-+ ctrl2 |= MDIO_PMA_CTRL2_100BTX;
-+ break;
-+ case SPEED_1000:
-+ ctrl1 |= MDIO_PMA_CTRL1_SPEED1000;
-+ /* Assume 1000base-T */
-+ ctrl2 |= MDIO_PMA_CTRL2_1000BT;
-+ break;
-+ case SPEED_10000:
-+ ctrl1 |= MDIO_CTRL1_SPEED10G;
-+ /* Assume 10Gbase-T */
-+ ctrl2 |= MDIO_PMA_CTRL2_10GBT;
-+ break;
-+ case SPEED_2500:
-+ ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED2500;
-+ ctrl2 |= MDIO_PMA_CTRL2_AQ_2500BT;
-+ break;
-+ case SPEED_5000:
-+ ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED5000;
-+ ctrl2 |= MDIO_PMA_CTRL2_AQ_5000BT;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ ret = aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1);
-+ if (ret < 0)
-+ return ret;
-+
-+ return aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
-+}
-+
-+static int aquantia_aneg(struct phy_device *phydev, bool control)
-+{
-+ int reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1);
-+
-+ if (reg < 0)
-+ return reg;
-+
-+ if (control)
-+ reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
-+ else
-+ reg &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
-+
-+ return aquantia_write_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1, reg);
-+}
-+
-+static int aquantia_config_advert(struct phy_device *phydev)
-+{
-+ u32 advertise;
-+ int oldadv, adv, oldadv1, adv1;
-+ int err, changed = 0;
-+
-+ /* Only allow advertising what this PHY supports */
-+ phydev->advertising &= phydev->supported;
-+ advertise = phydev->advertising;
-+
-+ /* Setup standard advertisement */
-+ oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_10GBT_CTRL);
-+ if (oldadv < 0)
-+ return oldadv;
-+
-+ /* Aquantia vendor specific advertisements */
-+ oldadv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_VENDOR_PROV_CTRL);
-+ if (oldadv1 < 0)
-+ return oldadv1;
-+
-+ adv = 0;
-+ adv1 = 0;
-+
-+ /* 100BaseT_full is supported by default */
-+
-+ if (advertise & ADVERTISED_1000baseT_Full)
-+ adv1 |= 0x8000;
-+ if (advertise & ADVERTISED_10000baseT_Full)
-+ adv |= 0x1000;
-+ if (advertise & ADVERTISED_2500baseX_Full)
-+ adv1 |= 0x400;
-+
-+ if (adv != oldadv) {
-+ err = aquantia_write_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_10GBT_CTRL, adv);
-+ if (err < 0)
-+ return err;
-+ changed = 1;
-+ }
-+ if (adv1 != oldadv1) {
-+ err = aquantia_write_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_VENDOR_PROV_CTRL, adv1);
-+ if (err < 0)
-+ return err;
-+ changed = 1;
-+ }
-+
-+ return changed;
-+}
-+
- static int aquantia_config_aneg(struct phy_device *phydev)
- {
-+ int ret = 0;
-+
- phydev->supported = PHY_AQUANTIA_FEATURES;
-- phydev->advertising = phydev->supported;
-+ if (phydev->autoneg == AUTONEG_DISABLE) {
-+ aquantia_pma_setup_forced(phydev);
-+ return aquantia_aneg(phydev, false);
-+ }
-
-- return 0;
-+ ret = aquantia_config_advert(phydev);
-+ if (ret > 0)
-+ /* restart autoneg */
-+ return aquantia_aneg(phydev, true);
-+
-+ return ret;
- }
-
- static int aquantia_aneg_done(struct phy_device *phydev)
-@@ -51,25 +211,26 @@ static int aquantia_config_intr(struct p
- int err;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
-- err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
-+ err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 1);
- if (err < 0)
- return err;
-
-- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
-+ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 1);
- if (err < 0)
- return err;
-
-- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
-+ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1,
-+ 0xff01, 0x1001);
- } else {
-- err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
-+ err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 0);
- if (err < 0)
- return err;
-
-- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
-+ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 0);
- if (err < 0)
- return err;
-
-- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
-+ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff01, 0);
- }
-
- return err;
-@@ -79,42 +240,129 @@ static int aquantia_ack_interrupt(struct
- {
- int reg;
-
-- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
-+ reg = aquantia_read_reg(phydev, MDIO_MMD_AN, 0xcc01);
- return (reg < 0) ? reg : 0;
- }
-
-+static int aquantia_read_advert(struct phy_device *phydev)
-+{
-+ int adv, adv1;
-+
-+ /* Setup standard advertisement */
-+ adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_10GBT_CTRL);
-+
-+ /* Aquantia vendor specific advertisements */
-+ adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_VENDOR_PROV_CTRL);
-+
-+ /* 100BaseT_full is supported by default */
-+ phydev->advertising |= ADVERTISED_100baseT_Full;
-+
-+ if (adv & 0x1000)
-+ phydev->advertising |= ADVERTISED_10000baseT_Full;
-+ else
-+ phydev->advertising &= ~ADVERTISED_10000baseT_Full;
-+ if (adv1 & 0x8000)
-+ phydev->advertising |= ADVERTISED_1000baseT_Full;
-+ else
-+ phydev->advertising &= ~ADVERTISED_1000baseT_Full;
-+ if (adv1 & 0x400)
-+ phydev->advertising |= ADVERTISED_2500baseX_Full;
-+ else
-+ phydev->advertising &= ~ADVERTISED_2500baseX_Full;
-+ return 0;
-+}
-+
-+static int aquantia_read_lp_advert(struct phy_device *phydev)
-+{
-+ int adv, adv1;
-+
-+ /* Read standard link partner advertisement */
-+ adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
-+ MDIO_STAT1);
-+
-+ if (adv & 0x1)
-+ phydev->lp_advertising |= ADVERTISED_Autoneg |
-+ ADVERTISED_100baseT_Full;
-+ else
-+ phydev->lp_advertising &= ~(ADVERTISED_Autoneg |
-+ ADVERTISED_100baseT_Full);
-+
-+ /* Read 10GBase-T link partner advertisement */
-+ adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_10GBT_STAT);
-+
-+ /* Aquantia link partner advertisements */
-+ adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
-+ MDIO_AN_RECV_LP_STATUS);
-+
-+ if (adv & 0x800)
-+ phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
-+ else
-+ phydev->lp_advertising &= ~ADVERTISED_10000baseT_Full;
-+ if (adv1 & 0x8000)
-+ phydev->lp_advertising |= ADVERTISED_1000baseT_Full;
-+ else
-+ phydev->lp_advertising &= ~ADVERTISED_1000baseT_Full;
-+ if (adv1 & 0x400)
-+ phydev->lp_advertising |= ADVERTISED_2500baseX_Full;
-+ else
-+ phydev->lp_advertising &= ~ADVERTISED_2500baseX_Full;
-+
-+ return 0;
-+}
-+
- static int aquantia_read_status(struct phy_device *phydev)
- {
- int reg;
-
-- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
-- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
-+ /* Read the link status twice; the bit is latching low */
-+ reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
-+ reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
-+
- if (reg & MDIO_STAT1_LSTATUS)
- phydev->link = 1;
- else
- phydev->link = 0;
-
-- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
- mdelay(10);
-- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
-+ reg = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
-+
-+ if ((reg & MDIO_CTRL1_SPEEDSELEXT) == MDIO_CTRL1_SPEEDSELEXT)
-+ reg &= MDIO_CTRL1_SPEEDSEL;
-+ else
-+ reg &= MDIO_CTRL1_SPEEDSELEXT;
-
- switch (reg) {
-- case 0x9:
-+ case MDIO_PMA_CTRL1_AQ_SPEED5000:
-+ phydev->speed = SPEED_5000;
-+ break;
-+ case MDIO_PMA_CTRL1_AQ_SPEED2500:
- phydev->speed = SPEED_2500;
- break;
-- case 0x5:
-- phydev->speed = SPEED_1000;
-+ case MDIO_PMA_CTRL1_AQ_SPEED10:
-+ phydev->speed = SPEED_10;
- break;
-- case 0x3:
-+ case MDIO_PMA_CTRL1_SPEED100:
- phydev->speed = SPEED_100;
- break;
-- case 0x7:
-- default:
-+ case MDIO_PMA_CTRL1_SPEED1000:
-+ phydev->speed = SPEED_1000;
-+ break;
-+ case MDIO_CTRL1_SPEED10G:
- phydev->speed = SPEED_10000;
- break;
-+ default:
-+ phydev->speed = SPEED_UNKNOWN;
-+ break;
- }
-+
- phydev->duplex = DUPLEX_FULL;
-
-+ aquantia_read_advert(phydev);
-+ aquantia_read_lp_advert(phydev);
-+
- return 0;
- }
-
---- a/drivers/net/phy/at803x.c
-+++ b/drivers/net/phy/at803x.c
-@@ -68,6 +68,8 @@
- #define AT803X_DEBUG_REG_5 0x05
- #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-
-+#define AT803X_LPI_EN BIT(8)
-+
- #define ATH8030_PHY_ID 0x004dd076
- #define ATH8031_PHY_ID 0x004dd074
- #define ATH8032_PHY_ID 0x004dd023
-@@ -290,6 +292,19 @@ static void at803x_disable_smarteee(stru
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
- }
-
-+static void at803x_enable_smart_eee(struct phy_device *phydev, int on)
-+{
-+ int value;
-+
-+ /* 5.1.11 Smart_eee control3 */
-+ value = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x805D);
-+ if (on)
-+ value |= AT803X_LPI_EN;
-+ else
-+ value &= ~AT803X_LPI_EN;
-+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x805D, value);
-+}
-+
- static int at803x_config_init(struct phy_device *phydev)
- {
- struct at803x_platform_data *pdata;
-@@ -320,6 +335,12 @@ static int at803x_config_init(struct phy
- if (ret < 0)
- return ret;
-
-+#ifdef CONFIG_AT803X_PHY_SMART_EEE
-+ at803x_enable_smart_eee(phydev, 1);
-+#else
-+ at803x_enable_smart_eee(phydev, 0);
-+#endif
-+
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- ret = at803x_enable_rx_delay(phydev);
---- /dev/null
-+++ b/drivers/net/phy/fsl_backplane.c
-@@ -0,0 +1,1780 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * DPAA backplane driver.
-+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
-+ * Florinel Iordache <florinel.iordache@nxp.com>
-+ *
-+ * Copyright 2015 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
-+ *
-+ * Licensed under the GPL-2 or later.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/mii.h>
-+#include <linux/mdio.h>
-+#include <linux/ethtool.h>
-+#include <linux/phy.h>
-+#include <linux/io.h>
-+#include <linux/of.h>
-+#include <linux/of_net.h>
-+#include <linux/of_address.h>
-+#include <linux/of_platform.h>
-+#include <linux/timer.h>
-+#include <linux/delay.h>
-+#include <linux/workqueue.h>
-+#include <linux/netdevice.h>
-+
-+#include "fsl_backplane.h"
-+
-+
-+/* PCS Device Identifier */
-+#define PCS_PHY_DEVICE_ID 0x0083e400
-+#define PCS_PHY_DEVICE_ID_MASK 0xffffffff
-+
-+/* 10G Long cables setup: 1 m to 2 m cables */
-+#define RATIO_PREQ_10G 0x3
-+#define RATIO_PST1Q_10G 0xd
-+#define RATIO_EQ_10G 0x20
-+
-+/* 10G Short cables setup: up to 30 cm cable */
-+//#define RATIO_PREQ_10G 0x3
-+//#define RATIO_PST1Q_10G 0xa
-+//#define RATIO_EQ_10G 0x29
-+
-+/* 40G Long cables setup: 1 m to 2 m cables */
-+#define RATIO_PREQ_40G 0x2
-+#define RATIO_PST1Q_40G 0xd
-+#define RATIO_EQ_40G 0x20
-+
-+/* 40G Short cables setup: up to 30 cm cable */
-+//#define RATIO_PREQ_40G 0x1
-+//#define RATIO_PST1Q_40G 0x3
-+//#define RATIO_EQ_40G 0x29
-+
-+/* LX2 2x40G default RCW setup */
-+//#define RATIO_PREQ_40G 0x0
-+//#define RATIO_PST1Q_40G 0x3
-+//#define RATIO_EQ_40G 0x30
-+
-+/* Max/Min coefficient values */
-+#define PRE_COE_MAX 0x0
-+#define PRE_COE_MIN 0x8
-+#define POST_COE_MAX 0x0
-+#define POST_COE_MIN 0x10
-+#define ZERO_COE_MAX 0x30
-+#define ZERO_COE_MIN 0x0
-+
-+/* KR PMD defines */
-+#define PMD_RESET 0x1
-+#define PMD_STATUS_SUP_STAT 0x4
-+#define PMD_STATUS_FRAME_LOCK 0x2
-+#define TRAIN_EN 0x3
-+#define TRAIN_DISABLE 0x1
-+#define RX_STAT 0x1
-+
-+/* PCS Link up */
-+#define XFI_PCS_SR1 0x20
-+#define KR_RX_LINK_STAT_MASK 0x1000
-+
-+/* KX PCS mode register */
-+#define KX_PCS_IF_MODE 0x8014
-+
-+/* KX PCS mode register init value */
-+#define KX_IF_MODE_INIT 0x8
-+
-+/* KX/KR AN registers */
-+#define AN_CTRL_INIT 0x1200
-+#define KX_AN_AD1_INIT 0x25
-+#define KR_AN_AD1_INIT_10G 0x85
-+#define KR_AN_AD1_INIT_40G 0x105
-+#define AN_LNK_UP_MASK 0x4
-+#define KR_AN_MASK_10G 0x8
-+#define KR_AN_MASK_40G 0x20
-+#define TRAIN_FAIL 0x8
-+#define KR_AN_40G_MDIO_OFFSET 4
-+
-+/* XGKR Timeouts */
-+#define XGKR_TIMEOUT 1050
-+#define XGKR_DENY_RT_INTERVAL 3000
-+#define XGKR_AN_WAIT_ITERATIONS 5
-+
-+/* XGKR Increment/Decrement Requests */
-+#define INCREMENT 1
-+#define DECREMENT 2
-+#define TIMEOUT_LONG 3
-+#define TIMEOUT_M1 3
-+
-+/* XGKR Masks */
-+#define RX_READY_MASK 0x8000
-+#define PRESET_MASK 0x2000
-+#define INIT_MASK 0x1000
-+#define COP1_MASK 0x30
-+#define COP1_SHIFT 4
-+#define COZ_MASK 0xc
-+#define COZ_SHIFT 2
-+#define COM1_MASK 0x3
-+#define COM1_SHIFT 0
-+#define REQUEST_MASK 0x3f
-+#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
-+ COP1_MASK | COZ_MASK | COM1_MASK)
-+
-+/* Lanes definitions */
-+#define MASTER_LANE 0
-+#define SINGLE_LANE 0
-+#define MAX_PHY_LANES_NO 4
-+
-+/* Invalid value */
-+#define VAL_INVALID 0xff
-+
-+/* New XGKR Training Algorithm */
-+#define NEW_ALGORITHM_TRAIN_TX
-+
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+#define FORCE_INC_COP1_NUMBER 0
-+#define FORCE_INC_COM1_NUMBER 1
-+#endif
-+
-+/* Link_Training_Registers offsets */
-+static int lt_MDIO_MMD = 0;
-+static u32 lt_KR_PMD_CTRL = 0;
-+static u32 lt_KR_PMD_STATUS = 0;
-+static u32 lt_KR_LP_CU = 0;
-+static u32 lt_KR_LP_STATUS = 0;
-+static u32 lt_KR_LD_CU = 0;
-+static u32 lt_KR_LD_STATUS = 0;
-+
-+/* KX/KR AN registers offsets */
-+static u32 g_an_AD1 = 0;
-+static u32 g_an_BP_STAT = 0;
-+
-+static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
-+ 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
-+static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
-+ 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
-+
-+enum backplane_mode {
-+ PHY_BACKPLANE_1000BASE_KX,
-+ PHY_BACKPLANE_10GBASE_KR,
-+ PHY_BACKPLANE_40GBASE_KR,
-+ PHY_BACKPLANE_INVAL
-+};
-+
-+enum serdes_type {
-+ SERDES_10G,
-+ SERDES_28G,
-+ SERDES_INVAL
-+};
-+
-+enum coe_filed {
-+ COE_COP1,
-+ COE_COZ,
-+ COE_COM
-+};
-+
-+enum coe_update {
-+ COE_NOTUPDATED,
-+ COE_UPDATED,
-+ COE_MIN,
-+ COE_MAX,
-+ COE_INV
-+};
-+
-+enum train_state {
-+ DETECTING_LP,
-+ TRAINED,
-+};
-+
-+struct tx_condition {
-+ bool bin_m1_late_early;
-+ bool bin_long_late_early;
-+ bool bin_m1_stop;
-+ bool bin_long_stop;
-+ bool tx_complete;
-+ bool sent_init;
-+ int m1_min_max_cnt;
-+ int long_min_max_cnt;
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+ int pre_inc;
-+ int post_inc;
-+#endif
-+};
-+
-+struct xgkr_params {
-+ void *reg_base; /* lane memory map: registers base address */
-+ int idx; /* lane relative index inside a multi-lane PHY */
-+ struct phy_device *phydev;
-+ struct serdes_access *srds;
-+ struct tx_condition tx_c;
-+ struct delayed_work xgkr_wk;
-+ enum train_state state;
-+ int an_wait_count;
-+ unsigned long rt_time;
-+ u32 ld_update;
-+ u32 ld_status;
-+ u32 ratio_preq;
-+ u32 ratio_pst1q;
-+ u32 adpt_eq;
-+ u32 tuned_ratio_preq;
-+ u32 tuned_ratio_pst1q;
-+ u32 tuned_adpt_eq;
-+};
-+
-+struct xgkr_phy_data {
-+ int bp_mode;
-+ u32 phy_lanes;
-+ struct mutex phy_lock;
-+ bool aneg_done;
-+ struct xgkr_params xgkr[MAX_PHY_LANES_NO];
-+};
-+
-+static void setup_an_lt_ls(void)
-+{
-+ /* KR PMD registers */
-+ lt_MDIO_MMD = MDIO_MMD_PMAPMD;
-+ lt_KR_PMD_CTRL = 0x96;
-+ lt_KR_PMD_STATUS = 0x97;
-+ lt_KR_LP_CU = 0x98;
-+ lt_KR_LP_STATUS = 0x99;
-+ lt_KR_LD_CU = 0x9a;
-+ lt_KR_LD_STATUS = 0x9b;
-+
-+ /* KX/KR AN registers */
-+ g_an_AD1 = 0x11;
-+ g_an_BP_STAT = 0x30;
-+}
-+
-+static void setup_an_lt_lx(void)
-+{
-+ /* Auto-Negotiation and Link Training Core Registers page 1: 256 = 0x100 */
-+ lt_MDIO_MMD = MDIO_MMD_AN;
-+ lt_KR_PMD_CTRL = 0x100;
-+ lt_KR_PMD_STATUS = 0x101;
-+ lt_KR_LP_CU = 0x102;
-+ lt_KR_LP_STATUS = 0x103;
-+ lt_KR_LD_CU = 0x104;
-+ lt_KR_LD_STATUS = 0x105;
-+
-+ /* KX/KR AN registers */
-+ g_an_AD1 = 0x03;
-+ g_an_BP_STAT = 0x0F;
-+}
-+
-+static u32 le_ioread32(u32 *reg)
-+{
-+ return ioread32(reg);
-+}
-+
-+static void le_iowrite32(u32 value, u32 *reg)
-+{
-+ iowrite32(value, reg);
-+}
-+
-+static u32 be_ioread32(u32 *reg)
-+{
-+ return ioread32be(reg);
-+}
-+
-+static void be_iowrite32(u32 value, u32 *reg)
-+{
-+ iowrite32be(value, reg);
-+}
-+
-+/**
-+ * xgkr_phy_write_mmd - Wrapper function for phy_write_mmd
-+ * for writing a register on an MMD on a given PHY.
-+ *
-+ * Same rules as for phy_write_mmd();
-+ */
-+static int xgkr_phy_write_mmd(struct xgkr_params *xgkr, int devad, u32 regnum, u16 val)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int mdio_addr = phydev->mdio.addr;
-+ int err;
-+
-+ mutex_lock(&xgkr_inst->phy_lock);
-+
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
-+ //40G AN: prepare mdio address for writing phydev AN registers for 40G on respective lane
-+ phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
-+ }
-+
-+ err = phy_write_mmd(phydev, devad, regnum, val);
-+ if (err)
-+ dev_err(&phydev->mdio.dev, "Writing PHY (%p) MMD = 0x%02x register = 0x%02x failed with error code: 0x%08x \n", phydev, devad, regnum, err);
-+
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
-+ //40G AN: restore mdio address
-+ phydev->mdio.addr = mdio_addr;
-+ }
-+
-+ mutex_unlock(&xgkr_inst->phy_lock);
-+
-+ return err;
-+}
-+
-+/**
-+ * xgkr_phy_read_mmd - Wrapper function for phy_read_mmd
-+ * for reading a register from an MMD on a given PHY.
-+ *
-+ * Same rules as for phy_read_mmd();
-+ */
-+static int xgkr_phy_read_mmd(struct xgkr_params *xgkr, int devad, u32 regnum)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int mdio_addr = phydev->mdio.addr;
-+ int ret;
-+
-+ mutex_lock(&xgkr_inst->phy_lock);
-+
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
-+ //40G AN: prepare mdio address for reading phydev AN registers for 40G on respective lane
-+ phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
-+ }
-+
-+ ret = phy_read_mmd(phydev, devad, regnum);
-+
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
-+ //40G AN: restore mdio address
-+ phydev->mdio.addr = mdio_addr;
-+ }
-+
-+ mutex_unlock(&xgkr_inst->phy_lock);
-+
-+ return ret;
-+}
-+
-+static void tx_condition_init(struct tx_condition *tx_c)
-+{
-+ tx_c->bin_m1_late_early = true;
-+ tx_c->bin_long_late_early = false;
-+ tx_c->bin_m1_stop = false;
-+ tx_c->bin_long_stop = false;
-+ tx_c->tx_complete = false;
-+ tx_c->sent_init = false;
-+ tx_c->m1_min_max_cnt = 0;
-+ tx_c->long_min_max_cnt = 0;
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+ tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
-+ tx_c->post_inc = FORCE_INC_COP1_NUMBER;
-+#endif
-+}
-+
-+void tune_tecr(struct xgkr_params *xgkr)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ bool reset = false;
-+
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
-+ /* Reset only the Master Lane */
-+ reset = (xgkr->idx == MASTER_LANE);
-+ } else {
-+ reset = true;
-+ }
-+
-+ xgkr->srds->tune_tecr(xgkr->reg_base, xgkr->ratio_preq, xgkr->ratio_pst1q, xgkr->adpt_eq, reset);
-+
-+ xgkr->tuned_ratio_preq = xgkr->ratio_preq;
-+ xgkr->tuned_ratio_pst1q = xgkr->ratio_pst1q;
-+ xgkr->tuned_adpt_eq = xgkr->adpt_eq;
-+}
-+
-+static void start_lt(struct xgkr_params *xgkr)
-+{
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_EN);
-+}
-+
-+static void stop_lt(struct xgkr_params *xgkr)
-+{
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
-+}
-+
-+static void reset_lt(struct xgkr_params *xgkr)
-+{
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, MDIO_CTRL1, PMD_RESET);
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_CU, 0);
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_STATUS, 0);
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS, 0);
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU, 0);
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS, 0);
-+
-+}
-+
-+static void ld_coe_status(struct xgkr_params *xgkr)
-+{
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
-+ lt_KR_LD_STATUS, xgkr->ld_status);
-+}
-+
-+static void ld_coe_update(struct xgkr_params *xgkr)
-+{
-+ dev_dbg(&xgkr->phydev->mdio.dev, "sending request: %x\n", xgkr->ld_update);
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
-+ lt_KR_LD_CU, xgkr->ld_update);
-+}
-+
-+static void start_xgkr_state_machine(struct delayed_work *work)
-+{
-+ queue_delayed_work(system_power_efficient_wq, work,
-+ msecs_to_jiffies(XGKR_TIMEOUT));
-+}
-+
-+static void start_xgkr_an(struct xgkr_params *xgkr)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int i;
-+ int err;
-+
-+ switch (xgkr_inst->bp_mode)
-+ {
-+ case PHY_BACKPLANE_1000BASE_KX:
-+ dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
-+ break;
-+
-+ case PHY_BACKPLANE_10GBASE_KR:
-+ err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_10G);
-+ if (err)
-+ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", g_an_AD1, err);
-+ udelay(1);
-+ err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
-+ if (err)
-+ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", MDIO_CTRL1, err);
-+ break;
-+
-+ case PHY_BACKPLANE_40GBASE_KR:
-+ if (xgkr->idx == MASTER_LANE) {
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
-+ err = xgkr_phy_write_mmd(&xgkr_inst->xgkr[i], MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_40G);
-+ if (err)
-+ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on lane %d failed with error code: 0x%08x \n", g_an_AD1, xgkr_inst->xgkr[i].idx, err);
-+ }
-+ udelay(1);
-+ err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
-+ if (err)
-+ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on Master Lane failed with error code: 0x%08x \n", MDIO_CTRL1, err);
-+ }
-+ break;
-+ }
-+}
-+
-+static void start_1gkx_an(struct phy_device *phydev)
-+{
-+ phy_write_mmd(phydev, MDIO_MMD_PCS, KX_PCS_IF_MODE, KX_IF_MODE_INIT);
-+ phy_write_mmd(phydev, MDIO_MMD_AN, g_an_AD1, KX_AN_AD1_INIT);
-+ phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
-+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
-+}
-+
-+static void reset_tecr(struct xgkr_params *xgkr)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+
-+ switch (xgkr_inst->bp_mode)
-+ {
-+ case PHY_BACKPLANE_1000BASE_KX:
-+ dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
-+ break;
-+
-+ case PHY_BACKPLANE_10GBASE_KR:
-+ xgkr->ratio_preq = RATIO_PREQ_10G;
-+ xgkr->ratio_pst1q = RATIO_PST1Q_10G;
-+ xgkr->adpt_eq = RATIO_EQ_10G;
-+ break;
-+
-+ case PHY_BACKPLANE_40GBASE_KR:
-+ xgkr->ratio_preq = RATIO_PREQ_40G;
-+ xgkr->ratio_pst1q = RATIO_PST1Q_40G;
-+ xgkr->adpt_eq = RATIO_EQ_40G;
-+ break;
-+ }
-+
-+ tune_tecr(xgkr);
-+}
-+
-+static void init_xgkr(struct xgkr_params *xgkr, int reset)
-+{
-+ if (reset)
-+ reset_tecr(xgkr);
-+
-+ tx_condition_init(&xgkr->tx_c);
-+ xgkr->state = DETECTING_LP;
-+
-+ xgkr->ld_status &= RX_READY_MASK;
-+ ld_coe_status(xgkr);
-+ xgkr->ld_update = 0;
-+ xgkr->ld_status &= ~RX_READY_MASK;
-+ ld_coe_status(xgkr);
-+
-+}
-+
-+static void initialize(struct xgkr_params *xgkr)
-+{
-+ reset_tecr(xgkr);
-+
-+ xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
-+ xgkr->ld_status |= COE_UPDATED << COP1_SHIFT |
-+ COE_UPDATED << COZ_SHIFT |
-+ COE_UPDATED << COM1_SHIFT;
-+ ld_coe_status(xgkr);
-+}
-+
-+static void train_remote_tx(struct xgkr_params *xgkr)
-+{
-+ struct tx_condition *tx_c = &xgkr->tx_c;
-+ bool bin_m1_early, bin_long_early;
-+ u32 lp_status, old_ld_update;
-+ u32 status_cop1, status_coz, status_com1;
-+ u32 req_cop1, req_coz, req_com1, req_preset, req_init;
-+ u32 temp;
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+ u32 median_gaink2;
-+#endif
-+
-+recheck:
-+ if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
-+ tx_c->tx_complete = true;
-+ xgkr->ld_status |= RX_READY_MASK;
-+ ld_coe_status(xgkr);
-+
-+ /* tell LP we are ready */
-+ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
-+ lt_KR_PMD_STATUS, RX_STAT);
-+
-+ return;
-+ }
-+
-+ /* We start by checking the current LP status. If we got any responses,
-+ * we can clear up the appropriate update request so that the
-+ * subsequent code may easily issue new update requests if needed.
-+ */
-+ lp_status = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
-+ REQUEST_MASK;
-+
-+ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
-+ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
-+ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
-+
-+ old_ld_update = xgkr->ld_update;
-+ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
-+ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
-+ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
-+ req_preset = old_ld_update & PRESET_MASK;
-+ req_init = old_ld_update & INIT_MASK;
-+
-+ /* IEEE802.3-2008, 72.6.10.2.3.1
-+ * We may clear PRESET when all coefficients show UPDATED or MAX.
-+ */
-+ if (req_preset) {
-+ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
-+ (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
-+ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
-+ xgkr->ld_update &= ~PRESET_MASK;
-+ }
-+ }
-+
-+ /* IEEE802.3-2008, 72.6.10.2.3.2
-+ * We may clear INITIALIZE when no coefficients show NOT UPDATED.
-+ */
-+ if (req_init) {
-+ if (status_cop1 != COE_NOTUPDATED &&
-+ status_coz != COE_NOTUPDATED &&
-+ status_com1 != COE_NOTUPDATED) {
-+ xgkr->ld_update &= ~INIT_MASK;
-+ }
-+ }
-+
-+ /* IEEE802.3-2008, 72.6.10.2.3.2
-+ * we send initialize to the other side to ensure default settings
-+ * for the LP. Naturally, we should do this only once.
-+ */
-+ if (!tx_c->sent_init) {
-+ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
-+ xgkr->ld_update = INIT_MASK;
-+ tx_c->sent_init = true;
-+ }
-+ }
-+
-+ /* IEEE802.3-2008, 72.6.10.2.3.3
-+ * We set coefficient requests to HOLD when we get the information
-+ * about any updates. On clearing our prior response, we also update
-+ * our internal status.
-+ */
-+ if (status_cop1 != COE_NOTUPDATED) {
-+ if (req_cop1) {
-+ xgkr->ld_update &= ~COP1_MASK;
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+ if (tx_c->post_inc) {
-+ if (req_cop1 == INCREMENT &&
-+ status_cop1 == COE_MAX) {
-+ tx_c->post_inc = 0;
-+ tx_c->bin_long_stop = true;
-+ tx_c->bin_m1_stop = true;
-+ } else {
-+ tx_c->post_inc -= 1;
-+ }
-+
-+ ld_coe_update(xgkr);
-+ goto recheck;
-+ }
-+#endif
-+ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
-+ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
-+ dev_dbg(&xgkr->phydev->mdio.dev, "COP1 hit limit %s",
-+ (status_cop1 == COE_MIN) ?
-+ "DEC MIN" : "INC MAX");
-+ tx_c->long_min_max_cnt++;
-+ if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
-+ tx_c->bin_long_stop = true;
-+ ld_coe_update(xgkr);
-+ goto recheck;
-+ }
-+ }
-+ }
-+ }
-+
-+ if (status_coz != COE_NOTUPDATED) {
-+ if (req_coz)
-+ xgkr->ld_update &= ~COZ_MASK;
-+ }
-+
-+ if (status_com1 != COE_NOTUPDATED) {
-+ if (req_com1) {
-+ xgkr->ld_update &= ~COM1_MASK;
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+ if (tx_c->pre_inc) {
-+ if (req_com1 == INCREMENT &&
-+ status_com1 == COE_MAX)
-+ tx_c->pre_inc = 0;
-+ else
-+ tx_c->pre_inc -= 1;
-+
-+ ld_coe_update(xgkr);
-+ goto recheck;
-+ }
-+#endif
-+ /* Stop if we have reached the limit for a parameter. */
-+ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
-+ (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
-+ dev_dbg(&xgkr->phydev->mdio.dev, "COM1 hit limit %s",
-+ (status_com1 == COE_MIN) ?
-+ "DEC MIN" : "INC MAX");
-+ tx_c->m1_min_max_cnt++;
-+ if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
-+ tx_c->bin_m1_stop = true;
-+ ld_coe_update(xgkr);
-+ goto recheck;
-+ }
-+ }
-+ }
-+ }
-+
-+ if (old_ld_update != xgkr->ld_update) {
-+ ld_coe_update(xgkr);
-+ /* Redo these status checks and updates until we have no more
-+ * changes, to speed up the overall process.
-+ */
-+ goto recheck;
-+ }
-+
-+ /* Do nothing if we have pending request. */
-+ if ((req_coz || req_com1 || req_cop1))
-+ return;
-+ else if (lp_status)
-+ /* No pending request but LP status was not reverted to
-+ * not updated.
-+ */
-+ return;
-+
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+ if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
-+ if (tx_c->pre_inc) {
-+ xgkr->ld_update = INCREMENT << COM1_SHIFT;
-+ ld_coe_update(xgkr);
-+ return;
-+ }
-+
-+ if (status_cop1 != COE_MAX) {
-+ median_gaink2 = xgkr->srds->get_median_gaink2(xgkr->reg_base);
-+ if (median_gaink2 == 0xf) {
-+ tx_c->post_inc = 1;
-+ } else {
-+ /* Gaink2 median lower than "F" */
-+ tx_c->bin_m1_stop = true;
-+ tx_c->bin_long_stop = true;
-+ goto recheck;
-+ }
-+ } else {
-+ /* C1 MAX */
-+ tx_c->bin_m1_stop = true;
-+ tx_c->bin_long_stop = true;
-+ goto recheck;
-+ }
-+
-+ if (tx_c->post_inc) {
-+ xgkr->ld_update = INCREMENT << COP1_SHIFT;
-+ ld_coe_update(xgkr);
-+ return;
-+ }
-+ }
-+#endif
-+
-+ /* snapshot and select bin */
-+ bin_m1_early = xgkr->srds->is_bin_early(BIN_M1, xgkr->reg_base);
-+ bin_long_early = xgkr->srds->is_bin_early(BIN_LONG, xgkr->reg_base);
-+
-+ if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
-+ tx_c->bin_m1_stop = true;
-+ goto recheck;
-+ }
-+
-+ if (!tx_c->bin_long_stop &&
-+ tx_c->bin_long_late_early && !bin_long_early) {
-+ tx_c->bin_long_stop = true;
-+ goto recheck;
-+ }
-+
-+ /* IEEE802.3-2008, 72.6.10.2.3.3
-+ * We only request coefficient updates when no PRESET/INITIALIZE is
-+ * pending. We also only request coefficient updates when the
-+ * corresponding status is NOT UPDATED and nothing is pending.
-+ */
-+ if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
-+ if (!tx_c->bin_long_stop) {
-+ /* BinM1 correction means changing COM1 */
-+ if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
-+ /* Avoid BinM1Late by requesting an
-+ * immediate decrement.
-+ */
-+ if (!bin_m1_early) {
-+ /* request decrement c(-1) */
-+ temp = DECREMENT << COM1_SHIFT;
-+ xgkr->ld_update = temp;
-+ ld_coe_update(xgkr);
-+ tx_c->bin_m1_late_early = bin_m1_early;
-+ return;
-+ }
-+ }
-+
-+ /* BinLong correction means changing COP1 */
-+ if (!status_cop1 && !(xgkr->ld_update & COP1_MASK)) {
-+ /* Locate BinLong transition point (if any)
-+ * while avoiding BinM1Late.
-+ */
-+ if (bin_long_early) {
-+ /* request increment c(1) */
-+ temp = INCREMENT << COP1_SHIFT;
-+ xgkr->ld_update = temp;
-+ } else {
-+ /* request decrement c(1) */
-+ temp = DECREMENT << COP1_SHIFT;
-+ xgkr->ld_update = temp;
-+ }
-+
-+ ld_coe_update(xgkr);
-+ tx_c->bin_long_late_early = bin_long_early;
-+ }
-+ /* We try to finish BinLong before we do BinM1 */
-+ return;
-+ }
-+
-+ if (!tx_c->bin_m1_stop) {
-+ /* BinM1 correction means changing COM1 */
-+ if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
-+ /* Locate BinM1 transition point (if any) */
-+ if (bin_m1_early) {
-+ /* request increment c(-1) */
-+ temp = INCREMENT << COM1_SHIFT;
-+ xgkr->ld_update = temp;
-+ } else {
-+ /* request decrement c(-1) */
-+ temp = DECREMENT << COM1_SHIFT;
-+ xgkr->ld_update = temp;
-+ }
-+
-+ ld_coe_update(xgkr);
-+ tx_c->bin_m1_late_early = bin_m1_early;
-+ }
-+ }
-+ }
-+}
-+
-+static int is_link_up(struct phy_device *phydev)
-+{
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int val = 0;
-+
-+ mutex_lock(&xgkr_inst->phy_lock);
-+
-+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, XFI_PCS_SR1);
-+
-+ mutex_unlock(&xgkr_inst->phy_lock);
-+
-+ return (val & KR_RX_LINK_STAT_MASK) ? 1 : 0;
-+}
-+
-+static int is_link_training_fail(struct xgkr_params *xgkr)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ int val;
-+ int timeout = 100;
-+
-+ val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS);
-+
-+ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
-+ /* check LNK_STAT for sure */
-+ while (timeout--) {
-+ if (is_link_up(phydev))
-+ return 0;
-+
-+ usleep_range(100, 500);
-+ }
-+ }
-+
-+ return 1;
-+}
-+
-+static int check_rx(struct xgkr_params *xgkr)
-+{
-+ return xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
-+ RX_READY_MASK;
-+}
-+
-+/* Coefficient values have hardware restrictions */
-+static int is_ld_valid(struct xgkr_params *xgkr)
-+{
-+ u32 ratio_pst1q = xgkr->ratio_pst1q;
-+ u32 adpt_eq = xgkr->adpt_eq;
-+ u32 ratio_preq = xgkr->ratio_preq;
-+
-+ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
-+ return 0;
-+
-+ if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
-+ ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
-+ return 0;
-+
-+ if (ratio_preq > ratio_pst1q)
-+ return 0;
-+
-+ if (ratio_preq > 8)
-+ return 0;
-+
-+ if (adpt_eq < 26)
-+ return 0;
-+
-+ if (ratio_pst1q > 16)
-+ return 0;
-+
-+ return 1;
-+}
-+
-+static int is_value_allowed(const u32 *val_table, u32 val)
-+{
-+ int i;
-+
-+ for (i = 0;; i++) {
-+ if (*(val_table + i) == VAL_INVALID)
-+ return 0;
-+ if (*(val_table + i) == val)
-+ return 1;
-+ }
-+}
-+
-+static enum coe_update inc_dec(struct xgkr_params *xgkr, int field, int request)
-+{
-+ u32 ld_limit[3], ld_coe[3], step[3];
-+
-+ ld_coe[0] = xgkr->ratio_pst1q;
-+ ld_coe[1] = xgkr->adpt_eq;
-+ ld_coe[2] = xgkr->ratio_preq;
-+
-+ /* Information specific to the SerDes for 10GBase-KR:
-+ * Incrementing C(+1) means *decrementing* RATIO_PST1Q
-+ * Incrementing C(0) means incrementing ADPT_EQ
-+ * Incrementing C(-1) means *decrementing* RATIO_PREQ
-+ */
-+ step[0] = -1;
-+ step[1] = 1;
-+ step[2] = -1;
-+
-+ switch (request) {
-+ case INCREMENT:
-+ ld_limit[0] = POST_COE_MAX;
-+ ld_limit[1] = ZERO_COE_MAX;
-+ ld_limit[2] = PRE_COE_MAX;
-+ if (ld_coe[field] != ld_limit[field])
-+ ld_coe[field] += step[field];
-+ else
-+ /* MAX */
-+ return COE_MAX;
-+ break;
-+ case DECREMENT:
-+ ld_limit[0] = POST_COE_MIN;
-+ ld_limit[1] = ZERO_COE_MIN;
-+ ld_limit[2] = PRE_COE_MIN;
-+ if (ld_coe[field] != ld_limit[field])
-+ ld_coe[field] -= step[field];
-+ else
-+ /* MIN */
-+ return COE_MIN;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ if (is_ld_valid(xgkr)) {
-+ /* accept new ld */
-+ xgkr->ratio_pst1q = ld_coe[0];
-+ xgkr->adpt_eq = ld_coe[1];
-+ xgkr->ratio_preq = ld_coe[2];
-+ /* only some values for preq and pst1q can be used.
-+ * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
-+ * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
-+ */
-+ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
-+ dev_dbg(&xgkr->phydev->mdio.dev,
-+ "preq skipped value: %d\n", ld_coe[2]);
-+ /* NOT UPDATED */
-+ return COE_NOTUPDATED;
-+ }
-+
-+ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
-+ dev_dbg(&xgkr->phydev->mdio.dev,
-+ "pst1q skipped value: %d\n", ld_coe[0]);
-+ /* NOT UPDATED */
-+ return COE_NOTUPDATED;
-+ }
-+
-+ tune_tecr(xgkr);
-+ } else {
-+ if (request == DECREMENT)
-+ /* MIN */
-+ return COE_MIN;
-+ if (request == INCREMENT)
-+ /* MAX */
-+ return COE_MAX;
-+ }
-+
-+ /* UPDATED */
-+ return COE_UPDATED;
-+}
-+
-+static void min_max_updated(struct xgkr_params *xgkr, int field, enum coe_update cs)
-+{
-+ u32 mask, val;
-+ u32 ld_cs = cs;
-+
-+ if (cs == COE_INV)
-+ return;
-+
-+ switch (field) {
-+ case COE_COP1:
-+ mask = COP1_MASK;
-+ val = ld_cs << COP1_SHIFT;
-+ break;
-+ case COE_COZ:
-+ mask = COZ_MASK;
-+ val = ld_cs << COZ_SHIFT;
-+ break;
-+ case COE_COM:
-+ mask = COM1_MASK;
-+ val = ld_cs << COM1_SHIFT;
-+ break;
-+ default:
-+ return;
-+ }
-+
-+ xgkr->ld_status &= ~mask;
-+ xgkr->ld_status |= val;
-+}
-+
-+static void check_request(struct xgkr_params *xgkr, int request)
-+{
-+ int cop1_req, coz_req, com_req;
-+ int old_status;
-+ enum coe_update cu;
-+
-+ cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
-+ coz_req = (request & COZ_MASK) >> COZ_SHIFT;
-+ com_req = (request & COM1_MASK) >> COM1_SHIFT;
-+
-+ /* IEEE802.3-2008, 72.6.10.2.5
-+ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
-+ */
-+ old_status = xgkr->ld_status;
-+
-+ if (cop1_req && !(xgkr->ld_status & COP1_MASK)) {
-+ cu = inc_dec(xgkr, COE_COP1, cop1_req);
-+ min_max_updated(xgkr, COE_COP1, cu);
-+ }
-+
-+ if (coz_req && !(xgkr->ld_status & COZ_MASK)) {
-+ cu = inc_dec(xgkr, COE_COZ, coz_req);
-+ min_max_updated(xgkr, COE_COZ, cu);
-+ }
-+
-+ if (com_req && !(xgkr->ld_status & COM1_MASK)) {
-+ cu = inc_dec(xgkr, COE_COM, com_req);
-+ min_max_updated(xgkr, COE_COM, cu);
-+ }
-+
-+ if (old_status != xgkr->ld_status)
-+ ld_coe_status(xgkr);
-+}
-+
-+static void preset(struct xgkr_params *xgkr)
-+{
-+ /* These are all MAX values from the IEEE802.3 perspective. */
-+ xgkr->ratio_pst1q = POST_COE_MAX;
-+ xgkr->adpt_eq = ZERO_COE_MAX;
-+ xgkr->ratio_preq = PRE_COE_MAX;
-+
-+ tune_tecr(xgkr);
-+ xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
-+ xgkr->ld_status |= COE_MAX << COP1_SHIFT |
-+ COE_MAX << COZ_SHIFT |
-+ COE_MAX << COM1_SHIFT;
-+ ld_coe_status(xgkr);
-+}
-+
-+static void train_local_tx(struct xgkr_params *xgkr)
-+{
-+ int request, old_ld_status;
-+
-+ /* get request from LP */
-+ request = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU) &
-+ (LD_ALL_MASK);
-+
-+ old_ld_status = xgkr->ld_status;
-+
-+ /* IEEE802.3-2008, 72.6.10.2.5
-+ * Ensure we always go to NOT UPDATED for status reporting in
-+ * response to HOLD requests.
-+ * IEEE802.3-2008, 72.6.10.2.3.1/2
-+ * ... but only if PRESET/INITIALIZE are not active to ensure
-+ * we keep status until they are released.
-+ */
-+ if (!(request & (PRESET_MASK | INIT_MASK))) {
-+ if (!(request & COP1_MASK))
-+ xgkr->ld_status &= ~COP1_MASK;
-+
-+ if (!(request & COZ_MASK))
-+ xgkr->ld_status &= ~COZ_MASK;
-+
-+ if (!(request & COM1_MASK))
-+ xgkr->ld_status &= ~COM1_MASK;
-+
-+ if (old_ld_status != xgkr->ld_status)
-+ ld_coe_status(xgkr);
-+ }
-+
-+ /* As soon as the LP shows ready, no need to do any more updates. */
-+ if (check_rx(xgkr)) {
-+ /* LP receiver is ready */
-+ if (xgkr->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
-+ xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
-+ ld_coe_status(xgkr);
-+ }
-+ } else {
-+ /* IEEE802.3-2008, 72.6.10.2.3.1/2
-+ * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
-+ */
-+ if (request & (PRESET_MASK | INIT_MASK)) {
-+ if (!(xgkr->ld_status &
-+ (COP1_MASK | COZ_MASK | COM1_MASK))) {
-+ if (request & PRESET_MASK)
-+ preset(xgkr);
-+
-+ if (request & INIT_MASK)
-+ initialize(xgkr);
-+ }
-+ }
-+
-+ /* LP Coefficient are not in HOLD */
-+ if (request & REQUEST_MASK)
-+ check_request(xgkr, request & REQUEST_MASK);
-+ }
-+}
-+
-+static void xgkr_start_train(struct xgkr_params *xgkr)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ struct tx_condition *tx_c = &xgkr->tx_c;
-+ int val = 0, i, j;
-+ int lt_state;
-+ unsigned long dead_line;
-+ int lp_rx_ready, tx_training_complete;
-+ u32 lt_timeout = 500;
-+
-+ init_xgkr(xgkr, 0);
-+
-+ start_lt(xgkr);
-+
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
-+ lt_timeout = 2000;
-+ }
-+
-+ for (i = 0; i < 2;) {
-+
-+ dead_line = jiffies + msecs_to_jiffies(lt_timeout);
-+
-+ while (time_before(jiffies, dead_line)) {
-+
-+ val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
-+ lt_KR_PMD_STATUS);
-+
-+ if (val & TRAIN_FAIL) {
-+ /* LT failed already, reset lane to avoid
-+ * it run into hanging, then start LT again.
-+ */
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
-+ /* Reset only the Master Lane */
-+ if (xgkr->idx == MASTER_LANE)
-+ xgkr->srds->reset_lane(xgkr->reg_base);
-+ } else {
-+ xgkr->srds->reset_lane(xgkr->reg_base);
-+ }
-+
-+ start_lt(xgkr);
-+ } else if ((val & PMD_STATUS_SUP_STAT) &&
-+ (val & PMD_STATUS_FRAME_LOCK))
-+ break;
-+ usleep_range(100, 500);
-+ }
-+
-+ if (!((val & PMD_STATUS_FRAME_LOCK) &&
-+ (val & PMD_STATUS_SUP_STAT))) {
-+ i++;
-+ continue;
-+ }
-+
-+ /* init process */
-+ lp_rx_ready = false;
-+ tx_training_complete = false;
-+ /* the LT should be finished in 500ms, failed or OK. */
-+ dead_line = jiffies + msecs_to_jiffies(lt_timeout);
-+
-+ while (time_before(jiffies, dead_line)) {
-+ /* check if the LT is already failed */
-+
-+ lt_state = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
-+ lt_KR_PMD_STATUS);
-+
-+ if (lt_state & TRAIN_FAIL) {
-+
-+ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
-+ /* Reset only the Master Lane */
-+ if (xgkr->idx == MASTER_LANE)
-+ xgkr->srds->reset_lane(xgkr->reg_base);
-+ } else {
-+ xgkr->srds->reset_lane(xgkr->reg_base);
-+ }
-+
-+ break;
-+ }
-+
-+ lp_rx_ready = check_rx(xgkr);
-+ tx_training_complete = tx_c->tx_complete;
-+
-+ if (lp_rx_ready && tx_training_complete)
-+ break;
-+
-+ if (!lp_rx_ready)
-+ train_local_tx(xgkr);
-+
-+ if (!tx_training_complete)
-+ train_remote_tx(xgkr);
-+
-+ usleep_range(100, 500);
-+ }
-+
-+ i++;
-+ /* check LT result */
-+ if (is_link_training_fail(xgkr)) {
-+ init_xgkr(xgkr, 0);
-+ continue;
-+ } else {
-+ stop_lt(xgkr);
-+ xgkr->state = TRAINED;
-+
-+ switch (xgkr_inst->bp_mode)
-+ {
-+ case PHY_BACKPLANE_10GBASE_KR:
-+ if (phydev->attached_dev == NULL)
-+ dev_info(&phydev->mdio.dev, "10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
-+ xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
-+ else
-+ dev_info(&phydev->mdio.dev, "%s %s: 10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
-+ dev_driver_string(phydev->attached_dev->dev.parent),
-+ dev_name(phydev->attached_dev->dev.parent),
-+ xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
-+ break;
-+
-+ case PHY_BACKPLANE_40GBASE_KR:
-+ if (xgkr->idx == xgkr_inst->phy_lanes - 1) {
-+ if (phydev->attached_dev == NULL)
-+ dev_info(&phydev->mdio.dev, "40GBase-KR link trained at lanes Tx equalization:\n");
-+ else
-+ dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR link trained at lanes Tx equalization:\n",
-+ dev_driver_string(phydev->attached_dev->dev.parent),
-+ dev_name(phydev->attached_dev->dev.parent));
-+
-+ for (j = 0; j < xgkr_inst->phy_lanes; j++) {
-+ if (phydev->attached_dev == NULL)
-+ dev_info(&phydev->mdio.dev, "40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
-+ j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
-+ else
-+ dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
-+ dev_driver_string(phydev->attached_dev->dev.parent),
-+ dev_name(phydev->attached_dev->dev.parent),
-+ j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
-+ }
-+ }
-+ break;
-+ }
-+
-+ break;
-+ }
-+ }
-+}
-+
-+static void xgkr_request_restart_an(struct xgkr_params *xgkr)
-+{
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int i;
-+
-+ if (time_before(jiffies, xgkr->rt_time))
-+ return;
-+
-+ switch (xgkr_inst->bp_mode)
-+ {
-+ case PHY_BACKPLANE_1000BASE_KX:
-+		dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
-+ break;
-+
-+ case PHY_BACKPLANE_10GBASE_KR:
-+ init_xgkr(xgkr, 0);
-+ reset_lt(xgkr);
-+ xgkr->state = DETECTING_LP;
-+ start_xgkr_an(xgkr);
-+ start_xgkr_state_machine(&xgkr->xgkr_wk);
-+ break;
-+
-+ case PHY_BACKPLANE_40GBASE_KR:
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
-+ init_xgkr(&xgkr_inst->xgkr[i], 0);
-+ reset_lt(&xgkr_inst->xgkr[i]);
-+ xgkr_inst->xgkr[i].state = DETECTING_LP;
-+ }
-+ //Start AN only for Master Lane
-+ start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
-+ //start state machine
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
-+ start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
-+ }
-+ break;
-+ }
-+
-+ xgkr->rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
-+}
-+
-+static void xgkr_state_machine(struct work_struct *work)
-+{
-+ struct delayed_work *dwork = to_delayed_work(work);
-+ struct xgkr_params *xgkr = container_of(dwork,
-+ struct xgkr_params, xgkr_wk);
-+ struct phy_device *phydev = xgkr->phydev;
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int an_state;
-+ bool start_train = false;
-+ bool all_lanes_trained = false;
-+ int i;
-+
-+ if (!xgkr_inst->aneg_done) {
-+ start_xgkr_state_machine(&xgkr->xgkr_wk);
-+ return;
-+ }
-+
-+ mutex_lock(&phydev->lock);
-+
-+ switch (xgkr->state) {
-+ case DETECTING_LP:
-+
-+ switch (xgkr_inst->bp_mode)
-+ {
-+ case PHY_BACKPLANE_1000BASE_KX:
-+			dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
-+ break;
-+
-+ case PHY_BACKPLANE_10GBASE_KR:
-+ an_state = xgkr_phy_read_mmd(xgkr, MDIO_MMD_AN, g_an_BP_STAT);
-+ if (an_state & KR_AN_MASK_10G) {
-+ //AN acquired: Train the lane
-+ xgkr->an_wait_count = 0;
-+ start_train = true;
-+ } else {
-+ //AN lost or not yet acquired
-+ if (!is_link_up(phydev)) {
-+ //Link is down: restart training
-+ xgkr->an_wait_count = 0;
-+ xgkr_request_restart_an(xgkr);
-+ } else {
-+					//Link is up: wait a few iterations for AN to be acquired
-+ if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
-+ xgkr->an_wait_count = 0;
-+ xgkr_request_restart_an(xgkr);
-+ } else {
-+ xgkr->an_wait_count++;
-+ }
-+ }
-+ }
-+ break;
-+
-+ case PHY_BACKPLANE_40GBASE_KR:
-+ //Check AN state only on Master Lane
-+ an_state = xgkr_phy_read_mmd(&xgkr_inst->xgkr[MASTER_LANE], MDIO_MMD_AN, g_an_BP_STAT);
-+ if (an_state & KR_AN_MASK_40G) {
-+ //AN acquired: Train all lanes in order starting with Master Lane
-+ xgkr->an_wait_count = 0;
-+ if (xgkr->idx == MASTER_LANE) {
-+ start_train = true;
-+ }
-+ else if (xgkr_inst->xgkr[xgkr->idx - 1].state == TRAINED) {
-+ start_train = true;
-+ }
-+ } else {
-+ //AN lost or not yet acquired
-+ if (!is_link_up(phydev)) {
-+ //Link is down: restart training
-+ xgkr->an_wait_count = 0;
-+ xgkr_request_restart_an(xgkr);
-+ } else {
-+					//Link is up: wait a few iterations for AN to be acquired
-+ if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
-+ xgkr->an_wait_count = 0;
-+ xgkr_request_restart_an(xgkr);
-+ } else {
-+ xgkr->an_wait_count++;
-+ }
-+ }
-+ }
-+ break;
-+ }
-+ break;
-+
-+ case TRAINED:
-+ if (!is_link_up(phydev)) {
-+ switch (xgkr_inst->bp_mode)
-+ {
-+ case PHY_BACKPLANE_1000BASE_KX:
-+				dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX\n");
-+ break;
-+
-+ case PHY_BACKPLANE_10GBASE_KR:
-+ dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
-+ xgkr_request_restart_an(xgkr);
-+ break;
-+
-+ case PHY_BACKPLANE_40GBASE_KR:
-+ if (xgkr->idx == MASTER_LANE) {
-+					//only the Master Lane checks whether all lanes are trained
-+ all_lanes_trained = true;
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
-+ if (xgkr_inst->xgkr[i].state != TRAINED) {
-+ all_lanes_trained = false;
-+ break;
-+ }
-+ }
-+ if (all_lanes_trained) {
-+ dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
-+ xgkr_request_restart_an(xgkr);
-+ }
-+ }
-+ break;
-+ }
-+ }
-+ break;
-+ }
-+
-+ if (start_train) {
-+ xgkr_start_train(xgkr);
-+ }
-+
-+ mutex_unlock(&phydev->lock);
-+ start_xgkr_state_machine(&xgkr->xgkr_wk);
-+}
-+
-+static int fsl_backplane_probe(struct phy_device *phydev)
-+{
-+ struct xgkr_phy_data *xgkr_inst;
-+ struct device_node *phy_node, *lane_node;
-+ struct resource res_lane;
-+ struct serdes_access *srds = NULL;
-+ int serdes_type;
-+ const char *st;
-+ const char *bm;
-+ int ret, i, phy_lanes;
-+ int bp_mode;
-+ u32 lane_base_addr[MAX_PHY_LANES_NO], lane_memmap_size;
-+
-+ phy_node = phydev->mdio.dev.of_node;
-+ if (!phy_node) {
-+ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
-+ return -EINVAL;
-+ }
-+
-+ bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
-+ if (bp_mode < 0)
-+ return -EINVAL;
-+
-+ phy_lanes = 1;
-+ if (!strcasecmp(bm, "1000base-kx")) {
-+ bp_mode = PHY_BACKPLANE_1000BASE_KX;
-+ } else if (!strcasecmp(bm, "10gbase-kr")) {
-+ bp_mode = PHY_BACKPLANE_10GBASE_KR;
-+ } else if (!strcasecmp(bm, "40gbase-kr")) {
-+ bp_mode = PHY_BACKPLANE_40GBASE_KR;
-+ phy_lanes = 4;
-+ } else {
-+ dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
-+ return -EINVAL;
-+ }
-+
-+ lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
-+ if (!lane_node) {
-+ dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = of_property_read_string(lane_node, "compatible", &st);
-+ if (ret < 0) {
-+ //assume SERDES-10G if compatible property is not specified
-+ serdes_type = SERDES_10G;
-+ }
-+ else if (!strcasecmp(st, "fsl,serdes-10g")) {
-+ serdes_type = SERDES_10G;
-+ } else if (!strcasecmp(st, "fsl,serdes-28g")) {
-+ serdes_type = SERDES_28G;
-+ } else {
-+ dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = of_address_to_resource(lane_node, 0, &res_lane);
-+ if (ret) {
-+ dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
-+ return ret;
-+ }
-+
-+ of_node_put(lane_node);
-+ ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
-+ (u32 *)lane_base_addr, phy_lanes);
-+ if (ret) {
-+ dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (serdes_type)
-+ {
-+ case SERDES_10G:
-+ setup_an_lt_ls();
-+ srds = setup_serdes_access_10g();
-+ break;
-+
-+ case SERDES_28G:
-+ setup_an_lt_lx();
-+ srds = setup_serdes_access_28g();
-+ break;
-+
-+ default:
-+ dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!srds) {
-+ dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
-+ return -EINVAL;
-+ }
-+
-+ srds->serdes_type = serdes_type;
-+ srds->is_little_endian = of_property_read_bool(lane_node, "little-endian");
-+
-+ if (srds->is_little_endian) {
-+ srds->ioread32 = le_ioread32;
-+ srds->iowrite32 = le_iowrite32;
-+ } else {
-+ srds->ioread32 = be_ioread32;
-+ srds->iowrite32 = be_iowrite32;
-+ }
-+
-+ xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
-+ sizeof(*xgkr_inst), GFP_KERNEL);
-+ if (!xgkr_inst)
-+ return -ENOMEM;
-+
-+ xgkr_inst->phy_lanes = phy_lanes;
-+ xgkr_inst->bp_mode = bp_mode;
-+ mutex_init(&xgkr_inst->phy_lock);
-+
-+ lane_memmap_size = srds->get_lane_memmap_size();
-+
-+ for (i = 0; i < phy_lanes; i++) {
-+ xgkr_inst->xgkr[i].idx = i;
-+ xgkr_inst->xgkr[i].phydev = phydev;
-+ xgkr_inst->xgkr[i].srds = srds;
-+ xgkr_inst->xgkr[i].reg_base = devm_ioremap_nocache(&phydev->mdio.dev,
-+ res_lane.start + lane_base_addr[i],
-+ lane_memmap_size);
-+ if (!xgkr_inst->xgkr[i].reg_base) {
-+ dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
-+ return -ENOMEM;
-+ }
-+ xgkr_inst->xgkr[i].rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
-+ }
-+
-+ phydev->priv = xgkr_inst;
-+
-+ switch (bp_mode)
-+ {
-+ case PHY_BACKPLANE_1000BASE_KX:
-+ phydev->speed = SPEED_1000;
-+ /* configure the lane for 1000BASE-KX */
-+ srds->lane_set_1gkx(xgkr_inst->xgkr[SINGLE_LANE].reg_base);
-+ break;
-+
-+ case PHY_BACKPLANE_10GBASE_KR:
-+ phydev->speed = SPEED_10000;
-+ INIT_DELAYED_WORK(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk, xgkr_state_machine);
-+ break;
-+
-+ case PHY_BACKPLANE_40GBASE_KR:
-+ phydev->speed = SPEED_40000;
-+ for (i = 0; i < phy_lanes; i++)
-+ INIT_DELAYED_WORK(&xgkr_inst->xgkr[i].xgkr_wk, xgkr_state_machine);
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int fsl_backplane_aneg_done(struct phy_device *phydev)
-+{
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+
-+ if (!phydev->mdio.dev.of_node) {
-+ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
-+ return -EINVAL;
-+ }
-+
-+ xgkr_inst->aneg_done = true;
-+
-+ return 1;
-+}
-+
-+static int fsl_backplane_config_aneg(struct phy_device *phydev)
-+{
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int i;
-+
-+ if (!phydev->mdio.dev.of_node) {
-+ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (phydev->speed)
-+ {
-+ case SPEED_1000:
-+ phydev->supported |= SUPPORTED_1000baseKX_Full;
-+ start_1gkx_an(phydev);
-+ break;
-+
-+ case SPEED_10000:
-+ phydev->supported |= SUPPORTED_10000baseKR_Full;
-+ reset_lt(&xgkr_inst->xgkr[SINGLE_LANE]);
-+ start_xgkr_an(&xgkr_inst->xgkr[SINGLE_LANE]);
-+ /* start state machine*/
-+ start_xgkr_state_machine(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk);
-+ break;
-+
-+ case SPEED_40000:
-+ phydev->supported |= SUPPORTED_40000baseKR4_Full;
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
-+ reset_lt(&xgkr_inst->xgkr[i]);
-+ }
-+ //Start AN only for Master Lane
-+ start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
-+ /* start state machine*/
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
-+ start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
-+ }
-+
-+ break;
-+ }
-+
-+ phydev->advertising = phydev->supported;
-+ phydev->duplex = 1;
-+
-+ return 0;
-+}
-+
-+static int fsl_backplane_suspend(struct phy_device *phydev)
-+{
-+ int i;
-+
-+ if (!phydev->mdio.dev.of_node) {
-+ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
-+ return -EINVAL;
-+ }
-+
-+ if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++)
-+ cancel_delayed_work_sync(&xgkr_inst->xgkr[i].xgkr_wk);
-+ }
-+ return 0;
-+}
-+
-+static int fsl_backplane_resume(struct phy_device *phydev)
-+{
-+ struct xgkr_phy_data *xgkr_inst = phydev->priv;
-+ int i;
-+
-+ if (!phydev->mdio.dev.of_node) {
-+ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
-+ return -EINVAL;
-+ }
-+
-+ if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
-+ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
-+ init_xgkr(&xgkr_inst->xgkr[i], 1);
-+ start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
-+ }
-+ }
-+ return 0;
-+}
-+
-+static int fsl_backplane_read_status(struct phy_device *phydev)
-+{
-+ if (!phydev->mdio.dev.of_node) {
-+ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
-+ return -EINVAL;
-+ }
-+
-+ if (is_link_up(phydev))
-+ phydev->link = 1;
-+ else
-+ phydev->link = 0;
-+
-+ return 0;
-+}
-+
-+static int fsl_backplane_match_phy_device(struct phy_device *phydev)
-+{
-+ struct device_node *phy_node, *lane_node;
-+ const char *st;
-+ int serdes_type, i, ret;
-+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
-+
-+ if (!phydev->mdio.dev.of_node) {
-+ return 0;
-+ }
-+
-+	// WORKAROUND:
-+	// Required for LX2 devices, where the PHY ID cannot be verified
-+	// through the PCS because the PCS Device Identifier Upper and Lower
-+	// registers are hidden and always read back as 0:
-+	// 2 02 Device_ID0 RO Bits 15:0 0
-+	// val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x2);
-+	// 3 03 Device_ID1 RO Bits 31:16 0
-+	// val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x3);
-+	//
-+	// To be removed once the issue is fixed on LX2 devices.
-+
-+ if (!phydev->is_c45)
-+ return 0;
-+
-+ phy_node = phydev->mdio.dev.of_node;
-+
-+ lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
-+ if (!lane_node) {
-+ dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
-+ return 0;
-+ }
-+
-+ ret = of_property_read_string(lane_node, "compatible", &st);
-+ if (ret < 0) {
-+ //assume SERDES-10G if compatible property is not specified
-+ serdes_type = SERDES_10G;
-+ }
-+ else if (!strcasecmp(st, "fsl,serdes-10g")) {
-+ serdes_type = SERDES_10G;
-+ } else if (!strcasecmp(st, "fsl,serdes-28g")) {
-+ serdes_type = SERDES_28G;
-+ } else {
-+ dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
-+ return 0;
-+ }
-+
-+ if (serdes_type == SERDES_10G) {
-+		//On LS devices we must find the c45 device with the correct PHY ID.
-+		//The implementation is similar to phy_bus_match() in phy_device.
-+ for (i = 1; i < num_ids; i++) {
-+ if (!(phydev->c45_ids.devices_in_package & (1 << i)))
-+ continue;
-+
-+ if ((PCS_PHY_DEVICE_ID & PCS_PHY_DEVICE_ID_MASK) ==
-+ (phydev->c45_ids.device_ids[i] & PCS_PHY_DEVICE_ID_MASK))
-+ {
-+ return 1;
-+ }
-+ }
-+ return 0;
-+ }
-+
-+	//On LX devices we cannot verify the PHY ID, so we rely on the
-+	//preliminary checks already made (mdio.dev.of_node and is_c45),
-+	//which already filtered out non clause 45 devices.
-+
-+ return 1;
-+}
-+
-+static struct phy_driver fsl_backplane_driver[] = {
-+ {
-+ .phy_id = PCS_PHY_DEVICE_ID,
-+ .name = "Freescale Backplane",
-+ .phy_id_mask = PCS_PHY_DEVICE_ID_MASK,
-+ .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
-+ SUPPORTED_MII,
-+ .probe = fsl_backplane_probe,
-+ .aneg_done = fsl_backplane_aneg_done,
-+ .config_aneg = fsl_backplane_config_aneg,
-+ .read_status = fsl_backplane_read_status,
-+ .suspend = fsl_backplane_suspend,
-+ .resume = fsl_backplane_resume,
-+ .match_phy_device = fsl_backplane_match_phy_device,
-+ },
-+};
-+
-+module_phy_driver(fsl_backplane_driver);
-+
-+static struct mdio_device_id __maybe_unused freescale_tbl[] = {
-+ { PCS_PHY_DEVICE_ID, PCS_PHY_DEVICE_ID_MASK },
-+ { }
-+};
-+
-+MODULE_DEVICE_TABLE(mdio, freescale_tbl);
-+
-+MODULE_DESCRIPTION("Freescale Backplane driver");
-+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
-+MODULE_LICENSE("GPL v2");
---- /dev/null
-+++ b/drivers/net/phy/fsl_backplane.h
-@@ -0,0 +1,41 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * DPAA backplane driver.
-+ * Author: Florinel Iordache <florinel.iordache@nxp.com>
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Licensed under the GPL-2 or later.
-+ */
-+
-+#ifndef FSL_BACKPLANE_H
-+#define FSL_BACKPLANE_H
-+
-+/* C(-1) */
-+#define BIN_M1 0
-+/* C(1) */
-+#define BIN_LONG 1
-+
-+#define BIN_SNAPSHOT_NUM 5
-+#define BIN_M1_THRESHOLD 3
-+#define BIN_LONG_THRESHOLD 2
-+
-+struct serdes_access {
-+
-+ int serdes_type;
-+ bool is_little_endian;
-+ u32 (*ioread32)(u32 *reg);
-+ void (*iowrite32)(u32 value, u32 *reg);
-+ u32 (*get_lane_memmap_size)(void);
-+ void (*tune_tecr)(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset);
-+ void (*reset_lane)(void *reg);
-+ void (*lane_set_1gkx)(void *reg);
-+ int (*get_median_gaink2)(u32 *reg);
-+ bool (*is_bin_early)(int bin_sel, void *reg);
-+};
-+
-+struct serdes_access* setup_serdes_access_10g(void);
-+struct serdes_access* setup_serdes_access_28g(void);
-+
-+
-+#endif //FSL_BACKPLANE_H
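For orientation, the ops table above is the whole contract between the generic backplane core (fsl_backplane.c) and the SerDes-specific backends (fsl_backplane_serdes_10g.c and fsl_backplane_serdes_28g.c): a backend fills a static struct serdes_access with its register accessors and tuning helpers, and the core picks the backend from the "compatible" string of the fsl,lane-handle node and supplies the endian-aware ioread32/iowrite32 pointers itself. A minimal sketch of a hypothetical extra backend is shown below; the register layout, mask values and demo_* names are invented for illustration and are not taken from real hardware.

#include <linux/io.h>
#include <linux/delay.h>

#include "fsl_backplane.h"

#define DEMO_RESET_MASK		0x80000000
#define DEMO_PRE_COE_SHIFT	16
#define DEMO_POST_COE_SHIFT	8

/* invented per-lane register block */
struct demo_lane_regs {
	u32 ctrl;	/* lane control, bit 31 = reset request */
	u32 txeq;	/* transmit equalization */
};

static struct serdes_access demo_srds;

static u32 demo_get_lane_memmap_size(void)
{
	return sizeof(struct demo_lane_regs);
}

static void demo_reset_lane(void *reg)
{
	struct demo_lane_regs *r = reg;

	/* request a lane reset, then release it */
	demo_srds.iowrite32(demo_srds.ioread32(&r->ctrl) | DEMO_RESET_MASK,
			    &r->ctrl);
	udelay(1);
	demo_srds.iowrite32(demo_srds.ioread32(&r->ctrl) & ~DEMO_RESET_MASK,
			    &r->ctrl);
	udelay(1);
}

static void demo_tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q,
			   u32 adpt_eq, bool reset)
{
	struct demo_lane_regs *r = reg;

	if (reset)
		demo_reset_lane(reg);

	/* program the invented Tx equalization register */
	demo_srds.iowrite32(ratio_preq << DEMO_PRE_COE_SHIFT |
			    ratio_pst1q << DEMO_POST_COE_SHIFT |
			    adpt_eq, &r->txeq);
	udelay(1);
}

struct serdes_access *setup_serdes_access_demo(void)
{
	/* a real backend must also provide lane_set_1gkx(),
	 * get_median_gaink2() and is_bin_early(); they are omitted here.
	 * .ioread32/.iowrite32 are filled in by fsl_backplane_probe()
	 * according to the "little-endian" property of the lane node.
	 */
	demo_srds.get_lane_memmap_size = demo_get_lane_memmap_size;
	demo_srds.reset_lane = demo_reset_lane;
	demo_srds.tune_tecr = demo_tune_tecr;

	return &demo_srds;
}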
---- /dev/null
-+++ b/drivers/net/phy/fsl_backplane_serdes_10g.c
-@@ -0,0 +1,281 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * DPAA backplane driver for SerDes 10G.
-+ * Author: Florinel Iordache <florinel.iordache@nxp.com>
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Licensed under the GPL-2 or later.
-+ */
-+
-+#include <linux/io.h>
-+#include <linux/delay.h>
-+
-+#include "fsl_backplane.h"
-+
-+#define BIN_M1_SEL 6
-+#define BIN_Long_SEL 7
-+#define CDR_SEL_MASK 0x00070000
-+
-+#define PRE_COE_SHIFT 22
-+#define POST_COE_SHIFT 16
-+#define ZERO_COE_SHIFT 8
-+
-+#define TECR0_INIT 0x24200000
-+
-+#define GCR0_RESET_MASK 0x00600000
-+
-+#define GCR1_SNP_START_MASK 0x00000040
-+#define GCR1_CTL_SNP_START_MASK 0x00002000
-+
-+#define RECR1_CTL_SNP_DONE_MASK 0x00000002
-+#define RECR1_SNP_DONE_MASK 0x00000004
-+#define TCSR1_SNP_DATA_MASK 0x0000ffc0
-+#define TCSR1_SNP_DATA_SHIFT 6
-+#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
-+
-+#define RECR1_GAINK2_MASK 0x0f000000
-+#define RECR1_GAINK2_SHIFT 24
-+
-+/* Required only for 1000BASE KX */
-+#define GCR1_REIDL_TH_MASK 0x00700000
-+#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
-+#define GCR1_REIDL_ET_MAS_MASK 0x00004000
-+#define TECR0_AMP_RED_MASK 0x0000003f
-+
-+struct per_lane_ctrl_status {
-+ u32 gcr0; /* 0x.000 - General Control Register 0 */
-+ u32 gcr1; /* 0x.004 - General Control Register 1 */
-+ u32 gcr2; /* 0x.008 - General Control Register 2 */
-+ u32 resv1; /* 0x.00C - Reserved */
-+ u32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
-+ u32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
-+ u32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
-+ u32 resv2; /* 0x.01C - Reserved */
-+ u32 tlcr0; /* 0x.020 - TTL Control Register 0 */
-+ u32 tlcr1; /* 0x.024 - TTL Control Register 1 */
-+ u32 tlcr2; /* 0x.028 - TTL Control Register 2 */
-+ u32 tlcr3; /* 0x.02C - TTL Control Register 3 */
-+ u32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
-+ u32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
-+ u32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
-+ u32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
-+};
-+
-+static struct serdes_access srds;
-+
-+static u32 get_lane_memmap_size(void)
-+{
-+ return 0x40;
-+}
-+
-+static void reset_lane(void *reg)
-+{
-+ struct per_lane_ctrl_status *reg_base = reg;
-+
-+ /* reset the lane */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
-+ &reg_base->gcr0);
-+ udelay(1);
-+
-+ /* unreset the lane */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
-+ &reg_base->gcr0);
-+ udelay(1);
-+}
-+
-+static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
-+{
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ u32 val;
-+
-+ val = TECR0_INIT |
-+ adpt_eq << ZERO_COE_SHIFT |
-+ ratio_preq << PRE_COE_SHIFT |
-+ ratio_pst1q << POST_COE_SHIFT;
-+
-+ if (reset) {
-+ /* reset the lane */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
-+ &reg_base->gcr0);
-+ udelay(1);
-+ }
-+
-+ srds.iowrite32(val, &reg_base->tecr0);
-+ udelay(1);
-+
-+ if (reset) {
-+ /* unreset the lane */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
-+ &reg_base->gcr0);
-+ udelay(1);
-+ }
-+}
-+
-+static void lane_set_1gkx(void *reg)
-+{
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ u32 val;
-+
-+ /* reset the lane */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
-+ &reg_base->gcr0);
-+ udelay(1);
-+
-+ /* set gcr1 for 1GKX */
-+ val = srds.ioread32(&reg_base->gcr1);
-+ val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
-+ GCR1_REIDL_ET_MAS_MASK);
-+ srds.iowrite32(val, &reg_base->gcr1);
-+ udelay(1);
-+
-+ /* set tecr0 for 1GKX */
-+ val = srds.ioread32(&reg_base->tecr0);
-+ val &= ~TECR0_AMP_RED_MASK;
-+ srds.iowrite32(val, &reg_base->tecr0);
-+ udelay(1);
-+
-+ /* unreset the lane */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
-+ &reg_base->gcr0);
-+ udelay(1);
-+}
-+
-+static int get_median_gaink2(u32 *reg)
-+{
-+ int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
-+ u32 rx_eq_snp;
-+ struct per_lane_ctrl_status *reg_base;
-+ int timeout;
-+ int i, j, tmp, pos;
-+
-+ reg_base = (struct per_lane_ctrl_status *)reg;
-+
-+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
-+		/* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
-+ timeout = 100;
-+ while (srds.ioread32(&reg_base->recr1) &
-+ RECR1_CTL_SNP_DONE_MASK) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* start snap shot */
-+ srds.iowrite32((srds.ioread32(&reg_base->gcr1) |
-+ GCR1_CTL_SNP_START_MASK),
-+ &reg_base->gcr1);
-+
-+ /* wait for SNP done */
-+ timeout = 100;
-+ while (!(srds.ioread32(&reg_base->recr1) &
-+ RECR1_CTL_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* read and save the snap shot */
-+ rx_eq_snp = srds.ioread32(&reg_base->recr1);
-+ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
-+ RECR1_GAINK2_SHIFT;
-+
-+		/* terminate the snapshot by clearing GCR1[CTL_SNP_START] */
-+ srds.iowrite32((srds.ioread32(&reg_base->gcr1) &
-+ ~GCR1_CTL_SNP_START_MASK),
-+ &reg_base->gcr1);
-+ }
-+
-+	/* get the median of the 5 snapshots */
-+ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
-+ tmp = gaink2_snap_shot[i];
-+ pos = i;
-+ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
-+ if (gaink2_snap_shot[j] < tmp) {
-+ tmp = gaink2_snap_shot[j];
-+ pos = j;
-+ }
-+ }
-+
-+ gaink2_snap_shot[pos] = gaink2_snap_shot[i];
-+ gaink2_snap_shot[i] = tmp;
-+ }
-+
-+ return gaink2_snap_shot[2];
-+}
-+
-+static bool is_bin_early(int bin_sel, void *reg)
-+{
-+ bool early = false;
-+ int bin_snap_shot[BIN_SNAPSHOT_NUM];
-+ int i, negative_count = 0;
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ int timeout;
-+
-+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
-+		/* wait until RECR1_SNP_DONE_MASK has cleared */
-+ timeout = 100;
-+ while ((srds.ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* set TCSR1[CDR_SEL] to BinM1/BinLong */
-+ if (bin_sel == BIN_M1) {
-+ srds.iowrite32((srds.ioread32(&reg_base->tcsr1) &
-+ ~CDR_SEL_MASK) | BIN_M1_SEL,
-+ &reg_base->tcsr1);
-+ } else {
-+ srds.iowrite32((srds.ioread32(&reg_base->tcsr1) &
-+ ~CDR_SEL_MASK) | BIN_Long_SEL,
-+ &reg_base->tcsr1);
-+ }
-+
-+ /* start snap shot */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
-+ &reg_base->gcr1);
-+
-+ /* wait for SNP done */
-+ timeout = 100;
-+ while (!(srds.ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* read and save the snap shot */
-+ bin_snap_shot[i] = (srds.ioread32(&reg_base->tcsr1) &
-+ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
-+ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
-+ negative_count++;
-+
-+		/* terminate the snapshot by clearing GCR1[SNP_START] */
-+ srds.iowrite32(srds.ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
-+ &reg_base->gcr1);
-+ }
-+
-+ if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
-+ ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
-+ early = true;
-+ }
-+
-+ return early;
-+}
-+
-+struct serdes_access* setup_serdes_access_10g(void)
-+{
-+ srds.get_lane_memmap_size = get_lane_memmap_size;
-+ srds.tune_tecr = tune_tecr;
-+ srds.reset_lane = reset_lane;
-+ srds.lane_set_1gkx = lane_set_1gkx;
-+ srds.get_median_gaink2 = get_median_gaink2;
-+ srds.is_bin_early = is_bin_early;
-+
-+ return &srds;
-+}
-+
---- /dev/null
-+++ b/drivers/net/phy/fsl_backplane_serdes_28g.c
-@@ -0,0 +1,336 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * DPAA backplane driver for SerDes 28G.
-+ * Author: Florinel Iordache <florinel.iordache@nxp.com>
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Licensed under the GPL-2 or later.
-+ */
-+
-+#include <linux/io.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+
-+#include "fsl_backplane.h"
-+
-+#define BIN_M1_SEL 0x0000c000
-+#define BIN_Long_SEL 0x0000d000
-+#define CDR_SEL_MASK 0x0000f000
-+
-+#define PRE_COE_SHIFT 16
-+#define POST_COE_SHIFT 8
-+#define ZERO_COE_SHIFT 24
-+
-+#define TECR0_INIT 0x20808000
-+
-+#define RESET_REQ_MASK 0x80000000
-+
-+#define RECR3_SNP_START_MASK 0x80000000
-+#define RECR3_SNP_DONE_MASK 0x40000000
-+
-+#define RECR4_SNP_DATA_MASK 0x000003ff
-+#define RECR4_SNP_DATA_SHIFT 0
-+#define RECR4_EQ_SNPBIN_SIGN_MASK 0x200
-+
-+#define RECR3_GAINK2_MASK 0x1f000000
-+#define RECR3_GAINK2_SHIFT 24
-+
-+/* Required only for 1000BASE KX */
-+#define GCR1_REIDL_TH_MASK 0x00700000
-+#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
-+#define GCR1_REIDL_ET_MAS_MASK 0x04000000
-+#define TECR0_AMP_RED_MASK 0x0000003f
-+
-+struct per_lane_ctrl_status {
-+ u32 gcr0; /* 0x.000 - General Control Register 0 */
-+ u32 resv1; /* 0x.004 - Reserved */
-+ u32 resv2; /* 0x.008 - Reserved */
-+ u32 resv3; /* 0x.00C - Reserved */
-+ u32 resv4; /* 0x.010 - Reserved */
-+ u32 resv5; /* 0x.014 - Reserved */
-+ u32 resv6; /* 0x.018 - Reserved */
-+ u32 resv7; /* 0x.01C - Reserved */
-+ u32 trstctl; /* 0x.020 - TX Reset Control Register */
-+ u32 tgcr0; /* 0x.024 - TX General Control Register 0 */
-+ u32 tgcr1; /* 0x.028 - TX General Control Register 1 */
-+ u32 tgcr2; /* 0x.02C - TX General Control Register 2 */
-+ u32 tecr0; /* 0x.030 - Transmit Equalization Control Register 0 */
-+ u32 tecr1; /* 0x.034 - Transmit Equalization Control Register 1 */
-+ u32 resv8; /* 0x.038 - Reserved */
-+ u32 resv9; /* 0x.03C - Reserved */
-+ u32 rrstctl; /* 0x.040 - RX Reset Control Register */
-+ u32 rgcr0; /* 0x.044 - RX General Control Register 0 */
-+ u32 rxgcr1; /* 0x.048 - RX General Control Register 1 */
-+ u32 resv10; /* 0x.04C - Reserved */
-+ u32 recr0; /* 0x.050 - RX Equalization Register 0 */
-+ u32 recr1; /* 0x.054 - RX Equalization Register 1 */
-+ u32 recr2; /* 0x.058 - RX Equalization Register 2 */
-+ u32 recr3; /* 0x.05C - RX Equalization Register 3 */
-+ u32 recr4; /* 0x.060 - RX Equalization Register 4 */
-+ u32 resv11; /* 0x.064 - Reserved */
-+ u32 rccr0; /* 0x.068 - RX Calibration Register 0 */
-+ u32 rccr1; /* 0x.06C - RX Calibration Register 1 */
-+ u32 rcpcr0; /* 0x.070 - RX Clock Path Register 0 */
-+ u32 rsccr0; /* 0x.074 - RX Sampler Calibration Control Register 0 */
-+ u32 rsccr1; /* 0x.078 - RX Sampler Calibration Control Register 1 */
-+ u32 resv12; /* 0x.07C - Reserved */
-+ u32 ttlcr0; /* 0x.080 - Transition Tracking Loop Register 0 */
-+ u32 ttlcr1; /* 0x.084 - Transition Tracking Loop Register 1 */
-+ u32 ttlcr2; /* 0x.088 - Transition Tracking Loop Register 2 */
-+ u32 ttlcr3; /* 0x.08C - Transition Tracking Loop Register 3 */
-+ u32 resv13; /* 0x.090 - Reserved */
-+ u32 resv14; /* 0x.094 - Reserved */
-+ u32 resv15; /* 0x.098 - Reserved */
-+ u32 resv16; /* 0x.09C - Reserved */
-+ u32 tcsr0; /* 0x.0A0 - Test Control/Status Register 0 */
-+ u32 tcsr1; /* 0x.0A4 - Test Control/Status Register 1 */
-+ u32 tcsr2; /* 0x.0A8 - Test Control/Status Register 2 */
-+ u32 tcsr3; /* 0x.0AC - Test Control/Status Register 3 */
-+ u32 tcsr4; /* 0x.0B0 - Test Control/Status Register 4 */
-+ u32 resv17; /* 0x.0B4 - Reserved */
-+ u32 resv18; /* 0x.0B8 - Reserved */
-+ u32 resv19; /* 0x.0BC - Reserved */
-+ u32 rxcb0; /* 0x.0C0 - RX Control Block Register 0 */
-+ u32 rxcb1; /* 0x.0C4 - RX Control Block Register 1 */
-+ u32 resv20; /* 0x.0C8 - Reserved */
-+ u32 resv21; /* 0x.0CC - Reserved */
-+ u32 rxss0; /* 0x.0D0 - RX Speed Switch Register 0 */
-+ u32 rxss1; /* 0x.0D4 - RX Speed Switch Register 1 */
-+ u32 rxss2; /* 0x.0D8 - RX Speed Switch Register 2 */
-+ u32 resv22; /* 0x.0DC - Reserved */
-+ u32 txcb0; /* 0x.0E0 - TX Control Block Register 0 */
-+ u32 txcb1; /* 0x.0E4 - TX Control Block Register 1 */
-+ u32 resv23; /* 0x.0E8 - Reserved */
-+ u32 resv24; /* 0x.0EC - Reserved */
-+ u32 txss0; /* 0x.0F0 - TX Speed Switch Register 0 */
-+ u32 txss1; /* 0x.0F4 - TX Speed Switch Register 1 */
-+ u32 txss2; /* 0x.0F8 - TX Speed Switch Register 2 */
-+ u32 resv25; /* 0x.0FC - Reserved */
-+};
-+
-+static struct serdes_access srds;
-+
-+static u32 get_lane_memmap_size(void)
-+{
-+ return 0x100;
-+}
-+
-+static void reset_lane(void *reg)
-+{
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ u32 val;
-+ unsigned long timeout;
-+
-+ /* reset Tx lane: send reset request */
-+ srds.iowrite32(srds.ioread32(&reg_base->trstctl) | RESET_REQ_MASK,
-+ &reg_base->trstctl);
-+ udelay(1);
-+ timeout = 10;
-+ while (timeout--) {
-+ val = srds.ioread32(&reg_base->trstctl);
-+ if (!(val & RESET_REQ_MASK))
-+ break;
-+ usleep_range(5, 20);
-+ }
-+
-+ /* reset Rx lane: send reset request */
-+ srds.iowrite32(srds.ioread32(&reg_base->rrstctl) | RESET_REQ_MASK,
-+ &reg_base->rrstctl);
-+ udelay(1);
-+ timeout = 10;
-+ while (timeout--) {
-+ val = srds.ioread32(&reg_base->rrstctl);
-+ if (!(val & RESET_REQ_MASK))
-+ break;
-+ usleep_range(5, 20);
-+ }
-+
-+ /* wait for a while after reset */
-+ timeout = jiffies + 10;
-+ while (time_before(jiffies, timeout)) {
-+ schedule();
-+ usleep_range(5, 20);
-+ }
-+}
-+
-+static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
-+{
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ u32 val;
-+
-+ if (reset) {
-+ /* reset lanes */
-+ reset_lane(reg);
-+ }
-+
-+ val = TECR0_INIT |
-+ ratio_preq << PRE_COE_SHIFT |
-+ ratio_pst1q << POST_COE_SHIFT;
-+ srds.iowrite32(val, &reg_base->tecr0);
-+
-+ val = adpt_eq << ZERO_COE_SHIFT;
-+ srds.iowrite32(val, &reg_base->tecr1);
-+
-+ udelay(1);
-+}
-+
-+static void lane_set_1gkx(void *reg)
-+{
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ u32 val;
-+
-+ /* reset lanes */
-+ reset_lane(reg);
-+
-+ /* set gcr1 for 1GKX */
-+ val = srds.ioread32(&reg_base->rxgcr1);
-+ val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
-+ GCR1_REIDL_ET_MAS_MASK);
-+ srds.iowrite32(val, &reg_base->rxgcr1);
-+ udelay(1);
-+
-+ /* set tecr0 for 1GKX */
-+ val = srds.ioread32(&reg_base->tecr0);
-+ val &= ~TECR0_AMP_RED_MASK;
-+ srds.iowrite32(val, &reg_base->tecr0);
-+ udelay(1);
-+}
-+
-+static int get_median_gaink2(u32 *reg)
-+{
-+ int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
-+ u32 rx_eq_snp;
-+ struct per_lane_ctrl_status *reg_base;
-+ int timeout;
-+ int i, j, tmp, pos;
-+
-+ reg_base = (struct per_lane_ctrl_status *)reg;
-+
-+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
-+		/* wait until RECR3_SNP_DONE_MASK has cleared */
-+ timeout = 100;
-+ while (srds.ioread32(&reg_base->recr3) &
-+ RECR3_SNP_DONE_MASK) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* start snap shot */
-+ srds.iowrite32((srds.ioread32(&reg_base->recr3) |
-+ RECR3_SNP_START_MASK),
-+ &reg_base->recr3);
-+
-+ /* wait for SNP done */
-+ timeout = 100;
-+ while (!(srds.ioread32(&reg_base->recr3) &
-+ RECR3_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* read and save the snap shot */
-+ rx_eq_snp = srds.ioread32(&reg_base->recr3);
-+ gaink2_snap_shot[i] = (rx_eq_snp & RECR3_GAINK2_MASK) >>
-+ RECR3_GAINK2_SHIFT;
-+
-+		/* terminate the snapshot by clearing RECR3[SNP_START] */
-+ srds.iowrite32((srds.ioread32(&reg_base->recr3) &
-+ ~RECR3_SNP_START_MASK),
-+ &reg_base->recr3);
-+ }
-+
-+	/* get the median of the 5 snapshots */
-+ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
-+ tmp = gaink2_snap_shot[i];
-+ pos = i;
-+ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
-+ if (gaink2_snap_shot[j] < tmp) {
-+ tmp = gaink2_snap_shot[j];
-+ pos = j;
-+ }
-+ }
-+
-+ gaink2_snap_shot[pos] = gaink2_snap_shot[i];
-+ gaink2_snap_shot[i] = tmp;
-+ }
-+
-+ return gaink2_snap_shot[2];
-+}
-+
-+static bool is_bin_early(int bin_sel, void *reg)
-+{
-+ bool early = false;
-+ int bin_snap_shot[BIN_SNAPSHOT_NUM];
-+ int i, negative_count = 0;
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ int timeout;
-+
-+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
-+		/* wait until RECR3_SNP_DONE_MASK has cleared */
-+ timeout = 100;
-+ while ((srds.ioread32(&reg_base->recr3) & RECR3_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+		/* set RECR4[CDR_SEL] to BinM1/BinLong */
-+ if (bin_sel == BIN_M1) {
-+ srds.iowrite32((srds.ioread32(&reg_base->recr4) &
-+ ~CDR_SEL_MASK) | BIN_M1_SEL,
-+ &reg_base->recr4);
-+ } else {
-+ srds.iowrite32((srds.ioread32(&reg_base->recr4) &
-+ ~CDR_SEL_MASK) | BIN_Long_SEL,
-+ &reg_base->recr4);
-+ }
-+
-+ /* start snap shot */
-+ srds.iowrite32(srds.ioread32(&reg_base->recr3) | RECR3_SNP_START_MASK,
-+ &reg_base->recr3);
-+
-+ /* wait for SNP done */
-+ timeout = 100;
-+ while (!(srds.ioread32(&reg_base->recr3) & RECR3_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* read and save the snap shot */
-+ bin_snap_shot[i] = (srds.ioread32(&reg_base->recr4) &
-+ RECR4_SNP_DATA_MASK) >> RECR4_SNP_DATA_SHIFT;
-+ if (bin_snap_shot[i] & RECR4_EQ_SNPBIN_SIGN_MASK)
-+ negative_count++;
-+
-+		/* terminate the snapshot by clearing RECR3[SNP_START] */
-+ srds.iowrite32(srds.ioread32(&reg_base->recr3) & ~RECR3_SNP_START_MASK,
-+ &reg_base->recr3);
-+ }
-+
-+ if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
-+ ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
-+ early = true;
-+ }
-+
-+ return early;
-+}
-+
-+struct serdes_access* setup_serdes_access_28g(void)
-+{
-+ srds.get_lane_memmap_size = get_lane_memmap_size;
-+ srds.tune_tecr = tune_tecr;
-+ srds.reset_lane = reset_lane;
-+ srds.lane_set_1gkx = lane_set_1gkx;
-+ srds.get_median_gaink2 = get_median_gaink2;
-+ srds.is_bin_early = is_bin_early;
-+
-+ return &srds;
-+}
---- /dev/null
-+++ b/drivers/net/phy/inphi.c
-@@ -0,0 +1,594 @@
-+/*
-+ * Copyright 2018 NXP
-+ * Copyright 2018 INPHI
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ *
-+ * 1. Redistributions of source code must retain the above copyright notice,
-+ * this list of conditions and the following disclaimer.
-+ * 2. Redistributions in binary form must reproduce the above copyright notice,
-+ * this list of conditions and the following disclaimer in the documentation
-+ * and/or other materials provided with the distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ * Inphi is a registered trademark of Inphi Corporation
-+ *
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/phy.h>
-+#include <linux/mdio.h>
-+#include <linux/interrupt.h>
-+#include <linux/platform_device.h>
-+#include <linux/of_irq.h>
-+#include <linux/workqueue.h>
-+#include <linux/i2c.h>
-+#include <linux/timer.h>
-+#include <linux/delay.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/fs.h>
-+#include <linux/cdev.h>
-+#include <linux/device.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+
-+#define PHY_ID_IN112525 0x02107440
-+
-+#define INPHI_S03_DEVICE_ID_MSB 0x2
-+#define INPHI_S03_DEVICE_ID_LSB 0x3
-+
-+#define ALL_LANES 4
-+#define INPHI_POLL_DELAY 2500
-+
-+#define PHYCTRL_REG1 0x0012
-+#define PHYCTRL_REG2 0x0014
-+#define PHYCTRL_REG3 0x0120
-+#define PHYCTRL_REG4 0x0121
-+#define PHYCTRL_REG5 0x0180
-+#define PHYCTRL_REG6 0x0580
-+#define PHYCTRL_REG7 0x05C4
-+#define PHYCTRL_REG8 0x01C8
-+#define PHYCTRL_REG9 0x0521
-+
-+#define PHYSTAT_REG1 0x0021
-+#define PHYSTAT_REG2 0x0022
-+#define PHYSTAT_REG3 0x0123
-+
-+#define PHYMISC_REG1 0x0025
-+#define PHYMISC_REG2 0x002c
-+#define PHYMISC_REG3 0x00b3
-+#define PHYMISC_REG4 0x0181
-+#define PHYMISC_REG5 0x019D
-+#define PHYMISC_REG6 0x0198
-+#define PHYMISC_REG7 0x0199
-+#define PHYMISC_REG8 0x0581
-+#define PHYMISC_REG9 0x0598
-+#define PHYMISC_REG10 0x059c
-+#define PHYMISC_REG20 0x01B0
-+#define PHYMISC_REG21 0x01BC
-+#define PHYMISC_REG22 0x01C0
-+
-+#define RX_VCO_CODE_OFFSET 5
-+#define VCO_CODE 390
-+
-+int vco_codes[ALL_LANES] = {
-+ VCO_CODE,
-+ VCO_CODE,
-+ VCO_CODE,
-+ VCO_CODE
-+};
-+
-+static void mykmod_work_handler(struct work_struct *w);
-+
-+static struct workqueue_struct *wq;
-+static DECLARE_DELAYED_WORK(mykmod_work, mykmod_work_handler);
-+static unsigned long onesec;
-+struct phy_device *inphi_phydev;
-+
-+static int mdio_wr(u32 regnum, u16 val)
-+{
-+ regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
-+
-+ return mdiobus_write(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
-+ regnum, val);
-+}
-+
-+static int mdio_rd(u32 regnum)
-+{
-+ regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
-+
-+ return mdiobus_read(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
-+ regnum);
-+}
-+
-+
-+int bit_test(int value, int bit_field)
-+{
-+ int result;
-+ int bit_mask = (1 << bit_field);
-+
-+ result = ((value & bit_mask) == bit_mask);
-+ return result;
-+}
-+
-+int tx_pll_lock_test(int lane)
-+{
-+ int i, val, locked = 1;
-+
-+ if (lane == ALL_LANES) {
-+ for (i = 0; i < ALL_LANES; i++) {
-+ val = mdio_rd(i * 0x100 + PHYSTAT_REG3);
-+ locked = locked & bit_test(val, 15);
-+ }
-+ } else {
-+ val = mdio_rd(lane * 0x100 + PHYSTAT_REG3);
-+ locked = locked & bit_test(val, 15);
-+ }
-+
-+ return locked;
-+}
-+
-+void rx_reset_assert(int lane)
-+{
-+ int mask, val;
-+
-+ if (lane == ALL_LANES) {
-+ val = mdio_rd(PHYMISC_REG2);
-+ mask = (1 << 15);
-+ mdio_wr(PHYMISC_REG2, val + mask);
-+ } else {
-+ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
-+ mask = (1 << 6);
-+ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
-+ }
-+}
-+
-+void rx_reset_de_assert(int lane)
-+{
-+ int mask, val;
-+
-+ if (lane == ALL_LANES) {
-+ val = mdio_rd(PHYMISC_REG2);
-+ mask = 0xffff - (1 << 15);
-+ mdio_wr(PHYMISC_REG2, val & mask);
-+ } else {
-+ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
-+ mask = 0xffff - (1 << 6);
-+ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
-+ }
-+}
-+
-+void rx_powerdown_assert(int lane)
-+{
-+ int mask, val;
-+
-+ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
-+ mask = (1 << 5);
-+ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
-+}
-+
-+void rx_powerdown_de_assert(int lane)
-+{
-+ int mask, val;
-+
-+ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
-+ mask = 0xffff - (1 << 5);
-+ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
-+}
-+
-+void tx_pll_assert(int lane)
-+{
-+ int val, recal;
-+
-+ if (lane == ALL_LANES) {
-+ val = mdio_rd(PHYMISC_REG2);
-+ recal = (1 << 12);
-+ mdio_wr(PHYMISC_REG2, val | recal);
-+ } else {
-+ val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
-+ recal = (1 << 15);
-+ mdio_wr(lane * 0x100 + PHYCTRL_REG4, val | recal);
-+ }
-+}
-+
-+void tx_pll_de_assert(int lane)
-+{
-+ int recal, val;
-+
-+ if (lane == ALL_LANES) {
-+ val = mdio_rd(PHYMISC_REG2);
-+ recal = 0xefff;
-+ mdio_wr(PHYMISC_REG2, val & recal);
-+ } else {
-+ val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
-+ recal = 0x7fff;
-+ mdio_wr(lane * 0x100 + PHYCTRL_REG4, val & recal);
-+ }
-+}
-+
-+void tx_core_assert(int lane)
-+{
-+ int recal, val, val2, core_reset;
-+
-+ if (lane == 4) {
-+ val = mdio_rd(PHYMISC_REG2);
-+ recal = 1 << 10;
-+ mdio_wr(PHYMISC_REG2, val | recal);
-+ } else {
-+ val2 = mdio_rd(PHYMISC_REG3);
-+ core_reset = (1 << (lane + 8));
-+ mdio_wr(PHYMISC_REG3, val2 | core_reset);
-+ }
-+}
-+
-+void lol_disable(int lane)
-+{
-+ int val, mask;
-+
-+ val = mdio_rd(PHYMISC_REG3);
-+ mask = 1 << (lane + 4);
-+ mdio_wr(PHYMISC_REG3, val | mask);
-+}
-+
-+void tx_core_de_assert(int lane)
-+{
-+ int val, recal, val2, core_reset;
-+
-+ if (lane == ALL_LANES) {
-+ val = mdio_rd(PHYMISC_REG2);
-+ recal = 0xffff - (1 << 10);
-+ mdio_wr(PHYMISC_REG2, val & recal);
-+ } else {
-+ val2 = mdio_rd(PHYMISC_REG3);
-+ core_reset = 0xffff - (1 << (lane + 8));
-+ mdio_wr(PHYMISC_REG3, val2 & core_reset);
-+ }
-+}
-+
-+void tx_restart(int lane)
-+{
-+ tx_core_assert(lane);
-+ tx_pll_assert(lane);
-+ tx_pll_de_assert(lane);
-+ usleep_range(1500, 1600);
-+ tx_core_de_assert(lane);
-+}
-+
-+void disable_lane(int lane)
-+{
-+ rx_reset_assert(lane);
-+ rx_powerdown_assert(lane);
-+ tx_core_assert(lane);
-+ lol_disable(lane);
-+}
-+
-+void toggle_reset(int lane)
-+{
-+ int reg, val, orig;
-+
-+ if (lane == ALL_LANES) {
-+ mdio_wr(PHYMISC_REG2, 0x8000);
-+ udelay(100);
-+ mdio_wr(PHYMISC_REG2, 0x0000);
-+ } else {
-+ reg = lane * 0x100 + PHYCTRL_REG8;
-+ val = (1 << 6);
-+ orig = mdio_rd(reg);
-+ mdio_wr(reg, orig + val);
-+ udelay(100);
-+ mdio_wr(reg, orig);
-+ }
-+}
-+
-+int az_complete_test(int lane)
-+{
-+ int success = 1, value;
-+
-+ if (lane == 0 || lane == ALL_LANES) {
-+ value = mdio_rd(PHYCTRL_REG5);
-+ success = success & bit_test(value, 2);
-+ }
-+ if (lane == 1 || lane == ALL_LANES) {
-+ value = mdio_rd(PHYCTRL_REG5 + 0x100);
-+ success = success & bit_test(value, 2);
-+ }
-+ if (lane == 2 || lane == ALL_LANES) {
-+ value = mdio_rd(PHYCTRL_REG5 + 0x200);
-+ success = success & bit_test(value, 2);
-+ }
-+ if (lane == 3 || lane == ALL_LANES) {
-+ value = mdio_rd(PHYCTRL_REG5 + 0x300);
-+ success = success & bit_test(value, 2);
-+ }
-+
-+ return success;
-+}
-+
-+void save_az_offsets(int lane)
-+{
-+ int i;
-+
-+#define AZ_OFFSET_LANE_UPDATE(reg, lane) \
-+ mdio_wr((reg) + (lane) * 0x100, \
-+ (mdio_rd((reg) + (lane) * 0x100) >> 8))
-+
-+ if (lane == ALL_LANES) {
-+ for (i = 0; i < ALL_LANES; i++) {
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, i);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, i);
-+ }
-+ } else {
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, lane);
-+ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, lane);
-+ }
-+
-+ mdio_wr(PHYCTRL_REG7, 0x0001);
-+}
-+
-+void save_vco_codes(int lane)
-+{
-+ int i;
-+
-+ if (lane == ALL_LANES) {
-+ for (i = 0; i < ALL_LANES; i++) {
-+ vco_codes[i] = mdio_rd(PHYMISC_REG5 + i * 0x100);
-+ mdio_wr(PHYMISC_REG5 + i * 0x100,
-+ vco_codes[i] + RX_VCO_CODE_OFFSET);
-+ }
-+ } else {
-+ vco_codes[lane] = mdio_rd(PHYMISC_REG5 + lane * 0x100);
-+ mdio_wr(PHYMISC_REG5 + lane * 0x100,
-+ vco_codes[lane] + RX_VCO_CODE_OFFSET);
-+ }
-+}
-+
-+int inphi_lane_recovery(int lane)
-+{
-+ int i, value, az_pass;
-+
-+ switch (lane) {
-+ case 0:
-+ case 1:
-+ case 2:
-+ case 3:
-+ rx_reset_assert(lane);
-+ mdelay(20);
-+ break;
-+ case ALL_LANES:
-+ mdio_wr(PHYMISC_REG2, 0x9C00);
-+ mdelay(20);
-+ do {
-+ value = mdio_rd(PHYMISC_REG2);
-+ udelay(10);
-+ } while (!bit_test(value, 4));
-+ break;
-+ default:
-+ dev_err(&inphi_phydev->mdio.dev,
-+ "Incorrect usage of APIs in %s driver\n",
-+ inphi_phydev->drv->name);
-+ break;
-+ }
-+
-+ if (lane == ALL_LANES) {
-+ for (i = 0; i < ALL_LANES; i++)
-+ mdio_wr(PHYMISC_REG7 + i * 0x100, VCO_CODE);
-+ } else {
-+ mdio_wr(PHYMISC_REG7 + lane * 0x100, VCO_CODE);
-+ }
-+
-+ if (lane == ALL_LANES)
-+ for (i = 0; i < ALL_LANES; i++)
-+ mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0418);
-+ else
-+ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0418);
-+
-+ mdio_wr(PHYCTRL_REG7, 0x0000);
-+
-+ rx_reset_de_assert(lane);
-+
-+ if (lane == ALL_LANES) {
-+ for (i = 0; i < ALL_LANES; i++) {
-+ mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0410);
-+ mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0412);
-+ }
-+ } else {
-+ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0410);
-+ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0412);
-+ }
-+
-+ for (i = 0; i < 64; i++) {
-+ mdelay(100);
-+ az_pass = az_complete_test(lane);
-+ if (az_pass) {
-+ save_az_offsets(lane);
-+ break;
-+ }
-+ }
-+
-+ if (!az_pass) {
-+ pr_info("in112525: AZ calibration fail @ lane=%d\n", lane);
-+ return -1;
-+ }
-+
-+ if (lane == ALL_LANES) {
-+ mdio_wr(PHYMISC_REG8, 0x0002);
-+ mdio_wr(PHYMISC_REG9, 0x2028);
-+ mdio_wr(PHYCTRL_REG6, 0x0010);
-+ usleep_range(1000, 1200);
-+ mdio_wr(PHYCTRL_REG6, 0x0110);
-+ mdelay(30);
-+ mdio_wr(PHYMISC_REG9, 0x3020);
-+ } else {
-+ mdio_wr(PHYMISC_REG4 + lane * 0x100, 0x0002);
-+ mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x2028);
-+ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0010);
-+ usleep_range(1000, 1200);
-+ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0110);
-+ mdelay(30);
-+ mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x3020);
-+ }
-+
-+ if (lane == ALL_LANES) {
-+ mdio_wr(PHYMISC_REG2, 0x1C00);
-+ mdio_wr(PHYMISC_REG2, 0x0C00);
-+ } else {
-+ tx_restart(lane);
-+ mdelay(11);
-+ }
-+
-+ if (lane == ALL_LANES) {
-+ if (bit_test(mdio_rd(PHYMISC_REG2), 6) == 0)
-+ return -1;
-+ } else {
-+ if (tx_pll_lock_test(lane) == 0)
-+ return -1;
-+ }
-+
-+ save_vco_codes(lane);
-+
-+ if (lane == ALL_LANES) {
-+ mdio_wr(PHYMISC_REG2, 0x0400);
-+ mdio_wr(PHYMISC_REG2, 0x0000);
-+ value = mdio_rd(PHYCTRL_REG1);
-+ value = value & 0xffbf;
-+ mdio_wr(PHYCTRL_REG2, value);
-+ } else {
-+ tx_core_de_assert(lane);
-+ }
-+
-+ if (lane == ALL_LANES) {
-+ mdio_wr(PHYMISC_REG1, 0x8000);
-+ mdio_wr(PHYMISC_REG1, 0x0000);
-+ }
-+ mdio_rd(PHYMISC_REG1);
-+ mdio_rd(PHYMISC_REG1);
-+ usleep_range(1000, 1200);
-+ mdio_rd(PHYSTAT_REG1);
-+ mdio_rd(PHYSTAT_REG2);
-+
-+ return 0;
-+}
-+
-+static void mykmod_work_handler(struct work_struct *w)
-+{
-+ int all_lanes_lock, lane0_lock, lane1_lock, lane2_lock, lane3_lock;
-+
-+ lane0_lock = bit_test(mdio_rd(0x123), 15);
-+ lane1_lock = bit_test(mdio_rd(0x223), 15);
-+ lane2_lock = bit_test(mdio_rd(0x323), 15);
-+ lane3_lock = bit_test(mdio_rd(0x423), 15);
-+
-+ /* check if the chip had any successful lane lock from the previous
-+ * stage (e.g. u-boot)
-+ */
-+ all_lanes_lock = lane0_lock | lane1_lock | lane2_lock | lane3_lock;
-+
-+ if (!all_lanes_lock) {
-+ /* start fresh */
-+ inphi_lane_recovery(ALL_LANES);
-+ } else {
-+ if (!lane0_lock)
-+ inphi_lane_recovery(0);
-+ if (!lane1_lock)
-+ inphi_lane_recovery(1);
-+ if (!lane2_lock)
-+ inphi_lane_recovery(2);
-+ if (!lane3_lock)
-+ inphi_lane_recovery(3);
-+ }
-+
-+ queue_delayed_work(wq, &mykmod_work, onesec);
-+}
-+
-+int inphi_probe(struct phy_device *phydev)
-+{
-+ int phy_id = 0, id_lsb = 0, id_msb = 0;
-+
-+ /* setup the inphi_phydev ptr for mdio_rd/mdio_wr APIs */
-+ inphi_phydev = phydev;
-+
-+ /* Read device id from phy registers */
-+ id_lsb = mdio_rd(INPHI_S03_DEVICE_ID_MSB);
-+ if (id_lsb < 0)
-+ return -ENXIO;
-+
-+ phy_id = id_lsb << 16;
-+
-+ id_msb = mdio_rd(INPHI_S03_DEVICE_ID_LSB);
-+ if (id_msb < 0)
-+ return -ENXIO;
-+
-+ phy_id |= id_msb;
-+
-+ /* Make sure the device tree binding matched the driver with the
-+ * right device.
-+ */
-+ if (phy_id != phydev->drv->phy_id) {
-+ dev_err(&phydev->mdio.dev,
-+ "Error matching phy with %s driver\n",
-+ phydev->drv->name);
-+ return -ENODEV;
-+ }
-+
-+ /* update the local phydev pointer, used inside all APIs */
-+ inphi_phydev = phydev;
-+ onesec = msecs_to_jiffies(INPHI_POLL_DELAY);
-+
-+ wq = create_singlethread_workqueue("inphi_kmod");
-+ if (wq) {
-+ queue_delayed_work(wq, &mykmod_work, onesec);
-+ } else {
-+ dev_err(&phydev->mdio.dev,
-+ "Error creating kernel workqueue for %s driver\n",
-+ phydev->drv->name);
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+static struct phy_driver inphi_driver[] = {
-+{
-+ .phy_id = PHY_ID_IN112525,
-+ .phy_id_mask = 0x0ff0fff0,
-+ .name = "Inphi 112525_S03",
-+ .features = PHY_GBIT_FEATURES,
-+ .probe = &inphi_probe,
-+},
-+};
-+
-+module_phy_driver(inphi_driver);
-+
-+static struct mdio_device_id __maybe_unused inphi_tbl[] = {
-+ { PHY_ID_IN112525, 0x0ff0fff0},
-+ {},
-+};
-+
-+MODULE_DEVICE_TABLE(mdio, inphi_tbl);
---- /dev/null
-+++ b/drivers/net/phy/mdio-mux-multiplexer.c
-@@ -0,0 +1,122 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/* MDIO bus multiplexer using kernel multiplexer subsystem
-+ *
-+ * Copyright 2019 NXP
-+ */
-+
-+#include <linux/platform_device.h>
-+#include <linux/mdio-mux.h>
-+#include <linux/module.h>
-+#include <linux/mux/consumer.h>
-+
-+struct mdio_mux_multiplexer_state {
-+ struct mux_control *muxc;
-+ bool do_deselect;
-+ void *mux_handle;
-+};
-+
-+/**
-+ * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux
-+ * layer when it thinks the mdio bus
-+ * multiplexer needs to switch.
-+ * @current_child: current value of the mux register.
-+ * @desired_child: value of the 'reg' property of the target child MDIO node.
-+ * @data: Private data used by this switch_fn passed to mdio_mux_init function
-+ * via mdio_mux_init(.., .., .., .., data, ..).
-+ *
-+ * The first time this function is called, current_child == -1.
-+ * If current_child == desired_child, then the mux is already set to the
-+ * correct bus.
-+ */
-+static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child,
-+ void *data)
-+{
-+ struct platform_device *pdev;
-+ struct mdio_mux_multiplexer_state *s;
-+ int ret = 0;
-+
-+ pdev = (struct platform_device *)data;
-+ s = platform_get_drvdata(pdev);
-+
-+ if (!(current_child ^ desired_child))
-+ return 0;
-+
-+ if (s->do_deselect)
-+ ret = mux_control_deselect(s->muxc);
-+ if (ret) {
-+ dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n",
-+ __func__, ret);
-+ return ret;
-+ }
-+
-+ ret = mux_control_select(s->muxc, desired_child);
-+ if (!ret) {
-+ dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child,
-+ desired_child);
-+ s->do_deselect = true;
-+ } else {
-+ s->do_deselect = false;
-+ }
-+
-+ return ret;
-+}
-+
-+static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct mdio_mux_multiplexer_state *s;
-+ int ret = 0;
-+
-+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
-+ if (!s)
-+ return -ENOMEM;
-+
-+ s->muxc = devm_mux_control_get(dev, NULL);
-+ if (IS_ERR(s->muxc)) {
-+ ret = PTR_ERR(s->muxc);
-+ if (ret != -EPROBE_DEFER)
-+ dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
-+ return ret;
-+ }
-+
-+ platform_set_drvdata(pdev, s);
-+
-+ ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
-+ mdio_mux_multiplexer_switch_fn, &s->mux_handle,
-+ pdev, NULL);
-+
-+ return ret;
-+}
-+
-+static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
-+{
-+ struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);
-+
-+ mdio_mux_uninit(s->mux_handle);
-+
-+ if (s->do_deselect)
-+ mux_control_deselect(s->muxc);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id mdio_mux_multiplexer_match[] = {
-+ { .compatible = "mdio-mux-multiplexer", },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match);
-+
-+static struct platform_driver mdio_mux_multiplexer_driver = {
-+ .driver = {
-+ .name = "mdio-mux-multiplexer",
-+ .of_match_table = mdio_mux_multiplexer_match,
-+ },
-+ .probe = mdio_mux_multiplexer_probe,
-+ .remove = mdio_mux_multiplexer_remove,
-+};
-+
-+module_platform_driver(mdio_mux_multiplexer_driver);
-+
-+MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem");
-+MODULE_AUTHOR("Pankaj Bansal <pankaj.bansal@nxp.com>");
-+MODULE_LICENSE("GPL");
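To make the switch_fn contract described in the kerneldoc above more concrete, here is a hypothetical alternative callback that steers the MDIO mux through a memory-mapped selector register instead of the mux subsystem. Only the callback signature, the current_child == -1 convention on the first call, and the way it is handed to mdio_mux_init() are taken from the code above; the selector register and the demo_* names are invented for illustration.

#include <linux/io.h>
#include <linux/mdio-mux.h>

/* invented driver state: one memory-mapped bus-select register */
struct demo_mdio_mux_state {
	void __iomem *sel_reg;
	void *mux_handle;
};

static int demo_mdio_mux_switch_fn(int current_child, int desired_child,
				   void *data)
{
	struct demo_mdio_mux_state *s = data;

	/* nothing to do if the mux already points at the right child bus
	 * (current_child is -1 on the very first call, so this never
	 * short-circuits the initial selection)
	 */
	if (current_child == desired_child)
		return 0;

	/* desired_child is the 'reg' value of the target child MDIO node */
	writel(desired_child, s->sel_reg);

	return 0;
}

/* a probe function would then register it the same way as above:
 *
 *	ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
 *			    demo_mdio_mux_switch_fn, &s->mux_handle,
 *			    s, NULL);
 */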
---- a/drivers/net/phy/swphy.c
-+++ b/drivers/net/phy/swphy.c
-@@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
- static int swphy_decode_speed(int speed)
- {
- switch (speed) {
-+ case 10000:
- case 1000:
- return SWMII_SPEED_1000;
- case 100:
---- a/include/linux/phy.h
-+++ b/include/linux/phy.h
-@@ -87,6 +87,7 @@ typedef enum {
- PHY_INTERFACE_MODE_XAUI,
- /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
- PHY_INTERFACE_MODE_10GKR,
-+ PHY_INTERFACE_MODE_2500SGMII,
- PHY_INTERFACE_MODE_MAX,
- } phy_interface_t;
-
-@@ -159,6 +160,8 @@ static inline const char *phy_modes(phy_
- return "xaui";
- case PHY_INTERFACE_MODE_10GKR:
- return "10gbase-kr";
-+ case PHY_INTERFACE_MODE_2500SGMII:
-+ return "sgmii-2500";
- default:
- return "unknown";
- }
diff --git a/target/linux/layerscape/patches-4.14/710-pfe-eth-support-layerscape.patch b/target/linux/layerscape/patches-4.14/710-pfe-eth-support-layerscape.patch
deleted file mode 100644
index 80baa82f9a..0000000000
--- a/target/linux/layerscape/patches-4.14/710-pfe-eth-support-layerscape.patch
+++ /dev/null
@@ -1,11028 +0,0 @@
-From 35745905430a4c9827c235d42f3a61bef34043e8 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 19 Apr 2019 13:21:09 +0800
-Subject: [PATCH] pfe-eth: support layerscape
-
-This is an integrated patch of pfe-eth for layerscape
-
-Signed-off-by: Akhila Kavi <akhila.kavi@nxp.com>
-Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
-Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
-Signed-off-by: Archana Madhavan <archana.madhavan@nxp.com>
-Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
-Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
-Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- .../devicetree/bindings/net/fsl_ppfe/pfe.txt | 199 ++
- drivers/staging/fsl_ppfe/Kconfig | 21 +
- drivers/staging/fsl_ppfe/Makefile | 20 +
- drivers/staging/fsl_ppfe/TODO | 2 +
- drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
- .../staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
- .../fsl_ppfe/include/pfe/cbus/class_csr.h | 289 ++
- .../fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
- .../staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
- .../staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
- .../fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
- .../fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
- .../fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
- drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
- drivers/staging/fsl_ppfe/pfe_cdev.c | 258 ++
- drivers/staging/fsl_ppfe/pfe_cdev.h | 41 +
- drivers/staging/fsl_ppfe/pfe_ctrl.c | 226 ++
- drivers/staging/fsl_ppfe/pfe_ctrl.h | 100 +
- drivers/staging/fsl_ppfe/pfe_debugfs.c | 99 +
- drivers/staging/fsl_ppfe/pfe_debugfs.h | 13 +
- drivers/staging/fsl_ppfe/pfe_eth.c | 2554 +++++++++++++++++
- drivers/staging/fsl_ppfe/pfe_eth.h | 175 ++
- drivers/staging/fsl_ppfe/pfe_firmware.c | 302 ++
- drivers/staging/fsl_ppfe/pfe_firmware.h | 20 +
- drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++
- drivers/staging/fsl_ppfe/pfe_hif.c | 1060 +++++++
- drivers/staging/fsl_ppfe/pfe_hif.h | 200 ++
- drivers/staging/fsl_ppfe/pfe_hif_lib.c | 628 ++++
- drivers/staging/fsl_ppfe/pfe_hif_lib.h | 229 ++
- drivers/staging/fsl_ppfe/pfe_hw.c | 164 ++
- drivers/staging/fsl_ppfe/pfe_hw.h | 15 +
- .../staging/fsl_ppfe/pfe_ls1012a_platform.c | 368 +++
- drivers/staging/fsl_ppfe/pfe_mod.c | 158 +
- drivers/staging/fsl_ppfe/pfe_mod.h | 103 +
- drivers/staging/fsl_ppfe/pfe_perfmon.h | 26 +
- drivers/staging/fsl_ppfe/pfe_sysfs.c | 806 ++++++
- drivers/staging/fsl_ppfe/pfe_sysfs.h | 17 +
- 37 files changed, 10821 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
- create mode 100644 drivers/staging/fsl_ppfe/Kconfig
- create mode 100644 drivers/staging/fsl_ppfe/Makefile
- create mode 100644 drivers/staging/fsl_ppfe/TODO
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
- create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
- create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
- create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
-@@ -0,0 +1,199 @@
-+=============================================================================
-+NXP Programmable Packet Forwarding Engine Device Bindings
-+
-+CONTENTS
-+ - PFE Node
-+ - Ethernet Node
-+
-+=============================================================================
-+PFE Node
-+
-+DESCRIPTION
-+
-+The PFE node has all the properties associated with the Packet Forwarding
-+Engine block.
-+
-+PROPERTIES
-+
-+- compatible
-+ Usage: required
-+ Value type: <stringlist>
-+ Definition: Must include "fsl,pfe"
-+
-+- reg
-+ Usage: required
-+ Value type: <prop-encoded-array>
-+ Definition: A standard property.
-+ Specifies the offset of the following registers:
-+ - PFE configuration registers
-+ - DDR memory used by PFE
-+
-+- fsl,pfe-num-interfaces
-+ Usage: required
-+ Value type: <u32>
-+ Definition: Must be present. Value can be either one or two.
-+
-+- interrupts
-+ Usage: required
-+ Value type: <prop-encoded-array>
-+ Definition: Three interrupts are specified in this property.
-+ - HIF interrupt
-+ - HIF NO COPY interrupt
-+ - Wake On LAN interrupt
-+
-+- interrupt-names
-+ Usage: required
-+ Value type: <stringlist>
-+ Definition: Following strings are defined for the 3 interrupts.
-+ "pfe_hif" - HIF interrupt
-+ "pfe_hif_nocpy" - HIF NO COPY interrupt
-+ "pfe_wol" - Wake On LAN interrupt
-+
-+- memory-region
-+ Usage: required
-+ Value type: <phandle>
-+ Definition: phandle to a node describing reserved memory used by pfe.
-+ Refer:- Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
-+
-+- fsl,pfe-scfg
-+ Usage: required
-+ Value type: <phandle>
-+ Definition: phandle for scfg.
-+
-+- fsl,rcpm-wakeup
-+ Usage: required
-+ Value type: <phandle>
-+ Definition: phandle for rcpm.
-+
-+- clocks
-+ Usage: required
-+ Value type: <phandle>
-+ Definition: phandle for clockgen.
-+
-+- clock-names
-+ Usage: required
-+ Value type: <string>
-+ Definition: phandle for clock name.
-+
-+EXAMPLE
-+
-+pfe: pfe@04000000 {
-+ compatible = "fsl,pfe";
-+ reg = <0x0 0x04000000 0x0 0xc00000>, /* AXI 16M */
-+ <0x0 0x83400000 0x0 0xc00000>; /* PFE DDR 12M */
-+ reg-names = "pfe", "pfe-ddr";
-+ fsl,pfe-num-interfaces = <0x2>;
-+ interrupts = <0 172 0x4>, /* HIF interrupt */
-+ <0 173 0x4>, /*HIF_NOCPY interrupt */
-+ <0 174 0x4>; /* WoL interrupt */
-+ interrupt-names = "pfe_hif", "pfe_hif_nocpy", "pfe_wol";
-+ memory-region = <&pfe_reserved>;
-+ fsl,pfe-scfg = <&scfg 0>;
-+ fsl,rcpm-wakeup = <&rcpm 0xf0000020>;
-+ clocks = <&clockgen 4 0>;
-+ clock-names = "pfe";
-+
-+ status = "okay";
-+ pfe_mac0: ethernet@0 {
-+ };
-+
-+ pfe_mac1: ethernet@1 {
-+ };
-+};
-+
-+=============================================================================
-+Ethernet Node
-+
-+DESCRIPTION
-+
-+The Ethernet node has all the properties associated with a PFE port that
-+platforms use to connect to a PHY:
-+
-+PROPERTIES
-+
-+- compatible
-+ Usage: required
-+ Value type: <stringlist>
-+ Definition: Must include "fsl,pfe-gemac-port"
-+
-+- reg
-+ Usage: required
-+ Value type: <prop-encoded-array>
-+ Definition: A standard property.
-+ Specifies the gemacid of the interface.
-+
-+- fsl,gemac-bus-id
-+ Usage: required
-+ Value type: <u32>
-+ Definition: Must be present. Value should be the id of the bus
-+ connected to gemac.
-+
-+- fsl,gemac-phy-id (deprecated binding)
-+ Usage: required
-+ Value type: <u32>
-+ Definition: This binding shouldn't be used with new platforms.
-+ Must be present. Value should be the id of the phy
-+ connected to gemac.
-+
-+- fsl,mdio-mux-val
-+ Usage: required
-+ Value type: <u32>
-+ Definition: Must be present. Value can be either 0 or 2 or 3.
-+ This value is used to configure the mux to enable mdio.
-+
-+- phy-mode
-+ Usage: required
-+ Value type: <string>
-+ Definition: Must include "sgmii"
-+
-+- fsl,pfe-phy-if-flags (deprecated binding)
-+ Usage: required
-+ Value type: <u32>
-+ Definition: This binding shouldn't be used with new platforms.
-+ Must be present. Value should be 0 by default.
-+ If there is no phy connected, this needs to be 1.
-+
-+- phy-handle
-+ Usage: optional
-+ Value type: <phandle>
-+ Definition: phandle to the PHY device connected to this device.
-+
-+- mdio : A required subnode which specifies the mdio bus in the PFE and is used
-+as a container for phy nodes according to ../phy.txt.
-+
-+EXAMPLE
-+
-+ethernet@0 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii";
-+ phy-handle = <&sgmii_phy1>;
-+};
-+
-+
-+ethernet@1 {
-+ compatible = "fsl,pfe-gemac-port";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x1>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x1>; /* BUS_ID */
-+ fsl,mdio-mux-val = <0x0>;
-+ phy-mode = "sgmii";
-+ phy-handle = <&sgmii_phy2>;
-+};
-+
-+mdio@0 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ sgmii_phy1: ethernet-phy@2 {
-+ reg = <0x2>;
-+ };
-+
-+ sgmii_phy2: ethernet-phy@1 {
-+ reg = <0x1>;
-+ };
-+};
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/Kconfig
-@@ -0,0 +1,21 @@
-+#
-+# Freescale Programmable Packet Forwarding Engine driver
-+#
-+config FSL_PPFE
-+ bool "Freescale PPFE Driver"
-+ select FSL_GUTS
-+ default n
-+ ---help---
-+ Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
-+ It provides two high performance ethernet interfaces.
-+ This driver initializes, programs and controls the PPFE.
-+ Use this driver to enable network connectivity on LS1012A platforms.
-+
-+if FSL_PPFE
-+
-+config FSL_PPFE_UTIL_DISABLED
-+ bool "Disable PPFE UTIL Processor Engine"
-+ ---help---
-+ UTIL PE has to be enabled only if required.
-+
-+endif # FSL_PPFE
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/Makefile
-@@ -0,0 +1,20 @@
-+#
-+# Makefile for Freescale PPFE driver
-+#
-+
-+ccflags-y += -I$(src)/include -I$(src)
-+
-+obj-m += pfe.o
-+
-+pfe-y += pfe_mod.o \
-+ pfe_hw.o \
-+ pfe_firmware.o \
-+ pfe_ctrl.o \
-+ pfe_hif.o \
-+ pfe_hif_lib.o\
-+ pfe_eth.o \
-+ pfe_sysfs.o \
-+ pfe_debugfs.o \
-+ pfe_ls1012a_platform.o \
-+ pfe_hal.o \
-+ pfe_cdev.o
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/TODO
-@@ -0,0 +1,2 @@
-+TODO:
-+ - provide pfe pe monitoring support
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
-@@ -0,0 +1,78 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _CBUS_H_
-+#define _CBUS_H_
-+
-+#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
-+#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
-+#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
-+#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
-+#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
-+#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
-+#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
-+#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
-+#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
-+#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
-+#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
-+#define LMEM_SIZE 0x10000
-+#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
-+#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
-+#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
-+#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
-+#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
-+#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
-+
-+/*
-+ * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
-+ * XXX_MEM_ACCESS_ADDR register bit definitions.
-+ */
-+#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
-+#define PE_MEM_ACCESS_IMEM BIT(15)
-+#define PE_MEM_ACCESS_DMEM BIT(16)
-+
-+/* Byte Enables of the Internal memory access. These are interpreted in BE */
-+#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
-+ ({ typeof(size) size_ = (size); \
-+ (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
-+
-+#include "cbus/emac_mtip.h"
-+#include "cbus/gpi.h"
-+#include "cbus/bmu.h"
-+#include "cbus/hif.h"
-+#include "cbus/tmu_csr.h"
-+#include "cbus/class_csr.h"
-+#include "cbus/hif_nocpy.h"
-+#include "cbus/util_csr.h"
-+
-+/* PFE cores states */
-+#define CORE_DISABLE 0x00000000
-+#define CORE_ENABLE 0x00000001
-+#define CORE_SW_RESET 0x00000002
-+
-+/* LMEM defines */
-+#define LMEM_HDR_SIZE 0x0010
-+#define LMEM_BUF_SIZE_LN2 0x7
-+#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
-+
-+/* DDR defines */
-+#define DDR_HDR_SIZE 0x0100
-+#define DDR_BUF_SIZE_LN2 0xb
-+#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
-+
-+#endif /* _CBUS_H_ */
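PE_MEM_ACCESS_BYTE_ENABLE() above packs big-endian byte enables into bits
[27:24] of the PE memory access command word. The standalone sketch below
mirrors the macro's arithmetic so the resulting values can be checked in
userspace; it is illustrative only.

#include <stdio.h>

/* Same arithmetic as PE_MEM_ACCESS_BYTE_ENABLE(offset, size) in cbus.h. */
static unsigned int byte_enable(unsigned int offset, unsigned int size)
{
	return ((((1u << size) - 1) << (4 - offset - size)) & 0xfu) << 24;
}

int main(void)
{
	printf("%#010x\n", byte_enable(0, 4));	/* 0x0f000000: full word */
	printf("%#010x\n", byte_enable(0, 2));	/* 0x0c000000 */
	printf("%#010x\n", byte_enable(2, 2));	/* 0x03000000 */
	return 0;
}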
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
-@@ -0,0 +1,55 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _BMU_H_
-+#define _BMU_H_
-+
-+#define BMU_VERSION 0x000
-+#define BMU_CTRL 0x004
-+#define BMU_UCAST_CONFIG 0x008
-+#define BMU_UCAST_BASE_ADDR 0x00c
-+#define BMU_BUF_SIZE 0x010
-+#define BMU_BUF_CNT 0x014
-+#define BMU_THRES 0x018
-+#define BMU_INT_SRC 0x020
-+#define BMU_INT_ENABLE 0x024
-+#define BMU_ALLOC_CTRL 0x030
-+#define BMU_FREE_CTRL 0x034
-+#define BMU_FREE_ERR_ADDR 0x038
-+#define BMU_CURR_BUF_CNT 0x03c
-+#define BMU_MCAST_CNT 0x040
-+#define BMU_MCAST_ALLOC_CTRL 0x044
-+#define BMU_REM_BUF_CNT 0x048
-+#define BMU_LOW_WATERMARK 0x050
-+#define BMU_HIGH_WATERMARK 0x054
-+#define BMU_INT_MEM_ACCESS 0x100
-+
-+struct BMU_CFG {
-+ unsigned long baseaddr;
-+ u32 count;
-+ u32 size;
-+ u32 low_watermark;
-+ u32 high_watermark;
-+};
-+
-+#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
-+#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
-+
-+#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
-+
-+#endif /* _BMU_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
-@@ -0,0 +1,289 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _CLASS_CSR_H_
-+#define _CLASS_CSR_H_
-+
-+/* @file class_csr.h.
-+ * class_csr - block containing all the classifier control and status register.
-+ * Mapped on CBUS and accessible from all PE's and ARM.
-+ */
-+#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
-+#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
-+#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
-+
-+/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
-+#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
-+
-+/* LMEM header size for the Classifier block.\ Data in the LMEM
-+ * is written from this offset.
-+ */
-+#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
-+
-+/* DDR header size for the Classifier block.\ Data in the DDR
-+ * is written from this offset.
-+ */
-+#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
-+
-+#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
-+
-+/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
-+#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
-+
-+/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
-+#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
-+
-+/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
-+#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
-+
-+/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
-+
-+/* @name Class PE memory access. Allows external PE's and HOST to
-+ * read/write PMEM/DMEM memory ranges for each classifier PE.
-+ */
-+/* {sr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
-+ * See \ref XXX_MEM_ACCESS_ADDR for details.
-+ */
-+#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
-+
-+/* Internal Memory Access Write Data [31:0] */
-+#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
-+
-+/* Internal Memory Access Read Data [31:0] */
-+#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
-+#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
-+#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
-+
-+#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
-+#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
-+#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
-+#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
-+#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
-+#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
-+#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
-+#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
-+#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
-+#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
-+#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
-+#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
-+#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
-+#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
-+#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
-+#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
-+#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
-+#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
-+#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
-+#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
-+#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
-+#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
-+#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
-+#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
-+#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
-+#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
-+#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
-+#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
-+#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
-+#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
-+#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
-+#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
-+#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
-+#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
-+#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
-+#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
-+#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
-+#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
-+#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
-+#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
-+#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
-+#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
-+#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
-+#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
-+#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
-+#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
-+#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
-+#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
-+#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
-+#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
-+#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
-+#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
-+#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
-+#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
-+#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
-+#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
-+
-+#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
-+#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
-+#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
-+#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
-+#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
-+#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
-+#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
-+#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
-+#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
-+#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
-+
-+#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
-+
-+#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
-+#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
-+
-+/* (route_entry_size[9:0], route_hash_size[23:16]
-+ * (this is actually ln2(size)))
-+ */
-+#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
-+
-+#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
-+#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
-+
-+#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
-+
-+#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
-+#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
-+#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
-+#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
-+#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
-+#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
-+#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
-+
-+#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
-+#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
-+/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
-+
-+#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
-+
-+#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
-+#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
-+#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
-+#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
-+#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
-+#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
-+#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
-+#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
-+#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
-+#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
-+#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
-+#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
-+
-+#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
-+#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
-+
-+#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
-+#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
-+
-+#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
-+
-+#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
-+#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
-+#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
-+#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
-+#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
-+#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
-+
-+#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
-+
-+/* CLASS defines */
-+#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
-+#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
-+
-+/* Can be configured */
-+#define CLASS_PBUF0_BASE_ADDR 0x000
-+/* Can be configured */
-+#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
-+/* Can be configured */
-+#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
-+/* Can be configured */
-+#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
-+
-+#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
-+ CLASS_PBUF_HEADER_OFFSET)
-+#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
-+ CLASS_PBUF_HEADER_OFFSET)
-+#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
-+ CLASS_PBUF_HEADER_OFFSET)
-+#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
-+ CLASS_PBUF_HEADER_OFFSET)
-+
-+#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
-+ CLASS_PBUF0_BASE_ADDR)
-+#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
-+ CLASS_PBUF2_BASE_ADDR)
-+
-+#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
-+ CLASS_PBUF0_HEADER_BASE_ADDR)
-+#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
-+ CLASS_PBUF2_HEADER_BASE_ADDR)
-+
-+#define CLASS_ROUTE_SIZE 128
-+#define CLASS_MAX_ROUTE_SIZE 256
-+#define CLASS_ROUTE_HASH_BITS 20
-+#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
-+
-+/* Can be configured */
-+#define CLASS_ROUTE0_BASE_ADDR 0x400
-+/* Can be configured */
-+#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
-+/* Can be configured */
-+#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
-+/* Can be configured */
-+#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
-+
-+#define CLASS_SA_SIZE 128
-+#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
-+/* not used */
-+#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
-+/* not used */
-+#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
-+/* not used */
-+#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
-+
-+/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */
-+#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
-+ (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
-+#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
-+ CLASS_SA_SIZE))
-+
-+#define TWO_LEVEL_ROUTE BIT(0)
-+#define PHYNO_IN_HASH BIT(1)
-+#define HW_ROUTE_FETCH BIT(3)
-+#define HW_BRIDGE_FETCH BIT(5)
-+#define IP_ALIGNED BIT(6)
-+#define ARC_HIT_CHECK_EN BIT(7)
-+#define CLASS_TOE BIT(11)
-+#define HASH_NORMAL (0 << 12)
-+#define HASH_CRC_PORT BIT(12)
-+#define HASH_CRC_IP (2 << 12)
-+#define HASH_CRC_PORT_IP (3 << 12)
-+#define QB2BUS_LE BIT(15)
-+
-+#define TCP_CHKSUM_DROP BIT(0)
-+#define UDP_CHKSUM_DROP BIT(1)
-+#define IPV4_CHKSUM_DROP BIT(9)
-+
-+/*CLASS_HIF_PARSE bits*/
-+#define HIF_PKT_CLASS_EN BIT(0)
-+#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
-+
-+struct class_cfg {
-+ u32 toe_mode;
-+ unsigned long route_table_baseaddr;
-+ u32 route_table_hash_bits;
-+ u32 pe_sys_clk_ratio;
-+ u32 resume;
-+};
-+
-+#endif /* _CLASS_CSR_H_ */
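The CLASS_ROUTE_HASH_ENTRY_SIZE register packs both route-table parameters
defined above. A minimal sketch of the value a driver might program, assuming
the 128-byte entries and 20 hash bits from this header (the helper name is
illustrative):

/* Given the definitions above: 128-byte entries, 2^20 hash buckets. */
static u32 class_route_geometry(void)
{
	return CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
	       CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS);
	/* = 0x00140080 */
}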
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
-@@ -0,0 +1,242 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _EMAC_H_
-+#define _EMAC_H_
-+
-+#include <linux/ethtool.h>
-+
-+#define EMAC_IEVENT_REG 0x004
-+#define EMAC_IMASK_REG 0x008
-+#define EMAC_R_DES_ACTIVE_REG 0x010
-+#define EMAC_X_DES_ACTIVE_REG 0x014
-+#define EMAC_ECNTRL_REG 0x024
-+#define EMAC_MII_DATA_REG 0x040
-+#define EMAC_MII_CTRL_REG 0x044
-+#define EMAC_MIB_CTRL_STS_REG 0x064
-+#define EMAC_RCNTRL_REG 0x084
-+#define EMAC_TCNTRL_REG 0x0C4
-+#define EMAC_PHY_ADDR_LOW 0x0E4
-+#define EMAC_PHY_ADDR_HIGH 0x0E8
-+#define EMAC_GAUR 0x120
-+#define EMAC_GALR 0x124
-+#define EMAC_TFWR_STR_FWD 0x144
-+#define EMAC_RX_SECTION_FULL 0x190
-+#define EMAC_RX_SECTION_EMPTY 0x194
-+#define EMAC_TX_SECTION_EMPTY 0x1A0
-+#define EMAC_TRUNC_FL 0x1B0
-+
-+#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
-+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
-+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
-+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
-+#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
-+#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
-+#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
-+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
-+#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
-+#define RMON_T_COL 0x224 /* RMON TX collision count */
-+#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
-+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
-+#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
-+#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
-+#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
-+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
-+#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
-+#define RMON_T_OCTETS 0x244 /* RMON TX octets */
-+#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
-+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
-+#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
-+#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
-+#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
-+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
-+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
-+#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
-+#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
-+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
-+#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
-+#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
-+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
-+#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
-+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
-+#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
-+#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
-+#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
-+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
-+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
-+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
-+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
-+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
-+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
-+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
-+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
-+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
-+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
-+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
-+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
-+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
-+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
-+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
-+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
-+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
-+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
-+
-+#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/
-+#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/
-+
-+/* GEMAC definitions and settings */
-+
-+#define EMAC_PORT_0 0
-+#define EMAC_PORT_1 1
-+
-+/* GEMAC Bit definitions */
-+#define EMAC_IEVENT_HBERR 0x80000000
-+#define EMAC_IEVENT_BABR 0x40000000
-+#define EMAC_IEVENT_BABT 0x20000000
-+#define EMAC_IEVENT_GRA 0x10000000
-+#define EMAC_IEVENT_TXF 0x08000000
-+#define EMAC_IEVENT_TXB 0x04000000
-+#define EMAC_IEVENT_RXF 0x02000000
-+#define EMAC_IEVENT_RXB 0x01000000
-+#define EMAC_IEVENT_MII 0x00800000
-+#define EMAC_IEVENT_EBERR 0x00400000
-+#define EMAC_IEVENT_LC 0x00200000
-+#define EMAC_IEVENT_RL 0x00100000
-+#define EMAC_IEVENT_UN 0x00080000
-+
-+#define EMAC_IMASK_HBERR 0x80000000
-+#define EMAC_IMASK_BABR 0x40000000
-+#define EMAC_IMASKT_BABT 0x20000000
-+#define EMAC_IMASK_GRA 0x10000000
-+#define EMAC_IMASKT_TXF 0x08000000
-+#define EMAC_IMASK_TXB 0x04000000
-+#define EMAC_IMASKT_RXF 0x02000000
-+#define EMAC_IMASK_RXB 0x01000000
-+#define EMAC_IMASK_MII 0x00800000
-+#define EMAC_IMASK_EBERR 0x00400000
-+#define EMAC_IMASK_LC 0x00200000
-+#define EMAC_IMASKT_RL 0x00100000
-+#define EMAC_IMASK_UN 0x00080000
-+
-+#define EMAC_RCNTRL_MAX_FL_SHIFT 16
-+#define EMAC_RCNTRL_LOOP 0x00000001
-+#define EMAC_RCNTRL_DRT 0x00000002
-+#define EMAC_RCNTRL_MII_MODE 0x00000004
-+#define EMAC_RCNTRL_PROM 0x00000008
-+#define EMAC_RCNTRL_BC_REJ 0x00000010
-+#define EMAC_RCNTRL_FCE 0x00000020
-+#define EMAC_RCNTRL_RGMII 0x00000040
-+#define EMAC_RCNTRL_SGMII 0x00000080
-+#define EMAC_RCNTRL_RMII 0x00000100
-+#define EMAC_RCNTRL_RMII_10T 0x00000200
-+#define EMAC_RCNTRL_CRC_FWD 0x00004000
-+
-+#define EMAC_TCNTRL_GTS 0x00000001
-+#define EMAC_TCNTRL_HBC 0x00000002
-+#define EMAC_TCNTRL_FDEN 0x00000004
-+#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
-+#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
-+
-+#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
-+#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
-+#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
-+#define EMAC_ECNTRL_SLEEP 0x00000008
-+#define EMAC_ECNTRL_SPEED 0x00000020
-+#define EMAC_ECNTRL_DBSWAP 0x00000100
-+
-+#define EMAC_X_WMRK_STRFWD 0x00000100
-+
-+#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
-+#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
-+
-+#define EMAC_RX_SECTION_EMPTY_V 0x00010006
-+/*
-+ * The possible operating speeds of the MAC, currently supporting 10, 100 and
-+ * 1000Mb modes.
-+ */
-+enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
-+
-+/* MII-related definitions */
-+#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
-+#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
-+#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a read operation */
-+#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
-+#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a write operation */
-+#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
-+#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
-+#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
-+#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
-+
-+#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
-+#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
-+#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
-+#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
-+
-+#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
-+ EMAC_MII_DATA_RA_SHIFT)
-+#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
-+ EMAC_MII_DATA_PA_SHIFT)
-+#define EMAC_MII_DATA(v) ((v) & 0xffff)
-+
-+#define EMAC_MII_SPEED_SHIFT 1
-+#define EMAC_HOLDTIME_SHIFT 8
-+#define EMAC_HOLDTIME_MASK 0x7
-+#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
-+ EMAC_HOLDTIME_SHIFT)
-+
-+/*
-+ * The Address organisation for the MAC device. All addresses are split into
-+ * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
-+ * the address and the other field holds the high order bits - this may be 16-bits
-+ * in the case of MAC addresses, or 32-bits for the hash address.
-+ * In terms of memory storage, the first item (bottom) is assumed to be at a
-+ * lower address location than 'top'. i.e. top should be at address location of
-+ * 'bottom' + 4 bytes.
-+ */
-+struct pfe_mac_addr {
-+ u32 bottom; /* Lower 32-bits of address. */
-+ u32 top; /* Upper 32-bits of address. */
-+};
-+
-+/*
-+ * The following is the organisation of the address filters section of the MAC
-+ * registers. The Cadence MAC contains four possible specific address match
-+ * addresses, if an incoming frame corresponds to any one of these four
-+ * addresses then the frame will be copied to memory.
-+ * It is not necessary for all four of the address match registers to be
-+ * programmed, this is application dependent.
-+ */
-+struct spec_addr {
-+ struct pfe_mac_addr one; /* Specific address register 1. */
-+ struct pfe_mac_addr two; /* Specific address register 2. */
-+ struct pfe_mac_addr three; /* Specific address register 3. */
-+ struct pfe_mac_addr four; /* Specific address register 4. */
-+};
-+
-+struct gemac_cfg {
-+ u32 mode;
-+ u32 speed;
-+ u32 duplex;
-+};
-+
-+/* EMAC Hash size */
-+#define EMAC_HASH_REG_BITS 64
-+
-+#define EMAC_SPEC_ADDR_MAX 4
-+
-+#endif /* _EMAC_H_ */
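The EMAC_MII_DATA_* fields above follow the usual Clause 22 management-frame
layout. The sketch below composes a read command word from them; it is
illustrative only, and the register write plus the completion polling on
EMAC_IEVENT_MII are omitted.

/* Illustrative: Clause 22 read command destined for EMAC_MII_DATA_REG. */
static u32 emac_mii_read_cmd(int phy_addr, int reg_addr)
{
	return EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
	       EMAC_MII_DATA_PA(phy_addr) | EMAC_MII_DATA_RA(reg_addr) |
	       EMAC_MII_DATA_TA;
}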
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
-@@ -0,0 +1,86 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _GPI_H_
-+#define _GPI_H_
-+
-+#define GPI_VERSION 0x00
-+#define GPI_CTRL 0x04
-+#define GPI_RX_CONFIG 0x08
-+#define GPI_HDR_SIZE 0x0c
-+#define GPI_BUF_SIZE 0x10
-+#define GPI_LMEM_ALLOC_ADDR 0x14
-+#define GPI_LMEM_FREE_ADDR 0x18
-+#define GPI_DDR_ALLOC_ADDR 0x1c
-+#define GPI_DDR_FREE_ADDR 0x20
-+#define GPI_CLASS_ADDR 0x24
-+#define GPI_DRX_FIFO 0x28
-+#define GPI_TRX_FIFO 0x2c
-+#define GPI_INQ_PKTPTR 0x30
-+#define GPI_DDR_DATA_OFFSET 0x34
-+#define GPI_LMEM_DATA_OFFSET 0x38
-+#define GPI_TMLF_TX 0x4c
-+#define GPI_DTX_ASEQ 0x50
-+#define GPI_FIFO_STATUS 0x54
-+#define GPI_FIFO_DEBUG 0x58
-+#define GPI_TX_PAUSE_TIME 0x5c
-+#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
-+#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
-+#define GPI_TOE_CHKSUM_EN 0x68
-+#define GPI_OVERRUN_DROPCNT 0x6c
-+#define GPI_CSR_MTIP_PAUSE_REG 0x74
-+#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
-+#define GPI_CSR_RX_CNT 0x7c
-+#define GPI_CSR_TX_CNT 0x80
-+#define GPI_CSR_DEBUG1 0x84
-+#define GPI_CSR_DEBUG2 0x88
-+
-+struct gpi_cfg {
-+ u32 lmem_rtry_cnt;
-+ u32 tmlf_txthres;
-+ u32 aseq_len;
-+ u32 mtip_pause_reg;
-+};
-+
-+/* GPI commons defines */
-+#define GPI_LMEM_BUF_EN 0x1
-+#define GPI_DDR_BUF_EN 0x1
-+
-+/* EGPI 1 defines */
-+#define EGPI1_LMEM_RTRY_CNT 0x40
-+#define EGPI1_TMLF_TXTHRES 0xBC
-+#define EGPI1_ASEQ_LEN 0x50
-+
-+/* EGPI 2 defines */
-+#define EGPI2_LMEM_RTRY_CNT 0x40
-+#define EGPI2_TMLF_TXTHRES 0xBC
-+#define EGPI2_ASEQ_LEN 0x40
-+
-+/* EGPI 3 defines */
-+#define EGPI3_LMEM_RTRY_CNT 0x40
-+#define EGPI3_TMLF_TXTHRES 0xBC
-+#define EGPI3_ASEQ_LEN 0x40
-+
-+/* HGPI defines */
-+#define HGPI_LMEM_RTRY_CNT 0x40
-+#define HGPI_TMLF_TXTHRES 0xBC
-+#define HGPI_ASEQ_LEN 0x40
-+
-+#define EGPI_PAUSE_TIME 0x000007D0
-+#define EGPI_PAUSE_ENABLE 0x40000000
-+#endif /* _GPI_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
-@@ -0,0 +1,100 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _HIF_H_
-+#define _HIF_H_
-+
-+/* @file hif.h.
-+ * hif - PFE hif block control and status register.
-+ * Mapped on CBUS and accessible from all PE's and ARM.
-+ */
-+#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
-+#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
-+#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
-+#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
-+#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
-+#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
-+#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
-+#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
-+#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
-+#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
-+#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
-+#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
-+#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
-+#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
-+#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
-+#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
-+#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
-+
-+/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
-+#define HIF_INT BIT(0)
-+#define HIF_RXBD_INT BIT(1)
-+#define HIF_RXPKT_INT BIT(2)
-+#define HIF_TXBD_INT BIT(3)
-+#define HIF_TXPKT_INT BIT(4)
-+
-+/* HIF_TX_CTRL bits */
-+#define HIF_CTRL_DMA_EN BIT(0)
-+#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
-+#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
-+
-+/* HIF_RX_STATUS bits */
-+#define BDP_CSR_RX_DMA_ACTV BIT(16)
-+
-+/* HIF_INT_ENABLE bits */
-+#define HIF_INT_EN BIT(0)
-+#define HIF_RXBD_INT_EN BIT(1)
-+#define HIF_RXPKT_INT_EN BIT(2)
-+#define HIF_TXBD_INT_EN BIT(3)
-+#define HIF_TXPKT_INT_EN BIT(4)
-+
-+/* HIF_POLL_CTRL bits*/
-+#define HIF_RX_POLL_CTRL_CYCLE 0x0400
-+#define HIF_TX_POLL_CTRL_CYCLE 0x0400
-+
-+/* HIF_INT_COAL bits*/
-+#define HIF_INT_COAL_ENABLE BIT(31)
-+
-+/* Buffer descriptor control bits */
-+#define BD_CTRL_BUFLEN_MASK 0x3fff
-+#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
-+#define BD_CTRL_CBD_INT_EN BIT(16)
-+#define BD_CTRL_PKT_INT_EN BIT(17)
-+#define BD_CTRL_LIFM BIT(18)
-+#define BD_CTRL_LAST_BD BIT(19)
-+#define BD_CTRL_DIR BIT(20)
-+#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
-+#define BD_CTRL_PKT_XFER BIT(24)
-+#define BD_CTRL_DESC_EN BIT(31)
-+#define BD_CTRL_PARSE_DISABLE BIT(25)
-+#define BD_CTRL_BRFETCH_DISABLE BIT(26)
-+#define BD_CTRL_RTFETCH_DISABLE BIT(27)
-+
-+/* Buffer descriptor status bits*/
-+#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
-+#define BD_STATUS_DIR_PROC_ID BIT(16)
-+#define BD_STATUS_CONN_ID_EN BIT(17)
-+#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
-+#define BD_STATUS_LE_DATA BIT(21)
-+#define BD_STATUS_CHKSUM_EN BIT(22)
-+
-+/* HIF Buffer descriptor status bits */
-+#define DIR_PROC_ID BIT(16)
-+#define PROC_ID(id) ((id) << 18)
-+
-+#endif /* _HIF_H_ */
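The BD_CTRL_* bits above are OR-ed into per-descriptor control words. One
plausible RX descriptor setup is sketched below as an illustrative helper; the
driver's actual bit choices may differ.

/* Illustrative: enable the descriptor, raise a per-packet interrupt,
 * mark last-buffer-in-frame, and encode the buffer length.
 */
static u32 hif_rx_bd_ctrl(unsigned int buf_len)
{
	return BD_CTRL_DESC_EN | BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM |
	       BD_BUF_LEN(buf_len);
}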
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
-@@ -0,0 +1,50 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _HIF_NOCPY_H_
-+#define _HIF_NOCPY_H_
-+
-+#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
-+#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
-+#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
-+#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
-+#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
-+#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
-+#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
-+#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
-+#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
-+#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
-+#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
-+#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
-+#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
-+#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
-+#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
-+#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
-+#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
-+#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
-+#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
-+#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
-+#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
-+#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
-+#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
-+#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
-+#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
-+#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
-+#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
-+
-+#endif /* _HIF_NOCPY_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
-@@ -0,0 +1,168 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _TMU_CSR_H_
-+#define _TMU_CSR_H_
-+
-+#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
-+#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
-+#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
-+#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
-+#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
-+#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
-+#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
-+#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
-+#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
-+#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
-+#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
-+#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
-+#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
-+#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
-+#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
-+#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
-+#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
-+#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
-+#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
-+#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
-+#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
-+#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
-+#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
-+#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
-+#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
-+#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
-+#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
-+#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
-+#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
-+#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
-+#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
-+#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
-+#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
-+#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
-+#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
-+#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
-+#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
-+#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
-+#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
-+#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
-+#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
-+#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
-+#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
-+#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
-+#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
-+#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
-+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
-+ * This is a global Enable for all schedulers in PHY0
-+ */
-+#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
-+
-+#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
-+#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
-+#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
-+#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
-+#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
-+#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
-+#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
-+#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
-+#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
-+#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
-+
-+/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
-+ * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
-+ * the internal memory. This address is used to access both the PM and DM of
-+ * all the PE's
-+ */
-+#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
-+
-+/* Internal Memory Access Write Data */
-+#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
-+/* Internal Memory Access Read Data. The commands are blocked
-+ * at the mem_access only
-+ */
-+#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
-+
-+/* [31:0] PHY0 in queue address (must be initialized with one of the
-+ * xxx_INQ_PKTPTR cbus addresses)
-+ */
-+#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
-+/* [31:0] PHY1 in queue address (must be initialized with one of the
-+ * xxx_INQ_PKTPTR cbus addresses)
-+ */
-+#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
-+/* [31:0] PHY2 in queue address (must be initialized with one of the
-+ * xxx_INQ_PKTPTR cbus addresses)
-+ */
-+#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
-+/* [31:0] PHY3 in queue address (must be initialized with one of the
-+ * xxx_INQ_PKTPTR cbus addresses)
-+ */
-+#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
-+#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
-+#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
-+
-+#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
-+#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
-+#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
-+
-+#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
-+#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
-+#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
-+/* [31:0] PHY4 in queue address (must be initialized with one of the
-+ * xxx_INQ_PKTPTR cbus addresses)
-+ */
-+#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
-+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
-+ * This is a global Enable for all schedulers in PHY1
-+ */
-+#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
-+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
-+ * This is a global Enable for all schedulers in PHY2
-+ */
-+#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
-+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
-+ * This is a global Enable for all schedulers in PHY3
-+ */
-+#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
-+#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
-+/* [31:0] PHY5 in queue address (must be initialized with one of the
-+ * xxx_INQ_PKTPTR cbus addresses)
-+ */
-+#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
-+
-+#define SW_RESET BIT(0) /* Global software reset */
-+#define INQ_RESET BIT(2)
-+#define TEQ_RESET BIT(3)
-+#define TDQ_RESET BIT(4)
-+#define PE_RESET BIT(5)
-+#define MEM_INIT BIT(6)
-+#define MEM_INIT_DONE BIT(7)
-+#define LLM_INIT BIT(8)
-+#define LLM_INIT_DONE BIT(9)
-+#define ECC_MEM_INIT_DONE BIT(10)
-+
-+struct tmu_cfg {
-+ u32 pe_sys_clk_ratio;
-+ unsigned long llm_base_addr;
-+ u32 llm_queue_len;
-+};
-+
-+/* Not HW related for pfe_ctrl / pfe common defines */
-+#define DEFAULT_MAX_QDEPTH 80
-+#define DEFAULT_Q0_QDEPTH 511 /*We keep one large queue for host tx qos */
-+#define DEFAULT_TMU3_QDEPTH 127
-+
-+#endif /* _TMU_CSR_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
-@@ -0,0 +1,61 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _UTIL_CSR_H_
-+#define _UTIL_CSR_H_
-+
-+#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
-+#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
-+#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
-+
-+#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
-+
-+#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
-+#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
-+#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
-+#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
-+
-+#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
-+#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
-+#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
-+
-+#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
-+#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
-+
-+#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
-+#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
-+#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
-+#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
-+#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
-+#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
-+#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
-+#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
-+#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
-+#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
-+
-+#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
-+#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
-+#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
-+
-+#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
-+
-+struct util_cfg {
-+ u32 pe_sys_clk_ratio;
-+};
-+
-+#endif /* _UTIL_CSR_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
-@@ -0,0 +1,372 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
-+ */
-+
-+#ifndef _PFE_H_
-+#define _PFE_H_
-+
-+#include "cbus.h"
-+
-+#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
-+/*
-+ * Only valid for mem access register interface
-+ */
-+#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
-+#define CLASS_DMEM_SIZE 0x00002000
-+#define CLASS_IMEM_SIZE 0x00008000
-+
-+#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
-+/*
-+ * Only valid for mem access register interface
-+ */
-+#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
-+#define TMU_DMEM_SIZE 0x00000800
-+#define TMU_IMEM_SIZE 0x00002000
-+
-+#define UTIL_DMEM_BASE_ADDR 0x00000000
-+#define UTIL_DMEM_SIZE 0x00002000
-+
-+#define PE_LMEM_BASE_ADDR 0xc3010000
-+#define PE_LMEM_SIZE 0x8000
-+#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
-+
-+#define DMEM_BASE_ADDR 0x00000000
-+#define DMEM_SIZE 0x2000 /* TMU has less... */
-+#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
-+
-+#define PMEM_BASE_ADDR 0x00010000
-+#define PMEM_SIZE 0x8000 /* TMU has less... */
-+#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
-+
-+/* These check memory ranges from PE point of view/memory map */
-+#define IS_DMEM(addr, len) \
-+ ({ typeof(addr) addr_ = (addr); \
-+ ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
-+ (((unsigned long)(addr_) + (len)) <= DMEM_END); })
-+
-+#define IS_PMEM(addr, len) \
-+ ({ typeof(addr) addr_ = (addr); \
-+ ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
-+ (((unsigned long)(addr_) + (len)) <= PMEM_END); })
-+
-+#define IS_PE_LMEM(addr, len) \
-+ ({ typeof(addr) addr_ = (addr); \
-+ ((unsigned long)(addr_) >= \
-+ PE_LMEM_BASE_ADDR) && \
-+ (((unsigned long)(addr_) + \
-+ (len)) <= PE_LMEM_END); })
-+
-+#define IS_PFE_LMEM(addr, len) \
-+ ({ typeof(addr) addr_ = (addr); \
-+ ((unsigned long)(addr_) >= \
-+ CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
-+ (((unsigned long)(addr_) + (len)) <= \
-+ CBUS_VIRT_TO_PFE(LMEM_END)); })
-+
-+#define __IS_PHYS_DDR(addr, len) \
-+ ({ typeof(addr) addr_ = (addr); \
-+ ((unsigned long)(addr_) >= \
-+ DDR_PHYS_BASE_ADDR) && \
-+ (((unsigned long)(addr_) + (len)) <= \
-+ DDR_PHYS_END); })
-+
-+#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
-+
-+/*
-+ * If using a run-time virtual address for the cbus base address use this code
-+ */
-+extern void *cbus_base_addr;
-+extern void *ddr_base_addr;
-+extern unsigned long ddr_phys_base_addr;
-+extern unsigned int ddr_size;
-+
-+#define CBUS_BASE_ADDR cbus_base_addr
-+#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
-+#define DDR_BASE_ADDR ddr_base_addr
-+#define DDR_SIZE ddr_size
-+
-+#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
-+
-+#define LS1012A_PFE_RESET_WA /*
-+ * PFE doesn't have a global reset, so
-+ * re-init must take care of a few things
-+ * to make the PFE functional after reset
-+ */
-+#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
-+ * as seen by PE's.
-+ */
-+/* CBUS physical base address as seen by PE's. */
-+#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
-+
-+#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
-+#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
-+#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
-+ PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
-+/* Translates to PFE address map */
-+
-+#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
-+#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
-+#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
-+
-+#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
-+ PFE_CBUS_PHYS_BASE_ADDR)
-+#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
-+ PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
-+
-+/* The part below is used by the QoS control driver running on the host */
-+#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
-+ * pe's
-+ */
-+
-+enum {
-+ CLASS0_ID = 0,
-+ CLASS1_ID,
-+ CLASS2_ID,
-+ CLASS3_ID,
-+ CLASS4_ID,
-+ CLASS5_ID,
-+ TMU0_ID,
-+ TMU1_ID,
-+ TMU2_ID,
-+ TMU3_ID,
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ UTIL_ID,
-+#endif
-+ MAX_PE
-+};
-+
-+#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
-+ BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
-+ BIT(CLASS4_ID) | BIT(CLASS5_ID))
-+#define CLASS_MAX_ID CLASS5_ID
-+
-+#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
-+ BIT(TMU3_ID))
-+
-+#define TMU_MAX_ID TMU3_ID
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+#define UTIL_MASK BIT(UTIL_ID)
-+#endif
-+
-+struct pe_status {
-+ u32 cpu_state;
-+ u32 activity_counter;
-+ u32 rx;
-+ union {
-+ u32 tx;
-+ u32 tmu_qstatus;
-+ };
-+ u32 drop;
-+#if defined(CFG_PE_DEBUG)
-+ u32 debug_indicator;
-+ u32 debug[16];
-+#endif
-+} __aligned(16);
-+
-+struct pe_sync_mailbox {
-+ u32 stop;
-+ u32 stopped;
-+};
-+
-+/* Drop counter definitions */
-+
-+#define CLASS_NUM_DROP_COUNTERS 13
-+#define UTIL_NUM_DROP_COUNTERS 8
-+
-+/* PE information.
-+ * Structure containing PE-specific information. It is used to create
-+ * generic C functions common to all PEs.
-+ * Before using the library functions this structure needs to be initialized
-+ * with the virtual addresses of the different registers
-+ * (according to the ARM MMU mapping). The default initialization supports a
-+ * virtual == physical mapping.
-+ */
-+struct pe_info {
-+ u32 dmem_base_addr; /* PE's dmem base address */
-+ u32 pmem_base_addr; /* PE's pmem base address */
-+ u32 pmem_size; /* PE's pmem size */
-+
-+ void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
-+ * address
-+ */
-+ void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
-+ * address
-+ */
-+ void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
-+ * address
-+ */
-+};
-+
-+void pe_lmem_read(u32 *dst, u32 len, u32 offset);
-+void pe_lmem_write(u32 *src, u32 len, u32 offset);
-+
-+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
-+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
-+
-+u32 pe_pmem_read(int id, u32 addr, u8 size);
-+
-+void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
-+u32 pe_dmem_read(int id, u32 addr, u8 size);
-+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
-+void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
-+void class_bus_write(u32 val, u32 addr, u8 size);
-+u32 class_bus_read(u32 addr, u8 size);
-+
-+#define class_bus_readl(addr) class_bus_read(addr, 4)
-+#define class_bus_readw(addr) class_bus_read(addr, 2)
-+#define class_bus_readb(addr) class_bus_read(addr, 1)
-+
-+#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
-+#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
-+#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
-+
-+#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
-+#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
-+#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
-+
-+#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
-+#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
-+#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
-+
-+/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
-+int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
-+ struct device *dev);
-+
-+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
-+ unsigned int ddr_size);
-+void bmu_init(void *base, struct BMU_CFG *cfg);
-+void bmu_reset(void *base);
-+void bmu_enable(void *base);
-+void bmu_disable(void *base);
-+void bmu_set_config(void *base, struct BMU_CFG *cfg);
-+
-+/*
-+ * An enumerated type for loopback values. This can be one of three values:
-+ * no loopback (normal operation), local loopback (through the MAC's internal
-+ * loopback module) or PHY loopback (through the external PHY).
-+ */
-+#ifndef __MAC_LOOP_ENUM__
-+#define __MAC_LOOP_ENUM__
-+enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
-+#endif
-+
-+void gemac_init(void *base, void *config);
-+void gemac_disable_rx_checksum_offload(void *base);
-+void gemac_enable_rx_checksum_offload(void *base);
-+void gemac_set_speed(void *base, enum mac_speed gem_speed);
-+void gemac_set_duplex(void *base, int duplex);
-+void gemac_set_mode(void *base, int mode);
-+void gemac_enable(void *base);
-+void gemac_tx_disable(void *base);
-+void gemac_tx_enable(void *base);
-+void gemac_disable(void *base);
-+void gemac_reset(void *base);
-+void gemac_set_address(void *base, struct spec_addr *addr);
-+struct spec_addr gemac_get_address(void *base);
-+void gemac_set_loop(void *base, enum mac_loop gem_loop);
-+void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
-+void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
-+void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
-+void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
-+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
-+ unsigned int entry_index);
-+void gemac_clear_laddr1(void *base);
-+void gemac_clear_laddr2(void *base);
-+void gemac_clear_laddr3(void *base);
-+void gemac_clear_laddr4(void *base);
-+void gemac_clear_laddrN(void *base, unsigned int entry_index);
-+struct pfe_mac_addr gemac_get_hash(void *base);
-+void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
-+struct pfe_mac_addr gem_get_laddr1(void *base);
-+struct pfe_mac_addr gem_get_laddr2(void *base);
-+struct pfe_mac_addr gem_get_laddr3(void *base);
-+struct pfe_mac_addr gem_get_laddr4(void *base);
-+struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
-+void gemac_set_config(void *base, struct gemac_cfg *cfg);
-+void gemac_allow_broadcast(void *base);
-+void gemac_no_broadcast(void *base);
-+void gemac_enable_1536_rx(void *base);
-+void gemac_disable_1536_rx(void *base);
-+void gemac_set_rx_max_fl(void *base, int mtu);
-+void gemac_enable_rx_jmb(void *base);
-+void gemac_disable_rx_jmb(void *base);
-+void gemac_enable_stacked_vlan(void *base);
-+void gemac_disable_stacked_vlan(void *base);
-+void gemac_enable_pause_rx(void *base);
-+void gemac_disable_pause_rx(void *base);
-+void gemac_enable_copy_all(void *base);
-+void gemac_disable_copy_all(void *base);
-+void gemac_set_bus_width(void *base, int width);
-+void gemac_set_wol(void *base, u32 wol_conf);
-+
-+void gpi_init(void *base, struct gpi_cfg *cfg);
-+void gpi_reset(void *base);
-+void gpi_enable(void *base);
-+void gpi_disable(void *base);
-+void gpi_set_config(void *base, struct gpi_cfg *cfg);
-+
-+void class_init(struct class_cfg *cfg);
-+void class_reset(void);
-+void class_enable(void);
-+void class_disable(void);
-+void class_set_config(struct class_cfg *cfg);
-+
-+void tmu_reset(void);
-+void tmu_init(struct tmu_cfg *cfg);
-+void tmu_enable(u32 pe_mask);
-+void tmu_disable(u32 pe_mask);
-+u32 tmu_qstatus(u32 if_id);
-+u32 tmu_pkts_processed(u32 if_id);
-+
-+void util_init(struct util_cfg *cfg);
-+void util_reset(void);
-+void util_enable(void);
-+void util_disable(void);
-+
-+void hif_init(void);
-+void hif_tx_enable(void);
-+void hif_tx_disable(void);
-+void hif_rx_enable(void);
-+void hif_rx_disable(void);
-+
-+/* Get Chip Revision level
-+ *
-+ */
-+static inline unsigned int CHIP_REVISION(void)
-+{
-+ /* For LS1012A always return 1 */
-+ return 1;
-+}
-+
-+/* Start HIF rx DMA
-+ *
-+ */
-+static inline void hif_rx_dma_start(void)
-+{
-+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
-+}
-+
-+/* Start HIF tx DMA
-+ *
-+ */
-+static inline void hif_tx_dma_start(void)
-+{
-+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
-+}
-+
-+#endif /* _PFE_H_ */
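For readers tracing the address macros above, here is a minimal sketch of the DDR round trip. This is an illustration only: the buffer address 0x83400000 is an arbitrary example, not a value taken from the driver, and pfe.h is assumed to be included.

#include <linux/kernel.h>

static void pfe_addr_translation_example(void)
{
	unsigned long phys = 0x83400000UL;                   /* example DDR physical address */
	unsigned long pe_view = DDR_PHYS_TO_PFE(phys);       /* 0x03400000: bit 31 cleared */
	unsigned long host_view = DDR_PFE_TO_PHYS(pe_view);  /* 0x83400000: bit 31 set again */

	WARN_ON(host_view != phys);	/* the translation is a simple bit-31 fold */
}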
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_cdev.c
-@@ -0,0 +1,258 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2018 NXP
-+ */
-+
-+/* @pfe_cdev.c
-+ * Dummy character device exposing the PFE to userspace (US).
-+ * - used for interacting with the kernel layer for link status
-+ */
-+
-+#include <linux/eventfd.h>
-+#include <linux/irqreturn.h>
-+#include <linux/io.h>
-+#include <asm/irq.h>
-+
-+#include "pfe_cdev.h"
-+#include "pfe_mod.h"
-+
-+static int pfe_majno;
-+static struct class *pfe_char_class;
-+static struct device *pfe_char_dev;
-+struct eventfd_ctx *g_trigger;
-+
-+struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
-+
-+static int pfe_cdev_open(struct inode *inp, struct file *fp)
-+{
-+ pr_debug("PFE CDEV device opened.\n");
-+ return 0;
-+}
-+
-+static ssize_t pfe_cdev_read(struct file *fp, char *buf,
-+ size_t len, loff_t *off)
-+{
-+ int ret = 0;
-+
-+ pr_info("PFE CDEV attempt copying (%lu) size of user.\n",
-+ sizeof(link_states));
-+
-+ pr_debug("Dump link_state on screen before copy_to_user\n");
-+ for (; ret < PFE_CDEV_ETH_COUNT; ret++) {
-+ pr_debug("%u %u", link_states[ret].phy_id,
-+ link_states[ret].state);
-+ pr_debug("\n");
-+ }
-+
-+ /* Copy to user the value in buffer sized len */
-+ ret = copy_to_user(buf, &link_states, sizeof(link_states));
-+ if (ret != 0) {
-+ pr_err("Failed to send (%d)bytes of (%lu) requested.\n",
-+ ret, len);
-+ return -EFAULT;
-+ }
-+
-+ /* offset set back to 0 as there is contextual reading offset */
-+ *off = 0;
-+ pr_debug("Read of (%lu) bytes performed.\n", sizeof(link_states));
-+
-+ return sizeof(link_states);
-+}
-+
-+/**
-+ * This function is for getting commands from user space through a non-IOCTL
-+ * channel. It can be used to configure the device.
-+ * TODO: To be filled in the future, if duplex communication with user space
-+ * is required.
-+ */
-+static ssize_t pfe_cdev_write(struct file *fp, const char *buf,
-+ size_t len, loff_t *off)
-+{
-+ pr_info("PFE CDEV Write operation not supported!\n");
-+
-+ return -EFAULT;
-+}
-+
-+static int pfe_cdev_release(struct inode *inp, struct file *fp)
-+{
-+ if (g_trigger) {
-+ free_irq(pfe->hif_irq, g_trigger);
-+ eventfd_ctx_put(g_trigger);
-+ g_trigger = NULL;
-+ }
-+
-+ pr_info("PFE_CDEV: Device successfully closed\n");
-+ return 0;
-+}
-+
-+/*
-+ * hif_us_isr-
-+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
-+ */
-+static irqreturn_t hif_us_isr(int irq, void *arg)
-+{
-+ struct eventfd_ctx *trigger = (struct eventfd_ctx *)arg;
-+ int int_status;
-+ int int_enable_mask;
-+
-+ /*Read hif interrupt source register */
-+ int_status = readl_relaxed(HIF_INT_SRC);
-+ int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
-+
-+ if ((int_status & HIF_INT) == 0)
-+ return IRQ_NONE;
-+
-+ if (int_status & HIF_RXPKT_INT) {
-+ int_enable_mask &= ~(HIF_RXPKT_INT);
-+ /* Disable interrupts, they will be enabled after
-+ * they are serviced
-+ */
-+ writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
-+
-+ eventfd_signal(trigger, 1);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+#define PFE_INTR_COAL_USECS 100
-+static long pfe_cdev_ioctl(struct file *fp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ int ret = -EFAULT;
-+ int __user *argp = (int __user *)arg;
-+
-+ pr_debug("PFE CDEV IOCTL Called with cmd=(%u)\n", cmd);
-+
-+ switch (cmd) {
-+ case PFE_CDEV_ETH0_STATE_GET:
-+ /* Return an unsigned int (link state) for ETH0 */
-+ *argp = link_states[0].state;
-+ pr_debug("Returning state=%d for ETH0\n", *argp);
-+ ret = 0;
-+ break;
-+ case PFE_CDEV_ETH1_STATE_GET:
-+ /* Return an unsigned int (link state) for ETH1 */
-+ *argp = link_states[1].state;
-+ pr_debug("Returning state=%d for ETH1\n", *argp);
-+ ret = 0;
-+ break;
-+ case PFE_CDEV_HIF_INTR_EN:
-+ /* Return success/failure */
-+ g_trigger = eventfd_ctx_fdget(*argp);
-+ if (IS_ERR(g_trigger))
-+ return PTR_ERR(g_trigger);
-+ ret = request_irq(pfe->hif_irq, hif_us_isr, 0, "pfe_hif",
-+ g_trigger);
-+ if (ret) {
-+ pr_err("%s: failed to get the hif IRQ = %d\n",
-+ __func__, pfe->hif_irq);
-+ eventfd_ctx_put(g_trigger);
-+ g_trigger = NULL;
-+ }
-+ writel((PFE_INTR_COAL_USECS * (pfe->ctrl.sys_clk / 1000)) |
-+ HIF_INT_COAL_ENABLE, HIF_INT_COAL);
-+
-+ pr_debug("request_irq for hif interrupt: %d\n", pfe->hif_irq);
-+ ret = 0;
-+ break;
-+ default:
-+ pr_info("Unsupported cmd (%d) for PFE CDEV.\n", cmd);
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static unsigned int pfe_cdev_poll(struct file *fp,
-+ struct poll_table_struct *wait)
-+{
-+ pr_info("PFE CDEV poll method not supported\n");
-+ return 0;
-+}
-+
-+static const struct file_operations pfe_cdev_fops = {
-+ .open = pfe_cdev_open,
-+ .read = pfe_cdev_read,
-+ .write = pfe_cdev_write,
-+ .release = pfe_cdev_release,
-+ .unlocked_ioctl = pfe_cdev_ioctl,
-+ .poll = pfe_cdev_poll,
-+};
-+
-+int pfe_cdev_init(void)
-+{
-+ int ret;
-+
-+ pr_debug("PFE CDEV initialization begin\n");
-+
-+ /* Register the major number for the device */
-+ pfe_majno = register_chrdev(0, PFE_CDEV_NAME, &pfe_cdev_fops);
-+ if (pfe_majno < 0) {
-+ pr_err("Unable to register PFE CDEV. PFE CDEV not available\n");
-+ ret = pfe_majno;
-+ goto cleanup;
-+ }
-+
-+ pr_debug("PFE CDEV assigned major number: %d\n", pfe_majno);
-+
-+ /* Register the class for the device */
-+ pfe_char_class = class_create(THIS_MODULE, PFE_CLASS_NAME);
-+ if (IS_ERR(pfe_char_class)) {
-+ pr_err(
-+ "Failed to init class for PFE CDEV. PFE CDEV not available.\n");
-+ goto cleanup;
-+ }
-+
-+ pr_debug("PFE CDEV Class created successfully.\n");
-+
-+ /* Create the device without any parent and without any callback data */
-+ pfe_char_dev = device_create(pfe_char_class, NULL,
-+ MKDEV(pfe_majno, 0), NULL,
-+ PFE_CDEV_NAME);
-+ if (IS_ERR(pfe_char_dev)) {
-+ pr_err("Unable to PFE CDEV device. PFE CDEV not available.\n");
-+ ret = PTR_ERR(pfe_char_dev);
-+ goto cleanup;
-+ }
-+
-+ /* Information structure being shared with the userspace */
-+ memset(link_states, 0, sizeof(struct pfe_shared_info) *
-+ PFE_CDEV_ETH_COUNT);
-+
-+ pr_info("PFE CDEV created: %s\n", PFE_CDEV_NAME);
-+
-+ ret = 0;
-+ return ret;
-+
-+cleanup:
-+ if (!IS_ERR(pfe_char_class))
-+ class_destroy(pfe_char_class);
-+
-+ if (pfe_majno > 0)
-+ unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
-+
-+ ret = -EFAULT;
-+ return ret;
-+}
-+
-+void pfe_cdev_exit(void)
-+{
-+ if (!IS_ERR(pfe_char_dev))
-+ device_destroy(pfe_char_class, MKDEV(pfe_majno, 0));
-+
-+ if (!IS_ERR(pfe_char_class)) {
-+ class_unregister(pfe_char_class);
-+ class_destroy(pfe_char_class);
-+ }
-+
-+ if (pfe_majno > 0)
-+ unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
-+
-+ /* reset the variables */
-+ pfe_majno = 0;
-+ pfe_char_class = NULL;
-+ pfe_char_dev = NULL;
-+
-+ pr_info("PFE CDEV Removed.\n");
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_cdev.h
-@@ -0,0 +1,41 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2018 NXP
-+ */
-+
-+#ifndef _PFE_CDEV_H_
-+#define _PFE_CDEV_H_
-+
-+#include <linux/init.h>
-+#include <linux/device.h>
-+#include <linux/err.h>
-+#include <linux/kernel.h>
-+#include <linux/fs.h>
-+#include <linux/uaccess.h>
-+#include <linux/poll.h>
-+
-+#define PFE_CDEV_NAME "pfe_us_cdev"
-+#define PFE_CLASS_NAME "ppfe_us"
-+
-+/* Extracted from ls1012a_pfe_platform_data: there are 3 interfaces supported
-+ * by the PFE driver. Should be updated if the number of eth devices changes.
-+ */
-+#define PFE_CDEV_ETH_COUNT 3
-+
-+struct pfe_shared_info {
-+ uint32_t phy_id; /* Link phy ID */
-+ uint8_t state; /* Has either 0 or 1 */
-+};
-+
-+extern struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
-+
-+/* IOCTL Commands */
-+#define PFE_CDEV_ETH0_STATE_GET _IOR('R', 0, int)
-+#define PFE_CDEV_ETH1_STATE_GET _IOR('R', 1, int)
-+#define PFE_CDEV_HIF_INTR_EN _IOWR('R', 2, int)
-+
-+int pfe_cdev_init(void);
-+void pfe_cdev_exit(void);
-+
-+#endif /* _PFE_CDEV_H_ */
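A hedged sketch of how a userspace consumer might drive this character device. Assumptions: the PFE_CDEV_* ioctl values above are mirrored into a userspace header, and udev has created /dev/pfe_us_cdev from the class/device registered in pfe_cdev.c; names and error handling are illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd, efd, state = 0;

	fd = open("/dev/pfe_us_cdev", O_RDWR);
	if (fd < 0)
		return 1;

	/* Query the ETH0 link state (0 or 1) */
	if (!ioctl(fd, PFE_CDEV_ETH0_STATE_GET, &state))
		printf("eth0 link: %d\n", state);

	/* Ask the driver to signal HIF RX interrupts through an eventfd */
	efd = eventfd(0, 0);
	if (efd >= 0 && !ioctl(fd, PFE_CDEV_HIF_INTR_EN, &efd)) {
		/* ... poll()/read() on efd to wait for received packets ... */
	}

	close(fd);
	return 0;
}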
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
-@@ -0,0 +1,226 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/kthread.h>
-+
-+#include "pfe_mod.h"
-+#include "pfe_ctrl.h"
-+
-+#define TIMEOUT_MS 1000
-+
-+int relax(unsigned long end)
-+{
-+ if (time_after(jiffies, end)) {
-+ if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
-+ return -1;
-+
-+ if (need_resched())
-+ schedule();
-+ }
-+
-+ return 0;
-+}
-+
-+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
-+{
-+ int id;
-+
-+ mutex_lock(&ctrl->mutex);
-+
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
-+ pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
-+
-+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
-+ if (id == TMU2_ID)
-+ continue;
-+ pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
-+#endif
-+ mutex_unlock(&ctrl->mutex);
-+}
-+
-+void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
-+{
-+ int pe_mask = CLASS_MASK | TMU_MASK;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe_mask |= UTIL_MASK;
-+#endif
-+ mutex_lock(&ctrl->mutex);
-+ pe_start(&pfe->ctrl, pe_mask);
-+ mutex_unlock(&ctrl->mutex);
-+}
-+
-+/* PE sync stop.
-+ * Stops packet processing for a list of PE's (specified using a bitmask).
-+ * The caller must hold ctrl->mutex.
-+ *
-+ * @param ctrl Control context
-+ * @param pe_mask Mask of PE id's to stop
-+ *
-+ */
-+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
-+{
-+ struct pe_sync_mailbox *mbox;
-+ int pe_stopped = 0;
-+ unsigned long end = jiffies + 2;
-+ int i;
-+
-+ pe_mask &= 0x2FF; /*Exclude Util + TMU2 */
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+
-+ while (pe_stopped != pe_mask) {
-+ for (i = 0; i < MAX_PE; i++)
-+ if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ if (pe_dmem_read(i, (unsigned
-+ long)&mbox->stopped, 4) &
-+ cpu_to_be32(0x1))
-+ pe_stopped |= (1 << i);
-+ }
-+
-+ if (relax(end) < 0)
-+ goto err;
-+ }
-+
-+ return 0;
-+
-+err:
-+ pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+
-+ return -EIO;
-+}
-+
-+/* PE start.
-+ * Starts packet processing for a list of PE's (specified using a bitmask).
-+ * The caller must hold ctrl->mutex.
-+ *
-+ * @param ctrl Control context
-+ * @param pe_mask Mask of PE id's to start
-+ *
-+ */
-+void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
-+{
-+ struct pe_sync_mailbox *mbox;
-+ int i;
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+}
-+
-+/* This function will ensure all PEs are put in to idle state */
-+int pe_reset_all(struct pfe_ctrl *ctrl)
-+{
-+ struct pe_sync_mailbox *mbox;
-+ int pe_stopped = 0;
-+ unsigned long end = jiffies + 2;
-+ int i;
-+ int pe_mask = CLASS_MASK | TMU_MASK;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe_mask |= UTIL_MASK;
-+#endif
-+
-+ for (i = 0; i < MAX_PE; i++)
-+ if (pe_mask & (1 << i)) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
-+ long)&mbox->stop, 4);
-+ }
-+
-+ while (pe_stopped != pe_mask) {
-+ for (i = 0; i < MAX_PE; i++)
-+ if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
-+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
-+
-+ if (pe_dmem_read(i, (unsigned long)
-+ &mbox->stopped, 4) &
-+ cpu_to_be32(0x1))
-+ pe_stopped |= (1 << i);
-+ }
-+
-+ if (relax(end) < 0)
-+ goto err;
-+ }
-+
-+ return 0;
-+
-+err:
-+ pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
-+ return -EIO;
-+}
-+
-+int pfe_ctrl_init(struct pfe *pfe)
-+{
-+ struct pfe_ctrl *ctrl = &pfe->ctrl;
-+ int id;
-+
-+ pr_info("%s\n", __func__);
-+
-+ mutex_init(&ctrl->mutex);
-+ spin_lock_init(&ctrl->lock);
-+
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
-+ ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
-+ ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
-+ }
-+
-+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
-+ if (id == TMU2_ID)
-+ continue;
-+ ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
-+ ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
-+ ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
-+#endif
-+
-+ ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
-+ ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
-+ ROUTE_TABLE_BASEADDR;
-+
-+ ctrl->dev = pfe->dev;
-+
-+ pr_info("%s finished\n", __func__);
-+
-+ return 0;
-+}
-+
-+void pfe_ctrl_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+}
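pe_sync_stop() and pe_start() implement a stop/stopped handshake through each PE's sync mailbox, and both must be called with ctrl->mutex held. Below is a sketch of a hypothetical caller that quiesces the class PEs, updates shared DMEM state and restarts them; it is not part of the driver and assumes pfe_ctrl.h/pfe_mod.h are included.

static int example_update_class_pes(struct pfe_ctrl *ctrl)
{
	int ret;

	mutex_lock(&ctrl->mutex);

	ret = pe_sync_stop(ctrl, CLASS_MASK);	/* handshake via sync mailboxes */
	if (!ret) {
		/* ... update per-PE state with pe_dmem_write() here ... */
		pe_start(ctrl, CLASS_MASK);	/* clear the stop flag again */
	}

	mutex_unlock(&ctrl->mutex);

	return ret;
}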
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
-@@ -0,0 +1,100 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_CTRL_H_
-+#define _PFE_CTRL_H_
-+
-+#include <linux/dmapool.h>
-+
-+#include "pfe_mod.h"
-+#include "pfe/pfe.h"
-+
-+#define DMA_BUF_SIZE_128 0x80 /* enough for 1 conntrack */
-+#define DMA_BUF_SIZE_256 0x100
-+/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
-+#define DMA_BUF_SIZE_512 0x200
-+/* 512-byte DMA buffers used by the RTP relay feature */
-+#define DMA_BUF_MIN_ALIGNMENT 8
-+#define DMA_BUF_BOUNDARY (4 * 1024)
-+/* bursts cannot cross a 4 KB boundary */
-+
-+#define CMD_TX_ENABLE 0x0501
-+#define CMD_TX_DISABLE 0x0502
-+
-+#define CMD_RX_LRO 0x0011
-+#define CMD_PKTCAP_ENABLE 0x0d01
-+#define CMD_QM_EXPT_RATE 0x020c
-+
-+#define CLASS_DM_SH_STATIC (0x800)
-+#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
-+#define CLASS_DM_SYNC_MBOX (0x808)
-+#define CLASS_DM_MSG_MBOX (0x810)
-+#define CLASS_DM_DROP_CNTR (0x820)
-+#define CLASS_DM_RESUME (0x854)
-+#define CLASS_DM_PESTATUS (0x860)
-+
-+#define TMU_DM_SH_STATIC (0x80)
-+#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
-+#define TMU_DM_SYNC_MBOX (0x88)
-+#define TMU_DM_MSG_MBOX (0x90)
-+#define TMU_DM_RESUME (0xA0)
-+#define TMU_DM_PESTATUS (0xB0)
-+#define TMU_DM_CONTEXT (0x300)
-+#define TMU_DM_TX_TRANS (0x480)
-+
-+#define UTIL_DM_SH_STATIC (0x0)
-+#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
-+#define UTIL_DM_SYNC_MBOX (0x8)
-+#define UTIL_DM_MSG_MBOX (0x10)
-+#define UTIL_DM_DROP_CNTR (0x20)
-+#define UTIL_DM_RESUME (0x40)
-+#define UTIL_DM_PESTATUS (0x50)
-+
-+struct pfe_ctrl {
-+ struct mutex mutex; /* to serialize pfe control access */
-+ spinlock_t lock;
-+
-+ void *dma_pool;
-+ void *dma_pool_512;
-+ void *dma_pool_128;
-+
-+ struct device *dev;
-+
-+ void *hash_array_baseaddr; /*
-+ * Virtual base address of
-+ * the conntrack hash array
-+ */
-+ unsigned long hash_array_phys_baseaddr; /*
-+ * Physical base address of
-+ * the conntrack hash array
-+ */
-+
-+ int (*event_cb)(u16, u16, u16*);
-+
-+ unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
-+ * Sync mailbox PFE
-+ * internal address,
-+ * initialized
-+ * when parsing elf images
-+ */
-+ unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
-+ * Msg mailbox PFE internal
-+ * address, initialized
-+ * when parsing elf images
-+ */
-+ unsigned int sys_clk; /* AXI clock value, in KHz */
-+};
-+
-+int pfe_ctrl_init(struct pfe *pfe);
-+void pfe_ctrl_exit(struct pfe *pfe);
-+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
-+void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
-+int pe_reset_all(struct pfe_ctrl *ctrl);
-+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
-+void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
-+int relax(unsigned long end);
-+
-+#endif /* _PFE_CTRL_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
-@@ -0,0 +1,99 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/debugfs.h>
-+#include <linux/platform_device.h>
-+
-+#include "pfe_mod.h"
-+
-+static int dmem_show(struct seq_file *s, void *unused)
-+{
-+ u32 dmem_addr, val;
-+ int id = (long int)s->private;
-+ int i;
-+
-+ for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
-+ seq_printf(s, "%04x:", dmem_addr);
-+
-+ for (i = 0; i < 8; i++) {
-+ val = pe_dmem_read(id, dmem_addr + i * 4, 4);
-+ seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
-+ (val >> 8) & 0xff, (val >> 16) & 0xff,
-+ (val >> 24) & 0xff);
-+ }
-+
-+ seq_puts(s, "\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static int dmem_open(struct inode *inode, struct file *file)
-+{
-+ return single_open(file, dmem_show, inode->i_private);
-+}
-+
-+static const struct file_operations dmem_fops = {
-+ .open = dmem_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+int pfe_debugfs_init(struct pfe *pfe)
-+{
-+ struct dentry *d;
-+
-+ pr_info("%s\n", __func__);
-+
-+ pfe->dentry = debugfs_create_dir("pfe", NULL);
-+ if (IS_ERR_OR_NULL(pfe->dentry))
-+ goto err_dir;
-+
-+ d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
-+ &dmem_fops);
-+ if (IS_ERR_OR_NULL(d))
-+ goto err_pe;
-+
-+ return 0;
-+
-+err_pe:
-+ debugfs_remove_recursive(pfe->dentry);
-+
-+err_dir:
-+ return -1;
-+}
-+
-+void pfe_debugfs_exit(struct pfe *pfe)
-+{
-+ debugfs_remove_recursive(pfe->dentry);
-+}
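The six pe*_dmem registrations above differ only in their index; as a sketch (the driver ships the unrolled version), the same files could be created in a loop:

static int pfe_debugfs_create_dmem_files(struct pfe *pfe)
{
	struct dentry *d;
	char name[16];
	long i;

	for (i = 0; i < 6; i++) {
		snprintf(name, sizeof(name), "pe%ld_dmem", i);
		d = debugfs_create_file(name, 0444, pfe->dentry, (void *)i,
					&dmem_fops);
		if (IS_ERR_OR_NULL(d))
			return -1;
	}

	return 0;
}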
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
-@@ -0,0 +1,13 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_DEBUGFS_H_
-+#define _PFE_DEBUGFS_H_
-+
-+int pfe_debugfs_init(struct pfe *pfe);
-+void pfe_debugfs_exit(struct pfe *pfe);
-+
-+#endif /* _PFE_DEBUGFS_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_eth.c
-@@ -0,0 +1,2554 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+/* @pfe_eth.c
-+ * Ethernet driver to handle the exception path for PFE.
-+ * - uses HIF functions to send/receive packets.
-+ * - uses ctrl functions to start/stop interfaces.
-+ * - uses direct register accesses to control PHY operation.
-+ */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/interrupt.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ethtool.h>
-+#include <linux/mii.h>
-+#include <linux/phy.h>
-+#include <linux/timer.h>
-+#include <linux/hrtimer.h>
-+#include <linux/platform_device.h>
-+
-+#include <net/ip.h>
-+#include <net/sock.h>
-+
-+#include <linux/of.h>
-+#include <linux/of_mdio.h>
-+
-+#include <linux/io.h>
-+#include <asm/irq.h>
-+#include <linux/delay.h>
-+#include <linux/regmap.h>
-+#include <linux/i2c.h>
-+#include <linux/fsl/guts.h>
-+
-+#if defined(CONFIG_NF_CONNTRACK_MARK)
-+#include <net/netfilter/nf_conntrack.h>
-+#endif
-+
-+#include "pfe_mod.h"
-+#include "pfe_eth.h"
-+#include "pfe_cdev.h"
-+
-+#define LS1012A_REV_1_0 0x87040010
-+
-+bool pfe_use_old_dts_phy;
-+bool pfe_errata_a010897;
-+
-+static void *cbus_emac_base[3];
-+static void *cbus_gpi_base[3];
-+
-+/* Forward Declaration */
-+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
-+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
-+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
-+ from_tx, int n_desc);
-+
-+/* MDIO registers */
-+#define MDIO_SGMII_CR 0x00
-+#define MDIO_SGMII_SR 0x01
-+#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
-+#define MDIO_SGMII_LINK_TMR_L 0x12
-+#define MDIO_SGMII_LINK_TMR_H 0x13
-+#define MDIO_SGMII_IF_MODE 0x14
-+
-+/* SGMII Control defines */
-+#define SGMII_CR_RST 0x8000
-+#define SGMII_CR_AN_EN 0x1000
-+#define SGMII_CR_RESTART_AN 0x0200
-+#define SGMII_CR_FD 0x0100
-+#define SGMII_CR_SPEED_SEL1_1G 0x0040
-+#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
-+ SGMII_CR_SPEED_SEL1_1G)
-+
-+/* SGMII IF Mode */
-+#define SGMII_DUPLEX_HALF 0x10
-+#define SGMII_SPEED_10MBPS 0x00
-+#define SGMII_SPEED_100MBPS 0x04
-+#define SGMII_SPEED_1GBPS 0x08
-+#define SGMII_USE_SGMII_AN 0x02
-+#define SGMII_EN 0x01
-+
-+/* SGMII Device Ability for SGMII */
-+#define SGMII_DEV_ABIL_ACK 0x4000
-+#define SGMII_DEV_ABIL_EEE_CLK_STP_EN 0x0100
-+#define SGMII_DEV_ABIL_SGMII 0x0001
-+
-+unsigned int gemac_regs[] = {
-+ 0x0004, /* Interrupt event */
-+ 0x0008, /* Interrupt mask */
-+ 0x0024, /* Ethernet control */
-+ 0x0064, /* MIB Control/Status */
-+ 0x0084, /* Receive control/status */
-+ 0x00C4, /* Transmit control */
-+ 0x00E4, /* Physical address low */
-+ 0x00E8, /* Physical address high */
-+ 0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
-+ 0x0190, /* Receive FIFO Section Full Threshold */
-+ 0x01A0, /* Transmit FIFO Section Empty Threshold */
-+ 0x01B0, /* Frame Truncation Length */
-+};
-+
-+/********************************************************************/
-+/* SYSFS INTERFACE */
-+/********************************************************************/
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+/*
-+ * pfe_eth_show_napi_stats
-+ */
-+static ssize_t pfe_eth_show_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "sched: %u\n",
-+ priv->napi_counters[NAPI_SCHED_COUNT]);
-+ len += sprintf(buf + len, "poll: %u\n",
-+ priv->napi_counters[NAPI_POLL_COUNT]);
-+ len += sprintf(buf + len, "packet: %u\n",
-+ priv->napi_counters[NAPI_PACKET_COUNT]);
-+ len += sprintf(buf + len, "budget: %u\n",
-+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
-+ len += sprintf(buf + len, "desc: %u\n",
-+ priv->napi_counters[NAPI_DESC_COUNT]);
-+
-+ return len;
-+}
-+
-+/*
-+ * pfe_eth_set_napi_stats
-+ */
-+static ssize_t pfe_eth_set_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+
-+ memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
-+
-+ return count;
-+}
-+#endif
-+#ifdef PFE_ETH_TX_STATS
-+/* pfe_eth_show_tx_stats
-+ *
-+ */
-+static ssize_t pfe_eth_show_tx_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t len = 0;
-+ int i;
-+
-+ len += sprintf(buf + len, "TX queues stats:\n");
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ i);
-+
-+ len += sprintf(buf + len, "\n");
-+ __netif_tx_lock_bh(tx_queue);
-+
-+ hif_tx_lock(&pfe->hif);
-+ len += sprintf(buf + len,
-+ "Queue %2d : credits = %10d\n"
-+ , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
-+ len += sprintf(buf + len,
-+ " tx packets = %10d\n"
-+ , pfe->tmu_credit.tx_packets[priv->id][i]);
-+ hif_tx_unlock(&pfe->hif);
-+
-+ /* Don't output additional stats if the queue was never used */
-+ if (!pfe->tmu_credit.tx_packets[priv->id][i])
-+ goto skip;
-+
-+ len += sprintf(buf + len,
-+ " clean_fail = %10d\n"
-+ , priv->clean_fail[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue = %10d\n"
-+ , priv->stop_queue_total[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue_hif = %10d\n"
-+ , priv->stop_queue_hif[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue_hif_client = %10d\n"
-+ , priv->stop_queue_hif_client[i]);
-+ len += sprintf(buf + len,
-+ " stop_queue_credit = %10d\n"
-+ , priv->stop_queue_credit[i]);
-+skip:
-+ __netif_tx_unlock_bh(tx_queue);
-+ }
-+ return len;
-+}
-+
-+/* pfe_eth_set_tx_stats
-+ *
-+ */
-+static ssize_t pfe_eth_set_tx_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ int i;
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ i);
-+
-+ __netif_tx_lock_bh(tx_queue);
-+ priv->clean_fail[i] = 0;
-+ priv->stop_queue_total[i] = 0;
-+ priv->stop_queue_hif[i] = 0;
-+ priv->stop_queue_hif_client[i] = 0;
-+ priv->stop_queue_credit[i] = 0;
-+ __netif_tx_unlock_bh(tx_queue);
-+ }
-+
-+ return count;
-+}
-+#endif
-+/* pfe_eth_show_txavail
-+ *
-+ */
-+static ssize_t pfe_eth_show_txavail(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ ssize_t len = 0;
-+ int i;
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ i);
-+
-+ __netif_tx_lock_bh(tx_queue);
-+
-+ len += sprintf(buf + len, "%d",
-+ hif_lib_tx_avail(&priv->client, i));
-+
-+ __netif_tx_unlock_bh(tx_queue);
-+
-+ if (i == (emac_txq_cnt - 1))
-+ len += sprintf(buf + len, "\n");
-+ else
-+ len += sprintf(buf + len, " ");
-+ }
-+
-+ return len;
-+}
-+
-+/* pfe_eth_show_default_priority
-+ *
-+ */
-+static ssize_t pfe_eth_show_default_priority(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ unsigned long flags;
-+ int rc;
-+
-+ spin_lock_irqsave(&priv->lock, flags);
-+ rc = sprintf(buf, "%d\n", priv->default_priority);
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+
-+ return rc;
-+}
-+
-+/* pfe_eth_set_default_priority
-+ *
-+ */
-+
-+static ssize_t pfe_eth_set_default_priority(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
-+ unsigned long flags;
-+ unsigned long prio;
-+
-+ /* kstrtoul() returns an error code and needs a result pointer */
-+ if (kstrtoul(buf, 0, &prio))
-+ return -EINVAL;
-+
-+ spin_lock_irqsave(&priv->lock, flags);
-+ priv->default_priority = prio;
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+
-+ return count;
-+}
-+
-+static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
-+static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
-+ pfe_eth_set_default_priority);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
-+ pfe_eth_set_napi_stats);
-+#endif
-+
-+#ifdef PFE_ETH_TX_STATS
-+static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
-+ pfe_eth_set_tx_stats);
-+#endif
-+
-+/*
-+ * pfe_eth_sysfs_init
-+ *
-+ */
-+static int pfe_eth_sysfs_init(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int err;
-+
-+ /* Initialize the default values */
-+
-+ /*
-+ * By default, packets without conntrack will use this default low
-+ * priority queue
-+ */
-+ priv->default_priority = 0;
-+
-+ /* Create our sysfs files */
-+ err = device_create_file(&ndev->dev, &dev_attr_default_priority);
-+ if (err) {
-+ netdev_err(ndev,
-+ "failed to create default_priority sysfs files\n");
-+ goto err_priority;
-+ }
-+
-+ err = device_create_file(&ndev->dev, &dev_attr_txavail);
-+ if (err) {
-+ netdev_err(ndev,
-+ "failed to create default_priority sysfs files\n");
-+ goto err_txavail;
-+ }
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
-+ if (err) {
-+ netdev_err(ndev, "failed to create napi stats sysfs files\n");
-+ goto err_napi;
-+ }
-+#endif
-+
-+#ifdef PFE_ETH_TX_STATS
-+ err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
-+ if (err) {
-+ netdev_err(ndev, "failed to create tx stats sysfs files\n");
-+ goto err_tx;
-+ }
-+#endif
-+
-+ return 0;
-+
-+#ifdef PFE_ETH_TX_STATS
-+err_tx:
-+#endif
-+#ifdef PFE_ETH_NAPI_STATS
-+ device_remove_file(&ndev->dev, &dev_attr_napi_stats);
-+
-+err_napi:
-+#endif
-+ device_remove_file(&ndev->dev, &dev_attr_txavail);
-+
-+err_txavail:
-+ device_remove_file(&ndev->dev, &dev_attr_default_priority);
-+
-+err_priority:
-+ return -1;
-+}
-+
-+/* pfe_eth_sysfs_exit
-+ *
-+ */
-+void pfe_eth_sysfs_exit(struct net_device *ndev)
-+{
-+#ifdef PFE_ETH_TX_STATS
-+ device_remove_file(&ndev->dev, &dev_attr_tx_stats);
-+#endif
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ device_remove_file(&ndev->dev, &dev_attr_napi_stats);
-+#endif
-+ device_remove_file(&ndev->dev, &dev_attr_txavail);
-+ device_remove_file(&ndev->dev, &dev_attr_default_priority);
-+}
-+
-+/*************************************************************************/
-+/* ETHTOOL INTERFACE */
-+/*************************************************************************/
-+
-+/*MTIP GEMAC */
-+static const struct fec_stat {
-+ char name[ETH_GSTRING_LEN];
-+ u16 offset;
-+} fec_stats[] = {
-+ /* RMON TX */
-+ { "tx_dropped", RMON_T_DROP },
-+ { "tx_packets", RMON_T_PACKETS },
-+ { "tx_broadcast", RMON_T_BC_PKT },
-+ { "tx_multicast", RMON_T_MC_PKT },
-+ { "tx_crc_errors", RMON_T_CRC_ALIGN },
-+ { "tx_undersize", RMON_T_UNDERSIZE },
-+ { "tx_oversize", RMON_T_OVERSIZE },
-+ { "tx_fragment", RMON_T_FRAG },
-+ { "tx_jabber", RMON_T_JAB },
-+ { "tx_collision", RMON_T_COL },
-+ { "tx_64byte", RMON_T_P64 },
-+ { "tx_65to127byte", RMON_T_P65TO127 },
-+ { "tx_128to255byte", RMON_T_P128TO255 },
-+ { "tx_256to511byte", RMON_T_P256TO511 },
-+ { "tx_512to1023byte", RMON_T_P512TO1023 },
-+ { "tx_1024to2047byte", RMON_T_P1024TO2047 },
-+ { "tx_GTE2048byte", RMON_T_P_GTE2048 },
-+ { "tx_octets", RMON_T_OCTETS },
-+
-+ /* IEEE TX */
-+ { "IEEE_tx_drop", IEEE_T_DROP },
-+ { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
-+ { "IEEE_tx_1col", IEEE_T_1COL },
-+ { "IEEE_tx_mcol", IEEE_T_MCOL },
-+ { "IEEE_tx_def", IEEE_T_DEF },
-+ { "IEEE_tx_lcol", IEEE_T_LCOL },
-+ { "IEEE_tx_excol", IEEE_T_EXCOL },
-+ { "IEEE_tx_macerr", IEEE_T_MACERR },
-+ { "IEEE_tx_cserr", IEEE_T_CSERR },
-+ { "IEEE_tx_sqe", IEEE_T_SQE },
-+ { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
-+ { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
-+
-+ /* RMON RX */
-+ { "rx_packets", RMON_R_PACKETS },
-+ { "rx_broadcast", RMON_R_BC_PKT },
-+ { "rx_multicast", RMON_R_MC_PKT },
-+ { "rx_crc_errors", RMON_R_CRC_ALIGN },
-+ { "rx_undersize", RMON_R_UNDERSIZE },
-+ { "rx_oversize", RMON_R_OVERSIZE },
-+ { "rx_fragment", RMON_R_FRAG },
-+ { "rx_jabber", RMON_R_JAB },
-+ { "rx_64byte", RMON_R_P64 },
-+ { "rx_65to127byte", RMON_R_P65TO127 },
-+ { "rx_128to255byte", RMON_R_P128TO255 },
-+ { "rx_256to511byte", RMON_R_P256TO511 },
-+ { "rx_512to1023byte", RMON_R_P512TO1023 },
-+ { "rx_1024to2047byte", RMON_R_P1024TO2047 },
-+ { "rx_GTE2048byte", RMON_R_P_GTE2048 },
-+ { "rx_octets", RMON_R_OCTETS },
-+
-+ /* IEEE RX */
-+ { "IEEE_rx_drop", IEEE_R_DROP },
-+ { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
-+ { "IEEE_rx_crc", IEEE_R_CRC },
-+ { "IEEE_rx_align", IEEE_R_ALIGN },
-+ { "IEEE_rx_macerr", IEEE_R_MACERR },
-+ { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
-+ { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
-+};
-+
-+static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
-+ *stats, u64 *data)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-+ data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
-+}
-+
-+static void pfe_eth_gstrings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
-+{
-+ int i;
-+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ fec_stats[i].name, ETH_GSTRING_LEN);
-+ break;
-+ }
-+}
-+
-+static int pfe_eth_stats_count(struct net_device *ndev, int sset)
-+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ARRAY_SIZE(fec_stats);
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+/*
-+ * pfe_eth_gemac_reglen - Return the length of the register structure.
-+ *
-+ */
-+static int pfe_eth_gemac_reglen(struct net_device *ndev)
-+{
-+ pr_info("%s()\n", __func__);
-+ return (sizeof(gemac_regs) / sizeof(u32));
-+}
-+
-+/*
-+ * pfe_eth_gemac_get_regs - Return the gemac register structure.
-+ *
-+ */
-+static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
-+ *regs, void *regbuf)
-+{
-+ int i;
-+
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ u32 *buf = (u32 *)regbuf;
-+
-+ pr_info("%s()\n", __func__);
-+ for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
-+ buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
-+}
-+
-+/*
-+ * pfe_eth_set_wol - Set the magic packet option, in WoL register.
-+ *
-+ */
-+static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ if (wol->wolopts & ~WAKE_MAGIC)
-+ return -EOPNOTSUPP;
-+
-+ /* for MTIP we store wol->wolopts */
-+ priv->wol = wol->wolopts;
-+
-+ device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
-+
-+ return 0;
-+}
-+
-+/*
-+ *
-+ * pfe_eth_get_wol - Get the WoL options.
-+ *
-+ */
-+static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
-+ *wol)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ wol->supported = WAKE_MAGIC;
-+ wol->wolopts = 0;
-+
-+ if (priv->wol & WAKE_MAGIC)
-+ wol->wolopts = WAKE_MAGIC;
-+
-+ memset(&wol->sopass, 0, sizeof(wol->sopass));
-+}
-+
-+/*
-+ * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
-+ *
-+ */
-+static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
-+ *drvinfo)
-+{
-+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-+ strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
-+}
-+
-+/*
-+ * pfe_eth_set_settings - Used to send commands to PHY.
-+ *
-+ */
-+static int pfe_eth_set_settings(struct net_device *ndev,
-+ const struct ethtool_link_ksettings *cmd)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct phy_device *phydev = priv->phydev;
-+
-+ if (!phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_ksettings_set(phydev, cmd);
-+}
-+
-+/*
-+ * pfe_eth_get_settings - Return the current settings in the
-+ * ethtool_link_ksettings structure.
-+ *
-+ */
-+static int pfe_eth_get_settings(struct net_device *ndev,
-+ struct ethtool_link_ksettings *cmd)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct phy_device *phydev = priv->phydev;
-+
-+ if (!phydev)
-+ return -ENODEV;
-+
-+ phy_ethtool_ksettings_get(phydev, cmd);
-+
-+ return 0;
-+}
-+
-+/*
-+ * pfe_eth_get_msglevel - Gets the debug message mask.
-+ *
-+ */
-+static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ return priv->msg_enable;
-+}
-+
-+/*
-+ * pfe_eth_set_msglevel - Sets the debug message mask.
-+ *
-+ */
-+static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ priv->msg_enable = data;
-+}
-+
-+#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
-+#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
-+#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
-+ HIF_RX_COAL_CLKS_PER_USEC)
-+
-+/*
-+ * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
-+ *
-+ */
-+static int pfe_eth_set_coalesce(struct net_device *ndev,
-+ struct ethtool_coalesce *ec)
-+{
-+ if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
-+ return -EINVAL;
-+
-+ if (!ec->rx_coalesce_usecs) {
-+ writel(0, HIF_INT_COAL);
-+ return 0;
-+ }
-+
-+ writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
-+ HIF_INT_COAL_ENABLE, HIF_INT_COAL);
-+
-+ return 0;
-+}
-+
-+/*
-+ * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
-+ *
-+ */
-+static int pfe_eth_get_coalesce(struct net_device *ndev,
-+ struct ethtool_coalesce *ec)
-+{
-+ int reg_val = readl(HIF_INT_COAL);
-+
-+ if (reg_val & HIF_INT_COAL_ENABLE)
-+ ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
-+ HIF_RX_COAL_CLKS_PER_USEC;
-+ else
-+ ec->rx_coalesce_usecs = 0;
-+
-+ return 0;
-+}
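As a worked example of the coalescing math (assuming, purely for illustration, a 250 MHz AXI clock, i.e. pfe->ctrl.sys_clk = 250000 kHz): HIF_RX_COAL_CLKS_PER_USEC is 250000 / 1000 = 250 clocks per microsecond, so a request of rx_coalesce_usecs = 100 writes (100 * 250) | HIF_INT_COAL_ENABLE, i.e. 25000 coalescing clocks, into HIF_INT_COAL, and HIF_RX_COAL_MAX_USECS caps requests at 0x7FFFFFFF / 250 microseconds (roughly 8.6 seconds).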
-+
-+/*
-+ * pfe_eth_set_pauseparam - Sets pause parameters
-+ *
-+ */
-+static int pfe_eth_set_pauseparam(struct net_device *ndev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ if (epause->tx_pause != epause->rx_pause) {
-+ netdev_info(ndev,
-+ "hardware only support enable/disable both tx and rx\n");
-+ return -EINVAL;
-+ }
-+
-+ priv->pause_flag = 0;
-+ priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
-+ priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
-+
-+ if (epause->rx_pause || epause->autoneg) {
-+ gemac_enable_pause_rx(priv->EMAC_baseaddr);
-+ writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
-+ EGPI_PAUSE_ENABLE),
-+ priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
-+ if (priv->phydev) {
-+ priv->phydev->supported |= ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause;
-+ priv->phydev->advertising |= ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause;
-+ }
-+ } else {
-+ gemac_disable_pause_rx(priv->EMAC_baseaddr);
-+ writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
-+ ~EGPI_PAUSE_ENABLE),
-+ priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
-+ if (priv->phydev) {
-+ priv->phydev->supported &= ~(ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause);
-+ priv->phydev->advertising &= ~(ADVERTISED_Pause |
-+ ADVERTISED_Asym_Pause);
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * pfe_eth_get_pauseparam - Gets pause parameters
-+ *
-+ */
-+static void pfe_eth_get_pauseparam(struct net_device *ndev,
-+ struct ethtool_pauseparam *epause)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
-+ epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
-+ epause->rx_pause = epause->tx_pause;
-+}
-+
-+/*
-+ * pfe_eth_get_hash
-+ */
-+#define PFE_HASH_BITS 6 /* #bits in hash */
-+#define CRC32_POLY 0xEDB88320
-+
-+static int pfe_eth_get_hash(u8 *addr)
-+{
-+ unsigned int i, bit, data, crc, hash;
-+
-+ /* calculate crc32 value of mac address */
-+ crc = 0xffffffff;
-+
-+ for (i = 0; i < 6; i++) {
-+ data = addr[i];
-+ for (bit = 0; bit < 8; bit++, data >>= 1) {
-+ crc = (crc >> 1) ^
-+ (((crc ^ data) & 1) ? CRC32_POLY : 0);
-+ }
-+ }
-+
-+ /*
-+ * only upper 6 bits (PFE_HASH_BITS) are used
-+ * which point to specific bit in the hash registers
-+ */
-+ hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
-+
-+ return hash;
-+}
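The bit-serial loop above is a reflected CRC-32 (polynomial 0xEDB88320) seeded with 0xffffffff and not post-inverted, which is what the kernel's crc32_le() computes; an equivalent form, offered only as a sketch (not what the driver uses), would be:

#include <linux/crc32.h>
#include <linux/etherdevice.h>

static int pfe_eth_get_hash_crc32le(const u8 *addr)
{
	/* Same upper-6-bit (PFE_HASH_BITS) selection as pfe_eth_get_hash() */
	return (crc32_le(~0U, addr, ETH_ALEN) >> (32 - PFE_HASH_BITS)) & 0x3f;
}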
-+
-+const struct ethtool_ops pfe_ethtool_ops = {
-+ .get_drvinfo = pfe_eth_get_drvinfo,
-+ .get_regs_len = pfe_eth_gemac_reglen,
-+ .get_regs = pfe_eth_gemac_get_regs,
-+ .get_link = ethtool_op_get_link,
-+ .get_wol = pfe_eth_get_wol,
-+ .set_wol = pfe_eth_set_wol,
-+ .set_pauseparam = pfe_eth_set_pauseparam,
-+ .get_pauseparam = pfe_eth_get_pauseparam,
-+ .get_strings = pfe_eth_gstrings,
-+ .get_sset_count = pfe_eth_stats_count,
-+ .get_ethtool_stats = pfe_eth_fill_stats,
-+ .get_msglevel = pfe_eth_get_msglevel,
-+ .set_msglevel = pfe_eth_set_msglevel,
-+ .set_coalesce = pfe_eth_set_coalesce,
-+ .get_coalesce = pfe_eth_get_coalesce,
-+ .get_link_ksettings = pfe_eth_get_settings,
-+ .set_link_ksettings = pfe_eth_set_settings,
-+};
-+
-+/* pfe_eth_mdio_reset
-+ */
-+int pfe_eth_mdio_reset(struct mii_bus *bus)
-+{
-+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
-+ u32 phy_speed;
-+
-+
-+ mutex_lock(&bus->mdio_lock);
-+
-+ /*
-+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
-+ *
-+ * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
-+ * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
-+ */
-+ phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
-+ << EMAC_MII_SPEED_SHIFT);
-+ phy_speed |= EMAC_HOLDTIME(0x5);
-+ __raw_writel(phy_speed, priv->mdio_base + EMAC_MII_CTRL_REG);
-+
-+ mutex_unlock(&bus->mdio_lock);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_mdio_timeout
-+ *
-+ */
-+static int pfe_eth_mdio_timeout(struct pfe_mdio_priv_s *priv, int timeout)
-+{
-+ while (!(__raw_readl(priv->mdio_base + EMAC_IEVENT_REG) &
-+ EMAC_IEVENT_MII)) {
-+ if (timeout-- <= 0)
-+ return -1;
-+ usleep_range(10, 20);
-+ }
-+ __raw_writel(EMAC_IEVENT_MII, priv->mdio_base + EMAC_IEVENT_REG);
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_mux(u8 muxval)
-+{
-+ struct i2c_adapter *a;
-+ struct i2c_msg msg;
-+ unsigned char buf[2];
-+ int ret;
-+
-+ a = i2c_get_adapter(0);
-+ if (!a)
-+ return -ENODEV;
-+
-+ /* write the mux value to register 0x54 of the I2C device at 0x66 */
-+ buf[0] = 0x54; /* reg number */
-+ buf[1] = (muxval << 6) | 0x3; /* data */
-+ msg.addr = 0x66;
-+ msg.buf = buf;
-+ msg.len = 2;
-+ msg.flags = 0;
-+ ret = i2c_transfer(a, &msg, 1);
-+ i2c_put_adapter(a);
-+ if (ret != 1)
-+ return -ENODEV;
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
-+ int dev_addr, int regnum)
-+{
-+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
-+
-+ __raw_writel(EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA(dev_addr) |
-+ EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
-+ priv->mdio_base + EMAC_MII_DATA_REG);
-+
-+ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ dev_err(&bus->dev, "phy MDIO address write timeout\n");
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
-+ u16 value)
-+{
-+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
-+
-+ /* To access external PHYs on the QDS board, the mux needs to be configured */
-+ if ((mii_id) && (pfe->mdio_muxval[mii_id]))
-+ pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
-+
-+ if (regnum & MII_ADDR_C45) {
-+ pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
-+ regnum & 0xffff);
-+ __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
-+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
-+ priv->mdio_base + EMAC_MII_DATA_REG);
-+ } else {
-+ /* start a write op */
-+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA(regnum) |
-+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
-+ priv->mdio_base + EMAC_MII_DATA_REG);
-+ }
-+
-+ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ dev_err(&bus->dev, "%s: phy MDIO write timeout\n", __func__);
-+ return -1;
-+ }
-+ return 0;
-+}
-+
-+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-+{
-+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
-+ u16 value = 0;
-+
-+ /* To access external PHYs on the QDS board, the mux needs to be configured */
-+ if ((mii_id) && (pfe->mdio_muxval[mii_id]))
-+ pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
-+
-+ if (regnum & MII_ADDR_C45) {
-+ pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
-+ regnum & 0xffff);
-+ __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
-+ EMAC_MII_DATA_TA,
-+ priv->mdio_base + EMAC_MII_DATA_REG);
-+ } else {
-+ /* start a read op */
-+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
-+ EMAC_MII_DATA_PA(mii_id) |
-+ EMAC_MII_DATA_RA(regnum) |
-+ EMAC_MII_DATA_TA, priv->mdio_base +
-+ EMAC_MII_DATA_REG);
-+ }
-+
-+ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ dev_err(&bus->dev, "%s: phy MDIO read timeout\n", __func__);
-+ return -1;
-+ }
-+
-+ value = EMAC_MII_DATA(__raw_readl(priv->mdio_base +
-+ EMAC_MII_DATA_REG));
-+ return value;
-+}
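The MII_ADDR_C45 branches above expect the Clause-45 MMD number in bits 16..20 of regnum, the same encoding used by the kernel's mdio layer. An illustrative caller (bus and phy_addr are placeholders, not taken from the driver) reading the PCS control register might look like:

#include <linux/mdio.h>
#include <linux/phy.h>

/* Hypothetical read of the PCS control register (MMD 3, register 0)
 * through the Clause-45 path of pfe_eth_mdio_read().
 */
static int example_c45_read(struct mii_bus *bus, int phy_addr)
{
	u32 regnum = MII_ADDR_C45 | (MDIO_MMD_PCS << 16) | MDIO_CTRL1;

	return pfe_eth_mdio_read(bus, phy_addr, regnum);
}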
-+
-+static int pfe_eth_mdio_init(struct pfe *pfe,
-+ struct ls1012a_pfe_platform_data *pfe_info,
-+ int ii)
-+{
-+ struct pfe_mdio_priv_s *priv = NULL;
-+ struct ls1012a_mdio_platform_data *mdio_info;
-+ struct mii_bus *bus;
-+ struct device_node *mdio_node;
-+ int rc = 0;
-+
-+ mdio_info = (struct ls1012a_mdio_platform_data *)
-+ pfe_info->ls1012a_mdio_pdata;
-+ mdio_info->id = ii;
-+
-+ bus = mdiobus_alloc_size(sizeof(struct pfe_mdio_priv_s));
-+ if (!bus) {
-+ pr_err("mdiobus_alloc() failed\n");
-+ rc = -ENOMEM;
-+ goto err_mdioalloc;
-+ }
-+
-+ bus->name = "ls1012a MDIO Bus";
-+ snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", mdio_info->id);
-+
-+ bus->read = &pfe_eth_mdio_read;
-+ bus->write = &pfe_eth_mdio_write;
-+ bus->reset = &pfe_eth_mdio_reset;
-+ bus->parent = pfe->dev;
-+ bus->phy_mask = mdio_info->phy_mask;
-+ bus->irq[0] = mdio_info->irq[0];
-+ priv = bus->priv;
-+ priv->mdio_base = cbus_emac_base[ii];
-+
-+ priv->mdc_div = mdio_info->mdc_div;
-+ if (!priv->mdc_div)
-+ priv->mdc_div = 64;
-+ dev_info(bus->parent, "%s: mdc_div: %d, phy_mask: %x\n",
-+ __func__, priv->mdc_div, bus->phy_mask);
-+
-+ mdio_node = of_get_child_by_name(pfe->dev->of_node, "mdio");
-+ if ((mdio_info->id == 0) && mdio_node) {
-+ rc = of_mdiobus_register(bus, mdio_node);
-+ of_node_put(mdio_node);
-+ } else {
-+ rc = mdiobus_register(bus);
-+ }
-+
-+ if (rc) {
-+ dev_err(bus->parent, "mdiobus_register(%s) failed\n",
-+ bus->name);
-+ goto err_mdioregister;
-+ }
-+
-+ priv->mii_bus = bus;
-+ pfe->mdio.mdio_priv[ii] = priv;
-+
-+ pfe_eth_mdio_reset(bus);
-+
-+ return 0;
-+
-+err_mdioregister:
-+ mdiobus_free(bus);
-+err_mdioalloc:
-+ return rc;
-+}
-+
-+/* pfe_eth_mdio_exit
-+ */
-+static void pfe_eth_mdio_exit(struct pfe *pfe,
-+ int ii)
-+{
-+ struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[ii];
-+ struct mii_bus *bus = mdio_priv->mii_bus;
-+
-+ if (!bus)
-+ return;
-+ mdiobus_unregister(bus);
-+ mdiobus_free(bus);
-+}
-+
-+/* pfe_get_phydev_speed
-+ */
-+static int pfe_get_phydev_speed(struct phy_device *phydev)
-+{
-+ switch (phydev->speed) {
-+ case 10:
-+ return SPEED_10M;
-+ case 100:
-+ return SPEED_100M;
-+ case 1000:
-+ default:
-+ return SPEED_1000M;
-+ }
-+}
-+
-+/* pfe_set_rgmii_speed
-+ */
-+#define RGMIIPCR 0x434
-+/* RGMIIPCR bit definitions*/
-+#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
-+#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
-+#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
-+#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
-+#define SCFG_RGMIIPCR_SETFD (0x00000001)
-+
-+#define MDIOSELCR 0x484
-+#define MDIOSEL_SERDES 0x0
-+#define MDIOSEL_EXTPHY 0x80000000
-+
-+static void pfe_set_rgmii_speed(struct phy_device *phydev)
-+{
-+ u32 rgmii_pcr;
-+
-+ regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
-+ rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
-+
-+ switch (phydev->speed) {
-+ case 10:
-+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
-+ break;
-+ case 1000:
-+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
-+ break;
-+ case 100:
-+ default:
-+ /* Default is 100M */
-+ break;
-+ }
-+ regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
-+}
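-+/*
-+ * e.g. when the PHY reports 1 Gbps, pfe_set_rgmii_speed() above leaves
-+ * RGMIIPCR with SCFG_RGMIIPCR_SETSP_1000M (0x4) set and SETSP_10M (0x2)
-+ * cleared; at 100 Mbps both SETSP bits stay cleared, since
-+ * SCFG_RGMIIPCR_SETSP_100M is defined as 0x0.
-+ */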
-+
-+/* pfe_get_phydev_duplex
-+ */
-+static int pfe_get_phydev_duplex(struct phy_device *phydev)
-+{
-+ /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
-+ return DUPLEX_FULL;
-+}
-+
-+/* pfe_eth_adjust_link
-+ */
-+static void pfe_eth_adjust_link(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ unsigned long flags;
-+ struct phy_device *phydev = priv->phydev;
-+ int new_state = 0;
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ spin_lock_irqsave(&priv->lock, flags);
-+
-+ if (phydev->link) {
-+ /*
-+ * Now we make sure that we can be in full duplex mode.
-+ * If not, we operate in half-duplex mode.
-+ */
-+ if (phydev->duplex != priv->oldduplex) {
-+ new_state = 1;
-+ gemac_set_duplex(priv->EMAC_baseaddr,
-+ pfe_get_phydev_duplex(phydev));
-+ priv->oldduplex = phydev->duplex;
-+ }
-+
-+ if (phydev->speed != priv->oldspeed) {
-+ new_state = 1;
-+ gemac_set_speed(priv->EMAC_baseaddr,
-+ pfe_get_phydev_speed(phydev));
-+ if (priv->einfo->mii_config ==
-+ PHY_INTERFACE_MODE_RGMII_TXID)
-+ pfe_set_rgmii_speed(phydev);
-+ priv->oldspeed = phydev->speed;
-+ }
-+
-+ if (!priv->oldlink) {
-+ new_state = 1;
-+ priv->oldlink = 1;
-+ }
-+
-+ } else if (priv->oldlink) {
-+ new_state = 1;
-+ priv->oldlink = 0;
-+ priv->oldspeed = 0;
-+ priv->oldduplex = -1;
-+ }
-+
-+ if (new_state && netif_msg_link(priv))
-+ phy_print_status(phydev);
-+
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+
-+	/* Now, report the link details to the cdev.
-+	 * XXX: Is locking required here? (uniprocessor arch)
-+	 * Alternatively, move this inside the spinlock above.
-+	 */
-+ if (us && priv->einfo->gem_id < PFE_CDEV_ETH_COUNT) {
-+ pr_debug("Changing link state from (%u) to (%u) for ID=(%u)\n",
-+ link_states[priv->einfo->gem_id].state,
-+ phydev->link,
-+ priv->einfo->gem_id);
-+ link_states[priv->einfo->gem_id].phy_id = priv->einfo->gem_id;
-+ link_states[priv->einfo->gem_id].state = phydev->link;
-+ }
-+}
-+
-+/* pfe_phy_exit
-+ */
-+static void pfe_phy_exit(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ phy_disconnect(priv->phydev);
-+ priv->phydev = NULL;
-+}
-+
-+/* pfe_eth_stop
-+ */
-+static void pfe_eth_stop(struct net_device *ndev, int wake)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ if (wake) {
-+ gemac_tx_disable(priv->EMAC_baseaddr);
-+ } else {
-+ gemac_disable(priv->EMAC_baseaddr);
-+ gpi_disable(priv->GPI_baseaddr);
-+
-+ if (priv->phydev)
-+ phy_stop(priv->phydev);
-+ }
-+}
-+
-+/* pfe_eth_start
-+ */
-+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
-+{
-+ netif_info(priv, drv, priv->ndev, "%s\n", __func__);
-+
-+ if (priv->phydev)
-+ phy_start(priv->phydev);
-+
-+ gpi_enable(priv->GPI_baseaddr);
-+ gemac_enable(priv->EMAC_baseaddr);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Configure the on-chip SerDes through MDIO
-+ */
-+static void ls1012a_configure_serdes(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *eth_priv = netdev_priv(ndev);
-+ struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[eth_priv->id];
-+ int sgmii_2500 = 0;
-+ struct mii_bus *bus = mdio_priv->mii_bus;
-+ u16 value = 0;
-+
-+ if (eth_priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
-+ sgmii_2500 = 1;
-+
-+ netif_info(eth_priv, drv, ndev, "%s\n", __func__);
-+ /* PCS configuration done with corresponding GEMAC */
-+
-+ pfe_eth_mdio_read(bus, 0, MDIO_SGMII_CR);
-+ pfe_eth_mdio_read(bus, 0, MDIO_SGMII_SR);
-+
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, SGMII_CR_RST);
-+
-+ if (sgmii_2500) {
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE, SGMII_SPEED_1GBPS
-+ | SGMII_EN);
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
-+ SGMII_DEV_ABIL_ACK | SGMII_DEV_ABIL_SGMII);
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0xa120);
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x7);
-+		/* Autonegotiation needs to be disabled for 2.5G SGMII mode */
-+ value = SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
-+ } else {
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE,
-+ SGMII_SPEED_1GBPS
-+ | SGMII_USE_SGMII_AN
-+ | SGMII_EN);
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
-+ SGMII_DEV_ABIL_EEE_CLK_STP_EN
-+ | 0xa0
-+ | SGMII_DEV_ABIL_SGMII);
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0x400);
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x0);
-+ value = SGMII_CR_AN_EN | SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
-+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
-+ }
-+}
-+
-+/*
-+ * pfe_phy_init
-+ *
-+ */
-+static int pfe_phy_init(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct phy_device *phydev;
-+ char phy_id[MII_BUS_ID_SIZE + 3];
-+ char bus_id[MII_BUS_ID_SIZE];
-+ phy_interface_t interface;
-+
-+ priv->oldlink = 0;
-+ priv->oldspeed = 0;
-+ priv->oldduplex = -1;
-+
-+ snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
-+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-+ priv->einfo->phy_id);
-+ netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
-+ interface = priv->einfo->mii_config;
-+ if ((interface == PHY_INTERFACE_MODE_SGMII) ||
-+ (interface == PHY_INTERFACE_MODE_2500SGMII)) {
-+		/* Configure SGMII PCS */
-+ if (pfe->scfg) {
-+ /* Config MDIO from serdes */
-+ regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_SERDES);
-+ }
-+ ls1012a_configure_serdes(ndev);
-+ }
-+
-+ if (pfe->scfg) {
-+		/* Config MDIO from PAD */
-+ regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_EXTPHY);
-+ }
-+
-+ priv->oldlink = 0;
-+ priv->oldspeed = 0;
-+ priv->oldduplex = -1;
-+ pr_info("%s interface %x\n", __func__, interface);
-+
-+ if (priv->phy_node) {
-+ phydev = of_phy_connect(ndev, priv->phy_node,
-+ pfe_eth_adjust_link, 0,
-+ priv->einfo->mii_config);
-+ if (!(phydev)) {
-+ netdev_err(ndev, "Unable to connect to phy\n");
-+ return -ENODEV;
-+ }
-+
-+ } else {
-+ phydev = phy_connect(ndev, phy_id,
-+ &pfe_eth_adjust_link, interface);
-+ if (IS_ERR(phydev)) {
-+ netdev_err(ndev, "Unable to connect to phy\n");
-+ return PTR_ERR(phydev);
-+ }
-+ }
-+
-+ priv->phydev = phydev;
-+ phydev->irq = PHY_POLL;
-+
-+ return 0;
-+}
-+
-+/* pfe_gemac_init
-+ */
-+static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
-+{
-+ struct gemac_cfg cfg;
-+
-+ netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
-+
-+ cfg.speed = SPEED_1000M;
-+ cfg.duplex = DUPLEX_FULL;
-+
-+ gemac_set_config(priv->EMAC_baseaddr, &cfg);
-+ gemac_allow_broadcast(priv->EMAC_baseaddr);
-+ gemac_enable_1536_rx(priv->EMAC_baseaddr);
-+ gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
-+ gemac_enable_pause_rx(priv->EMAC_baseaddr);
-+ gemac_set_bus_width(priv->EMAC_baseaddr, 64);
-+
-+	/* GEM will perform checksum verifications */
-+ if (priv->ndev->features & NETIF_F_RXCSUM)
-+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
-+ else
-+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_event_handler
-+ */
-+static int pfe_eth_event_handler(void *data, int event, int qno)
-+{
-+ struct pfe_eth_priv_s *priv = data;
-+
-+ switch (event) {
-+ case EVENT_RX_PKT_IND:
-+
-+ if (qno == 0) {
-+ if (napi_schedule_prep(&priv->high_napi)) {
-+ netif_info(priv, intr, priv->ndev,
-+ "%s: schedule high prio poll\n"
-+ , __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+
-+ __napi_schedule(&priv->high_napi);
-+ }
-+ } else if (qno == 1) {
-+ if (napi_schedule_prep(&priv->low_napi)) {
-+ netif_info(priv, intr, priv->ndev,
-+ "%s: schedule low prio poll\n"
-+ , __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+ __napi_schedule(&priv->low_napi);
-+ }
-+ } else if (qno == 2) {
-+ if (napi_schedule_prep(&priv->lro_napi)) {
-+ netif_info(priv, intr, priv->ndev,
-+ "%s: schedule lro prio poll\n"
-+ , __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+ __napi_schedule(&priv->lro_napi);
-+ }
-+ }
-+
-+ break;
-+
-+ case EVENT_TXDONE_IND:
-+ pfe_eth_flush_tx(priv);
-+ hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
-+ break;
-+ case EVENT_HIGH_RX_WM:
-+ default:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int pfe_eth_change_mtu(struct net_device *ndev, int new_mtu)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
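-+	/*
-+	 * Program the EMAC max RX frame length for the new MTU, e.g.
-+	 * new_mtu = 1500 -> 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518.
-+	 */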
-+ ndev->mtu = new_mtu;
-+ new_mtu += ETH_HLEN + ETH_FCS_LEN;
-+ gemac_set_rx_max_fl(priv->EMAC_baseaddr, new_mtu);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_open
-+ */
-+static int pfe_eth_open(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct hif_client_s *client;
-+ int rc;
-+
-+ netif_info(priv, ifup, ndev, "%s\n", __func__);
-+
-+ /* Register client driver with HIF */
-+ client = &priv->client;
-+ memset(client, 0, sizeof(*client));
-+ client->id = PFE_CL_GEM0 + priv->id;
-+ client->tx_qn = emac_txq_cnt;
-+ client->rx_qn = EMAC_RXQ_CNT;
-+ client->priv = priv;
-+ client->pfe = priv->pfe;
-+ client->event_handler = pfe_eth_event_handler;
-+
-+ client->tx_qsize = EMAC_TXQ_DEPTH;
-+ client->rx_qsize = EMAC_RXQ_DEPTH;
-+
-+ rc = hif_lib_client_register(client);
-+ if (rc) {
-+ netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
-+ __func__, client->id);
-+ goto err0;
-+ }
-+
-+ netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
-+ client);
-+
-+ pfe_gemac_init(priv);
-+
-+ if (!is_valid_ether_addr(ndev->dev_addr)) {
-+ netdev_err(ndev, "%s: invalid MAC address\n", __func__);
-+ rc = -EADDRNOTAVAIL;
-+ goto err1;
-+ }
-+
-+ gemac_set_laddrN(priv->EMAC_baseaddr,
-+ (struct pfe_mac_addr *)ndev->dev_addr, 1);
-+
-+ napi_enable(&priv->high_napi);
-+ napi_enable(&priv->low_napi);
-+ napi_enable(&priv->lro_napi);
-+
-+ rc = pfe_eth_start(priv);
-+
-+ netif_tx_wake_all_queues(ndev);
-+
-+ return rc;
-+
-+err1:
-+ hif_lib_client_unregister(&priv->client);
-+
-+err0:
-+ return rc;
-+}
-+
-+/*
-+ * pfe_eth_shutdown
-+ */
-+int pfe_eth_shutdown(struct net_device *ndev, int wake)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int i, qstatus;
-+ unsigned long next_poll = jiffies + 1, end = jiffies +
-+ (TX_POLL_TIMEOUT_MS * HZ) / 1000;
-+ int tx_pkts, prv_tx_pkts;
-+
-+ netif_info(priv, ifdown, ndev, "%s\n", __func__);
-+
-+ for (i = 0; i < emac_txq_cnt; i++)
-+ hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
-+
-+ netif_tx_stop_all_queues(ndev);
-+
-+ do {
-+ tx_pkts = 0;
-+ pfe_eth_flush_tx(priv);
-+
-+ for (i = 0; i < emac_txq_cnt; i++)
-+ tx_pkts += hif_lib_tx_pending(&priv->client, i);
-+
-+ if (tx_pkts) {
-+			/* Don't wait forever; break if we cross the max timeout */
-+ if (time_after(jiffies, end)) {
-+ pr_err(
-+ "(%s)Tx is not complete after %dmsec\n",
-+ ndev->name, TX_POLL_TIMEOUT_MS);
-+ break;
-+ }
-+
-+ pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
-+ , __func__, ndev->name, tx_pkts);
-+ if (need_resched())
-+ schedule();
-+ }
-+
-+ } while (tx_pkts);
-+
-+ end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
-+
-+ prv_tx_pkts = tmu_pkts_processed(priv->id);
-+	/*
-+	 * Wait until the TMU has transmitted all pending packets:
-+	 * poll tmu_qstatus and the TMU processed-packet count every 10ms.
-+	 * Consider the TMU busy if its queue is still pending or if it has
-+	 * processed any packets since the last poll.
-+	 */
-+ while (1) {
-+ if (time_after(jiffies, next_poll)) {
-+ tx_pkts = tmu_pkts_processed(priv->id);
-+ qstatus = tmu_qstatus(priv->id) & 0x7ffff;
-+
-+ if (!qstatus && (tx_pkts == prv_tx_pkts))
-+ break;
-+			/* Don't wait forever; break if we cross the max
-+			 * timeout (TX_POLL_TIMEOUT_MS)
-+			 */
-+ if (time_after(jiffies, end)) {
-+ pr_err("TMU%d is busy after %dmsec\n",
-+ priv->id, TX_POLL_TIMEOUT_MS);
-+ break;
-+ }
-+ prv_tx_pkts = tx_pkts;
-+ next_poll++;
-+ }
-+ if (need_resched())
-+ schedule();
-+ }
-+	/* Wait a bit longer for any in-flight packet to finish transmitting */
-+ next_poll = jiffies + 1;
-+ while (1) {
-+ if (time_after(jiffies, next_poll))
-+ break;
-+ if (need_resched())
-+ schedule();
-+ }
-+
-+ pfe_eth_stop(ndev, wake);
-+
-+ napi_disable(&priv->lro_napi);
-+ napi_disable(&priv->low_napi);
-+ napi_disable(&priv->high_napi);
-+
-+ hif_lib_client_unregister(&priv->client);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_close
-+ *
-+ */
-+static int pfe_eth_close(struct net_device *ndev)
-+{
-+ pfe_eth_shutdown(ndev, 0);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_suspend
-+ *
-+ * return value : 1 if the netdevice is configured to wake up the system
-+ * 0 otherwise
-+ */
-+int pfe_eth_suspend(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int retval = 0;
-+
-+ if (priv->wol) {
-+ gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
-+ retval = 1;
-+ }
-+ pfe_eth_shutdown(ndev, priv->wol);
-+
-+ return retval;
-+}
-+
-+/* pfe_eth_resume
-+ *
-+ */
-+int pfe_eth_resume(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ if (priv->wol)
-+ gemac_set_wol(priv->EMAC_baseaddr, 0);
-+ gemac_tx_enable(priv->EMAC_baseaddr);
-+
-+ return pfe_eth_open(ndev);
-+}
-+
-+/* pfe_eth_get_queuenum
-+ */
-+static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
-+ *skb)
-+{
-+ int queuenum = 0;
-+ unsigned long flags;
-+
-+	/*
-+	 * Get the Fast Path queue number: use the conntrack mark (if
-+	 * conntrack exists), then the packet mark (if any), then fall back
-+	 * to the default priority.
-+	 */
-+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
-+ if (skb->_nfct) {
-+ enum ip_conntrack_info cinfo;
-+ struct nf_conn *ct;
-+
-+ ct = nf_ct_get(skb, &cinfo);
-+
-+ if (ct) {
-+ u32 connmark;
-+
-+ connmark = ct->mark;
-+
-+ if ((connmark & 0x80000000) && priv->id != 0)
-+ connmark >>= 16;
-+
-+ queuenum = connmark & EMAC_QUEUENUM_MASK;
-+ }
-+ } else {/* continued after #endif ... */
-+#endif
-+ if (skb->mark) {
-+ queuenum = skb->mark & EMAC_QUEUENUM_MASK;
-+ } else {
-+ spin_lock_irqsave(&priv->lock, flags);
-+ queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
-+ spin_unlock_irqrestore(&priv->lock, flags);
-+ }
-+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
-+ }
-+#endif
-+ return queuenum;
-+}
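-+/*
-+ * Examples of the selection order in pfe_eth_get_queuenum(): with
-+ * emac_txq_cnt = 16 (EMAC_TXQ_CNT) the mask is 0xf, so a conntrack mark of
-+ * 0x00050003 selects queue 3, a conntrack mark of 0x80030000 on a GEMAC
-+ * with non-zero id is shifted down to 0x8003 and also selects queue 3,
-+ * and without conntrack an skb->mark of 5 selects queue 5; otherwise the
-+ * default_priority is used.
-+ */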
-+
-+/* pfe_eth_might_stop_tx
-+ *
-+ */
-+static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
-+ struct netdev_queue *tx_queue,
-+ unsigned int n_desc,
-+ unsigned int n_segs)
-+{
-+ ktime_t kt;
-+ int tried = 0;
-+
-+try_again:
-+ if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
-+ (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
-+ (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
-+ if (!tried) {
-+ __hif_lib_update_credit(&priv->client, queuenum);
-+ tried = 1;
-+ goto try_again;
-+ }
-+#ifdef PFE_ETH_TX_STATS
-+ if (__hif_tx_avail(&pfe->hif) < n_desc) {
-+ priv->stop_queue_hif[queuenum]++;
-+ } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
-+ priv->stop_queue_hif_client[queuenum]++;
-+ } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
-+ n_segs) {
-+ priv->stop_queue_credit[queuenum]++;
-+ }
-+ priv->stop_queue_total[queuenum]++;
-+#endif
-+ netif_tx_stop_queue(tx_queue);
-+
-+ kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
-+ NSEC_PER_MSEC);
-+ hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
-+ HRTIMER_MODE_REL);
-+ return -1;
-+ } else {
-+ return 0;
-+ }
-+}
-+
-+#define SA_MAX_OP 2
-+/* pfe_hif_send_packet
-+ *
-+ * At this level if TX fails we drop the packet
-+ */
-+static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
-+ *priv, int queuenum)
-+{
-+ struct skb_shared_info *sh = skb_shinfo(skb);
-+ unsigned int nr_frags;
-+ u32 ctrl = 0;
-+
-+ netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
-+
-+ if (skb_is_gso(skb)) {
-+ priv->stats.tx_dropped++;
-+ return;
-+ }
-+
-+ if (skb->ip_summed == CHECKSUM_PARTIAL)
-+ ctrl = HIF_CTRL_TX_CHECKSUM;
-+
-+ nr_frags = sh->nr_frags;
-+
-+ if (nr_frags) {
-+ skb_frag_t *f;
-+ int i;
-+
-+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
-+ skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
-+ skb);
-+
-+ for (i = 0; i < nr_frags - 1; i++) {
-+ f = &sh->frags[i];
-+ __hif_lib_xmit_pkt(&priv->client, queuenum,
-+ skb_frag_address(f),
-+ skb_frag_size(f),
-+ 0x0, 0x0, skb);
-+ }
-+
-+ f = &sh->frags[i];
-+
-+ __hif_lib_xmit_pkt(&priv->client, queuenum,
-+ skb_frag_address(f), skb_frag_size(f),
-+ 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
-+ skb);
-+
-+ netif_info(priv, tx_queued, priv->ndev,
-+ "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
-+ __func__, skb, nr_frags, skb->len);
-+ } else {
-+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
-+ skb->len, ctrl, HIF_FIRST_BUFFER |
-+ HIF_LAST_BUFFER | HIF_DATA_VALID,
-+ skb);
-+ netif_info(priv, tx_queued, priv->ndev,
-+ "%s: pkt sent successfully skb:%p len:%d\n",
-+ __func__, skb, skb->len);
-+ }
-+ hif_tx_dma_start();
-+ priv->stats.tx_packets++;
-+ priv->stats.tx_bytes += skb->len;
-+ hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
-+}
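-+/*
-+ * e.g. in pfe_hif_send_packet() above, an skb with a linear header and two
-+ * paged fragments is pushed as three HIF buffers: the header with
-+ * HIF_FIRST_BUFFER, fragment 0 with no flags, and fragment 1 with
-+ * HIF_LAST_BUFFER | HIF_DATA_VALID.
-+ */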
-+
-+/* pfe_eth_flush_txQ
-+ */
-+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
-+ from_tx, int n_desc)
-+{
-+ struct sk_buff *skb;
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ tx_q_num);
-+ unsigned int flags;
-+
-+ netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
-+
-+ if (!from_tx)
-+ __netif_tx_lock_bh(tx_queue);
-+
-+ /* Clean HIF and client queue */
-+ while ((skb = hif_lib_tx_get_next_complete(&priv->client,
-+ tx_q_num, &flags,
-+ HIF_TX_DESC_NT))) {
-+ if (flags & HIF_DATA_VALID)
-+ dev_kfree_skb_any(skb);
-+ }
-+ if (!from_tx)
-+ __netif_tx_unlock_bh(tx_queue);
-+}
-+
-+/* pfe_eth_flush_tx
-+ */
-+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
-+{
-+ int ii;
-+
-+ netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
-+
-+ for (ii = 0; ii < emac_txq_cnt; ii++) {
-+ pfe_eth_flush_txQ(priv, ii, 0, 0);
-+ __hif_lib_update_credit(&priv->client, ii);
-+ }
-+}
-+
-+void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
-+ *n_segs)
-+{
-+ struct skb_shared_info *sh = skb_shinfo(skb);
-+
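-+	/*
-+	 * e.g. a linear skb needs one HIF descriptor, while an skb with
-+	 * three paged fragments needs four (header + 3 fragments); n_segs
-+	 * is 1 in both cases.
-+	 */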
-+ /* Scattered data */
-+ if (sh->nr_frags) {
-+ *n_desc = sh->nr_frags + 1;
-+ *n_segs = 1;
-+ /* Regular case */
-+ } else {
-+ *n_desc = 1;
-+ *n_segs = 1;
-+ }
-+}
-+
-+/* pfe_eth_send_packet
-+ */
-+static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int tx_q_num = skb_get_queue_mapping(skb);
-+ int n_desc, n_segs;
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ tx_q_num);
-+
-+ netif_info(priv, tx_queued, ndev, "%s\n", __func__);
-+
-+ if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
-+ sizeof(unsigned long)))) {
-+ netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
-+ __func__);
-+
-+ if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
-+ long)), 0, GFP_ATOMIC)) {
-+			/* No need to re-transmit, no way to recover */
-+ kfree_skb(skb);
-+ priv->stats.tx_dropped++;
-+ return NETDEV_TX_OK;
-+ }
-+ }
-+
-+ pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
-+
-+ hif_tx_lock(&pfe->hif);
-+ if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
-+ n_segs))) {
-+#ifdef PFE_ETH_TX_STATS
-+ if (priv->was_stopped[tx_q_num]) {
-+ priv->clean_fail[tx_q_num]++;
-+ priv->was_stopped[tx_q_num] = 0;
-+ }
-+#endif
-+ hif_tx_unlock(&pfe->hif);
-+ return NETDEV_TX_BUSY;
-+ }
-+
-+ pfe_hif_send_packet(skb, priv, tx_q_num);
-+
-+ hif_tx_unlock(&pfe->hif);
-+
-+ tx_queue->trans_start = jiffies;
-+
-+#ifdef PFE_ETH_TX_STATS
-+ priv->was_stopped[tx_q_num] = 0;
-+#endif
-+
-+ return NETDEV_TX_OK;
-+}
-+
-+/* pfe_eth_select_queue
-+ *
-+ */
-+static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
-+ void *accel_priv,
-+ select_queue_fallback_t fallback)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ return pfe_eth_get_queuenum(priv, skb);
-+}
-+
-+/* pfe_eth_get_stats
-+ */
-+static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ return &priv->stats;
-+}
-+
-+/* pfe_eth_set_mac_address
-+ */
-+static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct sockaddr *sa = addr;
-+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
-+
-+ if (!is_valid_ether_addr(sa->sa_data))
-+ return -EADDRNOTAVAIL;
-+
-+ memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
-+
-+ gemac_set_laddrN(priv->EMAC_baseaddr,
-+ (struct pfe_mac_addr *)ndev->dev_addr, 1);
-+
-+ return 0;
-+}
-+
-+/* pfe_eth_enet_addr_byte_mac
-+ */
-+int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
-+ struct pfe_mac_addr *enet_addr)
-+{
-+ if (!enet_byte_addr || !enet_addr) {
-+ return -1;
-+
-+ } else {
-+ enet_addr->bottom = enet_byte_addr[0] |
-+ (enet_byte_addr[1] << 8) |
-+ (enet_byte_addr[2] << 16) |
-+ (enet_byte_addr[3] << 24);
-+ enet_addr->top = enet_byte_addr[4] |
-+ (enet_byte_addr[5] << 8);
-+ return 0;
-+ }
-+}
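-+/*
-+ * e.g. 00:04:9f:11:22:33 is packed by pfe_eth_enet_addr_byte_mac() above as
-+ * bottom = 0x119f0400 and top = 0x00003322 (byte 0 ends up in the least
-+ * significant byte of 'bottom').
-+ */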
-+
-+/* pfe_eth_set_multi
-+ */
-+static void pfe_eth_set_multi(struct net_device *ndev)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ struct pfe_mac_addr hash_addr; /* hash register structure */
-+ /* specific mac address register structure */
-+ struct pfe_mac_addr spec_addr;
-+ int result; /* index into hash register to set.. */
-+ int uc_count = 0;
-+ struct netdev_hw_addr *ha;
-+
-+ if (ndev->flags & IFF_PROMISC) {
-+ netif_info(priv, drv, ndev, "entering promiscuous mode\n");
-+
-+ priv->promisc = 1;
-+ gemac_enable_copy_all(priv->EMAC_baseaddr);
-+ } else {
-+ priv->promisc = 0;
-+ gemac_disable_copy_all(priv->EMAC_baseaddr);
-+ }
-+
-+ /* Enable broadcast frame reception if required. */
-+ if (ndev->flags & IFF_BROADCAST) {
-+ gemac_allow_broadcast(priv->EMAC_baseaddr);
-+ } else {
-+ netif_info(priv, drv, ndev,
-+ "disabling broadcast frame reception\n");
-+
-+ gemac_no_broadcast(priv->EMAC_baseaddr);
-+ }
-+
-+ if (ndev->flags & IFF_ALLMULTI) {
-+ /* Set the hash to rx all multicast frames */
-+ hash_addr.bottom = 0xFFFFFFFF;
-+ hash_addr.top = 0xFFFFFFFF;
-+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
-+ netdev_for_each_uc_addr(ha, ndev) {
-+ if (uc_count >= MAX_UC_SPEC_ADDR_REG)
-+ break;
-+ pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
-+ gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
-+ uc_count + 2);
-+ uc_count++;
-+ }
-+ } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
-+ u8 *addr;
-+
-+ hash_addr.bottom = 0;
-+ hash_addr.top = 0;
-+
-+ netdev_for_each_mc_addr(ha, ndev) {
-+ addr = ha->addr;
-+
-+ netif_info(priv, drv, ndev,
-+ "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
-+ addr[0], addr[1], addr[2],
-+ addr[3], addr[4], addr[5]);
-+
-+ result = pfe_eth_get_hash(addr);
-+
-+ if (result < EMAC_HASH_REG_BITS) {
-+ if (result < 32)
-+ hash_addr.bottom |= (1 << result);
-+ else
-+ hash_addr.top |= (1 << (result - 32));
-+ } else {
-+ break;
-+ }
-+ }
-+
-+ uc_count = -1;
-+ netdev_for_each_uc_addr(ha, ndev) {
-+ addr = ha->addr;
-+
-+ if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
-+ netdev_info(ndev,
-+ "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
-+ addr[0], addr[1], addr[2],
-+ addr[3], addr[4], addr[5]);
-+ pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
-+ gemac_set_laddrN(priv->EMAC_baseaddr,
-+ &spec_addr, uc_count + 2);
-+ } else {
-+ netif_info(priv, drv, ndev,
-+ "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
-+ addr[0], addr[1], addr[2],
-+ addr[3], addr[4], addr[5]);
-+
-+ result = pfe_eth_get_hash(addr);
-+ if (result >= EMAC_HASH_REG_BITS) {
-+ break;
-+
-+ } else {
-+ if (result < 32)
-+ hash_addr.bottom |= (1 <<
-+ result);
-+ else
-+ hash_addr.top |= (1 <<
-+ (result - 32));
-+ }
-+ }
-+ }
-+
-+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
-+ }
-+
-+ if (!(netdev_uc_count(ndev) >= MAX_UC_SPEC_ADDR_REG)) {
-+ /*
-+ * Check if there are any specific address HW registers that
-+ * need to be flushed
-+ */
-+ for (uc_count = netdev_uc_count(ndev); uc_count <
-+ MAX_UC_SPEC_ADDR_REG; uc_count++)
-+ gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
-+ }
-+
-+ if (ndev->flags & IFF_LOOPBACK)
-+ gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
-+}
-+
-+/* pfe_eth_set_features
-+ */
-+static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
-+ features)
-+{
-+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
-+ int rc = 0;
-+
-+ if (features & NETIF_F_RXCSUM)
-+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
-+ else
-+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
-+ return rc;
-+}
-+
-+/* pfe_eth_fast_tx_timeout
-+ */
-+static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
-+{
-+ struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
-+ pfe_eth_fast_timer,
-+ timer);
-+ struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
-+ struct pfe_eth_priv_s,
-+ fast_tx_timeout);
-+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
-+ fast_tx_timeout->queuenum);
-+
-+ if (netif_tx_queue_stopped(tx_queue)) {
-+#ifdef PFE_ETH_TX_STATS
-+ priv->was_stopped[fast_tx_timeout->queuenum] = 1;
-+#endif
-+ netif_tx_wake_queue(tx_queue);
-+ }
-+
-+ return HRTIMER_NORESTART;
-+}
-+
-+/* pfe_eth_fast_tx_timeout_init
-+ */
-+static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
-+{
-+ int i;
-+
-+ for (i = 0; i < emac_txq_cnt; i++) {
-+ priv->fast_tx_timeout[i].queuenum = i;
-+ hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
-+ HRTIMER_MODE_REL);
-+ priv->fast_tx_timeout[i].timer.function =
-+ pfe_eth_fast_tx_timeout;
-+ priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
-+ }
-+}
-+
-+static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
-+ struct pfe_eth_priv_s *priv,
-+ unsigned int qno)
-+{
-+ void *buf_addr;
-+ unsigned int rx_ctrl;
-+ unsigned int desc_ctrl = 0;
-+ struct hif_ipsec_hdr *ipsec_hdr = NULL;
-+ struct sk_buff *skb;
-+ struct sk_buff *skb_frag, *skb_frag_last = NULL;
-+ int length = 0, offset;
-+
-+ skb = priv->skb_inflight[qno];
-+
-+ if (skb) {
-+ skb_frag_last = skb_shinfo(skb)->frag_list;
-+ if (skb_frag_last) {
-+ while (skb_frag_last->next)
-+ skb_frag_last = skb_frag_last->next;
-+ }
-+ }
-+
-+ while (!(desc_ctrl & CL_DESC_LAST)) {
-+ buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
-+ &offset, &rx_ctrl, &desc_ctrl,
-+ (void **)&ipsec_hdr);
-+ if (!buf_addr)
-+ goto incomplete;
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_DESC_COUNT]++;
-+#endif
-+
-+ /* First frag */
-+ if (desc_ctrl & CL_DESC_FIRST) {
-+ skb = build_skb(buf_addr, 0);
-+ if (unlikely(!skb))
-+ goto pkt_drop;
-+
-+ skb_reserve(skb, offset);
-+ skb_put(skb, length);
-+ skb->dev = ndev;
-+
-+ if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
-+ HIF_CTRL_RX_CHECKSUMMED))
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ else
-+ skb_checksum_none_assert(skb);
-+
-+ } else {
-+ /* Next frags */
-+ if (unlikely(!skb)) {
-+ pr_err("%s: NULL skb_inflight\n",
-+ __func__);
-+ goto pkt_drop;
-+ }
-+
-+ skb_frag = build_skb(buf_addr, 0);
-+
-+ if (unlikely(!skb_frag)) {
-+ kfree(buf_addr);
-+ goto pkt_drop;
-+ }
-+
-+ skb_reserve(skb_frag, offset);
-+ skb_put(skb_frag, length);
-+
-+ skb_frag->dev = ndev;
-+
-+ if (skb_shinfo(skb)->frag_list)
-+ skb_frag_last->next = skb_frag;
-+ else
-+ skb_shinfo(skb)->frag_list = skb_frag;
-+
-+ skb->truesize += skb_frag->truesize;
-+ skb->data_len += length;
-+ skb->len += length;
-+ skb_frag_last = skb_frag;
-+ }
-+ }
-+
-+ priv->skb_inflight[qno] = NULL;
-+ return skb;
-+
-+incomplete:
-+ priv->skb_inflight[qno] = skb;
-+ return NULL;
-+
-+pkt_drop:
-+ priv->skb_inflight[qno] = NULL;
-+
-+ if (skb)
-+ kfree_skb(skb);
-+ else
-+ kfree(buf_addr);
-+
-+ priv->stats.rx_errors++;
-+
-+ return NULL;
-+}
-+
-+/* pfe_eth_poll
-+ */
-+static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
-+ unsigned int qno, int budget)
-+{
-+ struct net_device *ndev = priv->ndev;
-+ struct sk_buff *skb;
-+ int work_done = 0;
-+ unsigned int len;
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_POLL_COUNT]++;
-+#endif
-+
-+ do {
-+ skb = pfe_eth_rx_skb(ndev, priv, qno);
-+
-+ if (!skb)
-+ break;
-+
-+ len = skb->len;
-+
-+ /* Packet will be processed */
-+ skb->protocol = eth_type_trans(skb, ndev);
-+
-+ netif_receive_skb(skb);
-+
-+ priv->stats.rx_packets++;
-+ priv->stats.rx_bytes += len;
-+
-+ work_done++;
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ priv->napi_counters[NAPI_PACKET_COUNT]++;
-+#endif
-+
-+ } while (work_done < budget);
-+
-+ /*
-+	 * If the RX budget was not fully consumed, exit polling mode.
-+	 * No netif_running(dev) check is required here, as this is
-+	 * checked in net/core/dev.c (2.6.33.5 kernel specific).
-+ */
-+ if (work_done < budget) {
-+ napi_complete(napi);
-+
-+ hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
-+ qno);
-+ }
-+#ifdef PFE_ETH_NAPI_STATS
-+ else
-+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
-+#endif
-+
-+ return work_done;
-+}
-+
-+/*
-+ * pfe_eth_lro_poll
-+ */
-+static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
-+ lro_napi);
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+ return pfe_eth_poll(priv, napi, 2, budget);
-+}
-+
-+/* pfe_eth_low_poll
-+ */
-+static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
-+ low_napi);
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+ return pfe_eth_poll(priv, napi, 1, budget);
-+}
-+
-+/* pfe_eth_high_poll
-+ */
-+static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
-+ high_napi);
-+
-+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
-+
-+ return pfe_eth_poll(priv, napi, 0, budget);
-+}
-+
-+static const struct net_device_ops pfe_netdev_ops = {
-+ .ndo_open = pfe_eth_open,
-+ .ndo_stop = pfe_eth_close,
-+ .ndo_start_xmit = pfe_eth_send_packet,
-+ .ndo_select_queue = pfe_eth_select_queue,
-+ .ndo_set_rx_mode = pfe_eth_set_multi,
-+ .ndo_set_mac_address = pfe_eth_set_mac_address,
-+ .ndo_validate_addr = eth_validate_addr,
-+ .ndo_change_mtu = pfe_eth_change_mtu,
-+ .ndo_get_stats = pfe_eth_get_stats,
-+ .ndo_set_features = pfe_eth_set_features,
-+};
-+
-+/* pfe_eth_init_one
-+ */
-+static int pfe_eth_init_one(struct pfe *pfe,
-+ struct ls1012a_pfe_platform_data *pfe_info,
-+ int id)
-+{
-+ struct net_device *ndev = NULL;
-+ struct pfe_eth_priv_s *priv = NULL;
-+ struct ls1012a_eth_platform_data *einfo;
-+ int err;
-+
-+ einfo = (struct ls1012a_eth_platform_data *)
-+ pfe_info->ls1012a_eth_pdata;
-+
-+	/* einfo should never be NULL, but there is no harm in checking */
-+ if (!einfo) {
-+ pr_err(
-+ "%s: pfe missing additional gemacs platform data\n"
-+ , __func__);
-+ err = -ENODEV;
-+ goto err0;
-+ }
-+
-+ if (us)
-+ emac_txq_cnt = EMAC_TXQ_CNT;
-+ /* Create an ethernet device instance */
-+ ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
-+
-+ if (!ndev) {
-+ pr_err("%s: gemac %d device allocation failed\n",
-+ __func__, einfo[id].gem_id);
-+ err = -ENOMEM;
-+ goto err0;
-+ }
-+
-+ priv = netdev_priv(ndev);
-+ priv->ndev = ndev;
-+ priv->id = einfo[id].gem_id;
-+ priv->pfe = pfe;
-+ priv->phy_node = einfo[id].phy_node;
-+
-+ SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
-+
-+ pfe->eth.eth_priv[id] = priv;
-+
-+ /* Set the info in the priv to the current info */
-+ priv->einfo = &einfo[id];
-+ priv->EMAC_baseaddr = cbus_emac_base[id];
-+ priv->GPI_baseaddr = cbus_gpi_base[id];
-+
-+ spin_lock_init(&priv->lock);
-+
-+ pfe_eth_fast_tx_timeout_init(priv);
-+
-+ /* Copy the station address into the dev structure, */
-+ memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
-+
-+ if (us)
-+ goto phy_init;
-+
-+ ndev->mtu = 1500;
-+
-+ /* Set MTU limits */
-+ ndev->min_mtu = ETH_MIN_MTU;
-+
-+	/*
-+	 * Jumbo frames are not supported on LS1012A rev 1.0, so the maximum
-+	 * MTU must be restricted to the supported frame length.
-+	 */
-+ if (pfe_errata_a010897)
-+ ndev->max_mtu = JUMBO_FRAME_SIZE_V1 - ETH_HLEN - ETH_FCS_LEN;
-+ else
-+ ndev->max_mtu = JUMBO_FRAME_SIZE_V2 - ETH_HLEN - ETH_FCS_LEN;
-+
-+	/* Enable after checksum offload is validated */
-+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
-+ NETIF_F_IPV6_CSUM | NETIF_F_SG;
-+
-+ /* enabled by default */
-+ ndev->features = ndev->hw_features;
-+
-+ priv->usr_features = ndev->features;
-+
-+ ndev->netdev_ops = &pfe_netdev_ops;
-+
-+ ndev->ethtool_ops = &pfe_ethtool_ops;
-+
-+ /* Enable basic messages by default */
-+ priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
-+ NETIF_MSG_PROBE;
-+
-+ netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
-+ HIF_RX_POLL_WEIGHT - 16);
-+ netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
-+ HIF_RX_POLL_WEIGHT - 16);
-+ netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
-+ HIF_RX_POLL_WEIGHT - 16);
-+
-+ err = register_netdev(ndev);
-+ if (err) {
-+ netdev_err(ndev, "register_netdev() failed\n");
-+ goto err1;
-+ }
-+
-+ if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
-+ ((pfe_use_old_dts_phy) &&
-+ (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
-+ pr_info("%s: No PHY or fixed-link\n", __func__);
-+ goto skip_phy_init;
-+ }
-+
-+phy_init:
-+ device_init_wakeup(&ndev->dev, WAKE_MAGIC);
-+
-+ err = pfe_phy_init(ndev);
-+ if (err) {
-+ netdev_err(ndev, "%s: pfe_phy_init() failed\n",
-+ __func__);
-+ goto err2;
-+ }
-+
-+ if (us) {
-+ if (priv->phydev)
-+ phy_start(priv->phydev);
-+ return 0;
-+ }
-+
-+ netif_carrier_on(ndev);
-+
-+skip_phy_init:
-+ /* Create all the sysfs files */
-+ if (pfe_eth_sysfs_init(ndev))
-+ goto err3;
-+
-+ netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
-+ __func__, priv->EMAC_baseaddr);
-+
-+ return 0;
-+
-+err3:
-+ pfe_phy_exit(priv->ndev);
-+err2:
-+ if (us)
-+ goto err1;
-+ unregister_netdev(ndev);
-+err1:
-+ free_netdev(priv->ndev);
-+err0:
-+ return err;
-+}
-+
-+/* pfe_eth_init
-+ */
-+int pfe_eth_init(struct pfe *pfe)
-+{
-+ int ii = 0;
-+ int err;
-+ struct ls1012a_pfe_platform_data *pfe_info;
-+
-+ pr_info("%s\n", __func__);
-+
-+ cbus_emac_base[0] = EMAC1_BASE_ADDR;
-+ cbus_emac_base[1] = EMAC2_BASE_ADDR;
-+
-+ cbus_gpi_base[0] = EGPI1_BASE_ADDR;
-+ cbus_gpi_base[1] = EGPI2_BASE_ADDR;
-+
-+ pfe_info = (struct ls1012a_pfe_platform_data *)
-+ pfe->dev->platform_data;
-+ if (!pfe_info) {
-+ pr_err("%s: pfe missing additional platform data\n", __func__);
-+ err = -ENODEV;
-+ goto err_pdata;
-+ }
-+
-+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
-+ err = pfe_eth_mdio_init(pfe, pfe_info, ii);
-+ if (err) {
-+ pr_err("%s: pfe_eth_mdio_init() failed\n", __func__);
-+ goto err_mdio_init;
-+ }
-+ }
-+
-+ if (fsl_guts_get_svr() == LS1012A_REV_1_0)
-+ pfe_errata_a010897 = true;
-+ else
-+ pfe_errata_a010897 = false;
-+
-+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
-+ err = pfe_eth_init_one(pfe, pfe_info, ii);
-+ if (err)
-+ goto err_eth_init;
-+ }
-+
-+ return 0;
-+
-+err_eth_init:
-+ while (ii--) {
-+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
-+ pfe_eth_mdio_exit(pfe, ii);
-+ }
-+
-+err_mdio_init:
-+err_pdata:
-+ return err;
-+}
-+
-+/* pfe_eth_exit_one
-+ */
-+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
-+{
-+ netif_info(priv, probe, priv->ndev, "%s\n", __func__);
-+
-+ if (!us)
-+ pfe_eth_sysfs_exit(priv->ndev);
-+
-+ if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
-+ ((pfe_use_old_dts_phy) &&
-+ (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
-+ pr_info("%s: No PHY or fixed-link\n", __func__);
-+ goto skip_phy_exit;
-+ }
-+
-+ pfe_phy_exit(priv->ndev);
-+
-+skip_phy_exit:
-+ if (!us)
-+ unregister_netdev(priv->ndev);
-+
-+ free_netdev(priv->ndev);
-+}
-+
-+/* pfe_eth_exit
-+ */
-+void pfe_eth_exit(struct pfe *pfe)
-+{
-+ int ii;
-+
-+ pr_info("%s\n", __func__);
-+
-+ for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
-+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
-+
-+ for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
-+ pfe_eth_mdio_exit(pfe, ii);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_eth.h
-@@ -0,0 +1,175 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_ETH_H_
-+#define _PFE_ETH_H_
-+#include <linux/kernel.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ethtool.h>
-+#include <linux/mii.h>
-+#include <linux/phy.h>
-+#include <linux/clk.h>
-+#include <linux/interrupt.h>
-+#include <linux/time.h>
-+
-+#define PFE_ETH_NAPI_STATS
-+#define PFE_ETH_TX_STATS
-+
-+#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
-+#define LRO_LEN_COUNT_MAX 32
-+#define LRO_NB_COUNT_MAX 32
-+
-+#define PFE_PAUSE_FLAG_ENABLE 1
-+#define PFE_PAUSE_FLAG_AUTONEG 2
-+
-+/* GEMAC configured by SW */
-+/* GEMAC configured by phy lines (not for MII/GMII) */
-+
-+#define GEMAC_SW_FULL_DUPLEX BIT(9)
-+#define GEMAC_SW_SPEED_10M (0 << 12)
-+#define GEMAC_SW_SPEED_100M BIT(12)
-+#define GEMAC_SW_SPEED_1G (2 << 12)
-+
-+#define GEMAC_NO_PHY BIT(0)
-+
-+struct ls1012a_eth_platform_data {
-+ /* board specific information */
-+ u32 mii_config;
-+ u32 phy_flags;
-+ u32 gem_id;
-+ u32 phy_id;
-+ u32 mdio_muxval;
-+ u8 mac_addr[ETH_ALEN];
-+ struct device_node *phy_node;
-+};
-+
-+struct ls1012a_mdio_platform_data {
-+ int id;
-+ int irq[32];
-+ u32 phy_mask;
-+ int mdc_div;
-+};
-+
-+struct ls1012a_pfe_platform_data {
-+ struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
-+ struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
-+};
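-+/*
-+ * Illustrative only (the values below are hypothetical, not taken from any
-+ * board): a platform probe could describe one RGMII GEMAC roughly as
-+ * follows.
-+ *
-+ *	static struct ls1012a_pfe_platform_data pdata = {
-+ *		.ls1012a_eth_pdata[0] = {
-+ *			.gem_id     = 0,
-+ *			.mii_config = PHY_INTERFACE_MODE_RGMII_TXID,
-+ *			.phy_id     = 2,
-+ *			.mac_addr   = { 0x00, 0x04, 0x9f, 0x11, 0x22, 0x33 },
-+ *		},
-+ *		.ls1012a_mdio_pdata[0] = {
-+ *			.phy_mask = 0xfffffffb,
-+ *			.mdc_div  = 64,
-+ *		},
-+ *	};
-+ */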
-+
-+#define NUM_GEMAC_SUPPORT 2
-+#define DRV_NAME "pfe-eth"
-+#define DRV_VERSION "1.0"
-+
-+#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
-+#define TX_POLL_TIMEOUT_MS 1000
-+
-+#define EMAC_TXQ_CNT 16
-+#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
-+
-+#define JUMBO_FRAME_SIZE_V1 1900
-+#define JUMBO_FRAME_SIZE_V2 10258
-+/*
-+ * Client Tx queue threshold, for txQ flush condition.
-+ * It must be smaller than the queue size (in case we ever change it in the
-+ * future).
-+ */
-+#define HIF_CL_TX_FLUSH_MARK 32
-+
-+/*
-+ * Max number of TX resources (HIF descriptors or skbs) that will be released
-+ * in a single go during batch recycling.
-+ * Should be lower than the flush mark so the SW can provide the HW with a
-+ * continuous stream of packets instead of bursts.
-+ */
-+#define TX_FREE_MAX_COUNT 16
-+#define EMAC_RXQ_CNT 3
-+#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
-+/* make sure clients can receive a full burst of packets */
-+#define EMAC_RMON_TXBYTES_POS 0x00
-+#define EMAC_RMON_RXBYTES_POS 0x14
-+
-+#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
-+#define EMAC_MDIO_TIMEOUT 1000
-+#define MAX_UC_SPEC_ADDR_REG 31
-+
-+struct pfe_eth_fast_timer {
-+ int queuenum;
-+ struct hrtimer timer;
-+ void *base;
-+};
-+
-+struct pfe_eth_priv_s {
-+ struct pfe *pfe;
-+ struct hif_client_s client;
-+ struct napi_struct lro_napi;
-+ struct napi_struct low_napi;
-+ struct napi_struct high_napi;
-+ int low_tmu_q;
-+ int high_tmu_q;
-+ struct net_device_stats stats;
-+ struct net_device *ndev;
-+ int id;
-+ int promisc;
-+ unsigned int msg_enable;
-+ unsigned int usr_features;
-+
-+ spinlock_t lock; /* protect member variables */
-+ unsigned int event_status;
-+ int irq;
-+ void *EMAC_baseaddr;
-+ void *GPI_baseaddr;
-+ /* PHY stuff */
-+ struct phy_device *phydev;
-+ int oldspeed;
-+ int oldduplex;
-+ int oldlink;
-+ struct device_node *phy_node;
-+ struct clk *gemtx_clk;
-+ int wol;
-+ int pause_flag;
-+
-+ int default_priority;
-+ struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
-+
-+ struct ls1012a_eth_platform_data *einfo;
-+ struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
-+
-+#ifdef PFE_ETH_TX_STATS
-+ unsigned int stop_queue_total[EMAC_TXQ_CNT];
-+ unsigned int stop_queue_hif[EMAC_TXQ_CNT];
-+ unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
-+ unsigned int stop_queue_credit[EMAC_TXQ_CNT];
-+ unsigned int clean_fail[EMAC_TXQ_CNT];
-+ unsigned int was_stopped[EMAC_TXQ_CNT];
-+#endif
-+
-+#ifdef PFE_ETH_NAPI_STATS
-+ unsigned int napi_counters[NAPI_MAX_COUNT];
-+#endif
-+ unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
-+};
-+
-+struct pfe_eth {
-+ struct pfe_eth_priv_s *eth_priv[3];
-+};
-+
-+struct pfe_mdio_priv_s {
-+ void __iomem *mdio_base;
-+ int mdc_div;
-+ struct mii_bus *mii_bus;
-+};
-+
-+struct pfe_mdio {
-+ struct pfe_mdio_priv_s *mdio_priv[3];
-+};
-+
-+int pfe_eth_init(struct pfe *pfe);
-+void pfe_eth_exit(struct pfe *pfe);
-+int pfe_eth_suspend(struct net_device *dev);
-+int pfe_eth_resume(struct net_device *dev);
-+int pfe_eth_mdio_reset(struct mii_bus *bus);
-+
-+#endif /* _PFE_ETH_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
-@@ -0,0 +1,302 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+/*
-+ * @file
-+ * Contains all the functions to handle parsing and loading of PE firmware
-+ * files.
-+ */
-+#include <linux/firmware.h>
-+
-+#include "pfe_mod.h"
-+#include "pfe_firmware.h"
-+#include "pfe/pfe.h"
-+
-+static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
-+ const char *section)
-+{
-+ struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
-+ struct elf32_shdr *shdr;
-+ struct elf32_shdr *shdr_shstr;
-+ Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
-+ Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
-+ Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
-+ Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
-+ Elf32_Off shstr_offset;
-+ Elf32_Word sh_name;
-+ const char *name;
-+ int i;
-+
-+ /* Section header strings */
-+ shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
-+ e_shentsize);
-+ shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
-+
-+ for (i = 0; i < e_shnum; i++) {
-+ shdr = (struct elf32_shdr *)(fw->data + e_shoff
-+ + i * e_shentsize);
-+
-+ sh_name = be32_to_cpu(shdr->sh_name);
-+
-+ name = (const char *)(fw->data + shstr_offset + sh_name);
-+
-+ if (!strcmp(name, section))
-+ return shdr;
-+ }
-+
-+ pr_err("%s: didn't find section %s\n", __func__, section);
-+
-+ return NULL;
-+}
-+
-+#if defined(CFG_DIAGS)
-+static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
-+ *diags_info)
-+{
-+ struct elf32_shdr *shdr;
-+ unsigned long offset, size;
-+
-+ shdr = get_elf_section_header(fw, ".pfe_diags_str");
-+ if (shdr) {
-+ offset = be32_to_cpu(shdr->sh_offset);
-+ size = be32_to_cpu(shdr->sh_size);
-+ diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
-+ diags_info->diags_str_size = size;
-+ diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
-+ memcpy(diags_info->diags_str_array, fw->data + offset, size);
-+
-+ return 0;
-+ } else {
-+ return -1;
-+ }
-+}
-+#endif
-+
-+static void pfe_check_version_info(const struct firmware *fw)
-+{
-+ /*static char *version = NULL;*/
-+ static char *version;
-+
-+ struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
-+
-+ if (shdr) {
-+ if (!version) {
-+ /*
-+ * this is the first fw we load, use its version
-+ * string as reference (whatever it is)
-+ */
-+ version = (char *)(fw->data +
-+ be32_to_cpu(shdr->sh_offset));
-+
-+ pr_info("PFE binary version: %s\n", version);
-+ } else {
-+ /*
-+			 * at least one firmware has already been loaded, so
-+			 * its version string can be checked against this one
-+ */
-+ if (strcmp(version, (char *)(fw->data +
-+ be32_to_cpu(shdr->sh_offset)))) {
-+ pr_info(
-+ "WARNING: PFE firmware binaries from incompatible version\n");
-+ }
-+ }
-+ } else {
-+ /*
-+ * version cannot be verified, a potential issue that should
-+ * be reported
-+ */
-+ pr_info(
-+ "WARNING: PFE firmware binaries from incompatible version\n");
-+ }
-+}
-+
-+/* PFE elf firmware loader.
-+ * Loads an elf firmware image into a set of PEs (specified using a bitmask)
-+ *
-+ * @param pe_mask Mask of PE id's to load firmware to
-+ * @param fw Pointer to the firmware image
-+ *
-+ * @return 0 on success, a negative value on error
-+ *
-+ */
-+int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
-+{
-+ struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
-+ Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
-+ struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
-+ be32_to_cpu(elf_hdr->e_shoff));
-+ int id, section;
-+ int rc;
-+
-+ pr_info("%s\n", __func__);
-+
-+ /* Some sanity checks */
-+ if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
-+ pr_err("%s: incorrect elf magic number\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
-+ pr_err("%s: incorrect elf class(%x)\n", __func__,
-+ elf_hdr->e_ident[EI_CLASS]);
-+ return -EINVAL;
-+ }
-+
-+ if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
-+ pr_err("%s: incorrect elf data(%x)\n", __func__,
-+ elf_hdr->e_ident[EI_DATA]);
-+ return -EINVAL;
-+ }
-+
-+ if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
-+ pr_err("%s: incorrect elf file type(%x)\n", __func__,
-+ be16_to_cpu(elf_hdr->e_type));
-+ return -EINVAL;
-+ }
-+
-+ for (section = 0; section < sections; section++, shdr++) {
-+ if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
-+ SHF_EXECINSTR)))
-+ continue;
-+
-+ for (id = 0; id < MAX_PE; id++)
-+ if (pe_mask & (1 << id)) {
-+ rc = pe_load_elf_section(id, fw->data, shdr,
-+ pfe->dev);
-+ if (rc < 0)
-+ goto err;
-+ }
-+ }
-+
-+ pfe_check_version_info(fw);
-+
-+ return 0;
-+
-+err:
-+ return rc;
-+}
-+
-+/* PFE firmware initialization.
-+ * Loads the different firmware files from the filesystem.
-+ * Initializes PE IMEM/DMEM and UTIL-PE DDR.
-+ * Initializes control path symbol addresses (by looking them up in the elf
-+ * firmware files).
-+ * Takes the PEs out of reset.
-+ *
-+ * @return 0 on success, a negative value on error
-+ *
-+ */
-+int pfe_firmware_init(struct pfe *pfe)
-+{
-+ const struct firmware *class_fw, *tmu_fw;
-+ int rc = 0;
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ const char *util_fw_name;
-+ const struct firmware *util_fw;
-+#endif
-+
-+ pr_info("%s\n", __func__);
-+
-+ if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
-+ pr_err("%s: request firmware %s failed\n", __func__,
-+ CLASS_FIRMWARE_FILENAME);
-+ rc = -ETIMEDOUT;
-+ goto err0;
-+ }
-+
-+ if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
-+ pr_err("%s: request firmware %s failed\n", __func__,
-+ TMU_FIRMWARE_FILENAME);
-+ rc = -ETIMEDOUT;
-+ goto err1;
-+	}
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_fw_name = UTIL_FIRMWARE_FILENAME;
-+
-+ if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
-+ pr_err("%s: request firmware %s failed\n", __func__,
-+ util_fw_name);
-+ rc = -ETIMEDOUT;
-+ goto err2;
-+ }
-+#endif
-+ rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
-+ if (rc < 0) {
-+ pr_err("%s: class firmware load failed\n", __func__);
-+ goto err3;
-+ }
-+
-+#if defined(CFG_DIAGS)
-+ rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
-+ if (rc < 0) {
-+ pr_warn(
-+ "PFE diags won't be available for class PEs\n");
-+ rc = 0;
-+ }
-+#endif
-+
-+ rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
-+ if (rc < 0) {
-+ pr_err("%s: tmu firmware load failed\n", __func__);
-+ goto err3;
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
-+ if (rc < 0) {
-+ pr_err("%s: util firmware load failed\n", __func__);
-+ goto err3;
-+ }
-+
-+#if defined(CFG_DIAGS)
-+ rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
-+ if (rc < 0) {
-+ pr_warn(
-+ "PFE diags won't be available for util PE\n");
-+ rc = 0;
-+ }
-+#endif
-+
-+ util_enable();
-+#endif
-+
-+ tmu_enable(0xf);
-+ class_enable();
-+
-+err3:
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ release_firmware(util_fw);
-+
-+err2:
-+#endif
-+ release_firmware(tmu_fw);
-+
-+err1:
-+ release_firmware(class_fw);
-+
-+err0:
-+ return rc;
-+}
-+
-+/* PFE firmware cleanup.
-+ * Puts the PEs in reset.
-+ *
-+ */
-+void pfe_firmware_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ if (pe_reset_all(&pfe->ctrl) != 0)
-+ pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
-+
-+ class_disable();
-+ tmu_disable(0xf);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_disable();
-+#endif
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
-@@ -0,0 +1,20 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_FIRMWARE_H_
-+#define _PFE_FIRMWARE_H_
-+
-+#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
-+#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
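-+
-+/*
-+ * These names are passed to request_firmware(), which typically looks them
-+ * up under /lib/firmware/ on the target root filesystem.
-+ */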
-+
-+#define PFE_FW_CHECK_PASS 0
-+#define PFE_FW_CHECK_FAIL 1
-+#define NUM_PFE_FW 3
-+
-+int pfe_firmware_init(struct pfe *pfe);
-+void pfe_firmware_exit(struct pfe *pfe);
-+
-+#endif /* _PFE_FIRMWARE_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hal.c
-@@ -0,0 +1,1516 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include "pfe_mod.h"
-+#include "pfe/pfe.h"
-+
-+/* A-010897: Jumbo frame is not supported */
-+extern bool pfe_errata_a010897;
-+
-+#define PFE_RCR_MAX_FL_MASK 0xC000FFFF
-+
-+void *cbus_base_addr;
-+void *ddr_base_addr;
-+unsigned long ddr_phys_base_addr;
-+unsigned int ddr_size;
-+
-+static struct pe_info pe[MAX_PE];
-+
-+/* Initializes the PFE library.
-+ * Must be called before using any of the library functions.
-+ *
-+ * @param[in] cbus_base CBUS virtual base address (as mapped in
-+ * the host CPU address space)
-+ * @param[in] ddr_base PFE DDR range virtual base address (as
-+ * mapped in the host CPU address space)
-+ * @param[in] ddr_phys_base PFE DDR range physical base address (as
-+ * mapped in platform)
-+ * @param[in] size PFE DDR range size (as defined by the host
-+ * software)
-+ */
-+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
-+ unsigned int size)
-+{
-+ cbus_base_addr = cbus_base;
-+ ddr_base_addr = ddr_base;
-+ ddr_phys_base_addr = ddr_phys_base;
-+ ddr_size = size;
-+
-+ pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
-+ pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
-+ pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
-+ pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
-+ pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
-+ pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
-+ pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
-+ pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
-+ pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
-+ pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
-+ pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
-+ pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
-+ pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
-+ pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
-+ pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
-+ pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
-+
-+ pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
-+ pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
-+ pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
-+ pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
-+ pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
-+ pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
-+
-+ pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
-+ pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
-+ pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
-+ pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
-+ pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
-+ pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
-+
-+ pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
-+ pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
-+ pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
-+ pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
-+ pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
-+ pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
-+ pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
-+ pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
-+ pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
-+#endif
-+}
-+
-+/* Writes a buffer to PE internal memory from the host
-+ * through indirect access registers.
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] src Buffer source address
-+ * @param[in] mem_access_addr Internal memory destination address (must be
-+ * 32bit aligned)
-+ * @param[in] len Number of bytes to copy
-+ */
-+void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned
-+int len)
-+{
-+ u32 offset = 0, val, addr;
-+ unsigned int len32 = len >> 2;
-+ int i;
-+
-+ addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
-+ PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
-+
-+ for (i = 0; i < len32; i++, offset += 4, src += 4) {
-+ val = *(u32 *)src;
-+ writel(cpu_to_be32(val), pe[id].mem_access_wdata);
-+ writel(addr + offset, pe[id].mem_access_addr);
-+ }
-+
-+ len = (len & 0x3);
-+ if (len) {
-+ val = 0;
-+
-+ addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
-+ PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
-+
-+ for (i = 0; i < len; i++, src++)
-+ val |= (*(u8 *)src) << (8 * i);
-+
-+ writel(cpu_to_be32(val), pe[id].mem_access_wdata);
-+ writel(addr, pe[id].mem_access_addr);
-+ }
-+}
-+
-+/* Writes a buffer to PE internal data memory (DMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] src Buffer source address
-+ * @param[in] dst DMEM destination address (must be 32bit
-+ * aligned)
-+ * @param[in] len Number of bytes to copy
-+ */
-+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
-+{
-+ pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
-+ PE_MEM_ACCESS_DMEM, src, len);
-+}
-+
-+/* Writes a buffer to PE internal program memory (PMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., TMU3_ID)
-+ * @param[in] src Buffer source address
-+ * @param[in] dst PMEM destination address (must be 32bit
-+ * aligned)
-+ * @param[in] len Number of bytes to copy
-+ */
-+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
-+{
-+ pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
-+ - 1)) | PE_MEM_ACCESS_IMEM, src, len);
-+}
-+
-+/* Reads PE internal program memory (IMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., TMU3_ID)
-+ * @param[in] addr PMEM read address (must be aligned on size)
-+ * @param[in] size Number of bytes to read (maximum 4, must not
-+ * cross 32bit boundaries)
-+ * @return the data read (in PE endianness, i.e BE).
-+ */
-+u32 pe_pmem_read(int id, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+ u32 mask = 0xffffffff >> ((4 - size) << 3);
-+ u32 val;
-+
-+ addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
-+ | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
-+
-+ writel(addr, pe[id].mem_access_addr);
-+ val = be32_to_cpu(readl(pe[id].mem_access_rdata));
-+
-+ return (val >> (offset << 3)) & mask;
-+}
-+
-+/* Writes PE internal data memory (DMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] addr DMEM write address (must be aligned on size)
-+ * @param[in] val Value to write (in PE endianness, i.e BE)
-+ * @param[in] size Number of bytes to write (maximum 4, must not
-+ * cross 32bit boundaries)
-+ */
-+void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+
-+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
-+ PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
-+
-+ /* Indirect access interface is byte swapping data being written */
-+ writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
-+ writel(addr, pe[id].mem_access_addr);
-+}
-+
-+/* Reads PE internal data memory (DMEM) from the host
-+ * through indirect access registers.
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] addr DMEM read address (must be aligned on size)
-+ * @param[in] size Number of bytes to read (maximum 4, must not
-+ * cross 32bit boundaries)
-+ * @return the data read (in PE endianness, i.e BE).
-+ */
-+u32 pe_dmem_read(int id, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+ u32 mask = 0xffffffff >> ((4 - size) << 3);
-+ u32 val;
-+
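-+	/* Select the 32-bit word containing addr and enable only the byte lanes being read */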
-+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
-+ PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
-+
-+ writel(addr, pe[id].mem_access_addr);
-+
-+ /* Indirect access interface is byte swapping data being read */
-+ val = be32_to_cpu(readl(pe[id].mem_access_rdata));
-+
-+ return (val >> (offset << 3)) & mask;
-+}
-+
-+/* This function is used to write to CLASS internal bus peripherals (ccu,
-+ * pe-lem) from the host
-+ * through indirect access registers.
-+ * @param[in] val value to write
-+ * @param[in] addr Address to write to (must be aligned on size)
-+ * @param[in] size Number of bytes to write (1, 2 or 4)
-+ *
-+ */
-+void class_bus_write(u32 val, u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+
-+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
-+
-+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
-+ (size << 24);
-+
-+ writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
-+ writel(addr, CLASS_BUS_ACCESS_ADDR);
-+}
-+
-+/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
-+ * through indirect access registers.
-+ * @param[in] addr Address to read from (must be aligned on size)
-+ * @param[in] size Number of bytes to read (1, 2 or 4)
-+ * @return the read data
-+ *
-+ */
-+u32 class_bus_read(u32 addr, u8 size)
-+{
-+ u32 offset = addr & 0x3;
-+ u32 mask = 0xffffffff >> ((4 - size) << 3);
-+ u32 val;
-+
-+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
-+
-+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
-+
-+ writel(addr, CLASS_BUS_ACCESS_ADDR);
-+ val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
-+
-+ return (val >> (offset << 3)) & mask;
-+}
-+
-+/* Writes data to the cluster memory (PE_LMEM)
-+ * @param[in] dst PE LMEM destination address (must be 32bit aligned)
-+ * @param[in] src Buffer source address
-+ * @param[in] len Number of bytes to copy
-+ */
-+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
-+{
-+ u32 len32 = len >> 2;
-+ int i;
-+
-+ for (i = 0; i < len32; i++, src += 4, dst += 4)
-+ class_bus_write(*(u32 *)src, dst, 4);
-+
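-+	/* Copy any remaining half-word and byte with narrower bus writes */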
-+ if (len & 0x2) {
-+ class_bus_write(*(u16 *)src, dst, 2);
-+ src += 2;
-+ dst += 2;
-+ }
-+
-+ if (len & 0x1) {
-+ class_bus_write(*(u8 *)src, dst, 1);
-+ src++;
-+ dst++;
-+ }
-+}
-+
-+/* Writes value to the cluster memory (PE_LMEM)
-+ * @param[in] dst PE LMEM destination address (must be 32bit aligned)
-+ * @param[in] val Value to write
-+ * @param[in] len Number of bytes to write
-+ */
-+void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
-+{
-+ u32 len32 = len >> 2;
-+ int i;
-+
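-+	/* Replicate the fill byte across all four byte lanes of a 32-bit word */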
-+ val = val | (val << 8) | (val << 16) | (val << 24);
-+
-+ for (i = 0; i < len32; i++, dst += 4)
-+ class_bus_write(val, dst, 4);
-+
-+ if (len & 0x2) {
-+ class_bus_write(val, dst, 2);
-+ dst += 2;
-+ }
-+
-+ if (len & 0x1) {
-+ class_bus_write(val, dst, 1);
-+ dst++;
-+ }
-+}
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+
-+/* Writes UTIL program memory (DDR) from the host.
-+ *
-+ * @param[in] addr Address to write (virtual, must be aligned on size)
-+ * @param[in] val Value to write (in PE endianness, i.e BE)
-+ * @param[in] size Number of bytes to write (2 or 4)
-+ */
-+static void util_pmem_write(u32 val, void *addr, u8 size)
-+{
-+ void *addr64 = (void *)((unsigned long)addr & ~0x7);
-+ unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
-+
-+ /*
-+ * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
-+ * location
-+ */
-+ if (size == 4)
-+ writel(be32_to_cpu(val), addr64 + off);
-+ else
-+ writew(be16_to_cpu((u16)val), addr64 + off);
-+}
-+
-+/* Writes a buffer to UTIL program memory (DDR) from the host.
-+ *
-+ * @param[in] dst Address to write (virtual, must be at least 16bit
-+ * aligned)
-+ * @param[in] src Buffer to write (in PE endianness, i.e BE, must have
-+ * same alignment as dst)
-+ * @param[in] len Number of bytes to write (must be at least 16bit
-+ * aligned)
-+ */
-+static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
-+{
-+ unsigned int len32;
-+ int i;
-+
-+ if ((unsigned long)src & 0x2) {
-+ util_pmem_write(*(u16 *)src, dst, 2);
-+ src += 2;
-+ dst += 2;
-+ len -= 2;
-+ }
-+
-+ len32 = len >> 2;
-+
-+ for (i = 0; i < len32; i++, dst += 4, src += 4)
-+ util_pmem_write(*(u32 *)src, dst, 4);
-+
-+ if (len & 0x2)
-+ util_pmem_write(*(u16 *)src, dst, len & 0x2);
-+}
-+#endif
-+
-+/* Loads an elf section into pmem
-+ * Code needs to be at least 16bit aligned and only PROGBITS sections are
-+ * supported
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
-+ * TMU3_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_pmem_section(int id, const void *data,
-+ struct elf32_shdr *shdr)
-+{
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (id == UTIL_ID) {
-+ pr_err("%s: unsupported pmem section for UTIL\n",
-+ __func__);
-+ return -EINVAL;
-+ }
-+#endif
-+
-+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
-+ , __func__, addr, (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x1) {
-+ pr_err("%s: load address(%x) is not 16bit aligned\n",
-+ __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ if (size & 0x1) {
-+ pr_err("%s: load size(%x) is not 16bit aligned\n",
-+ __func__, size);
-+ return -EINVAL;
-+ }
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ pe_pmem_memcpy_to32(id, addr, data + offset, size);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into dmem
-+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
-+ * initialized to 0
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_dmem_section(int id, const void *data,
-+ struct elf32_shdr *shdr)
-+{
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+ u32 size32 = size >> 2;
-+ int i;
-+
-+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
-+ __func__, addr, (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x3) {
-+ pr_err("%s: load address(%x) is not 32bit aligned\n",
-+ __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ pe_dmem_memcpy_to32(id, addr, data + offset, size);
-+ break;
-+
-+ case SHT_NOBITS:
-+ for (i = 0; i < size32; i++, addr += 4)
-+ pe_dmem_write(id, 0, addr, 4);
-+
-+ if (size & 0x3)
-+ pe_dmem_write(id, 0, addr, size & 0x3);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into DDR
-+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
-+ * initialized to 0
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_ddr_section(int id, const void *data,
-+ struct elf32_shdr *shdr,
-+ struct device *dev) {
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+ u32 flags = be32_to_cpu(shdr->sh_flags);
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ if (flags & SHF_EXECINSTR) {
-+ if (id <= CLASS_MAX_ID) {
-+ /* DO the loading only once in DDR */
-+ if (id == CLASS0_ID) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) rcvd\n",
-+ __func__, addr,
-+ (unsigned long)data + offset);
-+ if (((unsigned long)(data + offset)
-+ & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
-+ , __func__, addr,
-+ (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x1) {
-+ pr_err(
-+ "%s: load address(%x) is not 16bit aligned\n"
-+ , __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ if (size & 0x1) {
-+ pr_err(
-+ "%s: load length(%x) is not 16bit aligned\n"
-+ , __func__, size);
-+ return -EINVAL;
-+ }
-+ memcpy(DDR_PHYS_TO_VIRT(
-+ DDR_PFE_TO_PHYS(addr)),
-+ data + offset, size);
-+ }
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ } else if (id == UTIL_ID) {
-+ if (((unsigned long)(data + offset) & 0x3)
-+ != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
-+ , __func__, addr,
-+ (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x1) {
-+ pr_err(
-+ "%s: load address(%x) is not 16bit aligned\n"
-+ , __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ if (size & 0x1) {
-+ pr_err(
-+ "%s: load length(%x) is not 16bit aligned\n"
-+ , __func__, size);
-+ return -EINVAL;
-+ }
-+
-+ util_pmem_memcpy(DDR_PHYS_TO_VIRT(
-+ DDR_PFE_TO_PHYS(addr)),
-+ data + offset, size);
-+ }
-+#endif
-+ } else {
-+ pr_err(
-+ "%s: unsupported ddr section type(%x) for PE(%d)\n"
-+ , __func__, type, id);
-+ return -EINVAL;
-+ }
-+
-+ } else {
-+ memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
-+ + offset, size);
-+ }
-+
-+ break;
-+
-+ case SHT_NOBITS:
-+ memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into pe lmem
-+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
-+ * initialized to 0
-+ *
-+ * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+static int pe_load_pe_lmem_section(int id, const void *data,
-+ struct elf32_shdr *shdr)
-+{
-+ u32 offset = be32_to_cpu(shdr->sh_offset);
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+ u32 type = be32_to_cpu(shdr->sh_type);
-+
-+ if (id > CLASS_MAX_ID) {
-+ pr_err(
-+ "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
-+ __func__, type, id);
-+ return -EINVAL;
-+ }
-+
-+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
-+ pr_err(
-+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
-+ __func__, addr, (unsigned long)data + offset);
-+
-+ return -EINVAL;
-+ }
-+
-+ if (addr & 0x3) {
-+ pr_err("%s: load address(%x) is not 32bit aligned\n",
-+ __func__, addr);
-+ return -EINVAL;
-+ }
-+
-+ switch (type) {
-+ case SHT_PROGBITS:
-+ class_pe_lmem_memcpy_to32(addr, data + offset, size);
-+ break;
-+
-+ case SHT_NOBITS:
-+ class_pe_lmem_memset(addr, 0, size);
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported section type(%x)\n", __func__,
-+ type);
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Loads an elf section into a PE
-+ * For now this only supports loading a section to dmem (all PEs), pmem
-+ * (class and tmu PEs), or DDR (util PE code).
-+ *
-+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
-+ * ..., UTIL_ID)
-+ * @param[in] data pointer to the elf firmware
-+ * @param[in] shdr pointer to the elf section header
-+ *
-+ */
-+int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
-+ struct device *dev) {
-+ u32 addr = be32_to_cpu(shdr->sh_addr);
-+ u32 size = be32_to_cpu(shdr->sh_size);
-+
-+ if (IS_DMEM(addr, size))
-+ return pe_load_dmem_section(id, data, shdr);
-+ else if (IS_PMEM(addr, size))
-+ return pe_load_pmem_section(id, data, shdr);
-+ else if (IS_PFE_LMEM(addr, size))
-+ return 0;
-+ else if (IS_PHYS_DDR(addr, size))
-+ return pe_load_ddr_section(id, data, shdr, dev);
-+ else if (IS_PE_LMEM(addr, size))
-+ return pe_load_pe_lmem_section(id, data, shdr);
-+
-+ pr_err("%s: unsupported memory range(%x)\n", __func__,
-+ addr);
-+ return 0;
-+}
-+
-+/**************************** BMU ***************************/
-+
-+/* Initializes a BMU block.
-+ * @param[in] base BMU block base address
-+ * @param[in] cfg BMU configuration
-+ */
-+void bmu_init(void *base, struct BMU_CFG *cfg)
-+{
-+ bmu_disable(base);
-+
-+ bmu_set_config(base, cfg);
-+
-+ bmu_reset(base);
-+}
-+
-+/* Resets a BMU block.
-+ * @param[in] base BMU block base address
-+ */
-+void bmu_reset(void *base)
-+{
-+ writel(CORE_SW_RESET, base + BMU_CTRL);
-+
-+ /* Wait for self clear */
-+ while (readl(base + BMU_CTRL) & CORE_SW_RESET)
-+ ;
-+}
-+
-+/* Enables a BMU block.
-+ * @param[in] base BMU block base address
-+ */
-+void bmu_enable(void *base)
-+{
-+ writel(CORE_ENABLE, base + BMU_CTRL);
-+}
-+
-+/* Disables a BMU block.
-+ * @param[in] base BMU block base address
-+ */
-+void bmu_disable(void *base)
-+{
-+ writel(CORE_DISABLE, base + BMU_CTRL);
-+}
-+
-+/* Sets the configuration of a BMU block.
-+ * @param[in] base BMU block base address
-+ * @param[in] cfg BMU configuration
-+ */
-+void bmu_set_config(void *base, struct BMU_CFG *cfg)
-+{
-+ writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
-+ writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
-+ writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
-+
-+ /* Interrupts are never used */
-+ writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
-+ writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
-+ writel(0x0, base + BMU_INT_ENABLE);
-+}
-+
-+/**************************** MTIP GEMAC ***************************/
-+
-+/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
-+ * TCP or UDP checksums are discarded.
-+ *
-+ * @param[in] base GEMAC base address.
-+ */
-+void gemac_enable_rx_checksum_offload(void *base)
-+{
-+	/* No configuration found to do this */
-+}
-+
-+/* Disable Rx Checksum Engine.
-+ *
-+ * @param[in] base GEMAC base address.
-+ */
-+void gemac_disable_rx_checksum_offload(void *base)
-+{
-+	/* No configuration found to do this */
-+}
-+
-+/* GEMAC set speed.
-+ * @param[in] base GEMAC base address
-+ * @param[in] gem_speed GEMAC speed (10, 100 or 1000 Mbps)
-+ */
-+void gemac_set_speed(void *base, enum mac_speed gem_speed)
-+{
-+ u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
-+ u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
-+
-+ switch (gem_speed) {
-+ case SPEED_10M:
-+ rcr |= EMAC_RCNTRL_RMII_10T;
-+ break;
-+
-+ case SPEED_1000M:
-+ ecr |= EMAC_ECNTRL_SPEED;
-+ break;
-+
-+ case SPEED_100M:
-+ default:
-+ /*It is in 100M mode */
-+ break;
-+ }
-+ writel(ecr, (base + EMAC_ECNTRL_REG));
-+ writel(rcr, (base + EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC set duplex.
-+ * @param[in] base GEMAC base address
-+ * @param[in] duplex GEMAC duplex mode (Full, Half)
-+ */
-+void gemac_set_duplex(void *base, int duplex)
-+{
-+ if (duplex == DUPLEX_HALF) {
-+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
-+ + EMAC_TCNTRL_REG);
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
-+ + EMAC_RCNTRL_REG));
-+ } else{
-+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
-+ + EMAC_TCNTRL_REG);
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
-+ + EMAC_RCNTRL_REG));
-+ }
-+}
-+
-+/* GEMAC set mode.
-+ * @param[in] base GEMAC base address
-+ * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
-+ */
-+void gemac_set_mode(void *base, int mode)
-+{
-+ u32 val = readl(base + EMAC_RCNTRL_REG);
-+
-+	/* Remove loopback */
-+ val &= ~EMAC_RCNTRL_LOOP;
-+
-+ /* Enable flow control and MII mode and terminate received CRC */
-+ val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD);
-+
-+ writel(val, base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable(void *base)
-+{
-+ writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
-+ EMAC_ECNTRL_REG);
-+}
-+
-+/* GEMAC disable function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable(void *base)
-+{
-+ writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
-+ EMAC_ECNTRL_REG);
-+}
-+
-+/* GEMAC TX disable function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_tx_disable(void *base)
-+{
-+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
-+ EMAC_TCNTRL_REG);
-+}
-+
-+void gemac_tx_enable(void *base)
-+{
-+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
-+ EMAC_TCNTRL_REG);
-+}
-+
-+/* Sets the hash register of the MAC.
-+ * This register is used for matching unicast and multicast frames.
-+ *
-+ * @param[in] base GEMAC base address.
-+ * @param[in] hash 64-bit hash to be configured.
-+ */
-+void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
-+{
-+ writel(hash->bottom, base + EMAC_GALR);
-+ writel(hash->top, base + EMAC_GAUR);
-+}
-+
-+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
-+ unsigned int entry_index)
-+{
-+ if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
-+ return;
-+
-+ entry_index = entry_index - 1;
-+ if (entry_index < 1) {
-+ writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
-+ writel((htonl(address->top) | 0x8808), base +
-+ EMAC_PHY_ADDR_HIGH);
-+ } else {
-+ writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
-+ + EMAC_SMAC_0_0);
-+ writel((htonl(address->top) | 0x8808), base + ((entry_index -
-+ 1) * 8) + EMAC_SMAC_0_1);
-+ }
-+}
-+
-+void gemac_clear_laddrN(void *base, unsigned int entry_index)
-+{
-+ if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
-+ return;
-+
-+ entry_index = entry_index - 1;
-+ if (entry_index < 1) {
-+ writel(0, base + EMAC_PHY_ADDR_LOW);
-+ writel(0, base + EMAC_PHY_ADDR_HIGH);
-+ } else {
-+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
-+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
-+ }
-+}
-+
-+/* Set the loopback mode of the MAC. This can be either no loopback for
-+ * normal operation, local loopback through MAC internal loopback module or PHY
-+ * loopback for external loopback through a PHY. This asserts the external
-+ * loop pin.
-+ *
-+ * @param[in] base GEMAC base address.
-+ * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
-+ * Loopback,
-+ * LB_EXT - PHY Loopback.
-+ */
-+void gemac_set_loop(void *base, enum mac_loop gem_loop)
-+{
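-+	/* Note: this implementation always enables MAC-internal loopback, regardless of gem_loop */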
-+ pr_info("%s()\n", __func__);
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
-+ EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC allow all frames (promiscuous mode).
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_copy_all(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
-+ EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC disable promiscuous mode.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable_copy_all(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
-+ EMAC_RCNTRL_REG));
-+}
-+
-+/* GEMAC allow broadcast function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_allow_broadcast(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
-+ EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC no broadcast function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_no_broadcast(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
-+ EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable 1536 rx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_1536_rx(void *base)
-+{
-+ /* Set 1536 as Maximum frame length */
-+ writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
-+ | (1536 << 16), base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC set rx Max frame length.
-+ * @param[in] base GEMAC base address
-+ * @param[in] mtu new mtu
-+ */
-+void gemac_set_rx_max_fl(void *base, int mtu)
-+{
-+ /* Set mtu as Maximum frame length */
-+ writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
-+ | (mtu << 16), base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable stacked vlan function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_stacked_vlan(void *base)
-+{
-+ /* MTIP doesn't support stacked vlan */
-+}
-+
-+/* GEMAC enable pause rx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_pause_rx(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
-+ base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC disable pause rx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable_pause_rx(void *base)
-+{
-+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
-+ base + EMAC_RCNTRL_REG);
-+}
-+
-+/* GEMAC enable pause tx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_enable_pause_tx(void *base)
-+{
-+ writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
-+}
-+
-+/* GEMAC disable pause tx function.
-+ * @param[in] base GEMAC base address
-+ */
-+void gemac_disable_pause_tx(void *base)
-+{
-+ writel(0x0, base + EMAC_RX_SECTION_EMPTY);
-+}
-+
-+/* GEMAC wol configuration
-+ * @param[in] base GEMAC base address
-+ * @param[in] wol_conf WoL register configuration
-+ */
-+void gemac_set_wol(void *base, u32 wol_conf)
-+{
-+ u32 val = readl(base + EMAC_ECNTRL_REG);
-+
-+ if (wol_conf)
-+ val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
-+ else
-+ val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
-+ writel(val, base + EMAC_ECNTRL_REG);
-+}
-+
-+/* Sets the GEMAC bus width.
-+ * @param[in] base GEMAC base address
-+ * @param[in] width GEMAC bus width to be set; possible values are 32/64/128
-+ */
-+void gemac_set_bus_width(void *base, int width)
-+{
-+}
-+
-+/* Sets Gemac configuration.
-+ * @param[in] base GEMAC base address
-+ * @param[in] cfg GEMAC configuration
-+ */
-+void gemac_set_config(void *base, struct gemac_cfg *cfg)
-+{
-+ /*GEMAC config taken from VLSI */
-+ writel(0x00000004, base + EMAC_TFWR_STR_FWD);
-+ writel(0x00000005, base + EMAC_RX_SECTION_FULL);
-+
-+ if (pfe_errata_a010897)
-+ writel(0x0000076c, base + EMAC_TRUNC_FL);
-+ else
-+ writel(0x00003fff, base + EMAC_TRUNC_FL);
-+
-+ writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
-+ writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
-+
-+ gemac_set_mode(base, cfg->mode);
-+
-+ gemac_set_speed(base, cfg->speed);
-+
-+ gemac_set_duplex(base, cfg->duplex);
-+}
-+
-+/**************************** GPI ***************************/
-+
-+/* Initializes a GPI block.
-+ * @param[in] base GPI base address
-+ * @param[in] cfg GPI configuration
-+ */
-+void gpi_init(void *base, struct gpi_cfg *cfg)
-+{
-+ gpi_reset(base);
-+
-+ gpi_disable(base);
-+
-+ gpi_set_config(base, cfg);
-+}
-+
-+/* Resets a GPI block.
-+ * @param[in] base GPI base address
-+ */
-+void gpi_reset(void *base)
-+{
-+ writel(CORE_SW_RESET, base + GPI_CTRL);
-+}
-+
-+/* Enables a GPI block.
-+ * @param[in] base GPI base address
-+ */
-+void gpi_enable(void *base)
-+{
-+ writel(CORE_ENABLE, base + GPI_CTRL);
-+}
-+
-+/* Disables a GPI block.
-+ * @param[in] base GPI base address
-+ */
-+void gpi_disable(void *base)
-+{
-+ writel(CORE_DISABLE, base + GPI_CTRL);
-+}
-+
-+/* Sets the configuration of a GPI block.
-+ * @param[in] base GPI base address
-+ * @param[in] cfg GPI configuration
-+ */
-+void gpi_set_config(void *base, struct gpi_cfg *cfg)
-+{
-+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
-+ + GPI_LMEM_ALLOC_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
-+ + GPI_LMEM_FREE_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
-+ + GPI_DDR_ALLOC_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
-+ + GPI_DDR_FREE_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
-+ writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
-+ writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
-+ writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
-+ writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
-+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
-+ writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
-+
-+ writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
-+ GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
-+ writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
-+ writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
-+ writel(1, base + GPI_TOE_CHKSUM_EN);
-+
-+ if (cfg->mtip_pause_reg) {
-+ writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
-+ writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
-+ }
-+}
-+
-+/**************************** CLASSIFIER ***************************/
-+
-+/* Initializes CLASSIFIER block.
-+ * @param[in] cfg CLASSIFIER configuration
-+ */
-+void class_init(struct class_cfg *cfg)
-+{
-+ class_reset();
-+
-+ class_disable();
-+
-+ class_set_config(cfg);
-+}
-+
-+/* Resets CLASSIFIER block.
-+ *
-+ */
-+void class_reset(void)
-+{
-+ writel(CORE_SW_RESET, CLASS_TX_CTRL);
-+}
-+
-+/* Enables all CLASS-PE cores.
-+ *
-+ */
-+void class_enable(void)
-+{
-+ writel(CORE_ENABLE, CLASS_TX_CTRL);
-+}
-+
-+/* Disables all CLASS-PE cores.
-+ *
-+ */
-+void class_disable(void)
-+{
-+ writel(CORE_DISABLE, CLASS_TX_CTRL);
-+}
-+
-+/*
-+ * Sets the configuration of the CLASSIFIER block.
-+ * @param[in] cfg CLASSIFIER configuration
-+ */
-+void class_set_config(struct class_cfg *cfg)
-+{
-+ u32 val;
-+
-+ /* Initialize route table */
-+ if (!cfg->resume)
-+ memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
-+ cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
-+#endif
-+
-+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
-+ writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
-+ writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
-+ CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
-+ CLASS_ROUTE_HASH_ENTRY_SIZE);
-+ writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
-+ CLASS_HIF_PARSE);
-+
-+ val = HASH_CRC_PORT_IP | QB2BUS_LE;
-+
-+#if defined(CONFIG_IP_ALIGNED)
-+ val |= IP_ALIGNED;
-+#endif
-+
-+ /*
-+ * Class PE packet steering will only work if TOE mode, bridge fetch or
-+ * route fetch are enabled (see class/qb_fet.v). Route fetch would
-+ * trigger additional memory copies (likely from DDR because of hash
-+ * table size, which cannot be reduced because PE software still
-+ * relies on hash value computed in HW), so when not in TOE mode we
-+ * simply enable HW bridge fetch even though we don't use it.
-+ */
-+ if (cfg->toe_mode)
-+ val |= CLASS_TOE;
-+ else
-+ val |= HW_BRIDGE_FETCH;
-+
-+ writel(val, CLASS_ROUTE_MULTI);
-+
-+ writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
-+ CLASS_ROUTE_TABLE_BASE);
-+ writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
-+ writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
-+ writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
-+ writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
-+ writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
-+
-+ writel(23, CLASS_AFULL_THRES);
-+ writel(23, CLASS_TSQ_FIFO_THRES);
-+
-+ writel(24, CLASS_MAX_BUF_CNT);
-+ writel(24, CLASS_TSQ_MAX_CNT);
-+}
-+
-+/**************************** TMU ***************************/
-+
-+void tmu_reset(void)
-+{
-+ writel(SW_RESET, TMU_CTRL);
-+}
-+
-+/* Initializes TMU block.
-+ * @param[in] cfg TMU configuration
-+ */
-+void tmu_init(struct tmu_cfg *cfg)
-+{
-+ int q, phyno;
-+
-+ tmu_disable(0xF);
-+ mdelay(10);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ /* keep in soft reset */
-+ writel(SW_RESET, TMU_CTRL);
-+#endif
-+ writel(0x3, TMU_SYS_GENERIC_CONTROL);
-+ writel(750, TMU_INQ_WATERMARK);
-+ writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
-+ GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
-+ GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
-+ GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
-+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
-+ TMU_BMU_INQ_ADDR);
-+
-+ writel(0x3FF, TMU_TDQ0_SCH_CTRL); /*
-+ * enabling all 10
-+ * schedulers [9:0] of each TDQ
-+ */
-+ writel(0x3FF, TMU_TDQ1_SCH_CTRL);
-+ writel(0x3FF, TMU_TDQ3_SCH_CTRL);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
-+#endif
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
-+ /* Extra packet pointers will be stored from this address onwards */
-+
-+ writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
-+ writel(5, TMU_TDQ_IIFG_CFG);
-+ writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
-+
-+ writel(0x0, TMU_CTRL);
-+
-+ /* MEM init */
-+ pr_info("%s: mem init\n", __func__);
-+ writel(MEM_INIT, TMU_CTRL);
-+
-+ while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
-+ ;
-+
-+ /* LLM init */
-+ pr_info("%s: lmem init\n", __func__);
-+ writel(LLM_INIT, TMU_CTRL);
-+
-+ while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
-+ ;
-+#endif
-+ /* set up each queue for tail drop */
-+ for (phyno = 0; phyno < 4; phyno++) {
-+ if (phyno == 2)
-+ continue;
-+ for (q = 0; q < 16; q++) {
-+ u32 qdepth;
-+
-+ writel((phyno << 8) | q, TMU_TEQ_CTRL);
-+ writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
-+
-+ if (phyno == 3)
-+ qdepth = DEFAULT_TMU3_QDEPTH;
-+ else
-+ qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
-+ DEFAULT_MAX_QDEPTH;
-+
-+ /* LOG: 68855 */
-+ /*
-+ * The following is a workaround for the reordered
-+ * packet and BMU2 buffer leakage issue.
-+ */
-+ if (CHIP_REVISION() == 0)
-+ qdepth = 31;
-+
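-+			/* The tail-drop queue depth is split across the two HW_PROB_CFG registers */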
-+ writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
-+ writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
-+ }
-+ }
-+
-+#ifdef CFG_LRO
-+ /* Set TMU-3 queue 5 (LRO) in no-drop mode */
-+ writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
-+ writel(0, TMU_TEQ_QCFG);
-+#endif
-+
-+ writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
-+
-+ writel(0x0, TMU_CTRL);
-+}
-+
-+/* Enables TMU-PE cores.
-+ * @param[in] pe_mask TMU PE mask
-+ */
-+void tmu_enable(u32 pe_mask)
-+{
-+ writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
-+}
-+
-+/* Disables TMU cores.
-+ * @param[in] pe_mask TMU PE mask
-+ */
-+void tmu_disable(u32 pe_mask)
-+{
-+ writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
-+}
-+
-+/* This will return the tmu queue status
-+ * @param[in] if_id gem interface id or TMU index
-+ * @return returns the bit mask of busy queues, zero means all
-+ * queues are empty
-+ */
-+u32 tmu_qstatus(u32 if_id)
-+{
-+ return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
-+ offsetof(struct pe_status, tmu_qstatus), 4));
-+}
-+
-+u32 tmu_pkts_processed(u32 if_id)
-+{
-+ return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
-+ offsetof(struct pe_status, rx), 4));
-+}
-+
-+/**************************** UTIL ***************************/
-+
-+/* Resets UTIL block.
-+ */
-+void util_reset(void)
-+{
-+ writel(CORE_SW_RESET, UTIL_TX_CTRL);
-+}
-+
-+/* Initializes UTIL block.
-+ * @param[in] cfg UTIL configuration
-+ */
-+void util_init(struct util_cfg *cfg)
-+{
-+ writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
-+}
-+
-+/* Enables UTIL-PE core.
-+ *
-+ */
-+void util_enable(void)
-+{
-+ writel(CORE_ENABLE, UTIL_TX_CTRL);
-+}
-+
-+/* Disables UTIL-PE core.
-+ *
-+ */
-+void util_disable(void)
-+{
-+ writel(CORE_DISABLE, UTIL_TX_CTRL);
-+}
-+
-+/**************************** HIF ***************************/
-+/* Initializes HIF copy block.
-+ *
-+ */
-+void hif_init(void)
-+{
-+ /*Initialize HIF registers*/
-+ writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
-+ HIF_POLL_CTRL);
-+}
-+
-+/* Enable hif tx DMA and interrupt
-+ *
-+ */
-+void hif_tx_enable(void)
-+{
-+ writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
-+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
-+ HIF_INT_ENABLE);
-+}
-+
-+/* Disable hif tx DMA and interrupt
-+ *
-+ */
-+void hif_tx_disable(void)
-+{
-+ u32 hif_int;
-+
-+ writel(0, HIF_TX_CTRL);
-+
-+ hif_int = readl(HIF_INT_ENABLE);
-+	hif_int &= ~HIF_TXPKT_INT_EN;
-+ writel(hif_int, HIF_INT_ENABLE);
-+}
-+
-+/* Enable hif rx DMA and interrupt
-+ *
-+ */
-+void hif_rx_enable(void)
-+{
-+ hif_rx_dma_start();
-+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
-+ HIF_INT_ENABLE);
-+}
-+
-+/* Disable hif rx DMA and interrupt
-+ *
-+ */
-+void hif_rx_disable(void)
-+{
-+ u32 hif_int;
-+
-+ writel(0, HIF_RX_CTRL);
-+
-+ hif_int = readl(HIF_INT_ENABLE);
-+	hif_int &= ~HIF_RXPKT_INT_EN;
-+ writel(hif_int, HIF_INT_ENABLE);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hif.c
-@@ -0,0 +1,1060 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/interrupt.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/kthread.h>
-+#include <linux/slab.h>
-+
-+#include <linux/io.h>
-+#include <asm/irq.h>
-+
-+#include "pfe_mod.h"
-+
-+#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
-+
-+unsigned char napi_first_batch;
-+
-+static void pfe_tx_do_cleanup(unsigned long data);
-+
-+static int pfe_hif_alloc_descr(struct pfe_hif *hif)
-+{
-+ void *addr;
-+ dma_addr_t dma_addr;
-+ int err = 0;
-+
-+ pr_info("%s\n", __func__);
-+ addr = dma_alloc_coherent(pfe->dev,
-+ HIF_RX_DESC_NT * sizeof(struct hif_desc) +
-+ HIF_TX_DESC_NT * sizeof(struct hif_desc),
-+ &dma_addr, GFP_KERNEL);
-+
-+ if (!addr) {
-+ pr_err("%s: Could not allocate buffer descriptors!\n"
-+ , __func__);
-+ err = -ENOMEM;
-+ goto err0;
-+ }
-+
-+ hif->descr_baseaddr_p = dma_addr;
-+ hif->descr_baseaddr_v = addr;
-+ hif->rx_ring_size = HIF_RX_DESC_NT;
-+ hif->tx_ring_size = HIF_TX_DESC_NT;
-+
-+ return 0;
-+
-+err0:
-+ return err;
-+}
-+
-+#if defined(LS1012A_PFE_RESET_WA)
-+static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
-+{
-+ int ii;
-+ struct hif_desc *desc = hif->rx_base;
-+
-+ /*Mark all descriptors as LAST_BD */
-+ for (ii = 0; ii < hif->rx_ring_size; ii++) {
-+ desc->ctrl |= BD_CTRL_LAST_BD;
-+ desc++;
-+ }
-+}
-+
-+struct class_rx_hdr_t {
-+ u32 next_ptr; /* ptr to the start of the first DDR buffer */
-+ u16 length; /* total packet length */
-+ u16 phyno; /* input physical port number */
-+ u32 status; /* gemac status bits */
-+ u32 status2; /* reserved for software usage */
-+};
-+
-+/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
-+ * except overflow
-+ */
-+#define STATUS_BAD_FRAME_ERR BIT(16)
-+#define STATUS_LENGTH_ERR BIT(17)
-+#define STATUS_CRC_ERR BIT(18)
-+#define STATUS_TOO_SHORT_ERR BIT(19)
-+#define STATUS_TOO_LONG_ERR BIT(20)
-+#define STATUS_CODE_ERR BIT(21)
-+#define STATUS_MC_HASH_MATCH BIT(22)
-+#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
-+#define STATUS_UNICAST_HASH_MATCH BIT(24)
-+#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
-+#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
-+#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
-+#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
-+#define MIN_PKT_SIZE 64
-+
-+static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
-+{
-+ int i;
-+
-+ for (i = 0; i < len; i += sizeof(u32)) {
-+ *dst = htonl(*src);
-+ dst++; src++;
-+ }
-+}
-+
-+static void send_dummy_pkt_to_hif(void)
-+{
-+ void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
-+ u32 physaddr;
-+ struct class_rx_hdr_t local_hdr;
-+ static u32 dummy_pkt[] = {
-+ 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
-+ 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
-+ 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
-+ 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
-+
-+ ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
-+ if (!ddr_ptr)
-+ return;
-+
-+ lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
-+ if (!lmem_ptr)
-+ return;
-+
-+ pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
-+ physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
-+
-+ lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
-+
-+ local_hdr.phyno = htons(0); /* RX_PHY_0 */
-+ local_hdr.length = htons(MIN_PKT_SIZE);
-+
-+ local_hdr.next_ptr = htonl((u32)physaddr);
-+ /*Mark checksum is correct */
-+ local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
-+ STATUS_UDP_CHECKSUM_CORRECT |
-+ STATUS_TCP_CHECKSUM_CORRECT |
-+ STATUS_UNICAST_HASH_MATCH |
-+ STATUS_CUMULATIVE_ARC_HIT));
-+ copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
-+ sizeof(local_hdr));
-+
-+ copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
-+ 0x40);
-+
-+ writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
-+}
-+
-+void pfe_hif_rx_idle(struct pfe_hif *hif)
-+{
-+ int hif_stop_loop = 10;
-+ u32 rx_status;
-+
-+ pfe_hif_disable_rx_desc(hif);
-+ pr_info("Bringing hif to idle state...");
-+ writel(0, HIF_INT_ENABLE);
-+ /*If HIF Rx BDP is busy send a dummy packet */
-+ do {
-+ rx_status = readl(HIF_RX_STATUS);
-+ if (rx_status & BDP_CSR_RX_DMA_ACTV)
-+ send_dummy_pkt_to_hif();
-+
-+ usleep_range(100, 150);
-+ } while (--hif_stop_loop);
-+
-+ if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
-+ pr_info("Failed\n");
-+ else
-+ pr_info("Done\n");
-+}
-+#endif
-+
-+static void pfe_hif_free_descr(struct pfe_hif *hif)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ dma_free_coherent(pfe->dev,
-+ hif->rx_ring_size * sizeof(struct hif_desc) +
-+ hif->tx_ring_size * sizeof(struct hif_desc),
-+ hif->descr_baseaddr_v, hif->descr_baseaddr_p);
-+}
-+
-+void pfe_hif_desc_dump(struct pfe_hif *hif)
-+{
-+ struct hif_desc *desc;
-+ unsigned long desc_p;
-+ int ii = 0;
-+
-+ pr_info("%s\n", __func__);
-+
-+ desc = hif->rx_base;
-+ desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
-+ hif->descr_baseaddr_p);
-+
-+ pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
-+ for (ii = 0; ii < hif->rx_ring_size; ii++) {
-+ pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
-+ readl(&desc->status), readl(&desc->ctrl),
-+ readl(&desc->data), readl(&desc->next));
-+ desc++;
-+ }
-+
-+ desc = hif->tx_base;
-+ desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
-+ hif->descr_baseaddr_p);
-+
-+ pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
-+ for (ii = 0; ii < hif->tx_ring_size; ii++) {
-+ pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
-+ readl(&desc->status), readl(&desc->ctrl),
-+ readl(&desc->data), readl(&desc->next));
-+ desc++;
-+ }
-+}
-+
-+/* pfe_hif_release_buffers */
-+static void pfe_hif_release_buffers(struct pfe_hif *hif)
-+{
-+ struct hif_desc *desc;
-+ int i = 0;
-+
-+ hif->rx_base = hif->descr_baseaddr_v;
-+
-+ pr_info("%s\n", __func__);
-+
-+ /*Free Rx buffers */
-+ desc = hif->rx_base;
-+ for (i = 0; i < hif->rx_ring_size; i++) {
-+ if (readl(&desc->data)) {
-+ if ((i < hif->shm->rx_buf_pool_cnt) &&
-+ (!hif->shm->rx_buf_pool[i])) {
-+ /*
-+ * dma_unmap_single(hif->dev, desc->data,
-+ * hif->rx_buf_len[i], DMA_FROM_DEVICE);
-+ */
-+ dma_unmap_single(hif->dev,
-+ DDR_PFE_TO_PHYS(
-+ readl(&desc->data)),
-+ hif->rx_buf_len[i],
-+ DMA_FROM_DEVICE);
-+ hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
-+ } else {
-+ pr_err("%s: buffer pool already full\n"
-+ , __func__);
-+ }
-+ }
-+
-+ writel(0, &desc->data);
-+ writel(0, &desc->status);
-+ writel(0, &desc->ctrl);
-+ desc++;
-+ }
-+}
-+
-+/*
-+ * pfe_hif_init_buffers
-+ * This function initializes the HIF Rx/Tx ring descriptors and
-+ * initialize Rx queue with buffers.
-+ */
-+static int pfe_hif_init_buffers(struct pfe_hif *hif)
-+{
-+ struct hif_desc *desc, *first_desc_p;
-+ u32 data;
-+ int i = 0;
-+
-+ pr_info("%s\n", __func__);
-+
-+	/* Check that enough Rx buffers are available in the shared memory */
-+ if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
-+ return -ENOMEM;
-+
-+ hif->rx_base = hif->descr_baseaddr_v;
-+ memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
-+
-+ /*Initialize Rx descriptors */
-+ desc = hif->rx_base;
-+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
-+
-+ for (i = 0; i < hif->rx_ring_size; i++) {
-+ /* Initialize Rx buffers from the shared memory */
-+
-+ data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
-+ pfe_pkt_size, DMA_FROM_DEVICE);
-+ hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
-+ hif->rx_buf_len[i] = pfe_pkt_size;
-+ hif->shm->rx_buf_pool[i] = NULL;
-+
-+ if (likely(dma_mapping_error(hif->dev, data) == 0)) {
-+ writel(DDR_PHYS_TO_PFE(data), &desc->data);
-+ } else {
-+ pr_err("%s : low on mem\n", __func__);
-+
-+ goto err;
-+ }
-+
-+ writel(0, &desc->status);
-+
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ wmb();
-+
-+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
-+ | BD_CTRL_DIR | BD_CTRL_DESC_EN
-+ | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
-+
-+ /* Chain descriptors */
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
-+ desc++;
-+ }
-+
-+ /* Overwrite last descriptor to chain it to first one*/
-+ desc--;
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
-+
-+ hif->rxtoclean_index = 0;
-+
-+ /*Initialize Rx buffer descriptor ring base address */
-+ writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
-+
-+ hif->tx_base = hif->rx_base + hif->rx_ring_size;
-+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
-+ hif->rx_ring_size;
-+ memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
-+
-+ /*Initialize tx descriptors */
-+ desc = hif->tx_base;
-+
-+ for (i = 0; i < hif->tx_ring_size; i++) {
-+ /* Chain descriptors */
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
-+ writel(0, &desc->ctrl);
-+ desc++;
-+ }
-+
-+ /* Overwrite last descriptor to chain it to first one */
-+ desc--;
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
-+ hif->txavail = hif->tx_ring_size;
-+ hif->txtosend = 0;
-+ hif->txtoclean = 0;
-+ hif->txtoflush = 0;
-+
-+ /*Initialize Tx buffer descriptor ring base address */
-+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
-+
-+ return 0;
-+
-+err:
-+ pfe_hif_release_buffers(hif);
-+ return -ENOMEM;
-+}
-+
-+/*
-+ * pfe_hif_client_register
-+ *
-+ * This function is used to register a client driver with the HIF driver.
-+ *
-+ * Return value:
-+ * 0 - on Successful registration
-+ */
-+static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
-+ struct hif_client_shm *client_shm)
-+{
-+ struct hif_client *client = &hif->client[client_id];
-+ u32 i, cnt;
-+ struct rx_queue_desc *rx_qbase;
-+ struct tx_queue_desc *tx_qbase;
-+ struct hif_rx_queue *rx_queue;
-+ struct hif_tx_queue *tx_queue;
-+ int err = 0;
-+
-+ pr_info("%s\n", __func__);
-+
-+ spin_lock_bh(&hif->tx_lock);
-+
-+ if (test_bit(client_id, &hif->shm->g_client_status[0])) {
-+ pr_err("%s: client %d already registered\n",
-+ __func__, client_id);
-+ err = -1;
-+ goto unlock;
-+ }
-+
-+ memset(client, 0, sizeof(struct hif_client));
-+
-+ /* Initialize client Rx queues baseaddr, size */
-+
-+ cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
-+	/* Check if client is requesting more queues than supported */
-+ if (cnt > HIF_CLIENT_QUEUES_MAX)
-+ cnt = HIF_CLIENT_QUEUES_MAX;
-+
-+ client->rx_qn = cnt;
-+ rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
-+ for (i = 0; i < cnt; i++) {
-+ rx_queue = &client->rx_q[i];
-+ rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
-+ rx_queue->size = client_shm->rx_qsize;
-+ rx_queue->write_idx = 0;
-+ }
-+
-+ /* Initialize client Tx queues baseaddr, size */
-+ cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
-+
-+	/* Check if client is requesting more queues than supported */
-+ if (cnt > HIF_CLIENT_QUEUES_MAX)
-+ cnt = HIF_CLIENT_QUEUES_MAX;
-+
-+ client->tx_qn = cnt;
-+ tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
-+ for (i = 0; i < cnt; i++) {
-+ tx_queue = &client->tx_q[i];
-+ tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
-+ tx_queue->size = client_shm->tx_qsize;
-+ tx_queue->ack_idx = 0;
-+ }
-+
-+ set_bit(client_id, &hif->shm->g_client_status[0]);
-+
-+unlock:
-+ spin_unlock_bh(&hif->tx_lock);
-+
-+ return err;
-+}
-+
-+/*
-+ * pfe_hif_client_unregister
-+ *
-+ * This function is used to unregister a client from the HIF driver.
-+ *
-+ */
-+static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ /*
-+ * Mark client as no longer available (which prevents further packet
-+ * receive for this client)
-+ */
-+ spin_lock_bh(&hif->tx_lock);
-+
-+ if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
-+ pr_err("%s: client %d not registered\n", __func__,
-+ client_id);
-+
-+ spin_unlock_bh(&hif->tx_lock);
-+ return;
-+ }
-+
-+ clear_bit(client_id, &hif->shm->g_client_status[0]);
-+
-+ spin_unlock_bh(&hif->tx_lock);
-+}
-+
-+/*
-+ * client_put_rxpacket-
-+ * This function puts the Rx pkt in the given client Rx queue.
-+ * It actually swaps the Rx pkt into the client Rx descriptor buffer
-+ * and returns the free buffer from it.
-+ *
-+ * If the function returns NULL, the client Rx queue is full and the
-+ * packet could not be sent to the client queue.
-+ */
-+static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
-+ u32 flags, u32 client_ctrl, u32 *rem_len)
-+{
-+ void *free_pkt = NULL;
-+ struct rx_queue_desc *desc = queue->base + queue->write_idx;
-+
-+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
-+ if (page_mode) {
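-+			/* In page mode, reuse the unused remainder of the current page as the
-+			 * replacement Rx buffer when it is big enough, else allocate a new page.
-+			 */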
-+ int rem_page_size = PAGE_SIZE -
-+ PRESENT_OFST_IN_PAGE(pkt);
-+ int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
-+ pfe_pkt_headroom);
-+ *rem_len = (rem_page_size - cur_pkt_size);
-+ if (*rem_len) {
-+ free_pkt = pkt + cur_pkt_size;
-+ get_page(virt_to_page(free_pkt));
-+ } else {
-+ free_pkt = (void
-+ *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
-+ *rem_len = pfe_pkt_size;
-+ }
-+ } else {
-+ free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
-+ GFP_DMA_PFE);
-+ *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
-+ }
-+
-+ if (free_pkt) {
-+ desc->data = pkt;
-+ desc->client_ctrl = client_ctrl;
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ smp_wmb();
-+ writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
-+ queue->write_idx = (queue->write_idx + 1)
-+ & (queue->size - 1);
-+
-+ free_pkt += pfe_pkt_headroom;
-+ }
-+ }
-+
-+ return free_pkt;
-+}
-+
-+/*
-+ * pfe_hif_rx_process-
-+ * This function does PFE HIF Rx queue processing.
-+ * It dequeues packets from the Rx queue and sends them to the corresponding
-+ * client queue.
-+ */
-+static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
-+{
-+ struct hif_desc *desc;
-+ struct hif_hdr *pkt_hdr;
-+ struct __hif_hdr hif_hdr;
-+ void *free_buf;
-+ int rtc, len, rx_processed = 0;
-+ struct __hif_desc local_desc;
-+ int flags;
-+ unsigned int desc_p;
-+ unsigned int buf_size = 0;
-+
-+ spin_lock_bh(&hif->lock);
-+
-+ rtc = hif->rxtoclean_index;
-+
-+ while (rx_processed < budget) {
-+ desc = hif->rx_base + rtc;
-+
-+ __memcpy12(&local_desc, desc);
-+
-+ /* ACK pending Rx interrupt */
-+ if (local_desc.ctrl & BD_CTRL_DESC_EN) {
-+ writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
-+
-+ if (rx_processed == 0) {
-+ if (napi_first_batch == 1) {
-+ desc_p = hif->descr_baseaddr_p +
-+ ((unsigned long int)(desc) -
-+ (unsigned long
-+ int)hif->descr_baseaddr_v);
-+ napi_first_batch = 0;
-+ }
-+ }
-+
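-+			/* Re-read the descriptor after acking; if hardware still owns it, stop here */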
-+ __memcpy12(&local_desc, desc);
-+
-+ if (local_desc.ctrl & BD_CTRL_DESC_EN)
-+ break;
-+ }
-+
-+ napi_first_batch = 0;
-+
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_DESC_COUNT]++;
-+#endif
-+ len = BD_BUF_LEN(local_desc.ctrl);
-+ /*
-+ * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
-+ * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
-+ */
-+ dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
-+ hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
-+
-+ pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
-+
-+ /* Track last HIF header received */
-+ if (!hif->started) {
-+ hif->started = 1;
-+
-+ __memcpy8(&hif_hdr, pkt_hdr);
-+
-+ hif->qno = hif_hdr.hdr.q_num;
-+ hif->client_id = hif_hdr.hdr.client_id;
-+ hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
-+ hif_hdr.hdr.client_ctrl;
-+ flags = CL_DESC_FIRST;
-+
-+ } else {
-+ flags = 0;
-+ }
-+
-+ if (local_desc.ctrl & BD_CTRL_LIFM)
-+ flags |= CL_DESC_LAST;
-+
-+ /* Check for valid client id and still registered */
-+ if ((hif->client_id >= HIF_CLIENTS_MAX) ||
-+ !(test_bit(hif->client_id,
-+ &hif->shm->g_client_status[0]))) {
-+ printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
-+ __func__,
-+ hif->client_id,
-+ hif->qno);
-+
-+ free_buf = pkt_hdr;
-+
-+ goto pkt_drop;
-+ }
-+
-+		/* Check for a valid queue number */
-+ if (hif->client[hif->client_id].rx_qn <= hif->qno) {
-+ pr_info("%s: packet with invalid queue: %d\n"
-+ , __func__, hif->qno);
-+ hif->qno = 0;
-+ }
-+
-+ free_buf =
-+ client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
-+ (void *)pkt_hdr, len, flags,
-+ hif->client_ctrl, &buf_size);
-+
-+ hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
-+ hif->qno);
-+
-+ if (unlikely(!free_buf)) {
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
-+#endif
-+ /*
-+ * If we want to keep in polling mode to retry later,
-+ * we need to tell napi that we consumed
-+ * the full budget or we will hit a livelock scenario.
-+ * The core code keeps this napi instance
-+ * at the head of the list and none of the other
-+ * instances get to run
-+ */
-+ rx_processed = budget;
-+
-+ if (flags & CL_DESC_FIRST)
-+ hif->started = 0;
-+
-+ break;
-+ }
-+
-+pkt_drop:
-+ /*Fill free buffer in the descriptor */
-+ hif->rx_buf_addr[rtc] = free_buf;
-+ hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
-+ writel((DDR_PHYS_TO_PFE
-+ ((u32)dma_map_single(hif->dev,
-+ free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
-+ &desc->data);
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ wmb();
-+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
-+ BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
-+ &desc->ctrl);
-+
-+ rtc = (rtc + 1) & (hif->rx_ring_size - 1);
-+
-+ if (local_desc.ctrl & BD_CTRL_LIFM) {
-+ if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
-+ rx_processed++;
-+
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_PACKET_COUNT]++;
-+#endif
-+ }
-+ hif->started = 0;
-+ }
-+ }
-+
-+ hif->rxtoclean_index = rtc;
-+ spin_unlock_bh(&hif->lock);
-+
-+ /* we made some progress, re-start rx dma in case it stopped */
-+ hif_rx_dma_start();
-+
-+ return rx_processed;
-+}
-+
-+/*
-+ * client_ack_txpacket-
-+ * This function acks the Tx packet in the given client Tx queue by resetting
-+ * the ownership bit in the descriptor.
-+ */
-+static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
-+ unsigned int q_no)
-+{
-+ struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
-+ struct tx_queue_desc *desc = queue->base + queue->ack_idx;
-+
-+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
-+ writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
-+ queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
-+
-+ return 0;
-+
-+ } else {
-+ /*This should not happen */
-+ pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
-+ hif->txtosend, hif->txtoclean, hif->txavail,
-+ client_id, q_no, queue, queue->ack_idx);
-+ WARN(1, "%s: doesn't own this descriptor", __func__);
-+ return 1;
-+ }
-+}
-+
-+void __hif_tx_done_process(struct pfe_hif *hif, int count)
-+{
-+ struct hif_desc *desc;
-+ struct hif_desc_sw *desc_sw;
-+ int ttc, tx_avl;
-+ int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
-+
-+ ttc = hif->txtoclean;
-+ tx_avl = hif->txavail;
-+
-+ while ((tx_avl < hif->tx_ring_size) && count--) {
-+ desc = hif->tx_base + ttc;
-+
-+ if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
-+ break;
-+
-+ desc_sw = &hif->tx_sw_queue[ttc];
-+
-+ if (desc_sw->data) {
-+ /*
-+ * dmap_unmap_single(hif->dev, desc_sw->data,
-+ * desc_sw->len, DMA_TO_DEVICE);
-+ */
-+ dma_unmap_single(hif->dev, desc_sw->data,
-+ desc_sw->len, DMA_TO_DEVICE);
-+ }
-+
-+ if (desc_sw->client_id > HIF_CLIENTS_MAX)
-+ pr_err("Invalid cl id %d\n", desc_sw->client_id);
-+
-+ pkts_done[desc_sw->client_id]++;
-+
-+ client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
-+
-+ ttc = (ttc + 1) & (hif->tx_ring_size - 1);
-+ tx_avl++;
-+ }
-+
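-+	/* Signal each client once if any of its packets completed in this pass */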
-+ if (pkts_done[0])
-+ hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
-+ if (pkts_done[1])
-+ hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
-+
-+ hif->txtoclean = ttc;
-+ hif->txavail = tx_avl;
-+
-+ if (!count) {
-+ tasklet_schedule(&hif->tx_cleanup_tasklet);
-+ } else {
-+ /*Enable Tx done interrupt */
-+ writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
-+ HIF_INT_ENABLE);
-+ }
-+}
-+
-+static void pfe_tx_do_cleanup(unsigned long data)
-+{
-+ struct pfe_hif *hif = (struct pfe_hif *)data;
-+
-+ writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
-+
-+ hif_tx_done_process(hif, 64);
-+}
-+
-+/*
-+ * __hif_xmit_pkt -
-+ * This function puts one packet in the HIF Tx queue
-+ */
-+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
-+ q_no, void *data, u32 len, unsigned int flags)
-+{
-+ struct hif_desc *desc;
-+ struct hif_desc_sw *desc_sw;
-+
-+ desc = hif->tx_base + hif->txtosend;
-+ desc_sw = &hif->tx_sw_queue[hif->txtosend];
-+
-+ desc_sw->len = len;
-+ desc_sw->client_id = client_id;
-+ desc_sw->q_no = q_no;
-+ desc_sw->flags = flags;
-+
-+ if (flags & HIF_DONT_DMA_MAP) {
-+ desc_sw->data = 0;
-+ writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
-+ } else {
-+ desc_sw->data = dma_map_single(hif->dev, data, len,
-+ DMA_TO_DEVICE);
-+ writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
-+ }
-+
-+ hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
-+ hif->txavail--;
-+
-+ if ((!((flags & HIF_DATA_VALID) && (flags &
-+ HIF_LAST_BUFFER))))
-+ goto skip_tx;
-+
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ wmb();
-+
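-+	/* Flush all queued descriptors up to txtosend, enabling DESC_EN only now
-+	 * that the payload writes are visible.
-+	 */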
-+ do {
-+ desc_sw = &hif->tx_sw_queue[hif->txtoflush];
-+ desc = hif->tx_base + hif->txtoflush;
-+
-+ if (desc_sw->flags & HIF_LAST_BUFFER) {
-+ writel((BD_CTRL_LIFM |
-+ BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
-+ | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
-+ BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
-+ &desc->ctrl);
-+ } else {
-+ writel((BD_CTRL_DESC_EN |
-+ BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
-+ }
-+ hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
-+ } while (hif->txtoflush != hif->txtosend);
-+
-+skip_tx:
-+ return;
-+}
-+
-+static irqreturn_t wol_isr(int irq, void *dev_id)
-+{
-+ pr_info("WoL\n");
-+ gemac_set_wol(EMAC1_BASE_ADDR, 0);
-+ gemac_set_wol(EMAC2_BASE_ADDR, 0);
-+ return IRQ_HANDLED;
-+}
-+
-+/*
-+ * hif_isr-
-+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
-+ */
-+static irqreturn_t hif_isr(int irq, void *dev_id)
-+{
-+ struct pfe_hif *hif = (struct pfe_hif *)dev_id;
-+ int int_status;
-+ int int_enable_mask;
-+
-+ /*Read hif interrupt source register */
-+ int_status = readl_relaxed(HIF_INT_SRC);
-+ int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
-+
-+ if ((int_status & HIF_INT) == 0)
-+ return IRQ_NONE;
-+
-+ int_status &= ~(HIF_INT);
-+
-+ if (int_status & HIF_RXPKT_INT) {
-+ int_status &= ~(HIF_RXPKT_INT);
-+ int_enable_mask &= ~(HIF_RXPKT_INT);
-+
-+ napi_first_batch = 1;
-+
-+ if (napi_schedule_prep(&hif->napi)) {
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_SCHED_COUNT]++;
-+#endif
-+ __napi_schedule(&hif->napi);
-+ }
-+ }
-+
-+ if (int_status & HIF_TXPKT_INT) {
-+ int_status &= ~(HIF_TXPKT_INT);
-+ int_enable_mask &= ~(HIF_TXPKT_INT);
-+ /* Schedule tx cleanup tasklet */
-+ tasklet_schedule(&hif->tx_cleanup_tasklet);
-+ }
-+
-+ /*Disable interrupts, they will be enabled after they are serviced */
-+ writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
-+
-+ if (int_status) {
-+ pr_info("%s : Invalid interrupt : %d\n", __func__,
-+ int_status);
-+ writel(int_status, HIF_INT_SRC);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
-+{
-+ unsigned int client_id = data1;
-+
-+ if (client_id >= HIF_CLIENTS_MAX) {
-+ pr_err("%s: client id %d out of bounds\n", __func__,
-+ client_id);
-+ return;
-+ }
-+
-+ switch (req) {
-+ case REQUEST_CL_REGISTER:
-+ /* Request for register a client */
-+ pr_info("%s: register client_id %d\n",
-+ __func__, client_id);
-+ pfe_hif_client_register(hif, client_id, (struct
-+ hif_client_shm *)&hif->shm->client[client_id]);
-+ break;
-+
-+ case REQUEST_CL_UNREGISTER:
-+ pr_info("%s: unregister client_id %d\n",
-+ __func__, client_id);
-+
-+ /* Request for unregister a client */
-+ pfe_hif_client_unregister(hif, client_id);
-+
-+ break;
-+
-+ default:
-+ pr_err("%s: unsupported request %d\n",
-+ __func__, req);
-+ break;
-+ }
-+
-+ /*
-+ * Process client Tx queues.
-+ * There is currently no check for pending Tx packets.
-+ */
-+}
-+
-+/*
-+ * pfe_hif_rx_poll
-+ * This function is NAPI poll function to process HIF Rx queue.
-+ */
-+static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
-+{
-+ struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
-+ int work_done;
-+
-+#ifdef HIF_NAPI_STATS
-+ hif->napi_counters[NAPI_POLL_COUNT]++;
-+#endif
-+
-+ work_done = pfe_hif_rx_process(hif, budget);
-+
-+ if (work_done < budget) {
-+ napi_complete(napi);
-+ writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
-+ HIF_INT_ENABLE);
-+ }
-+#ifdef HIF_NAPI_STATS
-+ else
-+ hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
-+#endif
-+
-+ return work_done;
-+}
-+
-+/*
-+ * pfe_hif_init
-+ * This function initializes the base addresses, IRQs, etc.
-+ */
-+int pfe_hif_init(struct pfe *pfe)
-+{
-+ struct pfe_hif *hif = &pfe->hif;
-+ int err;
-+
-+ pr_info("%s\n", __func__);
-+
-+ hif->dev = pfe->dev;
-+ hif->irq = pfe->hif_irq;
-+
-+ err = pfe_hif_alloc_descr(hif);
-+ if (err)
-+ goto err0;
-+
-+ if (pfe_hif_init_buffers(hif)) {
-+ pr_err("%s: Could not initialize buffer descriptors\n"
-+ , __func__);
-+ err = -ENOMEM;
-+ goto err1;
-+ }
-+
-+ /* Initialize NAPI for Rx processing */
-+ init_dummy_netdev(&hif->dummy_dev);
-+ netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
-+ HIF_RX_POLL_WEIGHT);
-+ napi_enable(&hif->napi);
-+
-+ spin_lock_init(&hif->tx_lock);
-+ spin_lock_init(&hif->lock);
-+
-+ hif_init();
-+ hif_rx_enable();
-+ hif_tx_enable();
-+
-+ /* Disable tx done interrupt */
-+ writel(HIF_INT_MASK, HIF_INT_ENABLE);
-+
-+ gpi_enable(HGPI_BASE_ADDR);
-+
-+ err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
-+ if (err) {
-+ pr_err("%s: failed to get the hif IRQ = %d\n",
-+ __func__, hif->irq);
-+ goto err1;
-+ }
-+
-+ err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
-+ if (err) {
-+ pr_err("%s: failed to get the wol IRQ = %d\n",
-+ __func__, pfe->wol_irq);
-+ goto err1;
-+ }
-+
-+ tasklet_init(&hif->tx_cleanup_tasklet,
-+ (void(*)(unsigned long))pfe_tx_do_cleanup,
-+ (unsigned long)hif);
-+
-+ return 0;
-+err1:
-+ pfe_hif_free_descr(hif);
-+err0:
-+ return err;
-+}
-+
-+/* pfe_hif_exit- */
-+void pfe_hif_exit(struct pfe *pfe)
-+{
-+ struct pfe_hif *hif = &pfe->hif;
-+
-+ pr_info("%s\n", __func__);
-+
-+ tasklet_kill(&hif->tx_cleanup_tasklet);
-+
-+ spin_lock_bh(&hif->lock);
-+ hif->shm->g_client_status[0] = 0;
-+ /* Make sure all clients are disabled*/
-+ hif->shm->g_client_status[1] = 0;
-+
-+ spin_unlock_bh(&hif->lock);
-+
-+ /*Disable Rx/Tx */
-+ gpi_disable(HGPI_BASE_ADDR);
-+ hif_rx_disable();
-+ hif_tx_disable();
-+
-+ napi_disable(&hif->napi);
-+ netif_napi_del(&hif->napi);
-+
-+ free_irq(pfe->wol_irq, pfe);
-+ free_irq(hif->irq, hif);
-+
-+ pfe_hif_release_buffers(hif);
-+ pfe_hif_free_descr(hif);
-+}
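Every ring index used in pfe_hif.c above (txtosend, txtoclean, txtoflush and the per-client ack_idx) advances with "(idx + 1) & (size - 1)", which only wraps correctly when the ring size is a power of two (HIF_RX_DESC_NT = 256 and HIF_TX_DESC_NT = 2048 both are). A minimal sketch of that bookkeeping, for illustration only and not part of the patch; the ring/ring_produce/ring_consume names are invented here:

	/* Illustration only -- power-of-two ring bookkeeping as used above. */
	struct ring {
		unsigned int size;	/* must be a power of two */
		unsigned int head;	/* producer index (cf. txtosend) */
		unsigned int tail;	/* consumer index (cf. txtoclean) */
		unsigned int avail;	/* free slots (cf. txavail) */
	};

	static inline void ring_produce(struct ring *r)
	{
		r->head = (r->head + 1) & (r->size - 1);
		r->avail--;
	}

	static inline void ring_consume(struct ring *r)
	{
		r->tail = (r->tail + 1) & (r->size - 1);
		r->avail++;
	}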
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hif.h
-@@ -0,0 +1,200 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_HIF_H_
-+#define _PFE_HIF_H_
-+
-+#include <linux/netdevice.h>
-+#include <linux/interrupt.h>
-+
-+#define HIF_NAPI_STATS
-+
-+#define HIF_CLIENT_QUEUES_MAX 16
-+#define HIF_RX_POLL_WEIGHT 64
-+
-+#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
-+#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
-+#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
-+ & HIF_RX_PKT_MIN_SIZE_MASK)
-+#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
-+ - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
-+
-+enum {
-+ NAPI_SCHED_COUNT = 0,
-+ NAPI_POLL_COUNT,
-+ NAPI_PACKET_COUNT,
-+ NAPI_DESC_COUNT,
-+ NAPI_FULL_BUDGET_COUNT,
-+ NAPI_CLIENT_FULL_COUNT,
-+ NAPI_MAX_COUNT
-+};
-+
-+/*
-+ * HIF_TX_DESC_NT should always be greater than 4,
-+ * otherwise HIF_TX_POLL_MARK will become zero.
-+ */
-+#define HIF_RX_DESC_NT 256
-+#define HIF_TX_DESC_NT 2048
-+
-+#define HIF_FIRST_BUFFER BIT(0)
-+#define HIF_LAST_BUFFER BIT(1)
-+#define HIF_DONT_DMA_MAP BIT(2)
-+#define HIF_DATA_VALID BIT(3)
-+#define HIF_TSO BIT(4)
-+
-+enum {
-+ PFE_CL_GEM0 = 0,
-+ PFE_CL_GEM1,
-+ HIF_CLIENTS_MAX
-+};
-+
-+/*structure to store client queue info */
-+struct hif_rx_queue {
-+ struct rx_queue_desc *base;
-+ u32 size;
-+ u32 write_idx;
-+};
-+
-+struct hif_tx_queue {
-+ struct tx_queue_desc *base;
-+ u32 size;
-+ u32 ack_idx;
-+};
-+
-+/*Structure to store the client info */
-+struct hif_client {
-+ int rx_qn;
-+ struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
-+ int tx_qn;
-+ struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
-+};
-+
-+/*HIF hardware buffer descriptor */
-+struct hif_desc {
-+ u32 ctrl;
-+ u32 status;
-+ u32 data;
-+ u32 next;
-+};
-+
-+struct __hif_desc {
-+ u32 ctrl;
-+ u32 status;
-+ u32 data;
-+};
-+
-+struct hif_desc_sw {
-+ dma_addr_t data;
-+ u16 len;
-+ u8 client_id;
-+ u8 q_no;
-+ u16 flags;
-+};
-+
-+struct hif_hdr {
-+ u8 client_id;
-+ u8 q_num;
-+ u16 client_ctrl;
-+ u16 client_ctrl1;
-+};
-+
-+struct __hif_hdr {
-+ union {
-+ struct hif_hdr hdr;
-+ u32 word[2];
-+ };
-+};
-+
-+struct hif_ipsec_hdr {
-+ u16 sa_handle[2];
-+} __packed;
-+
-+/* HIF_CTRL_TX... defines */
-+#define HIF_CTRL_TX_CHECKSUM BIT(2)
-+
-+/* HIF_CTRL_RX... defines */
-+#define HIF_CTRL_RX_OFFSET_OFST (24)
-+#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
-+#define HIF_CTRL_RX_CONTINUED BIT(1)
-+
-+struct pfe_hif {
-+ /* To store registered clients in hif layer */
-+ struct hif_client client[HIF_CLIENTS_MAX];
-+ struct hif_shm *shm;
-+ int irq;
-+
-+ void *descr_baseaddr_v;
-+ unsigned long descr_baseaddr_p;
-+
-+ struct hif_desc *rx_base;
-+ u32 rx_ring_size;
-+ u32 rxtoclean_index;
-+ void *rx_buf_addr[HIF_RX_DESC_NT];
-+ int rx_buf_len[HIF_RX_DESC_NT];
-+ unsigned int qno;
-+ unsigned int client_id;
-+ unsigned int client_ctrl;
-+ unsigned int started;
-+
-+ struct hif_desc *tx_base;
-+ u32 tx_ring_size;
-+ u32 txtosend;
-+ u32 txtoclean;
-+ u32 txavail;
-+ u32 txtoflush;
-+ struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
-+
-+/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
-+ spinlock_t tx_lock;
-+/* lock synchronizes hif rx queue processing */
-+ spinlock_t lock;
-+ struct net_device dummy_dev;
-+ struct napi_struct napi;
-+ struct device *dev;
-+
-+#ifdef HIF_NAPI_STATS
-+ unsigned int napi_counters[NAPI_MAX_COUNT];
-+#endif
-+ struct tasklet_struct tx_cleanup_tasklet;
-+};
-+
-+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
-+ q_no, void *data, u32 len, unsigned int flags);
-+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
-+ void *data, unsigned int len);
-+void __hif_tx_done_process(struct pfe_hif *hif, int count);
-+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
-+ data2);
-+int pfe_hif_init(struct pfe *pfe);
-+void pfe_hif_exit(struct pfe *pfe);
-+void pfe_hif_rx_idle(struct pfe_hif *hif);
-+static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
-+{
-+ spin_lock_bh(&hif->tx_lock);
-+ __hif_tx_done_process(hif, count);
-+ spin_unlock_bh(&hif->tx_lock);
-+}
-+
-+static inline void hif_tx_lock(struct pfe_hif *hif)
-+{
-+ spin_lock_bh(&hif->tx_lock);
-+}
-+
-+static inline void hif_tx_unlock(struct pfe_hif *hif)
-+{
-+ spin_unlock_bh(&hif->tx_lock);
-+}
-+
-+static inline int __hif_tx_avail(struct pfe_hif *hif)
-+{
-+ return hif->txavail;
-+}
-+
-+#define __memcpy8(dst, src) memcpy(dst, src, 8)
-+#define __memcpy12(dst, src) memcpy(dst, src, 12)
-+#define __memcpy(dst, src, len) memcpy(dst, src, len)
-+
-+#endif /* _PFE_HIF_H_ */
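The double-underscore primitives declared above assume the caller already holds tx_lock; hif_tx_lock()/hif_tx_unlock() and __hif_tx_avail() exist for exactly that pattern. A hypothetical caller (illustration only; example_xmit is not part of the driver) might look roughly like:

	/* Hypothetical caller -- illustration only, not part of the driver. */
	static int example_xmit(struct pfe_hif *hif, unsigned int client_id,
				unsigned int q_no, void *data, u32 len)
	{
		int ret = 0;

		hif_tx_lock(hif);
		if (__hif_tx_avail(hif) > 0)
			__hif_xmit_pkt(hif, client_id, q_no, data, len,
				       HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
				       HIF_DATA_VALID);
		else
			ret = -EBUSY;
		hif_tx_unlock(hif);

		return ret;
	}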
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
-@@ -0,0 +1,628 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/workqueue.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/sched.h>
-+#include <linux/skbuff.h>
-+#include <linux/moduleparam.h>
-+#include <linux/cpu.h>
-+
-+#include "pfe_mod.h"
-+#include "pfe_hif.h"
-+#include "pfe_hif_lib.h"
-+
-+unsigned int lro_mode;
-+unsigned int page_mode;
-+unsigned int tx_qos = 1;
-+module_param(tx_qos, uint, 0444);
-+MODULE_PARM_DESC(tx_qos, "0: disable,\n"
-+ "1: enable (default), guarantee no packet drop at TMU level\n");
-+unsigned int pfe_pkt_size;
-+unsigned int pfe_pkt_headroom;
-+unsigned int emac_txq_cnt;
-+
-+/*
-+ * @pfe_hif_lib.c.
-+ * Common functions used by HIF client drivers
-+ */
-+
-+/*HIF shared memory Global variable */
-+struct hif_shm ghif_shm;
-+
-+/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
-+ * This function should be called after pfe_hif_exit
-+ *
-+ * @param[in] hif_shm Shared memory address location in DDR
-+ */
-+static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
-+{
-+ int i;
-+ void *pkt;
-+
-+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
-+ pkt = hif_shm->rx_buf_pool[i];
-+ if (pkt) {
-+ hif_shm->rx_buf_pool[i] = NULL;
-+ pkt -= pfe_pkt_headroom;
-+
-+ if (page_mode)
-+ put_page(virt_to_page(pkt));
-+ else
-+ kfree(pkt);
-+ }
-+ }
-+}
-+
-+/* Initialize shared memory used between HIF driver and clients,
-+ * allocate rx_buffer_pool required for HIF Rx descriptors.
-+ * This function should be called before initializing HIF driver.
-+ *
-+ * @param[in] hif_shm Shared memory address location in DDR
-+ * @return 0 on success, <0 on failure to initialize
-+ */
-+static int pfe_hif_shm_init(struct hif_shm *hif_shm)
-+{
-+ int i;
-+ void *pkt;
-+
-+ memset(hif_shm, 0, sizeof(struct hif_shm));
-+ hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
-+
-+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
-+ if (page_mode) {
-+ pkt = (void *)__get_free_page(GFP_KERNEL |
-+ GFP_DMA_PFE);
-+ } else {
-+ pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
-+ }
-+
-+ if (pkt)
-+ hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
-+ else
-+ goto err0;
-+ }
-+
-+ return 0;
-+
-+err0:
-+ pr_err("%s Low memory\n", __func__);
-+ pfe_hif_shm_clean(hif_shm);
-+ return -ENOMEM;
-+}
-+
-+/* This function sends an indication to the HIF driver
-+ *
-+ * @param[in] hif hif context
-+ */
-+static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
-+ data2)
-+{
-+ hif_process_client_req(hif, req, data1, data2);
-+}
-+
-+void hif_lib_indicate_client(int client_id, int event_type, int qno)
-+{
-+ struct hif_client_s *client = pfe->hif_client[client_id];
-+
-+ if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
-+ HIF_CLIENT_QUEUES_MAX))
-+ return;
-+
-+ if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
-+ client->event_handler(client->priv, event_type, qno);
-+}
-+
-+/* This function releases the Rx queue descriptor memory and pre-filled buffers
-+ *
-+ * @param[in] client hif_client context
-+ */
-+static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
-+{
-+ struct rx_queue_desc *desc;
-+ int qno, ii;
-+ void *buf;
-+
-+ for (qno = 0; qno < client->rx_qn; qno++) {
-+ desc = client->rx_q[qno].base;
-+
-+ for (ii = 0; ii < client->rx_q[qno].size; ii++) {
-+ buf = (void *)desc->data;
-+ if (buf) {
-+ buf -= pfe_pkt_headroom;
-+
-+ if (page_mode)
-+ free_page((unsigned long)buf);
-+ else
-+ kfree(buf);
-+
-+ desc->ctrl = 0;
-+ }
-+
-+ desc++;
-+ }
-+ }
-+
-+ kfree(client->rx_qbase);
-+}
-+
-+/* This function allocates memory for the rxq descriptors and pre-fills the
-+ * rx queues with buffers.
-+ * @param[in] client client context
-+ * @param[in] q_size size of the rxQ, all queues are of same size
-+ */
-+static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
-+ q_size)
-+{
-+ struct rx_queue_desc *desc;
-+ struct hif_client_rx_queue *queue;
-+ int ii, qno;
-+
-+ /*Allocate memory for the client queues */
-+ client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
-+ rx_queue_desc), GFP_KERNEL);
-+ if (!client->rx_qbase)
-+ goto err;
-+
-+ for (qno = 0; qno < client->rx_qn; qno++) {
-+ queue = &client->rx_q[qno];
-+
-+ queue->base = client->rx_qbase + qno * q_size * sizeof(struct
-+ rx_queue_desc);
-+ queue->size = q_size;
-+ queue->read_idx = 0;
-+ queue->write_idx = 0;
-+
-+ pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
-+ queue->base, queue->size);
-+ }
-+
-+ for (qno = 0; qno < client->rx_qn; qno++) {
-+ queue = &client->rx_q[qno];
-+ desc = queue->base;
-+
-+ for (ii = 0; ii < queue->size; ii++) {
-+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
-+ CL_DESC_OWN;
-+ desc++;
-+ }
-+ }
-+
-+ return 0;
-+
-+err:
-+ return 1;
-+}
-+
-+
-+static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
-+{
-+ pr_debug("%s\n", __func__);
-+
-+ /*
-+ * Check if there are any pending packets. The client must flush the tx
-+ * queues before unregistering, by calling
-+ * hif_lib_tx_get_next_complete().
-+ *
-+ * HIF no longer calls us since we are no longer registered.
-+ */
-+ if (queue->tx_pending)
-+ pr_err("%s: pending transmit packets\n", __func__);
-+}
-+
-+static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
-+{
-+ int qno;
-+
-+ pr_debug("%s\n", __func__);
-+
-+ for (qno = 0; qno < client->tx_qn; qno++)
-+ hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
-+
-+ kfree(client->tx_qbase);
-+}
-+
-+static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
-+ q_size)
-+{
-+ struct hif_client_tx_queue *queue;
-+ int qno;
-+
-+ client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
-+ tx_queue_desc), GFP_KERNEL);
-+ if (!client->tx_qbase)
-+ return 1;
-+
-+ for (qno = 0; qno < client->tx_qn; qno++) {
-+ queue = &client->tx_q[qno];
-+
-+ queue->base = client->tx_qbase + qno * q_size * sizeof(struct
-+ tx_queue_desc);
-+ queue->size = q_size;
-+ queue->read_idx = 0;
-+ queue->write_idx = 0;
-+ queue->tx_pending = 0;
-+ queue->nocpy_flag = 0;
-+ queue->prev_tmu_tx_pkts = 0;
-+ queue->done_tmu_tx_pkts = 0;
-+
-+ pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
-+ queue->base, queue->size);
-+ }
-+
-+ return 0;
-+}
-+
-+static int hif_lib_event_dummy(void *priv, int event_type, int qno)
-+{
-+ return 0;
-+}
-+
-+int hif_lib_client_register(struct hif_client_s *client)
-+{
-+ struct hif_shm *hif_shm;
-+ struct hif_client_shm *client_shm;
-+ int err, i;
-+ /* int loop_cnt = 0; */
-+
-+ pr_debug("%s\n", __func__);
-+
-+ /*Allocate memory before spin_lock*/
-+ if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
-+ err = -ENOMEM;
-+ goto err_rx;
-+ }
-+
-+ if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
-+ err = -ENOMEM;
-+ goto err_tx;
-+ }
-+
-+ spin_lock_bh(&pfe->hif.lock);
-+ if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
-+ (pfe->hif_client[client->id])) {
-+ err = -EINVAL;
-+ goto err;
-+ }
-+
-+ hif_shm = client->pfe->hif.shm;
-+
-+ if (!client->event_handler)
-+ client->event_handler = hif_lib_event_dummy;
-+
-+ /*Initialize client specific shared memory */
-+ client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
-+ client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
-+ client_shm->rx_qsize = client->rx_qsize;
-+ client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
-+ client_shm->tx_qsize = client->tx_qsize;
-+ client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
-+ (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
-+ /* spin_lock_init(&client->rx_lock); */
-+
-+ for (i = 0; i < HIF_EVENT_MAX; i++) {
-+ /* By default all events are unmasked */
-+ client->queue_mask[i] = 0;
-+ }
-+
-+ /*Indicate to HIF driver*/
-+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
-+
-+ pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
-+ __func__, client, client->id, client->tx_qsize,
-+ client->rx_qsize);
-+
-+ client->cpu_id = -1;
-+
-+ pfe->hif_client[client->id] = client;
-+ spin_unlock_bh(&pfe->hif.lock);
-+
-+ return 0;
-+
-+err:
-+ spin_unlock_bh(&pfe->hif.lock);
-+ hif_lib_client_release_tx_buffers(client);
-+
-+err_tx:
-+ hif_lib_client_release_rx_buffers(client);
-+
-+err_rx:
-+ return err;
-+}
-+
-+int hif_lib_client_unregister(struct hif_client_s *client)
-+{
-+ struct pfe *pfe = client->pfe;
-+ u32 client_id = client->id;
-+
-+ pr_info("%s: client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n",
-+ __func__, client, client->id, client->tx_qsize,
-+ client->rx_qsize);
-+
-+ spin_lock_bh(&pfe->hif.lock);
-+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
-+
-+ hif_lib_client_release_tx_buffers(client);
-+ hif_lib_client_release_rx_buffers(client);
-+ pfe->hif_client[client_id] = NULL;
-+ spin_unlock_bh(&pfe->hif.lock);
-+
-+ return 0;
-+}
-+
-+int hif_lib_event_handler_start(struct hif_client_s *client, int event,
-+ int qno)
-+{
-+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
-+ struct rx_queue_desc *desc = queue->base + queue->read_idx;
-+
-+ if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
-+ pr_debug("%s: Unsupported event : %d queue number : %d\n",
-+ __func__, event, qno);
-+ return -1;
-+ }
-+
-+ test_and_clear_bit(qno, &client->queue_mask[event]);
-+
-+ switch (event) {
-+ case EVENT_RX_PKT_IND:
-+ if (!(desc->ctrl & CL_DESC_OWN))
-+ hif_lib_indicate_client(client->id,
-+ EVENT_RX_PKT_IND, qno);
-+ break;
-+
-+ case EVENT_HIGH_RX_WM:
-+ case EVENT_TXDONE_IND:
-+ default:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * This function gets one packet from the specified client queue.
-+ * It also refills the rx buffer.
-+ */
-+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
-+ *ofst, unsigned int *rx_ctrl,
-+ unsigned int *desc_ctrl, void **priv_data)
-+{
-+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
-+ struct rx_queue_desc *desc;
-+ void *pkt = NULL;
-+
-+ /*
-+ * The following lock protects rx queue access from
-+ * hif_lib_event_handler_start.
-+ * In general the lock is not required, because hif_lib_xmit_pkt and
-+ * hif_lib_event_handler_start are called from NAPI poll, which is
-+ * not re-entrant. But if a client uses them differently, this lock
-+ * is required.
-+ */
-+ /*spin_lock_irqsave(&client->rx_lock, flags); */
-+ desc = queue->base + queue->read_idx;
-+ if (!(desc->ctrl & CL_DESC_OWN)) {
-+ pkt = desc->data - pfe_pkt_headroom;
-+
-+ *rx_ctrl = desc->client_ctrl;
-+ *desc_ctrl = desc->ctrl;
-+
-+ if (desc->ctrl & CL_DESC_FIRST) {
-+ u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
-+
-+ if (size) {
-+ size += PFE_PARSE_INFO_SIZE;
-+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
-+ PFE_PKT_HEADER_SZ - size;
-+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
-+ + size;
-+ *priv_data = desc->data + PFE_PKT_HEADER_SZ;
-+ } else {
-+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
-+ PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
-+ *ofst = pfe_pkt_headroom
-+ + PFE_PKT_HEADER_SZ
-+ + PFE_PARSE_INFO_SIZE;
-+ *priv_data = NULL;
-+ }
-+
-+ } else {
-+ *len = CL_DESC_BUF_LEN(desc->ctrl);
-+ *ofst = pfe_pkt_headroom;
-+ }
-+
-+ /*
-+ * Needed so we don't free a buffer/page
-+ * twice on module_exit
-+ */
-+ desc->data = NULL;
-+
-+ /*
-+ * Ensure everything else is written to DDR before
-+ * writing bd->ctrl
-+ */
-+ smp_wmb();
-+
-+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
-+ queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
-+ }
-+
-+ /*spin_unlock_irqrestore(&client->rx_lock, flags); */
-+ return pkt;
-+}
-+
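hif_lib_receive_pkt() above returns the raw buffer start and reports the payload offset and length through its out parameters. A hypothetical consumer (illustration only; example_drain_rx is not from the patch) would drain one queue roughly like this:

	/* Hypothetical consumer -- illustration only, not part of the driver. */
	static int example_drain_rx(struct hif_client_s *client, int qno,
				    int budget)
	{
		unsigned int rx_ctrl, desc_ctrl;
		void *priv_data, *pkt;
		int len, ofst, done = 0;

		while (done < budget) {
			pkt = hif_lib_receive_pkt(client, qno, &len, &ofst,
						  &rx_ctrl, &desc_ctrl,
						  &priv_data);
			if (!pkt)
				break;
			/* hand (pkt + ofst, len) to the network stack here */
			done++;
		}

		return done;
	}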
-+static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
-+ client_id, unsigned int qno,
-+ u32 client_ctrl)
-+{
-+ /* Optimize the write since the destination may be non-cacheable */
-+ if (!((unsigned long)pkt_hdr & 0x3)) {
-+ ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
-+ client_id;
-+ } else {
-+ ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
-+ ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
-+ }
-+}
-+
-+/* This function puts the given packet in the specified client queue */
-+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
-+ *data, unsigned int len, u32 client_ctrl,
-+ unsigned int flags, void *client_data)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+ struct tx_queue_desc *desc = queue->base + queue->write_idx;
-+
-+ /* First buffer */
-+ if (flags & HIF_FIRST_BUFFER) {
-+ data -= sizeof(struct hif_hdr);
-+ len += sizeof(struct hif_hdr);
-+
-+ hif_hdr_write(data, client->id, qno, client_ctrl);
-+ }
-+
-+ desc->data = client_data;
-+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
-+
-+ __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
-+
-+ queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
-+ queue->tx_pending++;
-+ queue->jiffies_last_packet = jiffies;
-+}
-+
-+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
-+ unsigned int *flags, int count)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+ struct tx_queue_desc *desc = queue->base + queue->read_idx;
-+
-+ pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
-+ queue->read_idx, queue->tx_pending);
-+
-+ if (!queue->tx_pending)
-+ return NULL;
-+
-+ if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
-+ u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
-+ client->id, TMU_DM_TX_TRANS, 4));
-+
-+ if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
-+ queue->done_tmu_tx_pkts = UINT_MAX -
-+ queue->prev_tmu_tx_pkts + tmu_tx_pkts;
-+ else
-+ queue->done_tmu_tx_pkts = tmu_tx_pkts -
-+ queue->prev_tmu_tx_pkts;
-+
-+ queue->prev_tmu_tx_pkts = tmu_tx_pkts;
-+
-+ if (!queue->done_tmu_tx_pkts)
-+ return NULL;
-+ }
-+
-+ if (desc->ctrl & CL_DESC_OWN)
-+ return NULL;
-+
-+ queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
-+ queue->tx_pending--;
-+
-+ *flags = CL_DESC_GET_FLAGS(desc->ctrl);
-+
-+ if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
-+ queue->done_tmu_tx_pkts--;
-+
-+ return desc->data;
-+}
-+
-+static void hif_lib_tmu_credit_init(struct pfe *pfe)
-+{
-+ int i, q;
-+
-+ for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
-+ for (q = 0; q < emac_txq_cnt; q++) {
-+ pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
-+ DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
-+ pfe->tmu_credit.tx_credit[i][q] =
-+ pfe->tmu_credit.tx_credit_max[i][q];
-+ }
-+}
-+
-+/* __hif_lib_update_credit
-+ *
-+ * @param[in] client hif client context
-+ * @param[in] queue queue number in match with TMU
-+ */
-+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
-+{
-+ unsigned int tmu_tx_packets, tmp;
-+
-+ if (tx_qos) {
-+ tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
-+ client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
-+
-+ /* tx_packets counter overflowed */
-+ if (tmu_tx_packets >
-+ pfe->tmu_credit.tx_packets[client->id][queue]) {
-+ tmp = UINT_MAX - tmu_tx_packets +
-+ pfe->tmu_credit.tx_packets[client->id][queue];
-+
-+ pfe->tmu_credit.tx_credit[client->id][queue] =
-+ pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
-+ } else {
-+ /* TMU tx <= pfe_eth tx: the normal case, or both counters
-+ * overflowed since last time
-+ */
-+ pfe->tmu_credit.tx_credit[client->id][queue] =
-+ pfe->tmu_credit.tx_credit_max[client->id][queue] -
-+ (pfe->tmu_credit.tx_packets[client->id][queue] -
-+ tmu_tx_packets);
-+ }
-+ }
-+}
-+
-+int pfe_hif_lib_init(struct pfe *pfe)
-+{
-+ int rc;
-+
-+ pr_info("%s\n", __func__);
-+
-+ if (lro_mode) {
-+ page_mode = 1;
-+ pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
-+ pfe_pkt_headroom = 0;
-+ } else {
-+ page_mode = 0;
-+ pfe_pkt_size = PFE_PKT_SIZE;
-+ pfe_pkt_headroom = PFE_PKT_HEADROOM;
-+ }
-+
-+ if (tx_qos)
-+ emac_txq_cnt = EMAC_TXQ_CNT / 2;
-+ else
-+ emac_txq_cnt = EMAC_TXQ_CNT;
-+
-+ hif_lib_tmu_credit_init(pfe);
-+ pfe->hif.shm = &ghif_shm;
-+ rc = pfe_hif_shm_init(pfe->hif.shm);
-+
-+ return rc;
-+}
-+
-+void pfe_hif_lib_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ pfe_hif_shm_clean(pfe->hif.shm);
-+}
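Both hif_lib_tx_get_next_complete() and __hif_lib_update_credit() above compute the delta of a free-running 32-bit TMU counter that can wrap, handling the wrap case explicitly. A minimal sketch of the same idea (illustration only, not part of the patch; counter_delta is an invented name): plain unsigned subtraction already reduces modulo 2^32, e.g. prev = 0xFFFFFFFE and cur = 3 gives a delta of 5.

	/* Illustration only -- wrap-safe delta of a free-running u32 counter. */
	static inline u32 counter_delta(u32 prev, u32 cur)
	{
		return cur - prev;	/* unsigned arithmetic wraps modulo 2^32 */
	}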
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
-@@ -0,0 +1,229 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_HIF_LIB_H_
-+#define _PFE_HIF_LIB_H_
-+
-+#include "pfe_hif.h"
-+
-+#define HIF_CL_REQ_TIMEOUT 10
-+#define GFP_DMA_PFE 0
-+#define PFE_PARSE_INFO_SIZE 16
-+
-+enum {
-+ REQUEST_CL_REGISTER = 0,
-+ REQUEST_CL_UNREGISTER,
-+ HIF_REQUEST_MAX
-+};
-+
-+enum {
-+ /* Event to indicate that the client rx queue has reached its watermark level */
-+ EVENT_HIGH_RX_WM = 0,
-+ /* Event to indicate that a packet was received for the client */
-+ EVENT_RX_PKT_IND,
-+ /* Event to indicate that packet tx is done for the client */
-+ EVENT_TXDONE_IND,
-+ HIF_EVENT_MAX
-+};
-+
-+/* structure to store client queue info */
-+struct hif_client_rx_queue {
-+ struct rx_queue_desc *base;
-+ u32 size;
-+ u32 read_idx;
-+ u32 write_idx;
-+};
-+
-+struct hif_client_tx_queue {
-+ struct tx_queue_desc *base;
-+ u32 size;
-+ u32 read_idx;
-+ u32 write_idx;
-+ u32 tx_pending;
-+ unsigned long jiffies_last_packet;
-+ u32 nocpy_flag;
-+ u32 prev_tmu_tx_pkts;
-+ u32 done_tmu_tx_pkts;
-+};
-+
-+struct hif_client_s {
-+ int id;
-+ int tx_qn;
-+ int rx_qn;
-+ void *rx_qbase;
-+ void *tx_qbase;
-+ int tx_qsize;
-+ int rx_qsize;
-+ int cpu_id;
-+ struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
-+ struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
-+ int (*event_handler)(void *priv, int event, int data);
-+ unsigned long queue_mask[HIF_EVENT_MAX];
-+ struct pfe *pfe;
-+ void *priv;
-+};
-+
-+/*
-+ * Client specific shared memory
-+ * It contains number of Rx/Tx queues, base addresses and queue sizes
-+ */
-+struct hif_client_shm {
-+ u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
-+ unsigned long rx_qbase; /*Rx queue base address */
-+ u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
-+ unsigned long tx_qbase; /* Tx queue base address */
-+ u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
-+};
-+
-+/*Client shared memory ctrl bit description */
-+#define CLIENT_CTRL_RX_Q_CNT_OFST 0
-+#define CLIENT_CTRL_TX_Q_CNT_OFST 8
-+#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
-+ & 0xFF)
-+#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
-+ & 0xFF)
-+
-+/*
-+ * Shared memory used to communicate between HIF driver and host/client drivers
-+ * Before starting the hif driver, rx_buf_pool and rx_buf_pool_cnt should be
-+ * initialized with host buffers and the buffer count in the pool.
-+ * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
-+ *
-+ */
-+struct hif_shm {
-+ u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
-+ /*Rx buffers required to initialize HIF rx descriptors */
-+ void *rx_buf_pool[HIF_RX_DESC_NT];
-+ unsigned long g_client_status[2]; /*Global client status bit mask */
-+ /* Client specific shared memory */
-+ struct hif_client_shm client[HIF_CLIENTS_MAX];
-+};
-+
-+#define CL_DESC_OWN BIT(31)
-+/* This sets ownership to the HIF driver */
-+#define CL_DESC_LAST BIT(30)
-+/* This indicates the last packet for multi-buffer handling */
-+#define CL_DESC_FIRST BIT(29)
-+/* This indicates the first packet for multi-buffer handling */
-+
-+#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
-+#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
-+#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
-+
-+struct rx_queue_desc {
-+ void *data;
-+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
-+ u32 client_ctrl;
-+};
-+
-+struct tx_queue_desc {
-+ void *data;
-+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
-+};
-+
-+/* HIF Rx does not work properly for 2-byte aligned buffers, and the
-+ * ip_header should be 4-byte aligned for better performance.
-+ * "ip_header = 64 + 6 (hif_header) + 14 (MAC header)" will be 4-byte aligned.
-+ */
-+#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
-+/* must be big enough for headroom, pkt size and skb shared info */
-+#define PFE_BUF_SIZE 2048
-+#define PFE_PKT_HEADROOM 128
-+
-+#define SKB_SHARED_INFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-+#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
-+ - SKB_SHARED_INFO_SIZE)
-+#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
-+#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
-+#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
-+#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
-+ + MAX_L4_HDR_SIZE)
-+/* Used in page mode to clamp packet size to the maximum supported by the hif
-+ * hw interface (<16KiB)
-+ */
-+#define MAX_PFE_PKT_SIZE 16380UL
-+
-+extern unsigned int pfe_pkt_size;
-+extern unsigned int pfe_pkt_headroom;
-+extern unsigned int page_mode;
-+extern unsigned int lro_mode;
-+extern unsigned int tx_qos;
-+extern unsigned int emac_txq_cnt;
-+
-+int pfe_hif_lib_init(struct pfe *pfe);
-+void pfe_hif_lib_exit(struct pfe *pfe);
-+int hif_lib_client_register(struct hif_client_s *client);
-+int hif_lib_client_unregister(struct hif_client_s *client);
-+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
-+ *data, unsigned int len, u32 client_ctrl,
-+ unsigned int flags, void *client_data);
-+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
-+ unsigned int len, u32 client_ctrl, void *client_data);
-+void hif_lib_indicate_client(int cl_id, int event, int data);
-+int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
-+ data);
-+int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
-+int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
-+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
-+ unsigned int *flags, int count);
-+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
-+ *ofst, unsigned int *rx_ctrl,
-+ unsigned int *desc_ctrl, void **priv_data);
-+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
-+void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
-+void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
-+ enable);
-+static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
-+ qno)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+
-+ return (queue->size - queue->tx_pending);
-+}
-+
-+static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
-+ int qno)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+
-+ return queue->write_idx;
-+}
-+
-+static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
-+ qno)
-+{
-+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
-+
-+ return queue->tx_pending;
-+}
-+
-+#define hif_lib_tx_credit_avail(pfe, id, qno) \
-+ ((pfe)->tmu_credit.tx_credit[id][qno])
-+
-+#define hif_lib_tx_credit_max(pfe, id, qno) \
-+ ((pfe)->tmu_credit.tx_credit_max[id][qno])
-+
-+/*
-+ * Consume tx credit for the given client queue (no-op unless tx_qos is set)
-+ */
-+#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
-+ ({ typeof(pfe) pfe_ = pfe; \
-+ typeof(id) id_ = id; \
-+ typeof(qno) qno_ = qno; \
-+ typeof(credit) credit_ = credit; \
-+ do { \
-+ if (tx_qos) { \
-+ (pfe_)->tmu_credit.tx_credit[id_][qno_]\
-+ -= credit_; \
-+ (pfe_)->tmu_credit.tx_packets[id_][qno_]\
-+ += credit_; \
-+ } \
-+ } while (0); \
-+ })
-+
-+#endif /* _PFE_HIF_LIB_H_ */
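PFE_PKT_SIZE above is simply what is left of the 2048-byte PFE_BUF_SIZE after the 128-byte headroom and the aligned skb_shared_info (SKB_DATA_ALIGN rounds it up to the cache-line size), so the exact value varies by architecture and kernel build. A quick way to see it on a given build (illustration only, not part of the patch):

	/* Illustration only: print the effective sizes for this build. */
	pr_info("PFE_BUF_SIZE=%d headroom=%d shinfo=%zu -> PFE_PKT_SIZE=%zu\n",
		PFE_BUF_SIZE, PFE_PKT_HEADROOM,
		(size_t)SKB_SHARED_INFO_SIZE, (size_t)PFE_PKT_SIZE);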
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hw.c
-@@ -0,0 +1,164 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include "pfe_mod.h"
-+#include "pfe_hw.h"
-+
-+/* Functions to handle most of pfe hw register initialization */
-+int pfe_hw_init(struct pfe *pfe, int resume)
-+{
-+ struct class_cfg class_cfg = {
-+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
-+ .route_table_baseaddr = pfe->ddr_phys_baseaddr +
-+ ROUTE_TABLE_BASEADDR,
-+ .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
-+ };
-+
-+ struct tmu_cfg tmu_cfg = {
-+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
-+ .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
-+ .llm_queue_len = TMU_LLM_QUEUE_LEN,
-+ };
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ struct util_cfg util_cfg = {
-+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
-+ };
-+#endif
-+
-+ struct BMU_CFG bmu1_cfg = {
-+ .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
-+ BMU1_LMEM_BASEADDR),
-+ .count = BMU1_BUF_COUNT,
-+ .size = BMU1_BUF_SIZE,
-+ .low_watermark = 10,
-+ .high_watermark = 15,
-+ };
-+
-+ struct BMU_CFG bmu2_cfg = {
-+ .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
-+ BMU2_DDR_BASEADDR),
-+ .count = BMU2_BUF_COUNT,
-+ .size = BMU2_BUF_SIZE,
-+ .low_watermark = 250,
-+ .high_watermark = 253,
-+ };
-+
-+ struct gpi_cfg egpi1_cfg = {
-+ .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
-+ .tmlf_txthres = EGPI1_TMLF_TXTHRES,
-+ .aseq_len = EGPI1_ASEQ_LEN,
-+ .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
-+ EMAC_TCNTRL_REG),
-+ };
-+
-+ struct gpi_cfg egpi2_cfg = {
-+ .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
-+ .tmlf_txthres = EGPI2_TMLF_TXTHRES,
-+ .aseq_len = EGPI2_ASEQ_LEN,
-+ .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
-+ EMAC_TCNTRL_REG),
-+ };
-+
-+ struct gpi_cfg hgpi_cfg = {
-+ .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
-+ .tmlf_txthres = HGPI_TMLF_TXTHRES,
-+ .aseq_len = HGPI_ASEQ_LEN,
-+ .mtip_pause_reg = 0,
-+ };
-+
-+ pr_info("%s\n", __func__);
-+
-+#if !defined(LS1012A_PFE_RESET_WA)
-+ /* LS1012A needs this to make PE work correctly */
-+ writel(0x3, CLASS_PE_SYS_CLK_RATIO);
-+ writel(0x3, TMU_PE_SYS_CLK_RATIO);
-+ writel(0x3, UTIL_PE_SYS_CLK_RATIO);
-+ usleep_range(10, 20);
-+#endif
-+
-+ pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
-+ pr_info("TMU version: %x\n", readl(TMU_VERSION));
-+
-+ pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
-+ BMU_VERSION));
-+ pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
-+ BMU_VERSION));
-+
-+ pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
-+ GPI_VERSION));
-+ pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
-+ GPI_VERSION));
-+ pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
-+ GPI_VERSION));
-+
-+ pr_info("HIF version: %x\n", readl(HIF_VERSION));
-+ pr_info("HIF NOPCY version: %x\n", readl(HIF_NOCPY_VERSION));
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
-+#endif
-+ while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
-+ ;
-+
-+ hif_rx_disable();
-+ hif_tx_disable();
-+
-+ bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
-+
-+ pr_info("bmu_init(1) done\n");
-+
-+ bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
-+
-+ pr_info("bmu_init(2) done\n");
-+
-+ class_cfg.resume = resume ? 1 : 0;
-+
-+ class_init(&class_cfg);
-+
-+ pr_info("class_init() done\n");
-+
-+ tmu_init(&tmu_cfg);
-+
-+ pr_info("tmu_init() done\n");
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_init(&util_cfg);
-+
-+ pr_info("util_init() done\n");
-+#endif
-+ gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
-+
-+ pr_info("gpi_init(1) done\n");
-+
-+ gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
-+
-+ pr_info("gpi_init(2) done\n");
-+
-+ gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
-+
-+ pr_info("gpi_init(hif) done\n");
-+
-+ bmu_enable(BMU1_BASE_ADDR);
-+
-+ pr_info("bmu_enable(1) done\n");
-+
-+ bmu_enable(BMU2_BASE_ADDR);
-+
-+ pr_info("bmu_enable(2) done\n");
-+
-+ return 0;
-+}
-+
-+void pfe_hw_exit(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ bmu_disable(BMU1_BASE_ADDR);
-+ bmu_reset(BMU1_BASE_ADDR);
-+
-+ bmu_disable(BMU2_BASE_ADDR);
-+ bmu_reset(BMU2_BASE_ADDR);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_hw.h
-@@ -0,0 +1,15 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_HW_H_
-+#define _PFE_HW_H_
-+
-+#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
-+
-+int pfe_hw_init(struct pfe *pfe, int resume);
-+void pfe_hw_exit(struct pfe *pfe);
-+
-+#endif /* _PFE_HW_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
-@@ -0,0 +1,368 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/device.h>
-+#include <linux/of.h>
-+#include <linux/of_net.h>
-+#include <linux/of_address.h>
-+#include <linux/of_mdio.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/clk.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-+
-+#include "pfe_mod.h"
-+
-+extern bool pfe_use_old_dts_phy;
-+struct ls1012a_pfe_platform_data pfe_platform_data;
-+
-+static int pfe_get_gemac_if_properties(struct device_node *gem,
-+ int port,
-+ struct ls1012a_pfe_platform_data *pdata)
-+{
-+ struct device_node *phy_node = NULL;
-+ int size;
-+ int phy_id = 0;
-+ const u32 *addr;
-+ const void *mac_addr;
-+
-+ addr = of_get_property(gem, "reg", &size);
-+ port = be32_to_cpup(addr);
-+
-+ pdata->ls1012a_eth_pdata[port].gem_id = port;
-+
-+ mac_addr = of_get_mac_address(gem);
-+ if (mac_addr) {
-+ memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
-+ ETH_ALEN);
-+ }
-+
-+ phy_node = of_parse_phandle(gem, "phy-handle", 0);
-+ pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
-+ if (phy_node) {
-+ pfe_use_old_dts_phy = false;
-+ goto process_phynode;
-+ } else if (of_phy_is_fixed_link(gem)) {
-+ pfe_use_old_dts_phy = false;
-+ if (of_phy_register_fixed_link(gem) < 0) {
-+ pr_err("broken fixed-link specification\n");
-+ goto err;
-+ }
-+ phy_node = of_node_get(gem);
-+ pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
-+ } else if (of_get_property(gem, "fsl,pfe-phy-if-flags", &size)) {
-+ pfe_use_old_dts_phy = true;
-+ /* Use old dts properties for phy handling */
-+ addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
-+ pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
-+
-+ addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
-+ if (!addr) {
-+ pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
-+ __LINE__);
-+ } else {
-+ phy_id = be32_to_cpup(addr);
-+ pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
-+ pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
-+ }
-+
-+ /* If PHY is enabled, read mdio properties */
-+ if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
-+ goto done;
-+
-+ } else {
-+ pr_info("%s: No PHY or fixed-link\n", __func__);
-+ return 0;
-+ }
-+
-+process_phynode:
-+ pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
-+ if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
-+ pr_err("%s:%d Incorrect Phy mode....\n", __func__,
-+ __LINE__);
-+
-+ addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
-+ if (!addr) {
-+ pr_err("%s: Invalid mdio-mux-val....\n", __func__);
-+ } else {
-+ phy_id = be32_to_cpup(addr);
-+ pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
-+ }
-+
-+ if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
-+ pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
-+ pdata->ls1012a_eth_pdata[port].mdio_muxval;
-+
-+
-+ pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
-+
-+done:
-+ return 0;
-+
-+err:
-+ return -1;
-+}
-+
-+/*
-+ * pfe_platform_probe -
-+ */
-+static int pfe_platform_probe(struct platform_device *pdev)
-+{
-+ struct resource res;
-+ int ii, rc, interface_count = 0, size = 0;
-+ const u32 *prop;
-+ struct device_node *np, *gem = NULL;
-+ struct clk *pfe_clk;
-+
-+ np = pdev->dev.of_node;
-+
-+ if (!np) {
-+ pr_err("Invalid device node\n");
-+ return -EINVAL;
-+ }
-+
-+ pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
-+ if (!pfe) {
-+ rc = -ENOMEM;
-+ goto err_alloc;
-+ }
-+
-+ platform_set_drvdata(pdev, pfe);
-+
-+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-+
-+ if (of_address_to_resource(np, 1, &res)) {
-+ rc = -ENOMEM;
-+ pr_err("failed to get ddr resource\n");
-+ goto err_ddr;
-+ }
-+
-+ pfe->ddr_phys_baseaddr = res.start;
-+ pfe->ddr_size = resource_size(&res);
-+ pfe->ddr_baseaddr = phys_to_virt(res.start);
-+
-+ pfe->scfg =
-+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-+ "fsl,pfe-scfg");
-+ if (IS_ERR(pfe->scfg)) {
-+ dev_err(&pdev->dev, "No syscfg phandle specified\n");
-+ rc = PTR_ERR(pfe->scfg);
-+ goto err_ddr;
-+ }
-+
-+ pfe->cbus_baseaddr = of_iomap(np, 0);
-+ if (!pfe->cbus_baseaddr) {
-+ rc = -ENOMEM;
-+ pr_err("failed to get axi resource\n");
-+ goto err_axi;
-+ }
-+
-+ pfe->hif_irq = platform_get_irq(pdev, 0);
-+ if (pfe->hif_irq < 0) {
-+ pr_err("platform_get_irq for hif failed\n");
-+ rc = pfe->hif_irq;
-+ goto err_hif_irq;
-+ }
-+
-+ pfe->wol_irq = platform_get_irq(pdev, 2);
-+ if (pfe->wol_irq < 0) {
-+ pr_err("platform_get_irq for WoL failed\n");
-+ rc = pfe->wol_irq;
-+ goto err_hif_irq;
-+ }
-+
-+ /* Read interface count */
-+ prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
-+ if (!prop) {
-+ pr_err("Failed to read number of interfaces\n");
-+ rc = -ENXIO;
-+ goto err_prop;
-+ }
-+
-+ interface_count = be32_to_cpup(prop);
-+ if (interface_count <= 0) {
-+ pr_err("No ethernet interface count : %d\n",
-+ interface_count);
-+ rc = -ENXIO;
-+ goto err_prop;
-+ }
-+
-+ pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
-+
-+ for (ii = 0; ii < interface_count; ii++) {
-+ gem = of_get_next_child(np, gem);
-+ if (gem)
-+ pfe_get_gemac_if_properties(gem, ii,
-+ &pfe_platform_data);
-+ else
-+ pr_err("Unable to find interface %d\n", ii);
-+
-+ }
-+
-+ pfe->dev = &pdev->dev;
-+
-+ pfe->dev->platform_data = &pfe_platform_data;
-+
-+ /* declare WoL capabilities */
-+ device_init_wakeup(&pdev->dev, true);
-+
-+ /* find the clocks */
-+ pfe_clk = devm_clk_get(pfe->dev, "pfe");
-+ if (IS_ERR(pfe_clk)) {
-+ rc = PTR_ERR(pfe_clk);
-+ goto err_probe;
-+ }
-+
-+ /* PFE clock is (platform clock / 2) */
-+ /* save sys_clk value as KHz */
-+ pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
-+
-+ rc = pfe_probe(pfe);
-+ if (rc < 0)
-+ goto err_probe;
-+
-+ return 0;
-+
-+err_probe:
-+err_prop:
-+err_hif_irq:
-+ iounmap(pfe->cbus_baseaddr);
-+
-+err_axi:
-+err_ddr:
-+ platform_set_drvdata(pdev, NULL);
-+
-+ kfree(pfe);
-+
-+err_alloc:
-+ return rc;
-+}
-+
-+/*
-+ * pfe_platform_remove -
-+ */
-+static int pfe_platform_remove(struct platform_device *pdev)
-+{
-+ struct pfe *pfe = platform_get_drvdata(pdev);
-+ int rc;
-+
-+ pr_info("%s\n", __func__);
-+
-+ rc = pfe_remove(pfe);
-+
-+ iounmap(pfe->cbus_baseaddr);
-+
-+ platform_set_drvdata(pdev, NULL);
-+
-+ kfree(pfe);
-+
-+ return rc;
-+}
-+
-+#ifdef CONFIG_PM
-+#ifdef CONFIG_PM_SLEEP
-+int pfe_platform_suspend(struct device *dev)
-+{
-+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
-+ struct net_device *netdev;
-+ int i;
-+
-+ pfe->wake = 0;
-+
-+ for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
-+ netdev = pfe->eth.eth_priv[i]->ndev;
-+
-+ netif_device_detach(netdev);
-+
-+ if (netif_running(netdev))
-+ if (pfe_eth_suspend(netdev))
-+ pfe->wake = 1;
-+ }
-+
-+ /* Shutdown PFE only if we're not waking up the system */
-+ if (!pfe->wake) {
-+#if defined(LS1012A_PFE_RESET_WA)
-+ pfe_hif_rx_idle(&pfe->hif);
-+#endif
-+ pfe_ctrl_suspend(&pfe->ctrl);
-+ pfe_firmware_exit(pfe);
-+
-+ pfe_hif_exit(pfe);
-+ pfe_hif_lib_exit(pfe);
-+
-+ pfe_hw_exit(pfe);
-+ }
-+
-+ return 0;
-+}
-+
-+static int pfe_platform_resume(struct device *dev)
-+{
-+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
-+ struct net_device *netdev;
-+ int i;
-+
-+ if (!pfe->wake) {
-+ pfe_hw_init(pfe, 1);
-+ pfe_hif_lib_init(pfe);
-+ pfe_hif_init(pfe);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ util_enable();
-+#endif
-+ tmu_enable(0xf);
-+ class_enable();
-+ pfe_ctrl_resume(&pfe->ctrl);
-+ }
-+
-+ for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
-+ netdev = pfe->eth.eth_priv[i]->ndev;
-+
-+ if (pfe->mdio.mdio_priv[i]->mii_bus)
-+ pfe_eth_mdio_reset(pfe->mdio.mdio_priv[i]->mii_bus);
-+
-+ if (netif_running(netdev))
-+ pfe_eth_resume(netdev);
-+
-+ netif_device_attach(netdev);
-+ }
-+ return 0;
-+}
-+#else
-+#define pfe_platform_suspend NULL
-+#define pfe_platform_resume NULL
-+#endif
-+
-+static const struct dev_pm_ops pfe_platform_pm_ops = {
-+ SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
-+};
-+#endif
-+
-+static const struct of_device_id pfe_match[] = {
-+ {
-+ .compatible = "fsl,pfe",
-+ },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, pfe_match);
-+
-+static struct platform_driver pfe_platform_driver = {
-+ .probe = pfe_platform_probe,
-+ .remove = pfe_platform_remove,
-+ .driver = {
-+ .name = "pfe",
-+ .of_match_table = pfe_match,
-+#ifdef CONFIG_PM
-+ .pm = &pfe_platform_pm_ops,
-+#endif
-+ },
-+};
-+
-+module_platform_driver(pfe_platform_driver);
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("PFE Ethernet driver");
-+MODULE_AUTHOR("NXP DNCPE");
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_mod.c
-@@ -0,0 +1,158 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include <linux/dma-mapping.h>
-+#include "pfe_mod.h"
-+#include "pfe_cdev.h"
-+
-+unsigned int us;
-+module_param(us, uint, 0444);
-+MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
-+ "1: module enabled for userspace networking\n");
-+struct pfe *pfe;
-+
-+/*
-+ * pfe_probe -
-+ */
-+int pfe_probe(struct pfe *pfe)
-+{
-+ int rc;
-+
-+ if (pfe->ddr_size < DDR_MAX_SIZE) {
-+ pr_err("%s: required DDR memory (%x) above platform ddr memory (%x)\n",
-+ __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
-+ rc = -ENOMEM;
-+ goto err_hw;
-+ }
-+
-+ if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
-+ (8 * SZ_1M - 1)) != 0) {
-+ pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
-+ __func__, (int)pfe->ddr_phys_baseaddr +
-+ BMU2_DDR_BASEADDR);
-+ rc = -ENOMEM;
-+ goto err_hw;
-+ }
-+
-+ pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
-+ (unsigned long)pfe->cbus_baseaddr,
-+ (unsigned long)pfe->ddr_baseaddr,
-+ pfe->ddr_phys_baseaddr, pfe->ddr_size);
-+
-+ pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
-+ pfe->ddr_phys_baseaddr, pfe->ddr_size);
-+
-+ rc = pfe_hw_init(pfe, 0);
-+ if (rc < 0)
-+ goto err_hw;
-+
-+ if (us)
-+ goto firmware_init;
-+
-+ rc = pfe_hif_lib_init(pfe);
-+ if (rc < 0)
-+ goto err_hif_lib;
-+
-+ rc = pfe_hif_init(pfe);
-+ if (rc < 0)
-+ goto err_hif;
-+
-+firmware_init:
-+ rc = pfe_firmware_init(pfe);
-+ if (rc < 0)
-+ goto err_firmware;
-+
-+ rc = pfe_ctrl_init(pfe);
-+ if (rc < 0)
-+ goto err_ctrl;
-+
-+ rc = pfe_eth_init(pfe);
-+ if (rc < 0)
-+ goto err_eth;
-+
-+ rc = pfe_sysfs_init(pfe);
-+ if (rc < 0)
-+ goto err_sysfs;
-+
-+ rc = pfe_debugfs_init(pfe);
-+ if (rc < 0)
-+ goto err_debugfs;
-+
-+ if (us) {
-+ /* Creating a character device */
-+ rc = pfe_cdev_init();
-+ if (rc < 0)
-+ goto err_cdev;
-+ }
-+
-+ return 0;
-+
-+err_cdev:
-+ pfe_debugfs_exit(pfe);
-+
-+err_debugfs:
-+ pfe_sysfs_exit(pfe);
-+
-+err_sysfs:
-+ pfe_eth_exit(pfe);
-+
-+err_eth:
-+ pfe_ctrl_exit(pfe);
-+
-+err_ctrl:
-+ pfe_firmware_exit(pfe);
-+
-+err_firmware:
-+ if (us)
-+ goto err_hif_lib;
-+
-+ pfe_hif_exit(pfe);
-+
-+err_hif:
-+ pfe_hif_lib_exit(pfe);
-+
-+err_hif_lib:
-+ pfe_hw_exit(pfe);
-+
-+err_hw:
-+ return rc;
-+}
-+
-+/*
-+ * pfe_remove -
-+ */
-+int pfe_remove(struct pfe *pfe)
-+{
-+ pr_info("%s\n", __func__);
-+
-+ if (us)
-+ pfe_cdev_exit();
-+
-+ pfe_debugfs_exit(pfe);
-+
-+ pfe_sysfs_exit(pfe);
-+
-+ pfe_eth_exit(pfe);
-+
-+ pfe_ctrl_exit(pfe);
-+
-+#if defined(LS1012A_PFE_RESET_WA)
-+ pfe_hif_rx_idle(&pfe->hif);
-+#endif
-+ pfe_firmware_exit(pfe);
-+
-+ if (us)
-+ goto hw_exit;
-+
-+ pfe_hif_exit(pfe);
-+
-+ pfe_hif_lib_exit(pfe);
-+
-+hw_exit:
-+ pfe_hw_exit(pfe);
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_mod.h
-@@ -0,0 +1,103 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_MOD_H_
-+#define _PFE_MOD_H_
-+
-+#include <linux/device.h>
-+#include <linux/elf.h>
-+
-+extern unsigned int us;
-+
-+struct pfe;
-+
-+#include "pfe_hw.h"
-+#include "pfe_firmware.h"
-+#include "pfe_ctrl.h"
-+#include "pfe_hif.h"
-+#include "pfe_hif_lib.h"
-+#include "pfe_eth.h"
-+#include "pfe_sysfs.h"
-+#include "pfe_perfmon.h"
-+#include "pfe_debugfs.h"
-+
-+#define PHYID_MAX_VAL 32
-+
-+struct pfe_tmu_credit {
-+ /* Number of allowed TX packet in-flight, matches TMU queue size */
-+ unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
-+ unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
-+ unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
-+};
-+
-+struct pfe {
-+ struct regmap *scfg;
-+ unsigned long ddr_phys_baseaddr;
-+ void *ddr_baseaddr;
-+ unsigned int ddr_size;
-+ void *cbus_baseaddr;
-+ void *apb_baseaddr;
-+ unsigned long iram_phys_baseaddr;
-+ void *iram_baseaddr;
-+ unsigned long ipsec_phys_baseaddr;
-+ void *ipsec_baseaddr;
-+ int hif_irq;
-+ int wol_irq;
-+ int hif_client_irq;
-+ struct device *dev;
-+ struct dentry *dentry;
-+ struct pfe_ctrl ctrl;
-+ struct pfe_hif hif;
-+ struct pfe_eth eth;
-+ struct pfe_mdio mdio;
-+ struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
-+#if defined(CFG_DIAGS)
-+ struct pfe_diags diags;
-+#endif
-+ struct pfe_tmu_credit tmu_credit;
-+ struct pfe_cpumon cpumon;
-+ struct pfe_memmon memmon;
-+ int wake;
-+ int mdio_muxval[PHYID_MAX_VAL];
-+ struct clk *hfe_clock;
-+};
-+
-+extern struct pfe *pfe;
-+
-+int pfe_probe(struct pfe *pfe);
-+int pfe_remove(struct pfe *pfe);
-+
-+/* DDR Mapping in reserved memory*/
-+#define ROUTE_TABLE_BASEADDR 0
-+#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
-+#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
-+ * CLASS_ROUTE_SIZE)
-+#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
-+#define BMU2_BUF_COUNT (4096 - 256)
-+/* This is to get a total DDR size of 12MiB */
-+#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
-+#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
-+#define UTIL_CODE_SIZE (128 * SZ_1K)
-+#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
-+#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
-+#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
-+#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
-+#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
-+#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
-+#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
-+#define TMU_LLM_QUEUE_LEN (8 * 512)
-+/* Must be power of two and at least 16 * 8 = 128 bytes */
-+#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN)
-+/* (4 TMU's x 16 queues x queue_len) */
-+
-+#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
-+
-+/* LMEM Mapping */
-+#define BMU1_LMEM_BASEADDR 0
-+#define BMU1_BUF_COUNT 256
-+#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
-+
-+#endif /* _PFE_MOD_H */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
-@@ -0,0 +1,26 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_PERFMON_H_
-+#define _PFE_PERFMON_H_
-+
-+#include "pfe/pfe.h"
-+
-+#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
-+
-+struct pfe_cpumon {
-+ u32 cpu_usage_pct[MAX_PE];
-+ u32 class_usage_pct;
-+};
-+
-+struct pfe_memmon {
-+ u32 kernel_memory_allocated;
-+};
-+
-+int pfe_perfmon_init(struct pfe *pfe);
-+void pfe_perfmon_exit(struct pfe *pfe);
-+
-+#endif /* _PFE_PERFMON_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
-@@ -0,0 +1,806 @@
-+// SPDX-License-Identifier: GPL-2.0+
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+
-+#include "pfe_mod.h"
-+
-+#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
-+#define NUM_QUEUES 16
-+
-+static char register_name[20][5] = {
-+ "EPC", "ECAS", "EID", "ED",
-+ "r0", "r1", "r2", "r3",
-+ "r4", "r5", "r6", "r7",
-+ "r8", "r9", "r10", "r11",
-+ "r12", "r13", "r14", "r15",
-+};
-+
-+static char exception_name[14][20] = {
-+ "Reset",
-+ "HardwareFailure",
-+ "NMI",
-+ "InstBreakpoint",
-+ "DataBreakpoint",
-+ "Unsupported",
-+ "PrivilegeViolation",
-+ "InstBusError",
-+ "DataBusError",
-+ "AlignmentError",
-+ "ArithmeticError",
-+ "SystemCall",
-+ "MemoryManagement",
-+ "Interrupt",
-+};
-+
-+static unsigned long class_do_clear;
-+static unsigned long tmu_do_clear;
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static unsigned long util_do_clear;
-+#endif
-+
-+static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
-+ do_clear)
-+{
-+ ssize_t len = 0;
-+ u32 val;
-+ char statebuf[5];
-+ struct pfe_cpumon *cpumon = &pfe->cpumon;
-+ u32 debug_indicator;
-+ u32 debug[20];
-+
-+ *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
-+ dmem_addr += 4;
-+
-+ statebuf[4] = '\0';
-+ len += sprintf(buf + len, "state=%4s ", statebuf);
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ dmem_addr += 4;
-+ len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ if (do_clear && val)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ dmem_addr += 4;
-+ len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ if (do_clear && val)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ dmem_addr += 4;
-+ if (id >= TMU0_ID && id <= TMU_MAX_ID)
-+ len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
-+ else
-+ len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
-+
-+ val = pe_dmem_read(id, dmem_addr, 4);
-+ if (do_clear && val)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ dmem_addr += 4;
-+ if (val)
-+ len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
-+
-+ len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
-+
-+ len += sprintf(buf + len, "\n");
-+
-+ debug_indicator = pe_dmem_read(id, dmem_addr, 4);
-+ dmem_addr += 4;
-+ if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
-+ int j, last = 0;
-+
-+ for (j = 0; j < 16; j++) {
-+ debug[j] = pe_dmem_read(id, dmem_addr, 4);
-+ if (debug[j]) {
-+ if (do_clear)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ last = j + 1;
-+ }
-+ dmem_addr += 4;
-+ }
-+ for (j = 0; j < last; j++) {
-+ len += sprintf(buf + len, "%08x%s",
-+ cpu_to_be32(debug[j]),
-+ (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
-+ }
-+ }
-+
-+ if (!strncmp(statebuf, "DEAD", 4)) {
-+ u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
-+
-+ len += sprintf(buf + len, "Exception details:\n");
-+ for (i = 0; i < 20; i++) {
-+ debug[i] = pe_dmem_read(id, dump, 4);
-+ dump += 4;
-+ if (i == 2)
-+ len += sprintf(buf + len, "%4s = %08x (=%s) ",
-+ register_name[i], cpu_to_be32(debug[i]),
-+ exception_name[min((u32)
-+ cpu_to_be32(debug[i]), (u32)13)]);
-+ else
-+ len += sprintf(buf + len, "%4s = %08x%s",
-+ register_name[i], cpu_to_be32(debug[i]),
-+ (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
-+ }
-+ }
-+
-+ return len;
-+}
-+
-+static ssize_t class_phy_stats(char *buf, int phy)
-+{
-+ ssize_t len = 0;
-+ int off1 = phy * 0x28;
-+ int off2 = phy * 0x10;
-+
-+ if (phy == 3)
-+ off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
-+
-+ len += sprintf(buf + len, "phy: %d\n", phy);
-+ len += sprintf(buf + len,
-+ " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
-+ readl(CLASS_PHY1_RX_PKTS + off1),
-+ readl(CLASS_PHY1_TX_PKTS + off1),
-+ readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
-+ readl(CLASS_PHY1_V4_PKTS + off1),
-+ readl(CLASS_PHY1_V6_PKTS + off1));
-+
-+ len += sprintf(buf + len,
-+ " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
-+ readl(CLASS_PHY1_ICMP_PKTS + off2),
-+ readl(CLASS_PHY1_IGMP_PKTS + off2),
-+ readl(CLASS_PHY1_TCP_PKTS + off2),
-+ readl(CLASS_PHY1_UDP_PKTS + off2));
-+
-+ len += sprintf(buf + len, " err\n");
-+ len += sprintf(buf + len,
-+ " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
-+ readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
-+ readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
-+ readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
-+ readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
-+ readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
-+
-+ return len;
-+}
-+
-+/* qm_read_drop_stat
-+ * This function is used to read the drop statistics from the TMU
-+ * hw drop counter. Since the hw counter is always cleared after
-+ * reading, this function maintains the previous drop count, and
-+ * adds the new value to it. That value can be retrieved by
-+ * passing a pointer to it with the total_drops arg.
-+ *
-+ * @param tmu TMU number (0 - 3)
-+ * @param queue queue number (0 - 15)
-+ * @param total_drops pointer to location to store total drops (or NULL)
-+ * @param do_reset if TRUE, clear total drops after updating
-+ */
-+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
-+{
-+ static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
-+ u32 val;
-+
-+ writel((tmu << 8) | queue, TMU_TEQ_CTRL);
-+ writel((tmu << 8) | queue, TMU_LLM_CTRL);
-+ val = readl(TMU_TEQ_DROP_STAT);
-+ qtotal[tmu][queue] += val;
-+ if (total_drops)
-+ *total_drops = qtotal[tmu][queue];
-+ if (do_reset)
-+ qtotal[tmu][queue] = 0;
-+ return val;
-+}
-+
-+static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
-+{
-+ ssize_t len = 0;
-+ u32 drops;
-+
-+ len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
-+
-+ drops = qm_read_drop_stat(tmu, queue, NULL, 0);
-+
-+ /* Select queue */
-+ writel((tmu << 8) | queue, TMU_TEQ_CTRL);
-+ writel((tmu << 8) | queue, TMU_LLM_CTRL);
-+
-+ len += sprintf(buf + len,
-+ "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
-+ drops, readl(TMU_TEQ_TRANS_STAT),
-+ readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
-+ readl(TMU_LLM_QUE_DROPCNT));
-+
-+ return len;
-+}
-+
-+static ssize_t tmu_queues(char *buf, int tmu)
-+{
-+ ssize_t len = 0;
-+ int queue;
-+
-+ for (queue = 0; queue < 16; queue++)
-+ len += tmu_queue_stats(buf + len, tmu, queue);
-+
-+ return len;
-+}
-+
-+static ssize_t block_version(char *buf, void *addr)
-+{
-+ ssize_t len = 0;
-+ u32 val;
-+
-+ val = readl(addr);
-+ len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
-+ (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
-+
-+ return len;
-+}
-+
-+static ssize_t bmu(char *buf, int id, void *base)
-+{
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "%s: %d\n ", __func__, id);
-+
-+ len += block_version(buf + len, base + BMU_VERSION);
-+
-+ len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
-+ BMU_BUF_SIZE)));
-+ len += sprintf(buf + len, " buf count: %x\n", readl(base +
-+ BMU_BUF_CNT));
-+ len += sprintf(buf + len, " buf rem: %x\n", readl(base +
-+ BMU_REM_BUF_CNT));
-+ len += sprintf(buf + len, " buf curr: %x\n", readl(base +
-+ BMU_CURR_BUF_CNT));
-+ len += sprintf(buf + len, " free err: %x\n", readl(base +
-+ BMU_FREE_ERR_ADDR));
-+
-+ return len;
-+}
-+
-+static ssize_t gpi(char *buf, int id, void *base)
-+{
-+ ssize_t len = 0;
-+ u32 val;
-+
-+ len += sprintf(buf + len, "%s%d:\n ", __func__, id);
-+ len += block_version(buf + len, base + GPI_VERSION);
-+
-+ len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
-+ GPI_FIFO_STATUS));
-+ val = readl(base + GPI_FIFO_DEBUG);
-+ len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
-+ 0x3f);
-+ len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
-+ 0x3f);
-+ len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
-+ 0x1ff);
-+ len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
-+ 0x1ff);
-+ len += sprintf(buf + len, " overrun: %x\n", readl(base +
-+ GPI_OVERRUN_DROPCNT));
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ class_do_clear = kstrtoul(buf, 0, 0);
-+ return count;
-+}
-+
-+static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ int id;
-+ u32 val;
-+ struct pfe_cpumon *cpumon = &pfe->cpumon;
-+
-+ len += block_version(buf + len, CLASS_VERSION);
-+
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
-+ len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
-+
-+ val = readl(CLASS_PE0_DEBUG + id * 4);
-+ len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
-+
-+ len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
-+ class_do_clear);
-+ }
-+ len += sprintf(buf + len, "aggregate load=%d%%\n\n",
-+ cpumon->class_usage_pct);
-+
-+ len += sprintf(buf + len, "pe status: 0x%x\n",
-+ readl(CLASS_PE_STATUS));
-+ len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
-+ readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
-+ len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
-+ readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
-+ len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
-+
-+ len += class_phy_stats(buf + len, 0);
-+ len += class_phy_stats(buf + len, 1);
-+ len += class_phy_stats(buf + len, 2);
-+ len += class_phy_stats(buf + len, 3);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ tmu_do_clear = kstrtoul(buf, 0, 0);
-+ return count;
-+}
-+
-+static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ int id;
-+ u32 val;
-+
-+ len += block_version(buf + len, TMU_VERSION);
-+
-+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
-+ if (id == TMU2_ID)
-+ continue;
-+ len += sprintf(buf + len, "%d: ", id - TMU0_ID);
-+
-+ len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
-+ tmu_do_clear);
-+ }
-+
-+ len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
-+ len += sprintf(buf + len, "inq fifo cnt: %x\n",
-+ readl(TMU_PHY_INQ_FIFO_CNT));
-+ val = readl(TMU_INQ_STAT);
-+ len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
-+ len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
-+
-+ return len;
-+}
-+
-+static unsigned long drops_do_clear;
-+static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
-+#endif
-+
-+char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
-+ "ICC",
-+ "Host Pkt Error",
-+ "Rx Error",
-+ "IPsec Outbound",
-+ "IPsec Inbound",
-+ "EXPT IPsec Error",
-+ "Reassembly",
-+ "Fragmenter",
-+ "NAT-T",
-+ "Socket",
-+ "Multicast",
-+ "NAT-PT",
-+ "Tx Disabled",
-+};
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
-+ "IPsec Outbound",
-+ "IPsec Inbound",
-+ "IPsec Rate Limiter",
-+ "Fragmenter",
-+ "Socket",
-+ "Tx Disabled",
-+ "Rx Error",
-+};
-+#endif
-+
-+static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ drops_do_clear = kstrtoul(buf, 0, 0);
-+ return count;
-+}
-+
-+static u32 tmu_drops[4][16];
-+static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ int id, dropnum;
-+ int tmu, queue;
-+ u32 val;
-+ u32 dmem_addr;
-+ int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
-+ struct pfe_ctrl *ctrl = &pfe->ctrl;
-+
-+ memset(class_drop_counter, 0, sizeof(class_drop_counter));
-+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
-+ if (drops_do_clear)
-+ pe_sync_stop(ctrl, (1 << id));
-+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
-+ dropnum++) {
-+ dmem_addr = CLASS_DM_DROP_CNTR;
-+ val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
-+ class_drop_counter[dropnum] += val;
-+ num_class_drops += val;
-+ if (drops_do_clear)
-+ pe_dmem_write(id, 0, dmem_addr, 4);
-+ }
-+ if (drops_do_clear)
-+ pe_start(ctrl, (1 << id));
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (drops_do_clear)
-+ pe_sync_stop(ctrl, (1 << UTIL_ID));
-+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
-+ dmem_addr = UTIL_DM_DROP_CNTR;
-+ val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
-+ util_drop_counter[dropnum] = val;
-+ num_util_drops += val;
-+ if (drops_do_clear)
-+ pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
-+ }
-+ if (drops_do_clear)
-+ pe_start(ctrl, (1 << UTIL_ID));
-+#endif
-+ for (tmu = 0; tmu < 4; tmu++) {
-+ for (queue = 0; queue < 16; queue++) {
-+ qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
-+ drops_do_clear);
-+ num_tmu_drops += tmu_drops[tmu][queue];
-+ }
-+ }
-+
-+ if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
-+ len += sprintf(buf + len, "No PE drops\n\n");
-+
-+ if (num_class_drops > 0) {
-+ len += sprintf(buf + len, "Class PE drops --\n");
-+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
-+ dropnum++) {
-+ if (class_drop_counter[dropnum] > 0)
-+ len += sprintf(buf + len, " %s: %d\n",
-+ class_drop_description[dropnum],
-+ class_drop_counter[dropnum]);
-+ }
-+ len += sprintf(buf + len, "\n");
-+ }
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (num_util_drops > 0) {
-+ len += sprintf(buf + len, "Util PE drops --\n");
-+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
-+ if (util_drop_counter[dropnum] > 0)
-+ len += sprintf(buf + len, " %s: %d\n",
-+ util_drop_description[dropnum],
-+ util_drop_counter[dropnum]);
-+ }
-+ len += sprintf(buf + len, "\n");
-+ }
-+#endif
-+ if (num_tmu_drops > 0) {
-+ len += sprintf(buf + len, "TMU drops --\n");
-+ for (tmu = 0; tmu < 4; tmu++) {
-+ for (queue = 0; queue < 16; queue++) {
-+ if (tmu_drops[tmu][queue] > 0)
-+ len += sprintf(buf + len,
-+ " TMU%d-Q%d: %d\n"
-+ , tmu, queue, tmu_drops[tmu][queue]);
-+ }
-+ }
-+ len += sprintf(buf + len, "\n");
-+ }
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
-+ *attr, char *buf)
-+{
-+ return tmu_queues(buf, 0);
-+}
-+
-+static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
-+ *attr, char *buf)
-+{
-+ return tmu_queues(buf, 1);
-+}
-+
-+static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
-+ *attr, char *buf)
-+{
-+ return tmu_queues(buf, 2);
-+}
-+
-+static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
-+ *attr, char *buf)
-+{
-+ return tmu_queues(buf, 3);
-+}
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ util_do_clear = kstrtoul(buf, NULL, 0);
-+ return count;
-+}
-+
-+static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+ struct pfe_ctrl *ctrl = &pfe->ctrl;
-+
-+ len += block_version(buf + len, UTIL_VERSION);
-+
-+ pe_sync_stop(ctrl, (1 << UTIL_ID));
-+ len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
-+ util_do_clear);
-+ pe_start(ctrl, (1 << UTIL_ID));
-+
-+ len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
-+ len += sprintf(buf + len, "max buf cnt: %x\n",
-+ readl(UTIL_MAX_BUF_CNT));
-+ len += sprintf(buf + len, "tsq max cnt: %x\n",
-+ readl(UTIL_TSQ_MAX_CNT));
-+
-+ return len;
-+}
-+#endif
-+
-+static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+
-+ len += bmu(buf + len, 1, BMU1_BASE_ADDR);
-+ len += bmu(buf + len, 2, BMU2_BASE_ADDR);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "hif:\n ");
-+ len += block_version(buf + len, HIF_VERSION);
-+
-+ len += sprintf(buf + len, " tx curr bd: %x\n",
-+ readl(HIF_TX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " tx status: %x\n",
-+ readl(HIF_TX_STATUS));
-+ len += sprintf(buf + len, " tx dma status: %x\n",
-+ readl(HIF_TX_DMA_STATUS));
-+
-+ len += sprintf(buf + len, " rx curr bd: %x\n",
-+ readl(HIF_RX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " rx status: %x\n",
-+ readl(HIF_RX_STATUS));
-+ len += sprintf(buf + len, " rx dma status: %x\n",
-+ readl(HIF_RX_DMA_STATUS));
-+
-+ len += sprintf(buf + len, "hif nocopy:\n ");
-+ len += block_version(buf + len, HIF_NOCPY_VERSION);
-+
-+ len += sprintf(buf + len, " tx curr bd: %x\n",
-+ readl(HIF_NOCPY_TX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " tx status: %x\n",
-+ readl(HIF_NOCPY_TX_STATUS));
-+ len += sprintf(buf + len, " tx dma status: %x\n",
-+ readl(HIF_NOCPY_TX_DMA_STATUS));
-+
-+ len += sprintf(buf + len, " rx curr bd: %x\n",
-+ readl(HIF_NOCPY_RX_CURR_BD_ADDR));
-+ len += sprintf(buf + len, " rx status: %x\n",
-+ readl(HIF_NOCPY_RX_STATUS));
-+ len += sprintf(buf + len, " rx dma status: %x\n",
-+ readl(HIF_NOCPY_RX_DMA_STATUS));
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ ssize_t len = 0;
-+
-+ len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
-+ len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
-+ len += gpi(buf + len, 3, HGPI_BASE_ADDR);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
-+ *attr, char *buf)
-+{
-+ ssize_t len = 0;
-+ struct pfe_memmon *memmon = &pfe->memmon;
-+
-+ len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
-+ memmon->kernel_memory_allocated,
-+ (memmon->kernel_memory_allocated + 1023) / 1024);
-+
-+ return len;
-+}
-+
-+#ifdef HIF_NAPI_STATS
-+static ssize_t pfe_show_hif_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct platform_device *pdev = to_platform_device(dev);
-+ struct pfe *pfe = platform_get_drvdata(pdev);
-+ ssize_t len = 0;
-+
-+ len += sprintf(buf + len, "sched: %u\n",
-+ pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
-+ len += sprintf(buf + len, "poll: %u\n",
-+ pfe->hif.napi_counters[NAPI_POLL_COUNT]);
-+ len += sprintf(buf + len, "packet: %u\n",
-+ pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
-+ len += sprintf(buf + len, "budget: %u\n",
-+ pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
-+ len += sprintf(buf + len, "desc: %u\n",
-+ pfe->hif.napi_counters[NAPI_DESC_COUNT]);
-+ len += sprintf(buf + len, "full: %u\n",
-+ pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
-+
-+ return len;
-+}
-+
-+static ssize_t pfe_set_hif_napi_stats(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct platform_device *pdev = to_platform_device(dev);
-+ struct pfe *pfe = platform_get_drvdata(pdev);
-+
-+ memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
-+
-+ return count;
-+}
-+
-+static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
-+ pfe_set_hif_napi_stats);
-+#endif
-+
-+static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
-+static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
-+#endif
-+static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
-+static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
-+static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
-+static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
-+static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
-+static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
-+static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
-+static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
-+static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
-+
-+int pfe_sysfs_init(struct pfe *pfe)
-+{
-+ if (device_create_file(pfe->dev, &dev_attr_class))
-+ goto err_class;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu))
-+ goto err_tmu;
-+
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ if (device_create_file(pfe->dev, &dev_attr_util))
-+ goto err_util;
-+#endif
-+
-+ if (device_create_file(pfe->dev, &dev_attr_bmu))
-+ goto err_bmu;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_hif))
-+ goto err_hif;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_gpi))
-+ goto err_gpi;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_drops))
-+ goto err_drops;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
-+ goto err_tmu0_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
-+ goto err_tmu1_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
-+ goto err_tmu2_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
-+ goto err_tmu3_queues;
-+
-+ if (device_create_file(pfe->dev, &dev_attr_pfemem))
-+ goto err_pfemem;
-+
-+#ifdef HIF_NAPI_STATS
-+ if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
-+ goto err_hif_napi_stats;
-+#endif
-+
-+ return 0;
-+
-+#ifdef HIF_NAPI_STATS
-+err_hif_napi_stats:
-+ device_remove_file(pfe->dev, &dev_attr_pfemem);
-+#endif
-+
-+err_pfemem:
-+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
-+
-+err_tmu3_queues:
-+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
-+
-+err_tmu2_queues:
-+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
-+
-+err_tmu1_queues:
-+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
-+
-+err_tmu0_queues:
-+ device_remove_file(pfe->dev, &dev_attr_drops);
-+
-+err_drops:
-+ device_remove_file(pfe->dev, &dev_attr_gpi);
-+
-+err_gpi:
-+ device_remove_file(pfe->dev, &dev_attr_hif);
-+
-+err_hif:
-+ device_remove_file(pfe->dev, &dev_attr_bmu);
-+
-+err_bmu:
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ device_remove_file(pfe->dev, &dev_attr_util);
-+
-+err_util:
-+#endif
-+ device_remove_file(pfe->dev, &dev_attr_tmu);
-+
-+err_tmu:
-+ device_remove_file(pfe->dev, &dev_attr_class);
-+
-+err_class:
-+ return -1;
-+}
-+
-+void pfe_sysfs_exit(struct pfe *pfe)
-+{
-+#ifdef HIF_NAPI_STATS
-+ device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
-+#endif
-+ device_remove_file(pfe->dev, &dev_attr_pfemem);
-+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
-+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
-+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
-+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
-+ device_remove_file(pfe->dev, &dev_attr_drops);
-+ device_remove_file(pfe->dev, &dev_attr_gpi);
-+ device_remove_file(pfe->dev, &dev_attr_hif);
-+ device_remove_file(pfe->dev, &dev_attr_bmu);
-+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
-+ device_remove_file(pfe->dev, &dev_attr_util);
-+#endif
-+ device_remove_file(pfe->dev, &dev_attr_tmu);
-+ device_remove_file(pfe->dev, &dev_attr_class);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
-@@ -0,0 +1,17 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ */
-+
-+#ifndef _PFE_SYSFS_H_
-+#define _PFE_SYSFS_H_
-+
-+#include <linux/proc_fs.h>
-+
-+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
-+
-+int pfe_sysfs_init(struct pfe *pfe);
-+void pfe_sysfs_exit(struct pfe *pfe);
-+
-+#endif /* _PFE_SYSFS_H_ */
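The pfe_sysfs.c file deleted above is a long but regular application of one kernel idiom: a DEVICE_ATTR() per statistics file, a device_create_file() call for each in pfe_sysfs_init(), and a reverse-order goto chain that unwinds on failure. A minimal sketch of that idiom, using made-up attribute names (foo, bar) and none of the PFE specifics:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* Hypothetical show/store callbacks; names are illustrative only. */
    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
            return sprintf(buf, "foo\n");
    }

    static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
    {
            return count;
    }

    static DEVICE_ATTR(foo, 0644, foo_show, foo_store);
    static DEVICE_ATTR(bar, 0444, foo_show, NULL);

    /* Register each file in turn; on failure unwind in reverse order. */
    static int example_sysfs_init(struct device *dev)
    {
            if (device_create_file(dev, &dev_attr_foo))
                    goto err_foo;
            if (device_create_file(dev, &dev_attr_bar))
                    goto err_bar;
            return 0;

    err_bar:
            device_remove_file(dev, &dev_attr_foo);
    err_foo:
            return -1;      /* matches the -1 convention of pfe_sysfs_init() */
    }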
diff --git a/target/linux/layerscape/patches-4.14/711-dpaa-bqman-support-layerscape.patch b/target/linux/layerscape/patches-4.14/711-dpaa-bqman-support-layerscape.patch
deleted file mode 100644
index 715cfe0875..0000000000
--- a/target/linux/layerscape/patches-4.14/711-dpaa-bqman-support-layerscape.patch
+++ /dev/null
@@ -1,923 +0,0 @@
-From 371e99a257cb714f9a6027d6571cb1a43855d926 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:24 +0800
-Subject: [PATCH] dpaa-bqman: support layerscape
-
-This is an integrated patch of dpaa-bqman for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
-Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
----
- drivers/soc/fsl/qbman/Kconfig | 2 +-
- drivers/soc/fsl/qbman/bman.c | 24 +++-
- drivers/soc/fsl/qbman/bman_ccsr.c | 57 +++++++++-
- drivers/soc/fsl/qbman/bman_portal.c | 44 ++++++--
- drivers/soc/fsl/qbman/bman_priv.h | 3 +
- drivers/soc/fsl/qbman/dpaa_sys.h | 8 +-
- drivers/soc/fsl/qbman/qman.c | 46 +++++++-
- drivers/soc/fsl/qbman/qman_ccsr.c | 168 +++++++++++++++++++++++-----
- drivers/soc/fsl/qbman/qman_portal.c | 60 ++++++++--
- drivers/soc/fsl/qbman/qman_priv.h | 5 +-
- drivers/soc/fsl/qbman/qman_test.h | 2 -
- include/soc/fsl/bman.h | 16 +++
- include/soc/fsl/qman.h | 17 +++
- 13 files changed, 390 insertions(+), 62 deletions(-)
-
---- a/drivers/soc/fsl/qbman/Kconfig
-+++ b/drivers/soc/fsl/qbman/Kconfig
-@@ -1,6 +1,6 @@
- menuconfig FSL_DPAA
- bool "Freescale DPAA 1.x support"
-- depends on FSL_SOC_BOOKE
-+ depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
- select GENERIC_ALLOCATOR
- help
- The Freescale Data Path Acceleration Architecture (DPAA) is a set of
---- a/drivers/soc/fsl/qbman/bman.c
-+++ b/drivers/soc/fsl/qbman/bman.c
-@@ -35,6 +35,27 @@
-
- /* Portal register assists */
-
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+/* Cache-inhibited register offsets */
-+#define BM_REG_RCR_PI_CINH 0x3000
-+#define BM_REG_RCR_CI_CINH 0x3100
-+#define BM_REG_RCR_ITR 0x3200
-+#define BM_REG_CFG 0x3300
-+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
-+#define BM_REG_ISR 0x3e00
-+#define BM_REG_IER 0x3e40
-+#define BM_REG_ISDR 0x3e80
-+#define BM_REG_IIR 0x3ec0
-+
-+/* Cache-enabled register offsets */
-+#define BM_CL_CR 0x0000
-+#define BM_CL_RR0 0x0100
-+#define BM_CL_RR1 0x0140
-+#define BM_CL_RCR 0x1000
-+#define BM_CL_RCR_PI_CENA 0x3000
-+#define BM_CL_RCR_CI_CENA 0x3100
-+
-+#else
- /* Cache-inhibited register offsets */
- #define BM_REG_RCR_PI_CINH 0x0000
- #define BM_REG_RCR_CI_CINH 0x0004
-@@ -53,6 +74,7 @@
- #define BM_CL_RCR 0x1000
- #define BM_CL_RCR_PI_CENA 0x3000
- #define BM_CL_RCR_CI_CENA 0x3100
-+#endif
-
- /*
- * Portal modes.
-@@ -607,7 +629,7 @@ int bman_p_irqsource_add(struct bman_por
- unsigned long irqflags;
-
- local_irq_save(irqflags);
-- set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
-+ p->irq_sources |= bits & BM_PIRQ_VISIBLE;
- bm_out(&p->p, BM_REG_IER, p->irq_sources);
- local_irq_restore(irqflags);
- return 0;
---- a/drivers/soc/fsl/qbman/bman_ccsr.c
-+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
-@@ -29,6 +29,7 @@
- */
-
- #include "bman_priv.h"
-+#include <linux/iommu.h>
-
- u16 bman_ip_rev;
- EXPORT_SYMBOL(bman_ip_rev);
-@@ -120,6 +121,7 @@ static void bm_set_memory(u64 ba, u32 si
- */
- static dma_addr_t fbpr_a;
- static size_t fbpr_sz;
-+static int __bman_probed;
-
- static int bman_fbpr(struct reserved_mem *rmem)
- {
-@@ -166,14 +168,24 @@ static irqreturn_t bman_isr(int irq, voi
- return IRQ_HANDLED;
- }
-
-+int bman_is_probed(void)
-+{
-+ return __bman_probed;
-+}
-+EXPORT_SYMBOL_GPL(bman_is_probed);
-+
- static int fsl_bman_probe(struct platform_device *pdev)
- {
- int ret, err_irq;
- struct device *dev = &pdev->dev;
-- struct device_node *node = dev->of_node;
-+ struct device_node *mem_node, *node = dev->of_node;
-+ struct iommu_domain *domain;
- struct resource *res;
- u16 id, bm_pool_cnt;
- u8 major, minor;
-+ u64 size;
-+
-+ __bman_probed = -1;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
-@@ -201,6 +213,47 @@ static int fsl_bman_probe(struct platfor
- return -ENODEV;
- }
-
-+ /*
-+ * If FBPR memory wasn't defined using the qbman compatible string
-+ * try using the of_reserved_mem_device method
-+ */
-+ if (!fbpr_a) {
-+ ret = of_reserved_mem_device_init(dev);
-+ if (ret) {
-+ dev_err(dev, "of_reserved_mem_device_init() failed 0x%x\n",
-+ ret);
-+ return -ENODEV;
-+ }
-+ mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
-+ if (mem_node) {
-+ ret = of_property_read_u64(mem_node, "size", &size);
-+ if (ret) {
-+ dev_err(dev, "FBPR: of_address_to_resource fails 0x%x\n",
-+ ret);
-+ return -ENODEV;
-+ }
-+ fbpr_sz = size;
-+ } else {
-+ dev_err(dev, "No memory-region found for FBPR\n");
-+ return -ENODEV;
-+ }
-+ if (!dma_zalloc_coherent(dev, fbpr_sz, &fbpr_a, 0)) {
-+ dev_err(dev, "Alloc FBPR memory failed\n");
-+ return -ENODEV;
-+ }
-+ }
-+
-+ dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
-+
-+ /* Create a 1-to-1 iommu mapping for FBPR area */
-+ domain = iommu_get_domain_for_dev(dev);
-+ if (domain) {
-+ ret = iommu_map(domain, fbpr_a, fbpr_a, PAGE_ALIGN(fbpr_sz),
-+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-+ if (ret)
-+ dev_warn(dev, "failed to iommu_map() %d\n", ret);
-+ }
-+
- bm_set_memory(fbpr_a, fbpr_sz);
-
- err_irq = platform_get_irq(pdev, 0);
-@@ -240,6 +293,8 @@ static int fsl_bman_probe(struct platfor
- return ret;
- }
-
-+ __bman_probed = 1;
-+
- return 0;
- };
-
---- a/drivers/soc/fsl/qbman/bman_portal.c
-+++ b/drivers/soc/fsl/qbman/bman_portal.c
-@@ -32,6 +32,7 @@
-
- static struct bman_portal *affine_bportals[NR_CPUS];
- static struct cpumask portal_cpus;
-+static int __bman_portals_probed;
- /* protect bman global registers and global data shared among portals */
- static DEFINE_SPINLOCK(bman_lock);
-
-@@ -85,6 +86,12 @@ static int bman_online_cpu(unsigned int
- return 0;
- }
-
-+int bman_portals_probed(void)
-+{
-+ return __bman_portals_probed;
-+}
-+EXPORT_SYMBOL_GPL(bman_portals_probed);
-+
- static int bman_portal_probe(struct platform_device *pdev)
- {
- struct device *dev = &pdev->dev;
-@@ -92,11 +99,21 @@ static int bman_portal_probe(struct plat
- struct bm_portal_config *pcfg;
- struct resource *addr_phys[2];
- void __iomem *va;
-- int irq, cpu;
-+ int irq, cpu, err;
-+
-+ err = bman_is_probed();
-+ if (!err)
-+ return -EPROBE_DEFER;
-+ if (err < 0) {
-+ dev_err(&pdev->dev, "failing probe due to bman probe error\n");
-+ return -ENODEV;
-+ }
-
- pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
-- if (!pcfg)
-+ if (!pcfg) {
-+ __bman_portals_probed = -1;
- return -ENOMEM;
-+ }
-
- pcfg->dev = dev;
-
-@@ -104,14 +121,14 @@ static int bman_portal_probe(struct plat
- DPAA_PORTAL_CE);
- if (!addr_phys[0]) {
- dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
-- return -ENXIO;
-+ goto err_ioremap1;
- }
-
- addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
- DPAA_PORTAL_CI);
- if (!addr_phys[1]) {
- dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
-- return -ENXIO;
-+ goto err_ioremap1;
- }
-
- pcfg->cpu = -1;
-@@ -119,11 +136,18 @@ static int bman_portal_probe(struct plat
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "Can't get %pOF IRQ'\n", node);
-- return -ENXIO;
-+ goto err_ioremap1;
- }
- pcfg->irq = irq;
-
-- va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-+#ifdef CONFIG_PPC
-+ /* PPC requires a cacheable/non-coherent mapping of the portal */
-+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
-+ (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
-+#else
-+ /* For ARM we can use write combine mapping. */
-+ va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
-+#endif
- if (!va) {
- dev_err(dev, "ioremap::CE failed\n");
- goto err_ioremap1;
-@@ -131,8 +155,7 @@ static int bman_portal_probe(struct plat
-
- pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
-- va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
-- _PAGE_GUARDED | _PAGE_NO_CACHE);
-+ va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
- if (!va) {
- dev_err(dev, "ioremap::CI failed\n");
- goto err_ioremap2;
-@@ -149,6 +172,9 @@ static int bman_portal_probe(struct plat
- }
-
- cpumask_set_cpu(cpu, &portal_cpus);
-+ if (!__bman_portals_probed &&
-+ cpumask_weight(&portal_cpus) == num_online_cpus())
-+ __bman_portals_probed = 1;
- spin_unlock(&bman_lock);
- pcfg->cpu = cpu;
-
-@@ -168,6 +194,8 @@ err_portal_init:
- err_ioremap2:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
- err_ioremap1:
-+ __bman_portals_probed = -1;
-+
- return -ENXIO;
- }
-
---- a/drivers/soc/fsl/qbman/bman_priv.h
-+++ b/drivers/soc/fsl/qbman/bman_priv.h
-@@ -33,6 +33,9 @@
- #include "dpaa_sys.h"
-
- #include <soc/fsl/bman.h>
-+#include <linux/dma-contiguous.h>
-+#include <linux/of_address.h>
-+#include <linux/dma-mapping.h>
-
- /* Portal processing (interrupt) sources */
- #define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
---- a/drivers/soc/fsl/qbman/dpaa_sys.h
-+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
-@@ -44,20 +44,18 @@
- #include <linux/prefetch.h>
- #include <linux/genalloc.h>
- #include <asm/cacheflush.h>
-+#include <linux/io.h>
-+#include <linux/delay.h>
-
- /* For 2-element tables related to cache-inhibited and cache-enabled mappings */
- #define DPAA_PORTAL_CE 0
- #define DPAA_PORTAL_CI 1
-
--#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
--#error "Unsupported Cacheline Size"
--#endif
--
- static inline void dpaa_flush(void *p)
- {
- #ifdef CONFIG_PPC
- flush_dcache_range((unsigned long)p, (unsigned long)p+64);
--#elif defined(CONFIG_ARM32)
-+#elif defined(CONFIG_ARM)
- __cpuc_flush_dcache_area(p, 64);
- #elif defined(CONFIG_ARM64)
- __flush_dcache_area(p, 64);
---- a/drivers/soc/fsl/qbman/qman.c
-+++ b/drivers/soc/fsl/qbman/qman.c
-@@ -41,6 +41,43 @@
-
- /* Portal register assists */
-
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+/* Cache-inhibited register offsets */
-+#define QM_REG_EQCR_PI_CINH 0x3000
-+#define QM_REG_EQCR_CI_CINH 0x3040
-+#define QM_REG_EQCR_ITR 0x3080
-+#define QM_REG_DQRR_PI_CINH 0x3100
-+#define QM_REG_DQRR_CI_CINH 0x3140
-+#define QM_REG_DQRR_ITR 0x3180
-+#define QM_REG_DQRR_DCAP 0x31C0
-+#define QM_REG_DQRR_SDQCR 0x3200
-+#define QM_REG_DQRR_VDQCR 0x3240
-+#define QM_REG_DQRR_PDQCR 0x3280
-+#define QM_REG_MR_PI_CINH 0x3300
-+#define QM_REG_MR_CI_CINH 0x3340
-+#define QM_REG_MR_ITR 0x3380
-+#define QM_REG_CFG 0x3500
-+#define QM_REG_ISR 0x3600
-+#define QM_REG_IER 0x3640
-+#define QM_REG_ISDR 0x3680
-+#define QM_REG_IIR 0x36C0
-+#define QM_REG_ITPR 0x3740
-+
-+/* Cache-enabled register offsets */
-+#define QM_CL_EQCR 0x0000
-+#define QM_CL_DQRR 0x1000
-+#define QM_CL_MR 0x2000
-+#define QM_CL_EQCR_PI_CENA 0x3000
-+#define QM_CL_EQCR_CI_CENA 0x3040
-+#define QM_CL_DQRR_PI_CENA 0x3100
-+#define QM_CL_DQRR_CI_CENA 0x3140
-+#define QM_CL_MR_PI_CENA 0x3300
-+#define QM_CL_MR_CI_CENA 0x3340
-+#define QM_CL_CR 0x3800
-+#define QM_CL_RR0 0x3900
-+#define QM_CL_RR1 0x3940
-+
-+#else
- /* Cache-inhibited register offsets */
- #define QM_REG_EQCR_PI_CINH 0x0000
- #define QM_REG_EQCR_CI_CINH 0x0004
-@@ -75,6 +112,7 @@
- #define QM_CL_CR 0x3800
- #define QM_CL_RR0 0x3900
- #define QM_CL_RR1 0x3940
-+#endif
-
- /*
- * BTW, the drivers (and h/w programming model) already obtain the required
-@@ -909,12 +947,12 @@ static inline int qm_mc_result_timeout(s
-
- static inline void fq_set(struct qman_fq *fq, u32 mask)
- {
-- set_bits(mask, &fq->flags);
-+ fq->flags |= mask;
- }
-
- static inline void fq_clear(struct qman_fq *fq, u32 mask)
- {
-- clear_bits(mask, &fq->flags);
-+ fq->flags &= ~mask;
- }
-
- static inline int fq_isset(struct qman_fq *fq, u32 mask)
-@@ -1567,7 +1605,7 @@ void qman_p_irqsource_add(struct qman_po
- unsigned long irqflags;
-
- local_irq_save(irqflags);
-- set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
-+ p->irq_sources |= bits & QM_PIRQ_VISIBLE;
- qm_out(&p->p, QM_REG_IER, p->irq_sources);
- local_irq_restore(irqflags);
- }
-@@ -1590,7 +1628,7 @@ void qman_p_irqsource_remove(struct qman
- */
- local_irq_save(irqflags);
- bits &= QM_PIRQ_VISIBLE;
-- clear_bits(bits, &p->irq_sources);
-+ p->irq_sources &= ~bits;
- qm_out(&p->p, QM_REG_IER, p->irq_sources);
- ier = qm_in(&p->p, QM_REG_IER);
- /*
---- a/drivers/soc/fsl/qbman/qman_ccsr.c
-+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
-@@ -29,6 +29,7 @@
- */
-
- #include "qman_priv.h"
-+#include <linux/iommu.h>
-
- u16 qman_ip_rev;
- EXPORT_SYMBOL(qman_ip_rev);
-@@ -273,6 +274,7 @@ static const struct qman_error_info_mdat
- static u32 __iomem *qm_ccsr_start;
- /* A SDQCR mask comprising all the available/visible pool channels */
- static u32 qm_pools_sdqcr;
-+static int __qman_probed;
-
- static inline u32 qm_ccsr_in(u32 offset)
- {
-@@ -401,21 +403,42 @@ static int qm_init_pfdr(struct device *d
- }
-
- /*
-- * Ideally we would use the DMA API to turn rmem->base into a DMA address
-- * (especially if iommu translations ever get involved). Unfortunately, the
-- * DMA API currently does not allow mapping anything that is not backed with
-- * a struct page.
-+ * QMan needs two global memory areas initialized at boot time:
-+ * 1) FQD: Frame Queue Descriptors used to manage frame queues
-+ * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
-+ * Both areas are reserved using the device tree reserved memory framework
-+ * and the addresses and sizes are initialized when the QMan device is probed
- */
- static dma_addr_t fqd_a, pfdr_a;
- static size_t fqd_sz, pfdr_sz;
-
-+#ifdef CONFIG_PPC
-+/*
-+ * Support for PPC Device Tree backward compatibility when the compatible
-+ * string is set to fsl,qman-fqd or fsl,qman-pfdr
-+ */
-+static int zero_priv_mem(phys_addr_t addr, size_t sz)
-+{
-+ /* map as cacheable, non-guarded */
-+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
-+
-+ if (!tmpp)
-+ return -ENOMEM;
-+
-+ memset_io(tmpp, 0, sz);
-+ flush_dcache_range((unsigned long)tmpp,
-+ (unsigned long)tmpp + sz);
-+ iounmap(tmpp);
-+
-+ return 0;
-+}
-+
- static int qman_fqd(struct reserved_mem *rmem)
- {
- fqd_a = rmem->base;
- fqd_sz = rmem->size;
-
- WARN_ON(!(fqd_a && fqd_sz));
--
- return 0;
- }
- RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-@@ -431,32 +454,13 @@ static int qman_pfdr(struct reserved_mem
- }
- RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
-
-+#endif
-+
- static unsigned int qm_get_fqid_maxcnt(void)
- {
- return fqd_sz / 64;
- }
-
--/*
-- * Flush this memory range from data cache so that QMAN originated
-- * transactions for this memory region could be marked non-coherent.
-- */
--static int zero_priv_mem(struct device *dev, struct device_node *node,
-- phys_addr_t addr, size_t sz)
--{
-- /* map as cacheable, non-guarded */
-- void __iomem *tmpp = ioremap_prot(addr, sz, 0);
--
-- if (!tmpp)
-- return -ENOMEM;
--
-- memset_io(tmpp, 0, sz);
-- flush_dcache_range((unsigned long)tmpp,
-- (unsigned long)tmpp + sz);
-- iounmap(tmpp);
--
-- return 0;
--}
--
- static void log_edata_bits(struct device *dev, u32 bit_count)
- {
- u32 i, j, mask = 0xffffffff;
-@@ -595,6 +599,7 @@ static int qman_init_ccsr(struct device
- #define LIO_CFG_LIODN_MASK 0x0fff0000
- void qman_liodn_fixup(u16 channel)
- {
-+#ifdef CONFIG_PPC
- static int done;
- static u32 liodn_offset;
- u32 before, after;
-@@ -614,6 +619,7 @@ void qman_liodn_fixup(u16 channel)
- qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
- else
- qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
-+#endif
- }
-
- #define IO_CFG_SDEST_MASK 0x00ff0000
-@@ -684,14 +690,24 @@ static int qman_resource_init(struct dev
- return 0;
- }
-
-+int qman_is_probed(void)
-+{
-+ return __qman_probed;
-+}
-+EXPORT_SYMBOL_GPL(qman_is_probed);
-+
- static int fsl_qman_probe(struct platform_device *pdev)
- {
- struct device *dev = &pdev->dev;
-- struct device_node *node = dev->of_node;
-+ struct device_node *mem_node, *node = dev->of_node;
-+ struct iommu_domain *domain;
- struct resource *res;
- int ret, err_irq;
- u16 id;
- u8 major, minor;
-+ u64 size;
-+
-+ __qman_probed = -1;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
-@@ -717,6 +733,8 @@ static int fsl_qman_probe(struct platfor
- qman_ip_rev = QMAN_REV30;
- else if (major == 3 && minor == 1)
- qman_ip_rev = QMAN_REV31;
-+ else if (major == 3 && minor == 2)
-+ qman_ip_rev = QMAN_REV32;
- else {
- dev_err(dev, "Unknown QMan version\n");
- return -ENODEV;
-@@ -727,10 +745,96 @@ static int fsl_qman_probe(struct platfor
- qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
- }
-
-- ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
-- WARN_ON(ret);
-- if (ret)
-- return -ENODEV;
-+ if (fqd_a) {
-+#ifdef CONFIG_PPC
-+ /*
-+ * For PPC backward DT compatibility
-+ * FQD memory MUST be zero'd by software
-+ */
-+ zero_priv_mem(fqd_a, fqd_sz);
-+#else
-+ WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
-+#endif
-+ } else {
-+ /*
-+ * Order of memory regions is assumed as FQD followed by PFDR.
-+ * To ensure allocations come from the correct regions, the
-+ * driver initializes and then allocates each piece in order.
-+ */
-+ ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
-+ if (ret) {
-+ dev_err(dev, "of_reserved_mem_device_init_by_idx(0) failed 0x%x\n",
-+ ret);
-+ return -ENODEV;
-+ }
-+ mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
-+ if (mem_node) {
-+ ret = of_property_read_u64(mem_node, "size", &size);
-+ if (ret) {
-+ dev_err(dev, "FQD: of_address_to_resource fails 0x%x\n",
-+ ret);
-+ return -ENODEV;
-+ }
-+ fqd_sz = size;
-+ } else {
-+ dev_err(dev, "No memory-region found for FQD\n");
-+ return -ENODEV;
-+ }
-+ if (!dma_zalloc_coherent(dev, fqd_sz, &fqd_a, 0)) {
-+ dev_err(dev, "Alloc FQD memory failed\n");
-+ return -ENODEV;
-+ }
-+
-+ /*
-+ * Disassociate the FQD reserved memory area from the device
-+ * because a device can only have one DMA memory area. This
-+ * should be fine since the memory is allocated and initialized
-+ * and only ever accessed by the QMan device from now on
-+ */
-+ of_reserved_mem_device_release(dev);
-+ }
-+ dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
-+
-+ if (!pfdr_a) {
-+ /* Setup PFDR memory */
-+ ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 1);
-+ if (ret) {
-+ dev_err(dev, "of_reserved_mem_device_init(1) failed 0x%x\n",
-+ ret);
-+ return -ENODEV;
-+ }
-+ mem_node = of_parse_phandle(dev->of_node, "memory-region", 1);
-+ if (mem_node) {
-+ ret = of_property_read_u64(mem_node, "size", &size);
-+ if (ret) {
-+ dev_err(dev, "PFDR: of_address_to_resource fails 0x%x\n",
-+ ret);
-+ return -ENODEV;
-+ }
-+ pfdr_sz = size;
-+ } else {
-+ dev_err(dev, "No memory-region found for PFDR\n");
-+ return -ENODEV;
-+ }
-+ if (!dma_zalloc_coherent(dev, pfdr_sz, &pfdr_a, 0)) {
-+ dev_err(dev, "Alloc PFDR Failed size 0x%zx\n", pfdr_sz);
-+ return -ENODEV;
-+ }
-+ }
-+ dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
-+
-+ dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
-+
-+ /* Create a 1-to-1 iommu mapping for fqd and pfdr areas */
-+ domain = iommu_get_domain_for_dev(dev);
-+ if (domain) {
-+ ret = iommu_map(domain, fqd_a, fqd_a, PAGE_ALIGN(fqd_sz),
-+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-+ if (ret)
-+ dev_warn(dev, "iommu_map(fqd) failed %d\n", ret);
-+ ret = iommu_map(domain, pfdr_a, pfdr_a, PAGE_ALIGN(pfdr_sz),
-+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-+ if (ret)
-+ dev_warn(dev, "iommu_map(pfdr) failed %d\n", ret);
-+ }
-
- ret = qman_init_ccsr(dev);
- if (ret) {
-@@ -793,6 +897,8 @@ static int fsl_qman_probe(struct platfor
- if (ret)
- return ret;
-
-+ __qman_probed = 1;
-+
- return 0;
- }
-
---- a/drivers/soc/fsl/qbman/qman_portal.c
-+++ b/drivers/soc/fsl/qbman/qman_portal.c
-@@ -29,6 +29,7 @@
- */
-
- #include "qman_priv.h"
-+#include <linux/iommu.h>
-
- struct qman_portal *qman_dma_portal;
- EXPORT_SYMBOL(qman_dma_portal);
-@@ -38,6 +39,7 @@ EXPORT_SYMBOL(qman_dma_portal);
- #define CONFIG_FSL_DPA_PIRQ_FAST 1
-
- static struct cpumask portal_cpus;
-+static int __qman_portals_probed;
- /* protect qman global registers and global data shared among portals */
- static DEFINE_SPINLOCK(qman_lock);
-
-@@ -218,19 +220,36 @@ static int qman_online_cpu(unsigned int
- return 0;
- }
-
-+int qman_portals_probed(void)
-+{
-+ return __qman_portals_probed;
-+}
-+EXPORT_SYMBOL_GPL(qman_portals_probed);
-+
- static int qman_portal_probe(struct platform_device *pdev)
- {
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
-+ struct iommu_domain *domain;
- struct qm_portal_config *pcfg;
- struct resource *addr_phys[2];
- void __iomem *va;
- int irq, cpu, err;
- u32 val;
-
-+ err = qman_is_probed();
-+ if (!err)
-+ return -EPROBE_DEFER;
-+ if (err < 0) {
-+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
-+ return -ENODEV;
-+ }
-+
- pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
-- if (!pcfg)
-+ if (!pcfg) {
-+ __qman_portals_probed = -1;
- return -ENOMEM;
-+ }
-
- pcfg->dev = dev;
-
-@@ -238,19 +257,20 @@ static int qman_portal_probe(struct plat
- DPAA_PORTAL_CE);
- if (!addr_phys[0]) {
- dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
-- return -ENXIO;
-+ goto err_ioremap1;
- }
-
- addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
- DPAA_PORTAL_CI);
- if (!addr_phys[1]) {
- dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
-- return -ENXIO;
-+ goto err_ioremap1;
- }
-
- err = of_property_read_u32(node, "cell-index", &val);
- if (err) {
- dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
-+ __qman_portals_probed = -1;
- return err;
- }
- pcfg->channel = val;
-@@ -258,11 +278,18 @@ static int qman_portal_probe(struct plat
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "Can't get %pOF IRQ\n", node);
-- return -ENXIO;
-+ goto err_ioremap1;
- }
- pcfg->irq = irq;
-
-- va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-+#ifdef CONFIG_PPC
-+ /* PPC requires a cacheable/non-coherent mapping of the portal */
-+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
-+ (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
-+#else
-+ /* For ARM we can use write combine mapping. */
-+ va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
-+#endif
- if (!va) {
- dev_err(dev, "ioremap::CE failed\n");
- goto err_ioremap1;
-@@ -270,8 +297,7 @@ static int qman_portal_probe(struct plat
-
- pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
-- va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
-- _PAGE_GUARDED | _PAGE_NO_CACHE);
-+ va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
- if (!va) {
- dev_err(dev, "ioremap::CI failed\n");
- goto err_ioremap2;
-@@ -279,6 +305,21 @@ static int qman_portal_probe(struct plat
-
- pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
-+ /* Create a 1-to-1 iommu mapping for cena portal area */
-+ domain = iommu_get_domain_for_dev(dev);
-+ if (domain) {
-+ /*
-+ * Note: not mapping this as cacheable triggers the infamous
-+ * QMan CIDE error.
-+ */
-+ err = iommu_map(domain,
-+ addr_phys[0]->start, addr_phys[0]->start,
-+ PAGE_ALIGN(resource_size(addr_phys[0])),
-+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-+ if (err)
-+ dev_warn(dev, "failed to iommu_map() %d\n", err);
-+ }
-+
- pcfg->pools = qm_get_pools_sdqcr();
-
- spin_lock(&qman_lock);
-@@ -290,6 +331,9 @@ static int qman_portal_probe(struct plat
- }
-
- cpumask_set_cpu(cpu, &portal_cpus);
-+ if (!__qman_portals_probed &&
-+ cpumask_weight(&portal_cpus) == num_online_cpus())
-+ __qman_portals_probed = 1;
- spin_unlock(&qman_lock);
- pcfg->cpu = cpu;
-
-@@ -314,6 +358,8 @@ err_portal_init:
- err_ioremap2:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
- err_ioremap1:
-+ __qman_portals_probed = -1;
-+
- return -ENXIO;
- }
-
---- a/drivers/soc/fsl/qbman/qman_priv.h
-+++ b/drivers/soc/fsl/qbman/qman_priv.h
-@@ -28,13 +28,13 @@
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
- #include "dpaa_sys.h"
-
- #include <soc/fsl/qman.h>
- #include <linux/dma-mapping.h>
- #include <linux/iommu.h>
-+#include <linux/dma-contiguous.h>
-+#include <linux/of_address.h>
-
- #if defined(CONFIG_FSL_PAMU)
- #include <asm/fsl_pamu_stash.h>
-@@ -187,6 +187,7 @@ struct qm_portal_config {
- #define QMAN_REV20 0x0200
- #define QMAN_REV30 0x0300
- #define QMAN_REV31 0x0301
-+#define QMAN_REV32 0x0302
- extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
-
- #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
---- a/drivers/soc/fsl/qbman/qman_test.h
-+++ b/drivers/soc/fsl/qbman/qman_test.h
-@@ -30,7 +30,5 @@
-
- #include "qman_priv.h"
-
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
- int qman_test_stash(void);
- int qman_test_api(void);
---- a/include/soc/fsl/bman.h
-+++ b/include/soc/fsl/bman.h
-@@ -126,4 +126,20 @@ int bman_release(struct bman_pool *pool,
- */
- int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
-
-+/**
-+ * bman_is_probed - Check if bman is probed
-+ *
-+ * Returns 1 if the bman driver successfully probed, -1 if the bman driver
-+ * failed to probe or 0 if the bman driver did not probed yet.
-+ */
-+int bman_is_probed(void);
-+/**
-+ * bman_portals_probed - Check if all cpu bound bman portals are probed
-+ *
-+ * Returns 1 if all the required cpu bound bman portals successfully probed,
-+ * -1 if probe errors appeared or 0 if the bman portals have not yet finished
-+ * probing.
-+ */
-+int bman_portals_probed(void);
-+
- #endif /* __FSL_BMAN_H */
---- a/include/soc/fsl/qman.h
-+++ b/include/soc/fsl/qman.h
-@@ -1186,4 +1186,21 @@ int qman_alloc_cgrid_range(u32 *result,
- */
- int qman_release_cgrid(u32 id);
-
-+/**
-+ * qman_is_probed - Check if qman is probed
-+ *
-+ * Returns 1 if the qman driver successfully probed, -1 if the qman driver
-+ * failed to probe or 0 if the qman driver has not been probed yet.
-+ */
-+int qman_is_probed(void);
-+
-+/**
-+ * qman_portals_probed - Check if all cpu bound qman portals are probed
-+ *
-+ * Returns 1 if all the required cpu bound qman portals successfully probed,
-+ * -1 if probe errors appeared or 0 if the qman portals have not yet finished
-+ * probing.
-+ */
-+int qman_portals_probed(void);
-+
- #endif /* __FSL_QMAN_H */
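The bman_is_probed()/qman_is_probed() and *_portals_probed() helpers added above exist so that DPAA consumers can defer their own probe until QBMan is ready, following the same tri-state convention (1 probed, 0 not yet, negative on failure) the portal drivers in this patch use. A sketch of a consumer probe built on that convention; the consumer driver itself is hypothetical:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <soc/fsl/qman.h>

    /* Hypothetical DPAA consumer: wait for QMan and its portals. */
    static int example_dpaa_probe(struct platform_device *pdev)
    {
            int ret;

            ret = qman_is_probed();
            if (!ret)
                    return -EPROBE_DEFER;   /* QMan not probed yet, retry later */
            if (ret < 0)
                    return -ENODEV;         /* QMan probe failed for good */

            ret = qman_portals_probed();
            if (!ret)
                    return -EPROBE_DEFER;
            if (ret < 0)
                    return -ENODEV;

            /* ...safe to set up frame queues, enqueue/dequeue, etc... */
            return 0;
    }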
diff --git a/target/linux/layerscape/patches-4.14/712-etsec-support-layerscape.patch b/target/linux/layerscape/patches-4.14/712-etsec-support-layerscape.patch
deleted file mode 100644
index bb075d4d4f..0000000000
--- a/target/linux/layerscape/patches-4.14/712-etsec-support-layerscape.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From f0f6e88696957d376d8875f675c1caf75a33fd67 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:34 +0800
-Subject: [PATCH] etsec: support layerscape
-
-This is an integrated patch of etsec for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/net/ethernet/freescale/gianfar.h | 3 ---
- .../net/ethernet/freescale/gianfar_ethtool.c | 23 +++++++++++++++----
- 2 files changed, 18 insertions(+), 8 deletions(-)
-
---- a/drivers/net/ethernet/freescale/gianfar.h
-+++ b/drivers/net/ethernet/freescale/gianfar.h
-@@ -1372,7 +1372,4 @@ struct filer_table {
- struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
- };
-
--/* The gianfar_ptp module will set this variable */
--extern int gfar_phc_index;
--
- #endif /* __GIANFAR_H */
---- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
-+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
-@@ -41,6 +41,8 @@
- #include <linux/phy.h>
- #include <linux/sort.h>
- #include <linux/if_vlan.h>
-+#include <linux/of_platform.h>
-+#include <linux/fsl/ptp_qoriq.h>
-
- #include "gianfar.h"
-
-@@ -1509,24 +1511,35 @@ static int gfar_get_nfc(struct net_devic
- return ret;
- }
-
--int gfar_phc_index = -1;
--EXPORT_SYMBOL(gfar_phc_index);
--
- static int gfar_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
- {
- struct gfar_private *priv = netdev_priv(dev);
-+ struct platform_device *ptp_dev;
-+ struct device_node *ptp_node;
-+ struct qoriq_ptp *ptp = NULL;
-+
-+ info->phc_index = -1;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-- info->phc_index = -1;
- return 0;
- }
-+
-+ ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
-+ if (ptp_node) {
-+ ptp_dev = of_find_device_by_node(ptp_node);
-+ if (ptp_dev)
-+ ptp = platform_get_drvdata(ptp_dev);
-+ }
-+
-+ if (ptp)
-+ info->phc_index = ptp->phc_index;
-+
- info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-- info->phc_index = gfar_phc_index;
- info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
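The ethtool hook above now resolves the PTP clock index at run time instead of through the removed gfar_phc_index global: it walks from the "fsl,etsec-ptp" node to its platform device and reads the driver data. A generalized sketch of that lookup with a placeholder compatible string and private struct; the of_node_put() is the usual reference-count courtesy and is not taken from the patch:

    #include <linux/of.h>
    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    struct example_ptp;     /* placeholder for the PTP driver's private data */

    static struct example_ptp *example_find_ptp(void)
    {
            struct device_node *np;
            struct platform_device *pdev;

            np = of_find_compatible_node(NULL, NULL, "example,ptp");
            if (!np)
                    return NULL;

            pdev = of_find_device_by_node(np);
            of_node_put(np);                /* drop the node reference */
            if (!pdev)
                    return NULL;

            return platform_get_drvdata(pdev);
    }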
diff --git a/target/linux/layerscape/patches-4.14/713-sdk_qbman-Fix-error-in-IP-revision-comparison.patch b/target/linux/layerscape/patches-4.14/713-sdk_qbman-Fix-error-in-IP-revision-comparison.patch
deleted file mode 100644
index b310e26855..0000000000
--- a/target/linux/layerscape/patches-4.14/713-sdk_qbman-Fix-error-in-IP-revision-comparison.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From b43b4fdd5caa4f66fd712c77589c167c952ec659 Mon Sep 17 00:00:00 2001
-From: Roy Pledge <roy.pledge@nxp.com>
-Date: Mon, 6 May 2019 11:18:57 -0400
-Subject: [PATCH] sdk_qbman: Fix error in IP revision comparison
-
-The comparison for QMAN_REV31 was incorrect as it
-would always fail due to the wrong mask.
-
-This fixes the following error in newer GCC versions:
-"error: bitwise comparison always evaluates to false
- [-Werror=tautological-compare]"
-
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
----
- drivers/staging/fsl_qbman/qman_config.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/staging/fsl_qbman/qman_config.c
-+++ b/drivers/staging/fsl_qbman/qman_config.c
-@@ -812,7 +812,7 @@ int qman_set_sdest(u16 channel, unsigned
-
- if (!qman_have_ccsr())
- return -ENODEV;
-- if ((qman_ip_rev & 0xFF00) == QMAN_REV31) {
-+ if ((qman_ip_rev & 0xFFFF) == QMAN_REV31) {
- /* LS1043A - only one L2 cache */
- cpu_idx = 0;
- }
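The one-line change above is easier to see with the constants written out: QMAN_REV31 is 0x0301 (major 3, minor 1, per qman_priv.h in the dpaa-bqman patch earlier in this commit), so masking the IP revision with 0xFF00 drops the minor byte and the old comparison could never be true, which is exactly what -Wtautological-compare reports. A small stand-alone illustration:

    #include <assert.h>

    #define QMAN_REV31 0x0301       /* major 3, minor 1 */

    int main(void)
    {
            unsigned int qman_ip_rev = 0x0301;      /* an LS1043A-style revision */

            /* Old test: the mask clears the low byte, so equality is impossible. */
            assert((qman_ip_rev & 0xFF00) != QMAN_REV31);

            /* Fixed test: compare the full major/minor value. */
            assert((qman_ip_rev & 0xFFFF) == QMAN_REV31);

            return 0;
    }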
diff --git a/target/linux/layerscape/patches-4.14/801-sata-support-layerscape.patch b/target/linux/layerscape/patches-4.14/801-sata-support-layerscape.patch
deleted file mode 100644
index 779127058a..0000000000
--- a/target/linux/layerscape/patches-4.14/801-sata-support-layerscape.patch
+++ /dev/null
@@ -1,289 +0,0 @@
-From 71fb63c92eae3f9197e2343ed5ed3676440789e1 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:59:01 +0800
-Subject: [PATCH] sata: support layerscape
-
-This is an integrated patch of sata for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Peng Ma <peng.ma@nxp.com>
-Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
----
- drivers/ata/ahci.h | 7 ++
- drivers/ata/ahci_qoriq.c | 168 ++++++++++++++++++++++++++++++++++++++
- drivers/ata/libata-core.c | 3 +
- 3 files changed, 178 insertions(+)
-
---- a/drivers/ata/ahci.h
-+++ b/drivers/ata/ahci.h
-@@ -445,4 +445,11 @@ static inline int ahci_nr_ports(u32 cap)
- return (cap & 0x1f) + 1;
- }
-
-+#ifdef CONFIG_AHCI_QORIQ
-+extern void fsl_sata_errata_379364(struct ata_link *link);
-+#else
-+static void fsl_sata_errata_379364(struct ata_link *link)
-+{}
-+#endif
-+
- #endif /* _AHCI_H */
---- a/drivers/ata/ahci_qoriq.c
-+++ b/drivers/ata/ahci_qoriq.c
-@@ -35,6 +35,8 @@
-
- /* port register default value */
- #define AHCI_PORT_PHY_1_CFG 0xa003fffe
-+#define AHCI_PORT_PHY2_CFG 0x28184d1f
-+#define AHCI_PORT_PHY3_CFG 0x0e081509
- #define AHCI_PORT_TRANS_CFG 0x08000029
- #define AHCI_PORT_AXICC_CFG 0x3fffffff
-
-@@ -49,6 +51,27 @@
- #define ECC_DIS_ARMV8_CH2 0x80000000
- #define ECC_DIS_LS1088A 0x40000000
-
-+/* errata for lx2160 */
-+#define RCWSR29_BASE 0x1E00170
-+#define SERDES2_BASE 0x1EB0000
-+#define DEVICE_CONFIG_REG_BASE 0x1E00000
-+#define SERDES2_LNAX_RX_CR(x) (0x840 + (0x100 * (x)))
-+#define SERDES2_LNAX_RX_CBR(x) (0x8C0 + (0x100 * (x)))
-+#define SYS_VER_REG 0xA4
-+#define LN_RX_RST 0x80000010
-+#define LN_RX_RST_DONE 0x3
-+#define LN_RX_MASK 0xf
-+#define LX2160A_VER1 0x1
-+
-+#define SERDES2_LNAA 0
-+#define SERDES2_LNAB 1
-+#define SERDES2_LNAC 2
-+#define SERDES2_LNAD 3
-+#define SERDES2_LNAE 4
-+#define SERDES2_LNAF 5
-+#define SERDES2_LNAG 6
-+#define SERDES2_LNAH 7
-+
- enum ahci_qoriq_type {
- AHCI_LS1021A,
- AHCI_LS1043A,
-@@ -56,6 +79,7 @@ enum ahci_qoriq_type {
- AHCI_LS1046A,
- AHCI_LS1088A,
- AHCI_LS2088A,
-+ AHCI_LX2160A,
- };
-
- struct ahci_qoriq_priv {
-@@ -72,6 +96,7 @@ static const struct of_device_id ahci_qo
- { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
- { .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
- { .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
-+ { .compatible = "fsl,lx2160a-ahci", .data = (void *)AHCI_LX2160A},
- {},
- };
- MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
-@@ -156,6 +181,138 @@ static struct scsi_host_template ahci_qo
- AHCI_SHT(DRV_NAME),
- };
-
-+void fsl_sata_errata_379364(struct ata_link *link)
-+{
-+ struct ata_port *ap = link->ap;
-+ struct ahci_host_priv *hpriv = ap->host->private_data;
-+ struct ahci_qoriq_priv *qoriq_priv = hpriv->plat_data;
-+ bool lx2160a_workaround = (qoriq_priv->type == AHCI_LX2160A);
-+
-+ int val = 0;
-+ void __iomem *rcw_base = NULL;
-+ void __iomem *serdes_base = NULL;
-+ void __iomem *dev_con_base = NULL;
-+
-+ if (!lx2160a_workaround)
-+ return;
-+ else {
-+ dev_con_base = ioremap(DEVICE_CONFIG_REG_BASE, PAGE_SIZE);
-+ if (!dev_con_base) {
-+ ata_link_err(link, "device config ioremap failed\n");
-+ return;
-+ }
-+
-+ val = (readl(dev_con_base + SYS_VER_REG) & GENMASK(7, 4)) >> 4;
-+ if (val != LX2160A_VER1)
-+ goto dev_unmap;
-+
-+ /*
-+ * Add few msec delay.
-+ * Check for corresponding serdes lane RST_DONE .
-+ * apply lane reset.
-+ */
-+
-+ serdes_base = ioremap(SERDES2_BASE, PAGE_SIZE);
-+ if (!serdes_base) {
-+ ata_link_err(link, "serdes ioremap failed\n");
-+ goto dev_unmap;
-+ }
-+
-+ rcw_base = ioremap(RCWSR29_BASE, PAGE_SIZE);
-+ if (!rcw_base) {
-+ ata_link_err(link, "rcw ioremap failed\n");
-+ goto serdes_unmap;
-+ }
-+
-+ ata_msleep(link->ap, 1);
-+
-+ val = (readl(rcw_base) & GENMASK(25, 21)) >> 21;
-+
-+ switch (val) {
-+ case 1:
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAC)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAC));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAD)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAD));
-+ break;
-+
-+ case 4:
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAG)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAG));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAH)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAH));
-+ break;
-+
-+ case 5:
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAE)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAE));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAF)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAF));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAG)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAG));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAH)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAH));
-+ break;
-+
-+ case 8:
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAC)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAC));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAD)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAD));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAE)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAE));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAF)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAF));
-+ break;
-+
-+ case 12:
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAG)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAG));
-+ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAH)) &
-+ LN_RX_MASK) != LN_RX_RST_DONE)
-+ writel(LN_RX_RST, serdes_base +
-+ SERDES2_LNAX_RX_CR(SERDES2_LNAH));
-+ break;
-+
-+ default:
-+ break;
-+ }
-+ }
-+
-+ iounmap(rcw_base);
-+serdes_unmap:
-+ iounmap(serdes_base);
-+dev_unmap:
-+ iounmap(dev_con_base);
-+}
-+
-+
- static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
- {
- struct ahci_qoriq_priv *qpriv = hpriv->plat_data;
-@@ -183,13 +340,18 @@ static int ahci_qoriq_phy_init(struct ah
- writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
- qpriv->ecc_addr);
- writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
-+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
-+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
- writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- if (qpriv->is_dmacoherent)
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
- break;
-
- case AHCI_LS2080A:
-+ case AHCI_LX2160A:
- writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
-+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
-+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
- writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- if (qpriv->is_dmacoherent)
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
-@@ -201,6 +363,8 @@ static int ahci_qoriq_phy_init(struct ah
- writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
- qpriv->ecc_addr);
- writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
-+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
-+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
- writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- if (qpriv->is_dmacoherent)
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
-@@ -212,6 +376,8 @@ static int ahci_qoriq_phy_init(struct ah
- writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
- qpriv->ecc_addr);
- writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
-+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
-+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
- writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- if (qpriv->is_dmacoherent)
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
-@@ -219,6 +385,8 @@ static int ahci_qoriq_phy_init(struct ah
-
- case AHCI_LS2088A:
- writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
-+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
-+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
- writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- if (qpriv->is_dmacoherent)
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
---- a/drivers/ata/libata-core.c
-+++ b/drivers/ata/libata-core.c
-@@ -76,6 +76,7 @@
- #define CREATE_TRACE_POINTS
- #include <trace/events/libata.h>
-
-+#include "ahci.h"
- #include "libata.h"
- #include "libata-transport.h"
-
-@@ -4119,6 +4120,8 @@ int sata_link_hardreset(struct ata_link
- */
- ata_msleep(link->ap, 1);
-
-+ fsl_sata_errata_379364(link);
-+
- /* bring link back */
- rc = sata_link_resume(link, timing, deadline);
- if (rc)
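
A reading note on the erratum workaround deleted above: fsl_sata_errata_379364() keys off the SerDes-2 protocol field in RCWSR29 (bits 25:21) on LX2160A rev1 and re-resets a protocol-specific set of receive lanes. Below is a minimal sketch of that mapping in table form, assuming the SERDES2_LNAx macros defined earlier in the same patch; the array itself is hypothetical and only restates the switch statement above:

/* Hypothetical table form of the switch in fsl_sata_errata_379364():
 * SerDes-2 protocol (RCWSR29[25:21]) -> receive lanes to re-reset.
 */
static const struct {
	unsigned int protocol;
	unsigned int nr_lanes;
	unsigned int lanes[4];
} lx2160a_sata_rx_fix[] = {
	{ .protocol = 1,  .nr_lanes = 2,
	  .lanes = { SERDES2_LNAC, SERDES2_LNAD } },
	{ .protocol = 4,  .nr_lanes = 2,
	  .lanes = { SERDES2_LNAG, SERDES2_LNAH } },
	{ .protocol = 5,  .nr_lanes = 4,
	  .lanes = { SERDES2_LNAE, SERDES2_LNAF, SERDES2_LNAG, SERDES2_LNAH } },
	{ .protocol = 8,  .nr_lanes = 4,
	  .lanes = { SERDES2_LNAC, SERDES2_LNAD, SERDES2_LNAE, SERDES2_LNAF } },
	{ .protocol = 12, .nr_lanes = 2,
	  .lanes = { SERDES2_LNAG, SERDES2_LNAH } },
};
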
diff --git a/target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch b/target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch
deleted file mode 100644
index e39bae0d1d..0000000000
--- a/target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch
+++ /dev/null
@@ -1,4263 +0,0 @@
-From 5cb4bc977d933323429050033da9c701b24df43e Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:23 +0800
-Subject: [PATCH] dma: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch of dma for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
-Signed-off-by: Changming Huang <jerry.huang@nxp.com>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Peng Ma <peng.ma@nxp.com>
-Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
-Signed-off-by: Rajiv Vishwakarma <rajiv.vishwakarma@nxp.com>
-Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
-Signed-off-by: Wen He <wen.he_1@nxp.com>
-Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
----
- drivers/dma/Kconfig | 33 +-
- drivers/dma/Makefile | 3 +
- drivers/dma/caam_dma.c | 462 ++++++++
- drivers/dma/dpaa2-qdma/Kconfig | 8 +
- drivers/dma/dpaa2-qdma/Makefile | 8 +
- drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 781 ++++++++++++++
- drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 181 ++++
- drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++++
- drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++++
- drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++
- drivers/dma/fsl-edma.c | 66 +-
- drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++++++++
- 12 files changed, 4073 insertions(+), 5 deletions(-)
- create mode 100644 drivers/dma/caam_dma.c
- create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
- create mode 100644 drivers/dma/dpaa2-qdma/Makefile
- create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
- create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
- create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
- create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
- create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
- create mode 100644 drivers/dma/fsl-qdma.c
-
---- a/drivers/dma/Kconfig
-+++ b/drivers/dma/Kconfig
-@@ -129,6 +129,24 @@ config COH901318
- help
- Enable support for ST-Ericsson COH 901 318 DMA.
-
-+config CRYPTO_DEV_FSL_CAAM_DMA
-+ tristate "CAAM DMA engine support"
-+ depends on CRYPTO_DEV_FSL_CAAM_JR
-+ default n
-+ select DMA_ENGINE
-+ select ASYNC_CORE
-+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-+ help
-+ Selecting this will offload the DMA operations for users of
-+ the scatter gather memcopy API to the CAAM via job rings. The
-+ CAAM is a hardware module that provides hardware acceleration to
-+ cryptographic operations. It has a built-in DMA controller that can
-+ be programmed to read/write cryptographic data. This module defines
-+ a DMA driver that uses the DMA capabilities of the CAAM.
-+
-+ To compile this as a module, choose M here: the module
-+ will be called caam_dma.
-+
- config DMA_BCM2835
- tristate "BCM2835 DMA engine support"
- depends on ARCH_BCM2835
-@@ -215,6 +233,20 @@ config FSL_EDMA
- multiplexing capability for DMA request sources(slot).
- This module can be found on Freescale Vybrid and LS-1 SoCs.
-
-+config FSL_QDMA
-+ tristate "NXP Layerscape qDMA engine support"
-+ select DMA_ENGINE
-+ select DMA_VIRTUAL_CHANNELS
-+ select DMA_ENGINE_RAID
-+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-+ help
-+ Support the NXP Layerscape qDMA engine with command queue and legacy mode.
-+ Channel virtualization is supported through enqueuing of DMA jobs to,
-+ or dequeuing DMA jobs from, different work queues.
-+ This module can be found on NXP Layerscape SoCs.
-+
-+source drivers/dma/dpaa2-qdma/Kconfig
-+
- config FSL_RAID
- tristate "Freescale RAID engine Support"
- depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
-@@ -600,7 +632,6 @@ config ZX_DMA
- help
- Support the DMA engine for ZTE ZX family platform devices.
-
--
- # driver files
- source "drivers/dma/bestcomm/Kconfig"
-
---- a/drivers/dma/Makefile
-+++ b/drivers/dma/Makefile
-@@ -31,7 +31,9 @@ obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
- obj-$(CONFIG_DW_DMAC_CORE) += dw/
- obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
- obj-$(CONFIG_FSL_DMA) += fsldma.o
-+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
- obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
-+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
- obj-$(CONFIG_FSL_RAID) += fsl_raid.o
- obj-$(CONFIG_HSU_DMA) += hsu/
- obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
-@@ -71,6 +73,7 @@ obj-$(CONFIG_TI_EDMA) += edma.o
- obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
- obj-$(CONFIG_ZX_DMA) += zx_dma.o
- obj-$(CONFIG_ST_FDMA) += st_fdma.o
-+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
-
- obj-y += qcom/
- obj-y += xilinx/
---- /dev/null
-+++ b/drivers/dma/caam_dma.c
-@@ -0,0 +1,462 @@
-+/*
-+ * caam support for SG DMA
-+ *
-+ * Copyright 2016 Freescale Semiconductor, Inc
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the names of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/dma-mapping.h>
-+#include <linux/dmaengine.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+
-+#include "dmaengine.h"
-+
-+#include "../crypto/caam/regs.h"
-+#include "../crypto/caam/jr.h"
-+#include "../crypto/caam/error.h"
-+#include "../crypto/caam/desc_constr.h"
-+
-+#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
-+ CAAM_CMD_SZ)
-+
-+/*
-+ * This is the maximum chunk size of a DMA transfer. If a buffer is larger
-+ * than this value, it is internally broken into chunks of at most
-+ * CAAM_DMA_CHUNK_SIZE bytes, and a DMA transfer request is issued for each
-+ * chunk.
-+ * This value is the largest 16-bit number that is a multiple of 256 bytes
-+ * (the largest configurable CAAM DMA burst size).
-+ */
-+#define CAAM_DMA_CHUNK_SIZE 65280
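-+/*
-+ * Worked arithmetic for the value above: the largest 16-bit value is 65535,
-+ * and 65535 - (65535 % 256) = 255 * 256 = 65280 = 0xFF00.
-+ */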
-+
-+struct caam_dma_sh_desc {
-+ u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
-+ dma_addr_t desc_dma;
-+};
-+
-+/* caam dma extended descriptor */
-+struct caam_dma_edesc {
-+ struct dma_async_tx_descriptor async_tx;
-+ struct list_head node;
-+ struct caam_dma_ctx *ctx;
-+ dma_addr_t src_dma;
-+ dma_addr_t dst_dma;
-+ unsigned int src_len;
-+ unsigned int dst_len;
-+ u32 jd[] ____cacheline_aligned;
-+};
-+
-+/*
-+ * caam_dma_ctx - per jr/channel context
-+ * @chan: dma channel used by async_tx API
-+ * @node: list_head used to attach to the global dma_ctx_list
-+ * @jrdev: Job Ring device
-+ * @pending_q: queue of pending (submitted, but not enqueued) jobs
-+ * @done_not_acked: jobs that have been completed by the JR, but may not yet be acked
-+ * @edesc_lock: protects extended descriptor
-+ */
-+struct caam_dma_ctx {
-+ struct dma_chan chan;
-+ struct list_head node;
-+ struct device *jrdev;
-+ struct list_head pending_q;
-+ struct list_head done_not_acked;
-+ spinlock_t edesc_lock;
-+};
-+
-+static struct dma_device *dma_dev;
-+static struct caam_dma_sh_desc *dma_sh_desc;
-+static LIST_HEAD(dma_ctx_list);
-+
-+static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
-+{
-+ struct caam_dma_edesc *edesc = NULL;
-+ struct caam_dma_ctx *ctx = NULL;
-+ dma_cookie_t cookie;
-+
-+ edesc = container_of(tx, struct caam_dma_edesc, async_tx);
-+ ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
-+
-+ spin_lock_bh(&ctx->edesc_lock);
-+
-+ cookie = dma_cookie_assign(tx);
-+ list_add_tail(&edesc->node, &ctx->pending_q);
-+
-+ spin_unlock_bh(&ctx->edesc_lock);
-+
-+ return cookie;
-+}
-+
-+static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
-+{
-+ struct caam_dma_ctx *ctx = edesc->ctx;
-+ struct caam_dma_edesc *_edesc = NULL;
-+
-+ spin_lock_bh(&ctx->edesc_lock);
-+
-+ list_add_tail(&edesc->node, &ctx->done_not_acked);
-+ list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
-+ if (async_tx_test_ack(&edesc->async_tx)) {
-+ list_del(&edesc->node);
-+ kfree(edesc);
-+ }
-+ }
-+
-+ spin_unlock_bh(&ctx->edesc_lock);
-+}
-+
-+static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
-+ void *context)
-+{
-+ struct caam_dma_edesc *edesc = context;
-+ struct caam_dma_ctx *ctx = edesc->ctx;
-+ dma_async_tx_callback callback;
-+ void *callback_param;
-+
-+ if (err)
-+ caam_jr_strstatus(ctx->jrdev, err);
-+
-+ dma_run_dependencies(&edesc->async_tx);
-+
-+ spin_lock_bh(&ctx->edesc_lock);
-+ dma_cookie_complete(&edesc->async_tx);
-+ spin_unlock_bh(&ctx->edesc_lock);
-+
-+ callback = edesc->async_tx.callback;
-+ callback_param = edesc->async_tx.callback_param;
-+
-+ dma_descriptor_unmap(&edesc->async_tx);
-+
-+ caam_jr_chan_free_edesc(edesc);
-+
-+ if (callback)
-+ callback(callback_param);
-+}
-+
-+static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
-+{
-+ u32 *jd = edesc->jd;
-+ u32 *sh_desc = dma_sh_desc->desc;
-+ dma_addr_t desc_dma = dma_sh_desc->desc_dma;
-+
-+ /* init the job descriptor */
-+ init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
-+
-+ /* set SEQIN PTR */
-+ append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
-+
-+ /* set SEQOUT PTR */
-+ append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
-+
-+ print_hex_dump_debug("caam dma desc@" __stringify(__LINE__) ": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
-+}
-+
-+static struct dma_async_tx_descriptor *
-+caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
-+ size_t len, unsigned long flags)
-+{
-+ struct caam_dma_edesc *edesc;
-+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
-+ chan);
-+
-+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
-+ if (!edesc)
-+ return ERR_PTR(-ENOMEM);
-+
-+ dma_async_tx_descriptor_init(&edesc->async_tx, chan);
-+ edesc->async_tx.tx_submit = caam_dma_tx_submit;
-+ edesc->async_tx.flags = flags;
-+ edesc->async_tx.cookie = -EBUSY;
-+
-+ edesc->src_dma = src;
-+ edesc->src_len = len;
-+ edesc->dst_dma = dst;
-+ edesc->dst_len = len;
-+ edesc->ctx = ctx;
-+
-+ caam_dma_memcpy_init_job_desc(edesc);
-+
-+ return &edesc->async_tx;
-+}
-+
-+/* This function can be called in an interrupt context */
-+static void caam_dma_issue_pending(struct dma_chan *chan)
-+{
-+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
-+ chan);
-+ struct caam_dma_edesc *edesc, *_edesc;
-+
-+ spin_lock_bh(&ctx->edesc_lock);
-+ list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
-+ if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
-+ caam_dma_done, edesc) < 0)
-+ break;
-+ list_del(&edesc->node);
-+ }
-+ spin_unlock_bh(&ctx->edesc_lock);
-+}
-+
-+static void caam_dma_free_chan_resources(struct dma_chan *chan)
-+{
-+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
-+ chan);
-+ struct caam_dma_edesc *edesc, *_edesc;
-+
-+ spin_lock_bh(&ctx->edesc_lock);
-+ list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
-+ list_del(&edesc->node);
-+ kfree(edesc);
-+ }
-+ list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
-+ list_del(&edesc->node);
-+ kfree(edesc);
-+ }
-+ spin_unlock_bh(&ctx->edesc_lock);
-+}
-+
-+static int caam_dma_jr_chan_bind(void)
-+{
-+ struct device *jrdev;
-+ struct caam_dma_ctx *ctx;
-+ int bonds = 0;
-+ int i;
-+
-+ for (i = 0; i < caam_jr_driver_probed(); i++) {
-+ jrdev = caam_jridx_alloc(i);
-+ if (IS_ERR(jrdev)) {
-+ pr_err("job ring device %d allocation failed\n", i);
-+ continue;
-+ }
-+
-+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-+ if (!ctx) {
-+ caam_jr_free(jrdev);
-+ continue;
-+ }
-+
-+ ctx->chan.device = dma_dev;
-+ ctx->chan.private = ctx;
-+
-+ ctx->jrdev = jrdev;
-+
-+ INIT_LIST_HEAD(&ctx->pending_q);
-+ INIT_LIST_HEAD(&ctx->done_not_acked);
-+ INIT_LIST_HEAD(&ctx->node);
-+ spin_lock_init(&ctx->edesc_lock);
-+
-+ dma_cookie_init(&ctx->chan);
-+
-+ /* add the context of this channel to the context list */
-+ list_add_tail(&ctx->node, &dma_ctx_list);
-+
-+ /* add this channel to the device chan list */
-+ list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
-+
-+ bonds++;
-+ }
-+
-+ return bonds;
-+}
-+
-+static inline void caam_jr_dma_free(struct dma_chan *chan)
-+{
-+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
-+ chan);
-+
-+ list_del(&ctx->node);
-+ list_del(&chan->device_node);
-+ caam_jr_free(ctx->jrdev);
-+ kfree(ctx);
-+}
-+
-+static void set_caam_dma_desc(u32 *desc)
-+{
-+ u32 *jmp_cmd;
-+
-+ /* dma shared descriptor */
-+ init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
-+
-+ /* REG1 = CAAM_DMA_CHUNK_SIZE */
-+ append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
-+
-+ /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
-+ append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
-+
-+ /*
-+ * if (REG0 > 0)
-+ * jmp to LABEL1
-+ */
-+ jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
-+ JUMP_COND_MATH_Z);
-+
-+ /* REG1 = SEQINLEN */
-+ append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
-+
-+ /* LABEL1 */
-+ set_jump_tgt_here(desc, jmp_cmd);
-+
-+ /* VARSEQINLEN = REG1 */
-+ append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
-+
-+ /* VARSEQOUTLEN = REG1 */
-+ append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
-+
-+ /* do FIFO STORE */
-+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
-+
-+ /* do FIFO LOAD */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IFIFO | LDST_VLF);
-+
-+ /*
-+ * if (REG0 > 0)
-+ * jmp 0xF8 (after shared desc header)
-+ */
-+ append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
-+ JUMP_COND_MATH_Z | 0xF8);
-+
-+ print_hex_dump_debug("caam dma shdesc@" __stringify(__LINE__) ": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
-+ 1);
-+}
-+
-+static int __init caam_dma_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct device *ctrldev = dev->parent;
-+ struct dma_chan *chan, *_chan;
-+ u32 *sh_desc;
-+ int err = -ENOMEM;
-+ int bonds;
-+
-+ if (!caam_jr_driver_probed()) {
-+		dev_info(dev, "Deferring probe until after the JR driver has probed\n");
-+ return -EPROBE_DEFER;
-+ }
-+
-+ dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
-+ if (!dma_dev)
-+ return -ENOMEM;
-+
-+ dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
-+ if (!dma_sh_desc)
-+ goto desc_err;
-+
-+ sh_desc = dma_sh_desc->desc;
-+ set_caam_dma_desc(sh_desc);
-+ dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
-+ desc_bytes(sh_desc),
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
-+ dev_err(dev, "unable to map dma descriptor\n");
-+ goto map_err;
-+ }
-+
-+ INIT_LIST_HEAD(&dma_dev->channels);
-+
-+ bonds = caam_dma_jr_chan_bind();
-+ if (!bonds) {
-+ err = -ENODEV;
-+ goto jr_bind_err;
-+ }
-+
-+ dma_dev->dev = dev;
-+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
-+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
-+ dma_dev->device_tx_status = dma_cookie_status;
-+ dma_dev->device_issue_pending = caam_dma_issue_pending;
-+ dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
-+ dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
-+
-+ err = dma_async_device_register(dma_dev);
-+ if (err) {
-+ dev_err(dev, "Failed to register CAAM DMA engine\n");
-+ goto jr_bind_err;
-+ }
-+
-+ dev_info(dev, "caam dma support with %d job rings\n", bonds);
-+
-+ return err;
-+
-+jr_bind_err:
-+ list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
-+ caam_jr_dma_free(chan);
-+
-+ dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
-+ DMA_TO_DEVICE);
-+map_err:
-+ kfree(dma_sh_desc);
-+desc_err:
-+ kfree(dma_dev);
-+ return err;
-+}
-+
-+static int caam_dma_remove(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct device *ctrldev = dev->parent;
-+ struct caam_dma_ctx *ctx, *_ctx;
-+
-+ dma_async_device_unregister(dma_dev);
-+
-+ list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
-+ list_del(&ctx->node);
-+ caam_jr_free(ctx->jrdev);
-+ kfree(ctx);
-+ }
-+
-+ dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
-+ desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
-+
-+ kfree(dma_sh_desc);
-+ kfree(dma_dev);
-+
-+ dev_info(dev, "caam dma support disabled\n");
-+ return 0;
-+}
-+
-+static struct platform_driver caam_dma_driver = {
-+ .driver = {
-+ .name = "caam-dma",
-+ },
-+ .probe = caam_dma_probe,
-+ .remove = caam_dma_remove,
-+};
-+module_platform_driver(caam_dma_driver);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_DESCRIPTION("NXP CAAM support for DMA engine");
-+MODULE_AUTHOR("NXP Semiconductors");
-+MODULE_ALIAS("platform:caam-dma");
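
The caam_dma engine above only advertises DMA_MEMCPY and DMA_PRIVATE capabilities, so consumers reach it through the generic dmaengine client API rather than any CAAM-specific interface. Below is a minimal, hypothetical client sketch (not part of the patch; the helper name and the polling loop are illustrative only) showing how a single memcpy could be issued against any engine that advertises DMA_MEMCPY, this one included:

#include <linux/dmaengine.h>

/* Hypothetical client: offload one memcpy to a DMA_MEMCPY-capable channel,
 * such as the caam-dma engine registered above. dst and src must already
 * be DMA addresses (e.g. obtained from dma_map_single()).
 */
static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Poll for completion; a real client would normally use a callback. */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}
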
---- /dev/null
-+++ b/drivers/dma/dpaa2-qdma/Kconfig
-@@ -0,0 +1,8 @@
-+menuconfig FSL_DPAA2_QDMA
-+ tristate "NXP DPAA2 QDMA"
-+ depends on FSL_MC_BUS && FSL_MC_DPIO
-+ select DMA_ENGINE
-+ select DMA_VIRTUAL_CHANNELS
-+ ---help---
-+ NXP Data Path Acceleration Architecture 2 QDMA driver,
-+ using the NXP MC bus driver.
---- /dev/null
-+++ b/drivers/dma/dpaa2-qdma/Makefile
-@@ -0,0 +1,8 @@
-+#
-+# Makefile for the NXP DPAA2 qDMA driver
-+#
-+ccflags-y += -DVERSION=\"\"
-+
-+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
-+
-+fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
---- /dev/null
-+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
-@@ -0,0 +1,781 @@
-+/*
-+ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
-+ *
-+ * Copyright 2015-2017 NXP Semiconductor, Inc.
-+ * Author: Changming Huang <jerry.huang@nxp.com>
-+ *
-+ * Driver for the NXP QDMA engine with QMan mode.
-+ * Channel virtualization is supported through enqueuing of DMA jobs to,
-+ * or dequeuing DMA jobs from different work queues with QMan portal.
-+ * This module can be found on NXP LS2 SoCs.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the
-+ * Free Software Foundation; either version 2 of the License, or (at your
-+ * option) any later version.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/clk.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/of_dma.h>
-+#include <linux/types.h>
-+#include <linux/delay.h>
-+#include <linux/iommu.h>
-+#include <linux/sys_soc.h>
-+
-+#include "../virt-dma.h"
-+
-+#include <linux/fsl/mc.h>
-+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
-+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
-+#include "fsl_dpdmai_cmd.h"
-+#include "fsl_dpdmai.h"
-+#include "dpaa2-qdma.h"
-+
-+static bool smmu_disable = true;
-+
-+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
-+{
-+ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
-+}
-+
-+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
-+{
-+ return container_of(vd, struct dpaa2_qdma_comp, vdesc);
-+}
-+
-+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
-+{
-+ return 0;
-+}
-+
-+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
-+{
-+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
-+ unsigned long flags;
-+ LIST_HEAD(head);
-+
-+ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
-+ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
-+ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
-+
-+ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
-+}
-+
-+/*
-+ * Request a command descriptor for enqueue.
-+ */
-+static struct dpaa2_qdma_comp *
-+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
-+{
-+ struct dpaa2_qdma_comp *comp_temp = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
-+ if (list_empty(&dpaa2_chan->comp_free)) {
-+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
-+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
-+ if (!comp_temp)
-+ goto err;
-+ comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
-+ GFP_NOWAIT, &comp_temp->fd_bus_addr);
-+ if (!comp_temp->fd_virt_addr)
-+ goto err;
-+
-+ comp_temp->fl_virt_addr =
-+ (void *)((struct dpaa2_fd *)
-+ comp_temp->fd_virt_addr + 1);
-+ comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
-+ sizeof(struct dpaa2_fd);
-+ comp_temp->desc_virt_addr =
-+ (void *)((struct dpaa2_fl_entry *)
-+ comp_temp->fl_virt_addr + 3);
-+ comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
-+ sizeof(struct dpaa2_fl_entry) * 3;
-+
-+ comp_temp->qchan = dpaa2_chan;
-+ return comp_temp;
-+ }
-+ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
-+ struct dpaa2_qdma_comp, list);
-+ list_del(&comp_temp->list);
-+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
-+
-+ comp_temp->qchan = dpaa2_chan;
-+err:
-+ return comp_temp;
-+}
-+
-+static void dpaa2_qdma_populate_fd(uint32_t format,
-+ struct dpaa2_qdma_comp *dpaa2_comp)
-+{
-+ struct dpaa2_fd *fd;
-+
-+ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
-+ memset(fd, 0, sizeof(struct dpaa2_fd));
-+
-+ /* fd populated */
-+ dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
-+ /* Bypass memory translation, Frame list format, short length disable */
-+	/* we need to disable BMT if fsl-mc uses IOVA addresses */
-+ if (smmu_disable)
-+ dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
-+ dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
-+
-+ dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
-+}
-+
-+/* first frame list for descriptor buffer */
-+static void dpaa2_qdma_populate_first_framel(
-+ struct dpaa2_fl_entry *f_list,
-+ struct dpaa2_qdma_comp *dpaa2_comp,
-+ bool wrt_changed)
-+{
-+ struct dpaa2_qdma_sd_d *sdd;
-+
-+ sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
-+ memset(sdd, 0, 2 * (sizeof(*sdd)));
-+ /* source and destination descriptor */
-+ sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
-+ sdd++;
-+
-+ /* dest descriptor CMD */
-+ if (wrt_changed)
-+ sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
-+ else
-+ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
-+
-+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
-+ /* first frame list to source descriptor */
-+
-+ dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
-+ dpaa2_fl_set_len(f_list, 0x20);
-+ dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
-+
-+ if (smmu_disable)
-+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
-+}
-+
-+/* source and destination frame list */
-+static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
-+ dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
-+{
-+ /* source frame list to source buffer */
-+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
-+
-+
-+ dpaa2_fl_set_addr(f_list, src);
-+ dpaa2_fl_set_len(f_list, len);
-+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */
-+ if (smmu_disable)
-+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
-+
-+ f_list++;
-+ /* destination frame list to destination buffer */
-+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
-+
-+ dpaa2_fl_set_addr(f_list, dst);
-+ dpaa2_fl_set_len(f_list, len);
-+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
-+ dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */
-+ if (smmu_disable)
-+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
-+}
-+
-+static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
-+ struct dma_chan *chan, dma_addr_t dst,
-+ dma_addr_t src, size_t len, unsigned long flags)
-+{
-+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
-+ struct dpaa2_qdma_engine *dpaa2_qdma;
-+ struct dpaa2_qdma_comp *dpaa2_comp;
-+ struct dpaa2_fl_entry *f_list;
-+ bool wrt_changed;
-+ uint32_t format;
-+
-+ dpaa2_qdma = dpaa2_chan->qdma;
-+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
-+ wrt_changed = dpaa2_qdma->qdma_wrtype_fixup;
-+
-+#ifdef LONG_FORMAT
-+ format = QDMA_FD_LONG_FORMAT;
-+#else
-+ format = QDMA_FD_SHORT_FORMAT;
-+#endif
-+ /* populate Frame descriptor */
-+ dpaa2_qdma_populate_fd(format, dpaa2_comp);
-+
-+ f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr;
-+
-+#ifdef LONG_FORMAT
-+	/* first frame list for descriptor buffer (long format) */
-+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
-+
-+ f_list++;
-+#endif
-+
-+ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
-+
-+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
-+}
-+
-+static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
-+ dma_cookie_t cookie, struct dma_tx_state *txstate)
-+{
-+ return dma_cookie_status(chan, cookie, txstate);
-+}
-+
-+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
-+{
-+}
-+
-+static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
-+{
-+ struct dpaa2_qdma_comp *dpaa2_comp;
-+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
-+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
-+ struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
-+ struct virt_dma_desc *vdesc;
-+ struct dpaa2_fd *fd;
-+ int err;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
-+ spin_lock(&dpaa2_chan->vchan.lock);
-+ if (vchan_issue_pending(&dpaa2_chan->vchan)) {
-+ vdesc = vchan_next_desc(&dpaa2_chan->vchan);
-+ if (!vdesc)
-+ goto err_enqueue;
-+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
-+
-+ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
-+
-+ list_del(&vdesc->node);
-+ list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
-+
-+		/* TODO: priority hard-coded to zero */
-+ err = dpaa2_io_service_enqueue_fq(NULL,
-+ priv->tx_queue_attr[0].fqid, fd);
-+ if (err) {
-+ list_del(&dpaa2_comp->list);
-+ list_add_tail(&dpaa2_comp->list,
-+ &dpaa2_chan->comp_free);
-+ }
-+
-+ }
-+err_enqueue:
-+ spin_unlock(&dpaa2_chan->vchan.lock);
-+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
-+}
-+
-+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
-+{
-+ struct device *dev = &ls_dev->dev;
-+ struct dpaa2_qdma_priv *priv;
-+ struct dpaa2_qdma_priv_per_prio *ppriv;
-+ uint8_t prio_def = DPDMAI_PRIO_NUM;
-+ int err;
-+ int i;
-+
-+ priv = dev_get_drvdata(dev);
-+
-+ priv->dev = dev;
-+ priv->dpqdma_id = ls_dev->obj_desc.id;
-+
-+	/* Get the handle for the DPDMAI this interface is associated with */
-+ err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpdmai_open() failed\n");
-+ return err;
-+ }
-+ dev_info(dev, "Opened dpdmai object successfully\n");
-+
-+ err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
-+ &priv->dpdmai_attr);
-+ if (err) {
-+ dev_err(dev, "dpdmai_get_attributes() failed\n");
-+ return err;
-+ }
-+
-+ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
-+ dev_err(dev, "DPDMAI major version mismatch\n"
-+ "Found %u.%u, supported version is %u.%u\n",
-+ priv->dpdmai_attr.version.major,
-+ priv->dpdmai_attr.version.minor,
-+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
-+ }
-+
-+ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
-+ dev_err(dev, "DPDMAI minor version mismatch\n"
-+ "Found %u.%u, supported version is %u.%u\n",
-+ priv->dpdmai_attr.version.major,
-+ priv->dpdmai_attr.version.minor,
-+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
-+ }
-+
-+ priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
-+ ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
-+ if (!ppriv) {
-+		dev_err(dev, "kcalloc for ppriv failed\n");
-+ return -1;
-+ }
-+ priv->ppriv = ppriv;
-+
-+ for (i = 0; i < priv->num_pairs; i++) {
-+ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
-+ i, &priv->rx_queue_attr[i]);
-+ if (err) {
-+ dev_err(dev, "dpdmai_get_rx_queue() failed\n");
-+ return err;
-+ }
-+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
-+
-+ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
-+ i, &priv->tx_queue_attr[i]);
-+ if (err) {
-+ dev_err(dev, "dpdmai_get_tx_queue() failed\n");
-+ return err;
-+ }
-+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
-+ ppriv->prio = i;
-+ ppriv->priv = priv;
-+ ppriv++;
-+ }
-+
-+ return 0;
-+}
-+
-+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
-+{
-+ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
-+ struct dpaa2_qdma_priv_per_prio, nctx);
-+ struct dpaa2_qdma_priv *priv = ppriv->priv;
-+ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
-+ struct dpaa2_qdma_chan *qchan;
-+ const struct dpaa2_fd *fd;
-+ const struct dpaa2_fd *fd_eq;
-+ struct dpaa2_dq *dq;
-+ int err;
-+ int is_last = 0;
-+ uint8_t status;
-+ int i;
-+ int found;
-+ uint32_t n_chans = priv->dpaa2_qdma->n_chans;
-+
-+ do {
-+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
-+ ppriv->store);
-+ } while (err);
-+
-+ while (!is_last) {
-+ do {
-+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
-+ } while (!is_last && !dq);
-+ if (!dq) {
-+ dev_err(priv->dev, "FQID returned no valid frames!\n");
-+ continue;
-+ }
-+
-+ /* obtain FD and process the error */
-+ fd = dpaa2_dq_fd(dq);
-+
-+ status = dpaa2_fd_get_ctrl(fd) & 0xff;
-+ if (status)
-+ dev_err(priv->dev, "FD error occurred\n");
-+ found = 0;
-+ for (i = 0; i < n_chans; i++) {
-+ qchan = &priv->dpaa2_qdma->chans[i];
-+ spin_lock(&qchan->queue_lock);
-+ if (list_empty(&qchan->comp_used)) {
-+ spin_unlock(&qchan->queue_lock);
-+ continue;
-+ }
-+ list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
-+ &qchan->comp_used, list) {
-+ fd_eq = (struct dpaa2_fd *)
-+ dpaa2_comp->fd_virt_addr;
-+
-+ if (le64_to_cpu(fd_eq->simple.addr) ==
-+ le64_to_cpu(fd->simple.addr)) {
-+
-+ list_del(&dpaa2_comp->list);
-+ list_add_tail(&dpaa2_comp->list,
-+ &qchan->comp_free);
-+
-+ spin_lock(&qchan->vchan.lock);
-+ vchan_cookie_complete(
-+ &dpaa2_comp->vdesc);
-+ spin_unlock(&qchan->vchan.lock);
-+ found = 1;
-+ break;
-+ }
-+ }
-+ spin_unlock(&qchan->queue_lock);
-+ if (found)
-+ break;
-+ }
-+ }
-+
-+ dpaa2_io_service_rearm(NULL, ctx);
-+}
-+
-+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
-+{
-+ int err, i, num;
-+ struct device *dev = priv->dev;
-+ struct dpaa2_qdma_priv_per_prio *ppriv;
-+
-+ num = priv->num_pairs;
-+ ppriv = priv->ppriv;
-+ for (i = 0; i < num; i++) {
-+ ppriv->nctx.is_cdan = 0;
-+ ppriv->nctx.desired_cpu = 1;
-+ ppriv->nctx.id = ppriv->rsp_fqid;
-+ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
-+ err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
-+ if (err) {
-+ dev_err(dev, "Notification register failed\n");
-+ goto err_service;
-+ }
-+
-+ ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
-+ dev);
-+ if (!ppriv->store) {
-+ dev_err(dev, "dpaa2_io_store_create() failed\n");
-+ goto err_store;
-+ }
-+
-+ ppriv++;
-+ }
-+ return 0;
-+
-+err_store:
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
-+err_service:
-+ ppriv--;
-+ while (ppriv >= priv->ppriv) {
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
-+ dpaa2_io_store_destroy(ppriv->store);
-+ ppriv--;
-+ }
-+ return -1;
-+}
-+
-+static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
-+{
-+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
-+ int i;
-+
-+ for (i = 0; i < priv->num_pairs; i++) {
-+ dpaa2_io_store_destroy(ppriv->store);
-+ ppriv++;
-+ }
-+}
-+
-+static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
-+{
-+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
-+ struct device *dev = priv->dev;
-+ int i;
-+
-+ for (i = 0; i < priv->num_pairs; i++) {
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
-+ ppriv++;
-+ }
-+}
-+
-+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
-+{
-+ int err;
-+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
-+ struct device *dev = priv->dev;
-+ struct dpaa2_qdma_priv_per_prio *ppriv;
-+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
-+ int i, num;
-+
-+ num = priv->num_pairs;
-+ ppriv = priv->ppriv;
-+ for (i = 0; i < num; i++) {
-+ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
-+ DPDMAI_QUEUE_OPT_DEST;
-+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
-+ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
-+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
-+ rx_queue_cfg.dest_cfg.priority = ppriv->prio;
-+ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
-+ rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
-+ if (err) {
-+ dev_err(dev, "dpdmai_set_rx_queue() failed\n");
-+ return err;
-+ }
-+
-+ ppriv++;
-+ }
-+
-+ return 0;
-+}
-+
-+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
-+{
-+ int err = 0;
-+ struct device *dev = priv->dev;
-+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
-+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
-+ int i;
-+
-+ for (i = 0; i < priv->num_pairs; i++) {
-+ ppriv->nctx.qman64 = 0;
-+ ppriv->nctx.dpio_id = 0;
-+ ppriv++;
-+ }
-+
-+ err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
-+ if (err)
-+ dev_err(dev, "dpdmai_reset() failed\n");
-+
-+ return err;
-+}
-+
-+static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
-+ struct list_head *head)
-+{
-+ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
-+ /* free the QDMA comp resource */
-+ list_for_each_entry_safe(comp_tmp, _comp_tmp,
-+ head, list) {
-+ dma_pool_free(qchan->fd_pool,
-+ comp_tmp->fd_virt_addr,
-+ comp_tmp->fd_bus_addr);
-+ list_del(&comp_tmp->list);
-+ kfree(comp_tmp);
-+ }
-+
-+}
-+
-+static void __cold dpaa2_dpdmai_free_channels(
-+ struct dpaa2_qdma_engine *dpaa2_qdma)
-+{
-+ struct dpaa2_qdma_chan *qchan;
-+ int num, i;
-+
-+ num = dpaa2_qdma->n_chans;
-+ for (i = 0; i < num; i++) {
-+ qchan = &dpaa2_qdma->chans[i];
-+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
-+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
-+ dma_pool_destroy(qchan->fd_pool);
-+ }
-+}
-+
-+static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
-+{
-+ struct dpaa2_qdma_chan *dpaa2_chan;
-+ struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
-+ int i;
-+
-+ INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
-+ for (i = 0; i < dpaa2_qdma->n_chans; i++) {
-+ dpaa2_chan = &dpaa2_qdma->chans[i];
-+ dpaa2_chan->qdma = dpaa2_qdma;
-+ dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
-+ vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
-+
-+ dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
-+ dev, FD_POOL_SIZE, 32, 0);
-+ if (!dpaa2_chan->fd_pool)
-+ return -1;
-+
-+ spin_lock_init(&dpaa2_chan->queue_lock);
-+ INIT_LIST_HEAD(&dpaa2_chan->comp_used);
-+ INIT_LIST_HEAD(&dpaa2_chan->comp_free);
-+ }
-+ return 0;
-+}
-+
-+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
-+{
-+ struct dpaa2_qdma_priv *priv;
-+ struct device *dev = &dpdmai_dev->dev;
-+ struct dpaa2_qdma_engine *dpaa2_qdma;
-+ int err;
-+
-+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-+ if (!priv)
-+ return -ENOMEM;
-+ dev_set_drvdata(dev, priv);
-+ priv->dpdmai_dev = dpdmai_dev;
-+
-+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
-+ if (priv->iommu_domain)
-+ smmu_disable = false;
-+
-+ /* obtain a MC portal */
-+ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
-+ if (err) {
-+ if (err == -ENXIO)
-+ err = -EPROBE_DEFER;
-+ else
-+ dev_err(dev, "MC portal allocation failed\n");
-+ goto err_mcportal;
-+ }
-+
-+ /* DPDMAI initialization */
-+ err = dpaa2_qdma_setup(dpdmai_dev);
-+ if (err) {
-+ dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
-+ goto err_dpdmai_setup;
-+ }
-+
-+ /* DPIO */
-+ err = dpaa2_qdma_dpio_setup(priv);
-+ if (err) {
-+ dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
-+ goto err_dpio_setup;
-+ }
-+
-+ /* DPDMAI binding to DPIO */
-+ err = dpaa2_dpdmai_bind(priv);
-+ if (err) {
-+ dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
-+ goto err_bind;
-+ }
-+
-+ /* DPDMAI enable */
-+ err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
-+ if (err) {
-+		dev_err(dev, "dpdmai_enable() failed\n");
-+ goto err_enable;
-+ }
-+
-+ dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
-+ if (!dpaa2_qdma) {
-+ err = -ENOMEM;
-+ goto err_eng;
-+ }
-+
-+ priv->dpaa2_qdma = dpaa2_qdma;
-+ dpaa2_qdma->priv = priv;
-+
-+ dpaa2_qdma->n_chans = NUM_CH;
-+
-+ err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
-+ if (err) {
-+		dev_err(dev, "QDMA alloc channels failed\n");
-+ goto err_reg;
-+ }
-+
-+ if (soc_device_match(soc_fixup_tuning))
-+ dpaa2_qdma->qdma_wrtype_fixup = true;
-+ else
-+ dpaa2_qdma->qdma_wrtype_fixup = false;
-+
-+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
-+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
-+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
-+
-+ dpaa2_qdma->dma_dev.dev = dev;
-+ dpaa2_qdma->dma_dev.device_alloc_chan_resources
-+ = dpaa2_qdma_alloc_chan_resources;
-+ dpaa2_qdma->dma_dev.device_free_chan_resources
-+ = dpaa2_qdma_free_chan_resources;
-+ dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
-+ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
-+ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
-+
-+ err = dma_async_device_register(&dpaa2_qdma->dma_dev);
-+ if (err) {
-+ dev_err(dev, "Can't register NXP QDMA engine.\n");
-+ goto err_reg;
-+ }
-+
-+ return 0;
-+
-+err_reg:
-+ dpaa2_dpdmai_free_channels(dpaa2_qdma);
-+ kfree(dpaa2_qdma);
-+err_eng:
-+ dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
-+err_enable:
-+ dpaa2_dpdmai_dpio_unbind(priv);
-+err_bind:
-+ dpaa2_dpmai_store_free(priv);
-+ dpaa2_dpdmai_dpio_free(priv);
-+err_dpio_setup:
-+ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
-+err_dpdmai_setup:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_mcportal:
-+ kfree(priv->ppriv);
-+ kfree(priv);
-+ dev_set_drvdata(dev, NULL);
-+ return err;
-+}
-+
-+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
-+{
-+ struct device *dev;
-+ struct dpaa2_qdma_priv *priv;
-+ struct dpaa2_qdma_engine *dpaa2_qdma;
-+
-+ dev = &ls_dev->dev;
-+ priv = dev_get_drvdata(dev);
-+ dpaa2_qdma = priv->dpaa2_qdma;
-+
-+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
-+ dpaa2_dpdmai_dpio_unbind(priv);
-+ dpaa2_dpmai_store_free(priv);
-+ dpaa2_dpdmai_dpio_free(priv);
-+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
-+ fsl_mc_portal_free(priv->mc_io);
-+ dev_set_drvdata(dev, NULL);
-+ dpaa2_dpdmai_free_channels(dpaa2_qdma);
-+
-+ dma_async_device_unregister(&dpaa2_qdma->dma_dev);
-+ kfree(priv);
-+ kfree(dpaa2_qdma);
-+
-+ return 0;
-+}
-+
-+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpdmai",
-+ },
-+ { .vendor = 0x0 }
-+};
-+
-+static struct fsl_mc_driver dpaa2_qdma_driver = {
-+ .driver = {
-+ .name = "dpaa2-qdma",
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = dpaa2_qdma_probe,
-+ .remove = dpaa2_qdma_remove,
-+ .match_id_table = dpaa2_qdma_id_table
-+};
-+
-+static int __init dpaa2_qdma_driver_init(void)
-+{
-+ return fsl_mc_driver_register(&(dpaa2_qdma_driver));
-+}
-+late_initcall(dpaa2_qdma_driver_init);
-+
-+static void __exit fsl_qdma_exit(void)
-+{
-+ fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
-+}
-+module_exit(fsl_qdma_exit);
-+
-+MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
-+MODULE_LICENSE("Dual BSD/GPL");
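
One detail of the response path above that is easy to miss: dpaa2_qdma_fqdan_cb() correlates each dequeued frame descriptor with an outstanding job by comparing the frame-list address carried in the FD against the FDs of the completions on comp_used. A small sketch of that comparison, with a hypothetical helper name (the real code performs the test inline, under the per-channel queue lock):

/* Hypothetical helper mirroring the match test in dpaa2_qdma_fqdan_cb():
 * a dequeued FD belongs to a completion when both point at the same
 * frame list.
 */
static bool dpaa2_qdma_fd_matches(const struct dpaa2_fd *dequeued,
				  const struct dpaa2_qdma_comp *comp)
{
	const struct dpaa2_fd *enqueued = comp->fd_virt_addr;

	return le64_to_cpu(enqueued->simple.addr) ==
	       le64_to_cpu(dequeued->simple.addr);
}
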
---- /dev/null
-+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
-@@ -0,0 +1,181 @@
-+/* Copyright 2015 NXP Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of NXP Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef __DPAA2_QDMA_H
-+#define __DPAA2_QDMA_H
-+
-+#define LONG_FORMAT 1
-+
-+#define DPAA2_QDMA_STORE_SIZE 16
-+#define NUM_CH 8
-+
-+#define QDMA_DMR_OFFSET 0x0
-+#define QDMA_DQ_EN (0 << 30)
-+#define QDMA_DQ_DIS (1 << 30)
-+
-+#define QDMA_DSR_M_OFFSET 0x10004
-+
-+struct dpaa2_qdma_sd_d {
-+ uint32_t rsv:32;
-+ union {
-+ struct {
-+			uint32_t ssd:12; /* source stride distance */
-+			uint32_t sss:12; /* source stride size */
-+ uint32_t rsv1:8;
-+ } sdf;
-+ struct {
-+ uint32_t dsd:12; /* Destination stride distance */
-+ uint32_t dss:12; /* Destination stride size */
-+ uint32_t rsv2:8;
-+ } ddf;
-+ } df;
-+ uint32_t rbpcmd; /* Route-by-port command */
-+ uint32_t cmd;
-+} __attribute__((__packed__));
-+/* Source descriptor command read transaction type for RBP=0:
-+ coherent copy of cacheable memory */
-+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
-+/* Destination descriptor command write transaction type for RBP=0:
-+ coherent copy of cacheable memory */
-+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
-+#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
-+
-+#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
-+#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
-+#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
-+#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
-+#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
-+
-+#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
-+#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
-+
-+#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
-+#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
-+#define QDMA_SER_DISABLE (0 << 8) /* no notification */
-+#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
-+#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
-+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
-+#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
-+
-+#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is a virtual address */
-+#define QMAN_FD_VA_DISABLE (0 << 14) /* Address used is a real address */
-+#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
-+#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
-+#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
-+
-+#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
-+#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
-+#define QDMA_FL_BMT_ENABLE (0x1 << 15) /* enable bypass memory translation */
-+#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
-+#define QDMA_FL_SL_LONG (0x0 << 2) /* long length */
-+#define QDMA_FL_SL_SHORT 0x1 /* short length */
-+#define QDMA_FL_F (0x1) /* last frame list bit */
-+/* Description of Frame list table structure */
-+
-+struct dpaa2_qdma_chan {
-+ struct virt_dma_chan vchan;
-+ struct virt_dma_desc vdesc;
-+ enum dma_status status;
-+ struct dpaa2_qdma_engine *qdma;
-+
-+ struct mutex dpaa2_queue_mutex;
-+ spinlock_t queue_lock;
-+ struct dma_pool *fd_pool;
-+
-+ struct list_head comp_used;
-+ struct list_head comp_free;
-+
-+};
-+
-+struct dpaa2_qdma_comp {
-+ dma_addr_t fd_bus_addr;
-+ dma_addr_t fl_bus_addr;
-+ dma_addr_t desc_bus_addr;
-+ void *fd_virt_addr;
-+ void *fl_virt_addr;
-+ void *desc_virt_addr;
-+ struct dpaa2_qdma_chan *qchan;
-+ struct virt_dma_desc vdesc;
-+ struct list_head list;
-+};
-+
-+struct dpaa2_qdma_engine {
-+ struct dma_device dma_dev;
-+ u32 n_chans;
-+ struct dpaa2_qdma_chan chans[NUM_CH];
-+ bool qdma_wrtype_fixup;
-+
-+ struct dpaa2_qdma_priv *priv;
-+};
-+
-+/*
-+ * dpaa2_qdma_priv - driver private data
-+ */
-+struct dpaa2_qdma_priv {
-+ int dpqdma_id;
-+
-+ struct iommu_domain *iommu_domain;
-+ struct dpdmai_attr dpdmai_attr;
-+ struct device *dev;
-+ struct fsl_mc_io *mc_io;
-+ struct fsl_mc_device *dpdmai_dev;
-+
-+ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
-+ struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
-+
-+ uint8_t num_pairs;
-+
-+ struct dpaa2_qdma_engine *dpaa2_qdma;
-+ struct dpaa2_qdma_priv_per_prio *ppriv;
-+};
-+
-+struct dpaa2_qdma_priv_per_prio {
-+ int req_fqid;
-+ int rsp_fqid;
-+ int prio;
-+
-+ struct dpaa2_io_store *store;
-+ struct dpaa2_io_notification_ctx nctx;
-+
-+ struct dpaa2_qdma_priv *priv;
-+};
-+
-+static struct soc_device_attribute soc_fixup_tuning[] = {
-+ { .family = "QorIQ LX2160A"},
-+ { },
-+};
-+
-+/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
-+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
-+ sizeof(struct dpaa2_fl_entry) * 3 + \
-+ sizeof(struct dpaa2_qdma_sd_d) * 2)
-+
-+#endif /* __DPAA2_QDMA_H */
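
FD_POOL_SIZE above packs three regions into a single dma_pool element, and dpaa2_qdma_request_desc() in dpaa2-qdma.c recovers them with plain pointer arithmetic: the three frame-list entries start right after the frame descriptor, and the two source/destination descriptors follow the frame list. The helpers below are an illustrative restatement of that layout; the names are hypothetical and not part of the driver:

/* Illustrative only: split one FD_POOL_SIZE element into the regions laid
 * out by dpaa2_qdma_request_desc(): [FD][3 x FL entry][2 x sd/dd descriptor].
 */
static inline struct dpaa2_fl_entry *dpaa2_qdma_job_fl(void *fd_virt)
{
	return (struct dpaa2_fl_entry *)((struct dpaa2_fd *)fd_virt + 1);
}

static inline struct dpaa2_qdma_sd_d *dpaa2_qdma_job_sdd(void *fd_virt)
{
	return (struct dpaa2_qdma_sd_d *)(dpaa2_qdma_job_fl(fd_virt) + 3);
}
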
---- /dev/null
-+++ b/drivers/dma/dpaa2-qdma/dpdmai.c
-@@ -0,0 +1,515 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/types.h>
-+#include <linux/io.h>
-+#include "fsl_dpdmai.h"
-+#include "fsl_dpdmai_cmd.h"
-+#include <linux/fsl/mc.h>
-+
-+struct dpdmai_cmd_open {
-+ __le32 dpdmai_id;
-+};
-+
-+struct dpdmai_rsp_get_attributes {
-+ __le32 id;
-+ u8 num_of_priorities;
-+ u8 pad0[3];
-+ __le16 major;
-+ __le16 minor;
-+};
-+
-+
-+struct dpdmai_cmd_queue {
-+ __le32 dest_id;
-+ u8 priority;
-+ u8 queue;
-+ u8 dest_type;
-+ u8 pad;
-+ __le64 user_ctx;
-+ union {
-+ __le32 options;
-+ __le32 fqid;
-+ };
-+};
-+
-+struct dpdmai_rsp_get_tx_queue {
-+ __le64 pad;
-+ __le32 fqid;
-+};
-+
-+
-+int dpdmai_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdmai_id,
-+ uint16_t *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmai_cmd_open *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+
-+ cmd_params = (struct dpdmai_cmd_open *)cmd.params;
-+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+ return 0;
-+}
-+
-+int dpdmai_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdmai_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPDMAI_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpdmai_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdmai_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdmai_irq_cfg *irq_cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdmai_irq_cfg *irq_cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdmai_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ int err;
-+ struct dpdmai_rsp_get_attributes *rsp_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
-+ attr->id = le32_to_cpu(rsp_params->id);
-+ attr->version.major = le16_to_cpu(rsp_params->major);
-+ attr->version.minor = le16_to_cpu(rsp_params->minor);
-+ attr->num_of_priorities = rsp_params->num_of_priorities;
-+
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ const struct dpdmai_rx_queue_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmai_cmd_queue *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+
-+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
-+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
-+ cmd_params->priority = cfg->dest_cfg.priority;
-+ cmd_params->queue = priority;
-+ cmd_params->dest_type = cfg->dest_cfg.dest_type;
-+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
-+ cmd_params->options = cpu_to_le32(cfg->options);
-+
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority, struct dpdmai_rx_queue_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmai_cmd_queue *cmd_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+
-+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
-+ cmd_params->queue = priority;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
-+ attr->dest_cfg.priority = cmd_params->priority;
-+ attr->dest_cfg.dest_type = cmd_params->dest_type;
-+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
-+ attr->fqid = le32_to_cpu(cmd_params->fqid);
-+
-+ return 0;
-+}
-+
-+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpdmai_tx_queue_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpdmai_cmd_queue *cmd_params;
-+ struct dpdmai_rsp_get_tx_queue *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
-+ cmd_flags,
-+ token);
-+
-+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
-+ cmd_params->queue = priority;
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+
-+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
-+ attr->fqid = le32_to_cpu(rsp_params->fqid);
-+
-+ return 0;
-+}
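Taken together, dpdmai_get_attributes(), dpdmai_get_rx_queue() and dpdmai_get_tx_queue() are what a consumer uses to discover the frame queue IDs it must enqueue to and dequeue from. A hedged sketch of collecting one Rx/Tx FQID pair per priority (the helper name is hypothetical; the output arrays must hold at least attr.num_of_priorities entries):

    /* Illustrative only; assumes an already open session (mc_io, token). */
    static int example_collect_fqids(struct fsl_mc_io *mc_io, uint16_t token,
                                     uint32_t *rx_fqid, uint32_t *tx_fqid)
    {
            struct dpdmai_attr attr = { 0 };
            struct dpdmai_rx_queue_attr rx_attr;
            struct dpdmai_tx_queue_attr tx_attr;
            int i, err;

            err = dpdmai_get_attributes(mc_io, 0, token, &attr);
            if (err)
                    return err;

            for (i = 0; i < attr.num_of_priorities; i++) {
                    err = dpdmai_get_rx_queue(mc_io, 0, token, i, &rx_attr);
                    if (err)
                            return err;
                    rx_fqid[i] = rx_attr.fqid;

                    err = dpdmai_get_tx_queue(mc_io, 0, token, i, &tx_attr);
                    if (err)
                            return err;
                    tx_fqid[i] = tx_attr.fqid;
            }

            return 0;
    }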
---- /dev/null
-+++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
-@@ -0,0 +1,521 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPDMAI_H
-+#define __FSL_DPDMAI_H
-+
-+struct fsl_mc_io;
-+
-+/* Data Path DMA Interface API
-+ * Contains initialization APIs and runtime control APIs for DPDMAI
-+ */
-+
-+/* General DPDMAI macros */
-+
-+/**
-+ * Maximum number of Tx/Rx priorities per DPDMAI object
-+ */
-+#define DPDMAI_PRIO_NUM 2
-+
-+/**
-+ * All queues considered; see dpdmai_set_rx_queue()
-+ */
-+#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
-+
-+/**
-+ * dpdmai_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpdmai_id: DPDMAI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpdmai_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdmai_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpdmai_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
-+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
-+ *	configured with values 1-8; the entry following the last valid entry
-+ *	should be configured with 0
-+ */
-+struct dpdmai_cfg {
-+ uint8_t priorities[DPDMAI_PRIO_NUM];
-+};
-+
-+/**
-+ * dpdmai_create() - Create the DPDMAI object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPDMAI object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpdmai_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdmai_cfg *cfg,
-+ uint16_t *token);
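Per the kernel-doc above, the object is usually declared in the DPL, but it can also be created programmatically by filling struct dpdmai_cfg. A minimal sketch under that assumption (helper name hypothetical), using both hardware priorities so no trailing zero entry is needed:

    /* Hypothetical example; most deployments declare the object in the DPL instead. */
    static int example_dpdmai_create(struct fsl_mc_io *mc_io, uint16_t *token)
    {
            struct dpdmai_cfg cfg = {
                    .priorities = { 1, 2 },   /* both DPDMAI_PRIO_NUM entries in use */
            };

            return dpdmai_create(mc_io, 0, &cfg, token);
    }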
-+
-+/**
-+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpdmai_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdmai_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpdmai_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdmai_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdmai_get_irq() - Get IRQ information from the DPDMAI
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdmai_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdmai_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no cause will trigger
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpdmai_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned Interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpdmai_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpdmai_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpdmai_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpdmai_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
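The IRQ calls documented above compose in the usual way: unmask the causes of interest, enable the interrupt index, and in the handler read then clear the status word. A hedged skeleton (irq_index 0 and the "unmask everything" value are assumptions, not values mandated by the API):

    /* Illustrative IRQ handling skeleton, not part of the removed patch. */
    #define EX_IRQ_INDEX   0
    #define EX_IRQ_EVENTS  0xffffffff            /* assumed: unmask all causes */

    static int example_setup_irq(struct fsl_mc_io *mc_io, uint16_t token)
    {
            int err;

            err = dpdmai_set_irq_mask(mc_io, 0, token, EX_IRQ_INDEX,
                                      EX_IRQ_EVENTS);
            if (err)
                    return err;

            return dpdmai_set_irq_enable(mc_io, 0, token, EX_IRQ_INDEX, 1);
    }

    static void example_handle_irq(struct fsl_mc_io *mc_io, uint16_t token)
    {
            uint32_t status = 0;

            if (dpdmai_get_irq_status(mc_io, 0, token, EX_IRQ_INDEX, &status))
                    return;
            /* ... react to the pending causes reported in 'status' ... */
            dpdmai_clear_irq_status(mc_io, 0, token, EX_IRQ_INDEX, status);
    }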
-+
-+/**
-+ * struct dpdmai_attr - Structure representing DPDMAI attributes
-+ * @id: DPDMAI object ID
-+ * @version: DPDMAI version
-+ * @num_of_priorities: number of priorities
-+ */
-+struct dpdmai_attr {
-+ int id;
-+ /**
-+ * struct version - DPDMAI version
-+ * @major: DPDMAI major version
-+ * @minor: DPDMAI minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint8_t num_of_priorities;
-+};
-+
-+/**
-+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdmai_attr *attr);
-+
-+/**
-+ * enum dpdmai_dest - DPDMAI destination types
-+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
-+ * and does not generate FQDAN notifications; user is expected to dequeue
-+ * from the queue based on polling or other user-defined method
-+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to dequeue
-+ * from the queue only after notification is received
-+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified DPCON object;
-+ * user is expected to dequeue from the DPCON channel
-+ */
-+enum dpdmai_dest {
-+ DPDMAI_DEST_NONE = 0,
-+ DPDMAI_DEST_DPIO = 1,
-+ DPDMAI_DEST_DPCON = 2
-+};
-+
-+/**
-+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
-+ */
-+struct dpdmai_dest_cfg {
-+ enum dpdmai_dest dest_type;
-+ int dest_id;
-+ uint8_t priority;
-+};
-+
-+/* DPDMAI queue modification options */
-+
-+/**
-+ * Select to modify the user's context associated with the queue
-+ */
-+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
-+
-+/**
-+ * Select to modify the queue's destination
-+ */
-+#define DPDMAI_QUEUE_OPT_DEST 0x00000002
-+
-+/**
-+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
-+ * @options: Flags representing the suggested modifications to the queue;
-+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame;
-+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
-+ * @dest_cfg: Queue destination parameters;
-+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
-+ */
-+struct dpdmai_rx_queue_cfg {
-+ uint32_t options;
-+ uint64_t user_ctx;
-+ struct dpdmai_dest_cfg dest_cfg;
-+
-+};
-+
-+/**
-+ * dpdmai_set_rx_queue() - Set Rx queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPDMAI creation; use
-+ * DPDMAI_ALL_QUEUES to configure all Rx queues
-+ * identically.
-+ * @cfg: Rx queue configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ const struct dpdmai_rx_queue_cfg *cfg);
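Putting the pieces above together, a caller that wants FQDAN notifications delivered through a DPIO would fill struct dpdmai_rx_queue_cfg roughly as follows; this is a hypothetical sketch, and dpio_id, the user context value and the function name are placeholders:

    /* Hypothetical Rx queue configuration sketch. */
    static int example_set_rx_dest(struct fsl_mc_io *mc_io, uint16_t token,
                                   int dpio_id, uint64_t ctx)
    {
            struct dpdmai_rx_queue_cfg rx_cfg = {
                    .options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
                    .user_ctx = ctx,
                    .dest_cfg = {
                            .dest_type = DPDMAI_DEST_DPIO,
                            .dest_id   = dpio_id,
                            .priority  = 0,
                    },
            };

            /* DPDMAI_ALL_QUEUES configures every Rx queue identically. */
            return dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &rx_cfg);
    }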
-+
-+/**
-+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame
-+ * @dest_cfg: Queue destination configuration
-+ * @fqid: Virtual FQID value to be used for dequeue operations
-+ */
-+struct dpdmai_rx_queue_attr {
-+ uint64_t user_ctx;
-+ struct dpdmai_dest_cfg dest_cfg;
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPDMAI creation
-+ * @attr: Returned Rx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpdmai_rx_queue_attr *attr);
-+
-+/**
-+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
-+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
-+ */
-+
-+struct dpdmai_tx_queue_attr {
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPDMAI creation
-+ * @attr: Returned Tx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpdmai_tx_queue_attr *attr);
-+
-+#endif /* __FSL_DPDMAI_H */
---- /dev/null
-+++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
-@@ -0,0 +1,222 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPDMAI_CMD_H
-+#define _FSL_DPDMAI_CMD_H
-+
-+/* DPDMAI Version */
-+#define DPDMAI_VER_MAJOR 2
-+#define DPDMAI_VER_MINOR 2
-+
-+#define DPDMAI_CMD_BASE_VERSION 0
-+#define DPDMAI_CMD_ID_OFFSET 4
-+
-+/* Command IDs */
-+#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+
-+#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+
-+#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+
-+#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
-+
-+
-+#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
-+#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
-+
-+
-+#define MAKE_UMASK64(_width) \
-+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
-+ (uint64_t)-1))
-+
-+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
-+{
-+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
-+}
-+
-+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
-+{
-+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
-+}
-+
-+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
-+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
-+
-+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
-+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
-+
-+#define MC_CMD_HDR_READ_TOKEN(_hdr) \
-+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
-+
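The MC_CMD_OP()/MC_RSP_OP() pair above is plain bit packing into one of the 64-bit command parameters via mc_enc()/mc_dec(). A standalone round-trip sketch, assuming the two inline helpers above are copied into scope (the field values are arbitrary):

    /* Worked example of the bit packing used by the macros above. */
    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            uint64_t param = 0;
            uint8_t irq_index = 3;

            /* Same packing as MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) */
            param |= mc_enc(32, 8, irq_index);

            /* Same unpacking as the matching MC_RSP_OP() decode */
            assert((uint8_t)mc_dec(param, 32, 8) == irq_index);
            return 0;
    }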
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
-+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
-+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
-+
-+#endif /* _FSL_DPDMAI_CMD_H */
---- a/drivers/dma/fsl-edma.c
-+++ b/drivers/dma/fsl-edma.c
-@@ -146,6 +146,8 @@ struct fsl_edma_slave_config {
- u32 dev_addr;
- u32 burst;
- u32 attr;
-+ dma_addr_t dma_dev_addr;
-+ enum dma_data_direction dma_dir;
- };
-
- struct fsl_edma_chan {
-@@ -342,6 +344,53 @@ static int fsl_edma_resume(struct dma_ch
- return 0;
- }
-
-+static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
-+{
-+ if (fsl_chan->fsc.dma_dir != DMA_NONE)
-+ dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
-+ fsl_chan->fsc.dma_dev_addr,
-+ fsl_chan->fsc.burst, fsl_chan->fsc.dma_dir, 0);
-+ fsl_chan->fsc.dma_dir = DMA_NONE;
-+}
-+
-+static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
-+ enum dma_transfer_direction dir)
-+{
-+ struct device *dev = fsl_chan->vchan.chan.device->dev;
-+ enum dma_data_direction dma_dir;
-+
-+ switch (dir) {
-+ case DMA_MEM_TO_DEV:
-+ dma_dir = DMA_FROM_DEVICE;
-+ break;
-+ case DMA_DEV_TO_MEM:
-+ dma_dir = DMA_TO_DEVICE;
-+ break;
-+ case DMA_DEV_TO_DEV:
-+ dma_dir = DMA_BIDIRECTIONAL;
-+ break;
-+ default:
-+ dma_dir = DMA_NONE;
-+ break;
-+ }
-+
-+ /* Already mapped for this config? */
-+ if (fsl_chan->fsc.dma_dir == dma_dir)
-+ return true;
-+
-+ fsl_edma_unprep_slave_dma(fsl_chan);
-+ fsl_chan->fsc.dma_dev_addr = dma_map_resource(dev,
-+ fsl_chan->fsc.dev_addr,
-+ fsl_chan->fsc.burst,
-+ dma_dir, 0);
-+ if (dma_mapping_error(dev, fsl_chan->fsc.dma_dev_addr))
-+ return false;
-+
-+ fsl_chan->fsc.dma_dir = dma_dir;
-+
-+ return true;
-+}
-+
- static int fsl_edma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *cfg)
- {
-@@ -361,6 +410,7 @@ static int fsl_edma_slave_config(struct
- } else {
- return -EINVAL;
- }
-+ fsl_edma_unprep_slave_dma(fsl_chan);
- return 0;
- }
-
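The new fsl_edma_prep_slave_dma()/fsl_edma_unprep_slave_dma() pair maps the slave FIFO address from dma_slave_config with dma_map_resource() before it is programmed into the transfer, and unmaps it when the configuration changes or the channel is freed. From a client driver's point of view nothing changes; a generic dmaengine usage sketch (not part of the patch; the channel name, function name and addresses are placeholders, and <linux/dmaengine.h> is assumed):

    /* Generic dmaengine client sketch, not part of the patch. */
    static int example_start_tx(struct device *dev, phys_addr_t fifo_addr,
                                dma_addr_t buf_dma, size_t len)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_MEM_TO_DEV,
                    .dst_addr       = fifo_addr,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 4,
            };
            struct dma_async_tx_descriptor *desc;
            struct dma_chan *chan;

            chan = dma_request_chan(dev, "tx");   /* channel name is a placeholder */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            dmaengine_slave_config(chan, &cfg);
            desc = dmaengine_prep_slave_single(chan, buf_dma, len,
                                               DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!desc) {
                    dma_release_channel(chan);
                    return -EINVAL;
            }

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }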
-@@ -553,6 +603,9 @@ static struct dma_async_tx_descriptor *f
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
-+ if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
-+ return NULL;
-+
- sg_len = buf_len / period_len;
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
-@@ -572,11 +625,11 @@ static struct dma_async_tx_descriptor *f
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = dma_buf_next;
-- dst_addr = fsl_chan->fsc.dev_addr;
-+ dst_addr = fsl_chan->fsc.dma_dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
-- src_addr = fsl_chan->fsc.dev_addr;
-+ src_addr = fsl_chan->fsc.dma_dev_addr;
- dst_addr = dma_buf_next;
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
-@@ -606,6 +659,9 @@ static struct dma_async_tx_descriptor *f
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
-+ if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
-+ return NULL;
-+
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
- return NULL;
-@@ -618,11 +674,11 @@ static struct dma_async_tx_descriptor *f
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = sg_dma_address(sg);
-- dst_addr = fsl_chan->fsc.dev_addr;
-+ dst_addr = fsl_chan->fsc.dma_dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
-- src_addr = fsl_chan->fsc.dev_addr;
-+ src_addr = fsl_chan->fsc.dma_dev_addr;
- dst_addr = sg_dma_address(sg);
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
-@@ -802,6 +858,7 @@ static void fsl_edma_free_chan_resources
- fsl_edma_chan_mux(fsl_chan, 0, false);
- fsl_chan->edesc = NULL;
- vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-+ fsl_edma_unprep_slave_dma(fsl_chan);
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-@@ -937,6 +994,7 @@ static int fsl_edma_probe(struct platfor
- fsl_chan->slave_id = 0;
- fsl_chan->idle = true;
- fsl_chan->vchan.desc_free = fsl_edma_free_desc;
-+ fsl_chan->fsc.dma_dir = DMA_NONE;
- vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
-
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
---- /dev/null
-+++ b/drivers/dma/fsl-qdma.c
-@@ -0,0 +1,1278 @@
-+/*
-+ * Driver for NXP Layerscape Queue direct memory access controller (qDMA)
-+ *
-+ * Copyright 2017 NXP
-+ *
-+ * Author:
-+ * Jiaheng Fan <jiaheng.fan@nxp.com>
-+ * Wen He <wen.he_1@nxp.com>
-+ *
-+ * SPDX-License-Identifier: GPL-2.0+
-+ */
-+
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <linux/delay.h>
-+#include <linux/of_irq.h>
-+#include <linux/of_address.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_dma.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/dmaengine.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+
-+#include "virt-dma.h"
-+
-+#define FSL_QDMA_DMR 0x0
-+#define FSL_QDMA_DSR 0x4
-+#define FSL_QDMA_DEIER 0xe00
-+#define FSL_QDMA_DEDR 0xe04
-+#define FSL_QDMA_DECFDW0R 0xe10
-+#define FSL_QDMA_DECFDW1R 0xe14
-+#define FSL_QDMA_DECFDW2R 0xe18
-+#define FSL_QDMA_DECFDW3R 0xe1c
-+#define FSL_QDMA_DECFQIDR 0xe30
-+#define FSL_QDMA_DECBR 0xe34
-+
-+#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
-+#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
-+#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
-+#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
-+#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
-+#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
-+#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
-+#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
-+
-+#define FSL_QDMA_SQDPAR 0x80c
-+#define FSL_QDMA_SQEPAR 0x814
-+#define FSL_QDMA_BSQMR 0x800
-+#define FSL_QDMA_BSQSR 0x804
-+#define FSL_QDMA_BSQICR 0x828
-+#define FSL_QDMA_CQMR 0xa00
-+#define FSL_QDMA_CQDSCR1 0xa08
-+#define FSL_QDMA_CQDSCR2 0xa0c
-+#define FSL_QDMA_CQIER 0xa10
-+#define FSL_QDMA_CQEDR 0xa14
-+#define FSL_QDMA_SQCCMR 0xa20
-+
-+#define FSL_QDMA_SQICR_ICEN
-+
-+#define FSL_QDMA_CQIDR_CQT 0xff000000
-+#define FSL_QDMA_CQIDR_SQPE 0x800000
-+#define FSL_QDMA_CQIDR_SQT 0x8000
-+
-+#define FSL_QDMA_BCQIER_CQTIE 0x8000
-+#define FSL_QDMA_BCQIER_CQPEIE 0x800000
-+#define FSL_QDMA_BSQICR_ICEN 0x80000000
-+#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
-+#define FSL_QDMA_CQIER_MEIE 0x80000000
-+#define FSL_QDMA_CQIER_TEIE 0x1
-+#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
-+
-+#define FSL_QDMA_QUEUE_MAX 8
-+
-+#define FSL_QDMA_BCQMR_EN 0x80000000
-+#define FSL_QDMA_BCQMR_EI 0x40000000
-+#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
-+#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
-+
-+#define FSL_QDMA_BCQSR_QF 0x10000
-+#define FSL_QDMA_BCQSR_XOFF 0x1
-+
-+#define FSL_QDMA_BSQMR_EN 0x80000000
-+#define FSL_QDMA_BSQMR_DI 0x40000000
-+#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
-+
-+#define FSL_QDMA_BSQSR_QE 0x20000
-+
-+#define FSL_QDMA_DMR_DQD 0x40000000
-+#define FSL_QDMA_DSR_DB 0x80000000
-+
-+#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
-+#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
-+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
-+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
-+#define FSL_QDMA_QUEUE_NUM_MAX 8
-+
-+#define FSL_QDMA_CMD_RWTTYPE 0x4
-+#define FSL_QDMA_CMD_LWC 0x2
-+
-+#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
-+#define FSL_QDMA_CMD_NS_OFFSET 27
-+#define FSL_QDMA_CMD_DQOS_OFFSET 24
-+#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
-+#define FSL_QDMA_CMD_DSEN_OFFSET 19
-+#define FSL_QDMA_CMD_LWC_OFFSET 16
-+
-+#define QDMA_CCDF_STATUS 20
-+#define QDMA_CCDF_OFFSET 20
-+#define QDMA_CCDF_MASK GENMASK(28, 20)
-+#define QDMA_CCDF_FOTMAT BIT(29)
-+#define QDMA_CCDF_SER BIT(30)
-+
-+#define QDMA_SG_FIN BIT(30)
-+#define QDMA_SG_EXT BIT(31)
-+#define QDMA_SG_LEN_MASK GENMASK(29, 0)
-+
-+#define QDMA_BIG_ENDIAN 0x00000001
-+#define COMP_TIMEOUT 1000
-+#define COMMAND_QUEUE_OVERFLLOW 10
-+
-+#define QDMA_IN(fsl_qdma_engine, addr) \
-+ (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
-+ ioread32be(addr) : ioread32(addr))
-+#define QDMA_OUT(fsl_qdma_engine, addr, val) \
-+ (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
-+ iowrite32be(val, addr) : iowrite32(val, addr))
-+
-+#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
-+ (((fsl_qdma_engine)->block_offset) * (x))
-+
-+static DEFINE_PER_CPU(u64, pre_addr);
-+static DEFINE_PER_CPU(u64, pre_queue);
-+
-+/* qDMA Command Descriptor Formats */
-+
-+struct fsl_qdma_format {
-+ __le32 status; /* ser, status */
-+ __le32 cfg; /* format, offset */
-+ union {
-+ struct {
-+ __le32 addr_lo; /* low 32-bits of 40-bit address */
-+ u8 addr_hi; /* high 8-bits of 40-bit address */
-+ u8 __reserved1[2];
-+ u8 cfg8b_w1; /* dd, queue */
-+ } __packed;
-+ __le64 data;
-+ };
-+} __packed;
-+
-+static inline u64
-+qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
-+{
-+ return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
-+}
-+
-+static inline void
-+qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
-+{
-+ ccdf->addr_hi = upper_32_bits(addr);
-+ ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
-+}
-+
-+static inline u64
-+qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
-+{
-+ return ccdf->cfg8b_w1 & 0xff;
-+}
-+
-+static inline int
-+qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
-+{
-+ return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
-+}
-+
-+static inline void
-+qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-+{
-+ ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
-+}
-+
-+static inline int
-+qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
-+{
-+ return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
-+}
-+
-+static inline void
-+qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
-+{
-+ ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
-+}
-+
-+static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
-+{
-+ csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
-+}
-+
-+static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
-+{
-+ csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
-+}
-+
-+static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
-+{
-+ csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
-+}
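These accessors hide the 40-bit addressing: qdma_desc_addr_set64() splits an address across addr_lo/addr_hi, and qdma_ccdf_addr_get64() masks the combined 64-bit view back down to 40 bits. A tiny round-trip sketch (the address value is arbitrary):

    /* Illustration only, not part of the removed patch. */
    struct fsl_qdma_format f = { 0 };
    u64 addr = 0x12abcd0000ULL;          /* any address that fits in 40 bits */

    qdma_desc_addr_set64(&f, addr);
    /* f.addr_hi now holds bits 39:32, f.addr_lo holds bits 31:0 */
    WARN_ON(qdma_ccdf_addr_get64(&f) != addr);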
-+
-+/* qDMA Source Descriptor Format */
-+struct fsl_qdma_sdf {
-+ __le32 rev3;
-+ __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-+ __le32 rev5;
-+ __le32 cmd;
-+} __packed;
-+
-+/* qDMA Destination Descriptor Format */
-+struct fsl_qdma_ddf {
-+ __le32 rev1;
-+ __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-+ __le32 rev3;
-+ __le32 cmd;
-+} __packed;
-+
-+struct fsl_qdma_chan {
-+ struct virt_dma_chan vchan;
-+ struct virt_dma_desc vdesc;
-+ enum dma_status status;
-+ struct fsl_qdma_engine *qdma;
-+ struct fsl_qdma_queue *queue;
-+};
-+
-+struct fsl_qdma_queue {
-+ struct fsl_qdma_format *virt_head;
-+ struct fsl_qdma_format *virt_tail;
-+ struct list_head comp_used;
-+ struct list_head comp_free;
-+ struct dma_pool *comp_pool;
-+ struct dma_pool *desc_pool;
-+ spinlock_t queue_lock;
-+ dma_addr_t bus_addr;
-+ u32 n_cq;
-+ u32 id;
-+ struct fsl_qdma_format *cq;
-+ void __iomem *block_base;
-+};
-+
-+struct fsl_qdma_comp {
-+ dma_addr_t bus_addr;
-+ dma_addr_t desc_bus_addr;
-+ void *virt_addr;
-+ void *desc_virt_addr;
-+ struct fsl_qdma_chan *qchan;
-+ struct virt_dma_desc vdesc;
-+ struct list_head list;
-+};
-+
-+struct fsl_qdma_engine {
-+ struct dma_device dma_dev;
-+ void __iomem *ctrl_base;
-+ void __iomem *status_base;
-+ void __iomem *block_base;
-+ u32 n_chans;
-+ u32 n_queues;
-+ struct mutex fsl_qdma_mutex;
-+ int error_irq;
-+ int *queue_irq;
-+ bool big_endian;
-+ struct fsl_qdma_queue *queue;
-+ struct fsl_qdma_queue **status;
-+ struct fsl_qdma_chan *chans;
-+ int block_number;
-+ int block_offset;
-+ int irq_base;
-+ int desc_allocated;
-+
-+};
-+
-+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
-+{
-+ return QDMA_IN(qdma, addr);
-+}
-+
-+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
-+ void __iomem *addr)
-+{
-+ QDMA_OUT(qdma, addr, val);
-+}
-+
-+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
-+{
-+ return container_of(chan, struct fsl_qdma_chan, vchan.chan);
-+}
-+
-+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
-+{
-+ return container_of(vd, struct fsl_qdma_comp, vdesc);
-+}
-+
-+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-+ struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
-+ unsigned long flags;
-+ LIST_HEAD(head);
-+
-+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-+
-+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-+
-+ if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
-+ return;
-+
-+ list_for_each_entry_safe(comp_temp, _comp_temp,
-+ &fsl_queue->comp_used, list) {
-+ dma_pool_free(fsl_queue->comp_pool,
-+ comp_temp->virt_addr,
-+ comp_temp->bus_addr);
-+ dma_pool_free(fsl_queue->desc_pool,
-+ comp_temp->desc_virt_addr,
-+ comp_temp->desc_bus_addr);
-+ list_del(&comp_temp->list);
-+ kfree(comp_temp);
-+ }
-+
-+ list_for_each_entry_safe(comp_temp, _comp_temp,
-+ &fsl_queue->comp_free, list) {
-+ dma_pool_free(fsl_queue->comp_pool,
-+ comp_temp->virt_addr,
-+ comp_temp->bus_addr);
-+ dma_pool_free(fsl_queue->desc_pool,
-+ comp_temp->desc_virt_addr,
-+ comp_temp->desc_bus_addr);
-+ list_del(&comp_temp->list);
-+ kfree(comp_temp);
-+ }
-+
-+ dma_pool_destroy(fsl_queue->comp_pool);
-+ dma_pool_destroy(fsl_queue->desc_pool);
-+
-+ fsl_qdma->desc_allocated--;
-+ fsl_queue->comp_pool = NULL;
-+ fsl_queue->desc_pool = NULL;
-+}
-+
-+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-+ dma_addr_t dst, dma_addr_t src, u32 len)
-+{
-+ struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
-+ struct fsl_qdma_sdf *sdf;
-+ struct fsl_qdma_ddf *ddf;
-+
-+ ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
-+ csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
-+ csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-+ csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-+ sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
-+ ddf = (struct fsl_qdma_ddf *)fsl_comp->desc_virt_addr + 1;
-+
-+ memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-+ memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
-+ /* Head Command Descriptor(Frame Descriptor) */
-+ qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-+ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
-+ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
-+ /* Status notification is enqueued to status queue. */
-+ /* Compound Command Descriptor(Frame List Table) */
-+ qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
-+	/* The length must be 32: the size of the descriptor buffer (SDF + DDF) */
-+ qdma_csgf_set_len(csgf_desc, 32);
-+ qdma_desc_addr_set64(csgf_src, src);
-+ qdma_csgf_set_len(csgf_src, len);
-+ qdma_desc_addr_set64(csgf_dest, dst);
-+ qdma_csgf_set_len(csgf_dest, len);
-+ /* This entry is the last entry. */
-+ qdma_csgf_set_f(csgf_dest, len);
-+ /* Descriptor Buffer */
-+ sdf->cmd = cpu_to_le32(
-+ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
-+ ddf->cmd = cpu_to_le32(
-+ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
-+ ddf->cmd |= cpu_to_le32(
-+ FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
-+}
-+
-+/*
-+ * Pre-request command descriptor and compound S/G for enqueue.
-+ */
-+static int fsl_qdma_pre_request_enqueue_comp_desc(struct fsl_qdma_queue *queue)
-+{
-+ struct fsl_qdma_comp *comp_temp;
-+ int i;
-+
-+ for (i = 0; i < queue->n_cq + COMMAND_QUEUE_OVERFLLOW; i++) {
-+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
-+ if (!comp_temp)
-+ return -ENOMEM;
-+ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
-+ GFP_KERNEL,
-+ &comp_temp->bus_addr);
-+
-+ if (!comp_temp->virt_addr) {
-+ kfree(comp_temp);
-+ return -ENOMEM;
-+ }
-+
-+ list_add_tail(&comp_temp->list, &queue->comp_free);
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Pre-request source and destination descriptor for enqueue.
-+ */
-+static int fsl_qdma_pre_request_enqueue_sd_desc(struct fsl_qdma_queue *queue)
-+{
-+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
-+
-+ list_for_each_entry_safe(comp_temp, _comp_temp,
-+ &queue->comp_free, list) {
-+ comp_temp->desc_virt_addr = dma_pool_alloc(queue->desc_pool,
-+ GFP_KERNEL,
-+ &comp_temp->desc_bus_addr);
-+ if (!comp_temp->desc_virt_addr)
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Request a command descriptor for enqueue.
-+ */
-+static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
-+ struct fsl_qdma_chan *fsl_chan)
-+{
-+ struct fsl_qdma_comp *comp_temp;
-+ struct fsl_qdma_queue *queue = fsl_chan->queue;
-+ unsigned long flags;
-+ int timeout = COMP_TIMEOUT;
-+
-+ while (timeout) {
-+ spin_lock_irqsave(&queue->queue_lock, flags);
-+ if (!list_empty(&queue->comp_free)) {
-+ comp_temp = list_first_entry(&queue->comp_free,
-+ struct fsl_qdma_comp,
-+ list);
-+ list_del(&comp_temp->list);
-+
-+ spin_unlock_irqrestore(&queue->queue_lock, flags);
-+ comp_temp->qchan = fsl_chan;
-+ return comp_temp;
-+ }
-+ spin_unlock_irqrestore(&queue->queue_lock, flags);
-+ udelay(1);
-+ timeout--;
-+ }
-+
-+ return NULL;
-+}
-+
-+static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
-+ struct platform_device *pdev,
-+ struct fsl_qdma_engine *fsl_qdma)
-+{
-+ struct fsl_qdma_queue *queue_head, *queue_temp;
-+ int ret, len, i, j;
-+ unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-+ int queue_num;
-+ int block_number;
-+
-+ queue_num = fsl_qdma->n_queues;
-+ block_number = fsl_qdma->block_number;
-+
-+ if (queue_num > FSL_QDMA_QUEUE_MAX)
-+ queue_num = FSL_QDMA_QUEUE_MAX;
-+ len = sizeof(*queue_head) * queue_num * block_number;
-+ queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-+ if (!queue_head)
-+ return NULL;
-+
-+ ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
-+ queue_size, queue_num);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get queue-sizes.\n");
-+ return NULL;
-+ }
-+ for (j = 0; j < block_number; j++) {
-+ for (i = 0; i < queue_num; i++) {
-+ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-+ queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-+ dev_err(&pdev->dev,
-+ "Get wrong queue-sizes.\n");
-+ return NULL;
-+ }
-+ queue_temp = queue_head + i + (j * queue_num);
-+
-+ queue_temp->cq =
-+ dma_alloc_coherent(&pdev->dev,
-+ sizeof(struct fsl_qdma_format) *
-+ queue_size[i],
-+ &queue_temp->bus_addr,
-+ GFP_KERNEL);
-+ if (!queue_temp->cq)
-+ return NULL;
-+ queue_temp->block_base = fsl_qdma->block_base +
-+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-+ queue_temp->n_cq = queue_size[i];
-+ queue_temp->id = i;
-+ queue_temp->virt_head = queue_temp->cq;
-+ queue_temp->virt_tail = queue_temp->cq;
-+ /*
-+ * List for queue command buffer
-+ */
-+ INIT_LIST_HEAD(&queue_temp->comp_used);
-+ spin_lock_init(&queue_temp->queue_lock);
-+ }
-+ }
-+ return queue_head;
-+}
-+
-+static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
-+ struct platform_device *pdev)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct fsl_qdma_queue *status_head;
-+ unsigned int status_size;
-+ int ret;
-+
-+ ret = of_property_read_u32(np, "status-sizes", &status_size);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get status-sizes.\n");
-+ return NULL;
-+ }
-+ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
-+ || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-+ dev_err(&pdev->dev, "Get wrong status_size.\n");
-+ return NULL;
-+ }
-+ status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
-+ GFP_KERNEL);
-+ if (!status_head)
-+ return NULL;
-+
-+ /*
-+ * Buffer for queue command
-+ */
-+ status_head->cq = dma_alloc_coherent(&pdev->dev,
-+ sizeof(struct fsl_qdma_format) *
-+ status_size,
-+ &status_head->bus_addr,
-+ GFP_KERNEL);
-+ if (!status_head->cq)
-+ return NULL;
-+ status_head->n_cq = status_size;
-+ status_head->virt_head = status_head->cq;
-+ status_head->virt_tail = status_head->cq;
-+ status_head->comp_pool = NULL;
-+
-+ return status_head;
-+}
-+
-+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
-+{
-+ void __iomem *ctrl = fsl_qdma->ctrl_base;
-+ void __iomem *block;
-+ int i, count = 5;
-+ int j;
-+ u32 reg;
-+
-+ /* Disable the command queue and wait for idle state. */
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
-+ reg |= FSL_QDMA_DMR_DQD;
-+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
-+ for (j = 0; j < fsl_qdma->block_number; j++) {
-+ block = fsl_qdma->block_base +
-+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-+ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
-+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
-+ }
-+ while (1) {
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
-+ if (!(reg & FSL_QDMA_DSR_DB))
-+ break;
-+ if (count-- < 0)
-+ return -EBUSY;
-+ udelay(100);
-+ }
-+
-+ for (j = 0; j < fsl_qdma->block_number; j++) {
-+
-+ block = fsl_qdma->block_base +
-+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-+
-+ /* Disable status queue. */
-+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
-+
-+ /*
-+ * clear the command queue interrupt detect register for
-+ * all queues.
-+ */
-+ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
-+ }
-+
-+ return 0;
-+}
-+
-+static int fsl_qdma_queue_transfer_complete(
-+ struct fsl_qdma_engine *fsl_qdma,
-+ void *block,
-+ int id)
-+{
-+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-+ struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-+ struct fsl_qdma_queue *temp_queue;
-+ struct fsl_qdma_format *status_addr;
-+ struct fsl_qdma_comp *fsl_comp = NULL;
-+ u32 reg, i;
-+ bool duplicate, duplicate_handle;
-+
-+ while (1) {
-+ duplicate = 0;
-+ duplicate_handle = 0;
-+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
-+ if (reg & FSL_QDMA_BSQSR_QE)
-+ return 0;
-+
-+ status_addr = fsl_status->virt_head;
-+
-+ if (qdma_ccdf_get_queue(status_addr) ==
-+ __this_cpu_read(pre_queue) &&
-+ qdma_ccdf_addr_get64(status_addr) ==
-+ __this_cpu_read(pre_addr))
-+ duplicate = 1;
-+ i = qdma_ccdf_get_queue(status_addr) +
-+ id * fsl_qdma->n_queues;
-+ __this_cpu_write(pre_addr, qdma_ccdf_addr_get64(status_addr));
-+ __this_cpu_write(pre_queue, qdma_ccdf_get_queue(status_addr));
-+ temp_queue = fsl_queue + i;
-+
-+ spin_lock(&temp_queue->queue_lock);
-+ if (list_empty(&temp_queue->comp_used)) {
-+ if (duplicate)
-+ duplicate_handle = 1;
-+ else {
-+ spin_unlock(&temp_queue->queue_lock);
-+ return -1;
-+ }
-+ } else {
-+ fsl_comp = list_first_entry(&temp_queue->comp_used,
-+ struct fsl_qdma_comp,
-+ list);
-+ if (fsl_comp->bus_addr + 16 !=
-+ __this_cpu_read(pre_addr)) {
-+ if (duplicate)
-+ duplicate_handle = 1;
-+ else {
-+ spin_unlock(&temp_queue->queue_lock);
-+ return -1;
-+ }
-+ }
-+
-+ }
-+
-+ if (duplicate_handle) {
-+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
-+ reg |= FSL_QDMA_BSQMR_DI;
-+ qdma_desc_addr_set64(status_addr, 0x0);
-+ fsl_status->virt_head++;
-+ if (fsl_status->virt_head == fsl_status->cq
-+ + fsl_status->n_cq)
-+ fsl_status->virt_head = fsl_status->cq;
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
-+ spin_unlock(&temp_queue->queue_lock);
-+ continue;
-+ }
-+ list_del(&fsl_comp->list);
-+
-+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
-+ reg |= FSL_QDMA_BSQMR_DI;
-+ qdma_desc_addr_set64(status_addr, 0x0);
-+ fsl_status->virt_head++;
-+ if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-+ fsl_status->virt_head = fsl_status->cq;
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
-+ spin_unlock(&temp_queue->queue_lock);
-+
-+ spin_lock(&fsl_comp->qchan->vchan.lock);
-+ vchan_cookie_complete(&fsl_comp->vdesc);
-+ fsl_comp->qchan->status = DMA_COMPLETE;
-+ spin_unlock(&fsl_comp->qchan->vchan.lock);
-+ }
-+ return 0;
-+}
-+
-+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
-+{
-+ struct fsl_qdma_engine *fsl_qdma = dev_id;
-+ unsigned int intr;
-+ void __iomem *status = fsl_qdma->status_base;
-+
-+ intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
-+
-+ if (intr)
-+ dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
-+
-+ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
-+ return IRQ_HANDLED;
-+}
-+
-+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
-+{
-+ struct fsl_qdma_engine *fsl_qdma = dev_id;
-+ unsigned int intr, reg;
-+ void __iomem *ctrl = fsl_qdma->ctrl_base;
-+ void __iomem *block;
-+ int id;
-+
-+ id = irq - fsl_qdma->irq_base;
-+	if (id < 0 || id >= fsl_qdma->block_number) {
-+		dev_err(fsl_qdma->dma_dev.dev,
-+			"irq %d out of range, irq_base is %d\n",
-+ irq, fsl_qdma->irq_base);
-+ }
-+
-+ block = fsl_qdma->block_base +
-+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-+
-+ intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
-+
-+ if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
-+ intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
-+
-+ if (intr != 0) {
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
-+ reg |= FSL_QDMA_DMR_DQD;
-+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
-+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
-+ dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
-+ }
-+
-+ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int
-+fsl_qdma_irq_init(struct platform_device *pdev,
-+ struct fsl_qdma_engine *fsl_qdma)
-+{
-+ char irq_name[20];
-+ int i;
-+ int cpu;
-+ int ret;
-+
-+ fsl_qdma->error_irq = platform_get_irq_byname(pdev,
-+ "qdma-error");
-+ if (fsl_qdma->error_irq < 0) {
-+ dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
-+ return fsl_qdma->error_irq;
-+ }
-+
-+ ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
-+ fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
-+ return ret;
-+ }
-+
-+ for (i = 0; i < fsl_qdma->block_number; i++) {
-+ sprintf(irq_name, "qdma-queue%d", i);
-+ fsl_qdma->queue_irq[i] = platform_get_irq_byname(pdev,
-+ irq_name);
-+
-+ if (fsl_qdma->queue_irq[i] < 0) {
-+ dev_err(&pdev->dev,
-+ "Can't get qdma queue %d irq.\n",
-+ i);
-+ return fsl_qdma->queue_irq[i];
-+ }
-+
-+ ret = devm_request_irq(&pdev->dev,
-+ fsl_qdma->queue_irq[i],
-+ fsl_qdma_queue_handler,
-+ 0,
-+ "qDMA queue",
-+ fsl_qdma);
-+ if (ret) {
-+ dev_err(&pdev->dev,
-+ "Can't register qDMA queue IRQ.\n");
-+ return ret;
-+ }
-+
-+ cpu = i % num_online_cpus();
-+ ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
-+ get_cpu_mask(cpu));
-+ if (ret) {
-+ dev_err(&pdev->dev,
-+ "Can't set cpu %d affinity to IRQ %d.\n",
-+ cpu,
-+ fsl_qdma->queue_irq[i]);
-+ return ret;
-+ }
-+
-+ }
-+
-+ return 0;
-+}
-+
-+static void fsl_qdma_irq_exit(
-+ struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
-+{
-+ if (fsl_qdma->queue_irq[0] == fsl_qdma->error_irq) {
-+ devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
-+ } else {
-+ devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
-+ devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
-+ }
-+}
-+
-+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
-+{
-+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-+ struct fsl_qdma_queue *temp;
-+ void __iomem *ctrl = fsl_qdma->ctrl_base;
-+ void __iomem *status = fsl_qdma->status_base;
-+ void __iomem *block;
-+ int i, j, ret;
-+ u32 reg;
-+
-+ /* Try to halt the qDMA engine first. */
-+ ret = fsl_qdma_halt(fsl_qdma);
-+ if (ret) {
-+ dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
-+ return ret;
-+ }
-+
-+ for (i = 0; i < fsl_qdma->block_number; i++) {
-+ /*
-+ * Clear the command queue interrupt detect register for
-+ * all queues.
-+ */
-+
-+ block = fsl_qdma->block_base +
-+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
-+ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
-+ }
-+
-+ for (j = 0; j < fsl_qdma->block_number; j++) {
-+ block = fsl_qdma->block_base +
-+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-+ for (i = 0; i < fsl_qdma->n_queues; i++) {
-+ temp = fsl_queue + i + (j * fsl_qdma->n_queues);
-+ /*
-+ * Initialize Command Queue registers to
-+ * point to the first
-+ * command descriptor in memory.
-+ * Dequeue Pointer Address Registers
-+ * Enqueue Pointer Address Registers
-+ */
-+
-+ qdma_writel(fsl_qdma, temp->bus_addr,
-+ block + FSL_QDMA_BCQDPA_SADDR(i));
-+ qdma_writel(fsl_qdma, temp->bus_addr,
-+ block + FSL_QDMA_BCQEPA_SADDR(i));
-+
-+ /* Initialize the queue mode. */
-+ reg = FSL_QDMA_BCQMR_EN;
-+ reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-+ reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
-+ }
-+
-+ /*
-+		 * Workaround for erratum ERR010812:
-+		 * XOFF must be enabled to avoid enqueue rejections,
-+		 * so set SQCCMR ENTER_WM to 0x20.
-+ */
-+
-+ qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
-+ block + FSL_QDMA_SQCCMR);
-+
-+ /*
-+ * Initialize status queue registers to point to the first
-+ * command descriptor in memory.
-+ * Dequeue Pointer Address Registers
-+ * Enqueue Pointer Address Registers
-+ */
-+
-+ qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
-+ block + FSL_QDMA_SQEPAR);
-+ qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
-+ block + FSL_QDMA_SQDPAR);
-+ /* Initialize status queue interrupt. */
-+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
-+ block + FSL_QDMA_BCQIER(0));
-+ qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
-+ FSL_QDMA_BSQICR_ICST(5) | 0x8000,
-+ block + FSL_QDMA_BSQICR);
-+ qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
-+ FSL_QDMA_CQIER_TEIE,
-+ block + FSL_QDMA_CQIER);
-+
-+ /* Initialize the status queue mode. */
-+ reg = FSL_QDMA_BSQMR_EN;
-+ reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(
-+ fsl_qdma->status[j]->n_cq) - 6);
-+
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
-+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
-+
-+ }
-+
-+ /* Initialize controller interrupt register. */
-+ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
-+ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
-+
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
-+ reg &= ~FSL_QDMA_DMR_DQD;
-+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
-+
-+ return 0;
-+}
-+
-+static struct dma_async_tx_descriptor *
-+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
-+ dma_addr_t src, size_t len, unsigned long flags)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ struct fsl_qdma_comp *fsl_comp;
-+
-+ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
-+
-+ if (!fsl_comp)
-+ return NULL;
-+
-+ fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-+
-+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
-+}
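-+
-+/*
-+ * Usage sketch (illustrative only, not part of this driver): a client
-+ * normally reaches fsl_qdma_prep_memcpy() through the generic dmaengine
-+ * API rather than calling it directly. Assumes <linux/dmaengine.h>, that
-+ * the buffers are already DMA-mapped, and that any memcpy-capable channel
-+ * is acceptable; the function name is hypothetical.
-+ */
-+static int qdma_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
-+{
-+	struct dma_async_tx_descriptor *tx;
-+	struct dma_chan *chan;
-+	dma_cap_mask_t mask;
-+	dma_cookie_t cookie;
-+	int ret = 0;
-+
-+	dma_cap_zero(mask);
-+	dma_cap_set(DMA_MEMCPY, mask);
-+
-+	/* grab any channel advertising DMA_MEMCPY (e.g. a qDMA channel) */
-+	chan = dma_request_channel(mask, NULL, NULL);
-+	if (!chan)
-+		return -ENODEV;
-+
-+	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
-+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-+	if (!tx) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	cookie = dmaengine_submit(tx);	/* queue the descriptor */
-+	dma_async_issue_pending(chan);	/* ends up in fsl_qdma_issue_pending() */
-+
-+	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
-+		ret = -EIO;
-+out:
-+	dma_release_channel(chan);
-+	return ret;
-+}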
-+
-+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
-+{
-+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-+ struct fsl_qdma_comp *fsl_comp;
-+ struct virt_dma_desc *vdesc;
-+ void __iomem *block = fsl_queue->block_base;
-+ u32 reg;
-+
-+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
-+ if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
-+ return;
-+ vdesc = vchan_next_desc(&fsl_chan->vchan);
-+ if (!vdesc)
-+ return;
-+ list_del(&vdesc->node);
-+ fsl_comp = to_fsl_qdma_comp(vdesc);
-+
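-+	/* copy the 16-byte queue command descriptor into the next ring slot */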
-+ memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
-+ if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
-+ fsl_queue->virt_head = fsl_queue->cq;
-+
-+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
-+ barrier();
-+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
-+ reg |= FSL_QDMA_BCQMR_EI;
-+ qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-+ fsl_chan->status = DMA_IN_PROGRESS;
-+}
-+
-+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
-+ dma_cookie_t cookie, struct dma_tx_state *txstate)
-+{
-+ return dma_cookie_status(chan, cookie, txstate);
-+}
-+
-+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
-+{
-+ struct fsl_qdma_comp *fsl_comp;
-+ struct fsl_qdma_queue *fsl_queue;
-+ unsigned long flags;
-+
-+ fsl_comp = to_fsl_qdma_comp(vdesc);
-+ fsl_queue = fsl_comp->qchan->queue;
-+
-+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
-+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
-+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
-+}
-+
-+static void fsl_qdma_issue_pending(struct dma_chan *chan)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
-+ spin_lock(&fsl_chan->vchan.lock);
-+ if (vchan_issue_pending(&fsl_chan->vchan))
-+ fsl_qdma_enqueue_desc(fsl_chan);
-+ spin_unlock(&fsl_chan->vchan.lock);
-+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
-+}
-+
-+static void fsl_qdma_synchronize(struct dma_chan *chan)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+
-+ vchan_synchronize(&fsl_chan->vchan);
-+}
-+
-+static int fsl_qdma_terminate_all(struct dma_chan *chan)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ unsigned long flags;
-+ LIST_HEAD(head);
-+
-+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-+ return 0;
-+}
-+
-+static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-+ struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-+ int ret;
-+
-+ if (fsl_queue->comp_pool && fsl_queue->desc_pool)
-+ return fsl_qdma->desc_allocated;
-+
-+ INIT_LIST_HEAD(&fsl_queue->comp_free);
-+
-+ /*
-+ * The dma pool for queue command buffer
-+ */
-+ fsl_queue->comp_pool =
-+ dma_pool_create("comp_pool",
-+ chan->device->dev,
-+ FSL_QDMA_COMMAND_BUFFER_SIZE,
-+ 64, 0);
-+ if (!fsl_queue->comp_pool)
-+ return -ENOMEM;
-+
-+ /*
-+ * The dma pool for Descriptor(SD/DD) buffer
-+ */
-+ fsl_queue->desc_pool =
-+ dma_pool_create("desc_pool",
-+ chan->device->dev,
-+ FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
-+ 32, 0);
-+ if (!fsl_queue->desc_pool)
-+ goto err_desc_pool;
-+
-+ ret = fsl_qdma_pre_request_enqueue_comp_desc(fsl_queue);
-+ if (ret) {
-+ dev_err(chan->device->dev, "failed to alloc dma buffer for "
-+ "comp S/G descriptor\n");
-+ goto err_mem;
-+ }
-+
-+ ret = fsl_qdma_pre_request_enqueue_sd_desc(fsl_queue);
-+ if (ret) {
-+ dev_err(chan->device->dev, "failed to alloc dma buffer for "
-+ "S/D descriptor\n");
-+ goto err_mem;
-+ }
-+
-+ fsl_qdma->desc_allocated++;
-+ return fsl_qdma->desc_allocated;
-+
-+err_mem:
-+ dma_pool_destroy(fsl_queue->desc_pool);
-+err_desc_pool:
-+ dma_pool_destroy(fsl_queue->comp_pool);
-+ return -ENOMEM;
-+}
-+
-+static int fsl_qdma_probe(struct platform_device *pdev)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct fsl_qdma_engine *fsl_qdma;
-+ struct fsl_qdma_chan *fsl_chan;
-+ struct resource *res;
-+ unsigned int len, chans, queues;
-+ int ret, i;
-+ int blk_num;
-+ int blk_off;
-+
-+ ret = of_property_read_u32(np, "channels", &chans);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get channels.\n");
-+ return ret;
-+ }
-+
-+ ret = of_property_read_u32(np, "block-offset", &blk_off);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get block-offset.\n");
-+ return ret;
-+ }
-+
-+ ret = of_property_read_u32(np, "block-number", &blk_num);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get block-number.\n");
-+ return ret;
-+ }
-+
-+ blk_num = min_t(int, blk_num, num_online_cpus());
-+
-+ len = sizeof(*fsl_qdma);
-+ fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-+ if (!fsl_qdma)
-+ return -ENOMEM;
-+
-+ len = sizeof(*fsl_chan) * chans;
-+ fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-+ if (!fsl_qdma->chans)
-+ return -ENOMEM;
-+
-+ len = sizeof(struct fsl_qdma_queue *) * blk_num;
-+ fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-+ if (!fsl_qdma->status)
-+ return -ENOMEM;
-+
-+ len = sizeof(int) * blk_num;
-+ fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-+ if (!fsl_qdma->queue_irq)
-+ return -ENOMEM;
-+
-+ ret = of_property_read_u32(np, "queues", &queues);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get queues.\n");
-+ return ret;
-+ }
-+
-+ fsl_qdma->desc_allocated = 0;
-+ fsl_qdma->n_chans = chans;
-+ fsl_qdma->n_queues = queues;
-+ fsl_qdma->block_number = blk_num;
-+ fsl_qdma->block_offset = blk_off;
-+
-+ mutex_init(&fsl_qdma->fsl_qdma_mutex);
-+
-+ for (i = 0; i < fsl_qdma->block_number; i++) {
-+ fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
-+ if (!fsl_qdma->status[i])
-+ return -ENOMEM;
-+ }
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
-+ if (IS_ERR(fsl_qdma->ctrl_base))
-+ return PTR_ERR(fsl_qdma->ctrl_base);
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-+ fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
-+ if (IS_ERR(fsl_qdma->status_base))
-+ return PTR_ERR(fsl_qdma->status_base);
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-+ fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
-+ if (IS_ERR(fsl_qdma->block_base))
-+ return PTR_ERR(fsl_qdma->block_base);
-+ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
-+ if (!fsl_qdma->queue)
-+ return -ENOMEM;
-+
-+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-+ if (ret)
-+ return ret;
-+
-+ fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
-+ fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
-+ INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
-+
-+ for (i = 0; i < fsl_qdma->n_chans; i++) {
-+ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-+
-+ fsl_chan->qdma = fsl_qdma;
-+ fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-+ fsl_qdma->block_number);
-+ fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
-+ vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
-+ }
-+
-+ dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
-+
-+ fsl_qdma->dma_dev.dev = &pdev->dev;
-+ fsl_qdma->dma_dev.device_free_chan_resources
-+ = fsl_qdma_free_chan_resources;
-+ fsl_qdma->dma_dev.device_alloc_chan_resources
-+ = fsl_qdma_alloc_chan_resources;
-+ fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
-+ fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
-+ fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
-+ fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
-+ fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
-+
-+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
-+
-+ platform_set_drvdata(pdev, fsl_qdma);
-+
-+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
-+ if (ret) {
-+ dev_err(&pdev->dev,
-+ "Can't register NXP Layerscape qDMA engine.\n");
-+ return ret;
-+ }
-+
-+ ret = fsl_qdma_reg_init(fsl_qdma);
-+ if (ret) {
-+		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
-+{
-+ struct fsl_qdma_chan *chan, *_chan;
-+
-+ list_for_each_entry_safe(chan, _chan,
-+ &dmadev->channels, vchan.chan.device_node) {
-+ list_del(&chan->vchan.chan.device_node);
-+ tasklet_kill(&chan->vchan.task);
-+ }
-+}
-+
-+static int fsl_qdma_remove(struct platform_device *pdev)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
-+ struct fsl_qdma_queue *status;
-+ int i;
-+
-+ fsl_qdma_irq_exit(pdev, fsl_qdma);
-+ fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
-+ of_dma_controller_free(np);
-+ dma_async_device_unregister(&fsl_qdma->dma_dev);
-+
-+ for (i = 0; i < fsl_qdma->block_number; i++) {
-+ status = fsl_qdma->status[i];
-+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
-+ status->n_cq, status->cq, status->bus_addr);
-+ }
-+ return 0;
-+}
-+
-+static const struct of_device_id fsl_qdma_dt_ids[] = {
-+ { .compatible = "fsl,ls1021a-qdma", },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
-+
-+static struct platform_driver fsl_qdma_driver = {
-+ .driver = {
-+ .name = "fsl-qdma",
-+ .of_match_table = fsl_qdma_dt_ids,
-+ },
-+ .probe = fsl_qdma_probe,
-+ .remove = fsl_qdma_remove,
-+};
-+
-+module_platform_driver(fsl_qdma_driver);
-+
-+MODULE_ALIAS("platform:fsl-qdma");
-+MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
-+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/layerscape/patches-4.14/803-flextimer-support-layerscape.patch b/target/linux/layerscape/patches-4.14/803-flextimer-support-layerscape.patch
deleted file mode 100644
index b3616f040d..0000000000
--- a/target/linux/layerscape/patches-4.14/803-flextimer-support-layerscape.patch
+++ /dev/null
@@ -1,457 +0,0 @@
-From 0f31298eb0a9b2cd7990b709ff18229fadfa474b Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:38 +0800
-Subject: [PATCH] flextimer: support layerscape
-
-This is an integrated patch of flextimer for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Meng Yi <meng.yi@nxp.com>
-Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
-Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
----
- .../bindings/soc/fsl/layerscape/ftm-alarm.txt | 32 ++
- drivers/clocksource/fsl_ftm_timer.c | 8 +-
- drivers/soc/fsl/layerscape/ftm_alarm.c | 382 ++++++++++++++++++
- 3 files changed, 418 insertions(+), 4 deletions(-)
- create mode 100644 Documentation/devicetree/bindings/soc/fsl/layerscape/ftm-alarm.txt
- create mode 100644 drivers/soc/fsl/layerscape/ftm_alarm.c
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/soc/fsl/layerscape/ftm-alarm.txt
-@@ -0,0 +1,32 @@
-+Freescale FlexTimer Module (FTM) Alarm
-+
-+Required properties:
-+
-+- compatible : Should be "fsl,ftm-alarm" or "fsl,<chip>-ftm-alarm", the
-+ supported chips include
-+ "fsl,ls1012a-ftm-alarm"
-+ "fsl,ls1021a-ftm-alarm"
-+ "fsl,ls1043a-ftm-alarm"
-+ "fsl,ls1046a-ftm-alarm"
-+ "fsl,ls1088a-ftm-alarm"
-+ "fsl,ls208xa-ftm-alarm"
-+- reg : Specifies base physical address and size of the register sets for the
-+ FlexTimer Module and base physical address of IP Powerdown Exception Control
-+ Register.
-+- reg-names: names of the mapped memory regions listed in the reg property.
-+  It should include the following entries:
-+ "ftm": Address of the register sets for FlexTimer Module
-+ "pmctrl": Address of IP Powerdown Exception Control register
-+- interrupts : Should be the FlexTimer Module interrupt.
-+- big-endian: If the host controller is in big-endian mode, specify this property.
-+ The default endian mode is little-endian.
-+
-+Example:
-+ftm0: ftm0@29d0000 {
-+ compatible = "fsl,ls1043a-ftm-alarm";
-+ reg = <0x0 0x29d0000 0x0 0x10000>,
-+ <0x0 0x1ee2140 0x0 0x4>;
-+ reg-names = "ftm", "pmctrl";
-+ interrupts = <0 86 0x4>;
-+ big-endian;
-+};
---- a/drivers/clocksource/fsl_ftm_timer.c
-+++ b/drivers/clocksource/fsl_ftm_timer.c
-@@ -83,11 +83,11 @@ static inline void ftm_counter_disable(v
-
- static inline void ftm_irq_acknowledge(void __iomem *base)
- {
-- u32 val;
-+ unsigned int timeout = 100;
-
-- val = ftm_readl(base + FTM_SC);
-- val &= ~FTM_SC_TOF;
-- ftm_writel(val, base + FTM_SC);
-+ while ((FTM_SC_TOF & ftm_readl(base + FTM_SC)) && timeout--)
-+ ftm_writel(ftm_readl(base + FTM_SC) & (~FTM_SC_TOF),
-+ base + FTM_SC);
- }
-
- static inline void ftm_irq_enable(void __iomem *base)
---- /dev/null
-+++ b/drivers/soc/fsl/layerscape/ftm_alarm.c
-@@ -0,0 +1,382 @@
-+/*
-+ * Freescale FlexTimer Module (FTM) Alarm driver.
-+ *
-+ * Copyright 2014 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2
-+ * of the License, or (at your option) any later version.
-+ */
-+
-+#include <linux/device.h>
-+#include <linux/err.h>
-+#include <linux/interrupt.h>
-+#include <linux/io.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/platform_device.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/libata.h>
-+#include <linux/module.h>
-+
-+#define FTM_SC 0x00
-+#define FTM_SC_CLK_SHIFT 3
-+#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT)
-+#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT)
-+#define FTM_SC_PS_MASK 0x7
-+#define FTM_SC_TOIE BIT(6)
-+#define FTM_SC_TOF BIT(7)
-+
-+#define FTM_SC_CLKS_FIXED_FREQ 0x02
-+
-+#define FTM_CNT 0x04
-+#define FTM_MOD 0x08
-+#define FTM_CNTIN 0x4C
-+
-+#define FIXED_FREQ_CLK 32000
-+#define MAX_FREQ_DIV (1 << FTM_SC_PS_MASK)
-+#define MAX_COUNT_VAL 0xffff
-+
-+static void __iomem *ftm1_base;
-+static void __iomem *rcpm_ftm_addr;
-+static void __iomem *scfg_scrachpad_addr;
-+static u32 alarm_freq;
-+static bool big_endian;
-+
-+enum pmu_endian_type {
-+ BIG_ENDIAN,
-+ LITTLE_ENDIAN,
-+};
-+
-+struct rcpm_cfg {
-+ enum pmu_endian_type big_endian; /* Big/Little endian of PMU module */
-+	u32 flextimer_set_bit;	/* keeps FTM powered during device LPM20 */
-+};
-+
-+static struct rcpm_cfg ls1012a_rcpm_cfg = {
-+ .big_endian = BIG_ENDIAN,
-+ .flextimer_set_bit = 0x20000,
-+};
-+
-+static struct rcpm_cfg ls1021a_rcpm_cfg = {
-+ .big_endian = BIG_ENDIAN,
-+ .flextimer_set_bit = 0x30000000,
-+};
-+
-+static struct rcpm_cfg ls1043a_rcpm_cfg = {
-+ .big_endian = BIG_ENDIAN,
-+ .flextimer_set_bit = 0x20000,
-+};
-+
-+static struct rcpm_cfg ls1046a_rcpm_cfg = {
-+ .big_endian = BIG_ENDIAN,
-+ .flextimer_set_bit = 0x20000,
-+};
-+
-+static struct rcpm_cfg ls1088a_rcpm_cfg = {
-+ .big_endian = LITTLE_ENDIAN,
-+ .flextimer_set_bit = 0x4000,
-+};
-+
-+static struct rcpm_cfg ls208xa_rcpm_cfg = {
-+ .big_endian = LITTLE_ENDIAN,
-+ .flextimer_set_bit = 0x4000,
-+};
-+
-+static struct rcpm_cfg lx2160a_rcpm_cfg = {
-+ .big_endian = LITTLE_ENDIAN,
-+ .flextimer_set_bit = 0x4000,
-+};
-+
-+static const struct of_device_id ippdexpcr_of_match[] = {
-+ { .compatible = "fsl,ls1012a-ftm-alarm", .data = &ls1012a_rcpm_cfg},
-+ { .compatible = "fsl,ls1021a-ftm-alarm", .data = &ls1021a_rcpm_cfg},
-+ { .compatible = "fsl,ls1043a-ftm-alarm", .data = &ls1043a_rcpm_cfg},
-+ { .compatible = "fsl,ls1046a-ftm-alarm", .data = &ls1046a_rcpm_cfg},
-+ { .compatible = "fsl,ls1088a-ftm-alarm", .data = &ls1088a_rcpm_cfg},
-+ { .compatible = "fsl,ls208xa-ftm-alarm", .data = &ls208xa_rcpm_cfg},
-+ { .compatible = "fsl,lx2160a-ftm-alarm", .data = &lx2160a_rcpm_cfg},
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, ippdexpcr_of_match);
-+
-+static inline u32 ftm_readl(void __iomem *addr)
-+{
-+ if (big_endian)
-+ return ioread32be(addr);
-+
-+ return ioread32(addr);
-+}
-+
-+static inline void ftm_writel(u32 val, void __iomem *addr)
-+{
-+ if (big_endian)
-+ iowrite32be(val, addr);
-+ else
-+ iowrite32(val, addr);
-+}
-+
-+static inline void ftm_counter_enable(void __iomem *base)
-+{
-+ u32 val;
-+
-+ /* select and enable counter clock source */
-+ val = ftm_readl(base + FTM_SC);
-+ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
-+ val |= (FTM_SC_PS_MASK | FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ));
-+ ftm_writel(val, base + FTM_SC);
-+}
-+
-+static inline void ftm_counter_disable(void __iomem *base)
-+{
-+ u32 val;
-+
-+ /* disable counter clock source */
-+ val = ftm_readl(base + FTM_SC);
-+ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
-+ ftm_writel(val, base + FTM_SC);
-+}
-+
-+static inline void ftm_irq_acknowledge(void __iomem *base)
-+{
-+ unsigned int timeout = 100;
-+
-+ while ((FTM_SC_TOF & ftm_readl(base + FTM_SC)) && timeout--)
-+ ftm_writel(ftm_readl(base + FTM_SC) & (~FTM_SC_TOF),
-+ base + FTM_SC);
-+}
-+
-+static inline void ftm_irq_enable(void __iomem *base)
-+{
-+ u32 val;
-+
-+ val = ftm_readl(base + FTM_SC);
-+ val |= FTM_SC_TOIE;
-+ ftm_writel(val, base + FTM_SC);
-+}
-+
-+static inline void ftm_irq_disable(void __iomem *base)
-+{
-+ u32 val;
-+
-+ val = ftm_readl(base + FTM_SC);
-+ val &= ~FTM_SC_TOIE;
-+ ftm_writel(val, base + FTM_SC);
-+}
-+
-+static inline void ftm_reset_counter(void __iomem *base)
-+{
-+ /*
-+ * The CNT register contains the FTM counter value.
-+ * Reset clears the CNT register. Writing any value to COUNT
-+ * updates the counter with its initial value, CNTIN.
-+ */
-+ ftm_writel(0x00, base + FTM_CNT);
-+}
-+
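-+/*
-+ * With the 250 Hz tick derived below, the 16-bit counter (MAX_COUNT_VAL)
-+ * limits the alarm to roughly 262 seconds.
-+ */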
-+static u32 time_to_cycle(unsigned long time)
-+{
-+ u32 cycle;
-+
-+ cycle = time * alarm_freq;
-+ if (cycle > MAX_COUNT_VAL) {
-+ pr_err("Out of alarm range.\n");
-+ cycle = 0;
-+ }
-+
-+ return cycle;
-+}
-+
-+static u32 cycle_to_time(u32 cycle)
-+{
-+ return cycle / alarm_freq + 1;
-+}
-+
-+static void ftm_clean_alarm(void)
-+{
-+ ftm_counter_disable(ftm1_base);
-+
-+ ftm_writel(0x00, ftm1_base + FTM_CNTIN);
-+ ftm_writel(~0U, ftm1_base + FTM_MOD);
-+
-+ ftm_reset_counter(ftm1_base);
-+}
-+
-+static int ftm_set_alarm(u64 cycle)
-+{
-+ ftm_irq_disable(ftm1_base);
-+
-+ /*
-+ * The counter increments until the value of MOD is reached,
-+ * at which point the counter is reloaded with the value of CNTIN.
-+ * The TOF (the overflow flag) bit is set when the FTM counter
-+	 * changes from MOD to CNTIN. So we should use cycle - 1.
-+ */
-+ ftm_writel(cycle - 1, ftm1_base + FTM_MOD);
-+
-+ ftm_counter_enable(ftm1_base);
-+
-+ ftm_irq_enable(ftm1_base);
-+
-+ return 0;
-+}
-+
-+static irqreturn_t ftm_alarm_interrupt(int irq, void *dev_id)
-+{
-+ ftm_irq_acknowledge(ftm1_base);
-+ ftm_irq_disable(ftm1_base);
-+ ftm_clean_alarm();
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static ssize_t ftm_alarm_show(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ u32 count, val;
-+
-+ count = ftm_readl(ftm1_base + FTM_MOD);
-+ val = ftm_readl(ftm1_base + FTM_CNT);
-+ val = (count & MAX_COUNT_VAL) - val;
-+ val = cycle_to_time(val);
-+
-+ return sprintf(buf, "%u\n", val);
-+}
-+
-+static ssize_t ftm_alarm_store(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ u32 cycle;
-+ unsigned long time;
-+
-+ if (kstrtoul(buf, 0, &time))
-+ return -EINVAL;
-+
-+ ftm_clean_alarm();
-+
-+ cycle = time_to_cycle(time);
-+ if (!cycle)
-+ return -EINVAL;
-+
-+ ftm_set_alarm(cycle);
-+
-+ return count;
-+}
-+
-+static struct device_attribute ftm_alarm_attributes = __ATTR(ftm_alarm, 0644,
-+ ftm_alarm_show, ftm_alarm_store);
-+
-+static int ftm_alarm_probe(struct platform_device *pdev)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct resource *r;
-+ int irq;
-+ int ret;
-+ struct rcpm_cfg *rcpm_cfg;
-+ u32 ippdexpcr, flextimer;
-+ const struct of_device_id *of_id;
-+ enum pmu_endian_type endian;
-+
-+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ if (!r)
-+ return -ENODEV;
-+
-+ ftm1_base = devm_ioremap_resource(&pdev->dev, r);
-+ if (IS_ERR(ftm1_base))
-+ return PTR_ERR(ftm1_base);
-+
-+ of_id = of_match_node(ippdexpcr_of_match, np);
-+ if (!of_id)
-+ return -ENODEV;
-+
-+ rcpm_cfg = devm_kzalloc(&pdev->dev, sizeof(*rcpm_cfg), GFP_KERNEL);
-+ if (!rcpm_cfg)
-+ return -ENOMEM;
-+
-+ rcpm_cfg = (struct rcpm_cfg *)of_id->data;
-+ endian = rcpm_cfg->big_endian;
-+ flextimer = rcpm_cfg->flextimer_set_bit;
-+
-+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmctrl");
-+ if (r) {
-+ rcpm_ftm_addr = devm_ioremap_resource(&pdev->dev, r);
-+ if (IS_ERR(rcpm_ftm_addr))
-+ return PTR_ERR(rcpm_ftm_addr);
-+ if (endian == BIG_ENDIAN)
-+ ippdexpcr = ioread32be(rcpm_ftm_addr);
-+ else
-+ ippdexpcr = ioread32(rcpm_ftm_addr);
-+ ippdexpcr |= flextimer;
-+ if (endian == BIG_ENDIAN)
-+ iowrite32be(ippdexpcr, rcpm_ftm_addr);
-+ else
-+ iowrite32(ippdexpcr, rcpm_ftm_addr);
-+
-+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scrachpad");
-+ if (r) {
-+			scfg_scrachpad_addr = devm_ioremap_resource(&pdev->dev, r);
-+			if (!IS_ERR(scfg_scrachpad_addr))
-+				iowrite32(ippdexpcr, scfg_scrachpad_addr);
-+ }
-+ }
-+
-+ irq = irq_of_parse_and_map(np, 0);
-+ if (irq <= 0) {
-+ pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
-+ return -EINVAL;
-+ }
-+
-+ big_endian = of_property_read_bool(np, "big-endian");
-+
-+ ret = devm_request_irq(&pdev->dev, irq, ftm_alarm_interrupt,
-+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), NULL);
-+ if (ret < 0) {
-+ dev_err(&pdev->dev, "failed to request irq\n");
-+ return ret;
-+ }
-+
-+ ret = device_create_file(&pdev->dev, &ftm_alarm_attributes);
-+ if (ret) {
-+		dev_err(&pdev->dev, "failed to create sysfs file\n");
-+ return ret;
-+ }
-+
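-+	/* 32 kHz fixed clock / maximum prescaler (128) = 250 Hz tick */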
-+ alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV;
-+
-+ ftm_clean_alarm();
-+
-+ device_init_wakeup(&pdev->dev, true);
-+
-+ return ret;
-+}
-+
-+static const struct of_device_id ftm_alarm_match[] = {
-+ { .compatible = "fsl,ftm-alarm", },
-+ { .compatible = "fsl,ls1012a-ftm-alarm", },
-+ { .compatible = "fsl,ls1021a-ftm-alarm", },
-+ { .compatible = "fsl,ls1043a-ftm-alarm", },
-+ { .compatible = "fsl,ls1046a-ftm-alarm", },
-+ { .compatible = "fsl,ls1088a-ftm-alarm", },
-+ { .compatible = "fsl,ls208xa-ftm-alarm", },
-+ { .compatible = "fsl,lx2160a-ftm-alarm", },
-+ { .compatible = "fsl,ftm-timer", },
-+ { },
-+};
-+
-+static struct platform_driver ftm_alarm_driver = {
-+ .probe = ftm_alarm_probe,
-+ .driver = {
-+ .name = "ftm-alarm",
-+ .owner = THIS_MODULE,
-+ .of_match_table = ftm_alarm_match,
-+ },
-+};
-+
-+static int __init ftm_alarm_init(void)
-+{
-+ return platform_driver_register(&ftm_alarm_driver);
-+}
-+device_initcall(ftm_alarm_init);
diff --git a/target/linux/layerscape/patches-4.14/804-i2c-support-layerscape.patch b/target/linux/layerscape/patches-4.14/804-i2c-support-layerscape.patch
deleted file mode 100644
index 31c686d561..0000000000
--- a/target/linux/layerscape/patches-4.14/804-i2c-support-layerscape.patch
+++ /dev/null
@@ -1,478 +0,0 @@
-From 3f7d59061c38287bdc2fec2e94b4df9e6e62dbc6 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:39 +0800
-Subject: [PATCH] i2c: support layerscape
-
-This is an integrated patch of i2c for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
-Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
-Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
----
- drivers/i2c/busses/i2c-imx.c | 245 +++++++++++++++++++++++++---
- drivers/i2c/muxes/i2c-mux-pca954x.c | 44 ++++-
- 2 files changed, 268 insertions(+), 21 deletions(-)
-
---- a/drivers/i2c/busses/i2c-imx.c
-+++ b/drivers/i2c/busses/i2c-imx.c
-@@ -53,6 +53,11 @@
- #include <linux/pm_runtime.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
-+#include <linux/gpio.h>
-+#include <linux/of_address.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/libata.h>
-
- /* This will be the driver name the kernel reports */
- #define DRIVER_NAME "imx-i2c"
-@@ -117,6 +122,54 @@
-
- #define I2C_PM_TIMEOUT 10 /* ms */
-
-+enum pinmux_endian_type {
-+ BIG_ENDIAN,
-+ LITTLE_ENDIAN,
-+};
-+
-+struct pinmux_cfg {
-+ enum pinmux_endian_type endian; /* endian of RCWPMUXCR0 */
-+ u32 pmuxcr_offset;
-+ u32 pmuxcr_set_bit; /* pin mux of RCWPMUXCR0 */
-+};
-+
-+static struct pinmux_cfg ls1012a_pinmux_cfg = {
-+ .endian = BIG_ENDIAN,
-+ .pmuxcr_offset = 0x430,
-+ .pmuxcr_set_bit = 0x10,
-+};
-+
-+static struct pinmux_cfg ls1043a_pinmux_cfg = {
-+ .endian = BIG_ENDIAN,
-+ .pmuxcr_offset = 0x40C,
-+ .pmuxcr_set_bit = 0x10,
-+};
-+
-+static struct pinmux_cfg ls1046a_pinmux_cfg = {
-+ .endian = BIG_ENDIAN,
-+ .pmuxcr_offset = 0x40C,
-+ .pmuxcr_set_bit = 0x80000000,
-+};
-+
-+static const struct of_device_id pinmux_of_match[] = {
-+ { .compatible = "fsl,ls1012a-vf610-i2c", .data = &ls1012a_pinmux_cfg},
-+ { .compatible = "fsl,ls1043a-vf610-i2c", .data = &ls1043a_pinmux_cfg},
-+ { .compatible = "fsl,ls1046a-vf610-i2c", .data = &ls1046a_pinmux_cfg},
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, pinmux_of_match);
-+
-+/* The SCFG, Supplemental Configuration Unit, provides SoC specific
-+ * configuration and status registers for the device. On these
-+ * platforms it holds the RCWPMUXCR0 pin mux control register that
-+ * is used below to switch the I2C SCL pin to GPIO for bus recovery.
-+ */
-+static const struct of_device_id scfg_device_ids[] = {
-+ { .compatible = "fsl,ls1012a-scfg", },
-+ { .compatible = "fsl,ls1043a-scfg", },
-+ { .compatible = "fsl,ls1046a-scfg", },
-+ {}
-+};
- /*
- * sorted list of clock divider, register value pairs
- * taken from table 26-5, p.26-9, Freescale i.MX
-@@ -210,6 +263,12 @@ struct imx_i2c_struct {
- struct pinctrl_state *pinctrl_pins_gpio;
-
- struct imx_i2c_dma *dma;
-+ int layerscape_bus_recover;
-+ int gpio;
-+ int need_set_pmuxcr;
-+ int pmuxcr_set;
-+ int pmuxcr_endian;
-+ void __iomem *pmuxcr_addr;
- };
-
- static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
-@@ -281,8 +340,8 @@ static inline unsigned char imx_i2c_read
- }
-
- /* Functions for DMA support */
--static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
-- dma_addr_t phy_addr)
-+static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
-+ dma_addr_t phy_addr)
- {
- struct imx_i2c_dma *dma;
- struct dma_slave_config dma_sconfig;
-@@ -291,11 +350,13 @@ static void i2c_imx_dma_request(struct i
-
- dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
- if (!dma)
-- return;
-+ return -ENOMEM;
-
-- dma->chan_tx = dma_request_slave_channel(dev, "tx");
-- if (!dma->chan_tx) {
-- dev_dbg(dev, "can't request DMA tx channel\n");
-+ dma->chan_tx = dma_request_chan(dev, "tx");
-+ if (IS_ERR(dma->chan_tx)) {
-+ ret = PTR_ERR(dma->chan_tx);
-+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
-+ dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
- goto fail_al;
- }
-
-@@ -306,13 +367,15 @@ static void i2c_imx_dma_request(struct i
- dma_sconfig.direction = DMA_MEM_TO_DEV;
- ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig);
- if (ret < 0) {
-- dev_dbg(dev, "can't configure tx channel\n");
-+ dev_err(dev, "can't configure tx channel (%d)\n", ret);
- goto fail_tx;
- }
-
-- dma->chan_rx = dma_request_slave_channel(dev, "rx");
-- if (!dma->chan_rx) {
-- dev_dbg(dev, "can't request DMA rx channel\n");
-+ dma->chan_rx = dma_request_chan(dev, "rx");
-+ if (IS_ERR(dma->chan_rx)) {
-+ ret = PTR_ERR(dma->chan_rx);
-+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
-+ dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
- goto fail_tx;
- }
-
-@@ -323,7 +386,7 @@ static void i2c_imx_dma_request(struct i
- dma_sconfig.direction = DMA_DEV_TO_MEM;
- ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig);
- if (ret < 0) {
-- dev_dbg(dev, "can't configure rx channel\n");
-+ dev_err(dev, "can't configure rx channel (%d)\n", ret);
- goto fail_rx;
- }
-
-@@ -332,7 +395,7 @@ static void i2c_imx_dma_request(struct i
- dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
- dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
-
-- return;
-+ return 0;
-
- fail_rx:
- dma_release_channel(dma->chan_rx);
-@@ -340,7 +403,8 @@ fail_tx:
- dma_release_channel(dma->chan_tx);
- fail_al:
- devm_kfree(dev, dma);
-- dev_info(dev, "can't use DMA, using PIO instead.\n");
-+ /* return successfully if there is no dma support */
-+ return ret == -ENODEV ? 0 : ret;
- }
-
- static void i2c_imx_dma_callback(void *arg)
-@@ -878,6 +942,78 @@ static int i2c_imx_read(struct imx_i2c_s
- return 0;
- }
-
-+/*
-+ * Based on the I2C specification, if the data line (SDA) is
-+ * stuck low, the master should send nine clock pulses; the
-+ * slave device holding the bus low should release it within
-+ * those nine clocks. Due to a controller erratum, the I2C
-+ * controller cannot generate the pulses itself, so they are
-+ * bit-banged on the SCL GPIO below.
-+ */
-+static int i2c_imx_recovery_for_layerscape(struct imx_i2c_struct *i2c_imx)
-+{
-+ u32 pmuxcr = 0;
-+ int ret;
-+ unsigned int i, temp;
-+
-+ /* configure IICx_SCL/GPIO pin as a GPIO */
-+ if (i2c_imx->need_set_pmuxcr == 1) {
-+ pmuxcr = ioread32be(i2c_imx->pmuxcr_addr);
-+ if (i2c_imx->pmuxcr_endian == BIG_ENDIAN)
-+ iowrite32be(i2c_imx->pmuxcr_set|pmuxcr,
-+ i2c_imx->pmuxcr_addr);
-+ else
-+ iowrite32(i2c_imx->pmuxcr_set|pmuxcr,
-+ i2c_imx->pmuxcr_addr);
-+ }
-+
-+ ret = gpio_request(i2c_imx->gpio, i2c_imx->adapter.name);
-+ if (ret) {
-+ dev_err(&i2c_imx->adapter.dev,
-+ "can't get gpio: %d\n", ret);
-+ return ret;
-+ }
-+
-+ /* Configure GPIO pin as an output and open drain. */
-+ gpio_direction_output(i2c_imx->gpio, 1);
-+ udelay(10);
-+
-+ /* Write data to generate 9 pulses */
-+ for (i = 0; i < 9; i++) {
-+ gpio_set_value(i2c_imx->gpio, 1);
-+ udelay(10);
-+ gpio_set_value(i2c_imx->gpio, 0);
-+ udelay(10);
-+ }
-+ /* ensure that the last level sent is always high */
-+ gpio_set_value(i2c_imx->gpio, 1);
-+
-+ /*
-+ * Set I2Cx_IBCR = 0h00 to generate a STOP and then
-+ * set I2Cx_IBCR = 0h80 to reset
-+ */
-+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
-+ temp &= ~(I2CR_MSTA | I2CR_MTX);
-+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
-+
-+ /* Restore the saved value of the register SCFG_RCWPMUXCR0 */
-+ if (i2c_imx->need_set_pmuxcr == 1) {
-+ if (i2c_imx->pmuxcr_endian == BIG_ENDIAN)
-+ iowrite32be(pmuxcr, i2c_imx->pmuxcr_addr);
-+ else
-+ iowrite32(pmuxcr, i2c_imx->pmuxcr_addr);
-+ }
-+ /*
-+ * Set I2C_IBSR[IBAL] to clear the IBAL bit if-
-+ * I2C_IBSR[IBAL] = 1
-+ */
-+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
-+ if (temp & I2SR_IAL) {
-+ temp &= ~I2SR_IAL;
-+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
-+ }
-+ return 0;
-+}
-+
- static int i2c_imx_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs, int num)
- {
-@@ -888,6 +1024,19 @@ static int i2c_imx_xfer(struct i2c_adapt
-
- dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
-
-+ /*
-+	 * Workaround for ERR010027: ensure that the I2C bus is idle
-+ * before switching to master mode and attempting a Start cycle
-+ */
-+ result = i2c_imx_bus_busy(i2c_imx, 0);
-+ if (result) {
-+ /* timeout */
-+ if ((result == -ETIMEDOUT) && (i2c_imx->layerscape_bus_recover == 1))
-+ i2c_imx_recovery_for_layerscape(i2c_imx);
-+ else
-+ goto out;
-+ }
-+
- result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
- if (result < 0)
- goto out;
-@@ -1030,6 +1179,50 @@ static int i2c_imx_init_recovery_info(st
- return 0;
- }
-
-+/*
-+ * switch SCL and SDA to their GPIO function and do some bitbanging
-+ * for bus recovery.
-+ * There are platforms such as Layerscape that don't support pinctrl, so add
-+ * workaround for layerscape, it has no effect for other platforms.
-+ */
-+static int i2c_imx_init_recovery_for_layerscape(
-+ struct imx_i2c_struct *i2c_imx,
-+ struct platform_device *pdev)
-+{
-+ const struct of_device_id *of_id;
-+ struct device_node *np = pdev->dev.of_node;
-+ struct pinmux_cfg *pinmux_cfg;
-+ struct device_node *scfg_node;
-+ void __iomem *scfg_base = NULL;
-+
-+ i2c_imx->gpio = of_get_named_gpio(np, "fsl-scl-gpio", 0);
-+ if (!gpio_is_valid(i2c_imx->gpio)) {
-+ dev_info(&pdev->dev, "fsl-scl-gpio not found\n");
-+ return 0;
-+ }
-+ pinmux_cfg = devm_kzalloc(&pdev->dev, sizeof(*pinmux_cfg), GFP_KERNEL);
-+ if (!pinmux_cfg)
-+ return -ENOMEM;
-+
-+ i2c_imx->need_set_pmuxcr = 0;
-+ of_id = of_match_node(pinmux_of_match, np);
-+ if (of_id) {
-+ pinmux_cfg = (struct pinmux_cfg *)of_id->data;
-+ i2c_imx->pmuxcr_endian = pinmux_cfg->endian;
-+ i2c_imx->pmuxcr_set = pinmux_cfg->pmuxcr_set_bit;
-+ scfg_node = of_find_matching_node(NULL, scfg_device_ids);
-+ if (scfg_node) {
-+ scfg_base = of_iomap(scfg_node, 0);
-+ if (scfg_base) {
-+ i2c_imx->pmuxcr_addr = scfg_base + pinmux_cfg->pmuxcr_offset;
-+ i2c_imx->need_set_pmuxcr = 1;
-+ }
-+ }
-+ }
-+ i2c_imx->layerscape_bus_recover = 1;
-+ return 0;
-+}
-+
- static u32 i2c_imx_func(struct i2c_adapter *adapter)
- {
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
-@@ -1085,6 +1278,11 @@ static int i2c_imx_probe(struct platform
- i2c_imx->adapter.dev.of_node = pdev->dev.of_node;
- i2c_imx->base = base;
-
-+ /* Init optional bus recovery for layerscape */
-+ ret = i2c_imx_init_recovery_for_layerscape(i2c_imx, pdev);
-+ if (ret)
-+ return ret;
-+
- /* Get I2C clock */
- i2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(i2c_imx->clk)) {
-@@ -1104,7 +1302,8 @@ static int i2c_imx_probe(struct platform
- pdev->name, i2c_imx);
- if (ret) {
- dev_err(&pdev->dev, "can't claim irq %d\n", irq);
-- goto clk_disable;
-+ clk_disable_unprepare(i2c_imx->clk);
-+ return ret;
- }
-
- /* Init queue */
-@@ -1151,25 +1350,31 @@ static int i2c_imx_probe(struct platform
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
-
-+ /* Init DMA config if supported */
-+ ret = i2c_imx_dma_request(i2c_imx, phy_addr);
-+ if (ret) {
-+ if (ret != -EPROBE_DEFER)
-+ dev_info(&pdev->dev, "can't use DMA, using PIO instead.\n");
-+ else
-+ goto del_adapter;
-+ }
-+
- dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq);
- dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
- dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
- i2c_imx->adapter.name);
-- dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
--
-- /* Init DMA config if supported */
-- i2c_imx_dma_request(i2c_imx, phy_addr);
-
-+ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
- return 0; /* Return OK */
-
-+del_adapter:
-+ i2c_del_adapter(&i2c_imx->adapter);
- rpm_disable:
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
-
--clk_disable:
-- clk_disable_unprepare(i2c_imx->clk);
- return ret;
- }
-
---- a/drivers/i2c/muxes/i2c-mux-pca954x.c
-+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
-@@ -85,6 +85,7 @@ struct pca954x {
- struct irq_domain *irq;
- unsigned int irq_mask;
- raw_spinlock_t lock;
-+ u8 disable_mux; /* do not disable mux if val not 0 */
- };
-
- /* Provide specs for the PCA954x types we know about */
-@@ -221,6 +222,13 @@ static int pca954x_deselect_mux(struct i
- if (!(data->deselect & (1 << chan)))
- return 0;
-
-+#ifdef CONFIG_ARCH_LAYERSCAPE
-+ if (data->disable_mux != 0)
-+ data->last_chan = data->chip->nchans;
-+ else
-+ data->last_chan = 0;
-+ return pca954x_reg_write(muxc->parent, client, data->disable_mux);
-+#endif
- /* Deselect active channel */
- data->last_chan = 0;
- return pca954x_reg_write(muxc->parent, client, data->last_chan);
-@@ -361,6 +369,28 @@ static int pca954x_probe(struct i2c_clie
- return -ENOMEM;
- data = i2c_mux_priv(muxc);
-
-+#ifdef CONFIG_ARCH_LAYERSCAPE
-+	/* You must not disable a mux if there are no pull-ups on the
-+	 * input side, or the I2C bus is left in a bad state. The kernel
-+	 * cannot know this on its own, so it has to be described in the
-+	 * DTS via the "i2c-mux-never-disable" property.
-+ */
-+ match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
-+ if (match)
-+ data->chip = of_device_get_match_data(&client->dev);
-+ else
-+ data->chip = &chips[id->driver_data];
-+
-+ data->disable_mux = of_node &&
-+ of_property_read_bool(of_node, "i2c-mux-never-disable") &&
-+ data->chip->muxtype == pca954x_ismux ?
-+ data->chip->enable : 0;
-+ /* force the first selection */
-+ if (data->disable_mux != 0)
-+ data->last_chan = data->chip->nchans;
-+ else
-+ data->last_chan = 0;
-+#endif
- i2c_set_clientdata(client, muxc);
- data->client = client;
-
-@@ -373,18 +403,23 @@ static int pca954x_probe(struct i2c_clie
- * that the mux is in fact present. This also
- * initializes the mux to disconnected state.
- */
-+#ifdef CONFIG_ARCH_LAYERSCAPE
-+ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) {
-+#else
- if (i2c_smbus_write_byte(client, 0) < 0) {
-+#endif
- dev_warn(&client->dev, "probe failed\n");
- return -ENODEV;
- }
-
-+#ifndef CONFIG_ARCH_LAYERSCAPE
- match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
- if (match)
- data->chip = of_device_get_match_data(&client->dev);
- else
- data->chip = &chips[id->driver_data];
--
- data->last_chan = 0; /* force the first selection */
-+#endif
-
- idle_disconnect_dt = of_node &&
- of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
-@@ -454,6 +489,13 @@ static int pca954x_resume(struct device
- struct i2c_mux_core *muxc = i2c_get_clientdata(client);
- struct pca954x *data = i2c_mux_priv(muxc);
-
-+#ifdef CONFIG_ARCH_LAYERSCAPE
-+ if (data->disable_mux != 0)
-+ data->last_chan = data->chip->nchans;
-+ else
-+ data->last_chan = 0;
-+ return i2c_smbus_write_byte(client, data->disable_mux);
-+#endif
- data->last_chan = 0;
- return i2c_smbus_write_byte(client, 0);
- }
diff --git a/target/linux/layerscape/patches-4.14/805-qe-support-layerscape.patch b/target/linux/layerscape/patches-4.14/805-qe-support-layerscape.patch
deleted file mode 100644
index 0a6ad579ff..0000000000
--- a/target/linux/layerscape/patches-4.14/805-qe-support-layerscape.patch
+++ /dev/null
@@ -1,1961 +0,0 @@
-From f4e3e2cf6484056225385d717da4e9c4f8613935 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:58 +0800
-Subject: [PATCH] qe: support layerscape
-
-This is an integrated patch of qe for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
----
- .../fsl/qe/qe_ic.c => irqchip/irq-qeic.c} | 389 +++++++++++-------
- drivers/soc/fsl/qe/Kconfig | 2 +-
- drivers/soc/fsl/qe/Makefile | 2 +-
- drivers/soc/fsl/qe/qe.c | 80 ++--
- drivers/soc/fsl/qe/qe_ic.h | 103 -----
- drivers/soc/fsl/qe/qe_io.c | 42 +-
- drivers/soc/fsl/qe/qe_tdm.c | 8 +-
- drivers/soc/fsl/qe/ucc.c | 10 +-
- drivers/soc/fsl/qe/ucc_fast.c | 74 ++--
- drivers/tty/serial/ucc_uart.c | 1 +
- include/soc/fsl/qe/qe.h | 1 -
- include/soc/fsl/qe/qe_ic.h | 139 -------
- 12 files changed, 359 insertions(+), 492 deletions(-)
- rename drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} (54%)
- delete mode 100644 drivers/soc/fsl/qe/qe_ic.h
- delete mode 100644 include/soc/fsl/qe/qe_ic.h
-
---- a/drivers/soc/fsl/qe/qe_ic.c
-+++ /dev/null
-@@ -1,512 +0,0 @@
--/*
-- * arch/powerpc/sysdev/qe_lib/qe_ic.c
-- *
-- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
-- *
-- * Author: Li Yang <leoli@freescale.com>
-- * Based on code from Shlomi Gridish <gridish@freescale.com>
-- *
-- * QUICC ENGINE Interrupt Controller
-- *
-- * This program is free software; you can redistribute it and/or modify it
-- * under the terms of the GNU General Public License as published by the
-- * Free Software Foundation; either version 2 of the License, or (at your
-- * option) any later version.
-- */
--
--#include <linux/of_irq.h>
--#include <linux/of_address.h>
--#include <linux/kernel.h>
--#include <linux/init.h>
--#include <linux/errno.h>
--#include <linux/reboot.h>
--#include <linux/slab.h>
--#include <linux/stddef.h>
--#include <linux/sched.h>
--#include <linux/signal.h>
--#include <linux/device.h>
--#include <linux/spinlock.h>
--#include <asm/irq.h>
--#include <asm/io.h>
--#include <soc/fsl/qe/qe_ic.h>
--
--#include "qe_ic.h"
--
--static DEFINE_RAW_SPINLOCK(qe_ic_lock);
--
--static struct qe_ic_info qe_ic_info[] = {
-- [1] = {
-- .mask = 0x00008000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 0,
-- .pri_reg = QEIC_CIPWCC,
-- },
-- [2] = {
-- .mask = 0x00004000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 1,
-- .pri_reg = QEIC_CIPWCC,
-- },
-- [3] = {
-- .mask = 0x00002000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 2,
-- .pri_reg = QEIC_CIPWCC,
-- },
-- [10] = {
-- .mask = 0x00000040,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 1,
-- .pri_reg = QEIC_CIPZCC,
-- },
-- [11] = {
-- .mask = 0x00000020,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 2,
-- .pri_reg = QEIC_CIPZCC,
-- },
-- [12] = {
-- .mask = 0x00000010,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 3,
-- .pri_reg = QEIC_CIPZCC,
-- },
-- [13] = {
-- .mask = 0x00000008,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 4,
-- .pri_reg = QEIC_CIPZCC,
-- },
-- [14] = {
-- .mask = 0x00000004,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 5,
-- .pri_reg = QEIC_CIPZCC,
-- },
-- [15] = {
-- .mask = 0x00000002,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 6,
-- .pri_reg = QEIC_CIPZCC,
-- },
-- [20] = {
-- .mask = 0x10000000,
-- .mask_reg = QEIC_CRIMR,
-- .pri_code = 3,
-- .pri_reg = QEIC_CIPRTA,
-- },
-- [25] = {
-- .mask = 0x00800000,
-- .mask_reg = QEIC_CRIMR,
-- .pri_code = 0,
-- .pri_reg = QEIC_CIPRTB,
-- },
-- [26] = {
-- .mask = 0x00400000,
-- .mask_reg = QEIC_CRIMR,
-- .pri_code = 1,
-- .pri_reg = QEIC_CIPRTB,
-- },
-- [27] = {
-- .mask = 0x00200000,
-- .mask_reg = QEIC_CRIMR,
-- .pri_code = 2,
-- .pri_reg = QEIC_CIPRTB,
-- },
-- [28] = {
-- .mask = 0x00100000,
-- .mask_reg = QEIC_CRIMR,
-- .pri_code = 3,
-- .pri_reg = QEIC_CIPRTB,
-- },
-- [32] = {
-- .mask = 0x80000000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 0,
-- .pri_reg = QEIC_CIPXCC,
-- },
-- [33] = {
-- .mask = 0x40000000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 1,
-- .pri_reg = QEIC_CIPXCC,
-- },
-- [34] = {
-- .mask = 0x20000000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 2,
-- .pri_reg = QEIC_CIPXCC,
-- },
-- [35] = {
-- .mask = 0x10000000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 3,
-- .pri_reg = QEIC_CIPXCC,
-- },
-- [36] = {
-- .mask = 0x08000000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 4,
-- .pri_reg = QEIC_CIPXCC,
-- },
-- [40] = {
-- .mask = 0x00800000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 0,
-- .pri_reg = QEIC_CIPYCC,
-- },
-- [41] = {
-- .mask = 0x00400000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 1,
-- .pri_reg = QEIC_CIPYCC,
-- },
-- [42] = {
-- .mask = 0x00200000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 2,
-- .pri_reg = QEIC_CIPYCC,
-- },
-- [43] = {
-- .mask = 0x00100000,
-- .mask_reg = QEIC_CIMR,
-- .pri_code = 3,
-- .pri_reg = QEIC_CIPYCC,
-- },
--};
--
--static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
--{
-- return in_be32(base + (reg >> 2));
--}
--
--static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
-- u32 value)
--{
-- out_be32(base + (reg >> 2), value);
--}
--
--static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
--{
-- return irq_get_chip_data(virq);
--}
--
--static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
--{
-- return irq_data_get_irq_chip_data(d);
--}
--
--static void qe_ic_unmask_irq(struct irq_data *d)
--{
-- struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
-- unsigned int src = irqd_to_hwirq(d);
-- unsigned long flags;
-- u32 temp;
--
-- raw_spin_lock_irqsave(&qe_ic_lock, flags);
--
-- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-- qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-- temp | qe_ic_info[src].mask);
--
-- raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
--}
--
--static void qe_ic_mask_irq(struct irq_data *d)
--{
-- struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
-- unsigned int src = irqd_to_hwirq(d);
-- unsigned long flags;
-- u32 temp;
--
-- raw_spin_lock_irqsave(&qe_ic_lock, flags);
--
-- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-- qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-- temp & ~qe_ic_info[src].mask);
--
-- /* Flush the above write before enabling interrupts; otherwise,
-- * spurious interrupts will sometimes happen. To be 100% sure
-- * that the write has reached the device before interrupts are
-- * enabled, the mask register would have to be read back; however,
-- * this is not required for correctness, only to avoid wasting
-- * time on a large number of spurious interrupts. In testing,
-- * a sync reduced the observed spurious interrupts to zero.
-- */
-- mb();
--
-- raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
--}
--
--static struct irq_chip qe_ic_irq_chip = {
-- .name = "QEIC",
-- .irq_unmask = qe_ic_unmask_irq,
-- .irq_mask = qe_ic_mask_irq,
-- .irq_mask_ack = qe_ic_mask_irq,
--};
--
--static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
-- enum irq_domain_bus_token bus_token)
--{
-- /* Exact match, unless qe_ic node is NULL */
-- struct device_node *of_node = irq_domain_get_of_node(h);
-- return of_node == NULL || of_node == node;
--}
--
--static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
-- irq_hw_number_t hw)
--{
-- struct qe_ic *qe_ic = h->host_data;
-- struct irq_chip *chip;
--
-- if (hw >= ARRAY_SIZE(qe_ic_info)) {
-- pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
-- return -EINVAL;
-- }
--
-- if (qe_ic_info[hw].mask == 0) {
-- printk(KERN_ERR "Can't map reserved IRQ\n");
-- return -EINVAL;
-- }
-- /* Default chip */
-- chip = &qe_ic->hc_irq;
--
-- irq_set_chip_data(virq, qe_ic);
-- irq_set_status_flags(virq, IRQ_LEVEL);
--
-- irq_set_chip_and_handler(virq, chip, handle_level_irq);
--
-- return 0;
--}
--
--static const struct irq_domain_ops qe_ic_host_ops = {
-- .match = qe_ic_host_match,
-- .map = qe_ic_host_map,
-- .xlate = irq_domain_xlate_onetwocell,
--};
--
--/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
--unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
--{
-- int irq;
--
-- BUG_ON(qe_ic == NULL);
--
-- /* get the interrupt source vector. */
-- irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
--
-- if (irq == 0)
-- return NO_IRQ;
--
-- return irq_linear_revmap(qe_ic->irqhost, irq);
--}
--
--/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
--unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
--{
-- int irq;
--
-- BUG_ON(qe_ic == NULL);
--
-- /* get the interrupt source vector. */
-- irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
--
-- if (irq == 0)
-- return NO_IRQ;
--
-- return irq_linear_revmap(qe_ic->irqhost, irq);
--}
--
--void __init qe_ic_init(struct device_node *node, unsigned int flags,
-- void (*low_handler)(struct irq_desc *desc),
-- void (*high_handler)(struct irq_desc *desc))
--{
-- struct qe_ic *qe_ic;
-- struct resource res;
-- u32 temp = 0, ret, high_active = 0;
--
-- ret = of_address_to_resource(node, 0, &res);
-- if (ret)
-- return;
--
-- qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
-- if (qe_ic == NULL)
-- return;
--
-- qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
-- &qe_ic_host_ops, qe_ic);
-- if (qe_ic->irqhost == NULL) {
-- kfree(qe_ic);
-- return;
-- }
--
-- qe_ic->regs = ioremap(res.start, resource_size(&res));
--
-- qe_ic->hc_irq = qe_ic_irq_chip;
--
-- qe_ic->virq_high = irq_of_parse_and_map(node, 0);
-- qe_ic->virq_low = irq_of_parse_and_map(node, 1);
--
-- if (qe_ic->virq_low == NO_IRQ) {
-- printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
-- kfree(qe_ic);
-- return;
-- }
--
-- /* default priority scheme is grouped. If spread mode is */
-- /* required, configure cicr accordingly. */
-- if (flags & QE_IC_SPREADMODE_GRP_W)
-- temp |= CICR_GWCC;
-- if (flags & QE_IC_SPREADMODE_GRP_X)
-- temp |= CICR_GXCC;
-- if (flags & QE_IC_SPREADMODE_GRP_Y)
-- temp |= CICR_GYCC;
-- if (flags & QE_IC_SPREADMODE_GRP_Z)
-- temp |= CICR_GZCC;
-- if (flags & QE_IC_SPREADMODE_GRP_RISCA)
-- temp |= CICR_GRTA;
-- if (flags & QE_IC_SPREADMODE_GRP_RISCB)
-- temp |= CICR_GRTB;
--
-- /* choose destination signal for highest priority interrupt */
-- if (flags & QE_IC_HIGH_SIGNAL) {
-- temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
-- high_active = 1;
-- }
--
-- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
--
-- irq_set_handler_data(qe_ic->virq_low, qe_ic);
-- irq_set_chained_handler(qe_ic->virq_low, low_handler);
--
-- if (qe_ic->virq_high != NO_IRQ &&
-- qe_ic->virq_high != qe_ic->virq_low) {
-- irq_set_handler_data(qe_ic->virq_high, qe_ic);
-- irq_set_chained_handler(qe_ic->virq_high, high_handler);
-- }
--}
--
--void qe_ic_set_highest_priority(unsigned int virq, int high)
--{
-- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-- unsigned int src = virq_to_hw(virq);
-- u32 temp = 0;
--
-- temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
--
-- temp &= ~CICR_HP_MASK;
-- temp |= src << CICR_HP_SHIFT;
--
-- temp &= ~CICR_HPIT_MASK;
-- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
--
-- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
--}
--
--/* Set Priority level within its group, from 1 to 8 */
--int qe_ic_set_priority(unsigned int virq, unsigned int priority)
--{
-- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-- unsigned int src = virq_to_hw(virq);
-- u32 temp;
--
-- if (priority > 8 || priority == 0)
-- return -EINVAL;
-- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
-- "%s: Invalid hw irq number for QEIC\n", __func__))
-- return -EINVAL;
-- if (qe_ic_info[src].pri_reg == 0)
-- return -EINVAL;
--
-- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
--
-- if (priority < 4) {
-- temp &= ~(0x7 << (32 - priority * 3));
-- temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
-- } else {
-- temp &= ~(0x7 << (24 - priority * 3));
-- temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
-- }
--
-- qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
--
-- return 0;
--}
--
--/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
--int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
--{
-- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-- unsigned int src = virq_to_hw(virq);
-- u32 temp, control_reg = QEIC_CICNR, shift = 0;
--
-- if (priority > 2 || priority == 0)
-- return -EINVAL;
-- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
-- "%s: Invalid hw irq number for QEIC\n", __func__))
-- return -EINVAL;
--
-- switch (qe_ic_info[src].pri_reg) {
-- case QEIC_CIPZCC:
-- shift = CICNR_ZCC1T_SHIFT;
-- break;
-- case QEIC_CIPWCC:
-- shift = CICNR_WCC1T_SHIFT;
-- break;
-- case QEIC_CIPYCC:
-- shift = CICNR_YCC1T_SHIFT;
-- break;
-- case QEIC_CIPXCC:
-- shift = CICNR_XCC1T_SHIFT;
-- break;
-- case QEIC_CIPRTA:
-- shift = CRICR_RTA1T_SHIFT;
-- control_reg = QEIC_CRICR;
-- break;
-- case QEIC_CIPRTB:
-- shift = CRICR_RTB1T_SHIFT;
-- control_reg = QEIC_CRICR;
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- shift += (2 - priority) * 2;
-- temp = qe_ic_read(qe_ic->regs, control_reg);
-- temp &= ~(SIGNAL_MASK << shift);
-- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
-- qe_ic_write(qe_ic->regs, control_reg, temp);
--
-- return 0;
--}
--
--static struct bus_type qe_ic_subsys = {
-- .name = "qe_ic",
-- .dev_name = "qe_ic",
--};
--
--static struct device device_qe_ic = {
-- .id = 0,
-- .bus = &qe_ic_subsys,
--};
--
--static int __init init_qe_ic_sysfs(void)
--{
-- int rc;
--
-- printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
--
-- rc = subsys_system_register(&qe_ic_subsys, NULL);
-- if (rc) {
-- printk(KERN_ERR "Failed registering qe_ic sys class\n");
-- return -ENODEV;
-- }
-- rc = device_register(&device_qe_ic);
-- if (rc) {
-- printk(KERN_ERR "Failed registering qe_ic sys device\n");
-- return -ENODEV;
-- }
-- return 0;
--}
--
--subsys_initcall(init_qe_ic_sysfs);
---- /dev/null
-+++ b/drivers/irqchip/irq-qeic.c
-@@ -0,0 +1,605 @@
-+/*
-+ * drivers/irqchip/irq-qeic.c
-+ *
-+ * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved.
-+ *
-+ * Author: Li Yang <leoli@freescale.com>
-+ * Based on code from Shlomi Gridish <gridish@freescale.com>
-+ *
-+ * QUICC ENGINE Interrupt Controller
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the
-+ * Free Software Foundation; either version 2 of the License, or (at your
-+ * option) any later version.
-+ */
-+
-+#include <linux/of_irq.h>
-+#include <linux/of_address.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/irqdomain.h>
-+#include <linux/irqchip.h>
-+#include <linux/errno.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/reboot.h>
-+#include <linux/slab.h>
-+#include <linux/stddef.h>
-+#include <linux/sched.h>
-+#include <linux/signal.h>
-+#include <linux/device.h>
-+#include <linux/spinlock.h>
-+#include <linux/irq.h>
-+#include <asm/io.h>
-+
-+#define NR_QE_IC_INTS 64
-+
-+/* QE IC registers offset */
-+#define QEIC_CICR 0x00
-+#define QEIC_CIVEC 0x04
-+#define QEIC_CRIPNR 0x08
-+#define QEIC_CIPNR 0x0c
-+#define QEIC_CIPXCC 0x10
-+#define QEIC_CIPYCC 0x14
-+#define QEIC_CIPWCC 0x18
-+#define QEIC_CIPZCC 0x1c
-+#define QEIC_CIMR 0x20
-+#define QEIC_CRIMR 0x24
-+#define QEIC_CICNR 0x28
-+#define QEIC_CIPRTA 0x30
-+#define QEIC_CIPRTB 0x34
-+#define QEIC_CRICR 0x3c
-+#define QEIC_CHIVEC 0x60
-+
-+/* Interrupt priority registers */
-+#define CIPCC_SHIFT_PRI0 29
-+#define CIPCC_SHIFT_PRI1 26
-+#define CIPCC_SHIFT_PRI2 23
-+#define CIPCC_SHIFT_PRI3 20
-+#define CIPCC_SHIFT_PRI4 13
-+#define CIPCC_SHIFT_PRI5 10
-+#define CIPCC_SHIFT_PRI6 7
-+#define CIPCC_SHIFT_PRI7 4
-+
-+/* CICR priority modes */
-+#define CICR_GWCC 0x00040000
-+#define CICR_GXCC 0x00020000
-+#define CICR_GYCC 0x00010000
-+#define CICR_GZCC 0x00080000
-+#define CICR_GRTA 0x00200000
-+#define CICR_GRTB 0x00400000
-+#define CICR_HPIT_SHIFT 8
-+#define CICR_HPIT_MASK 0x00000300
-+#define CICR_HP_SHIFT 24
-+#define CICR_HP_MASK 0x3f000000
-+
-+/* CICNR */
-+#define CICNR_WCC1T_SHIFT 20
-+#define CICNR_ZCC1T_SHIFT 28
-+#define CICNR_YCC1T_SHIFT 12
-+#define CICNR_XCC1T_SHIFT 4
-+
-+/* CRICR */
-+#define CRICR_RTA1T_SHIFT 20
-+#define CRICR_RTB1T_SHIFT 28
-+
-+/* Signal indicator */
-+#define SIGNAL_MASK 3
-+#define SIGNAL_HIGH 2
-+#define SIGNAL_LOW 0
-+
-+#define NUM_OF_QE_IC_GROUPS 6
-+
-+/* Flags when we init the QE IC */
-+#define QE_IC_SPREADMODE_GRP_W 0x00000001
-+#define QE_IC_SPREADMODE_GRP_X 0x00000002
-+#define QE_IC_SPREADMODE_GRP_Y 0x00000004
-+#define QE_IC_SPREADMODE_GRP_Z 0x00000008
-+#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
-+#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
-+
-+#define QE_IC_LOW_SIGNAL 0x00000100
-+#define QE_IC_HIGH_SIGNAL 0x00000200
-+
-+#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
-+#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
-+#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
-+#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
-+#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
-+#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
-+#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
-+#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
-+#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
-+#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
-+#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
-+#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
-+#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
-+
-+/* QE interrupt sources groups */
-+enum qe_ic_grp_id {
-+ QE_IC_GRP_W = 0, /* QE interrupt controller group W */
-+ QE_IC_GRP_X, /* QE interrupt controller group X */
-+ QE_IC_GRP_Y, /* QE interrupt controller group Y */
-+ QE_IC_GRP_Z, /* QE interrupt controller group Z */
-+ QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
-+ QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
-+};
-+
-+struct qe_ic {
-+ /* Control registers offset */
-+ u32 __iomem *regs;
-+
-+ /* The remapper for this QEIC */
-+ struct irq_domain *irqhost;
-+
-+ /* The "linux" controller struct */
-+ struct irq_chip hc_irq;
-+
-+ /* VIRQ numbers of QE high/low irqs */
-+ unsigned int virq_high;
-+ unsigned int virq_low;
-+};
-+
-+/*
-+ * QE interrupt controller internal structure
-+ */
-+struct qe_ic_info {
-+ /* location of this source at the QIMR register. */
-+ u32 mask;
-+
-+ /* Mask register offset */
-+ u32 mask_reg;
-+
-+ /*
-+ * for grouped interrupts sources - the interrupt
-+ * code as appears at the group priority register
-+ */
-+ u8 pri_code;
-+
-+ /* Group priority register offset */
-+ u32 pri_reg;
-+};
-+
-+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
-+
-+static struct qe_ic_info qe_ic_info[] = {
-+ [1] = {
-+ .mask = 0x00008000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 0,
-+ .pri_reg = QEIC_CIPWCC,
-+ },
-+ [2] = {
-+ .mask = 0x00004000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 1,
-+ .pri_reg = QEIC_CIPWCC,
-+ },
-+ [3] = {
-+ .mask = 0x00002000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 2,
-+ .pri_reg = QEIC_CIPWCC,
-+ },
-+ [10] = {
-+ .mask = 0x00000040,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 1,
-+ .pri_reg = QEIC_CIPZCC,
-+ },
-+ [11] = {
-+ .mask = 0x00000020,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 2,
-+ .pri_reg = QEIC_CIPZCC,
-+ },
-+ [12] = {
-+ .mask = 0x00000010,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 3,
-+ .pri_reg = QEIC_CIPZCC,
-+ },
-+ [13] = {
-+ .mask = 0x00000008,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 4,
-+ .pri_reg = QEIC_CIPZCC,
-+ },
-+ [14] = {
-+ .mask = 0x00000004,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 5,
-+ .pri_reg = QEIC_CIPZCC,
-+ },
-+ [15] = {
-+ .mask = 0x00000002,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 6,
-+ .pri_reg = QEIC_CIPZCC,
-+ },
-+ [20] = {
-+ .mask = 0x10000000,
-+ .mask_reg = QEIC_CRIMR,
-+ .pri_code = 3,
-+ .pri_reg = QEIC_CIPRTA,
-+ },
-+ [25] = {
-+ .mask = 0x00800000,
-+ .mask_reg = QEIC_CRIMR,
-+ .pri_code = 0,
-+ .pri_reg = QEIC_CIPRTB,
-+ },
-+ [26] = {
-+ .mask = 0x00400000,
-+ .mask_reg = QEIC_CRIMR,
-+ .pri_code = 1,
-+ .pri_reg = QEIC_CIPRTB,
-+ },
-+ [27] = {
-+ .mask = 0x00200000,
-+ .mask_reg = QEIC_CRIMR,
-+ .pri_code = 2,
-+ .pri_reg = QEIC_CIPRTB,
-+ },
-+ [28] = {
-+ .mask = 0x00100000,
-+ .mask_reg = QEIC_CRIMR,
-+ .pri_code = 3,
-+ .pri_reg = QEIC_CIPRTB,
-+ },
-+ [32] = {
-+ .mask = 0x80000000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 0,
-+ .pri_reg = QEIC_CIPXCC,
-+ },
-+ [33] = {
-+ .mask = 0x40000000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 1,
-+ .pri_reg = QEIC_CIPXCC,
-+ },
-+ [34] = {
-+ .mask = 0x20000000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 2,
-+ .pri_reg = QEIC_CIPXCC,
-+ },
-+ [35] = {
-+ .mask = 0x10000000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 3,
-+ .pri_reg = QEIC_CIPXCC,
-+ },
-+ [36] = {
-+ .mask = 0x08000000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 4,
-+ .pri_reg = QEIC_CIPXCC,
-+ },
-+ [40] = {
-+ .mask = 0x00800000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 0,
-+ .pri_reg = QEIC_CIPYCC,
-+ },
-+ [41] = {
-+ .mask = 0x00400000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 1,
-+ .pri_reg = QEIC_CIPYCC,
-+ },
-+ [42] = {
-+ .mask = 0x00200000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 2,
-+ .pri_reg = QEIC_CIPYCC,
-+ },
-+ [43] = {
-+ .mask = 0x00100000,
-+ .mask_reg = QEIC_CIMR,
-+ .pri_code = 3,
-+ .pri_reg = QEIC_CIPYCC,
-+ },
-+};
-+
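-+/* QEIC registers are big-endian; 'reg' is a byte offset into the register block */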
-+static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
-+{
-+ return ioread32be(base + (reg >> 2));
-+}
-+
-+static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
-+ u32 value)
-+{
-+ iowrite32be(value, base + (reg >> 2));
-+}
-+
-+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
-+{
-+ return irq_get_chip_data(virq);
-+}
-+
-+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
-+{
-+ return irq_data_get_irq_chip_data(d);
-+}
-+
-+static void qe_ic_unmask_irq(struct irq_data *d)
-+{
-+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
-+ unsigned int src = irqd_to_hwirq(d);
-+ unsigned long flags;
-+ u32 temp;
-+
-+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
-+
-+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-+ temp | qe_ic_info[src].mask);
-+
-+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
-+}
-+
-+static void qe_ic_mask_irq(struct irq_data *d)
-+{
-+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
-+ unsigned int src = irqd_to_hwirq(d);
-+ unsigned long flags;
-+ u32 temp;
-+
-+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
-+
-+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-+ temp & ~qe_ic_info[src].mask);
-+
-+ /* Flush the above write before enabling interrupts; otherwise,
-+ * spurious interrupts will sometimes happen. To be 100% sure
-+ * that the write has reached the device before interrupts are
-+ * enabled, the mask register would have to be read back; however,
-+ * this is not required for correctness, only to avoid wasting
-+ * time on a large number of spurious interrupts. In testing,
-+ * a sync reduced the observed spurious interrupts to zero.
-+ */
-+ mb();
-+
-+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
-+}
-+
-+static struct irq_chip qe_ic_irq_chip = {
-+ .name = "QEIC",
-+ .irq_unmask = qe_ic_unmask_irq,
-+ .irq_mask = qe_ic_mask_irq,
-+ .irq_mask_ack = qe_ic_mask_irq,
-+};
-+
-+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
-+ enum irq_domain_bus_token bus_token)
-+{
-+ /* Exact match, unless qe_ic node is NULL */
-+ struct device_node *of_node = irq_domain_get_of_node(h);
-+ return of_node == NULL || of_node == node;
-+}
-+
-+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
-+ irq_hw_number_t hw)
-+{
-+ struct qe_ic *qe_ic = h->host_data;
-+ struct irq_chip *chip;
-+
-+ if (hw >= ARRAY_SIZE(qe_ic_info)) {
-+ pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ if (qe_ic_info[hw].mask == 0) {
-+ printk(KERN_ERR "Can't map reserved IRQ\n");
-+ return -EINVAL;
-+ }
-+ /* Default chip */
-+ chip = &qe_ic->hc_irq;
-+
-+ irq_set_chip_data(virq, qe_ic);
-+ irq_set_status_flags(virq, IRQ_LEVEL);
-+
-+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
-+
-+ return 0;
-+}
-+
-+static const struct irq_domain_ops qe_ic_host_ops = {
-+ .match = qe_ic_host_match,
-+ .map = qe_ic_host_map,
-+ .xlate = irq_domain_xlate_onetwocell,
-+};
-+
-+/* Return an interrupt vector or 0 if no interrupt is pending. */
-+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
-+{
-+ int irq;
-+
-+ BUG_ON(qe_ic == NULL);
-+
-+ /* get the interrupt source vector. */
-+ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
-+
-+ if (irq == 0)
-+ return 0;
-+
-+ return irq_linear_revmap(qe_ic->irqhost, irq);
-+}
-+
-+/* Return an interrupt vector or 0 if no interrupt is pending. */
-+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
-+{
-+ int irq;
-+
-+ BUG_ON(qe_ic == NULL);
-+
-+ /* get the interrupt source vector. */
-+ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
-+
-+ if (irq == 0)
-+ return 0;
-+
-+ return irq_linear_revmap(qe_ic->irqhost, irq);
-+}
-+
-+static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
-+{
-+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-+
-+ if (cascade_irq != 0)
-+ generic_handle_irq(cascade_irq);
-+}
-+
-+static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
-+{
-+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-+
-+ if (cascade_irq != 0)
-+ generic_handle_irq(cascade_irq);
-+}
-+
-+static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
-+{
-+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-+ struct irq_chip *chip = irq_desc_get_chip(desc);
-+
-+ if (cascade_irq != 0)
-+ generic_handle_irq(cascade_irq);
-+
-+ chip->irq_eoi(&desc->irq_data);
-+}
-+
-+static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
-+{
-+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-+ struct irq_chip *chip = irq_desc_get_chip(desc);
-+
-+ if (cascade_irq != 0)
-+ generic_handle_irq(cascade_irq);
-+
-+ chip->irq_eoi(&desc->irq_data);
-+}
-+
-+static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
-+{
-+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-+ unsigned int cascade_irq;
-+ struct irq_chip *chip = irq_desc_get_chip(desc);
-+
-+ cascade_irq = qe_ic_get_high_irq(qe_ic);
-+ if (cascade_irq == 0)
-+ cascade_irq = qe_ic_get_low_irq(qe_ic);
-+
-+ if (cascade_irq != 0)
-+ generic_handle_irq(cascade_irq);
-+
-+ chip->irq_eoi(&desc->irq_data);
-+}
-+
-+static int __init qe_ic_init(struct device_node *node, unsigned int flags)
-+{
-+ struct qe_ic *qe_ic;
-+ struct resource res;
-+ u32 temp = 0, high_active = 0;
-+ int ret = 0;
-+
-+ if (!node)
-+ return -ENODEV;
-+
-+ ret = of_address_to_resource(node, 0, &res);
-+ if (ret) {
-+ ret = -ENODEV;
-+ goto err_put_node;
-+ }
-+
-+ qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
-+ if (qe_ic == NULL) {
-+ ret = -ENOMEM;
-+ goto err_put_node;
-+ }
-+
-+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
-+ &qe_ic_host_ops, qe_ic);
-+ if (qe_ic->irqhost == NULL) {
-+ ret = -ENOMEM;
-+ goto err_free_qe_ic;
-+ }
-+
-+ qe_ic->regs = ioremap(res.start, resource_size(&res));
-+
-+ qe_ic->hc_irq = qe_ic_irq_chip;
-+
-+ qe_ic->virq_high = irq_of_parse_and_map(node, 0);
-+ qe_ic->virq_low = irq_of_parse_and_map(node, 1);
-+
-+ if (qe_ic->virq_low == 0) {
-+ pr_err("Failed to map QE_IC low IRQ\n");
-+ ret = -ENOMEM;
-+ goto err_domain_remove;
-+ }
-+
-+ /* default priority scheme is grouped. If spread mode is */
-+ /* required, configure cicr accordingly. */
-+ if (flags & QE_IC_SPREADMODE_GRP_W)
-+ temp |= CICR_GWCC;
-+ if (flags & QE_IC_SPREADMODE_GRP_X)
-+ temp |= CICR_GXCC;
-+ if (flags & QE_IC_SPREADMODE_GRP_Y)
-+ temp |= CICR_GYCC;
-+ if (flags & QE_IC_SPREADMODE_GRP_Z)
-+ temp |= CICR_GZCC;
-+ if (flags & QE_IC_SPREADMODE_GRP_RISCA)
-+ temp |= CICR_GRTA;
-+ if (flags & QE_IC_SPREADMODE_GRP_RISCB)
-+ temp |= CICR_GRTB;
-+
-+ /* choose destination signal for highest priority interrupt */
-+ if (flags & QE_IC_HIGH_SIGNAL) {
-+ temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
-+ high_active = 1;
-+ }
-+
-+ qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
-+
-+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
-+ irq_set_chained_handler(qe_ic->virq_low, qe_ic_cascade_low_mpic);
-+
-+ if (qe_ic->virq_high != 0 &&
-+ qe_ic->virq_high != qe_ic->virq_low) {
-+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
-+ irq_set_chained_handler(qe_ic->virq_high,
-+ qe_ic_cascade_high_mpic);
-+ }
-+ of_node_put(node);
-+ return 0;
-+
-+err_domain_remove:
-+ irq_domain_remove(qe_ic->irqhost);
-+err_free_qe_ic:
-+ kfree(qe_ic);
-+err_put_node:
-+ of_node_put(node);
-+ return ret;
-+}
-+
-+static int __init init_qe_ic(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ int ret;
-+
-+ ret = qe_ic_init(node, 0);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
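-+/* Register the QEIC with the irqchip core for the "fsl,qe-ic" device-tree node */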
-+IRQCHIP_DECLARE(qeic, "fsl,qe-ic", init_qe_ic);
---- a/drivers/soc/fsl/qe/Kconfig
-+++ b/drivers/soc/fsl/qe/Kconfig
-@@ -4,7 +4,7 @@
-
- config QUICC_ENGINE
- bool "Freescale QUICC Engine (QE) Support"
-- depends on FSL_SOC && PPC32
-+ depends on OF && HAS_IOMEM
- select GENERIC_ALLOCATOR
- select CRC32
- help
---- a/drivers/soc/fsl/qe/Makefile
-+++ b/drivers/soc/fsl/qe/Makefile
-@@ -2,7 +2,7 @@
- #
- # Makefile for the linux ppc-specific parts of QE
- #
--obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
-+obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_io.o
- obj-$(CONFIG_CPM) += qe_common.o
- obj-$(CONFIG_UCC) += ucc.o
- obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
---- a/drivers/soc/fsl/qe/qe.c
-+++ b/drivers/soc/fsl/qe/qe.c
-@@ -33,8 +33,6 @@
- #include <asm/pgtable.h>
- #include <soc/fsl/qe/immap_qe.h>
- #include <soc/fsl/qe/qe.h>
--#include <asm/prom.h>
--#include <asm/rheap.h>
-
- static void qe_snums_init(void);
- static int qe_sdma_init(void);
-@@ -107,15 +105,27 @@ void qe_reset(void)
- panic("sdma init failed!");
- }
-
-+/* Issue a command to the QE; returns 0 on success or -EIO on error.
-+ *
-+ * @cmd: the command code, e.g. QE_INIT_TX_RX, QE_STOP_TX and so on
-+ * @device: which sub-block will run the command, e.g. QE_CR_SUBBLOCK_UCCFAST1 - 8,
-+ * QE_CR_SUBBLOCK_UCCSLOW1 - 8, QE_CR_SUBBLOCK_MCC1 - 3,
-+ * QE_CR_SUBBLOCK_IDMA1 - 4 and so on.
-+ * @mcn_protocol: specifies the mode for the command for non-MCC sub-blocks, e.g.
-+ * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
-+ * and so on.
-+ * @cmd_input: command-related data.
-+ */
- int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
- {
- unsigned long flags;
- u8 mcn_shift = 0, dev_shift = 0;
-- u32 ret;
-+ int ret;
-+ int i;
-
- spin_lock_irqsave(&qe_lock, flags);
- if (cmd == QE_RESET) {
-- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
-+ iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr);
- } else {
- if (cmd == QE_ASSIGN_PAGE) {
- /* Here device is the SNUM, not sub-block */
-@@ -132,20 +142,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8
- mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
- }
-
-- out_be32(&qe_immr->cp.cecdr, cmd_input);
-- out_be32(&qe_immr->cp.cecr,
-- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
-- mcn_protocol << mcn_shift));
-+ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
-+ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) |
-+ (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
- }
-
- /* wait for the QE_CR_FLG to clear */
-- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
-- 100, 0);
-+ ret = -EIO;
-+ for (i = 0; i < 100; i++) {
-+ if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) {
-+ ret = 0;
-+ break;
-+ }
-+ udelay(1);
-+ }
-+
- /* On timeout (e.g. failure), the expression will be false (ret == 0),
- otherwise it will be true (ret == 1). */
- spin_unlock_irqrestore(&qe_lock, flags);
-
-- return ret == 1;
-+ return ret;
- }
- EXPORT_SYMBOL(qe_issue_cmd);
-
-@@ -170,6 +186,8 @@ unsigned int qe_get_brg_clk(void)
- int size;
- const u32 *prop;
- unsigned int mod;
-+ u32 val;
-+ int ret;
-
- if (brg_clk)
- return brg_clk;
-@@ -181,9 +199,9 @@ unsigned int qe_get_brg_clk(void)
- return brg_clk;
- }
-
-- prop = of_get_property(qe, "brg-frequency", &size);
-- if (prop && size == sizeof(*prop))
-- brg_clk = *prop;
-+ ret = of_property_read_u32(qe, "brg-frequency", &val);
-+ if (!ret)
-+ brg_clk = val;
-
- of_node_put(qe);
-
-@@ -229,14 +247,16 @@ int qe_setbrg(enum qe_clock brg, unsigne
- /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
- that the BRG divisor must be even if you're not using divide-by-16
- mode. */
-+#ifdef CONFIG_PPC
- if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
- if (!div16 && (divisor & 1) && (divisor > 3))
- divisor++;
-+#endif
-
- tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
- QE_BRGC_ENABLE | div16;
-
-- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
-+ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
-
- return 0;
- }
-@@ -370,9 +390,9 @@ static int qe_sdma_init(void)
- return -ENOMEM;
- }
-
-- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
-- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
-- (0x1 << QE_SDMR_CEN_SHIFT)));
-+ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
-+ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
-+ &sdma->sdmr);
-
- return 0;
- }
-@@ -410,14 +430,14 @@ static void qe_upload_microcode(const vo
- "uploading microcode '%s'\n", ucode->id);
-
- /* Use auto-increment */
-- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
-- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
-+ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
-+ QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
-
- for (i = 0; i < be32_to_cpu(ucode->count); i++)
-- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
-+ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
-
- /* Set I-RAM Ready Register */
-- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
-+ iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
- }
-
- /*
-@@ -502,7 +522,7 @@ int qe_upload_firmware(const struct qe_f
- * If the microcode calls for it, split the I-RAM.
- */
- if (!firmware->split)
-- setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
-+ qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
-
- if (firmware->soc.model)
- printk(KERN_INFO
-@@ -536,11 +556,11 @@ int qe_upload_firmware(const struct qe_f
- u32 trap = be32_to_cpu(ucode->traps[j]);
-
- if (trap)
-- out_be32(&qe_immr->rsp[i].tibcr[j], trap);
-+ iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
- }
-
- /* Enable traps */
-- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
-+ iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
- }
-
- qe_firmware_uploaded = 1;
-@@ -659,9 +679,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
- unsigned int qe_get_num_of_snums(void)
- {
- struct device_node *qe;
-- int size;
- unsigned int num_of_snums;
-- const u32 *prop;
-+ u32 val;
-+ int ret;
-
- num_of_snums = 28; /* The default number of snum for threads is 28 */
- qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
-@@ -675,9 +695,9 @@ unsigned int qe_get_num_of_snums(void)
- return num_of_snums;
- }
-
-- prop = of_get_property(qe, "fsl,qe-num-snums", &size);
-- if (prop && size == sizeof(*prop)) {
-- num_of_snums = *prop;
-+ ret = of_property_read_u32(qe, "fsl,qe-num-snums", &val);
-+ if (!ret) {
-+ num_of_snums = val;
- if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
- /* No QE ever has fewer than 28 SNUMs */
- pr_err("QE: number of snum is invalid\n");
---- a/drivers/soc/fsl/qe/qe_ic.h
-+++ /dev/null
-@@ -1,103 +0,0 @@
--/*
-- * drivers/soc/fsl/qe/qe_ic.h
-- *
-- * QUICC ENGINE Interrupt Controller Header
-- *
-- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
-- *
-- * Author: Li Yang <leoli@freescale.com>
-- * Based on code from Shlomi Gridish <gridish@freescale.com>
-- *
-- * This program is free software; you can redistribute it and/or modify it
-- * under the terms of the GNU General Public License as published by the
-- * Free Software Foundation; either version 2 of the License, or (at your
-- * option) any later version.
-- */
--#ifndef _POWERPC_SYSDEV_QE_IC_H
--#define _POWERPC_SYSDEV_QE_IC_H
--
--#include <soc/fsl/qe/qe_ic.h>
--
--#define NR_QE_IC_INTS 64
--
--/* QE IC registers offset */
--#define QEIC_CICR 0x00
--#define QEIC_CIVEC 0x04
--#define QEIC_CRIPNR 0x08
--#define QEIC_CIPNR 0x0c
--#define QEIC_CIPXCC 0x10
--#define QEIC_CIPYCC 0x14
--#define QEIC_CIPWCC 0x18
--#define QEIC_CIPZCC 0x1c
--#define QEIC_CIMR 0x20
--#define QEIC_CRIMR 0x24
--#define QEIC_CICNR 0x28
--#define QEIC_CIPRTA 0x30
--#define QEIC_CIPRTB 0x34
--#define QEIC_CRICR 0x3c
--#define QEIC_CHIVEC 0x60
--
--/* Interrupt priority registers */
--#define CIPCC_SHIFT_PRI0 29
--#define CIPCC_SHIFT_PRI1 26
--#define CIPCC_SHIFT_PRI2 23
--#define CIPCC_SHIFT_PRI3 20
--#define CIPCC_SHIFT_PRI4 13
--#define CIPCC_SHIFT_PRI5 10
--#define CIPCC_SHIFT_PRI6 7
--#define CIPCC_SHIFT_PRI7 4
--
--/* CICR priority modes */
--#define CICR_GWCC 0x00040000
--#define CICR_GXCC 0x00020000
--#define CICR_GYCC 0x00010000
--#define CICR_GZCC 0x00080000
--#define CICR_GRTA 0x00200000
--#define CICR_GRTB 0x00400000
--#define CICR_HPIT_SHIFT 8
--#define CICR_HPIT_MASK 0x00000300
--#define CICR_HP_SHIFT 24
--#define CICR_HP_MASK 0x3f000000
--
--/* CICNR */
--#define CICNR_WCC1T_SHIFT 20
--#define CICNR_ZCC1T_SHIFT 28
--#define CICNR_YCC1T_SHIFT 12
--#define CICNR_XCC1T_SHIFT 4
--
--/* CRICR */
--#define CRICR_RTA1T_SHIFT 20
--#define CRICR_RTB1T_SHIFT 28
--
--/* Signal indicator */
--#define SIGNAL_MASK 3
--#define SIGNAL_HIGH 2
--#define SIGNAL_LOW 0
--
--struct qe_ic {
-- /* Control registers offset */
-- volatile u32 __iomem *regs;
--
-- /* The remapper for this QEIC */
-- struct irq_domain *irqhost;
--
-- /* The "linux" controller struct */
-- struct irq_chip hc_irq;
--
-- /* VIRQ numbers of QE high/low irqs */
-- unsigned int virq_high;
-- unsigned int virq_low;
--};
--
--/*
-- * QE interrupt controller internal structure
-- */
--struct qe_ic_info {
-- u32 mask; /* location of this source at the QIMR register. */
-- u32 mask_reg; /* Mask register offset */
-- u8 pri_code; /* for grouped interrupts sources - the interrupt
-- code as appears at the group priority register */
-- u32 pri_reg; /* Group priority register offset */
--};
--
--#endif /* _POWERPC_SYSDEV_QE_IC_H */
---- a/drivers/soc/fsl/qe/qe_io.c
-+++ b/drivers/soc/fsl/qe/qe_io.c
-@@ -22,8 +22,6 @@
-
- #include <asm/io.h>
- #include <soc/fsl/qe/qe.h>
--#include <asm/prom.h>
--#include <sysdev/fsl_soc.h>
-
- #undef DEBUG
-
-@@ -61,16 +59,16 @@ void __par_io_config_pin(struct qe_pio_r
- pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
-
- /* Set open drain, if required */
-- tmp_val = in_be32(&par_io->cpodr);
-+ tmp_val = ioread32be(&par_io->cpodr);
- if (open_drain)
-- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
-+ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
- else
-- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
-+ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
-
- /* define direction */
- tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-- in_be32(&par_io->cpdir2) :
-- in_be32(&par_io->cpdir1);
-+ ioread32be(&par_io->cpdir2) :
-+ ioread32be(&par_io->cpdir1);
-
- /* get all bits mask for 2 bit per port */
- pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
-@@ -82,34 +80,30 @@ void __par_io_config_pin(struct qe_pio_r
-
- /* clear and set 2 bits mask */
- if (pin > (QE_PIO_PINS / 2) - 1) {
-- out_be32(&par_io->cpdir2,
-- ~pin_mask2bits & tmp_val);
-+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
- tmp_val &= ~pin_mask2bits;
-- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
-+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
- } else {
-- out_be32(&par_io->cpdir1,
-- ~pin_mask2bits & tmp_val);
-+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
- tmp_val &= ~pin_mask2bits;
-- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
-+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
- }
- /* define pin assignment */
- tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-- in_be32(&par_io->cppar2) :
-- in_be32(&par_io->cppar1);
-+ ioread32be(&par_io->cppar2) :
-+ ioread32be(&par_io->cppar1);
-
- new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
- (pin % (QE_PIO_PINS / 2) + 1) * 2));
- /* clear and set 2 bits mask */
- if (pin > (QE_PIO_PINS / 2) - 1) {
-- out_be32(&par_io->cppar2,
-- ~pin_mask2bits & tmp_val);
-+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
- tmp_val &= ~pin_mask2bits;
-- out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
-+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
- } else {
-- out_be32(&par_io->cppar1,
-- ~pin_mask2bits & tmp_val);
-+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
- tmp_val &= ~pin_mask2bits;
-- out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
-+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
- }
- }
- EXPORT_SYMBOL(__par_io_config_pin);
-@@ -137,12 +131,12 @@ int par_io_data_set(u8 port, u8 pin, u8
- /* calculate pin location */
- pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
-
-- tmp_val = in_be32(&par_io[port].cpdata);
-+ tmp_val = ioread32be(&par_io[port].cpdata);
-
- if (val == 0) /* clear */
-- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
-+ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
- else /* set */
-- out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
-+ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
-
- return 0;
- }
---- a/drivers/soc/fsl/qe/qe_tdm.c
-+++ b/drivers/soc/fsl/qe/qe_tdm.c
-@@ -228,10 +228,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm,
- &siram[siram_entry_id * 32 + 0x200 + i]);
- }
-
-- setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
-- SIR_LAST);
-- setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
-- SIR_LAST);
-+ qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
-+ SIR_LAST);
-+ qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 +
-+ (utdm->num_of_ts - 1)], SIR_LAST);
-
- /* Set SIxMR register */
- sixmr = SIMR_SAD(siram_entry_id);
---- a/drivers/soc/fsl/qe/ucc.c
-+++ b/drivers/soc/fsl/qe/ucc.c
-@@ -39,7 +39,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int
- return -EINVAL;
-
- spin_lock_irqsave(&cmxgcr_lock, flags);
-- clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
-+ qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
- ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
- spin_unlock_irqrestore(&cmxgcr_lock, flags);
-
-@@ -84,7 +84,7 @@ int ucc_set_type(unsigned int ucc_num, e
- return -EINVAL;
- }
-
-- clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
-+ qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK,
- UCC_GUEMR_SET_RESERVED3 | speed);
-
- return 0;
-@@ -113,9 +113,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned
- get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
-
- if (set)
-- setbits32(cmxucr, mask << shift);
-+ qe_setbits32(cmxucr, mask << shift);
- else
-- clrbits32(cmxucr, mask << shift);
-+ qe_clrbits32(cmxucr, mask << shift);
-
- return 0;
- }
-@@ -211,7 +211,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc
- if (mode == COMM_DIR_RX)
- shift += 4;
-
-- clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-+ qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
- clock_bits << shift);
-
- return 0;
---- a/drivers/soc/fsl/qe/ucc_fast.c
-+++ b/drivers/soc/fsl/qe/ucc_fast.c
-@@ -33,41 +33,41 @@ void ucc_fast_dump_regs(struct ucc_fast_
- printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
-
- printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
-- &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
-+ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
- printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
-- &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
-+ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
- printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
-+ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
- printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
-+ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
- printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
-- &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
-+ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
- printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
-- &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
-+ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
- printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
-- &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
-+ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
- printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
-- &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
-+ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
- printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
-+ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
- printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
-+ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
- printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
-+ &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset));
- printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
-- &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
-+ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
- printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
-+ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
- printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
-+ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
- printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
-+ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
- printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
-- &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
-+ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
- printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
-- &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
-+ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
- printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
-- &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
-+ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
- }
- EXPORT_SYMBOL(ucc_fast_dump_regs);
-
-@@ -89,7 +89,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subbloc
-
- void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
- {
-- out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
-+ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
- }
- EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
-
-@@ -101,7 +101,7 @@ void ucc_fast_enable(struct ucc_fast_pri
- uf_regs = uccf->uf_regs;
-
- /* Enable reception and/or transmission on this UCC. */
-- gumr = in_be32(&uf_regs->gumr);
-+ gumr = ioread32be(&uf_regs->gumr);
- if (mode & COMM_DIR_TX) {
- gumr |= UCC_FAST_GUMR_ENT;
- uccf->enabled_tx = 1;
-@@ -110,7 +110,7 @@ void ucc_fast_enable(struct ucc_fast_pri
- gumr |= UCC_FAST_GUMR_ENR;
- uccf->enabled_rx = 1;
- }
-- out_be32(&uf_regs->gumr, gumr);
-+ iowrite32be(gumr, &uf_regs->gumr);
- }
- EXPORT_SYMBOL(ucc_fast_enable);
-
-@@ -122,7 +122,7 @@ void ucc_fast_disable(struct ucc_fast_pr
- uf_regs = uccf->uf_regs;
-
- /* Disable reception and/or transmission on this UCC. */
-- gumr = in_be32(&uf_regs->gumr);
-+ gumr = ioread32be(&uf_regs->gumr);
- if (mode & COMM_DIR_TX) {
- gumr &= ~UCC_FAST_GUMR_ENT;
- uccf->enabled_tx = 0;
-@@ -131,7 +131,7 @@ void ucc_fast_disable(struct ucc_fast_pr
- gumr &= ~UCC_FAST_GUMR_ENR;
- uccf->enabled_rx = 0;
- }
-- out_be32(&uf_regs->gumr, gumr);
-+ iowrite32be(gumr, &uf_regs->gumr);
- }
- EXPORT_SYMBOL(ucc_fast_disable);
-
-@@ -263,12 +263,13 @@ int ucc_fast_init(struct ucc_fast_info *
- gumr |= uf_info->tenc;
- gumr |= uf_info->tcrc;
- gumr |= uf_info->mode;
-- out_be32(&uf_regs->gumr, gumr);
-+ iowrite32be(gumr, &uf_regs->gumr);
-
- /* Allocate memory for Tx Virtual Fifo */
- uccf->ucc_fast_tx_virtual_fifo_base_offset =
- qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-- if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
-+ if (IS_ERR_VALUE((unsigned long)uccf->
-+ ucc_fast_tx_virtual_fifo_base_offset)) {
- printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
- __func__);
- uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
-@@ -281,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info *
- qe_muram_alloc(uf_info->urfs +
- UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
- UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-- if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
-+ if (IS_ERR_VALUE((unsigned long)uccf->
-+ ucc_fast_rx_virtual_fifo_base_offset)) {
- printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
- __func__);
- uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
-@@ -290,15 +292,15 @@ int ucc_fast_init(struct ucc_fast_info *
- }
-
- /* Set Virtual Fifo registers */
-- out_be16(&uf_regs->urfs, uf_info->urfs);
-- out_be16(&uf_regs->urfet, uf_info->urfet);
-- out_be16(&uf_regs->urfset, uf_info->urfset);
-- out_be16(&uf_regs->utfs, uf_info->utfs);
-- out_be16(&uf_regs->utfet, uf_info->utfet);
-- out_be16(&uf_regs->utftt, uf_info->utftt);
-+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
-+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
-+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
-+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
-+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
-+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
- /* utfb, urfb are offsets from MURAM base */
-- out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
-- out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
-+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
-+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
-
- /* Mux clocking */
- /* Grant Support */
-@@ -366,14 +368,14 @@ int ucc_fast_init(struct ucc_fast_info *
- }
-
- /* Set interrupt mask register at UCC level. */
-- out_be32(&uf_regs->uccm, uf_info->uccm_mask);
-+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
-
- /* First, clear anything pending at UCC level,
- * otherwise, old garbage may come through
- * as soon as the dam is opened. */
-
- /* Writing '1' clears */
-- out_be32(&uf_regs->ucce, 0xffffffff);
-+ iowrite32be(0xffffffff, &uf_regs->ucce);
-
- *uccf_ret = uccf;
- return 0;
---- a/drivers/tty/serial/ucc_uart.c
-+++ b/drivers/tty/serial/ucc_uart.c
-@@ -34,6 +34,7 @@
- #include <soc/fsl/qe/ucc_slow.h>
-
- #include <linux/firmware.h>
-+#include <asm/cpm.h>
- #include <asm/reg.h>
-
- /*
---- a/include/soc/fsl/qe/qe.h
-+++ b/include/soc/fsl/qe/qe.h
-@@ -21,7 +21,6 @@
- #include <linux/spinlock.h>
- #include <linux/errno.h>
- #include <linux/err.h>
--#include <asm/cpm.h>
- #include <soc/fsl/qe/immap_qe.h>
- #include <linux/of.h>
- #include <linux/of_address.h>
---- a/include/soc/fsl/qe/qe_ic.h
-+++ /dev/null
-@@ -1,139 +0,0 @@
--/*
-- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
-- *
-- * Authors: Shlomi Gridish <gridish@freescale.com>
-- * Li Yang <leoli@freescale.com>
-- *
-- * Description:
-- * QE IC external definitions and structure.
-- *
-- * This program is free software; you can redistribute it and/or modify it
-- * under the terms of the GNU General Public License as published by the
-- * Free Software Foundation; either version 2 of the License, or (at your
-- * option) any later version.
-- */
--#ifndef _ASM_POWERPC_QE_IC_H
--#define _ASM_POWERPC_QE_IC_H
--
--#include <linux/irq.h>
--
--struct device_node;
--struct qe_ic;
--
--#define NUM_OF_QE_IC_GROUPS 6
--
--/* Flags when we init the QE IC */
--#define QE_IC_SPREADMODE_GRP_W 0x00000001
--#define QE_IC_SPREADMODE_GRP_X 0x00000002
--#define QE_IC_SPREADMODE_GRP_Y 0x00000004
--#define QE_IC_SPREADMODE_GRP_Z 0x00000008
--#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
--#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
--
--#define QE_IC_LOW_SIGNAL 0x00000100
--#define QE_IC_HIGH_SIGNAL 0x00000200
--
--#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
--#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
--#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
--#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
--#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
--#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
--#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
--#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
--#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
--#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
--#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
--#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
--#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
--
--/* QE interrupt sources groups */
--enum qe_ic_grp_id {
-- QE_IC_GRP_W = 0, /* QE interrupt controller group W */
-- QE_IC_GRP_X, /* QE interrupt controller group X */
-- QE_IC_GRP_Y, /* QE interrupt controller group Y */
-- QE_IC_GRP_Z, /* QE interrupt controller group Z */
-- QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
-- QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
--};
--
--#ifdef CONFIG_QUICC_ENGINE
--void qe_ic_init(struct device_node *node, unsigned int flags,
-- void (*low_handler)(struct irq_desc *desc),
-- void (*high_handler)(struct irq_desc *desc));
--unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
--unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
--#else
--static inline void qe_ic_init(struct device_node *node, unsigned int flags,
-- void (*low_handler)(struct irq_desc *desc),
-- void (*high_handler)(struct irq_desc *desc))
--{}
--static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
--{ return 0; }
--static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
--{ return 0; }
--#endif /* CONFIG_QUICC_ENGINE */
--
--void qe_ic_set_highest_priority(unsigned int virq, int high);
--int qe_ic_set_priority(unsigned int virq, unsigned int priority);
--int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
--
--static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
--{
-- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
--
-- if (cascade_irq != NO_IRQ)
-- generic_handle_irq(cascade_irq);
--}
--
--static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
--{
-- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
--
-- if (cascade_irq != NO_IRQ)
-- generic_handle_irq(cascade_irq);
--}
--
--static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
--{
-- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-- struct irq_chip *chip = irq_desc_get_chip(desc);
--
-- if (cascade_irq != NO_IRQ)
-- generic_handle_irq(cascade_irq);
--
-- chip->irq_eoi(&desc->irq_data);
--}
--
--static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
--{
-- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-- struct irq_chip *chip = irq_desc_get_chip(desc);
--
-- if (cascade_irq != NO_IRQ)
-- generic_handle_irq(cascade_irq);
--
-- chip->irq_eoi(&desc->irq_data);
--}
--
--static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
--{
-- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-- unsigned int cascade_irq;
-- struct irq_chip *chip = irq_desc_get_chip(desc);
--
-- cascade_irq = qe_ic_get_high_irq(qe_ic);
-- if (cascade_irq == NO_IRQ)
-- cascade_irq = qe_ic_get_low_irq(qe_ic);
--
-- if (cascade_irq != NO_IRQ)
-- generic_handle_irq(cascade_irq);
--
-- chip->irq_eoi(&desc->irq_data);
--}
--
--#endif /* _ASM_POWERPC_QE_IC_H */
diff --git a/target/linux/layerscape/patches-4.14/806-rtc-support-layerscape.patch b/target/linux/layerscape/patches-4.14/806-rtc-support-layerscape.patch
deleted file mode 100644
index 5e4aac047a..0000000000
--- a/target/linux/layerscape/patches-4.14/806-rtc-support-layerscape.patch
+++ /dev/null
@@ -1,776 +0,0 @@
-From f8d89482075e2a4a62fc5cbacf6bea6baf4dc65f Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:27:31 +0800
-Subject: [PATCH 23/40] rtc: support layerscape
-This is an integrated patch of rtc for layerscape
-
-Signed-off-by: Martin Fuzzey <mfuzzey@parkeon.com>
-Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- .../devicetree/bindings/rtc/nxp,pcf85263.txt | 42 ++
- drivers/rtc/Kconfig | 8 +
- drivers/rtc/Makefile | 1 +
- drivers/rtc/rtc-pcf85263.c | 664 ++++++++++++++++++
- include/dt-bindings/rtc/nxp,pcf85263.h | 14 +
- 5 files changed, 729 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/rtc/nxp,pcf85263.txt
- create mode 100644 drivers/rtc/rtc-pcf85263.c
- create mode 100644 include/dt-bindings/rtc/nxp,pcf85263.h
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf85263.txt
-@@ -0,0 +1,42 @@
-+NXP PCF85263 I2C Real Time Clock
-+
-+Required properties:
-+- compatible: must be "nxp,pcf85263"
-+- reg: must be the I2C address
-+
-+Optional properties:
-+- interrupt-names: which interrupt signal is used; must be "INTA" or "INTB".
-+ Defaults to "INTA".
-+
-+- quartz-load-capacitance: The internal capacitor to select for the quartz:
-+ PCF85263_QUARTZCAP_7pF [0]
-+ PCF85263_QUARTZCAP_6pF [1]
-+ PCF85263_QUARTZCAP_12p5pF [2] DEFAULT
-+
-+- quartz-drive-strength: Drive strength for the quartz:
-+ PCF85263_QUARTZDRIVE_NORMAL [0] DEFAULT
-+ PCF85263_QUARTZDRIVE_LOW [1]
-+ PCF85263_QUARTZDRIVE_HIGH [2]
-+
-+- quartz-low-jitter: Boolean property; if present, enables low jitter
-+ mode, which reduces jitter at the cost of increased power
-+ consumption.
-+
-+- wakeup-source: mark the chip as a wakeup source, independently of
-+ the availability of an IRQ line connected to the SoC.
-+ This is useful if the IRQ line is connected to a PMIC or other circuit
-+ that can power up the device rather than to a normal SOC interrupt.
-+
-+Example:
-+
-+rtc@51 {
-+ compatible = "nxp,pcf85263";
-+ reg = <0x51>;
-+
-+ interrupt-parent = <&gpio4>;
-+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
-+ interrupt-names = "INTB";
-+
-+ quartz-load-capacitance = <PCF85263_QUARTZCAP_12p5pF>;
-+ quartz-drive-strength = <PCF85263_QUARTZDRIVE_LOW>;
-+};
---- a/drivers/rtc/Kconfig
-+++ b/drivers/rtc/Kconfig
-@@ -434,6 +434,14 @@ config RTC_DRV_PCF85063
- This driver can also be built as a module. If so, the module
- will be called rtc-pcf85063.
-
-+config RTC_DRV_PCF85263
-+ tristate "NXP PCF85263"
-+ help
-+ If you say yes here you get support for the PCF85263 RTC chip
-+
-+ This driver can also be built as a module. If so, the module
-+ will be called rtc-pcf85263.
-+
- config RTC_DRV_PCF8563
- tristate "Philips PCF8563/Epson RTC8564"
- help
---- a/drivers/rtc/Makefile
-+++ b/drivers/rtc/Makefile
-@@ -115,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf
- obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
- obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
- obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
-+obj-$(CONFIG_RTC_DRV_PCF85263) += rtc-pcf85263.o
- obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
- obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
- obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o
---- /dev/null
-+++ b/drivers/rtc/rtc-pcf85263.c
-@@ -0,0 +1,664 @@
-+/*
-+ * rtc-pcf85263 Driver for the NXP PCF85263 RTC
-+ * Copyright 2016 Parkeon
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/rtc.h>
-+#include <linux/i2c.h>
-+#include <linux/bcd.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/regmap.h>
-+
-+
-+#define DRV_NAME "rtc-pcf85263"
-+
-+/* Quartz capacitance */
-+#define PCF85263_QUARTZCAP_7pF 0
-+#define PCF85263_QUARTZCAP_6pF 1
-+#define PCF85263_QUARTZCAP_12p5pF 2
-+
-+/* Quartz drive strength */
-+#define PCF85263_QUARTZDRIVE_NORMAL 0
-+#define PCF85263_QUARTZDRIVE_LOW 1
-+#define PCF85263_QUARTZDRIVE_HIGH 2
-+
-+
-+#define PCF85263_REG_RTC_SC 0x01 /* Seconds */
-+#define PCF85263_REG_RTC_SC_OS BIT(7) /* Oscillator stopped flag */
-+
-+#define PCF85263_REG_RTC_MN 0x02 /* Minutes */
-+#define PCF85263_REG_RTC_HR 0x03 /* Hours */
-+#define PCF85263_REG_RTC_DT 0x04 /* Day of month 1-31 */
-+#define PCF85263_REG_RTC_DW 0x05 /* Day of week 0-6 */
-+#define PCF85263_REG_RTC_MO 0x06 /* Month 1-12 */
-+#define PCF85263_REG_RTC_YR 0x07 /* Year 0-99 */
-+
-+#define PCF85263_REG_ALM1_SC 0x08 /* Seconds */
-+#define PCF85263_REG_ALM1_MN 0x09 /* Minutes */
-+#define PCF85263_REG_ALM1_HR 0x0a /* Hours */
-+#define PCF85263_REG_ALM1_DT 0x0b /* Day of month 1-31 */
-+#define PCF85263_REG_ALM1_MO 0x0c /* Month 1-12 */
-+
-+#define PCF85263_REG_ALM_CTL 0x10
-+#define PCF85263_REG_ALM_CTL_ALL_A1E 0x1f /* sec,min,hr,day,mon alarm 1 */
-+
-+#define PCF85263_REG_OSC 0x25
-+#define PCF85263_REG_OSC_CL_MASK (BIT(0) | BIT(1))
-+#define PCF85263_REG_OSC_CL_SHIFT 0
-+#define PCF85263_REG_OSC_OSCD_MASK (BIT(2) | BIT(3))
-+#define PCF85263_REG_OSC_OSCD_SHIFT 2
-+#define PCF85263_REG_OSC_LOWJ BIT(4)
-+#define PCF85263_REG_OSC_12H BIT(5)
-+
-+#define PCF85263_REG_PINIO 0x27
-+#define PCF85263_REG_PINIO_INTAPM_MASK (BIT(0) | BIT(1))
-+#define PCF85263_REG_PINIO_INTAPM_SHIFT 0
-+#define PCF85263_INTAPM_INTA (0x2 << PCF85263_REG_PINIO_INTAPM_SHIFT)
-+#define PCF85263_INTAPM_HIGHZ (0x3 << PCF85263_REG_PINIO_INTAPM_SHIFT)
-+#define PCF85263_REG_PINIO_TSPM_MASK (BIT(2) | BIT(3))
-+#define PCF85263_REG_PINIO_TSPM_SHIFT 2
-+#define PCF85263_TSPM_DISABLED (0x0 << PCF85263_REG_PINIO_TSPM_SHIFT)
-+#define PCF85263_TSPM_INTB (0x1 << PCF85263_REG_PINIO_TSPM_SHIFT)
-+#define PCF85263_REG_PINIO_CLKDISABLE BIT(7)
-+
-+#define PCF85263_REG_FUNCTION 0x28
-+#define PCF85263_REG_FUNCTION_COF_MASK 0x7
-+#define PCF85263_REG_FUNCTION_COF_OFF 0x7 /* No clock output */
-+
-+#define PCF85263_REG_INTA_CTL 0x29
-+#define PCF85263_REG_INTB_CTL 0x2A
-+#define PCF85263_REG_INTx_CTL_A1E BIT(4) /* Alarm 1 */
-+#define PCF85263_REG_INTx_CTL_ILP BIT(7) /* 0=pulse, 1=level */
-+
-+#define PCF85263_REG_FLAGS 0x2B
-+#define PCF85263_REG_FLAGS_A1F BIT(5)
-+
-+#define PCF85263_REG_RAM_BYTE 0x2c
-+
-+#define PCF85263_REG_STOPENABLE 0x2e
-+#define PCF85263_REG_STOPENABLE_STOP BIT(0)
-+
-+#define PCF85263_REG_RESET 0x2f /* Reset command */
-+#define PCF85263_REG_RESET_CMD_CPR 0xa4 /* Clear prescaler */
-+
-+#define PCF85263_MAX_REG 0x2f
-+
-+#define PCF85263_HR_PM BIT(5)
-+
-+enum pcf85263_irqpin {
-+ PCF85263_IRQPIN_NONE,
-+ PCF85263_IRQPIN_INTA,
-+ PCF85263_IRQPIN_INTB
-+};
-+
-+static const char *const pcf85263_irqpin_names[] = {
-+ [PCF85263_IRQPIN_NONE] = "None",
-+ [PCF85263_IRQPIN_INTA] = "INTA",
-+ [PCF85263_IRQPIN_INTB] = "INTB"
-+};
-+
-+struct pcf85263 {
-+ struct device *dev;
-+ struct rtc_device *rtc;
-+ struct regmap *regmap;
-+ enum pcf85263_irqpin irq_pin;
-+ int irq;
-+ bool mode_12h;
-+};
-+
-+/*
-+ * Helpers to convert 12h to 24h and vice versa.
-+ * Values in register are stored in BCD with a PM flag in bit 5
-+ *
-+ * 23:00 <=> 11PM <=> 0x31
-+ * 00:00 <=> 12AM <=> 0x12
-+ * 01:00 <=> 1AM <=> 0x01
-+ * 12:00 <=> 12PM <=> 0x32
-+ * 13:00 <=> 1PM <=> 0x21
-+ */
-+static int pcf85263_bcd12h_to_bin24h(int regval)
-+{
-+ int hr = bcd2bin(regval & 0x1f);
-+ bool pm = regval & PCF85263_HR_PM;
-+
-+ if (hr == 12)
-+ return pm ? 12 : 0;
-+
-+ return pm ? hr + 12 : hr;
-+}
-+
-+static int pcf85263_bin24h_to_bcd12h(int hr24)
-+{
-+ bool pm = hr24 >= 12;
-+ int hr12 = hr24 % 12;
-+
-+ if (!hr12)
-+ hr12 += 12;
-+
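-+ /* BCD hours 1-12 in the low bits, with the PM flag set for 12:00-23:59 */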
-+ return bin2bcd(hr12) | (pm ? PCF85263_HR_PM : 0);
-+}
-+
-+static int pcf85263_read_time(struct device *dev, struct rtc_time *tm)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+ const int first = PCF85263_REG_RTC_SC;
-+ const int last = PCF85263_REG_RTC_YR;
-+ const int len = last - first + 1;
-+ u8 regs[len];
-+ u8 hr_reg;
-+ int ret;
-+
-+ ret = regmap_bulk_read(pcf85263->regmap, first, regs, len);
-+ if (ret)
-+ return ret;
-+
-+ if (regs[PCF85263_REG_RTC_SC - first] & PCF85263_REG_RTC_SC_OS) {
-+ dev_warn(dev, "Oscillator stop detected, date/time is not reliable.\n");
-+ return -EINVAL;
-+ }
-+
-+ tm->tm_sec = bcd2bin(regs[PCF85263_REG_RTC_SC - first] & 0x7f);
-+ tm->tm_min = bcd2bin(regs[PCF85263_REG_RTC_MN - first] & 0x7f);
-+
-+ hr_reg = regs[PCF85263_REG_RTC_HR - first];
-+ if (pcf85263->mode_12h)
-+ tm->tm_hour = pcf85263_bcd12h_to_bin24h(hr_reg);
-+ else
-+ tm->tm_hour = bcd2bin(hr_reg & 0x3f);
-+
-+ tm->tm_mday = bcd2bin(regs[PCF85263_REG_RTC_DT - first]);
-+ tm->tm_wday = bcd2bin(regs[PCF85263_REG_RTC_DW - first]);
-+ tm->tm_mon = bcd2bin(regs[PCF85263_REG_RTC_MO - first]) - 1;
-+ tm->tm_year = bcd2bin(regs[PCF85263_REG_RTC_YR - first]);
-+
-+ tm->tm_year += 100; /* Assume 21st century */
-+
-+ return 0;
-+}
-+
-+static int pcf85263_set_time(struct device *dev, struct rtc_time *tm)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+
-+ /*
-+ * Before setting time need to stop RTC and disable prescaler
-+ * Do this all in a single I2C transaction exploiting wraparound
-+ * as described in data sheet.
-+ * This means that the array below must be in register order
-+ */
-+ u8 regs[] = {
-+ PCF85263_REG_STOPENABLE_STOP, /* STOP */
-+ PCF85263_REG_RESET_CMD_CPR, /* Disable prescaler */
-+ /* Wrap around to register 0 (1/100s) */
-+ 0, /* 1/100s always zero. */
-+ bin2bcd(tm->tm_sec),
-+ bin2bcd(tm->tm_min),
-+ bin2bcd(tm->tm_hour), /* 24-hour */
-+ bin2bcd(tm->tm_mday),
-+ bin2bcd(tm->tm_wday + 1),
-+ bin2bcd(tm->tm_mon + 1),
-+ bin2bcd(tm->tm_year % 100)
-+ };
-+ int ret;
-+
-+ ret = regmap_bulk_write(pcf85263->regmap, PCF85263_REG_STOPENABLE,
-+ regs, sizeof(regs));
-+ if (ret)
-+ return ret;
-+
-+ /* As we have set the time in 24H mode, update the hardware accordingly */
-+ if (pcf85263->mode_12h) {
-+ pcf85263->mode_12h = false;
-+ ret = regmap_update_bits(pcf85263->regmap, PCF85263_REG_OSC,
-+ PCF85263_REG_OSC_12H, 0);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Start it again */
-+ return regmap_write(pcf85263->regmap, PCF85263_REG_STOPENABLE, 0);
-+}
-+
-+static int pcf85263_enable_alarm(struct pcf85263 *pcf85263, bool enable)
-+{
-+ int reg;
-+ int ret;
-+
-+ ret = regmap_update_bits(pcf85263->regmap, PCF85263_REG_ALM_CTL,
-+ PCF85263_REG_ALM_CTL_ALL_A1E,
-+ enable ? PCF85263_REG_ALM_CTL_ALL_A1E : 0);
-+ if (ret)
-+ return ret;
-+
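-+ /* Route alarm 1 to whichever INT pin (INTA or INTB) is in use, if any */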
-+ switch (pcf85263->irq_pin) {
-+ case PCF85263_IRQPIN_NONE:
-+ return 0;
-+
-+ case PCF85263_IRQPIN_INTA:
-+ reg = PCF85263_REG_INTA_CTL;
-+ break;
-+
-+ case PCF85263_IRQPIN_INTB:
-+ reg = PCF85263_REG_INTB_CTL;
-+ break;
-+
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return regmap_update_bits(pcf85263->regmap, reg,
-+ PCF85263_REG_INTx_CTL_A1E,
-+ enable ? PCF85263_REG_INTx_CTL_A1E : 0);
-+}
-+
-+static int pcf85263_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+ struct rtc_time *tm = &alarm->time;
-+ const int first = PCF85263_REG_ALM1_SC;
-+ const int last = PCF85263_REG_ALM1_MO;
-+ const int len = last - first + 1;
-+ u8 regs[len];
-+ u8 hr_reg;
-+ unsigned int regval;
-+ int ret;
-+
-+ ret = regmap_bulk_read(pcf85263->regmap, first, regs, len);
-+ if (ret)
-+ return ret;
-+
-+ tm->tm_sec = bcd2bin(regs[PCF85263_REG_ALM1_SC - first] & 0x7f);
-+ tm->tm_min = bcd2bin(regs[PCF85263_REG_ALM1_MN - first] & 0x7f);
-+
-+ hr_reg = regs[PCF85263_REG_ALM1_HR - first];
-+ if (pcf85263->mode_12h)
-+ tm->tm_hour = pcf85263_bcd12h_to_bin24h(hr_reg);
-+ else
-+ tm->tm_hour = bcd2bin(hr_reg & 0x3f);
-+
-+ tm->tm_mday = bcd2bin(regs[PCF85263_REG_ALM1_DT - first]);
-+ tm->tm_mon = bcd2bin(regs[PCF85263_REG_ALM1_MO - first]) - 1;
-+ tm->tm_year = -1;
-+ tm->tm_wday = -1;
-+
-+ ret = regmap_read(pcf85263->regmap, PCF85263_REG_ALM_CTL, &regval);
-+ if (ret)
-+ return ret;
-+ alarm->enabled = !!(regval & PCF85263_REG_ALM_CTL_ALL_A1E);
-+
-+ ret = regmap_read(pcf85263->regmap, PCF85263_REG_FLAGS, &regval);
-+ if (ret)
-+ return ret;
-+ alarm->pending = !!(regval & PCF85263_REG_FLAGS_A1F);
-+
-+ return 0;
-+}
-+
-+static int pcf85263_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+ struct rtc_time *tm = &alarm->time;
-+ const int first = PCF85263_REG_ALM1_SC;
-+ const int last = PCF85263_REG_ALM1_MO;
-+ const int len = last - first + 1;
-+ u8 regs[len];
-+ int ret;
-+
-+ /* Disable alarm comparison during update */
-+ ret = pcf85263_enable_alarm(pcf85263, false);
-+ if (ret)
-+ return ret;
-+
-+ /* Clear any pending alarm (write 0=>clr, 1=>no change) */
-+ ret = regmap_write(pcf85263->regmap, PCF85263_REG_FLAGS,
-+ (unsigned int)(~PCF85263_REG_FLAGS_A1F));
-+ if (ret)
-+ return ret;
-+
-+ /* Set the alarm time registers */
-+ regs[PCF85263_REG_ALM1_SC - first] = bin2bcd(tm->tm_sec);
-+ regs[PCF85263_REG_ALM1_MN - first] = bin2bcd(tm->tm_min);
-+ regs[PCF85263_REG_ALM1_HR - first] = pcf85263->mode_12h ?
-+ pcf85263_bin24h_to_bcd12h(tm->tm_hour) :
-+ bin2bcd(tm->tm_hour);
-+ regs[PCF85263_REG_ALM1_DT - first] = bin2bcd(tm->tm_mday);
-+ regs[PCF85263_REG_ALM1_MO - first] = bin2bcd(tm->tm_mon + 1);
-+
-+ ret = regmap_bulk_write(pcf85263->regmap, first, regs, sizeof(regs));
-+ if (ret)
-+ return ret;
-+
-+ if (alarm->enabled)
-+ ret = pcf85263_enable_alarm(pcf85263, true);
-+
-+ return ret;
-+}
-+
-+static int pcf85263_alarm_irq_enable(struct device *dev, unsigned int enable)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+
-+ return pcf85263_enable_alarm(pcf85263, !!enable);
-+}
-+
-+static irqreturn_t pcf85263_irq(int irq, void *data)
-+{
-+ struct pcf85263 *pcf85263 = data;
-+ unsigned int regval;
-+ int ret;
-+
-+ ret = regmap_read(pcf85263->regmap, PCF85263_REG_FLAGS, &regval);
-+ if (ret)
-+ return IRQ_NONE;
-+
-+ if (regval & PCF85263_REG_FLAGS_A1F) {
-+ regmap_write(pcf85263->regmap, PCF85263_REG_FLAGS,
-+ (unsigned int)(~PCF85263_REG_FLAGS_A1F));
-+
-+ rtc_update_irq(pcf85263->rtc, 1, RTC_IRQF | RTC_AF);
-+
-+ return IRQ_HANDLED;
-+ }
-+
-+ return IRQ_NONE;
-+}
-+
-+static int pcf85263_check_osc_stopped(struct pcf85263 *pcf85263)
-+{
-+ unsigned int regval;
-+ int ret;
-+
-+ ret = regmap_read(pcf85263->regmap, PCF85263_REG_RTC_SC, &regval);
-+ if (ret)
-+ return ret;
-+
-+ ret = regval & PCF85263_REG_RTC_SC_OS ? 1 : 0;
-+ if (ret)
-+ dev_warn(pcf85263->dev, "Oscillator stop detected, date/time is not reliable.\n");
-+
-+ return ret;
-+}
-+
-+#ifdef CONFIG_RTC_INTF_DEV
-+static int pcf85263_ioctl(struct device *dev,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+ int ret;
-+
-+ switch (cmd) {
-+ case RTC_VL_READ:
-+ ret = pcf85263_check_osc_stopped(pcf85263);
-+ if (ret < 0)
-+ return ret;
-+
-+ if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
-+ return -EFAULT;
-+ return 0;
-+
-+ case RTC_VL_CLR:
-+ return regmap_update_bits(pcf85263->regmap,
-+ PCF85263_REG_RTC_SC,
-+ PCF85263_REG_RTC_SC_OS, 0);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+#else
-+#define pcf85263_ioctl NULL
-+#endif
-+
-+static int pcf85263_init_hw(struct pcf85263 *pcf85263)
-+{
-+ struct device_node *np = pcf85263->dev->of_node;
-+ unsigned int regval;
-+ u32 propval;
-+ int ret;
-+
-+ /* Determine if the oscillator has been stopped (probably low power) */
-+ ret = pcf85263_check_osc_stopped(pcf85263);
-+ if (ret < 0) {
-+ /* Log here since this is the first hw access on probe */
-+ dev_err(pcf85263->dev, "Unable to read register\n");
-+
-+ return ret;
-+ }
-+
-+ /* Determine 12/24H mode */
-+ ret = regmap_read(pcf85263->regmap, PCF85263_REG_OSC, &regval);
-+ if (ret)
-+ return ret;
-+ pcf85263->mode_12h = !!(regval & PCF85263_REG_OSC_12H);
-+
-+ /* Set oscillator register */
-+ regval &= ~PCF85263_REG_OSC_12H; /* keep current 12/24 h setting */
-+
-+ propval = PCF85263_QUARTZCAP_12p5pF;
-+ of_property_read_u32(np, "quartz-load-capacitance", &propval);
-+ regval |= ((propval << PCF85263_REG_OSC_CL_SHIFT)
-+ & PCF85263_REG_OSC_CL_MASK);
-+
-+ propval = PCF85263_QUARTZDRIVE_NORMAL;
-+ of_property_read_u32(np, "quartz-drive-strength", &propval);
-+ regval |= ((propval << PCF85263_REG_OSC_OSCD_SHIFT)
-+ & PCF85263_REG_OSC_OSCD_MASK);
-+
-+ if (of_property_read_bool(np, "quartz-low-jitter"))
-+ regval |= PCF85263_REG_OSC_LOWJ;
-+
-+ ret = regmap_write(pcf85263->regmap, PCF85263_REG_OSC, regval);
-+ if (ret)
-+ return ret;
-+
-+ /* Set function register (RTC mode, 1s tick, clock output static) */
-+ ret = regmap_write(pcf85263->regmap, PCF85263_REG_FUNCTION,
-+ PCF85263_REG_FUNCTION_COF_OFF);
-+ if (ret)
-+ return ret;
-+
-+ /* Set all interrupts to disabled, level mode */
-+ ret = regmap_write(pcf85263->regmap, PCF85263_REG_INTA_CTL,
-+ PCF85263_REG_INTx_CTL_ILP);
-+ if (ret)
-+ return ret;
-+ ret = regmap_write(pcf85263->regmap, PCF85263_REG_INTB_CTL,
-+ PCF85263_REG_INTx_CTL_ILP);
-+ if (ret)
-+ return ret;
-+
-+ /* Setup IO pin config register */
-+ regval = PCF85263_REG_PINIO_CLKDISABLE;
-+ switch (pcf85263->irq_pin) {
-+ case PCF85263_IRQPIN_INTA:
-+ regval |= (PCF85263_INTAPM_INTA | PCF85263_TSPM_DISABLED);
-+ break;
-+ case PCF85263_IRQPIN_INTB:
-+ regval |= (PCF85263_INTAPM_HIGHZ | PCF85263_TSPM_INTB);
-+ break;
-+ case PCF85263_IRQPIN_NONE:
-+ regval |= (PCF85263_INTAPM_HIGHZ | PCF85263_TSPM_DISABLED);
-+ break;
-+ }
-+ ret = regmap_write(pcf85263->regmap, PCF85263_REG_PINIO, regval);
-+
-+ return ret;
-+}
-+
-+static const struct rtc_class_ops rtc_ops = {
-+ .ioctl = pcf85263_ioctl,
-+ .read_time = pcf85263_read_time,
-+ .set_time = pcf85263_set_time,
-+ .read_alarm = pcf85263_read_alarm,
-+ .set_alarm = pcf85263_set_alarm,
-+ .alarm_irq_enable = pcf85263_alarm_irq_enable,
-+};
-+
-+static const struct regmap_config pcf85263_regmap_cfg = {
-+ .reg_bits = 8,
-+ .val_bits = 8,
-+ .max_register = PCF85263_MAX_REG,
-+};
-+
-+/*
-+ * On some boards the interrupt line may not be wired to the CPU but only to
-+ * a power supply circuit.
-+ * In that case no interrupt will be specified in the device tree but the
-+ * wakeup-source DT property may be used to enable wakeup programming in
-+ * sysfs
-+ */
-+static bool pcf85263_can_wakeup_machine(struct pcf85263 *pcf85263)
-+{
-+ return pcf85263->irq ||
-+ of_property_read_bool(pcf85263->dev->of_node, "wakeup-source");
-+}
-+
-+static int pcf85263_probe(struct i2c_client *client,
-+ const struct i2c_device_id *id)
-+{
-+ struct device *dev = &client->dev;
-+ struct pcf85263 *pcf85263;
-+ int ret;
-+
-+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
-+ I2C_FUNC_SMBUS_BYTE_DATA |
-+ I2C_FUNC_SMBUS_I2C_BLOCK))
-+ return -ENODEV;
-+
-+ pcf85263 = devm_kzalloc(dev, sizeof(*pcf85263), GFP_KERNEL);
-+ if (!pcf85263)
-+ return -ENOMEM;
-+
-+ pcf85263->dev = dev;
-+ pcf85263->irq = client->irq;
-+ dev_set_drvdata(dev, pcf85263);
-+
-+ pcf85263->regmap = devm_regmap_init_i2c(client, &pcf85263_regmap_cfg);
-+ if (IS_ERR(pcf85263->regmap)) {
-+ ret = PTR_ERR(pcf85263->regmap);
-+ dev_err(dev, "regmap allocation failed (%d)\n", ret);
-+
-+ return ret;
-+ }
-+
-+ /* Determine which interrupt pin the board uses */
-+ if (pcf85263_can_wakeup_machine(pcf85263)) {
-+ if (of_property_match_string(dev->of_node,
-+ "interrupt-names", "INTB") >= 0)
-+ pcf85263->irq_pin = PCF85263_IRQPIN_INTB;
-+ else
-+ pcf85263->irq_pin = PCF85263_IRQPIN_INTA;
-+ } else {
-+ pcf85263->irq_pin = PCF85263_IRQPIN_NONE;
-+ }
-+
-+ ret = pcf85263_init_hw(pcf85263);
-+ if (ret)
-+ return ret;
-+
-+ if (pcf85263->irq) {
-+ ret = devm_request_threaded_irq(dev, pcf85263->irq, NULL,
-+ pcf85263_irq,
-+ IRQF_ONESHOT,
-+ dev->driver->name, pcf85263);
-+ if (ret) {
-+ dev_err(dev, "irq %d unavailable (%d)\n",
-+ pcf85263->irq, ret);
-+ pcf85263->irq = 0;
-+ }
-+ }
-+
-+ if (pcf85263_can_wakeup_machine(pcf85263))
-+ device_init_wakeup(dev, true);
-+
-+ pcf85263->rtc = devm_rtc_device_register(dev, dev->driver->name,
-+ &rtc_ops, THIS_MODULE);
-+ ret = PTR_ERR_OR_ZERO(pcf85263->rtc);
-+ if (ret)
-+ return ret;
-+
-+ /* We cannot support UIE mode if we do not have an IRQ line */
-+ if (!pcf85263->irq)
-+ pcf85263->rtc->uie_unsupported = 1;
-+
-+ dev_info(pcf85263->dev,
-+ "PCF85263 RTC (irqpin=%s irq=%d)\n",
-+ pcf85263_irqpin_names[pcf85263->irq_pin],
-+ pcf85263->irq);
-+
-+ return 0;
-+}
-+
-+static int pcf85263_remove(struct i2c_client *client)
-+{
-+ struct pcf85263 *pcf85263 = i2c_get_clientdata(client);
-+
-+ if (pcf85263_can_wakeup_machine(pcf85263))
-+ device_init_wakeup(pcf85263->dev, false);
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_PM_SLEEP
-+static int pcf85263_suspend(struct device *dev)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+ int ret = 0;
-+
-+ if (device_may_wakeup(dev))
-+ ret = enable_irq_wake(pcf85263->irq);
-+
-+ return ret;
-+}
-+
-+static int pcf85263_resume(struct device *dev)
-+{
-+ struct pcf85263 *pcf85263 = dev_get_drvdata(dev);
-+ int ret = 0;
-+
-+ if (device_may_wakeup(dev))
-+ ret = disable_irq_wake(pcf85263->irq);
-+
-+ return ret;
-+}
-+
-+#endif
-+
-+static const struct i2c_device_id pcf85263_id[] = {
-+ { "pcf85263", 0 },
-+ { }
-+};
-+MODULE_DEVICE_TABLE(i2c, pcf85263_id);
-+
-+#ifdef CONFIG_OF
-+static const struct of_device_id pcf85263_of_match[] = {
-+ { .compatible = "nxp,pcf85263" },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, pcf85263_of_match);
-+#endif
-+
-+static SIMPLE_DEV_PM_OPS(pcf85263_pm_ops, pcf85263_suspend, pcf85263_resume);
-+
-+static struct i2c_driver pcf85263_driver = {
-+ .driver = {
-+ .name = "rtc-pcf85263",
-+ .of_match_table = of_match_ptr(pcf85263_of_match),
-+ .pm = &pcf85263_pm_ops,
-+ },
-+ .probe = pcf85263_probe,
-+ .remove = pcf85263_remove,
-+ .id_table = pcf85263_id,
-+};
-+
-+module_i2c_driver(pcf85263_driver);
-+
-+MODULE_AUTHOR("Martin Fuzzey <mfuzzey@parkeon.com>");
-+MODULE_DESCRIPTION("PCF85263 RTC Driver");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/include/dt-bindings/rtc/nxp,pcf85263.h
-@@ -0,0 +1,14 @@
-+#ifndef _DT_BINDINGS_RTC_NXP_PCF85263_H
-+#define _DT_BINDINGS_RTC_NXP_PCF85263_H
-+
-+/* Quartz capacitance */
-+#define PCF85263_QUARTZCAP_7pF 0
-+#define PCF85263_QUARTZCAP_6pF 1
-+#define PCF85263_QUARTZCAP_12p5pF 2
-+
-+/* Quartz drive strength */
-+#define PCF85263_QUARTZDRIVE_NORMAL 0
-+#define PCF85263_QUARTZDRIVE_LOW 1
-+#define PCF85263_QUARTZDRIVE_HIGH 2
-+
-+#endif /* _DT_BINDINGS_RTC_NXP_PCF85263_H */
diff --git a/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch b/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch
deleted file mode 100644
index d0bee3b450..0000000000
--- a/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch
+++ /dev/null
@@ -1,1588 +0,0 @@
-From ca86ebf3fddbdfa8aecc4b887ef059948ee79621 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:59:08 +0800
-Subject: [PATCH] usb: support layerscape
-
-This is an integrated patch of usb for layerscape
-
-Signed-off-by: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-Signed-off-by: Changming Huang <jerry.huang@nxp.com>
-Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Li Yang <leoli@freescale.com>
-Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
-Signed-off-by: Rajesh Bhagat <rajesh.bhagat@nxp.com>
-Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
-Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
-Signed-off-by: Roger Quadros <rogerq@ti.com>
-Signed-off-by: Shengzhou Liu <Shengzhou.Liu@freescale.com>
-Signed-off-by: Suresh Gupta <suresh.gupta@freescale.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
-Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
----
- arch/arm64/include/asm/io.h | 28 +++
- drivers/usb/common/common.c | 50 +++++
- drivers/usb/core/usb.c | 1 +
- drivers/usb/dwc3/core.c | 167 ++++++++++++++++
- drivers/usb/dwc3/core.h | 58 ++++++
- drivers/usb/dwc3/ep0.c | 4 +-
- drivers/usb/dwc3/gadget.c | 7 +
- drivers/usb/dwc3/host.c | 9 +
- drivers/usb/gadget/udc/fsl_udc_core.c | 46 +++--
- drivers/usb/gadget/udc/fsl_usb2_udc.h | 16 +-
- drivers/usb/host/Kconfig | 2 +-
- drivers/usb/host/ehci-fsl.c | 276 +++++++++++++++++++++++---
- drivers/usb/host/ehci-fsl.h | 3 +
- drivers/usb/host/ehci-hub.c | 2 +
- drivers/usb/host/ehci.h | 3 +
- drivers/usb/host/fsl-mph-dr-of.c | 11 +
- drivers/usb/host/xhci-hub.c | 22 ++
- drivers/usb/host/xhci-plat.c | 16 +-
- drivers/usb/host/xhci-ring.c | 28 ++-
- drivers/usb/host/xhci.c | 37 +++-
- drivers/usb/host/xhci.h | 10 +-
- drivers/usb/phy/phy-fsl-usb.c | 59 ++++--
- drivers/usb/phy/phy-fsl-usb.h | 8 +
- include/linux/usb.h | 1 +
- include/linux/usb/of.h | 2 +
- 25 files changed, 780 insertions(+), 86 deletions(-)
-
---- a/arch/arm64/include/asm/io.h
-+++ b/arch/arm64/include/asm/io.h
-@@ -210,6 +210,34 @@ extern void __iomem *ioremap_cache(phys_
- #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
- #define iowrite64be(v,p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
-
-+/* access ports */
-+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
-+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
-+
-+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
-+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
-+
-+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
-+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
-+
-+/* Clear and set bits in one shot. These macros can be used to clear and
-+ * set multiple bits in a register using a single read-modify-write. These
-+ * macros can also be used to set a multiple-bit bit pattern using a mask,
-+ * by specifying the mask in the 'clear' parameter and the new bit pattern
-+ * in the 'set' parameter.
-+ */
-+
-+#define clrsetbits_be32(addr, clear, set) \
-+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_le32(addr, clear, set) \
-+ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_be16(addr, clear, set) \
-+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_le16(addr, clear, set) \
-+ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
-+#define clrsetbits_8(addr, clear, set) \
-+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
-+
- #include <asm-generic/io.h>
-
- /*
---- a/drivers/usb/common/common.c
-+++ b/drivers/usb/common/common.c
-@@ -105,6 +105,56 @@ static const char *const usb_dr_modes[]
- [USB_DR_MODE_OTG] = "otg",
- };
-
-+/**
-+ * of_usb_get_dr_mode - Get dual role mode for given device_node
-+ * @np: Pointer to the given device_node
-+ *
-+ * The function gets the dual-role mode string from the 'dr_mode' property,
-+ * and returns the corresponding enum usb_dr_mode
-+ */
-+enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
-+{
-+ const char *dr_mode;
-+ int err, i;
-+
-+ err = of_property_read_string(np, "dr_mode", &dr_mode);
-+ if (err < 0)
-+ return USB_DR_MODE_UNKNOWN;
-+
-+ for (i = 0; i < ARRAY_SIZE(usb_dr_modes); i++)
-+ if (!strcmp(dr_mode, usb_dr_modes[i]))
-+ return i;
-+
-+ return USB_DR_MODE_UNKNOWN;
-+}
-+EXPORT_SYMBOL_GPL(of_usb_get_dr_mode);
-+
-+/**
-+ * of_usb_get_maximum_speed - Get maximum requested speed for a given USB
-+ * controller.
-+ * @np: Pointer to the given device_node
-+ *
-+ * The function gets the maximum speed string from property "maximum-speed",
-+ * and returns the corresponding enum usb_device_speed.
-+ */
-+enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np)
-+{
-+ const char *maximum_speed;
-+ int err;
-+ int i;
-+
-+ err = of_property_read_string(np, "maximum-speed", &maximum_speed);
-+ if (err < 0)
-+ return USB_SPEED_UNKNOWN;
-+
-+ for (i = 0; i < ARRAY_SIZE(speed_names); i++)
-+ if (strcmp(maximum_speed, speed_names[i]) == 0)
-+ return i;
-+
-+ return USB_SPEED_UNKNOWN;
-+}
-+EXPORT_SYMBOL_GPL(of_usb_get_maximum_speed);
-+
- static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
- {
- int ret;
---- a/drivers/usb/core/usb.c
-+++ b/drivers/usb/core/usb.c
-@@ -593,6 +593,7 @@ struct usb_device *usb_alloc_dev(struct
- dev->dev.dma_mask = bus->sysdev->dma_mask;
- dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset;
- set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
-+ dev->dev.of_node = bus->controller->of_node;
- dev->state = USB_STATE_ATTACHED;
- dev->lpm_disable_count = 1;
- atomic_set(&dev->urbnum, 0);
---- a/drivers/usb/dwc3/core.c
-+++ b/drivers/usb/dwc3/core.c
-@@ -103,6 +103,41 @@ static int dwc3_get_dr_mode(struct dwc3
- static void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
- static int dwc3_event_buffers_setup(struct dwc3 *dwc);
-
-+/*
-+ * dwc3_power_off_all_roothub_ports - Power off all root hub ports
-+ * @dwc: Pointer to our controller context structure
-+ */
-+static void dwc3_power_off_all_roothub_ports(struct dwc3 *dwc)
-+{
-+ int i, port_num;
-+ u32 reg, op_regs_base, offset;
-+ void __iomem *xhci_regs;
-+
-+ /* xhci regs are not mapped yet, map them temporarily here */
-+ if (dwc->xhci_resources[0].start) {
-+ xhci_regs = ioremap(dwc->xhci_resources[0].start,
-+ DWC3_XHCI_REGS_END);
-+ if (IS_ERR(xhci_regs)) {
-+ dev_err(dwc->dev, "Failed to ioremap xhci_regs\n");
-+ return;
-+ }
-+
-+ op_regs_base = HC_LENGTH(readl(xhci_regs));
-+ reg = readl(xhci_regs + XHCI_HCSPARAMS1);
-+ port_num = HCS_MAX_PORTS(reg);
-+
-+ for (i = 1; i <= port_num; i++) {
-+ offset = op_regs_base + XHCI_PORTSC_BASE + 0x10*(i-1);
-+ reg = readl(xhci_regs + offset);
-+ reg &= ~PORT_POWER;
-+ writel(reg, xhci_regs + offset);
-+ }
-+
-+ iounmap(xhci_regs);
-+ } else
-+ dev_err(dwc->dev, "xhci base reg invalid\n");
-+}
-+
- static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
- {
- u32 reg;
-@@ -111,6 +146,15 @@ static void dwc3_set_prtcap(struct dwc3
- reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
- reg |= DWC3_GCTL_PRTCAPDIR(mode);
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-+
-+ /*
-+ * We have to power off all root hub ports immediately after DWC3 is set
-+ * to host mode, to avoid a VBUS glitch when xhci gets reset later.
-+ */
-+ if (dwc->host_vbus_glitches) {
-+ if (mode == DWC3_GCTL_PRTCAP_HOST)
-+ dwc3_power_off_all_roothub_ports(dwc);
-+ }
- }
-
- static void __dwc3_set_mode(struct work_struct *work)
-@@ -765,6 +809,96 @@ static void dwc3_core_setup_global_contr
- static int dwc3_core_get_phy(struct dwc3 *dwc);
- static int dwc3_core_ulpi_init(struct dwc3 *dwc);
-
-+/* set global soc bus configuration registers */
-+static void dwc3_set_soc_bus_cfg(struct dwc3 *dwc)
-+{
-+ struct device *dev = dwc->dev;
-+ u32 *vals;
-+ u32 cfg;
-+ int ntype;
-+ int ret;
-+ int i;
-+
-+ cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
-+
-+ /*
-+ * Handle property "snps,incr-burst-type-adjustment".
-+ * Get the number of values in this property:
-+ * result <= 0 means this property is not supported.
-+ * result == 1 means INCRx burst mode is supported.
-+ * result > 1 means undefined length burst mode is supported.
-+ */
-+ ntype = device_property_read_u32_array(dev,
-+ "snps,incr-burst-type-adjustment", NULL, 0);
-+ if (ntype > 0) {
-+ vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
-+ if (!vals) {
-+ dev_err(dev, "Error to get memory\n");
-+ return;
-+ }
-+ /* Get INCR burst type, and parse it */
-+ ret = device_property_read_u32_array(dev,
-+ "snps,incr-burst-type-adjustment", vals, ntype);
-+ if (ret) {
-+ dev_err(dev, "Error to get property\n");
-+ return;
-+ }
-+ *(dwc->incrx_type + 1) = vals[0];
-+ if (ntype > 1) {
-+ *dwc->incrx_type = 1;
-+ for (i = 1; i < ntype; i++) {
-+ if (vals[i] > *(dwc->incrx_type + 1))
-+ *(dwc->incrx_type + 1) = vals[i];
-+ }
-+ } else
-+ *dwc->incrx_type = 0;
-+
-+ /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
-+ cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
-+ if (*dwc->incrx_type)
-+ cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
-+ switch (*(dwc->incrx_type + 1)) {
-+ case 256:
-+ cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
-+ break;
-+ case 128:
-+ cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
-+ break;
-+ case 64:
-+ cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
-+ break;
-+ case 32:
-+ cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
-+ break;
-+ case 16:
-+ cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
-+ break;
-+ case 8:
-+ cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
-+ break;
-+ case 4:
-+ cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
-+ break;
-+ case 1:
-+ break;
-+ default:
-+ dev_err(dev, "Invalid property\n");
-+ break;
-+ }
-+ }
-+
-+ /* Handle usb snooping */
-+ if (dwc->dma_coherent) {
-+ cfg &= ~DWC3_GSBUSCFG0_SNP_MASK;
-+ cfg |= (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DATARD_SHIFT) |
-+ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DESCRD_SHIFT) |
-+ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DATAWR_SHIFT) |
-+ (AXI3_CACHE_TYPE_SNP << DWC3_GSBUSCFG0_DESCWR_SHIFT);
-+ }
-+
-+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
-+}
-+
- /**
- * dwc3_core_init - Low-level initialization of DWC3 Core
- * @dwc: Pointer to our controller context structure
-@@ -827,6 +961,8 @@ static int dwc3_core_init(struct dwc3 *d
- /* Adjust Frame Length */
- dwc3_frame_length_adjustment(dwc);
-
-+ dwc3_set_soc_bus_cfg(dwc);
-+
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
- ret = phy_power_on(dwc->usb2_generic_phy);
-@@ -873,6 +1009,22 @@ static int dwc3_core_init(struct dwc3 *d
- dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
- }
-
-+ if (dwc->dr_mode == USB_DR_MODE_HOST ||
-+ dwc->dr_mode == USB_DR_MODE_OTG) {
-+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
-+
-+ /*
-+ * Enable the Auto Retry feature so that the controller, operating
-+ * in host mode and seeing transaction errors (CRC errors or internal
-+ * overrun scenarios) on IN transfers, replies to the device
-+ * with a non-terminating retry ACK (i.e., an ACK transaction
-+ * packet with Retry=1 & Nump != 0)
-+ */
-+ reg |= DWC3_GUCTL_HSTINAUTORETRY;
-+
-+ dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
-+ }
-+
- return 0;
-
- err4:
-@@ -1079,6 +1231,8 @@ static void dwc3_get_properties(struct d
- &hird_threshold);
- dwc->usb3_lpm_capable = device_property_read_bool(dev,
- "snps,usb3_lpm_capable");
-+ dwc->dma_coherent = device_property_read_bool(dev,
-+ "dma-coherent");
-
- dwc->disable_scramble_quirk = device_property_read_bool(dev,
- "snps,disable_scramble_quirk");
-@@ -1113,8 +1267,16 @@ static void dwc3_get_properties(struct d
- dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
- "snps,parkmode-disable-ss-quirk");
-
-+ dwc->quirk_reverse_in_out = device_property_read_bool(dev,
-+ "snps,quirk_reverse_in_out");
-+ dwc->quirk_stop_transfer_in_block = device_property_read_bool(dev,
-+ "snps,quirk_stop_transfer_in_block");
-+ dwc->quirk_stop_ep_in_u1 = device_property_read_bool(dev,
-+ "snps,quirk_stop_ep_in_u1");
- dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
- "snps,tx_de_emphasis_quirk");
-+ dwc->disable_devinit_u1u2_quirk = device_property_read_bool(dev,
-+ "snps,disable_devinit_u1u2");
- device_property_read_u8(dev, "snps,tx_de_emphasis",
- &tx_de_emphasis);
- device_property_read_string(dev, "snps,hsphy_interface",
-@@ -1125,6 +1287,9 @@ static void dwc3_get_properties(struct d
- dwc->dis_metastability_quirk = device_property_read_bool(dev,
- "snps,dis_metastability_quirk");
-
-+ dwc->host_vbus_glitches = device_property_read_bool(dev,
-+ "snps,host-vbus-glitches");
-+
- dwc->lpm_nyet_threshold = lpm_nyet_threshold;
- dwc->tx_de_emphasis = tx_de_emphasis;
-
-@@ -1376,12 +1541,14 @@ static int dwc3_resume_common(struct dwc
-
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
-+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
- case USB_DR_MODE_OTG:
- spin_lock_irqsave(&dwc->lock, flags);
- dwc3_gadget_resume(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
- /* FALLTHROUGH */
- case USB_DR_MODE_HOST:
-+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
- default:
- /* do nothing */
- break;
---- a/drivers/usb/dwc3/core.h
-+++ b/drivers/usb/dwc3/core.h
-@@ -161,6 +161,32 @@
-
- /* Bit fields */
-
-+/* Global SoC Bus Configuration Register 0 */
-+#define AXI3_CACHE_TYPE_AW 0x8 /* write allocate */
-+#define AXI3_CACHE_TYPE_AR 0x4 /* read allocate */
-+#define AXI3_CACHE_TYPE_SNP 0x2 /* cacheable */
-+#define AXI3_CACHE_TYPE_BUF 0x1 /* bufferable */
-+#define DWC3_GSBUSCFG0_DATARD_SHIFT 28
-+#define DWC3_GSBUSCFG0_DESCRD_SHIFT 24
-+#define DWC3_GSBUSCFG0_DATAWR_SHIFT 20
-+#define DWC3_GSBUSCFG0_DESCWR_SHIFT 16
-+#define DWC3_GSBUSCFG0_SNP_MASK 0xffff0000
-+#define DWC3_GSBUSCFG0_DATABIGEND (1 << 11)
-+#define DWC3_GSBUSCFG0_DESCBIGEND (1 << 10)
-+#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */
-+#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */
-+#define DWC3_GSBUSCFG0_INCR64BRSTENA (1 << 5) /* INCR64 burst */
-+#define DWC3_GSBUSCFG0_INCR32BRSTENA (1 << 4) /* INCR32 burst */
-+#define DWC3_GSBUSCFG0_INCR16BRSTENA (1 << 3) /* INCR16 burst */
-+#define DWC3_GSBUSCFG0_INCR8BRSTENA (1 << 2) /* INCR8 burst */
-+#define DWC3_GSBUSCFG0_INCR4BRSTENA (1 << 1) /* INCR4 burst */
-+#define DWC3_GSBUSCFG0_INCRBRSTENA (1 << 0) /* undefined length enable */
-+#define DWC3_GSBUSCFG0_INCRBRST_MASK 0xff
-+
-+/* Global SoC Bus Configuration Register 1 */
-+#define DWC3_GSBUSCFG1_1KPAGEENA (1 << 12) /* 1K page boundary enable */
-+#define DWC3_GSBUSCFG1_PTRANSLIMIT_MASK 0xf00
-+
- /* Global Debug Queue/FIFO Space Available Register */
- #define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f)
- #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
-@@ -205,6 +231,9 @@
- #define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
- #define DWC3_GCTL_DSBLCLKGTNG BIT(0)
-
-+/* Global User Control Register */
-+#define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
-+
- /* Global User Control 1 Register */
- #define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
- #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
-@@ -478,6 +507,14 @@
- #define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
- #define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
-
-+/* Partial XHCI Register and Bit fields for quirk */
-+#define XHCI_HCSPARAMS1 0x4
-+#define XHCI_PORTSC_BASE 0x400
-+#define PORT_POWER (1 << 9)
-+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
-+#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff)
-+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
-+
- /* Structures */
-
- struct dwc3_trb;
-@@ -789,6 +826,7 @@ struct dwc3_scratchpad_array {
- * @regs: base address for our registers
- * @regs_size: address space size
- * @fladj: frame length adjustment
-+ * @incrx_type: INCR burst type adjustment
- * @irq_gadget: peripheral controller's IRQ number
- * @nr_scratch: number of scratch buffers
- * @u1u2: only used on revisions <1.83a for workaround
-@@ -844,6 +882,7 @@ struct dwc3_scratchpad_array {
- * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
- * @three_stage_setup: set if we perform a three phase setup
- * @usb3_lpm_capable: set if hardware supports Link Power Management
-+ * @dma_coherent: set if hardware supports DMA snoop
- * @disable_scramble_quirk: set if we enable the disable scramble quirk
- * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
- * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
-@@ -925,6 +964,12 @@ struct dwc3 {
- enum usb_phy_interface hsphy_mode;
-
- u32 fladj;
-+ /*
-+ * For INCR burst type.
-+ * First field: for undefined length INCR burst type enable.
-+ * Second field: for INCRx burst type enable
-+ */
-+ u32 incrx_type[2];
- u32 irq_gadget;
- u32 nr_scratch;
- u32 u1u2;
-@@ -1009,6 +1054,7 @@ struct dwc3 {
- unsigned setup_packet_pending:1;
- unsigned three_stage_setup:1;
- unsigned usb3_lpm_capable:1;
-+ unsigned dma_coherent:1;
-
- unsigned disable_scramble_quirk:1;
- unsigned u2exit_lfps_quirk:1;
-@@ -1029,6 +1075,11 @@ struct dwc3 {
-
- unsigned tx_de_emphasis_quirk:1;
- unsigned tx_de_emphasis:2;
-+ unsigned disable_devinit_u1u2_quirk:1;
-+ unsigned quirk_reverse_in_out:1;
-+ unsigned quirk_stop_transfer_in_block:1;
-+ unsigned quirk_stop_ep_in_u1:1;
-+ unsigned host_vbus_glitches:1;
-
- unsigned dis_metastability_quirk:1;
-
---- a/drivers/usb/dwc3/ep0.c
-+++ b/drivers/usb/dwc3/ep0.c
-@@ -391,7 +391,7 @@ static int dwc3_ep0_handle_u1(struct dwc
- return -EINVAL;
-
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-- if (set)
-+ if (set && !dwc->disable_devinit_u1u2_quirk)
- reg |= DWC3_DCTL_INITU1ENA;
- else
- reg &= ~DWC3_DCTL_INITU1ENA;
-@@ -413,7 +413,7 @@ static int dwc3_ep0_handle_u2(struct dwc
- return -EINVAL;
-
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-- if (set)
-+ if (set && !dwc->disable_devinit_u1u2_quirk)
- reg |= DWC3_DCTL_INITU2ENA;
- else
- reg &= ~DWC3_DCTL_INITU2ENA;
---- a/drivers/usb/dwc3/gadget.c
-+++ b/drivers/usb/dwc3/gadget.c
-@@ -3216,6 +3216,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
- {
- int ret;
- int irq;
-+ u32 reg;
-
- irq = dwc3_gadget_get_irq(dwc);
- if (irq < 0) {
-@@ -3294,6 +3295,12 @@ int dwc3_gadget_init(struct dwc3 *dwc)
-
- dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
-
-+ if (dwc->disable_devinit_u1u2_quirk) {
-+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-+ reg &= ~(DWC3_DCTL_INITU1ENA | DWC3_DCTL_INITU2ENA);
-+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-+ }
-+
- return 0;
-
- err4:
---- a/drivers/usb/dwc3/host.c
-+++ b/drivers/usb/dwc3/host.c
-@@ -98,6 +98,15 @@ int dwc3_host_init(struct dwc3 *dwc)
-
- memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
-
-+ if (dwc->quirk_reverse_in_out)
-+ props[prop_idx++].name = "quirk-reverse-in-out";
-+
-+ if (dwc->quirk_stop_transfer_in_block)
-+ props[prop_idx++].name = "quirk-stop-transfer-in-block";
-+
-+ if (dwc->quirk_stop_ep_in_u1)
-+ props[prop_idx++].name = "quirk-stop-ep-in-u1";
-+
- if (dwc->usb3_lpm_capable)
- props[prop_idx++].name = "usb3-lpm-capable";
-
---- a/drivers/usb/gadget/udc/fsl_udc_core.c
-+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
-@@ -198,7 +198,11 @@ __acquires(ep->udc->lock)
-
- spin_unlock(&ep->udc->lock);
-
-- usb_gadget_giveback_request(&ep->ep, &req->req);
-+ /* this complete() should be a function implemented by the gadget
-+ * layer, e.g. fsg->bulk_in_complete()
-+ */
-+ if (req->req.complete)
-+ usb_gadget_giveback_request(&ep->ep, &req->req);
-
- spin_lock(&ep->udc->lock);
- ep->stopped = stopped;
-@@ -245,10 +249,10 @@ static int dr_controller_setup(struct fs
- if (udc->pdata->have_sysif_regs) {
- if (udc->pdata->controller_ver) {
- /* controller version 1.6 or above */
-- ctrl = __raw_readl(&usb_sys_regs->control);
-+ ctrl = ioread32be(&usb_sys_regs->control);
- ctrl &= ~USB_CTRL_UTMI_PHY_EN;
- ctrl |= USB_CTRL_USB_EN;
-- __raw_writel(ctrl, &usb_sys_regs->control);
-+ iowrite32be(ctrl, &usb_sys_regs->control);
- }
- }
- portctrl |= PORTSCX_PTS_ULPI;
-@@ -257,13 +261,14 @@ static int dr_controller_setup(struct fs
- portctrl |= PORTSCX_PTW_16BIT;
- /* fall through */
- case FSL_USB2_PHY_UTMI:
-+ case FSL_USB2_PHY_UTMI_DUAL:
- if (udc->pdata->have_sysif_regs) {
- if (udc->pdata->controller_ver) {
- /* controller version 1.6 or above */
-- ctrl = __raw_readl(&usb_sys_regs->control);
-+ ctrl = ioread32be(&usb_sys_regs->control);
- ctrl |= (USB_CTRL_UTMI_PHY_EN |
- USB_CTRL_USB_EN);
-- __raw_writel(ctrl, &usb_sys_regs->control);
-+ iowrite32be(ctrl, &usb_sys_regs->control);
- mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI
- PHY CLK to become stable - 10ms*/
- }
-@@ -329,22 +334,22 @@ static int dr_controller_setup(struct fs
- /* Config control enable i/o output, cpu endian register */
- #ifndef CONFIG_ARCH_MXC
- if (udc->pdata->have_sysif_regs) {
-- ctrl = __raw_readl(&usb_sys_regs->control);
-+ ctrl = ioread32be(&usb_sys_regs->control);
- ctrl |= USB_CTRL_IOENB;
-- __raw_writel(ctrl, &usb_sys_regs->control);
-+ iowrite32be(ctrl, &usb_sys_regs->control);
- }
- #endif
-
--#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
-+#if !defined(CONFIG_NOT_COHERENT_CACHE)
- /* Turn on cache snooping hardware, since some PowerPC platforms
- * wholly rely on hardware to deal with cache coherent. */
-
- if (udc->pdata->have_sysif_regs) {
- /* Setup Snooping for all the 4GB space */
- tmp = SNOOP_SIZE_2GB; /* starts from 0x0, size 2G */
-- __raw_writel(tmp, &usb_sys_regs->snoop1);
-+ iowrite32be(tmp, &usb_sys_regs->snoop1);
- tmp |= 0x80000000; /* starts from 0x8000000, size 2G */
-- __raw_writel(tmp, &usb_sys_regs->snoop2);
-+ iowrite32be(tmp, &usb_sys_regs->snoop2);
- }
- #endif
-
-@@ -1056,7 +1061,7 @@ static int fsl_ep_fifo_status(struct usb
- struct ep_queue_head *qh;
-
- ep = container_of(_ep, struct fsl_ep, ep);
-- if (!_ep || (!ep->ep.desc && ep_index(ep) != 0))
-+ if (!_ep || !ep->ep.desc || (ep_index(ep) == 0))
- return -ENODEV;
-
- udc = (struct fsl_udc *)ep->udc;
-@@ -1598,14 +1603,13 @@ static int process_ep_req(struct fsl_udc
- struct fsl_req *curr_req)
- {
- struct ep_td_struct *curr_td;
-- int td_complete, actual, remaining_length, j, tmp;
-+ int actual, remaining_length, j, tmp;
- int status = 0;
- int errors = 0;
- struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
- int direction = pipe % 2;
-
- curr_td = curr_req->head;
-- td_complete = 0;
- actual = curr_req->req.length;
-
- for (j = 0; j < curr_req->dtd_count; j++) {
-@@ -1650,11 +1654,9 @@ static int process_ep_req(struct fsl_udc
- status = -EPROTO;
- break;
- } else {
-- td_complete++;
- break;
- }
- } else {
-- td_complete++;
- VDBG("dTD transmitted successful");
- }
-
-@@ -1697,7 +1699,7 @@ static void dtd_complete_irq(struct fsl_
- curr_ep = get_ep_by_pipe(udc, i);
-
- /* If the ep is configured */
-- if (!curr_ep->ep.name) {
-+ if (strncmp(curr_ep->name, "ep", 2)) {
- WARNING("Invalid EP?");
- continue;
- }
-@@ -2419,10 +2421,12 @@ static int fsl_udc_probe(struct platform
- usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
- #endif
-
-+#ifdef CONFIG_ARCH_MXC
- /* Initialize USB clocks */
- ret = fsl_udc_clk_init(pdev);
- if (ret < 0)
- goto err_iounmap_noclk;
-+#endif
-
- /* Read Device Controller Capability Parameters register */
- dccparams = fsl_readl(&dr_regs->dccparams);
-@@ -2462,9 +2466,11 @@ static int fsl_udc_probe(struct platform
- dr_controller_setup(udc_controller);
- }
-
-+#ifdef CONFIG_ARCH_MXC
- ret = fsl_udc_clk_finalize(pdev);
- if (ret)
- goto err_free_irq;
-+#endif
-
- /* Setup gadget structure */
- udc_controller->gadget.ops = &fsl_gadget_ops;
-@@ -2477,6 +2483,7 @@ static int fsl_udc_probe(struct platform
- /* Setup gadget.dev and register with kernel */
- dev_set_name(&udc_controller->gadget.dev, "gadget");
- udc_controller->gadget.dev.of_node = pdev->dev.of_node;
-+ set_dma_ops(&udc_controller->gadget.dev, pdev->dev.dma_ops);
-
- if (!IS_ERR_OR_NULL(udc_controller->transceiver))
- udc_controller->gadget.is_otg = 1;
-@@ -2528,7 +2535,9 @@ err_free_irq:
- err_iounmap:
- if (pdata->exit)
- pdata->exit(pdev);
-+#ifdef CONFIG_ARCH_MXC
- fsl_udc_clk_release();
-+#endif
- err_iounmap_noclk:
- iounmap(dr_regs);
- err_release_mem_region:
-@@ -2556,8 +2565,9 @@ static int fsl_udc_remove(struct platfor
- udc_controller->done = &done;
- usb_del_gadget_udc(&udc_controller->gadget);
-
-+#ifdef CONFIG_ARCH_MXC
- fsl_udc_clk_release();
--
-+#endif
- /* DR has been stopped in usb_gadget_unregister_driver() */
- remove_proc_file();
-
---- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
-+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
-@@ -20,6 +20,10 @@
- #define USB_MAX_CTRL_PAYLOAD 64
- #define USB_DR_SYS_OFFSET 0x400
-
-+#ifdef CONFIG_SOC_LS1021A
-+#undef CONFIG_ARCH_MXC
-+#endif
-+
- /* USB DR device mode registers (Little Endian) */
- struct usb_dr_device {
- /* Capability register */
-@@ -597,18 +601,6 @@ struct platform_device;
- int fsl_udc_clk_init(struct platform_device *pdev);
- int fsl_udc_clk_finalize(struct platform_device *pdev);
- void fsl_udc_clk_release(void);
--#else
--static inline int fsl_udc_clk_init(struct platform_device *pdev)
--{
-- return 0;
--}
--static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
--{
-- return 0;
--}
--static inline void fsl_udc_clk_release(void)
--{
--}
- #endif
-
- #endif
---- a/drivers/usb/host/Kconfig
-+++ b/drivers/usb/host/Kconfig
-@@ -165,7 +165,7 @@ config XPS_USB_HCD_XILINX
-
- config USB_EHCI_FSL
- tristate "Support for Freescale PPC on-chip EHCI USB controller"
-- depends on FSL_SOC
-+ depends on USB_EHCI_HCD
- select USB_EHCI_ROOT_HUB_TT
- ---help---
- Variation of ARC USB block used in some Freescale chips.
---- a/drivers/usb/host/ehci-fsl.c
-+++ b/drivers/usb/host/ehci-fsl.c
-@@ -36,15 +36,126 @@
- #include <linux/platform_device.h>
- #include <linux/fsl_devices.h>
- #include <linux/of_platform.h>
-+#include <linux/io.h>
-+
-+#ifdef CONFIG_PM
-+#include <linux/suspend.h>
-+#endif
-
- #include "ehci.h"
- #include "ehci-fsl.h"
-
-+#define FSL_USB_PHY_ADDR 0xffe214000
-+
-+struct ccsr_usb_port_ctrl {
-+ u32 ctrl;
-+ u32 drvvbuscfg;
-+ u32 pwrfltcfg;
-+ u32 sts;
-+ u8 res_14[0xc];
-+ u32 bistcfg;
-+ u32 biststs;
-+ u32 abistcfg;
-+ u32 abiststs;
-+ u8 res_30[0x10];
-+ u32 xcvrprg;
-+ u32 anaprg;
-+ u32 anadrv;
-+ u32 anasts;
-+};
-+
-+struct ccsr_usb_phy {
-+ u32 id;
-+ struct ccsr_usb_port_ctrl port1;
-+ u8 res_50[0xc];
-+ u32 tvr;
-+ u32 pllprg[4];
-+ u8 res_70[0x4];
-+ u32 anaccfg;
-+ u32 dbg;
-+ u8 res_7c[0x4];
-+ struct ccsr_usb_port_ctrl port2;
-+ u8 res_dc[0x334];
-+};
-+
- #define DRIVER_DESC "Freescale EHCI Host controller driver"
- #define DRV_NAME "ehci-fsl"
-
- static struct hc_driver __read_mostly fsl_ehci_hc_driver;
-
-+struct ehci_fsl {
-+ struct ehci_hcd ehci;
-+
-+#ifdef CONFIG_PM
-+ struct ehci_regs saved_regs;
-+ struct ccsr_usb_phy saved_phy_regs;
-+ /* Saved USB PHY settings, need to restore after deep sleep. */
-+ u32 usb_ctrl;
-+#endif
-+ /*
-+ * store current hcd state for otg;
-+ * have_hcd is true when the host driver is already part of the otg
-+ * framework, otherwise false;
-+ * hcd_add is true when the otg framework wants to add the host
-+ * driver as part of otg; false when it wants to remove it
-+ */
-+ unsigned have_hcd:1;
-+ unsigned hcd_add:1;
-+};
-+
-+static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
-+{
-+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-+
-+ return container_of(ehci, struct ehci_fsl, ehci);
-+}
-+
-+#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
-+static void do_change_hcd(struct work_struct *work)
-+{
-+ struct ehci_hcd *ehci = container_of(work, struct ehci_hcd,
-+ change_hcd_work);
-+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
-+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
-+ void __iomem *non_ehci = hcd->regs;
-+ int retval;
-+
-+ if (ehci_fsl->hcd_add && !ehci_fsl->have_hcd) {
-+ writel(USBMODE_CM_HOST, non_ehci + FSL_SOC_USB_USBMODE);
-+ /* host, gadget and otg share same int line */
-+ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
-+ if (retval == 0)
-+ ehci_fsl->have_hcd = 1;
-+ } else if (!ehci_fsl->hcd_add && ehci_fsl->have_hcd) {
-+ usb_remove_hcd(hcd);
-+ ehci_fsl->have_hcd = 0;
-+ }
-+}
-+#endif
-+
- /* configure so an HC device and id are always provided */
- /* always called with process context; sleeping is OK */
-
-@@ -131,6 +242,12 @@ static int fsl_ehci_drv_probe(struct pla
- clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK, 0x4);
-
-+ /* Set USB_EN bit to select ULPI phy for USB controller version 2.5 */
-+ if (pdata->controller_ver == FSL_USB_VER_2_5 &&
-+ pdata->phy_mode == FSL_USB2_PHY_ULPI)
-+ iowrite32be(USB_CTRL_USB_EN, hcd->regs + FSL_SOC_USB_CTRL);
-+
-+
- /*
- * Enable UTMI phy and program PTS field in UTMI mode before asserting
- * controller reset for USB Controller version 2.5
-@@ -143,16 +260,20 @@ static int fsl_ehci_drv_probe(struct pla
-
- /* Don't need to set host mode here. It will be done by tdi_reset() */
-
-- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
-+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_NO_SUSPEND);
- if (retval != 0)
- goto err2;
- device_wakeup_enable(hcd->self.controller);
-
--#ifdef CONFIG_USB_OTG
-+#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
- if (pdata->operating_mode == FSL_USB2_DR_OTG) {
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
-
- hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
-+
-+ INIT_WORK(&ehci->change_hcd_work, do_change_hcd);
-+
- dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, phy=0x%p\n",
- hcd, ehci, hcd->usb_phy);
-
-@@ -168,6 +289,11 @@ static int fsl_ehci_drv_probe(struct pla
- retval = -ENODEV;
- goto err2;
- }
-+
-+ ehci_fsl->have_hcd = 1;
-+ } else {
-+ dev_err(&pdev->dev, "wrong operating mode\n");
-+ return -ENODEV;
- }
- #endif
- return retval;
-@@ -181,6 +307,17 @@ static int fsl_ehci_drv_probe(struct pla
- return retval;
- }
-
-+static bool usb_phy_clk_valid(struct usb_hcd *hcd)
-+{
-+ void __iomem *non_ehci = hcd->regs;
-+ bool ret = true;
-+
-+ if (!(ioread32be(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID))
-+ ret = false;
-+
-+ return ret;
-+}
-+
- static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
- enum fsl_usb2_phy_modes phy_mode,
- unsigned int port_offset)
-@@ -219,6 +356,21 @@ static int ehci_fsl_setup_phy(struct usb
- /* fall through */
- case FSL_USB2_PHY_UTMI:
- case FSL_USB2_PHY_UTMI_DUAL:
-+ if (pdata->has_fsl_erratum_a006918) {
-+ pr_warn("fsl-ehci: USB PHY clock invalid\n");
-+ return -EINVAL;
-+ }
-+
-+ /* PHY_CLK_VALID bit is de-featured from all controller
-+ * versions below 2.4 and is to be checked only for
-+ * internal UTMI phy
-+ */
-+ if (pdata->controller_ver > FSL_USB_VER_2_4 &&
-+ pdata->have_sysif_regs && !usb_phy_clk_valid(hcd)) {
-+ pr_err("fsl-ehci: USB PHY clock invalid\n");
-+ return -EINVAL;
-+ }
-+
- if (pdata->have_sysif_regs && pdata->controller_ver) {
- /* controller version 1.6 or above */
- clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
-@@ -295,14 +447,9 @@ static int ehci_fsl_usb_setup(struct ehc
- return -EINVAL;
-
- if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
-- unsigned int chip, rev, svr;
--
-- svr = mfspr(SPRN_SVR);
-- chip = svr >> 16;
-- rev = (svr >> 4) & 0xf;
-
- /* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
-- if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055))
-+ if (pdata->has_fsl_erratum_14 == 1)
- ehci->has_fsl_port_bug = 1;
-
- if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
-@@ -382,16 +529,56 @@ static int ehci_fsl_setup(struct usb_hcd
- return retval;
- }
-
--struct ehci_fsl {
-- struct ehci_hcd ehci;
--
- #ifdef CONFIG_PM
-- /* Saved USB PHY settings, need to restore after deep sleep. */
-- u32 usb_ctrl;
--#endif
--};
-+void __iomem *phy_reg;
-
--#ifdef CONFIG_PM
-+#ifdef CONFIG_PPC
-+/* save usb registers */
-+static int ehci_fsl_save_context(struct usb_hcd *hcd)
-+{
-+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
-+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-+ void __iomem *non_ehci = hcd->regs;
-+ struct device *dev = hcd->self.controller;
-+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
-+
-+ if (pdata->phy_mode == FSL_USB2_PHY_UTMI_DUAL) {
-+ phy_reg = ioremap(FSL_USB_PHY_ADDR,
-+ sizeof(struct ccsr_usb_phy));
-+ _memcpy_fromio((void *)&ehci_fsl->saved_phy_regs, phy_reg,
-+ sizeof(struct ccsr_usb_phy));
-+ }
-+
-+ _memcpy_fromio((void *)&ehci_fsl->saved_regs, ehci->regs,
-+ sizeof(struct ehci_regs));
-+ ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
-+
-+ return 0;
-+}
-+
-+/* Restore usb registers */
-+static int ehci_fsl_restore_context(struct usb_hcd *hcd)
-+{
-+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
-+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-+ void __iomem *non_ehci = hcd->regs;
-+ struct device *dev = hcd->self.controller;
-+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
-+
-+ if (pdata->phy_mode == FSL_USB2_PHY_UTMI_DUAL) {
-+ if (phy_reg)
-+ _memcpy_toio(phy_reg,
-+ (void *)&ehci_fsl->saved_phy_regs,
-+ sizeof(struct ccsr_usb_phy));
-+ }
-+
-+ _memcpy_toio(ehci->regs, (void *)&ehci_fsl->saved_regs,
-+ sizeof(struct ehci_regs));
-+ iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
-+
-+ return 0;
-+}
-+#endif
-
- #ifdef CONFIG_PPC_MPC512x
- static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
-@@ -538,26 +725,45 @@ static inline int ehci_fsl_mpc512x_drv_r
- }
- #endif /* CONFIG_PPC_MPC512x */
-
--static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
--{
-- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
--
-- return container_of(ehci, struct ehci_fsl, ehci);
--}
--
- static int ehci_fsl_drv_suspend(struct device *dev)
- {
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
- void __iomem *non_ehci = hcd->regs;
-+#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
-+ struct usb_bus host = hcd->self;
-+#endif
-+
-+#ifdef CONFIG_PPC
-+ suspend_state_t pm_state;
-+ /* FIXME: need to port fsl_pm.h before enabling the code below. */
-+ /* pm_state = pm_suspend_state(); */
-+ pm_state = PM_SUSPEND_MEM;
-+
-+ if (pm_state == PM_SUSPEND_MEM)
-+ ehci_fsl_save_context(hcd);
-+#endif
-
- if (of_device_is_compatible(dev->parent->of_node,
- "fsl,mpc5121-usb2-dr")) {
- return ehci_fsl_mpc512x_drv_suspend(dev);
- }
-
-+#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
-+ if (host.is_otg) {
-+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-+
-+ /* remove hcd */
-+ ehci_fsl->hcd_add = 0;
-+ schedule_work(&ehci->change_hcd_work);
-+ host.is_otg = 0;
-+ return 0;
-+ }
-+#endif
-+
- ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
- device_may_wakeup(dev));
-+
- if (!fsl_deep_sleep())
- return 0;
-
-@@ -571,12 +777,36 @@ static int ehci_fsl_drv_resume(struct de
- struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- void __iomem *non_ehci = hcd->regs;
-+#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
-+ struct usb_bus host = hcd->self;
-+#endif
-+
-+#ifdef CONFIG_PPC
-+ suspend_state_t pm_state;
-+ /* FIXME: need to port fsl_pm.h before enabling the code below. */
-+ /* pm_state = pm_suspend_state(); */
-+ pm_state = PM_SUSPEND_MEM;
-+
-+ if (pm_state == PM_SUSPEND_MEM)
-+ ehci_fsl_restore_context(hcd);
-+#endif
-
- if (of_device_is_compatible(dev->parent->of_node,
- "fsl,mpc5121-usb2-dr")) {
- return ehci_fsl_mpc512x_drv_resume(dev);
- }
-
-+#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
-+ if (host.is_otg) {
-+ /* add hcd */
-+ ehci_fsl->hcd_add = 1;
-+ schedule_work(&ehci->change_hcd_work);
-+ usb_hcd_resume_root_hub(hcd);
-+ host.is_otg = 0;
-+ return 0;
-+ }
-+#endif
-+
- ehci_prepare_ports_for_controller_resume(ehci);
- if (!fsl_deep_sleep())
- return 0;
---- a/drivers/usb/host/ehci-fsl.h
-+++ b/drivers/usb/host/ehci-fsl.h
-@@ -63,4 +63,7 @@
- #define UTMI_PHY_EN (1<<9)
- #define ULPI_PHY_CLK_SEL (1<<10)
- #define PHY_CLK_VALID (1<<17)
-+
-+/* Retry count for checking UTMI PHY CLK validity */
-+#define UTMI_PHY_CLK_VALID_CHK_RETRY 5
- #endif /* _EHCI_FSL_H */
---- a/drivers/usb/host/ehci-hub.c
-+++ b/drivers/usb/host/ehci-hub.c
-@@ -305,6 +305,8 @@ static int ehci_bus_suspend (struct usb_
- USB_PORT_STAT_HIGH_SPEED)
- fs_idle_delay = true;
- ehci_writel(ehci, t2, reg);
-+ if (ehci_has_fsl_susp_errata(ehci))
-+ usleep_range(10000, 20000);
- changed = 1;
- }
- }
---- a/drivers/usb/host/ehci.h
-+++ b/drivers/usb/host/ehci.h
-@@ -180,6 +180,9 @@ struct ehci_hcd { /* one per controlle
- unsigned periodic_count; /* periodic activity count */
- unsigned uframe_periodic_max; /* max periodic time per uframe */
-
-+#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
-+ struct work_struct change_hcd_work;
-+#endif
-
- /* list of itds & sitds completed while now_frame was still active */
- struct list_head cached_itd_list;
---- a/drivers/usb/host/fsl-mph-dr-of.c
-+++ b/drivers/usb/host/fsl-mph-dr-of.c
-@@ -229,6 +229,17 @@ static int fsl_usb2_mph_dr_of_probe(stru
- pdata->has_fsl_erratum_a005697 =
- of_property_read_bool(np, "fsl,usb_erratum-a005697");
-
-+ if (of_get_property(np, "fsl,erratum_a006918", NULL))
-+ pdata->has_fsl_erratum_a006918 = 1;
-+ else
-+ pdata->has_fsl_erratum_a006918 = 0;
-+
-+ if (of_get_property(np, "fsl,usb_erratum_14", NULL))
-+ pdata->has_fsl_erratum_14 = 1;
-+ else
-+ pdata->has_fsl_erratum_14 = 0;
-+
-+
- /*
- * Determine whether phy_clk_valid needs to be checked
- * by reading property in device tree
---- a/drivers/usb/host/xhci-hub.c
-+++ b/drivers/usb/host/xhci-hub.c
-@@ -689,12 +689,34 @@ void xhci_set_link_state(struct xhci_hcd
- int port_id, u32 link_state)
- {
- u32 temp;
-+ u32 portpmsc_u2_backup = 0;
-+
-+ /* Back up U2 timeout info before initiating U3 entry (erratum A-010131) */
-+ if (xhci->shared_hcd->speed >= HCD_USB3 &&
-+ link_state == USB_SS_PORT_LS_U3 &&
-+ (xhci->quirks & XHCI_DIS_U1U2_WHEN_U3)) {
-+ portpmsc_u2_backup = readl(port_array[port_id] + PORTPMSC);
-+ portpmsc_u2_backup &= PORT_U2_TIMEOUT_MASK;
-+ temp = readl(port_array[port_id] + PORTPMSC);
-+ temp |= PORT_U2_TIMEOUT_MASK;
-+ writel(temp, port_array[port_id] + PORTPMSC);
-+ }
-
- temp = readl(port_array[port_id]);
- temp = xhci_port_state_to_neutral(temp);
- temp &= ~PORT_PLS_MASK;
- temp |= PORT_LINK_STROBE | link_state;
- writel(temp, port_array[port_id]);
-+
-+ /* Restore U2 timeout info after U3 entry complete */
-+ if (xhci->shared_hcd->speed >= HCD_USB3 &&
-+ link_state == USB_SS_PORT_LS_U3 &&
-+ (xhci->quirks & XHCI_DIS_U1U2_WHEN_U3)) {
-+ temp = readl(port_array[port_id] + PORTPMSC);
-+ temp &= ~PORT_U2_TIMEOUT_MASK;
-+ temp |= portpmsc_u2_backup;
-+ writel(temp, port_array[port_id] + PORTPMSC);
-+ }
- }
-
- static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
---- a/drivers/usb/host/xhci-plat.c
-+++ b/drivers/usb/host/xhci-plat.c
-@@ -263,8 +263,22 @@ static int xhci_plat_probe(struct platfo
- goto disable_clk;
- }
-
-- if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
-+ if (device_property_read_bool(sysdev, "usb3-lpm-capable")) {
- xhci->quirks |= XHCI_LPM_SUPPORT;
-+ if (device_property_read_bool(sysdev,
-+ "snps,dis-u1u2-when-u3-quirk"))
-+ xhci->quirks |= XHCI_DIS_U1U2_WHEN_U3;
-+ }
-+
-+ if (device_property_read_bool(&pdev->dev, "quirk-reverse-in-out"))
-+ xhci->quirks |= XHCI_REVERSE_IN_OUT;
-+
-+ if (device_property_read_bool(&pdev->dev,
-+ "quirk-stop-transfer-in-block"))
-+ xhci->quirks |= XHCI_STOP_TRANSFER_IN_BLOCK;
-+
-+ if (device_property_read_bool(&pdev->dev, "quirk-stop-ep-in-u1"))
-+ xhci->quirks |= XHCI_STOP_EP_IN_U1;
-
- if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
- xhci->quirks |= XHCI_BROKEN_PORT_PED;
---- a/drivers/usb/host/xhci-ring.c
-+++ b/drivers/usb/host/xhci-ring.c
-@@ -1978,10 +1978,12 @@ static int finish_td(struct xhci_hcd *xh
- union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
- {
-+ struct xhci_dequeue_state deq_state;
- struct xhci_virt_device *xdev;
- struct xhci_ep_ctx *ep_ctx;
- struct xhci_ring *ep_ring;
- unsigned int slot_id;
-+ u32 remaining;
- u32 trb_comp_code;
- int ep_index;
-
-@@ -2004,14 +2006,30 @@ static int finish_td(struct xhci_hcd *xh
- if (trb_comp_code == COMP_STALL_ERROR ||
- xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
- trb_comp_code)) {
-- /* Issue a reset endpoint command to clear the host side
-- * halt, followed by a set dequeue command to move the
-- * dequeue pointer past the TD.
-- * The class driver clears the device side halt later.
-+ /* erratum A-007463:
-+ * After a transaction error, the controller switches the control
-+ * transfer data stage from IN to OUT direction.
- */
-- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
-+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-+ if (remaining && xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
-+ trb_comp_code) &&
-+ (xhci->quirks & XHCI_REVERSE_IN_OUT)) {
-+ memset(&deq_state, 0, sizeof(deq_state));
-+ xhci_find_new_dequeue_state(xhci, slot_id,
-+ ep_index, td->urb->stream_id, td, &deq_state);
-+ xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
-+ &deq_state);
-+ xhci_ring_cmd_db(xhci);
-+ } else {
-+ /* Issue a reset endpoint command to clear the host side
-+ * halt, followed by a set dequeue command to move the
-+ * dequeue pointer past the TD.
-+ * The class driver clears the device side halt later.
-+ */
-+ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, ep_trb,
- EP_HARD_RESET);
-+ }
- } else {
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
---- a/drivers/usb/host/xhci.c
-+++ b/drivers/usb/host/xhci.c
-@@ -1604,13 +1604,38 @@ static int xhci_urb_dequeue(struct usb_h
- ret = -ENOMEM;
- goto done;
- }
-- ep->ep_state |= EP_STOP_CMD_PENDING;
-- ep->stop_cmd_timer.expires = jiffies +
-+ /*
-+ * erratum A-009611: Issuing an End Transfer command on an IN
-+ * endpoint while a transfer is in progress on USB blocks the
-+ * transmission.
-+ * Workaround: Software must wait for all existing TRBs to
-+ * complete before issuing the End Transfer command.
-+ */
-+ if ((ep_ring->enqueue == ep_ring->dequeue &&
-+ (xhci->quirks & XHCI_STOP_TRANSFER_IN_BLOCK)) ||
-+ !(xhci->quirks & XHCI_STOP_TRANSFER_IN_BLOCK)) {
-+ ep->ep_state |= EP_STOP_CMD_PENDING;
-+ ep->stop_cmd_timer.expires = jiffies +
- XHCI_STOP_EP_CMD_TIMEOUT * HZ;
-- add_timer(&ep->stop_cmd_timer);
-- xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
-- ep_index, 0);
-- xhci_ring_cmd_db(xhci);
-+ add_timer(&ep->stop_cmd_timer);
-+ xhci_queue_stop_endpoint(xhci, command,
-+ urb->dev->slot_id,
-+ ep_index, 0);
-+ xhci_ring_cmd_db(xhci);
-+ }
-+
-+ /*
-+ * erratum A-009668: Stop Endpoint Command does not complete.
-+ * Workaround: Instead of issuing a Stop Endpoint Command,
-+ * issue a Disable Slot Command with the corresponding slot ID.
-+ * Alternatively, you can issue an Address Device Command with
-+ * BSR=1.
-+ */
-+ if ((urb->dev->speed <= USB_SPEED_HIGH) &&
-+ (xhci->quirks & XHCI_STOP_EP_IN_U1)) {
-+ xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
-+ urb->dev->slot_id);
-+ }
- }
- done:
- spin_unlock_irqrestore(&xhci->lock, flags);
---- a/drivers/usb/host/xhci.h
-+++ b/drivers/usb/host/xhci.h
-@@ -1794,7 +1794,7 @@ struct xhci_hcd {
- #define XHCI_STATE_DYING (1 << 0)
- #define XHCI_STATE_HALTED (1 << 1)
- #define XHCI_STATE_REMOVING (1 << 2)
-- unsigned long long quirks;
-+ unsigned long long quirks;
- #define XHCI_LINK_TRB_QUIRK BIT_ULL(0)
- #define XHCI_RESET_EP_QUIRK BIT_ULL(1)
- #define XHCI_NEC_HOST BIT_ULL(2)
-@@ -1830,6 +1830,9 @@ struct xhci_hcd {
- #define XHCI_SSIC_PORT_UNUSED BIT_ULL(22)
- #define XHCI_NO_64BIT_SUPPORT BIT_ULL(23)
- #define XHCI_MISSING_CAS BIT_ULL(24)
-+#define XHCI_REVERSE_IN_OUT BIT_ULL(32)
-+#define XHCI_STOP_TRANSFER_IN_BLOCK BIT_ULL(33)
-+#define XHCI_STOP_EP_IN_U1 BIT_ULL(34)
- /* For controller with a broken Port Disable implementation */
- #define XHCI_BROKEN_PORT_PED BIT_ULL(25)
- #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 BIT_ULL(26)
-@@ -1838,8 +1841,9 @@ struct xhci_hcd {
- #define XHCI_HW_LPM_DISABLE BIT_ULL(29)
- #define XHCI_SUSPEND_DELAY BIT_ULL(30)
- #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
--#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
--#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
-+#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(35)
-+#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(36)
-+#define XHCI_DIS_U1U2_WHEN_U3	BIT_ULL(37)
-
- unsigned int num_active_eps;
- unsigned int limit_active_eps;
---- a/drivers/usb/phy/phy-fsl-usb.c
-+++ b/drivers/usb/phy/phy-fsl-usb.c
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (C) 2007,2008 Freescale semiconductor, Inc.
-+ * Copyright 2007,2008 Freescale Semiconductor, Inc.
- *
- * Author: Li Yang <LeoLi@freescale.com>
- * Jerry Huang <Chang-Ming.Huang@freescale.com>
-@@ -470,6 +470,7 @@ void otg_reset_controller(void)
- int fsl_otg_start_host(struct otg_fsm *fsm, int on)
- {
- struct usb_otg *otg = fsm->otg;
-+ struct usb_bus *host = otg->host;
- struct device *dev;
- struct fsl_otg *otg_dev =
- container_of(otg->usb_phy, struct fsl_otg, phy);
-@@ -493,6 +494,7 @@ int fsl_otg_start_host(struct otg_fsm *f
- otg_reset_controller();
- VDBG("host on......\n");
- if (dev->driver->pm && dev->driver->pm->resume) {
-+ host->is_otg = 1;
- retval = dev->driver->pm->resume(dev);
- if (fsm->id) {
- /* default-b */
-@@ -517,8 +519,11 @@ int fsl_otg_start_host(struct otg_fsm *f
- else {
- VDBG("host off......\n");
- if (dev && dev->driver) {
-- if (dev->driver->pm && dev->driver->pm->suspend)
-+ if (dev->driver->pm &&
-+ dev->driver->pm->suspend) {
-+ host->is_otg = 1;
- retval = dev->driver->pm->suspend(dev);
-+ }
- if (fsm->id)
- /* default-b */
- fsl_otg_drv_vbus(fsm, 0);
-@@ -546,8 +551,17 @@ int fsl_otg_start_gadget(struct otg_fsm
- dev = otg->gadget->dev.parent;
-
- if (on) {
-- if (dev->driver->resume)
-+ /* Delay gadget resume to synchronize between host and gadget
-+ * drivers. Upon role-reversal host drv is shutdown by kernel
-+ * worker thread. By the time host drv shuts down, controller
-+ * gets programmed for gadget role. Shutting host drv after
-+ * this results in controller getting reset, and it stops
-+ * responding to otg events
-+ */
-+ if (dev->driver->resume) {
-+ msleep(1000);
- dev->driver->resume(dev);
-+ }
- } else {
- if (dev->driver->suspend)
- dev->driver->suspend(dev, otg_suspend_state);
-@@ -668,6 +682,10 @@ static void fsl_otg_event(struct work_st
- fsl_otg_start_host(fsm, 0);
- otg_drv_vbus(fsm, 0);
- fsl_otg_start_gadget(fsm, 1);
-+ } else {
-+ fsl_otg_start_gadget(fsm, 0);
-+ otg_drv_vbus(fsm, 1);
-+ fsl_otg_start_host(fsm, 1);
- }
- }
-
-@@ -720,6 +738,7 @@ irqreturn_t fsl_otg_isr(int irq, void *d
- {
- struct otg_fsm *fsm = &((struct fsl_otg *)dev_id)->fsm;
- struct usb_otg *otg = ((struct fsl_otg *)dev_id)->phy.otg;
-+ struct fsl_otg *otg_dev = dev_id;
- u32 otg_int_src, otg_sc;
-
- otg_sc = fsl_readl(&usb_dr_regs->otgsc);
-@@ -749,18 +768,8 @@ irqreturn_t fsl_otg_isr(int irq, void *d
- otg->gadget->is_a_peripheral = !fsm->id;
- VDBG("ID int (ID is %d)\n", fsm->id);
-
-- if (fsm->id) { /* switch to gadget */
-- schedule_delayed_work(
-- &((struct fsl_otg *)dev_id)->otg_event,
-- 100);
-- } else { /* switch to host */
-- cancel_delayed_work(&
-- ((struct fsl_otg *)dev_id)->
-- otg_event);
-- fsl_otg_start_gadget(fsm, 0);
-- otg_drv_vbus(fsm, 1);
-- fsl_otg_start_host(fsm, 1);
-- }
-+ schedule_delayed_work(&otg_dev->otg_event, 100);
-+
- return IRQ_HANDLED;
- }
- }
-@@ -920,12 +929,32 @@ int usb_otg_start(struct platform_device
- temp &= ~(PORTSC_PHY_TYPE_SEL | PORTSC_PTW);
- switch (pdata->phy_mode) {
- case FSL_USB2_PHY_ULPI:
-+ if (pdata->controller_ver) {
-+ /* controller version 1.6 or above */
-+ setbits32(&p_otg->dr_mem_map->control,
-+ USB_CTRL_ULPI_PHY_CLK_SEL);
-+ /*
-+ * Due to controller issue of PHY_CLK_VALID in ULPI
-+ * mode, we set USB_CTRL_USB_EN before checking
-+ * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
-+ */
-+ clrsetbits_be32(&p_otg->dr_mem_map->control,
-+ USB_CTRL_UTMI_PHY_EN, USB_CTRL_IOENB);
-+ }
- temp |= PORTSC_PTS_ULPI;
- break;
- case FSL_USB2_PHY_UTMI_WIDE:
- temp |= PORTSC_PTW_16BIT;
- /* fall through */
- case FSL_USB2_PHY_UTMI:
-+ if (pdata->controller_ver) {
-+ /* controller version 1.6 or above */
-+ setbits32(&p_otg->dr_mem_map->control,
-+ USB_CTRL_UTMI_PHY_EN);
-+ /* Delay for UTMI PHY CLK to become stable - 10ms */
-+ mdelay(FSL_UTMI_PHY_DLY);
-+ }
-+ setbits32(&p_otg->dr_mem_map->control, USB_CTRL_UTMI_PHY_EN);
- temp |= PORTSC_PTS_UTMI;
- /* fall through */
- default:
---- a/drivers/usb/phy/phy-fsl-usb.h
-+++ b/drivers/usb/phy/phy-fsl-usb.h
-@@ -199,6 +199,14 @@
- /* control Register Bit Masks */
- #define USB_CTRL_IOENB (0x1<<2)
- #define USB_CTRL_ULPI_INT0EN (0x1<<0)
-+#define USB_CTRL_WU_INT_EN (0x1<<1)
-+#define USB_CTRL_LINE_STATE_FILTER__EN (0x1<<3)
-+#define USB_CTRL_KEEP_OTG_ON (0x1<<4)
-+#define USB_CTRL_OTG_PORT (0x1<<5)
-+#define USB_CTRL_PLL_RESET (0x1<<8)
-+#define USB_CTRL_UTMI_PHY_EN (0x1<<9)
-+#define USB_CTRL_ULPI_PHY_CLK_SEL (0x1<<10)
-+#define USB_CTRL_PHY_CLK_VALID (0x1<<17)
-
- /* BCSR5 */
- #define BCSR5_INT_USB (0x02)
---- a/include/linux/usb.h
-+++ b/include/linux/usb.h
-@@ -432,6 +432,7 @@ struct usb_bus {
- * for control transfers?
- */
- u8 otg_port; /* 0, or number of OTG/HNP port */
-+ unsigned is_otg:1; /* true when host is also otg */
- unsigned is_b_host:1; /* true during some HNP roleswitches */
- unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
- unsigned no_stop_on_short:1; /*
---- a/include/linux/usb/of.h
-+++ b/include/linux/usb/of.h
-@@ -11,6 +11,8 @@
- #include <linux/usb/otg.h>
- #include <linux/usb/phy.h>
-
-+enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
-+
- #if IS_ENABLED(CONFIG_OF)
- enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
- bool of_usb_host_tpl_support(struct device_node *np);
diff --git a/target/linux/layerscape/patches-4.14/808-vfio-support-layerscape.patch b/target/linux/layerscape/patches-4.14/808-vfio-support-layerscape.patch
deleted file mode 100644
index c2424a41f0..0000000000
--- a/target/linux/layerscape/patches-4.14/808-vfio-support-layerscape.patch
+++ /dev/null
@@ -1,1093 +0,0 @@
-From 03ce521cd071706f755e3d2304ab1b8c47fd4910 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:59:09 +0800
-Subject: [PATCH] vfio: support layerscape
-
-This is an integrated patch of vfio for layerscape
-
-Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/vfio/Kconfig | 1 +
- drivers/vfio/Makefile | 1 +
- drivers/vfio/fsl-mc/Kconfig | 9 +
- drivers/vfio/fsl-mc/Makefile | 2 +
- drivers/vfio/fsl-mc/vfio_fsl_mc.c | 759 ++++++++++++++++++++++
- drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++
- drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 57 ++
- include/uapi/linux/vfio.h | 1 +
- 8 files changed, 1029 insertions(+)
- create mode 100644 drivers/vfio/fsl-mc/Kconfig
- create mode 100644 drivers/vfio/fsl-mc/Makefile
- create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
- create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
- create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
-
---- a/drivers/vfio/Kconfig
-+++ b/drivers/vfio/Kconfig
-@@ -47,4 +47,5 @@ menuconfig VFIO_NOIOMMU
- source "drivers/vfio/pci/Kconfig"
- source "drivers/vfio/platform/Kconfig"
- source "drivers/vfio/mdev/Kconfig"
-+source "drivers/vfio/fsl-mc/Kconfig"
- source "virt/lib/Kconfig"
---- a/drivers/vfio/Makefile
-+++ b/drivers/vfio/Makefile
-@@ -9,3 +9,4 @@ obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spa
- obj-$(CONFIG_VFIO_PCI) += pci/
- obj-$(CONFIG_VFIO_PLATFORM) += platform/
- obj-$(CONFIG_VFIO_MDEV) += mdev/
-+obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
---- /dev/null
-+++ b/drivers/vfio/fsl-mc/Kconfig
-@@ -0,0 +1,9 @@
-+config VFIO_FSL_MC
-+ tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
-+ depends on VFIO && FSL_MC_BUS && EVENTFD
-+ help
-+ Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
-+ (Management Complex) devices. This is required to passthrough
-+ fsl-mc bus devices using the VFIO framework.
-+
-+ If you don't know what to do here, say N.
---- /dev/null
-+++ b/drivers/vfio/fsl-mc/Makefile
-@@ -0,0 +1,2 @@
-+vfio-fsl_mc-y := vfio_fsl_mc.o
-+obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
---- /dev/null
-+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
-@@ -0,0 +1,759 @@
-+/*
-+ * Freescale Management Complex (MC) device passthrough using VFIO
-+ *
-+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2016-2017 NXP
-+ * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#include <linux/device.h>
-+#include <linux/iommu.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <linux/vfio.h>
-+#include <linux/delay.h>
-+#include <linux/fsl/mc.h>
-+
-+#include "vfio_fsl_mc_private.h"
-+
-+#define DRIVER_VERSION "0.10"
-+#define DRIVER_AUTHOR "Bharat Bhushan <bharat.bhushan@nxp.com>"
-+#define DRIVER_DESC "VFIO for FSL-MC devices - User Level meta-driver"
-+
-+static DEFINE_MUTEX(driver_lock);
-+
-+/* FSL-MC device regions (address and size) are aligned to 64K.
-+ * However, MC firmware reports a size smaller than 64K for some objects (the
-+ * reported size does not include reserved space beyond the valid bytes).
-+ * Align the size to PAGE_SIZE for userspace to mmap.
-+ */
-+static size_t aligned_region_size(struct fsl_mc_device *mc_dev, int index)
-+{
-+ size_t size;
-+
-+ size = resource_size(&mc_dev->regions[index]);
-+ return PAGE_ALIGN(size);
-+}
-+
-+static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
-+{
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+ int count = mc_dev->obj_desc.region_count;
-+ int i;
-+
-+ vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
-+ GFP_KERNEL);
-+ if (!vdev->regions)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < mc_dev->obj_desc.region_count; i++) {
-+ vdev->regions[i].addr = mc_dev->regions[i].start;
-+ vdev->regions[i].size = aligned_region_size(mc_dev, i);
-+ vdev->regions[i].type = VFIO_FSL_MC_REGION_TYPE_MMIO;
-+ if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE)
-+ vdev->regions[i].type |=
-+ VFIO_FSL_MC_REGION_TYPE_CACHEABLE;
-+ if (mc_dev->regions[i].flags & IORESOURCE_MEM)
-+ vdev->regions[i].type |=
-+ VFIO_FSL_MC_REGION_TYPE_SHAREABLE;
-+
-+ vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP;
-+ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
-+ if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
-+ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
-+ }
-+
-+ vdev->num_regions = mc_dev->obj_desc.region_count;
-+ return 0;
-+}
-+
-+static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
-+{
-+ int i;
-+
-+ for (i = 0; i < vdev->num_regions; i++)
-+ iounmap(vdev->regions[i].ioaddr);
-+
-+ vdev->num_regions = 0;
-+ kfree(vdev->regions);
-+}
-+
-+static int vfio_fsl_mc_open(void *device_data)
-+{
-+ struct vfio_fsl_mc_device *vdev = device_data;
-+ int ret;
-+
-+ if (!try_module_get(THIS_MODULE))
-+ return -ENODEV;
-+
-+ mutex_lock(&driver_lock);
-+ if (!vdev->refcnt) {
-+ ret = vfio_fsl_mc_regions_init(vdev);
-+ if (ret)
-+ goto error_region_init;
-+
-+ ret = vfio_fsl_mc_irqs_init(vdev);
-+ if (ret)
-+ goto error_irq_init;
-+ }
-+
-+ vdev->refcnt++;
-+ mutex_unlock(&driver_lock);
-+ return 0;
-+
-+error_irq_init:
-+ vfio_fsl_mc_regions_cleanup(vdev);
-+error_region_init:
-+ mutex_unlock(&driver_lock);
-+ if (ret)
-+ module_put(THIS_MODULE);
-+
-+ return ret;
-+}
-+
-+static void vfio_fsl_mc_release(void *device_data)
-+{
-+ struct vfio_fsl_mc_device *vdev = device_data;
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+
-+ mutex_lock(&driver_lock);
-+
-+ if (!(--vdev->refcnt)) {
-+ vfio_fsl_mc_regions_cleanup(vdev);
-+ vfio_fsl_mc_irqs_cleanup(vdev);
-+ }
-+
-+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
-+ dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ mc_dev->obj_desc.id);
-+
-+ mutex_unlock(&driver_lock);
-+
-+ module_put(THIS_MODULE);
-+}
-+
-+static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ struct vfio_fsl_mc_device *vdev = device_data;
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+ unsigned long minsz;
-+
-+ if (WARN_ON(!mc_dev))
-+ return -ENODEV;
-+
-+ switch (cmd) {
-+ case VFIO_DEVICE_GET_INFO:
-+ {
-+ struct vfio_device_info info;
-+
-+ minsz = offsetofend(struct vfio_device_info, num_irqs);
-+
-+ if (copy_from_user(&info, (void __user *)arg, minsz))
-+ return -EFAULT;
-+
-+ if (info.argsz < minsz)
-+ return -EINVAL;
-+
-+ info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
-+ info.num_regions = mc_dev->obj_desc.region_count;
-+ info.num_irqs = mc_dev->obj_desc.irq_count;
-+
-+ return copy_to_user((void __user *)arg, &info, minsz);
-+ }
-+ case VFIO_DEVICE_GET_REGION_INFO:
-+ {
-+ struct vfio_region_info info;
-+
-+ minsz = offsetofend(struct vfio_region_info, offset);
-+
-+ if (copy_from_user(&info, (void __user *)arg, minsz))
-+ return -EFAULT;
-+
-+ if (info.argsz < minsz)
-+ return -EINVAL;
-+
-+ if (info.index >= vdev->num_regions)
-+ return -EINVAL;
-+
-+ /* map offset to the physical address */
-+ info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
-+ info.size = vdev->regions[info.index].size;
-+ info.flags = vdev->regions[info.index].flags;
-+
-+ return copy_to_user((void __user *)arg, &info, minsz);
-+ }
-+ case VFIO_DEVICE_GET_IRQ_INFO:
-+ {
-+ struct vfio_irq_info info;
-+
-+ minsz = offsetofend(struct vfio_irq_info, count);
-+ if (copy_from_user(&info, (void __user *)arg, minsz))
-+ return -EFAULT;
-+
-+ if (info.argsz < minsz)
-+ return -EINVAL;
-+
-+ if (info.index >= mc_dev->obj_desc.irq_count)
-+ return -EINVAL;
-+
-+ if (vdev->mc_irqs != NULL) {
-+ info.flags = vdev->mc_irqs[info.index].flags;
-+ info.count = vdev->mc_irqs[info.index].count;
-+ } else {
-+ /*
-+			 * If IRQs are not initialized then these cannot
-+			 * be configured and used by user-space.
-+ */
-+ info.flags = 0;
-+ info.count = 0;
-+ }
-+
-+ return copy_to_user((void __user *)arg, &info, minsz);
-+ }
-+ case VFIO_DEVICE_SET_IRQS:
-+ {
-+ struct vfio_irq_set hdr;
-+ u8 *data = NULL;
-+ int ret = 0;
-+
-+ minsz = offsetofend(struct vfio_irq_set, count);
-+
-+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
-+ return -EFAULT;
-+
-+ if (hdr.argsz < minsz)
-+ return -EINVAL;
-+
-+ if (hdr.index >= mc_dev->obj_desc.irq_count)
-+ return -EINVAL;
-+
-+ if (hdr.start != 0 || hdr.count > 1)
-+ return -EINVAL;
-+
-+ if (hdr.count == 0 &&
-+ (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) ||
-+ !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER)))
-+ return -EINVAL;
-+
-+ if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
-+ VFIO_IRQ_SET_ACTION_TYPE_MASK))
-+ return -EINVAL;
-+
-+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
-+ size_t size;
-+
-+ if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
-+ size = sizeof(uint8_t);
-+ else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
-+ size = sizeof(int32_t);
-+ else
-+ return -EINVAL;
-+
-+ if (hdr.argsz - minsz < hdr.count * size)
-+ return -EINVAL;
-+
-+ data = memdup_user((void __user *)(arg + minsz),
-+ hdr.count * size);
-+ if (IS_ERR(data))
-+ return PTR_ERR(data);
-+ }
-+
-+ ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
-+ hdr.index, hdr.start,
-+ hdr.count, data);
-+ return ret;
-+ }
-+ case VFIO_DEVICE_RESET:
-+ {
-+ return -EINVAL;
-+ }
-+ default:
-+ return -EINVAL;
-+ }
-+}
-+
-+static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ struct vfio_fsl_mc_device *vdev = device_data;
-+ unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
-+ loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
-+ struct vfio_fsl_mc_region *region;
-+ uint64_t data[8];
-+ int i;
-+
-+ /* Read ioctl supported only for DPRC and DPMCP device */
-+ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc") &&
-+ strcmp(vdev->mc_dev->obj_desc.type, "dpmcp"))
-+ return -EINVAL;
-+
-+ if (index >= vdev->num_regions)
-+ return -EINVAL;
-+
-+ region = &vdev->regions[index];
-+
-+ if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
-+ return -EINVAL;
-+
-+	if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
-+ return -EINVAL;
-+
-+ if (!region->ioaddr) {
-+ region->ioaddr = ioremap_nocache(region->addr, region->size);
-+ if (!region->ioaddr)
-+ return -ENOMEM;
-+ }
-+
-+ if (count != 64 || off != 0)
-+ return -EINVAL;
-+
-+ for (i = 7; i >= 0; i--)
-+ data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
-+
-+ if (copy_to_user(buf, data, 64))
-+ return -EFAULT;
-+
-+ return count;
-+}
-+
-+#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
-+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
-+
-+static int vfio_fsl_mc_dprc_wait_for_response(void __iomem *ioaddr)
-+{
-+ enum mc_cmd_status status;
-+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
-+
-+ for (;;) {
-+ u64 header;
-+ struct mc_cmd_header *resp_hdr;
-+
-+ header = cpu_to_le64(readq_relaxed(ioaddr));
-+
-+ resp_hdr = (struct mc_cmd_header *)&header;
-+ status = (enum mc_cmd_status)resp_hdr->status;
-+ if (status != MC_CMD_STATUS_READY)
-+ break;
-+
-+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
-+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
-+ if (timeout_usecs == 0)
-+ return -ETIMEDOUT;
-+ }
-+
-+ return 0;
-+}
-+
-+static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
-+{
-+ int i;
-+
-+	/* Write the command parameters into the portal */
-+ for (i = 7; i >= 1; i--)
-+ writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));
-+
-+ /* Write command header in the end */
-+ writeq(cmd_data[0], ioaddr);
-+
-+ /* Wait for response before returning to user-space
-+ * This can be optimized in future to even prepare response
-+ * before returning to user-space and avoid read ioctl.
-+ */
-+ return vfio_fsl_mc_dprc_wait_for_response(ioaddr);
-+}
-+
-+static int vfio_handle_dprc_commands(void __iomem *ioaddr, uint64_t *cmd_data)
-+{
-+ uint64_t cmd_hdr = cmd_data[0];
-+ int cmd = (cmd_hdr >> 52) & 0xfff;
-+
-+ switch (cmd) {
-+ case DPRC_CMDID_OPEN:
-+ default:
-+ return vfio_fsl_mc_send_command(ioaddr, cmd_data);
-+ }
-+
-+ return 0;
-+}
-+
-+static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ struct vfio_fsl_mc_device *vdev = device_data;
-+ unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
-+ loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
-+ struct vfio_fsl_mc_region *region;
-+ uint64_t data[8];
-+ int ret;
-+
-+ /* Write ioctl supported only for DPRC and DPMCP device */
-+ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc") &&
-+ strcmp(vdev->mc_dev->obj_desc.type, "dpmcp"))
-+ return -EINVAL;
-+
-+ if (index >= vdev->num_regions)
-+ return -EINVAL;
-+
-+ region = &vdev->regions[index];
-+
-+ if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
-+ return -EINVAL;
-+
-+	if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
-+ return -EINVAL;
-+
-+ if (!region->ioaddr) {
-+ region->ioaddr = ioremap_nocache(region->addr, region->size);
-+ if (!region->ioaddr)
-+ return -ENOMEM;
-+ }
-+
-+ if (count != 64 || off != 0)
-+ return -EINVAL;
-+
-+ if (copy_from_user(&data, buf, 64))
-+ return -EFAULT;
-+
-+ ret = vfio_handle_dprc_commands(region->ioaddr, data);
-+ if (ret)
-+ return ret;
-+
-+ return count;
-+}
-+
-+static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
-+ struct vm_area_struct *vma)
-+{
-+ u64 size = vma->vm_end - vma->vm_start;
-+ u64 pgoff, base;
-+
-+ pgoff = vma->vm_pgoff &
-+ ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
-+ base = pgoff << PAGE_SHIFT;
-+
-+ if (region.size < PAGE_SIZE || base + size > region.size)
-+ return -EINVAL;
-+ /*
-+ * Set the REGION_TYPE_CACHEABLE (QBman CENA regs) to be the
-+ * cache inhibited area of the portal to avoid coherency issues
-+ * if a user migrates to another core.
-+ */
-+ if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE) {
-+ if (region.type & VFIO_FSL_MC_REGION_TYPE_SHAREABLE)
-+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
-+ else
-+ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
-+ } else
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+
-+ vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
-+
-+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+ size, vma->vm_page_prot);
-+}
-+
-+/* Allows mmaping fsl_mc device regions in assigned DPRC */
-+static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
-+{
-+ struct vfio_fsl_mc_device *vdev = device_data;
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+ unsigned long size, addr;
-+ int index;
-+
-+ index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
-+
-+ if (vma->vm_end < vma->vm_start)
-+ return -EINVAL;
-+ if (vma->vm_start & ~PAGE_MASK)
-+ return -EINVAL;
-+ if (vma->vm_end & ~PAGE_MASK)
-+ return -EINVAL;
-+ if (!(vma->vm_flags & VM_SHARED))
-+ return -EINVAL;
-+ if (index >= vdev->num_regions)
-+ return -EINVAL;
-+
-+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
-+ return -EINVAL;
-+
-+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
-+ && (vma->vm_flags & VM_READ))
-+ return -EINVAL;
-+
-+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
-+ && (vma->vm_flags & VM_WRITE))
-+ return -EINVAL;
-+
-+ addr = vdev->regions[index].addr;
-+ size = vdev->regions[index].size;
-+
-+ vma->vm_private_data = mc_dev;
-+
-+ if (vdev->regions[index].type & VFIO_FSL_MC_REGION_TYPE_MMIO)
-+ return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
-+
-+ return -EFAULT;
-+}
-+
-+static const struct vfio_device_ops vfio_fsl_mc_ops = {
-+ .name = "vfio-fsl-mc",
-+ .open = vfio_fsl_mc_open,
-+ .release = vfio_fsl_mc_release,
-+ .ioctl = vfio_fsl_mc_ioctl,
-+ .read = vfio_fsl_mc_read,
-+ .write = vfio_fsl_mc_write,
-+ .mmap = vfio_fsl_mc_mmap,
-+};
-+
-+static int vfio_fsl_mc_initialize_dprc(struct vfio_fsl_mc_device *vdev)
-+{
-+ struct device *root_dprc_dev;
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+ struct device *dev = &mc_dev->dev;
-+ struct fsl_mc_bus *mc_bus;
-+ struct irq_domain *mc_msi_domain;
-+ unsigned int irq_count;
-+ int ret;
-+
-+ /* device must be DPRC */
-+ if (strcmp(mc_dev->obj_desc.type, "dprc"))
-+ return -EINVAL;
-+
-+ /* mc_io must be un-initialized */
-+ WARN_ON(mc_dev->mc_io);
-+
-+ /* allocate a portal from the root DPRC for vfio use */
-+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
-+ if (WARN_ON(!root_dprc_dev))
-+ return -EINVAL;
-+
-+ ret = fsl_mc_portal_allocate(to_fsl_mc_device(root_dprc_dev),
-+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
-+ &mc_dev->mc_io);
-+ if (ret < 0)
-+ goto clean_msi_domain;
-+
-+	/* Reset MCP before moving on */
-+ ret = fsl_mc_portal_reset(mc_dev->mc_io);
-+ if (ret < 0) {
-+ dev_err(dev, "dprc portal reset failed: error = %d\n", ret);
-+ goto free_mc_portal;
-+ }
-+
-+ /* MSI domain set up */
-+ ret = fsl_mc_find_msi_domain(root_dprc_dev->parent, &mc_msi_domain);
-+ if (ret < 0)
-+ goto free_mc_portal;
-+
-+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
-+
-+ ret = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
-+ &mc_dev->mc_handle);
-+ if (ret) {
-+ dev_err(dev, "dprc_open() failed: error = %d\n", ret);
-+ goto free_mc_portal;
-+ }
-+
-+ /* Initialize resource pool */
-+ fsl_mc_init_all_resource_pools(mc_dev);
-+
-+ mc_bus = to_fsl_mc_bus(mc_dev);
-+
-+ if (!mc_bus->irq_resources) {
-+ irq_count = FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS;
-+ ret = fsl_mc_populate_irq_pool(mc_bus, irq_count);
-+ if (ret < 0) {
-+ dev_err(dev, "%s: Failed to init irq-pool\n", __func__);
-+ goto clean_resource_pool;
-+ }
-+ }
-+
-+ mutex_init(&mc_bus->scan_mutex);
-+
-+ mutex_lock(&mc_bus->scan_mutex);
-+ ret = dprc_scan_objects(mc_dev, mc_dev->driver_override,
-+ &irq_count);
-+ mutex_unlock(&mc_bus->scan_mutex);
-+ if (ret) {
-+ dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret);
-+ goto clean_irq_pool;
-+ }
-+
-+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
-+ dev_warn(&mc_dev->dev,
-+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
-+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
-+ }
-+
-+ return 0;
-+
-+clean_irq_pool:
-+ fsl_mc_cleanup_irq_pool(mc_bus);
-+
-+clean_resource_pool:
-+ fsl_mc_cleanup_all_resource_pools(mc_dev);
-+ dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-+
-+free_mc_portal:
-+ fsl_mc_portal_free(mc_dev->mc_io);
-+
-+clean_msi_domain:
-+ dev_set_msi_domain(&mc_dev->dev, NULL);
-+
-+ return ret;
-+}
-+
-+static int vfio_fsl_mc_device_remove(struct device *dev, void *data)
-+{
-+ struct fsl_mc_device *mc_dev;
-+
-+ WARN_ON(dev == NULL);
-+
-+ mc_dev = to_fsl_mc_device(dev);
-+ if (WARN_ON(mc_dev == NULL))
-+ return -ENODEV;
-+
-+ fsl_mc_device_remove(mc_dev);
-+ return 0;
-+}
-+
-+static void vfio_fsl_mc_cleanup_dprc(struct vfio_fsl_mc_device *vdev)
-+{
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+ struct fsl_mc_bus *mc_bus;
-+
-+ /* device must be DPRC */
-+ if (strcmp(mc_dev->obj_desc.type, "dprc"))
-+ return;
-+
-+ device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove);
-+
-+ mc_bus = to_fsl_mc_bus(mc_dev);
-+ if (dev_get_msi_domain(&mc_dev->dev))
-+ fsl_mc_cleanup_irq_pool(mc_bus);
-+
-+ dev_set_msi_domain(&mc_dev->dev, NULL);
-+
-+ fsl_mc_cleanup_all_resource_pools(mc_dev);
-+ dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-+ fsl_mc_portal_free(mc_dev->mc_io);
-+}
-+
-+static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
-+{
-+ struct iommu_group *group;
-+ struct vfio_fsl_mc_device *vdev;
-+ struct device *dev = &mc_dev->dev;
-+ int ret;
-+
-+ group = vfio_iommu_group_get(dev);
-+ if (!group) {
-+ dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__);
-+ return -EINVAL;
-+ }
-+
-+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
-+ if (!vdev) {
-+ vfio_iommu_group_put(group, dev);
-+ return -ENOMEM;
-+ }
-+
-+ vdev->mc_dev = mc_dev;
-+
-+ ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
-+ if (ret) {
-+ dev_err(dev, "%s: Failed to add to vfio group\n", __func__);
-+ goto free_vfio_device;
-+ }
-+
-+	/* DPRC container is scanned and its children bound to the vfio driver */
-+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
-+ ret = vfio_fsl_mc_initialize_dprc(vdev);
-+ if (ret) {
-+ vfio_del_group_dev(dev);
-+ goto free_vfio_device;
-+ }
-+ } else {
-+ struct fsl_mc_device *mc_bus_dev;
-+
-+ /* Non-dprc devices share mc_io from the parent dprc */
-+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-+ if (mc_bus_dev == NULL) {
-+ vfio_del_group_dev(dev);
-+ goto free_vfio_device;
-+ }
-+
-+ mc_dev->mc_io = mc_bus_dev->mc_io;
-+
-+ /* Inherit parent MSI domain */
-+ dev_set_msi_domain(&mc_dev->dev,
-+ dev_get_msi_domain(mc_dev->dev.parent));
-+ }
-+ return 0;
-+
-+free_vfio_device:
-+ kfree(vdev);
-+ vfio_iommu_group_put(group, dev);
-+ return ret;
-+}
-+
-+static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
-+{
-+ struct vfio_fsl_mc_device *vdev;
-+ struct device *dev = &mc_dev->dev;
-+
-+ vdev = vfio_del_group_dev(dev);
-+ if (!vdev)
-+ return -EINVAL;
-+
-+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
-+ vfio_fsl_mc_cleanup_dprc(vdev);
-+ else
-+ dev_set_msi_domain(&mc_dev->dev, NULL);
-+
-+ mc_dev->mc_io = NULL;
-+
-+ vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
-+ kfree(vdev);
-+
-+ return 0;
-+}
-+
-+/*
-+ * vfio-fsl_mc is a meta-driver, so use driver_override interface to
-+ * bind a fsl_mc container with this driver and match_id_table is NULL.
-+ */
-+static struct fsl_mc_driver vfio_fsl_mc_driver = {
-+ .probe = vfio_fsl_mc_probe,
-+ .remove = vfio_fsl_mc_remove,
-+ .match_id_table = NULL,
-+ .driver = {
-+ .name = "vfio-fsl-mc",
-+ .owner = THIS_MODULE,
-+ },
-+};
-+
-+static int __init vfio_fsl_mc_driver_init(void)
-+{
-+ return fsl_mc_driver_register(&vfio_fsl_mc_driver);
-+}
-+
-+static void __exit vfio_fsl_mc_driver_exit(void)
-+{
-+ fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
-+}
-+
-+module_init(vfio_fsl_mc_driver_init);
-+module_exit(vfio_fsl_mc_driver_exit);
-+
-+MODULE_VERSION(DRIVER_VERSION);
-+MODULE_LICENSE("GPL v2");
-+MODULE_AUTHOR(DRIVER_AUTHOR);
-+MODULE_DESCRIPTION(DRIVER_DESC);
---- /dev/null
-+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
-@@ -0,0 +1,199 @@
-+/*
-+ * Freescale Management Complex (MC) device passthrough using VFIO
-+ *
-+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
-+ * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#include <linux/vfio.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <linux/eventfd.h>
-+#include <linux/msi.h>
-+
-+#include "linux/fsl/mc.h"
-+#include "vfio_fsl_mc_private.h"
-+
-+static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
-+{
-+ struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
-+
-+ eventfd_signal(mc_irq->trigger, 1);
-+ return IRQ_HANDLED;
-+}
-+
-+static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev,
-+ unsigned int index, unsigned int start,
-+ unsigned int count, uint32_t flags,
-+ void *data)
-+{
-+ return -EINVAL;
-+}
-+
-+static int vfio_fsl_mc_irq_unmask(struct vfio_fsl_mc_device *vdev,
-+ unsigned int index, unsigned int start,
-+ unsigned int count, uint32_t flags,
-+ void *data)
-+{
-+ return -EINVAL;
-+}
-+
-+static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
-+ int index, int fd)
-+{
-+ struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
-+ struct eventfd_ctx *trigger;
-+ int hwirq;
-+ int ret;
-+
-+ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
-+ if (irq->trigger) {
-+ free_irq(hwirq, irq);
-+ kfree(irq->name);
-+ eventfd_ctx_put(irq->trigger);
-+ irq->trigger = NULL;
-+ }
-+
-+ if (fd < 0) /* Disable only */
-+ return 0;
-+
-+ irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
-+ hwirq, dev_name(&vdev->mc_dev->dev));
-+ if (!irq->name)
-+ return -ENOMEM;
-+
-+ trigger = eventfd_ctx_fdget(fd);
-+ if (IS_ERR(trigger)) {
-+ kfree(irq->name);
-+ return PTR_ERR(trigger);
-+ }
-+
-+ irq->trigger = trigger;
-+
-+ ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
-+ irq->name, irq);
-+ if (ret) {
-+ kfree(irq->name);
-+ eventfd_ctx_put(trigger);
-+ irq->trigger = NULL;
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev)
-+{
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+ struct vfio_fsl_mc_irq *mc_irq;
-+ int irq_count;
-+ int ret, i;
-+
-+ /* Device does not support any interrupt */
-+ if (mc_dev->obj_desc.irq_count == 0)
-+ return 0;
-+
-+ irq_count = mc_dev->obj_desc.irq_count;
-+
-+ mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL);
-+ if (mc_irq == NULL)
-+ return -ENOMEM;
-+
-+ /* Allocate IRQs */
-+ ret = fsl_mc_allocate_irqs(mc_dev);
-+ if (ret) {
-+ kfree(mc_irq);
-+ return ret;
-+ }
-+
-+ for (i = 0; i < irq_count; i++) {
-+ mc_irq[i].count = 1;
-+ mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD;
-+ }
-+
-+ vdev->mc_irqs = mc_irq;
-+
-+ return 0;
-+}
-+
-+/* Free All IRQs for the given MC object */
-+void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
-+{
-+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
-+ int irq_count = mc_dev->obj_desc.irq_count;
-+ int i;
-+
-+ /* Device does not support any interrupt */
-+ if (mc_dev->obj_desc.irq_count == 0)
-+ return;
-+
-+ for (i = 0; i < irq_count; i++)
-+ vfio_set_trigger(vdev, i, -1);
-+
-+ fsl_mc_free_irqs(mc_dev);
-+ kfree(vdev->mc_irqs);
-+}
-+
-+static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
-+ unsigned int index, unsigned int start,
-+ unsigned int count, uint32_t flags,
-+ void *data)
-+{
-+ struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
-+ int hwirq;
-+
-+ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
-+ return vfio_set_trigger(vdev, index, -1);
-+
-+ if (start != 0 || count != 1)
-+ return -EINVAL;
-+
-+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
-+ int32_t fd = *(int32_t *)data;
-+
-+ return vfio_set_trigger(vdev, index, fd);
-+ }
-+
-+ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
-+
-+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
-+ vfio_fsl_mc_irq_handler(hwirq, irq);
-+
-+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
-+ uint8_t trigger = *(uint8_t *)data;
-+
-+ if (trigger)
-+ vfio_fsl_mc_irq_handler(hwirq, irq);
-+ }
-+
-+ return 0;
-+}
-+
-+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
-+ uint32_t flags, unsigned int index,
-+ unsigned int start, unsigned int count,
-+ void *data)
-+{
-+ int ret = -ENOTTY;
-+
-+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
-+ case VFIO_IRQ_SET_ACTION_MASK:
-+ ret = vfio_fsl_mc_irq_mask(vdev, index, start, count,
-+ flags, data);
-+ break;
-+ case VFIO_IRQ_SET_ACTION_UNMASK:
-+ ret = vfio_fsl_mc_irq_unmask(vdev, index, start, count,
-+ flags, data);
-+ break;
-+ case VFIO_IRQ_SET_ACTION_TRIGGER:
-+ ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start,
-+ count, flags, data);
-+ break;
-+ }
-+
-+ return ret;
-+}
---- /dev/null
-+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
-@@ -0,0 +1,57 @@
-+/*
-+ * Freescale Management Complex VFIO private declarations
-+ *
-+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2016 NXP
-+ * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#ifndef VFIO_FSL_MC_PRIVATE_H
-+#define VFIO_FSL_MC_PRIVATE_H
-+
-+#define VFIO_FSL_MC_OFFSET_SHIFT 40
-+#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
-+
-+#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) (off >> VFIO_FSL_MC_OFFSET_SHIFT)
-+
-+#define VFIO_FSL_MC_INDEX_TO_OFFSET(index) \
-+ ((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
-+
-+struct vfio_fsl_mc_irq {
-+ u32 flags;
-+ u32 count;
-+ struct eventfd_ctx *trigger;
-+ char *name;
-+};
-+
-+struct vfio_fsl_mc_region {
-+ u32 flags;
-+#define VFIO_FSL_MC_REGION_TYPE_MMIO 1
-+#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE 2
-+#define VFIO_FSL_MC_REGION_TYPE_SHAREABLE 4
-+
-+ u32 type;
-+ u64 addr;
-+ resource_size_t size;
-+ void __iomem *ioaddr;
-+};
-+
-+struct vfio_fsl_mc_device {
-+ struct fsl_mc_device *mc_dev;
-+ int refcnt;
-+ u32 num_regions;
-+ struct vfio_fsl_mc_region *regions;
-+ struct vfio_fsl_mc_irq *mc_irqs;
-+};
-+
-+int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev);
-+void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
-+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
-+ uint32_t flags, unsigned int index,
-+ unsigned int start, unsigned int count,
-+ void *data);
-+#endif /* VFIO_FSL_MC_PRIVATE_H */
---- a/include/uapi/linux/vfio.h
-+++ b/include/uapi/linux/vfio.h
-@@ -200,6 +200,7 @@ struct vfio_device_info {
- #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
- #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
- #define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
-+#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 5) /* vfio-fsl-mc device */
- __u32 num_regions; /* Max region index + 1 */
- __u32 num_irqs; /* Max IRQ index + 1 */
- };
diff --git a/target/linux/layerscape/patches-4.14/809-flexcan-support-layerscape.patch b/target/linux/layerscape/patches-4.14/809-flexcan-support-layerscape.patch
deleted file mode 100644
index 75a6d97adb..0000000000
--- a/target/linux/layerscape/patches-4.14/809-flexcan-support-layerscape.patch
+++ /dev/null
@@ -1,596 +0,0 @@
-From 3ed707fde8a33f2b888f75ac2f5e0a98e7774dad Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:27 +0800
-Subject: [PATCH 26/40] flexcan: support layerscape
-This is an integrated patch of flexcan for layerscape
-
-Signed-off-by: Bhupesh Sharma <bhupesh.sharma@freescale.com>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
-Signed-off-by: Sakar Arora <Sakar.Arora@freescale.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- drivers/net/can/flexcan.c | 240 ++++++++++++++++++++++----------------
- 1 file changed, 138 insertions(+), 102 deletions(-)
-
---- a/drivers/net/can/flexcan.c
-+++ b/drivers/net/can/flexcan.c
-@@ -190,6 +190,7 @@
- * MX53 FlexCAN2 03.00.00.00 yes no no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
- * VF610 FlexCAN3 ? no yes no yes yes?
-+ * LS1021A FlexCAN2 03.00.04.00 no yes no yes
- *
- * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
- */
-@@ -279,6 +280,10 @@ struct flexcan_priv {
- struct clk *clk_per;
- const struct flexcan_devtype_data *devtype_data;
- struct regulator *reg_xceiver;
-+
-+ /* Read and Write APIs */
-+ u32 (*read)(void __iomem *addr);
-+ void (*write)(u32 val, void __iomem *addr);
- };
-
- static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
-@@ -301,6 +306,11 @@ static const struct flexcan_devtype_data
- FLEXCAN_QUIRK_BROKEN_PERR_STATE,
- };
-
-+static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
-+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
-+};
-+
- static const struct can_bittiming_const flexcan_bittiming_const = {
- .name = DRV_NAME,
- .tseg1_min = 4,
-@@ -313,39 +323,45 @@ static const struct can_bittiming_const
- .brp_inc = 1,
- };
-
--/* Abstract off the read/write for arm versus ppc. This
-- * assumes that PPC uses big-endian registers and everything
-- * else uses little-endian registers, independent of CPU
-- * endianness.
-+/* FlexCAN module is essentially modelled as a little-endian IP in most
-+ * SoCs, i.e the registers as well as the message buffer areas are
-+ * implemented in a little-endian fashion.
-+ *
-+ * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN
-+ * module in a big-endian fashion (i.e the registers as well as the
-+ * message buffer areas are implemented in a big-endian way).
-+ *
-+ * In addition, the FlexCAN module can be found on SoCs having ARM or
-+ * PPC cores. So, we need to abstract off the register read/write
-+ * functions, ensuring that these cater to all the combinations of module
-+ * endianness and underlying CPU endianness.
- */
--#if defined(CONFIG_PPC)
--static inline u32 flexcan_read(void __iomem *addr)
-+static inline u32 flexcan_read_be(void __iomem *addr)
- {
-- return in_be32(addr);
-+ return ioread32be(addr);
- }
-
--static inline void flexcan_write(u32 val, void __iomem *addr)
-+static inline void flexcan_write_be(u32 val, void __iomem *addr)
- {
-- out_be32(addr, val);
-+ iowrite32be(val, addr);
- }
--#else
--static inline u32 flexcan_read(void __iomem *addr)
-+
-+static inline u32 flexcan_read_le(void __iomem *addr)
- {
-- return readl(addr);
-+ return ioread32(addr);
- }
-
--static inline void flexcan_write(u32 val, void __iomem *addr)
-+static inline void flexcan_write_le(u32 val, void __iomem *addr)
- {
-- writel(val, addr);
-+ iowrite32(val, addr);
- }
--#endif
-
- static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
- {
- struct flexcan_regs __iomem *regs = priv->regs;
- u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
-
-- flexcan_write(reg_ctrl, &regs->ctrl);
-+ priv->write(reg_ctrl, &regs->ctrl);
- }
-
- static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
-@@ -353,7 +369,7 @@ static inline void flexcan_error_irq_dis
- struct flexcan_regs __iomem *regs = priv->regs;
- u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
-
-- flexcan_write(reg_ctrl, &regs->ctrl);
-+ priv->write(reg_ctrl, &regs->ctrl);
- }
-
- static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
-@@ -378,14 +394,14 @@ static int flexcan_chip_enable(struct fl
- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
- u32 reg;
-
-- reg = flexcan_read(&regs->mcr);
-+ reg = priv->read(&regs->mcr);
- reg &= ~FLEXCAN_MCR_MDIS;
-- flexcan_write(reg, &regs->mcr);
-+ priv->write(reg, &regs->mcr);
-
-- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
- udelay(10);
-
-- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
-+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
- return -ETIMEDOUT;
-
- return 0;
-@@ -397,14 +413,14 @@ static int flexcan_chip_disable(struct f
- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
- u32 reg;
-
-- reg = flexcan_read(&regs->mcr);
-+ reg = priv->read(&regs->mcr);
- reg |= FLEXCAN_MCR_MDIS;
-- flexcan_write(reg, &regs->mcr);
-+ priv->write(reg, &regs->mcr);
-
-- while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-+ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
- udelay(10);
-
-- if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-+ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
- return -ETIMEDOUT;
-
- return 0;
-@@ -416,14 +432,14 @@ static int flexcan_chip_freeze(struct fl
- unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
- u32 reg;
-
-- reg = flexcan_read(&regs->mcr);
-+ reg = priv->read(&regs->mcr);
- reg |= FLEXCAN_MCR_HALT;
-- flexcan_write(reg, &regs->mcr);
-+ priv->write(reg, &regs->mcr);
-
-- while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-+ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
- udelay(100);
-
-- if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-+ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
- return -ETIMEDOUT;
-
- return 0;
-@@ -435,14 +451,14 @@ static int flexcan_chip_unfreeze(struct
- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
- u32 reg;
-
-- reg = flexcan_read(&regs->mcr);
-+ reg = priv->read(&regs->mcr);
- reg &= ~FLEXCAN_MCR_HALT;
-- flexcan_write(reg, &regs->mcr);
-+ priv->write(reg, &regs->mcr);
-
-- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
- udelay(10);
-
-- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
-+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
- return -ETIMEDOUT;
-
- return 0;
-@@ -453,11 +469,11 @@ static int flexcan_chip_softreset(struct
- struct flexcan_regs __iomem *regs = priv->regs;
- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
-
-- flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
-- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
-+ priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
-+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
- udelay(10);
-
-- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
-+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
- return -ETIMEDOUT;
-
- return 0;
-@@ -468,7 +484,7 @@ static int __flexcan_get_berr_counter(co
- {
- const struct flexcan_priv *priv = netdev_priv(dev);
- struct flexcan_regs __iomem *regs = priv->regs;
-- u32 reg = flexcan_read(&regs->ecr);
-+ u32 reg = priv->read(&regs->ecr);
-
- bec->txerr = (reg >> 0) & 0xff;
- bec->rxerr = (reg >> 8) & 0xff;
-@@ -524,24 +540,24 @@ static int flexcan_start_xmit(struct sk_
-
- if (cf->can_dlc > 0) {
- data = be32_to_cpup((__be32 *)&cf->data[0]);
-- flexcan_write(data, &priv->tx_mb->data[0]);
-+ priv->write(data, &priv->tx_mb->data[0]);
- }
- if (cf->can_dlc > 4) {
- data = be32_to_cpup((__be32 *)&cf->data[4]);
-- flexcan_write(data, &priv->tx_mb->data[1]);
-+ priv->write(data, &priv->tx_mb->data[1]);
- }
-
- can_put_echo_skb(skb, dev, 0);
-
-- flexcan_write(can_id, &priv->tx_mb->can_id);
-- flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
-+ priv->write(can_id, &priv->tx_mb->can_id);
-+ priv->write(ctrl, &priv->tx_mb->can_ctrl);
-
- /* Errata ERR005829 step8:
- * Write twice INACTIVE(0x8) code to first MB.
- */
-- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb_reserved->can_ctrl);
-- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb_reserved->can_ctrl);
-
- return NETDEV_TX_OK;
-@@ -660,7 +676,7 @@ static unsigned int flexcan_mailbox_read
- u32 code;
-
- do {
-- reg_ctrl = flexcan_read(&mb->can_ctrl);
-+ reg_ctrl = priv->read(&mb->can_ctrl);
- } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
-
- /* is this MB empty? */
-@@ -675,17 +691,17 @@ static unsigned int flexcan_mailbox_read
- offload->dev->stats.rx_errors++;
- }
- } else {
-- reg_iflag1 = flexcan_read(&regs->iflag1);
-+ reg_iflag1 = priv->read(&regs->iflag1);
- if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
- return 0;
-
-- reg_ctrl = flexcan_read(&mb->can_ctrl);
-+ reg_ctrl = priv->read(&mb->can_ctrl);
- }
-
- /* increase timstamp to full 32 bit */
- *timestamp = reg_ctrl << 16;
-
-- reg_id = flexcan_read(&mb->can_id);
-+ reg_id = priv->read(&mb->can_id);
- if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
- cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
- else
-@@ -695,19 +711,19 @@ static unsigned int flexcan_mailbox_read
- cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
-
-- *(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
-- *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
-+ *(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
-+ *(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
-
- /* mark as read */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- /* Clear IRQ */
- if (n < 32)
-- flexcan_write(BIT(n), &regs->iflag1);
-+ priv->write(BIT(n), &regs->iflag1);
- else
-- flexcan_write(BIT(n - 32), &regs->iflag2);
-+ priv->write(BIT(n - 32), &regs->iflag2);
- } else {
-- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-- flexcan_read(&regs->timer);
-+ priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-+ priv->read(&regs->timer);
- }
-
- return 1;
-@@ -719,8 +735,8 @@ static inline u64 flexcan_read_reg_iflag
- struct flexcan_regs __iomem *regs = priv->regs;
- u32 iflag1, iflag2;
-
-- iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
-- iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
-+ iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
-+ iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
- ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
-
- return (u64)iflag2 << 32 | iflag1;
-@@ -736,7 +752,7 @@ static irqreturn_t flexcan_irq(int irq,
- u32 reg_iflag1, reg_esr;
- enum can_state last_state = priv->can.state;
-
-- reg_iflag1 = flexcan_read(&regs->iflag1);
-+ reg_iflag1 = priv->read(&regs->iflag1);
-
- /* reception interrupt */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
-@@ -759,7 +775,8 @@ static irqreturn_t flexcan_irq(int irq,
- /* FIFO overflow interrupt */
- if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
- handled = IRQ_HANDLED;
-- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
-+ priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW,
-+ &regs->iflag1);
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
- }
-@@ -773,18 +790,18 @@ static irqreturn_t flexcan_irq(int irq,
- can_led_event(dev, CAN_LED_EVENT_TX);
-
- /* after sending a RTR frame MB is in RX mode */
-- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-- &priv->tx_mb->can_ctrl);
-- flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
-+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-+ &priv->tx_mb->can_ctrl);
-+ priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
- netif_wake_queue(dev);
- }
-
-- reg_esr = flexcan_read(&regs->esr);
-+ reg_esr = priv->read(&regs->esr);
-
- /* ACK all bus error and state change IRQ sources */
- if (reg_esr & FLEXCAN_ESR_ALL_INT) {
- handled = IRQ_HANDLED;
-- flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
-+ priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
- }
-
- /* state change interrupt or broken error state quirk fix is enabled */
-@@ -846,7 +863,7 @@ static void flexcan_set_bittiming(struct
- struct flexcan_regs __iomem *regs = priv->regs;
- u32 reg;
-
-- reg = flexcan_read(&regs->ctrl);
-+ reg = priv->read(&regs->ctrl);
- reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
- FLEXCAN_CTRL_RJW(0x3) |
- FLEXCAN_CTRL_PSEG1(0x7) |
-@@ -870,11 +887,11 @@ static void flexcan_set_bittiming(struct
- reg |= FLEXCAN_CTRL_SMP;
-
- netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
-- flexcan_write(reg, &regs->ctrl);
-+ priv->write(reg, &regs->ctrl);
-
- /* print chip status */
- netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
-- flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
-+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
- }
-
- /* flexcan_chip_start
-@@ -913,7 +930,7 @@ static int flexcan_chip_start(struct net
- * choose format C
- * set max mailbox number
- */
-- reg_mcr = flexcan_read(&regs->mcr);
-+ reg_mcr = priv->read(&regs->mcr);
- reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
- reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
- FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
-@@ -927,7 +944,7 @@ static int flexcan_chip_start(struct net
- FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
- }
- netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
-- flexcan_write(reg_mcr, &regs->mcr);
-+ priv->write(reg_mcr, &regs->mcr);
-
- /* CTRL
- *
-@@ -940,7 +957,7 @@ static int flexcan_chip_start(struct net
- * enable bus off interrupt
- * (== FLEXCAN_CTRL_ERR_STATE)
- */
-- reg_ctrl = flexcan_read(&regs->ctrl);
-+ reg_ctrl = priv->read(&regs->ctrl);
- reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
- reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
- FLEXCAN_CTRL_ERR_STATE;
-@@ -960,45 +977,45 @@ static int flexcan_chip_start(struct net
- /* leave interrupts disabled for now */
- reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
- netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
-- flexcan_write(reg_ctrl, &regs->ctrl);
-+ priv->write(reg_ctrl, &regs->ctrl);
-
- if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
-- reg_ctrl2 = flexcan_read(&regs->ctrl2);
-+ reg_ctrl2 = priv->read(&regs->ctrl2);
- reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
-- flexcan_write(reg_ctrl2, &regs->ctrl2);
-+ priv->write(reg_ctrl2, &regs->ctrl2);
- }
-
- /* clear and invalidate all mailboxes first */
- for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
-- flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
-- &regs->mb[i].can_ctrl);
-+ priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
-+ &regs->mb[i].can_ctrl);
- }
-
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
-- flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
-- &regs->mb[i].can_ctrl);
-+ priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
-+ &regs->mb[i].can_ctrl);
- }
-
- /* Errata ERR005829: mark first TX mailbox as INACTIVE */
-- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-- &priv->tx_mb_reserved->can_ctrl);
-+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-+ &priv->tx_mb_reserved->can_ctrl);
-
- /* mark TX mailbox as INACTIVE */
-- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-- &priv->tx_mb->can_ctrl);
-+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-+ &priv->tx_mb->can_ctrl);
-
- /* acceptance mask/acceptance code (accept everything) */
-- flexcan_write(0x0, &regs->rxgmask);
-- flexcan_write(0x0, &regs->rx14mask);
-- flexcan_write(0x0, &regs->rx15mask);
-+ priv->write(0x0, &regs->rxgmask);
-+ priv->write(0x0, &regs->rx14mask);
-+ priv->write(0x0, &regs->rx15mask);
-
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
-- flexcan_write(0x0, &regs->rxfgmask);
-+ priv->write(0x0, &regs->rxfgmask);
-
- /* clear acceptance filters */
- for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
-- flexcan_write(0, &regs->rximr[i]);
-+ priv->write(0, &regs->rximr[i]);
-
- /* On Vybrid, disable memory error detection interrupts
- * and freeze mode.
-@@ -1011,17 +1028,17 @@ static int flexcan_chip_start(struct net
- * and Correction of Memory Errors" to write to
- * MECR register
- */
-- reg_ctrl2 = flexcan_read(&regs->ctrl2);
-+ reg_ctrl2 = priv->read(&regs->ctrl2);
- reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
-- flexcan_write(reg_ctrl2, &regs->ctrl2);
-+ priv->write(reg_ctrl2, &regs->ctrl2);
-
-- reg_mecr = flexcan_read(&regs->mecr);
-+ reg_mecr = priv->read(&regs->mecr);
- reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
-- flexcan_write(reg_mecr, &regs->mecr);
-+ priv->write(reg_mecr, &regs->mecr);
- reg_mecr |= FLEXCAN_MECR_ECCDIS;
- reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
- FLEXCAN_MECR_FANCEI_MSK);
-- flexcan_write(reg_mecr, &regs->mecr);
-+ priv->write(reg_mecr, &regs->mecr);
- }
-
- err = flexcan_transceiver_enable(priv);
-@@ -1037,14 +1054,14 @@ static int flexcan_chip_start(struct net
-
- /* enable interrupts atomically */
- disable_irq(dev->irq);
-- flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
-- flexcan_write(priv->reg_imask1_default, &regs->imask1);
-- flexcan_write(priv->reg_imask2_default, &regs->imask2);
-+ priv->write(priv->reg_ctrl_default, &regs->ctrl);
-+ priv->write(priv->reg_imask1_default, &regs->imask1);
-+ priv->write(priv->reg_imask2_default, &regs->imask2);
- enable_irq(dev->irq);
-
- /* print chip status */
- netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
-- flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
-+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
-
- return 0;
-
-@@ -1069,10 +1086,10 @@ static void flexcan_chip_stop(struct net
- flexcan_chip_disable(priv);
-
- /* Disable all interrupts */
-- flexcan_write(0, &regs->imask2);
-- flexcan_write(0, &regs->imask1);
-- flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
-- &regs->ctrl);
-+ priv->write(0, &regs->imask2);
-+ priv->write(0, &regs->imask1);
-+ priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
-+ &regs->ctrl);
-
- flexcan_transceiver_disable(priv);
- priv->can.state = CAN_STATE_STOPPED;
-@@ -1187,26 +1204,26 @@ static int register_flexcandev(struct ne
- err = flexcan_chip_disable(priv);
- if (err)
- goto out_disable_per;
-- reg = flexcan_read(&regs->ctrl);
-+ reg = priv->read(&regs->ctrl);
- reg |= FLEXCAN_CTRL_CLK_SRC;
-- flexcan_write(reg, &regs->ctrl);
-+ priv->write(reg, &regs->ctrl);
-
- err = flexcan_chip_enable(priv);
- if (err)
- goto out_chip_disable;
-
- /* set freeze, halt and activate FIFO, restrict register access */
-- reg = flexcan_read(&regs->mcr);
-+ reg = priv->read(&regs->mcr);
- reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
- FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
-- flexcan_write(reg, &regs->mcr);
-+ priv->write(reg, &regs->mcr);
-
- /* Currently we only support newer versions of this core
- * featuring a RX hardware FIFO (although this driver doesn't
- * make use of it on some cores). Older cores, found on some
- * Coldfire derivates are not tested.
- */
-- reg = flexcan_read(&regs->mcr);
-+ reg = priv->read(&regs->mcr);
- if (!(reg & FLEXCAN_MCR_FEN)) {
- netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
- err = -ENODEV;
-@@ -1234,8 +1251,12 @@ static void unregister_flexcandev(struct
- static const struct of_device_id flexcan_of_match[] = {
- { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
- { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
-+ { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
-+ { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
-+ { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
- { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
- { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
-+ { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
- { /* sentinel */ },
- };
- MODULE_DEVICE_TABLE(of, flexcan_of_match);
-@@ -1315,6 +1336,21 @@ static int flexcan_probe(struct platform
- dev->flags |= IFF_ECHO;
-
- priv = netdev_priv(dev);
-+
-+ if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
-+ priv->read = flexcan_read_be;
-+ priv->write = flexcan_write_be;
-+ } else {
-+ if (of_device_is_compatible(pdev->dev.of_node,
-+ "fsl,p1010-flexcan")) {
-+ priv->read = flexcan_read_be;
-+ priv->write = flexcan_write_be;
-+ } else {
-+ priv->read = flexcan_read_le;
-+ priv->write = flexcan_write_le;
-+ }
-+ }
-+
- priv->can.clock.freq = clock_freq;
- priv->can.bittiming_const = &flexcan_bittiming_const;
- priv->can.do_set_mode = flexcan_set_mode;
diff --git a/target/linux/layerscape/patches-4.14/810-kvm-support-layerscape.patch b/target/linux/layerscape/patches-4.14/810-kvm-support-layerscape.patch
deleted file mode 100644
index 5c8cd44182..0000000000
--- a/target/linux/layerscape/patches-4.14/810-kvm-support-layerscape.patch
+++ /dev/null
@@ -1,208 +0,0 @@
-From 621f2c4753b03170213e178cdafd66e78b212b3c Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:41 +0800
-Subject: [PATCH 27/40] kvm: support layerscape
-This is an integrated patch of kvm for layerscape
-
-Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
-Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- arch/arm/include/asm/kvm_mmu.h | 3 +-
- arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
- arch/powerpc/kvm/booke.c | 5 +++
- virt/kvm/arm/mmu.c | 56 ++++++++++++++++++++++++++++++--
- virt/kvm/arm/vgic/vgic-its.c | 2 +-
- virt/kvm/arm/vgic/vgic-v2.c | 3 +-
- 6 files changed, 74 insertions(+), 9 deletions(-)
-
---- a/arch/arm/include/asm/kvm_mmu.h
-+++ b/arch/arm/include/asm/kvm_mmu.h
-@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
- int kvm_alloc_stage2_pgd(struct kvm *kvm);
- void kvm_free_stage2_pgd(struct kvm *kvm);
- int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-- phys_addr_t pa, unsigned long size, bool writable);
-+ phys_addr_t pa, unsigned long size, bool writable,
-+ pgprot_t prot);
-
- int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
---- a/arch/arm64/include/asm/kvm_mmu.h
-+++ b/arch/arm64/include/asm/kvm_mmu.h
-@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
- int kvm_alloc_stage2_pgd(struct kvm *kvm);
- void kvm_free_stage2_pgd(struct kvm *kvm);
- int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-- phys_addr_t pa, unsigned long size, bool writable);
-+ phys_addr_t pa, unsigned long size, bool writable,
-+ pgprot_t prot);
-
- int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-@@ -270,8 +271,15 @@ static inline void __coherent_cache_gues
-
- static inline void __kvm_flush_dcache_pte(pte_t pte)
- {
-- struct page *page = pte_page(pte);
-- kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
-+ if (pfn_valid(pte_pfn(pte))) {
-+ struct page *page = pte_page(pte);
-+ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
-+ } else {
-+ void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
-+
-+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-+ iounmap(va);
-+ }
- }
-
- static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
---- a/arch/powerpc/kvm/booke.c
-+++ b/arch/powerpc/kvm/booke.c
-@@ -305,6 +305,11 @@ void kvmppc_core_queue_fpunavail(struct
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
- }
-
-+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
-+{
-+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
-+}
-+
- void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
- {
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
---- a/virt/kvm/arm/mmu.c
-+++ b/virt/kvm/arm/mmu.c
-@@ -1028,9 +1028,11 @@ static int stage2_pmdp_test_and_clear_yo
- * @guest_ipa: The IPA at which to insert the mapping
- * @pa: The physical address of the device
- * @size: The size of the mapping
-+ * @prot: S2 page translation bits
- */
- int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-- phys_addr_t pa, unsigned long size, bool writable)
-+ phys_addr_t pa, unsigned long size, bool writable,
-+ pgprot_t prot)
- {
- phys_addr_t addr, end;
- int ret = 0;
-@@ -1041,7 +1043,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
- pfn = __phys_to_pfn(pa);
-
- for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-- pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
-+ pte_t pte = pfn_pte(pfn, prot);
-
- if (writable)
- pte = kvm_s2pte_mkwrite(pte);
-@@ -1065,6 +1067,30 @@ out:
- return ret;
- }
-
-+#ifdef CONFIG_ARM64
-+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
-+{
-+ switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
-+ case PTE_ATTRINDX(MT_DEVICE_nGnRE):
-+ case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
-+ case PTE_ATTRINDX(MT_DEVICE_GRE):
-+ return PAGE_S2_DEVICE;
-+ case PTE_ATTRINDX(MT_NORMAL_NC):
-+ case PTE_ATTRINDX(MT_NORMAL):
-+ return (pgprot_val(prot) & PTE_SHARED)
-+ ? PAGE_S2
-+ : PAGE_S2_NS;
-+ }
-+
-+ return PAGE_S2_DEVICE;
-+}
-+#else
-+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
-+{
-+ return PAGE_S2_DEVICE;
-+}
-+#endif
-+
- static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
- {
- kvm_pfn_t pfn = *pfnp;
-@@ -1341,6 +1367,18 @@ static int user_mem_abort(struct kvm_vcp
- hugetlb = true;
- gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
- } else {
-+ pte_t *pte;
-+ spinlock_t *ptl;
-+ pgprot_t prot;
-+
-+ pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
-+ prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
-+ pte_unmap_unlock(pte, ptl);
-+#ifdef CONFIG_ARM64
-+ if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
-+ mem_type = PAGE_S2_NS;
-+#endif
-+
- /*
- * Pages belonging to memslots that don't have the same
- * alignment for userspace and IPA cannot be mapped using
-@@ -1382,6 +1420,11 @@ static int user_mem_abort(struct kvm_vcp
- if (is_error_noslot_pfn(pfn))
- return -EFAULT;
-
-+#ifdef CONFIG_ARM64
-+ if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
-+ flags |= KVM_S2PTE_FLAG_IS_IOMAP;
-+ } else
-+#endif
- if (kvm_is_device_pfn(pfn)) {
- mem_type = PAGE_S2_DEVICE;
- flags |= KVM_S2PTE_FLAG_IS_IOMAP;
-@@ -1919,6 +1962,9 @@ int kvm_arch_prepare_memory_region(struc
- gpa_t gpa = mem->guest_phys_addr +
- (vm_start - mem->userspace_addr);
- phys_addr_t pa;
-+ pgprot_t prot;
-+ pte_t *pte;
-+ spinlock_t *ptl;
-
- pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
- pa += vm_start - vma->vm_start;
-@@ -1929,9 +1975,13 @@ int kvm_arch_prepare_memory_region(struc
- goto out;
- }
-
-+ pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
-+ prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
-+ pte_unmap_unlock(pte, ptl);
-+
- ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
- vm_end - vm_start,
-- writable);
-+ writable, prot);
- if (ret)
- break;
- }
---- a/virt/kvm/arm/vgic/vgic-its.c
-+++ b/virt/kvm/arm/vgic/vgic-its.c
-@@ -243,7 +243,7 @@ static struct its_ite *find_ite(struct v
- #define GIC_LPI_OFFSET 8192
-
- #define VITS_TYPER_IDBITS 16
--#define VITS_TYPER_DEVBITS 16
-+#define VITS_TYPER_DEVBITS 17
- #define VITS_DTE_MAX_DEVID_OFFSET (BIT(14) - 1)
- #define VITS_ITE_MAX_EVENTID_OFFSET (BIT(16) - 1)
-
---- a/virt/kvm/arm/vgic/vgic-v2.c
-+++ b/virt/kvm/arm/vgic/vgic-v2.c
-@@ -307,7 +307,8 @@ int vgic_v2_map_resources(struct kvm *kv
- if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
- ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
- kvm_vgic_global_state.vcpu_base,
-- KVM_VGIC_V2_CPU_SIZE, true);
-+ KVM_VGIC_V2_CPU_SIZE, true,
-+ PAGE_S2_DEVICE);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
diff --git a/target/linux/layerscape/patches-4.14/811-clock-support-layerscape.patch b/target/linux/layerscape/patches-4.14/811-clock-support-layerscape.patch
deleted file mode 100644
index 09a33572fe..0000000000
--- a/target/linux/layerscape/patches-4.14/811-clock-support-layerscape.patch
+++ /dev/null
@@ -1,95 +0,0 @@
-From a00c035c7b82f51716a1a30637b1bd276dee3c5a Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:17 +0800
-Subject: [PATCH] clock: support layerscape
-
-This is an integrated patch of clock for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
-Signed-off-by: Vabhav Sharma <vabhav.sharma@nxp.com>
-Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
----
- drivers/clk/clk-qoriq.c | 25 ++++++++++++++++++++++---
- drivers/cpufreq/qoriq-cpufreq.c | 1 +
- 2 files changed, 23 insertions(+), 3 deletions(-)
-
---- a/drivers/clk/clk-qoriq.c
-+++ b/drivers/clk/clk-qoriq.c
-@@ -41,7 +41,7 @@ struct clockgen_pll_div {
- };
-
- struct clockgen_pll {
-- struct clockgen_pll_div div[4];
-+ struct clockgen_pll_div div[8];
- };
-
- #define CLKSEL_VALID 1
-@@ -79,7 +79,7 @@ struct clockgen_chipinfo {
- const struct clockgen_muxinfo *cmux_groups[2];
- const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
- void (*init_periph)(struct clockgen *cg);
-- int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
-+ int cmux_to_group[NUM_CMUX+1]; /* array should be -1 terminated */
- u32 pll_mask; /* 1 << n bit set if PLL n is valid */
- u32 flags; /* CG_xxx */
- };
-@@ -570,6 +570,17 @@ static const struct clockgen_chipinfo ch
- .flags = CG_VER3 | CG_LITTLE_ENDIAN,
- },
- {
-+ .compat = "fsl,lx2160a-clockgen",
-+ .cmux_groups = {
-+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
-+ },
-+ .cmux_to_group = {
-+ 0, 0, 0, 0, 1, 1, 1, 1, -1
-+ },
-+ .pll_mask = 0x37,
-+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
-+ },
-+ {
- .compat = "fsl,p2041-clockgen",
- .guts_compat = "fsl,qoriq-device-config-1.0",
- .init_periph = p2041_init_periph,
-@@ -601,7 +612,7 @@ static const struct clockgen_chipinfo ch
- &p4080_cmux_grp1, &p4080_cmux_grp2
- },
- .cmux_to_group = {
-- 0, 0, 0, 0, 1, 1, 1, 1
-+ 0, 0, 0, 0, 1, 1, 1, 1, -1
- },
- .pll_mask = 0x1f,
- },
-@@ -1127,6 +1138,13 @@ static void __init create_one_pll(struct
- struct clk *clk;
- int ret;
-
-+ /*
-+ * For platform PLL, there are 8 divider clocks.
-+ * For core PLL, there are 4 divider clocks at most.
-+ */
-+ if (idx != 0 && i >= 4)
-+ break;
-+
- snprintf(pll->div[i].name, sizeof(pll->div[i].name),
- "cg-pll%d-div%d", idx, i + 1);
-
-@@ -1418,6 +1436,7 @@ CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "
- CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
- CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
- CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
-+CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init);
-
- /* Legacy nodes */
- CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
---- a/drivers/cpufreq/qoriq-cpufreq.c
-+++ b/drivers/cpufreq/qoriq-cpufreq.c
-@@ -320,6 +320,7 @@ static const struct of_device_id node_ma
- { .compatible = "fsl,ls1046a-clockgen", },
- { .compatible = "fsl,ls1088a-clockgen", },
- { .compatible = "fsl,ls2080a-clockgen", },
-+ { .compatible = "fsl,lx2160a-clockgen", },
- { .compatible = "fsl,p4080-clockgen", },
- { .compatible = "fsl,qoriq-clockgen-1.0", },
- { .compatible = "fsl,qoriq-clockgen-2.0", },
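
The clk-qoriq hunks above widen cmux_to_group[] to NUM_CMUX+1 entries so that a full table, such as p4080's or the new lx2160a's, still has room for its -1 sentinel. A small sketch of that sentinel-terminated walk; the table contents and the value of NUM_CMUX are taken from the eight-entry tables shown in the patch and are illustrative only:

#include <stdio.h>

#define NUM_CMUX 8	/* assumed to match the eight-entry tables in the patch */

/* One extra slot so a completely filled table still ends in -1. */
static const int cmux_to_group[NUM_CMUX + 1] = {
	0, 0, 0, 0, 1, 1, 1, 1, -1
};

int main(void)
{
	for (int i = 0; i < NUM_CMUX && cmux_to_group[i] != -1; i++)
		printf("cmux %d feeds PLL group %d\n", i, cmux_to_group[i]);
	return 0;
}

Without the extra slot, a table that uses all NUM_CMUX entries has nowhere to put the terminator, which is the off-by-one the resized array and the added -1 entries address.
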
diff --git a/target/linux/layerscape/patches-4.14/812-flexspi-support-layerscape.patch b/target/linux/layerscape/patches-4.14/812-flexspi-support-layerscape.patch
deleted file mode 100644
index e356101683..0000000000
--- a/target/linux/layerscape/patches-4.14/812-flexspi-support-layerscape.patch
+++ /dev/null
@@ -1,1576 +0,0 @@
-From 9875df1e872eb2b0f9d2d72c9a761a5f03400d9f Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 19 Apr 2019 13:23:01 +0800
-Subject: [PATCH] flexspi: support layerscape
-
-This is an integrated patch of flexspi for layerscape
-
-Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Rajat Srivastava <rajat.srivastava@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
----
- .../devicetree/bindings/mtd/nxp-flexspi.txt | 41 +
- drivers/mtd/spi-nor/Kconfig | 10 +
- drivers/mtd/spi-nor/Makefile | 1 +
- drivers/mtd/spi-nor/nxp-flexspi.c | 1404 +++++++++++++++++
- drivers/mtd/spi-nor/spi-nor.c | 13 +-
- include/linux/mtd/cfi.h | 1 +
- include/linux/mtd/spi-nor.h | 3 +-
- 7 files changed, 1470 insertions(+), 3 deletions(-)
- create mode 100644 Documentation/devicetree/bindings/mtd/nxp-flexspi.txt
- create mode 100644 drivers/mtd/spi-nor/nxp-flexspi.c
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/mtd/nxp-flexspi.txt
-@@ -0,0 +1,41 @@
-+* NXP Flex Serial Peripheral Interface(FlexSPI)
-+
-+Required properties:
-+ - compatible : Should be "nxp,lx2160a-fspi"
-+ - reg : the first contains the register location and length,
-+ the second contains the memory mapping address and length
-+ - reg-names: Should contain the reg names "FSPI" and "FSPI-memory"
-+ - interrupts : Should contain the interrupt for the device
-+ - clocks : The clocks needed by the FlexSPI controller
-+ - clock-names : Should contain the name of the clocks: "fspi_en" and "fspi"
-+
-+Optional properties:
-+ - nxp,fspi-has-second-chip: The controller has two buses, bus A and bus B.
-+ Each bus can be connected with two NOR flashes.
-+ Most of the time, each bus only has one NOR flash
-+ connected, this is the default case.
-+ But if there are two NOR flashes connected to the
-+ bus, you should enable this property.
-+ (Please check the board's schematic.)
-+Example:
-+fspi0: flexspi@20c0000 {
-+ compatible = "nxp,lx2160a-fspi";
-+ reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>;
-+ reg-names = "FSPI", "FSPI-memory";
-+ interrupts = <0 25 0x4>; /* Level high type */
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "fspi_en", "fspi";
-+
-+ status = "okay";
-+ nxp,fspi-has-second-chip;
-+ flash0: mt35xu512aba@0 {
-+ reg = <0>;
-+ ....
-+ };
-+
-+ flash1: mt35xu512aba@1 {
-+ reg = <1>;
-+ ....
-+ };
-+
-+};
---- a/drivers/mtd/spi-nor/Kconfig
-+++ b/drivers/mtd/spi-nor/Kconfig
-@@ -97,6 +97,16 @@ config SPI_NXP_SPIFI
- Flash. Enable this option if you have a device with a SPIFI
- controller and want to access the Flash as a mtd device.
-
-+config SPI_NXP_FLEXSPI
-+ tristate "NXP Flex SPI controller"
-+ help
-+ This enables support for the Flex SPI controller in master mode.
-+ Up to four slave devices can be connected on two buses with two
-+ chipselects each.
-+ This controller does not support generic SPI messages and only
-+ supports the high-level SPI memory interface using SPI-NOR
-+ framework.
-+
- config SPI_INTEL_SPI
- tristate
-
---- a/drivers/mtd/spi-nor/Makefile
-+++ b/drivers/mtd/spi-nor/Makefile
-@@ -7,6 +7,7 @@ obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-qua
- obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
- obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
- obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
-+obj-$(CONFIG_SPI_NXP_FLEXSPI) += nxp-flexspi.o
- obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
- obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
- obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
---- /dev/null
-+++ b/drivers/mtd/spi-nor/nxp-flexspi.c
-@@ -0,0 +1,1404 @@
-+/*
-+ * NXP FSPI(FlexSPI controller) driver.
-+ *
-+ * Copyright 2018 NXP
-+ * Author: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/errno.h>
-+#include <linux/platform_device.h>
-+#include <linux/sched.h>
-+#include <linux/delay.h>
-+#include <linux/io.h>
-+#include <linux/clk.h>
-+#include <linux/err.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/timer.h>
-+#include <linux/jiffies.h>
-+#include <linux/completion.h>
-+#include <linux/mtd/mtd.h>
-+#include <linux/mtd/partitions.h>
-+#include <linux/mtd/spi-nor.h>
-+#include <linux/mutex.h>
-+#include <linux/pm_qos.h>
-+#include <linux/pci.h>
-+
-+/* The registers */
-+#define FSPI_MCR0 0x00
-+#define FSPI_MCR0_AHB_TIMEOUT_SHIFT 24
-+#define FSPI_MCR0_AHB_TIMEOUT_MASK (0xFF << FSPI_MCR0_AHB_TIMEOUT_SHIFT)
-+#define FSPI_MCR0_IP_TIMEOUT_SHIFT 16
-+#define FSPI_MCR0_IP_TIMEOUT_MASK (0xFF << FSPI_MCR0_IP_TIMEOUT_SHIFT)
-+#define FSPI_MCR0_LEARN_EN_SHIFT 15
-+#define FSPI_MCR0_LEARN_EN_MASK (1 << FSPI_MCR0_LEARN_EN_SHIFT)
-+#define FSPI_MCR0_SCRFRUN_EN_SHIFT 14
-+#define FSPI_MCR0_SCRFRUN_EN_MASK (1 << FSPI_MCR0_SCRFRUN_EN_SHIFT)
-+#define FSPI_MCR0_OCTCOMB_EN_SHIFT 13
-+#define FSPI_MCR0_OCTCOMB_EN_MASK (1 << FSPI_MCR0_OCTCOMB_EN_SHIFT)
-+#define FSPI_MCR0_DOZE_EN_SHIFT 12
-+#define FSPI_MCR0_DOZE_EN_MASK (1 << FSPI_MCR0_DOZE_EN_SHIFT)
-+#define FSPI_MCR0_HSEN_SHIFT 11
-+#define FSPI_MCR0_HSEN_MASK (1 << FSPI_MCR0_HSEN_SHIFT)
-+#define FSPI_MCR0_SERCLKDIV_SHIFT 8
-+#define FSPI_MCR0_SERCLKDIV_MASK (7 << FSPI_MCR0_SERCLKDIV_SHIFT)
-+#define FSPI_MCR0_ATDF_EN_SHIFT 7
-+#define FSPI_MCR0_ATDF_EN_MASK (1 << FSPI_MCR0_ATDF_EN_SHIFT)
-+#define FSPI_MCR0_ARDF_EN_SHIFT 6
-+#define FSPI_MCR0_ARDF_EN_MASK (1 << FSPI_MCR0_ARDF_EN_SHIFT)
-+#define FSPI_MCR0_RXCLKSRC_SHIFT 4
-+#define FSPI_MCR0_RXCLKSRC_MASK (3 << FSPI_MCR0_RXCLKSRC_SHIFT)
-+#define FSPI_MCR0_END_CFG_SHIFT 2
-+#define FSPI_MCR0_END_CFG_MASK (3 << FSPI_MCR0_END_CFG_SHIFT)
-+#define FSPI_MCR0_MDIS_SHIFT 1
-+#define FSPI_MCR0_MDIS_MASK (1 << FSPI_MCR0_MDIS_SHIFT)
-+#define FSPI_MCR0_SWRST_SHIFT 0
-+#define FSPI_MCR0_SWRST_MASK (1 << FSPI_MCR0_SWRST_SHIFT)
-+
-+#define FSPI_MCR1 0x04
-+#define FSPI_MCR1_SEQ_TIMEOUT_SHIFT 16
-+#define FSPI_MCR1_SEQ_TIMEOUT_MASK \
-+ (0xFFFF << FSPI_MCR1_SEQ_TIMEOUT_SHIFT)
-+#define FSPI_MCR1_AHB_TIMEOUT_SHIFT 0
-+#define FSPI_MCR1_AHB_TIMEOUT_MASK \
-+ (0xFFFF << FSPI_MCR1_AHB_TIMEOUT_SHIFT)
-+
-+#define FSPI_MCR2 0x08
-+#define FSPI_MCR2_IDLE_WAIT_SHIFT 24
-+#define FSPI_MCR2_IDLE_WAIT_MASK (0xFF << FSPI_MCR2_IDLE_WAIT_SHIFT)
-+#define FSPI_MCR2_SAMEFLASH_SHIFT 15
-+#define FSPI_MCR2_SAMEFLASH_MASK (1 << FSPI_MCR2_SAMEFLASH_SHIFT)
-+#define FSPI_MCR2_CLRLRPHS_SHIFT 14
-+#define FSPI_MCR2_CLRLRPHS_MASK (1 << FSPI_MCR2_CLRLRPHS_SHIFT)
-+#define FSPI_MCR2_ABRDATSZ_SHIFT 8
-+#define FSPI_MCR2_ABRDATSZ_MASK (1 << FSPI_MCR2_ABRDATSZ_SHIFT)
-+#define FSPI_MCR2_ABRLEARN_SHIFT 7
-+#define FSPI_MCR2_ABRLEARN_MASK (1 << FSPI_MCR2_ABRLEARN_SHIFT)
-+#define FSPI_MCR2_ABR_READ_SHIFT 6
-+#define FSPI_MCR2_ABR_READ_MASK (1 << FSPI_MCR2_ABR_READ_SHIFT)
-+#define FSPI_MCR2_ABRWRITE_SHIFT 5
-+#define FSPI_MCR2_ABRWRITE_MASK (1 << FSPI_MCR2_ABRWRITE_SHIFT)
-+#define FSPI_MCR2_ABRDUMMY_SHIFT 4
-+#define FSPI_MCR2_ABRDUMMY_MASK (1 << FSPI_MCR2_ABRDUMMY_SHIFT)
-+#define FSPI_MCR2_ABR_MODE_SHIFT 3
-+#define FSPI_MCR2_ABR_MODE_MASK (1 << FSPI_MCR2_ABR_MODE_SHIFT)
-+#define FSPI_MCR2_ABRCADDR_SHIFT 2
-+#define FSPI_MCR2_ABRCADDR_MASK (1 << FSPI_MCR2_ABRCADDR_SHIFT)
-+#define FSPI_MCR2_ABRRADDR_SHIFT 1
-+#define FSPI_MCR2_ABRRADDR_MASK (1 << FSPI_MCR2_ABRRADDR_SHIFT)
-+#define FSPI_MCR2_ABR_CMD_SHIFT 0
-+#define FSPI_MCR2_ABR_CMD_MASK (1 << FSPI_MCR2_ABR_CMD_SHIFT)
-+
-+#define FSPI_AHBCR 0x0c
-+#define FSPI_AHBCR_RDADDROPT_SHIFT 6
-+#define FSPI_AHBCR_RDADDROPT_MASK (1 << FSPI_AHBCR_RDADDROPT_SHIFT)
-+#define FSPI_AHBCR_PREF_EN_SHIFT 5
-+#define FSPI_AHBCR_PREF_EN_MASK (1 << FSPI_AHBCR_PREF_EN_SHIFT)
-+#define FSPI_AHBCR_BUFF_EN_SHIFT 4
-+#define FSPI_AHBCR_BUFF_EN_MASK (1 << FSPI_AHBCR_BUFF_EN_SHIFT)
-+#define FSPI_AHBCR_CACH_EN_SHIFT 3
-+#define FSPI_AHBCR_CACH_EN_MASK (1 << FSPI_AHBCR_CACH_EN_SHIFT)
-+#define FSPI_AHBCR_CLRTXBUF_SHIFT 2
-+#define FSPI_AHBCR_CLRTXBUF_MASK (1 << FSPI_AHBCR_CLRTXBUF_SHIFT)
-+#define FSPI_AHBCR_CLRRXBUF_SHIFT 1
-+#define FSPI_AHBCR_CLRRXBUF_MASK (1 << FSPI_AHBCR_CLRRXBUF_SHIFT)
-+#define FSPI_AHBCR_PAR_EN_SHIFT 0
-+#define FSPI_AHBCR_PAR_EN_MASK (1 << FSPI_AHBCR_PAR_EN_SHIFT)
-+
-+#define FSPI_INTEN 0x10
-+#define FSPI_INTEN_SCLKSBWR_SHIFT 9
-+#define FSPI_INTEN_SCLKSBWR_MASK (1 << FSPI_INTEN_SCLKSBWR_SHIFT)
-+#define FSPI_INTEN_SCLKSBRD_SHIFT 8
-+#define FSPI_INTEN_SCLKSBRD_MASK (1 << FSPI_INTEN_SCLKSBRD_SHIFT)
-+#define FSPI_INTEN_DATALRNFL_SHIFT 7
-+#define FSPI_INTEN_DATALRNFL_MASK (1 << FSPI_INTEN_DATALRNFL_SHIFT)
-+#define FSPI_INTEN_IPTXWE_SHIFT 6
-+#define FSPI_INTEN_IPTXWE_MASK (1 << FSPI_INTEN_IPTXWE_SHIFT)
-+#define FSPI_INTEN_IPRXWA_SHIFT 5
-+#define FSPI_INTEN_IPRXWA_MASK (1 << FSPI_INTEN_IPRXWA_SHIFT)
-+#define FSPI_INTEN_AHBCMDERR_SHIFT 4
-+#define FSPI_INTEN_AHBCMDERR_MASK (1 << FSPI_INTEN_AHBCMDERR_SHIFT)
-+#define FSPI_INTEN_IPCMDERR_SHIFT 3
-+#define FSPI_INTEN_IPCMDERR_MASK (1 << FSPI_INTEN_IPCMDERR_SHIFT)
-+#define FSPI_INTEN_AHBCMDGE_SHIFT 2
-+#define FSPI_INTEN_AHBCMDGE_MASK (1 << FSPI_INTEN_AHBCMDGE_SHIFT)
-+#define FSPI_INTEN_IPCMDGE_SHIFT 1
-+#define FSPI_INTEN_IPCMDGE_MASK (1 << FSPI_INTEN_IPCMDGE_SHIFT)
-+#define FSPI_INTEN_IPCMDDONE_SHIFT 0
-+#define FSPI_INTEN_IPCMDDONE_MASK (1 << FSPI_INTEN_IPCMDDONE_SHIFT)
-+
-+#define FSPI_INTR 0x14
-+#define FSPI_INTR_SCLKSBWR_SHIFT 9
-+#define FSPI_INTR_SCLKSBWR_MASK (1 << FSPI_INTR_SCLKSBWR_SHIFT)
-+#define FSPI_INTR_SCLKSBRD_SHIFT 8
-+#define FSPI_INTR_SCLKSBRD_MASK (1 << FSPI_INTR_SCLKSBRD_SHIFT)
-+#define FSPI_INTR_DATALRNFL_SHIFT 7
-+#define FSPI_INTR_DATALRNFL_MASK (1 << FSPI_INTR_DATALRNFL_SHIFT)
-+#define FSPI_INTR_IPTXWE_SHIFT 6
-+#define FSPI_INTR_IPTXWE_MASK (1 << FSPI_INTR_IPTXWE_SHIFT)
-+#define FSPI_INTR_IPRXWA_SHIFT 5
-+#define FSPI_INTR_IPRXWA_MASK (1 << FSPI_INTR_IPRXWA_SHIFT)
-+#define FSPI_INTR_AHBCMDERR_SHIFT 4
-+#define FSPI_INTR_AHBCMDERR_MASK (1 << FSPI_INTR_AHBCMDERR_SHIFT)
-+#define FSPI_INTR_IPCMDERR_SHIFT 3
-+#define FSPI_INTR_IPCMDERR_MASK (1 << FSPI_INTR_IPCMDERR_SHIFT)
-+#define FSPI_INTR_AHBCMDGE_SHIFT 2
-+#define FSPI_INTR_AHBCMDGE_MASK (1 << FSPI_INTR_AHBCMDGE_SHIFT)
-+#define FSPI_INTR_IPCMDGE_SHIFT 1
-+#define FSPI_INTR_IPCMDGE_MASK (1 << FSPI_INTR_IPCMDGE_SHIFT)
-+#define FSPI_INTR_IPCMDDONE_SHIFT 0
-+#define FSPI_INTR_IPCMDDONE_MASK (1 << FSPI_INTR_IPCMDDONE_SHIFT)
-+
-+#define FSPI_LUTKEY 0x18
-+#define FSPI_LUTKEY_VALUE 0x5AF05AF0
-+
-+#define FSPI_LCKCR 0x1C
-+#define FSPI_LCKER_LOCK 0x1
-+#define FSPI_LCKER_UNLOCK 0x2
-+
-+#define FSPI_BUFXCR_INVALID_MSTRID 0xe
-+#define FSPI_AHBRX_BUF0CR0 0x20
-+#define FSPI_AHBRX_BUF1CR0 0x24
-+#define FSPI_AHBRX_BUF2CR0 0x28
-+#define FSPI_AHBRX_BUF3CR0 0x2C
-+#define FSPI_AHBRX_BUF4CR0 0x30
-+#define FSPI_AHBRX_BUF5CR0 0x34
-+#define FSPI_AHBRX_BUF6CR0 0x38
-+#define FSPI_AHBRX_BUF7CR0 0x3C
-+#define FSPI_AHBRXBUF0CR7_PREF_SHIFT 31
-+#define FSPI_AHBRXBUF0CR7_PREF_MASK (1 << FSPI_AHBRXBUF0CR7_PREF_SHIFT)
-+
-+#define FSPI_AHBRX_BUF0CR1 0x40
-+#define FSPI_AHBRX_BUF1CR1 0x44
-+#define FSPI_AHBRX_BUF2CR1 0x48
-+#define FSPI_AHBRX_BUF3CR1 0x4C
-+#define FSPI_AHBRX_BUF4CR1 0x50
-+#define FSPI_AHBRX_BUF5CR1 0x54
-+#define FSPI_AHBRX_BUF6CR1 0x58
-+#define FSPI_AHBRX_BUF7CR1 0x5C
-+#define FSPI_BUFXCR1_MSID_SHIFT 0
-+#define FSPI_BUFXCR1_MSID_MASK (0xF << FSPI_BUFXCR1_MSID_SHIFT)
-+#define FSPI_BUFXCR1_PRIO_SHIFT 8
-+#define FSPI_BUFXCR1_PRIO_MASK (0x7 << FSPI_BUFXCR1_PRIO_SHIFT)
-+
-+#define FSPI_FLSHA1CR0 0x60
-+#define FSPI_FLSHA2CR0 0x64
-+#define FSPI_FLSHB1CR0 0x68
-+#define FSPI_FLSHB2CR0 0x6C
-+#define FSPI_FLSHXCR0_SZ_SHIFT 10
-+#define FSPI_FLSHXCR0_SZ_MASK (0x3FFFFF << FSPI_FLSHXCR0_SZ_SHIFT)
-+
-+#define FSPI_FLSHA1CR1 0x70
-+#define FSPI_FLSHA2CR1 0x74
-+#define FSPI_FLSHB1CR1 0x78
-+#define FSPI_FLSHB2CR1 0x7C
-+#define FSPI_FLSHXCR1_CSINTR_SHIFT 16
-+#define FSPI_FLSHXCR1_CSINTR_MASK \
-+ (0xFFFF << FSPI_FLSHXCR1_CSINTR_SHIFT)
-+#define FSPI_FLSHXCR1_CAS_SHIFT 11
-+#define FSPI_FLSHXCR1_CAS_MASK (0xF << FSPI_FLSHXCR1_CAS_SHIFT)
-+#define FSPI_FLSHXCR1_WA_SHIFT 10
-+#define FSPI_FLSHXCR1_WA_MASK (1 << FSPI_FLSHXCR1_WA_SHIFT)
-+#define FSPI_FLSHXCR1_TCSH_SHIFT 5
-+#define FSPI_FLSHXCR1_TCSH_MASK (0x1F << FSPI_FLSHXCR1_TCSH_SHIFT)
-+#define FSPI_FLSHXCR1_TCSS_SHIFT 0
-+#define FSPI_FLSHXCR1_TCSS_MASK (0x1F << FSPI_FLSHXCR1_TCSS_SHIFT)
-+
-+#define FSPI_FLSHA1CR2 0x80
-+#define FSPI_FLSHA2CR2 0x84
-+#define FSPI_FLSHB1CR2 0x88
-+#define FSPI_FLSHB2CR2 0x8C
-+#define FSPI_FLSHXCR2_CLRINSP_SHIFT 24
-+#define FSPI_FLSHXCR2_CLRINSP_MASK (1 << FSPI_FLSHXCR2_CLRINSP_SHIFT)
-+#define FSPI_FLSHXCR2_AWRWAIT_SHIFT 16
-+#define FSPI_FLSHXCR2_AWRWAIT_MASK (0xFF << FSPI_FLSHXCR2_AWRWAIT_SHIFT)
-+#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
-+#define FSPI_FLSHXCR2_AWRSEQN_MASK (0x7 << FSPI_FLSHXCR2_AWRSEQN_SHIFT)
-+#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
-+#define FSPI_FLSHXCR2_AWRSEQI_MASK (0xF << FSPI_FLSHXCR2_AWRSEQI_SHIFT)
-+#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
-+#define FSPI_FLSHXCR2_ARDSEQN_MASK (0x7 << FSPI_FLSHXCR2_ARDSEQN_SHIFT)
-+#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
-+#define FSPI_FLSHXCR2_ARDSEQI_MASK (0xF << FSPI_FLSHXCR2_ARDSEQI_SHIFT)
-+
-+#define FSPI_IPCR0 0xA0
-+
-+#define FSPI_IPCR1 0xA4
-+#define FSPI_IPCR1_IPAREN_SHIFT 31
-+#define FSPI_IPCR1_IPAREN_MASK (1 << FSPI_IPCR1_IPAREN_SHIFT)
-+#define FSPI_IPCR1_SEQNUM_SHIFT 24
-+#define FSPI_IPCR1_SEQNUM_MASK (0xF << FSPI_IPCR1_SEQNUM_SHIFT)
-+#define FSPI_IPCR1_SEQID_SHIFT 16
-+#define FSPI_IPCR1_SEQID_MASK (0xF << FSPI_IPCR1_SEQID_SHIFT)
-+#define FSPI_IPCR1_IDATSZ_SHIFT 0
-+#define FSPI_IPCR1_IDATSZ_MASK (0xFFFF << FSPI_IPCR1_IDATSZ_SHIFT)
-+
-+#define FSPI_IPCMD 0xB0
-+#define FSPI_IPCMD_TRG_SHIFT 0
-+#define FSPI_IPCMD_TRG_MASK (1 << FSPI_IPCMD_TRG_SHIFT)
-+
-+#define FSPI_DLPR 0xB4
-+
-+#define FSPI_IPRXFCR 0xB8
-+#define FSPI_IPRXFCR_CLR_SHIFT 0
-+#define FSPI_IPRXFCR_CLR_MASK (1 << FSPI_IPRXFCR_CLR_SHIFT)
-+#define FSPI_IPRXFCR_DMA_EN_SHIFT 1
-+#define FSPI_IPRXFCR_DMA_EN_MASK (1 << FSPI_IPRXFCR_DMA_EN_SHIFT)
-+#define FSPI_IPRXFCR_WMRK_SHIFT 2
-+#define FSPI_IPRXFCR_WMRK_MASK (0x1F << FSPI_IPRXFCR_WMRK_SHIFT)
-+
-+#define FSPI_IPTXFCR 0xBC
-+#define FSPI_IPTXFCR_CLR_SHIFT 0
-+#define FSPI_IPTXFCR_CLR_MASK (1 << FSPI_IPTXFCR_CLR_SHIFT)
-+#define FSPI_IPTXFCR_DMA_EN_SHIFT 1
-+#define FSPI_IPTXFCR_DMA_EN_MASK (1 << FSPI_IPTXFCR_DMA_EN_SHIFT)
-+#define FSPI_IPTXFCR_WMRK_SHIFT 2
-+#define FSPI_IPTXFCR_WMRK_MASK (0x1F << FSPI_IPTXFCR_WMRK_SHIFT)
-+
-+#define FSPI_DLLACR 0xC0
-+#define FSPI_DLLACR_OVRDEN_SHIFT 8
-+#define FSPI_DLLACR_OVRDEN_MASK (1 << FSPI_DLLACR_OVRDEN_SHIFT)
-+
-+#define FSPI_DLLBCR 0xC4
-+#define FSPI_DLLBCR_OVRDEN_SHIFT 8
-+#define FSPI_DLLBCR_OVRDEN_MASK (1 << FSPI_DLLBCR_OVRDEN_SHIFT)
-+
-+#define FSPI_STS0 0xE0
-+#define FSPI_STS0_DLPHA_SHIFT 9
-+#define FSPI_STS0_DLPHA_MASK (0x1F << FSPI_STS0_DLPHA_SHIFT)
-+#define FSPI_STS0_DLPHB_SHIFT 4
-+#define FSPI_STS0_DLPHB_MASK (0x1F << FSPI_STS0_DLPHB_SHIFT)
-+#define FSPI_STS0_CMD_SRC_SHIFT 2
-+#define FSPI_STS0_CMD_SRC_MASK (3 << FSPI_STS0_CMD_SRC_SHIFT)
-+#define FSPI_STS0_ARB_IDLE_SHIFT 1
-+#define FSPI_STS0_ARB_IDLE_MASK (1 << FSPI_STS0_ARB_IDLE_SHIFT)
-+#define FSPI_STS0_SEQ_IDLE_SHIFT 0
-+#define FSPI_STS0_SEQ_IDLE_MASK (1 << FSPI_STS0_SEQ_IDLE_SHIFT)
-+
-+#define FSPI_STS1 0xE4
-+#define FSPI_STS1_IP_ERRCD_SHIFT 24
-+#define FSPI_STS1_IP_ERRCD_MASK (0xF << FSPI_STS1_IP_ERRCD_SHIFT)
-+#define FSPI_STS1_IP_ERRID_SHIFT 16
-+#define FSPI_STS1_IP_ERRID_MASK (0xF << FSPI_STS1_IP_ERRID_SHIFT)
-+#define FSPI_STS1_AHB_ERRCD_SHIFT 8
-+#define FSPI_STS1_AHB_ERRCD_MASK (0xF << FSPI_STS1_AHB_ERRCD_SHIFT)
-+#define FSPI_STS1_AHB_ERRID_SHIFT 0
-+#define FSPI_STS1_AHB_ERRID_MASK (0xF << FSPI_STS1_AHB_ERRID_SHIFT)
-+
-+#define FSPI_AHBSPNST 0xEC
-+#define FSPI_AHBSPNST_DATLFT_SHIFT 16
-+#define FSPI_AHBSPNST_DATLFT_MASK \
-+ (0xFFFF << FSPI_AHBSPNST_DATLFT_SHIFT)
-+#define FSPI_AHBSPNST_BUFID_SHIFT 1
-+#define FSPI_AHBSPNST_BUFID_MASK (7 << FSPI_AHBSPNST_BUFID_SHIFT)
-+#define FSPI_AHBSPNST_ACTIVE_SHIFT 0
-+#define FSPI_AHBSPNST_ACTIVE_MASK (1 << FSPI_AHBSPNST_ACTIVE_SHIFT)
-+
-+#define FSPI_IPRXFSTS 0xF0
-+#define FSPI_IPRXFSTS_RDCNTR_SHIFT 16
-+#define FSPI_IPRXFSTS_RDCNTR_MASK \
-+ (0xFFFF << FSPI_IPRXFSTS_RDCNTR_SHIFT)
-+#define FSPI_IPRXFSTS_FILL_SHIFT 0
-+#define FSPI_IPRXFSTS_FILL_MASK (0xFF << FSPI_IPRXFSTS_FILL_SHIFT)
-+
-+#define FSPI_IPTXFSTS 0xF4
-+#define FSPI_IPTXFSTS_WRCNTR_SHIFT 16
-+#define FSPI_IPTXFSTS_WRCNTR_MASK \
-+ (0xFFFF << FSPI_IPTXFSTS_WRCNTR_SHIFT)
-+#define FSPI_IPTXFSTS_FILL_SHIFT 0
-+#define FSPI_IPTXFSTS_FILL_MASK (0xFF << FSPI_IPTXFSTS_FILL_SHIFT)
-+
-+#define FSPI_RFDR 0x100
-+#define FSPI_TFDR 0x180
-+
-+#define FSPI_LUT_BASE 0x200
-+
-+/* register map end */
-+
-+/*
-+ * The definition of the LUT register shows below:
-+ *
-+ * ---------------------------------------------------
-+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
-+ * ---------------------------------------------------
-+ */
-+#define OPRND0_SHIFT 0
-+#define PAD0_SHIFT 8
-+#define INSTR0_SHIFT 10
-+#define OPRND1_SHIFT 16
-+
-+/* Instruction set for the LUT register. */
-+
-+#define LUT_STOP 0x00
-+#define LUT_CMD 0x01
-+#define LUT_ADDR 0x02
-+#define LUT_CADDR_SDR 0x03
-+#define LUT_MODE 0x04
-+#define LUT_MODE2 0x05
-+#define LUT_MODE4 0x06
-+#define LUT_MODE8 0x07
-+#define LUT_NXP_WRITE 0x08
-+#define LUT_NXP_READ 0x09
-+#define LUT_LEARN_SDR 0x0A
-+#define LUT_DATSZ_SDR 0x0B
-+#define LUT_DUMMY 0x0C
-+#define LUT_DUMMY_RWDS_SDR 0x0D
-+#define LUT_JMP_ON_CS 0x1F
-+#define LUT_CMD_DDR 0x21
-+#define LUT_ADDR_DDR 0x22
-+#define LUT_CADDR_DDR 0x23
-+#define LUT_MODE_DDR 0x24
-+#define LUT_MODE2_DDR 0x25
-+#define LUT_MODE4_DDR 0x26
-+#define LUT_MODE8_DDR 0x27
-+#define LUT_WRITE_DDR 0x28
-+#define LUT_READ_DDR 0x29
-+#define LUT_LEARN_DDR 0x2A
-+#define LUT_DATSZ_DDR 0x2B
-+#define LUT_DUMMY_DDR 0x2C
-+#define LUT_DUMMY_RWDS_DDR 0x2D
-+
-+
-+/*
-+ * The PAD definitions for LUT register.
-+ *
-+ * The pad stands for the lines number of IO[0:3].
-+ * For example, the Quad read need four IO lines, so you should
-+ * set LUT_PAD4 which means we use four IO lines.
-+ */
-+#define LUT_PAD1 0
-+#define LUT_PAD2 1
-+#define LUT_PAD4 2
-+#define LUT_PAD8 3
-+
-+/* Oprands for the LUT register. */
-+#define ADDR24BIT 0x18
-+#define ADDR32BIT 0x20
-+
-+/* Macros for constructing the LUT register. */
-+#define LUT0(ins, pad, opr) \
-+ (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
-+ ((LUT_##ins) << INSTR0_SHIFT))
-+
-+#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
-+
-+/* other macros for LUT register. */
-+#define FSPI_LUT(x) (FSPI_LUT_BASE + (x) * 4)
-+#define FSPI_LUT_NUM 128
-+
-+/* SEQID -- we can have 32 seqids at most. */
-+#define SEQID_READ 0
-+#define SEQID_WREN 1
-+#define SEQID_WRDI 2
-+#define SEQID_RDSR 3
-+#define SEQID_SE 4
-+#define SEQID_CHIP_ERASE 5
-+#define SEQID_PP 6
-+#define SEQID_RDID 7
-+#define SEQID_WRSR 8
-+#define SEQID_RDCR 9
-+#define SEQID_EN4B 10
-+#define SEQID_BRWR 11
-+#define SEQID_RD_EVCR 12
-+#define SEQID_WD_EVCR 13
-+#define SEQID_RDFSR 14
-+
-+#define FSPI_MIN_IOMAP SZ_4M
-+
-+#define FSPI_RX_MAX_IPBUF_SIZE 0x200 /* 64 * 64bits */
-+#define FSPI_TX_MAX_IPBUF_SIZE 0x400 /* 128 * 64bits */
-+#define FSPI_RX_MAX_AHBBUF_SIZE 0x800 /* 256 * 64bits */
-+#define FSPI_TX_MAX_AHBBUF_SIZE 0x40 /* 8 * 64bits */
-+
-+#define TX_IPBUF_SIZE FSPI_TX_MAX_IPBUF_SIZE
-+#define RX_IPBUF_SIZE FSPI_RX_MAX_IPBUF_SIZE
-+#define RX_AHBBUF_SIZE FSPI_RX_MAX_AHBBUF_SIZE
-+#define TX_AHBBUF_SIZE FSPI_TX_MAX_AHBBUF_SIZE
-+
-+#define FSPI_SINGLE_MODE 1
-+#define FSPI_OCTAL_MODE 8
-+
-+#define FSPINOR_OP_READ_1_1_8_4B 0x7c
-+
-+enum nxp_fspi_devtype {
-+ NXP_FSPI_LX2160A,
-+};
-+
-+struct nxp_fspi_devtype_data {
-+ enum nxp_fspi_devtype devtype;
-+ int rxfifo;
-+ int txfifo;
-+ int ahb_buf_size;
-+ int driver_data;
-+};
-+
-+static struct nxp_fspi_devtype_data lx2160a_data = {
-+ .devtype = NXP_FSPI_LX2160A,
-+ .rxfifo = RX_IPBUF_SIZE,
-+ .txfifo = TX_IPBUF_SIZE,
-+ .ahb_buf_size = RX_AHBBUF_SIZE,
-+ .driver_data = 0,
-+};
-+
-+#define NXP_FSPI_MAX_CHIP 4
-+struct nxp_fspi {
-+ struct mtd_info mtd[NXP_FSPI_MAX_CHIP];
-+ struct spi_nor nor[NXP_FSPI_MAX_CHIP];
-+ void __iomem *iobase;
-+ void __iomem *ahb_addr;
-+ u32 memmap_phy;
-+ u32 memmap_offs;
-+ u32 memmap_len;
-+ struct clk *clk, *clk_en;
-+ struct device *dev;
-+ struct completion c;
-+ struct nxp_fspi_devtype_data *devtype_data;
-+ u32 nor_size;
-+ u32 nor_num;
-+ u32 clk_rate;
-+ u32 spi_rx_bus_width;
-+ u32 spi_tx_bus_width;
-+ unsigned int chip_base_addr; /* We may support two chips. */
-+ bool has_second_chip;
-+ struct mutex lock;
-+ struct pm_qos_request pm_qos_req;
-+};
-+
-+static inline void nxp_fspi_unlock_lut(struct nxp_fspi *fspi)
-+{
-+ writel(FSPI_LUTKEY_VALUE, fspi->iobase + FSPI_LUTKEY);
-+ writel(FSPI_LCKER_UNLOCK, fspi->iobase + FSPI_LCKCR);
-+}
-+
-+static inline void nxp_fspi_lock_lut(struct nxp_fspi *fspi)
-+{
-+ writel(FSPI_LUTKEY_VALUE, fspi->iobase + FSPI_LUTKEY);
-+ writel(FSPI_LCKER_LOCK, fspi->iobase + FSPI_LCKCR);
-+}
-+
-+static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id)
-+{
-+ struct nxp_fspi *fspi = dev_id;
-+ u32 reg;
-+
-+ reg = readl(fspi->iobase + FSPI_INTR);
-+ writel(FSPI_INTR_IPCMDDONE_MASK, fspi->iobase + FSPI_INTR);
-+ if (reg & FSPI_INTR_IPCMDDONE_MASK)
-+ complete(&fspi->c);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void nxp_fspi_init_lut(struct nxp_fspi *fspi)
-+{
-+ void __iomem *base = fspi->iobase;
-+ struct spi_nor *nor = &fspi->nor[0];
-+ u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
-+ u32 lut_base;
-+ u8 op, dm;
-+ int i;
-+
-+ nxp_fspi_unlock_lut(fspi);
-+
-+ /* Clear all the LUT table */
-+ for (i = 0; i < FSPI_LUT_NUM; i++)
-+ writel(0, base + FSPI_LUT_BASE + i * 4);
-+
-+ /* Read */
-+ lut_base = SEQID_READ * 4;
-+ op = nor->read_opcode;
-+ dm = nor->read_dummy;
-+
-+ if (fspi->spi_rx_bus_width == FSPI_OCTAL_MODE) {
-+ dm = 8;
-+ op = FSPINOR_OP_READ_1_1_8_4B;
-+ writel(LUT0(CMD, PAD1, op) | LUT1(ADDR, PAD1, addrlen),
-+ base + FSPI_LUT(lut_base));
-+ writel(LUT0(DUMMY, PAD8, dm) | LUT1(NXP_READ, PAD8, 0),
-+ base + FSPI_LUT(lut_base + 1));
-+ } else {
-+ if ((op == SPINOR_OP_READ_FAST_4B) ||
-+ (op == SPINOR_OP_READ_FAST) ||
-+ (op == SPINOR_OP_READ) ||
-+ (op == SPINOR_OP_READ_4B)) {
-+ dm = 8;
-+ writel(LUT0(CMD, PAD1, op) | LUT1(ADDR, PAD1, addrlen),
-+ base + FSPI_LUT(lut_base));
-+ writel(LUT0(DUMMY, PAD1, dm) | LUT1(NXP_READ, PAD1, 0),
-+ base + FSPI_LUT(lut_base + 1));
-+ } else if (nor->read_proto == SNOR_PROTO_1_4_4) {
-+ dev_dbg(nor->dev, "Unsupported opcode : 0x%.2x\n", op);
-+ /* TODO Add support for other Read ops. */
-+ } else {
-+ dev_dbg(nor->dev, "Unsupported opcode : 0x%.2x\n", op);
-+ }
-+ }
-+
-+ /* Write enable */
-+ lut_base = SEQID_WREN * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + FSPI_LUT(lut_base));
-+
-+ /* Page Program */
-+ lut_base = SEQID_PP * 4;
-+ writel(LUT0(CMD, PAD1, nor->program_opcode) | LUT1(ADDR, PAD1, addrlen),
-+ base + FSPI_LUT(lut_base));
-+ writel(LUT0(NXP_WRITE, PAD1, 0), base + FSPI_LUT(lut_base + 1));
-+
-+ /* Read Status */
-+ lut_base = SEQID_RDSR * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(NXP_READ, PAD1, 0x1),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* Erase a sector */
-+ lut_base = SEQID_SE * 4;
-+ writel(LUT0(CMD, PAD1, nor->erase_opcode) | LUT1(ADDR, PAD1, addrlen),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* Erase the whole chip */
-+ lut_base = SEQID_CHIP_ERASE * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* READ ID */
-+ lut_base = SEQID_RDID * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(NXP_READ, PAD1, 0x8),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* Write Register */
-+ lut_base = SEQID_WRSR * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(NXP_WRITE, PAD1, 0x2),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* Read Configuration Register */
-+ lut_base = SEQID_RDCR * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(NXP_READ, PAD1, 0x1),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* Write disable */
-+ lut_base = SEQID_WRDI * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + FSPI_LUT(lut_base));
-+
-+ /* Enter 4 Byte Mode (Micron) */
-+ lut_base = SEQID_EN4B * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + FSPI_LUT(lut_base));
-+
-+ /* Enter 4 Byte Mode (Spansion) */
-+ lut_base = SEQID_BRWR * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + FSPI_LUT(lut_base));
-+
-+ /* Read EVCR register */
-+ lut_base = SEQID_RD_EVCR * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_RD_EVCR),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* Write EVCR register */
-+ lut_base = SEQID_WD_EVCR * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_WD_EVCR),
-+ base + FSPI_LUT(lut_base));
-+
-+ /* Read Flag Status */
-+ lut_base = SEQID_RDFSR * 4;
-+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDFSR) | LUT1(NXP_READ, PAD1, 0x1),
-+ base + FSPI_LUT(lut_base));
-+
-+ nxp_fspi_lock_lut(fspi);
-+}
-+
-+/* Get the SEQID for the command */
-+static int nxp_fspi_get_seqid(struct nxp_fspi *fspi, u8 cmd)
-+{
-+
-+ switch (cmd) {
-+ case SPINOR_OP_READ_1_1_4_4B:
-+ case SPINOR_OP_READ_1_1_4:
-+ case SPINOR_OP_READ:
-+ case SPINOR_OP_READ_4B:
-+ case SPINOR_OP_READ_FAST:
-+ case SPINOR_OP_READ_FAST_4B:
-+ return SEQID_READ;
-+ case SPINOR_OP_WREN:
-+ return SEQID_WREN;
-+ case SPINOR_OP_WRDI:
-+ return SEQID_WRDI;
-+ case SPINOR_OP_RDSR:
-+ return SEQID_RDSR;
-+ case SPINOR_OP_RDFSR:
-+ return SEQID_RDFSR;
-+ case SPINOR_OP_BE_4K:
-+ case SPINOR_OP_SE:
-+ case SPINOR_OP_SE_4B:
-+ case SPINOR_OP_BE_4K_4B:
-+ return SEQID_SE;
-+ case SPINOR_OP_CHIP_ERASE:
-+ return SEQID_CHIP_ERASE;
-+ case SPINOR_OP_PP:
-+ case SPINOR_OP_PP_4B:
-+ return SEQID_PP;
-+ case SPINOR_OP_RDID:
-+ return SEQID_RDID;
-+ case SPINOR_OP_WRSR:
-+ return SEQID_WRSR;
-+ case SPINOR_OP_RDCR:
-+ return SEQID_RDCR;
-+ case SPINOR_OP_EN4B:
-+ return SEQID_EN4B;
-+ case SPINOR_OP_BRWR:
-+ return SEQID_BRWR;
-+ case SPINOR_OP_RD_EVCR:
-+ return SEQID_RD_EVCR;
-+ case SPINOR_OP_WD_EVCR:
-+ return SEQID_WD_EVCR;
-+ default:
-+ dev_err(fspi->dev, "Unsupported cmd 0x%.2x\n", cmd);
-+ break;
-+ }
-+ return -EINVAL;
-+}
-+
-+static int
-+nxp_fspi_runcmd(struct nxp_fspi *fspi, u8 cmd, unsigned int addr, int len)
-+{
-+ void __iomem *base = fspi->iobase;
-+ int seqid;
-+ int seqnum = 0;
-+ u32 reg;
-+ int err;
-+ int iprxfcr = 0;
-+
-+ iprxfcr = readl(fspi->iobase + FSPI_IPRXFCR);
-+ /* invalid RXFIFO first */
-+ iprxfcr &= ~FSPI_IPRXFCR_DMA_EN_MASK;
-+ iprxfcr = iprxfcr | FSPI_IPRXFCR_CLR_MASK;
-+ writel(iprxfcr, fspi->iobase + FSPI_IPRXFCR);
-+
-+ init_completion(&fspi->c);
-+ dev_dbg(fspi->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
-+ fspi->chip_base_addr, addr, len, cmd);
-+
-+ /* write address */
-+ writel(fspi->chip_base_addr + addr, base + FSPI_IPCR0);
-+
-+ seqid = nxp_fspi_get_seqid(fspi, cmd);
-+
-+ writel((seqnum << FSPI_IPCR1_SEQNUM_SHIFT) |
-+ (seqid << FSPI_IPCR1_SEQID_SHIFT) | len,
-+ base + FSPI_IPCR1);
-+
-+ /* wait till controller is idle */
-+ do {
-+ reg = readl(base + FSPI_STS0);
-+ if ((reg & FSPI_STS0_ARB_IDLE_MASK) &&
-+ (reg & FSPI_STS0_SEQ_IDLE_MASK))
-+ break;
-+ udelay(1);
-+ dev_dbg(fspi->dev, "The controller is busy, 0x%x\n", reg);
-+ } while (1);
-+
-+ /* trigger the LUT now */
-+ writel(1, base + FSPI_IPCMD);
-+
-+ /* Wait for the interrupt. */
-+ if (!wait_for_completion_timeout(&fspi->c, msecs_to_jiffies(1000))) {
-+ dev_err(fspi->dev,
-+ "cmd 0x%.2x timeout, addr@%.8x, Status0:0x%.8x, Status1:0x%.8x\n",
-+ cmd, addr, readl(base + FSPI_STS0),
-+ readl(base + FSPI_STS1));
-+ err = -ETIMEDOUT;
-+ } else {
-+ err = 0;
-+ dev_dbg(fspi->dev, "FSPI Intr done,INTR:<0x%.8x>\n",
-+ readl(base + FSPI_INTR));
-+ }
-+
-+ return err;
-+}
-+
-+/* Read out the data from the FSPI_RBDR buffer registers. */
-+static void nxp_fspi_read_data(struct nxp_fspi *fspi, int len, u8 *rxbuf)
-+{
-+ int i = 0, j = 0, tmp_size = 0;
-+ int size;
-+ u32 tmp = 0;
-+
-+ while (len > 0) {
-+
-+ size = len / 8;
-+
-+ for (i = 0; i < size; ++i) {
-+ /* Wait for RXFIFO available*/
-+ while (!(readl(fspi->iobase + FSPI_INTR)
-+ & FSPI_INTR_IPRXWA_MASK))
-+ ;
-+
-+ j = 0;
-+ tmp_size = 8;
-+ while (tmp_size > 0) {
-+ tmp = 0;
-+ tmp = readl(fspi->iobase + FSPI_RFDR + j * 4);
-+ memcpy(rxbuf, &tmp, 4);
-+ tmp_size -= 4;
-+ j++;
-+ rxbuf += 4;
-+ }
-+
-+ /* move the FIFO pointer */
-+ writel(FSPI_INTR_IPRXWA_MASK,
-+ fspi->iobase + FSPI_INTR);
-+ len -= 8;
-+ }
-+
-+ size = len % 8;
-+
-+ j = 0;
-+ if (size) {
-+ /* Wait for RXFIFO available*/
-+ while (!(readl(fspi->iobase + FSPI_INTR)
-+ & FSPI_INTR_IPRXWA_MASK))
-+ ;
-+
-+ while (len > 0) {
-+ tmp = 0;
-+ size = (len < 4) ? len : 4;
-+ tmp = readl(fspi->iobase + FSPI_RFDR + j * 4);
-+ memcpy(rxbuf, &tmp, size);
-+ len -= size;
-+ j++;
-+ rxbuf += size;
-+ }
-+ }
-+
-+ /* invalid the RXFIFO */
-+ writel(FSPI_IPRXFCR_CLR_MASK,
-+ fspi->iobase + FSPI_IPRXFCR);
-+
-+ writel(FSPI_INTR_IPRXWA_MASK,
-+ fspi->iobase + FSPI_INTR);
-+ }
-+}
-+
-+static inline void nxp_fspi_invalid(struct nxp_fspi *fspi)
-+{
-+ u32 reg;
-+
-+ reg = readl(fspi->iobase + FSPI_MCR0);
-+ writel(reg | FSPI_MCR0_SWRST_MASK, fspi->iobase + FSPI_MCR0);
-+
-+ /*
-+ * The minimum delay : 1 AHB + 2 SFCK clocks.
-+ * Delay 1 us is enough.
-+ */
-+ while (readl(fspi->iobase + FSPI_MCR0) & FSPI_MCR0_SWRST_MASK)
-+ ;
-+}
-+
-+static ssize_t nxp_fspi_nor_write(struct nxp_fspi *fspi,
-+ struct spi_nor *nor, u8 opcode,
-+ unsigned int to, u32 *txbuf,
-+ unsigned int count)
-+{
-+ int ret, i, j;
-+ int size, tmp_size;
-+ u32 data = 0;
-+
-+ dev_dbg(fspi->dev, "nor write to 0x%.8x:0x%.8x, len : %d\n",
-+ fspi->chip_base_addr, to, count);
-+
-+ /* clear the TX FIFO. */
-+ writel(FSPI_IPTXFCR_CLR_MASK, fspi->iobase + FSPI_IPTXFCR);
-+
-+ size = count / 8;
-+ for (i = 0; i < size; i++) {
-+ /* Wait for TXFIFO empty*/
-+ while (!(readl(fspi->iobase + FSPI_INTR)
-+ & FSPI_INTR_IPTXWE_MASK))
-+ ;
-+ j = 0;
-+ tmp_size = 8;
-+ while (tmp_size > 0) {
-+ data = 0;
-+ memcpy(&data, txbuf, 4);
-+ writel(data, fspi->iobase + FSPI_TFDR + j * 4);
-+ tmp_size -= 4;
-+ j++;
-+ txbuf += 1;
-+ }
-+
-+ writel(FSPI_INTR_IPTXWE_MASK, fspi->iobase + FSPI_INTR);
-+ }
-+
-+ size = count % 8;
-+ if (size) {
-+ /* Wait for TXFIFO empty*/
-+ while (!(readl(fspi->iobase + FSPI_INTR)
-+ & FSPI_INTR_IPTXWE_MASK))
-+ ;
-+
-+ j = 0;
-+ tmp_size = 0;
-+ while (size > 0) {
-+ data = 0;
-+ tmp_size = (size < 4) ? size : 4;
-+ memcpy(&data, txbuf, tmp_size);
-+ writel(data, fspi->iobase + FSPI_TFDR + j * 4);
-+ size -= tmp_size;
-+ j++;
-+ txbuf += 1;
-+ }
-+
-+ writel(FSPI_INTR_IPTXWE_MASK, fspi->iobase + FSPI_INTR);
-+ }
-+
-+ /* Trigger it */
-+ ret = nxp_fspi_runcmd(fspi, opcode, to, count);
-+
-+ if (ret == 0)
-+ return count;
-+
-+ return ret;
-+}
-+
-+static void nxp_fspi_set_map_addr(struct nxp_fspi *fspi)
-+{
-+ int nor_size = fspi->nor_size >> 10;
-+ void __iomem *base = fspi->iobase;
-+
-+ /*
-+ * Supporting same flash device as slaves on different chip-select.
-+ * As SAMEDEVICEEN bit set, by default, in mcr2 reg then need not to
-+ * configure FLSHA2CRx/FLSHB1CRx/FLSHB2CRx register as setting for
-+ * these would be ignored.
-+ * Need to Reset SAMEDEVICEEN bit in mcr2 reg, when require to add
-+ * support for different flashes.
-+ */
-+ writel(nor_size, base + FSPI_FLSHA1CR0);
-+ writel(0, base + FSPI_FLSHA2CR0);
-+ writel(0, base + FSPI_FLSHB1CR0);
-+ writel(0, base + FSPI_FLSHB2CR0);
-+}
-+
-+static void nxp_fspi_init_ahb_read(struct nxp_fspi *fspi)
-+{
-+ void __iomem *base = fspi->iobase;
-+ struct spi_nor *nor = &fspi->nor[0];
-+ int i = 0;
-+ int seqid;
-+
-+ /* AHB configuration for access buffer 0~7. */
-+ for (i = 0; i < 7; i++)
-+ writel(0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);
-+
-+ /*
-+ * Set ADATSZ with the maximum AHB buffer size to improve the read
-+ * performance.
-+ */
-+ writel((fspi->devtype_data->ahb_buf_size / 8 |
-+ FSPI_AHBRXBUF0CR7_PREF_MASK), base + FSPI_AHBRX_BUF7CR0);
-+
-+ /* prefetch and no start address alignment limitation */
-+ writel(FSPI_AHBCR_PREF_EN_MASK | FSPI_AHBCR_RDADDROPT_MASK,
-+ base + FSPI_AHBCR);
-+
-+
-+ /* Set the default lut sequence for AHB Read. */
-+ seqid = nxp_fspi_get_seqid(fspi, nor->read_opcode);
-+ writel(seqid, base + FSPI_FLSHA1CR2);
-+}
-+
-+/* This function was used to prepare and enable FSPI clock */
-+static int nxp_fspi_clk_prep_enable(struct nxp_fspi *fspi)
-+{
-+ int ret;
-+
-+ ret = clk_prepare_enable(fspi->clk_en);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(fspi->clk);
-+ if (ret) {
-+ clk_disable_unprepare(fspi->clk_en);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+/* This function was used to disable and unprepare FSPI clock */
-+static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *fspi)
-+{
-+ clk_disable_unprepare(fspi->clk);
-+ clk_disable_unprepare(fspi->clk_en);
-+}
-+
-+/* We use this function to do some basic init for spi_nor_scan(). */
-+static int nxp_fspi_nor_setup(struct nxp_fspi *fspi)
-+{
-+ void __iomem *base = fspi->iobase;
-+ u32 reg;
-+
-+ /* Reset the module */
-+ writel(FSPI_MCR0_SWRST_MASK, base + FSPI_MCR0);
-+ do {
-+ udelay(1);
-+ } while (0x1 & readl(base + FSPI_MCR0));
-+
-+ /* Disable the module */
-+ writel(FSPI_MCR0_MDIS_MASK, base + FSPI_MCR0);
-+
-+ /* Reset the DLL register to default value */
-+ writel(FSPI_DLLACR_OVRDEN_MASK, base + FSPI_DLLACR);
-+ writel(FSPI_DLLBCR_OVRDEN_MASK, base + FSPI_DLLBCR);
-+
-+ /* enable module */
-+ writel(FSPI_MCR0_AHB_TIMEOUT_MASK | FSPI_MCR0_IP_TIMEOUT_MASK,
-+ base + FSPI_MCR0);
-+
-+ /* Read the register value */
-+ reg = readl(base + FSPI_MCR0);
-+
-+ /* Init the LUT table. */
-+ nxp_fspi_init_lut(fspi);
-+
-+ /* enable the interrupt */
-+ writel(FSPI_INTEN_IPCMDDONE_MASK, fspi->iobase + FSPI_INTEN);
-+ return 0;
-+}
-+
-+static int nxp_fspi_nor_setup_last(struct nxp_fspi *fspi)
-+{
-+ unsigned long rate = fspi->clk_rate;
-+ int ret;
-+
-+ /* disable and unprepare clock to avoid glitch pass to controller */
-+ nxp_fspi_clk_disable_unprep(fspi);
-+
-+ ret = clk_set_rate(fspi->clk, rate);
-+ if (ret)
-+ return ret;
-+
-+ ret = nxp_fspi_clk_prep_enable(fspi);
-+ if (ret)
-+ return ret;
-+
-+ /* Init the LUT table again. */
-+ nxp_fspi_init_lut(fspi);
-+
-+ /* Init for AHB read */
-+ nxp_fspi_init_ahb_read(fspi);
-+
-+ return 0;
-+}
-+
-+static void nxp_fspi_set_base_addr(struct nxp_fspi *fspi,
-+ struct spi_nor *nor)
-+{
-+ fspi->chip_base_addr = fspi->nor_size * (nor - fspi->nor);
-+}
-+
-+static int nxp_fspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
-+ int len)
-+{
-+ int ret;
-+ struct nxp_fspi *fspi = nor->priv;
-+
-+ ret = nxp_fspi_runcmd(fspi, opcode, 0, len);
-+ if (ret)
-+ return ret;
-+
-+ nxp_fspi_read_data(fspi, len, buf);
-+ return 0;
-+}
-+
-+static int nxp_fspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
-+ int len)
-+{
-+ struct nxp_fspi *fspi = nor->priv;
-+ int ret;
-+
-+ if (!buf) {
-+ ret = nxp_fspi_runcmd(fspi, opcode, 0, 1);
-+ if (ret)
-+ return ret;
-+
-+ if (opcode == SPINOR_OP_CHIP_ERASE)
-+ nxp_fspi_invalid(fspi);
-+
-+ } else if (len > 0) {
-+ ret = nxp_fspi_nor_write(fspi, nor, opcode, 0,
-+ (u32 *)buf, len);
-+ } else {
-+ dev_err(fspi->dev, "invalid cmd %d\n", opcode);
-+ ret = -EINVAL;
-+ }
-+
-+ return ret;
-+}
-+
-+static ssize_t nxp_fspi_write(struct spi_nor *nor, loff_t to,
-+ size_t len, const u_char *buf)
-+{
-+ struct nxp_fspi *fspi = nor->priv;
-+ ssize_t tx_size = 0, act_wrt = 0, ret = 0;
-+
-+ while (len > 0) {
-+ tx_size = (len > TX_IPBUF_SIZE) ? TX_IPBUF_SIZE : len;
-+
-+ act_wrt = nxp_fspi_nor_write(fspi, nor, nor->program_opcode, to,
-+ (u32 *)buf, tx_size);
-+ len -= tx_size;
-+ to += tx_size;
-+ ret += act_wrt;
-+ }
-+
-+ /* invalid the data in the AHB buffer. */
-+ nxp_fspi_invalid(fspi);
-+ return ret;
-+}
-+
-+static ssize_t nxp_fspi_read(struct spi_nor *nor, loff_t from,
-+ size_t len, u_char *buf)
-+{
-+ struct nxp_fspi *fspi = nor->priv;
-+
-+ /* if necessary, ioremap buffer before AHB read, */
-+ if (!fspi->ahb_addr) {
-+ fspi->memmap_offs = fspi->chip_base_addr + from;
-+ fspi->memmap_len = len > FSPI_MIN_IOMAP ?
-+ len : FSPI_MIN_IOMAP;
-+
-+ fspi->ahb_addr = ioremap_nocache(
-+ fspi->memmap_phy + fspi->memmap_offs,
-+ fspi->memmap_len);
-+ if (!fspi->ahb_addr) {
-+ dev_err(fspi->dev, "ioremap failed\n");
-+ return -ENOMEM;
-+ }
-+ /* ioremap if the data requested is out of range */
-+ } else if (fspi->chip_base_addr + from < fspi->memmap_offs
-+ || fspi->chip_base_addr + from + len >
-+ fspi->memmap_offs + fspi->memmap_len) {
-+ iounmap(fspi->ahb_addr);
-+
-+ fspi->memmap_offs = fspi->chip_base_addr + from;
-+ fspi->memmap_len = len > FSPI_MIN_IOMAP ?
-+ len : FSPI_MIN_IOMAP;
-+ fspi->ahb_addr = ioremap_nocache(
-+ fspi->memmap_phy + fspi->memmap_offs,
-+ fspi->memmap_len);
-+ if (!fspi->ahb_addr) {
-+ dev_err(fspi->dev, "ioremap failed\n");
-+ return -ENOMEM;
-+ }
-+ }
-+
-+ dev_dbg(fspi->dev, "cmd [%x],read from %p, len:%zd\n",
-+ nor->read_opcode, fspi->ahb_addr + fspi->chip_base_addr
-+ + from - fspi->memmap_offs, len);
-+
-+ /* Read out the data directly from the AHB buffer.*/
-+ memcpy_toio(buf, fspi->ahb_addr + fspi->chip_base_addr
-+ + from - fspi->memmap_offs, len);
-+
-+ return len;
-+}
-+
-+static int nxp_fspi_erase(struct spi_nor *nor, loff_t offs)
-+{
-+ struct nxp_fspi *fspi = nor->priv;
-+ int ret;
-+
-+ dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
-+ nor->mtd.erasesize / 1024, fspi->chip_base_addr, (u32)offs);
-+
-+ ret = nxp_fspi_runcmd(fspi, nor->erase_opcode, offs, 0);
-+ if (ret)
-+ return ret;
-+
-+ nxp_fspi_invalid(fspi);
-+ return 0;
-+}
-+
-+static int nxp_fspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
-+{
-+ struct nxp_fspi *fspi = nor->priv;
-+ int ret;
-+
-+ mutex_lock(&fspi->lock);
-+
-+ ret = nxp_fspi_clk_prep_enable(fspi);
-+ if (ret)
-+ goto err_mutex;
-+
-+ nxp_fspi_set_base_addr(fspi, nor);
-+ return 0;
-+
-+err_mutex:
-+ mutex_unlock(&fspi->lock);
-+ return ret;
-+}
-+
-+static void nxp_fspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
-+{
-+ struct nxp_fspi *fspi = nor->priv;
-+
-+ nxp_fspi_clk_disable_unprep(fspi);
-+ mutex_unlock(&fspi->lock);
-+}
-+
-+static const struct of_device_id nxp_fspi_dt_ids[] = {
-+ { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
-+
-+static int nxp_fspi_probe(struct platform_device *pdev)
-+{
-+ struct spi_nor_hwcaps hwcaps = {
-+ .mask = SPINOR_OP_READ_FAST_4B |
-+ SPINOR_OP_READ_4B |
-+ SNOR_HWCAPS_PP
-+ };
-+ struct device_node *np = pdev->dev.of_node;
-+ struct device *dev = &pdev->dev;
-+ struct nxp_fspi *fspi;
-+ struct resource *res;
-+ struct spi_nor *nor;
-+ struct mtd_info *mtd;
-+ int ret, i = 0;
-+ int find_node = 0;
-+
-+ const struct of_device_id *of_id =
-+ of_match_device(nxp_fspi_dt_ids, &pdev->dev);
-+
-+ fspi = devm_kzalloc(dev, sizeof(*fspi), GFP_KERNEL);
-+ if (!fspi)
-+ return -ENOMEM;
-+
-+ fspi->nor_num = of_get_child_count(dev->of_node);
-+ if (!fspi->nor_num || fspi->nor_num > 4)
-+ return -ENODEV;
-+
-+ fspi->dev = dev;
-+ fspi->devtype_data = (struct nxp_fspi_devtype_data *)of_id->data;
-+ platform_set_drvdata(pdev, fspi);
-+
-+ /* find the resources */
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "FSPI");
-+ if (!res) {
-+ dev_err(dev, "FSPI get resource IORESOURCE_MEM failed\n");
-+ return -ENODEV;
-+ }
-+
-+ fspi->iobase = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(fspi->iobase))
-+ return PTR_ERR(fspi->iobase);
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-+ "FSPI-memory");
-+ if (!res) {
-+ dev_err(dev,
-+ "FSPI-memory get resource IORESOURCE_MEM failed\n");
-+ return -ENODEV;
-+ }
-+
-+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
-+ res->name)) {
-+ dev_err(dev, "can't request region for resource %pR\n", res);
-+ return -EBUSY;
-+ }
-+
-+ fspi->memmap_phy = res->start;
-+
-+ /* find the clocks */
-+ fspi->clk_en = devm_clk_get(dev, "fspi_en");
-+ if (IS_ERR(fspi->clk_en))
-+ return PTR_ERR(fspi->clk_en);
-+
-+ fspi->clk = devm_clk_get(dev, "fspi");
-+ if (IS_ERR(fspi->clk))
-+ return PTR_ERR(fspi->clk);
-+
-+ ret = nxp_fspi_clk_prep_enable(fspi);
-+ if (ret) {
-+ dev_err(dev, "can not enable the clock\n");
-+ goto clk_failed;
-+ }
-+
-+ /* find the irq */
-+ ret = platform_get_irq(pdev, 0);
-+ if (ret < 0) {
-+ dev_err(dev, "failed to get the irq: %d\n", ret);
-+ goto irq_failed;
-+ }
-+
-+ ret = devm_request_irq(dev, ret,
-+ nxp_fspi_irq_handler, 0, pdev->name, fspi);
-+ if (ret) {
-+ dev_err(dev, "failed to request irq: %d\n", ret);
-+ goto irq_failed;
-+ }
-+
-+ ret = nxp_fspi_nor_setup(fspi);
-+ if (ret)
-+ goto irq_failed;
-+
-+ if (of_get_property(np, "nxp,fspi-has-second-chip", NULL))
-+ fspi->has_second_chip = true;
-+
-+ mutex_init(&fspi->lock);
-+
-+ find_node = 0;
-+ /* iterate the subnodes. */
-+ for_each_available_child_of_node(dev->of_node, np) {
-+ /* skip the holes */
-+ if (!fspi->has_second_chip)
-+ i *= 2;
-+
-+ nor = &fspi->nor[i];
-+ mtd = &nor->mtd;
-+
-+ nor->dev = dev;
-+ spi_nor_set_flash_node(nor, np);
-+ nor->priv = fspi;
-+
-+ /* fill the hooks */
-+ nor->read_reg = nxp_fspi_read_reg;
-+ nor->write_reg = nxp_fspi_write_reg;
-+ nor->read = nxp_fspi_read;
-+ nor->write = nxp_fspi_write;
-+ nor->erase = nxp_fspi_erase;
-+
-+ nor->prepare = nxp_fspi_prep;
-+ nor->unprepare = nxp_fspi_unprep;
-+
-+ ret = of_property_read_u32(np, "spi-max-frequency",
-+ &fspi->clk_rate);
-+ if (ret < 0)
-+ goto next_node;
-+
-+ /* set the chip address for READID */
-+ nxp_fspi_set_base_addr(fspi, nor);
-+
-+ ret = of_property_read_u32(np, "spi-rx-bus-width",
-+ &fspi->spi_rx_bus_width);
-+ if (ret < 0)
-+ fspi->spi_rx_bus_width = FSPI_SINGLE_MODE;
-+
-+ ret = of_property_read_u32(np, "spi-tx-bus-width",
-+ &fspi->spi_tx_bus_width);
-+ if (ret < 0)
-+ fspi->spi_tx_bus_width = FSPI_SINGLE_MODE;
-+
-+ ret = spi_nor_scan(nor, NULL, &hwcaps);
-+ if (ret)
-+ goto next_node;
-+
-+ ret = mtd_device_register(mtd, NULL, 0);
-+ if (ret)
-+ goto next_node;
-+
-+ /* Set the correct NOR size now. */
-+ if (fspi->nor_size == 0) {
-+ fspi->nor_size = mtd->size;
-+
-+ /* Map the SPI NOR to accessiable address */
-+ nxp_fspi_set_map_addr(fspi);
-+ }
-+
-+ /*
-+ * The write is working in the unit of the TX FIFO,
-+ * not in the unit of the SPI NOR's page size.
-+ *
-+ * So shrink the spi_nor->page_size if it is larger then the
-+ * TX FIFO.
-+ */
-+ if (nor->page_size > fspi->devtype_data->txfifo)
-+ nor->page_size = fspi->devtype_data->txfifo;
-+
-+ find_node++;
-+next_node:
-+ i++;
-+ }
-+
-+ if (find_node == 0)
-+ goto mutex_failed;
-+
-+ /* finish the rest init. */
-+ ret = nxp_fspi_nor_setup_last(fspi);
-+ if (ret)
-+ goto last_init_failed;
-+
-+ nxp_fspi_clk_disable_unprep(fspi);
-+ return 0;
-+
-+last_init_failed:
-+ for (i = 0; i < fspi->nor_num; i++) {
-+ /* skip the holes */
-+ if (!fspi->has_second_chip)
-+ i *= 2;
-+ mtd_device_unregister(&fspi->mtd[i]);
-+ }
-+mutex_failed:
-+ mutex_destroy(&fspi->lock);
-+irq_failed:
-+ nxp_fspi_clk_disable_unprep(fspi);
-+clk_failed:
-+ dev_err(dev, "NXP FSPI probe failed\n");
-+ return ret;
-+}
-+
-+static int nxp_fspi_remove(struct platform_device *pdev)
-+{
-+ struct nxp_fspi *fspi = platform_get_drvdata(pdev);
-+ int i;
-+
-+ for (i = 0; i < fspi->nor_num; i++) {
-+ /* skip the holes */
-+ if (!fspi->has_second_chip)
-+ i *= 2;
-+ mtd_device_unregister(&fspi->nor[i].mtd);
-+ }
-+
-+ /* disable the hardware */
-+ writel(FSPI_MCR0_MDIS_MASK, fspi->iobase + FSPI_MCR0);
-+
-+ mutex_destroy(&fspi->lock);
-+
-+ if (fspi->ahb_addr)
-+ iounmap(fspi->ahb_addr);
-+
-+ return 0;
-+}
-+
-+static int nxp_fspi_suspend(struct platform_device *pdev, pm_message_t state)
-+{
-+ return 0;
-+}
-+
-+static int nxp_fspi_resume(struct platform_device *pdev)
-+{
-+ return 0;
-+}
-+
-+static struct platform_driver nxp_fspi_driver = {
-+ .driver = {
-+ .name = "nxp-fspi",
-+ .bus = &platform_bus_type,
-+ .of_match_table = nxp_fspi_dt_ids,
-+ },
-+ .probe = nxp_fspi_probe,
-+ .remove = nxp_fspi_remove,
-+ .suspend = nxp_fspi_suspend,
-+ .resume = nxp_fspi_resume,
-+};
-+module_platform_driver(nxp_fspi_driver);
-+
-+MODULE_DESCRIPTION("NXP FSPI Controller Driver");
-+MODULE_AUTHOR("NXP Semiconductor");
-+MODULE_LICENSE("GPL v2");
---- a/drivers/mtd/spi-nor/spi-nor.c
-+++ b/drivers/mtd/spi-nor/spi-nor.c
-@@ -269,6 +269,7 @@ static inline int set_4byte(struct spi_n
- u8 cmd;
-
- switch (JEDEC_MFR(info)) {
-+ case SNOR_MFR_ST:
- case SNOR_MFR_MICRON:
- /* Some Micron need WREN command; all will accept it */
- need_wren = true;
-@@ -1044,7 +1045,7 @@ static const struct flash_info spi_nor_i
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
-
-- /* Micron */
-+ /* Micron <--> ST Micro */
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
-@@ -1059,6 +1060,12 @@ static const struct flash_info spi_nor_i
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-
-+ /* Micron */
-+ {
-+ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
-+ SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES)
-+ },
-+
- /* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
-@@ -2439,6 +2446,7 @@ static int spi_nor_init_params(struct sp
- params->quad_enable = macronix_quad_enable;
- break;
-
-+ case SNOR_MFR_ST:
- case SNOR_MFR_MICRON:
- break;
-
-@@ -2757,7 +2765,8 @@ int spi_nor_scan(struct spi_nor *nor, co
- mtd->_read = spi_nor_read;
-
- /* NOR protection support for STmicro/Micron chips and similar */
-- if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
-+ if (JEDEC_MFR(info) == SNOR_MFR_ST ||
-+ JEDEC_MFR(info) == SNOR_MFR_MICRON ||
- JEDEC_MFR(info) == SNOR_MFR_WINBOND ||
- info->flags & SPI_NOR_HAS_LOCK) {
- nor->flash_lock = stm_lock;
---- a/include/linux/mtd/cfi.h
-+++ b/include/linux/mtd/cfi.h
-@@ -377,6 +377,7 @@ struct cfi_fixup {
- #define CFI_MFR_SHARP 0x00B0
- #define CFI_MFR_SST 0x00BF
- #define CFI_MFR_ST 0x0020 /* STMicroelectronics */
-+#define CFI_MFR_MICRON 0x002C /* Micron */
- #define CFI_MFR_TOSHIBA 0x0098
- #define CFI_MFR_WINBOND 0x00DA
-
---- a/include/linux/mtd/spi-nor.h
-+++ b/include/linux/mtd/spi-nor.h
-@@ -23,7 +23,8 @@
- #define SNOR_MFR_ATMEL CFI_MFR_ATMEL
- #define SNOR_MFR_GIGADEVICE 0xc8
- #define SNOR_MFR_INTEL CFI_MFR_INTEL
--#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
-+#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
-+#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
- #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
- #define SNOR_MFR_SPANSION CFI_MFR_AMD
- #define SNOR_MFR_SST CFI_MFR_SST
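The spi-nor hunks above split the old combined ST/Micron handling: legacy ST parts keep JEDEC manufacturer byte 0x20 (CFI_MFR_ST) while newer Micron parts such as mt35xu512aba identify as 0x2c (CFI_MFR_MICRON), and both now take the same WREN-before-4-byte-address and flash-lock paths. A minimal standalone sketch of that dispatch, assuming the constants are copied out of the headers and using an invented classify_mfr() helper (JEDEC_MFR() in spi-nor.c reduces to the first ID byte):

#include <stdint.h>
#include <stdio.h>

#define CFI_MFR_ST     0x0020  /* legacy ST Micro manufacturer byte */
#define CFI_MFR_MICRON 0x002C  /* Micron manufacturer byte */

/* Invented helper: mirrors how the patched core treats both IDs alike
 * for set_4byte() WREN handling and stm_lock-style protection. */
static int classify_mfr(const uint8_t *jedec_id)
{
	switch (jedec_id[0]) {
	case CFI_MFR_ST:
	case CFI_MFR_MICRON:
		return 1;	/* needs WREN, gets flash_lock hooks */
	default:
		return 0;
	}
}

int main(void)
{
	const uint8_t mt35xu512aba[] = { 0x2c, 0x5b, 0x1a };	/* new Micron entry above */
	const uint8_t n25q032[]      = { 0x20, 0xba, 0x16 };	/* legacy ST-era ID */
	const uint8_t mx66l1g45g[]   = { 0xc2, 0x20, 0x1b };	/* Macronix, for contrast */

	printf("mt35xu512aba: %d\n", classify_mfr(mt35xu512aba));
	printf("n25q032:      %d\n", classify_mfr(n25q032));
	printf("mx66l1g45g:   %d\n", classify_mfr(mx66l1g45g));
	return 0;
}

With the new defines, 0x20 and 0x2c map to distinct SNOR_MFR_ST/SNOR_MFR_MICRON values but deliberately share the same code paths.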
diff --git a/target/linux/layerscape/patches-4.14/813-ifc-nor-nand-support-layerscape.patch b/target/linux/layerscape/patches-4.14/813-ifc-nor-nand-support-layerscape.patch
deleted file mode 100644
index 8e9cd1d4a1..0000000000
--- a/target/linux/layerscape/patches-4.14/813-ifc-nor-nand-support-layerscape.patch
+++ /dev/null
@@ -1,356 +0,0 @@
-From 780865643e5dbf41fe950924a68f7ee4fea8af3e Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:39 +0800
-Subject: [PATCH 30/40] ifc-nor-nand: support layerscape
-This is an integrated patch adding ifc-nor-nand support
- for layerscape.
-
-Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
-Signed-off-by: Raghav Dogra <raghav.dogra@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- drivers/memory/fsl_ifc.c | 263 +++++++++++++++++++++++++++++
- drivers/mtd/maps/physmap_of_core.c | 4 +
- include/linux/fsl_ifc.h | 7 +
- 3 files changed, 274 insertions(+)
-
---- a/drivers/memory/fsl_ifc.c
-+++ b/drivers/memory/fsl_ifc.c
-@@ -24,6 +24,7 @@
- #include <linux/compiler.h>
- #include <linux/sched.h>
- #include <linux/spinlock.h>
-+#include <linux/delay.h>
- #include <linux/types.h>
- #include <linux/slab.h>
- #include <linux/io.h>
-@@ -37,6 +38,8 @@
-
- struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
- EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
-+#define FSL_IFC_V1_3_0 0x01030000
-+#define IFC_TIMEOUT_MSECS 1000 /* 1000ms */
-
- /*
- * convert_ifc_address - convert the base address
-@@ -311,6 +314,261 @@ err:
- return ret;
- }
-
-+#ifdef CONFIG_PM_SLEEP
-+/* save ifc registers */
-+static int fsl_ifc_suspend(struct device *dev)
-+{
-+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev);
-+ struct fsl_ifc_global __iomem *fcm = ctrl->gregs;
-+ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs;
-+ __be32 nand_evter_intr_en, cm_evter_intr_en, nor_evter_intr_en,
-+ gpcm_evter_intr_en;
-+ uint32_t ifc_bank, i;
-+
-+ ctrl->saved_gregs = kzalloc(sizeof(struct fsl_ifc_global), GFP_KERNEL);
-+ if (!ctrl->saved_gregs)
-+ return -ENOMEM;
-+ ctrl->saved_rregs = kzalloc(sizeof(struct fsl_ifc_runtime), GFP_KERNEL);
-+ if (!ctrl->saved_rregs)
-+ return -ENOMEM;
-+
-+ cm_evter_intr_en = ifc_in32(&fcm->cm_evter_intr_en);
-+ nand_evter_intr_en = ifc_in32(&runtime->ifc_nand.nand_evter_intr_en);
-+ nor_evter_intr_en = ifc_in32(&runtime->ifc_nor.nor_evter_intr_en);
-+ gpcm_evter_intr_en = ifc_in32(&runtime->ifc_gpcm.gpcm_evter_intr_en);
-+
-+/* IFC interrupts disabled */
-+
-+ ifc_out32(0x0, &fcm->cm_evter_intr_en);
-+ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en);
-+ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en);
-+ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en);
-+
-+ if (ctrl->saved_gregs) {
-+ for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) {
-+ ctrl->saved_gregs->cspr_cs[ifc_bank].cspr_ext =
-+ ifc_in32(&fcm->cspr_cs[ifc_bank].cspr_ext);
-+ ctrl->saved_gregs->cspr_cs[ifc_bank].cspr =
-+ ifc_in32(&fcm->cspr_cs[ifc_bank].cspr);
-+ ctrl->saved_gregs->amask_cs[ifc_bank].amask =
-+ ifc_in32(&fcm->amask_cs[ifc_bank].amask);
-+ ctrl->saved_gregs->csor_cs[ifc_bank].csor_ext =
-+ ifc_in32(&fcm->csor_cs[ifc_bank].csor_ext);
-+ ctrl->saved_gregs->csor_cs[ifc_bank].csor =
-+ ifc_in32(&fcm->csor_cs[ifc_bank].csor);
-+ for (i = 0; i < 4; i++) {
-+ ctrl->saved_gregs->ftim_cs[ifc_bank].ftim[i] =
-+ ifc_in32(
-+ &fcm->ftim_cs[ifc_bank].ftim[i]);
-+ }
-+ }
-+
-+ ctrl->saved_gregs->rb_map = ifc_in32(&fcm->rb_map);
-+ ctrl->saved_gregs->wb_map = ifc_in32(&fcm->wb_map);
-+ ctrl->saved_gregs->ifc_gcr = ifc_in32(&fcm->ifc_gcr);
-+ ctrl->saved_gregs->ddr_ccr_low = ifc_in32(&fcm->ddr_ccr_low);
-+ ctrl->saved_gregs->cm_evter_en = ifc_in32(&fcm->cm_evter_en);
-+ }
-+
-+ if (ctrl->saved_rregs) {
-+ /* IFC controller NAND machine registers */
-+ ctrl->saved_rregs->ifc_nand.ncfgr =
-+ ifc_in32(&runtime->ifc_nand.ncfgr);
-+ ctrl->saved_rregs->ifc_nand.nand_fcr0 =
-+ ifc_in32(&runtime->ifc_nand.nand_fcr0);
-+ ctrl->saved_rregs->ifc_nand.nand_fcr1 =
-+ ifc_in32(&runtime->ifc_nand.nand_fcr1);
-+ ctrl->saved_rregs->ifc_nand.row0 =
-+ ifc_in32(&runtime->ifc_nand.row0);
-+ ctrl->saved_rregs->ifc_nand.row1 =
-+ ifc_in32(&runtime->ifc_nand.row1);
-+ ctrl->saved_rregs->ifc_nand.col0 =
-+ ifc_in32(&runtime->ifc_nand.col0);
-+ ctrl->saved_rregs->ifc_nand.col1 =
-+ ifc_in32(&runtime->ifc_nand.col1);
-+ ctrl->saved_rregs->ifc_nand.row2 =
-+ ifc_in32(&runtime->ifc_nand.row2);
-+ ctrl->saved_rregs->ifc_nand.col2 =
-+ ifc_in32(&runtime->ifc_nand.col2);
-+ ctrl->saved_rregs->ifc_nand.row3 =
-+ ifc_in32(&runtime->ifc_nand.row3);
-+ ctrl->saved_rregs->ifc_nand.col3 =
-+ ifc_in32(&runtime->ifc_nand.col3);
-+
-+ ctrl->saved_rregs->ifc_nand.nand_fbcr =
-+ ifc_in32(&runtime->ifc_nand.nand_fbcr);
-+ ctrl->saved_rregs->ifc_nand.nand_fir0 =
-+ ifc_in32(&runtime->ifc_nand.nand_fir0);
-+ ctrl->saved_rregs->ifc_nand.nand_fir1 =
-+ ifc_in32(&runtime->ifc_nand.nand_fir1);
-+ ctrl->saved_rregs->ifc_nand.nand_fir2 =
-+ ifc_in32(&runtime->ifc_nand.nand_fir2);
-+ ctrl->saved_rregs->ifc_nand.nand_csel =
-+ ifc_in32(&runtime->ifc_nand.nand_csel);
-+ ctrl->saved_rregs->ifc_nand.nandseq_strt =
-+ ifc_in32(
-+ &runtime->ifc_nand.nandseq_strt);
-+ ctrl->saved_rregs->ifc_nand.nand_evter_en =
-+ ifc_in32(
-+ &runtime->ifc_nand.nand_evter_en);
-+ ctrl->saved_rregs->ifc_nand.nanndcr =
-+ ifc_in32(&runtime->ifc_nand.nanndcr);
-+ ctrl->saved_rregs->ifc_nand.nand_dll_lowcfg0 =
-+ ifc_in32(
-+ &runtime->ifc_nand.nand_dll_lowcfg0);
-+ ctrl->saved_rregs->ifc_nand.nand_dll_lowcfg1 =
-+ ifc_in32(
-+ &runtime->ifc_nand.nand_dll_lowcfg1);
-+
-+ /* IFC controller NOR machine registers */
-+ ctrl->saved_rregs->ifc_nor.nor_evter_en =
-+ ifc_in32(
-+ &runtime->ifc_nor.nor_evter_en);
-+ ctrl->saved_rregs->ifc_nor.norcr =
-+ ifc_in32(&runtime->ifc_nor.norcr);
-+
-+ /* IFC controller GPCM Machine registers */
-+ ctrl->saved_rregs->ifc_gpcm.gpcm_evter_en =
-+ ifc_in32(
-+ &runtime->ifc_gpcm.gpcm_evter_en);
-+ }
-+
-+/* save the interrupt values */
-+ ctrl->saved_gregs->cm_evter_intr_en = cm_evter_intr_en;
-+ ctrl->saved_rregs->ifc_nand.nand_evter_intr_en = nand_evter_intr_en;
-+ ctrl->saved_rregs->ifc_nor.nor_evter_intr_en = nor_evter_intr_en;
-+ ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en = gpcm_evter_intr_en;
-+
-+ return 0;
-+}
-+
-+/* restore ifc registers */
-+static int fsl_ifc_resume(struct device *dev)
-+{
-+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(dev);
-+ struct fsl_ifc_global __iomem *fcm = ctrl->gregs;
-+ struct fsl_ifc_runtime __iomem *runtime = ctrl->rregs;
-+ struct fsl_ifc_global *savd_gregs = ctrl->saved_gregs;
-+ struct fsl_ifc_runtime *savd_rregs = ctrl->saved_rregs;
-+ uint32_t ver = 0, ncfgr, timeout, ifc_bank, i;
-+
-+/*
-+ * IFC interrupts disabled
-+ */
-+ ifc_out32(0x0, &fcm->cm_evter_intr_en);
-+ ifc_out32(0x0, &runtime->ifc_nand.nand_evter_intr_en);
-+ ifc_out32(0x0, &runtime->ifc_nor.nor_evter_intr_en);
-+ ifc_out32(0x0, &runtime->ifc_gpcm.gpcm_evter_intr_en);
-+
-+
-+ if (ctrl->saved_gregs) {
-+ for (ifc_bank = 0; ifc_bank < FSL_IFC_BANK_COUNT; ifc_bank++) {
-+ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr_ext,
-+ &fcm->cspr_cs[ifc_bank].cspr_ext);
-+ ifc_out32(savd_gregs->cspr_cs[ifc_bank].cspr,
-+ &fcm->cspr_cs[ifc_bank].cspr);
-+ ifc_out32(savd_gregs->amask_cs[ifc_bank].amask,
-+ &fcm->amask_cs[ifc_bank].amask);
-+ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor_ext,
-+ &fcm->csor_cs[ifc_bank].csor_ext);
-+ ifc_out32(savd_gregs->csor_cs[ifc_bank].csor,
-+ &fcm->csor_cs[ifc_bank].csor);
-+ for (i = 0; i < 4; i++) {
-+ ifc_out32(savd_gregs->ftim_cs[ifc_bank].ftim[i],
-+ &fcm->ftim_cs[ifc_bank].ftim[i]);
-+ }
-+ }
-+ ifc_out32(savd_gregs->rb_map, &fcm->rb_map);
-+ ifc_out32(savd_gregs->wb_map, &fcm->wb_map);
-+ ifc_out32(savd_gregs->ifc_gcr, &fcm->ifc_gcr);
-+ ifc_out32(savd_gregs->ddr_ccr_low, &fcm->ddr_ccr_low);
-+ ifc_out32(savd_gregs->cm_evter_en, &fcm->cm_evter_en);
-+ }
-+
-+ if (ctrl->saved_rregs) {
-+ /* IFC controller NAND machine registers */
-+ ifc_out32(savd_rregs->ifc_nand.ncfgr,
-+ &runtime->ifc_nand.ncfgr);
-+ ifc_out32(savd_rregs->ifc_nand.nand_fcr0,
-+ &runtime->ifc_nand.nand_fcr0);
-+ ifc_out32(savd_rregs->ifc_nand.nand_fcr1,
-+ &runtime->ifc_nand.nand_fcr1);
-+ ifc_out32(savd_rregs->ifc_nand.row0, &runtime->ifc_nand.row0);
-+ ifc_out32(savd_rregs->ifc_nand.row1, &runtime->ifc_nand.row1);
-+ ifc_out32(savd_rregs->ifc_nand.col0, &runtime->ifc_nand.col0);
-+ ifc_out32(savd_rregs->ifc_nand.col1, &runtime->ifc_nand.col1);
-+ ifc_out32(savd_rregs->ifc_nand.row2, &runtime->ifc_nand.row2);
-+ ifc_out32(savd_rregs->ifc_nand.col2, &runtime->ifc_nand.col2);
-+ ifc_out32(savd_rregs->ifc_nand.row3, &runtime->ifc_nand.row3);
-+ ifc_out32(savd_rregs->ifc_nand.col3, &runtime->ifc_nand.col3);
-+ ifc_out32(savd_rregs->ifc_nand.nand_fbcr,
-+ &runtime->ifc_nand.nand_fbcr);
-+ ifc_out32(savd_rregs->ifc_nand.nand_fir0,
-+ &runtime->ifc_nand.nand_fir0);
-+ ifc_out32(savd_rregs->ifc_nand.nand_fir1,
-+ &runtime->ifc_nand.nand_fir1);
-+ ifc_out32(savd_rregs->ifc_nand.nand_fir2,
-+ &runtime->ifc_nand.nand_fir2);
-+ ifc_out32(savd_rregs->ifc_nand.nand_csel,
-+ &runtime->ifc_nand.nand_csel);
-+ ifc_out32(savd_rregs->ifc_nand.nandseq_strt,
-+ &runtime->ifc_nand.nandseq_strt);
-+ ifc_out32(savd_rregs->ifc_nand.nand_evter_en,
-+ &runtime->ifc_nand.nand_evter_en);
-+ ifc_out32(savd_rregs->ifc_nand.nanndcr,
-+ &runtime->ifc_nand.nanndcr);
-+ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg0,
-+ &runtime->ifc_nand.nand_dll_lowcfg0);
-+ ifc_out32(savd_rregs->ifc_nand.nand_dll_lowcfg1,
-+ &runtime->ifc_nand.nand_dll_lowcfg1);
-+
-+ /* IFC controller NOR machine registers */
-+ ifc_out32(savd_rregs->ifc_nor.nor_evter_en,
-+ &runtime->ifc_nor.nor_evter_en);
-+ ifc_out32(savd_rregs->ifc_nor.norcr, &runtime->ifc_nor.norcr);
-+
-+ /* IFC controller GPCM Machine registers */
-+ ifc_out32(savd_rregs->ifc_gpcm.gpcm_evter_en,
-+ &runtime->ifc_gpcm.gpcm_evter_en);
-+
-+ /* IFC interrupts enabled */
-+ ifc_out32(ctrl->saved_gregs->cm_evter_intr_en,
-+ &fcm->cm_evter_intr_en);
-+ ifc_out32(ctrl->saved_rregs->ifc_nand.nand_evter_intr_en,
-+ &runtime->ifc_nand.nand_evter_intr_en);
-+ ifc_out32(ctrl->saved_rregs->ifc_nor.nor_evter_intr_en,
-+ &runtime->ifc_nor.nor_evter_intr_en);
-+ ifc_out32(ctrl->saved_rregs->ifc_gpcm.gpcm_evter_intr_en,
-+ &runtime->ifc_gpcm.gpcm_evter_intr_en);
-+
-+ kfree(ctrl->saved_gregs);
-+ kfree(ctrl->saved_rregs);
-+ ctrl->saved_gregs = NULL;
-+ ctrl->saved_rregs = NULL;
-+ }
-+
-+ ver = ifc_in32(&fcm->ifc_rev);
-+ ncfgr = ifc_in32(&runtime->ifc_nand.ncfgr);
-+ if (ver >= FSL_IFC_V1_3_0) {
-+
-+ ifc_out32(ncfgr | IFC_NAND_SRAM_INIT_EN,
-+ &runtime->ifc_nand.ncfgr);
-+ /* wait for SRAM_INIT bit to be clear or timeout */
-+ timeout = 10;
-+ while ((ifc_in32(&runtime->ifc_nand.ncfgr) &
-+ IFC_NAND_SRAM_INIT_EN) && timeout) {
-+ mdelay(IFC_TIMEOUT_MSECS);
-+ timeout--;
-+ }
-+
-+ if (!timeout)
-+ dev_err(ctrl->dev, "Timeout waiting for IFC SRAM INIT");
-+ }
-+
-+ return 0;
-+}
-+#endif /* CONFIG_PM_SLEEP */
-+
- static const struct of_device_id fsl_ifc_match[] = {
- {
- .compatible = "fsl,ifc",
-@@ -318,10 +576,15 @@ static const struct of_device_id fsl_ifc
- {},
- };
-
-+static const struct dev_pm_ops ifc_pm_ops = {
-+ SET_SYSTEM_SLEEP_PM_OPS(fsl_ifc_suspend, fsl_ifc_resume)
-+};
-+
- static struct platform_driver fsl_ifc_ctrl_driver = {
- .driver = {
- .name = "fsl-ifc",
- .of_match_table = fsl_ifc_match,
-+ .pm = &ifc_pm_ops,
- },
- .probe = fsl_ifc_ctrl_probe,
- .remove = fsl_ifc_ctrl_remove,
---- a/drivers/mtd/maps/physmap_of_core.c
-+++ b/drivers/mtd/maps/physmap_of_core.c
-@@ -20,6 +20,7 @@
- #include <linux/mtd/map.h>
- #include <linux/mtd/partitions.h>
- #include <linux/mtd/concat.h>
-+#include <linux/mtd/cfi_endian.h>
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/of_platform.h>
-@@ -197,6 +198,9 @@ static int of_flash_probe(struct platfor
- info->list[i].map.bankwidth = be32_to_cpup(width);
- info->list[i].map.device_node = dp;
-
-+ if (of_property_read_bool(dp->parent, "big-endian"))
-+ info->list[i].map.swap = CFI_BIG_ENDIAN;
-+
- err = of_flash_probe_gemini(dev, dp, &info->list[i].map);
- if (err)
- goto err_out;
---- a/include/linux/fsl_ifc.h
-+++ b/include/linux/fsl_ifc.h
-@@ -274,6 +274,8 @@
- */
- /* Auto Boot Mode */
- #define IFC_NAND_NCFGR_BOOT 0x80000000
-+/* SRAM INIT EN */
-+#define IFC_NAND_SRAM_INIT_EN 0x20000000
- /* Addressing Mode-ROW0+n/COL0 */
- #define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
- /* Addressing Mode-ROW0+n/COL0+n */
-@@ -857,6 +859,11 @@ struct fsl_ifc_ctrl {
- u32 nand_stat;
- wait_queue_head_t nand_wait;
- bool little_endian;
-+#ifdef CONFIG_PM_SLEEP
-+ /*save regs when system goes to deep sleep*/
-+ struct fsl_ifc_global *saved_gregs;
-+ struct fsl_ifc_runtime *saved_rregs;
-+#endif
- };
-
- extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
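The resume path above finishes by re-running NAND SRAM initialisation on IFC revisions at or above 1.3.0: it sets IFC_NAND_SRAM_INIT_EN in NCFGR and polls until the controller clears the bit, sleeping IFC_TIMEOUT_MSECS between reads and giving up after ten tries. A minimal sketch of that poll against a simulated register, with read_ncfgr()/write_ncfgr() invented here in place of ifc_in32()/ifc_out32():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IFC_NAND_SRAM_INIT_EN 0x20000000u  /* from include/linux/fsl_ifc.h above */
#define SRAM_INIT_RETRIES     10

/* Simulated NCFGR register; the driver uses ifc_in32()/ifc_out32() on the
 * memory-mapped runtime block and the hardware clears the bit when done. */
static uint32_t sim_ncfgr;
static int sim_busy_reads = 2;	/* pretend init takes a couple of polls */

static uint32_t read_ncfgr(void)
{
	if ((sim_ncfgr & IFC_NAND_SRAM_INIT_EN) && sim_busy_reads-- <= 0)
		sim_ncfgr &= ~IFC_NAND_SRAM_INIT_EN;
	return sim_ncfgr;
}

static void write_ncfgr(uint32_t val)
{
	sim_ncfgr = val;
}

/* Mirrors the resume-path poll: true on completion, false on timeout. */
static bool ifc_sram_reinit(void)
{
	unsigned int retries = SRAM_INIT_RETRIES;

	write_ncfgr(read_ncfgr() | IFC_NAND_SRAM_INIT_EN);
	while ((read_ncfgr() & IFC_NAND_SRAM_INIT_EN) && retries)
		retries--;	/* the driver sleeps IFC_TIMEOUT_MSECS here */

	return retries != 0;
}

int main(void)
{
	printf("SRAM reinit %s\n", ifc_sram_reinit() ? "completed" : "timed out");
	return 0;
}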
diff --git a/target/linux/layerscape/patches-4.14/814-ls2-console-support-layerscape.patch b/target/linux/layerscape/patches-4.14/814-ls2-console-support-layerscape.patch
deleted file mode 100644
index 090986bd84..0000000000
--- a/target/linux/layerscape/patches-4.14/814-ls2-console-support-layerscape.patch
+++ /dev/null
@@ -1,316 +0,0 @@
-From c64d8ab6260330fa2fe9a2d676256697e4e2a83c Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:44 +0800
-Subject: [PATCH 31/40] ls2-console: support layerscape
-This is an integrated patch adding ls2-console support
- for layerscape.
-
-Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- drivers/soc/fsl/ls2-console/Kconfig | 4 +
- drivers/soc/fsl/ls2-console/Makefile | 1 +
- drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++++++++++++++++++++++
- 3 files changed, 289 insertions(+)
- create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
- create mode 100644 drivers/soc/fsl/ls2-console/Makefile
- create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
-
---- /dev/null
-+++ b/drivers/soc/fsl/ls2-console/Kconfig
-@@ -0,0 +1,4 @@
-+config FSL_LS2_CONSOLE
-+ tristate "Layerscape MC and AIOP console support"
-+ depends on ARCH_LAYERSCAPE
-+ default y
---- /dev/null
-+++ b/drivers/soc/fsl/ls2-console/Makefile
-@@ -0,0 +1 @@
-+obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o
---- /dev/null
-+++ b/drivers/soc/fsl/ls2-console/ls2-console.c
-@@ -0,0 +1,284 @@
-+/* Copyright 2015-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/miscdevice.h>
-+#include <linux/uaccess.h>
-+#include <linux/poll.h>
-+#include <linux/compat.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/io.h>
-+
-+/* SoC address for the MC firmware base low/high registers */
-+#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
-+#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
-+/* MC firmware base low/high registers indexes */
-+#define MCFBALR_OFFSET 0
-+#define MCFBAHR_OFFSET 1
-+
-+/* Bit mask used to obtain the most significant part of the MC base address */
-+#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
-+/* Bit mask used to obtain the least significant part of the MC base address */
-+#define MC_FW_LOW_ADDR_MASK 0xE0000000
-+
-+#define MC_BUFFER_OFFSET 0x01000000
-+#define MC_BUFFER_SIZE (1024*1024*16)
-+#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
-+
-+#define AIOP_BUFFER_OFFSET 0x06000000
-+#define AIOP_BUFFER_SIZE (1024*1024*16)
-+#define AIOP_OFFSET_DELTA (0)
-+
-+struct log_header {
-+ char magic_word[8]; /* magic word */
-+ uint32_t buf_start; /* holds the 32-bit little-endian
-+ * offset of the start of the buffer
-+ */
-+ uint32_t buf_length; /* holds the 32-bit little-endian
-+ * length of the buffer
-+ */
-+ uint32_t last_byte; /* holds the 32-bit little-endian offset
-+ * of the byte after the last byte that
-+ * was written
-+ */
-+ char reserved[44];
-+};
-+
-+#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
-+#define LOG_VERSION_MAJOR 1
-+#define LOG_VERSION_MINOR 0
-+
-+
-+#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
-+
-+struct console_data {
-+ char *map_addr;
-+ struct log_header *hdr;
-+ char *start_addr; /* Start of buffer */
-+ char *end_addr; /* End of buffer */
-+ char *end_of_data; /* Current end of data */
-+ char *cur_ptr; /* Last data sent to console */
-+};
-+
-+#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
-+
-+static inline void __adjust_end(struct console_data *cd)
-+{
-+ cd->end_of_data = cd->start_addr
-+ + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
-+}
-+
-+static inline void adjust_end(struct console_data *cd)
-+{
-+ invalidate(cd->hdr);
-+ __adjust_end(cd);
-+}
-+
-+static inline uint64_t get_mc_fw_base_address(void)
-+{
-+ u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
-+ SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
-+ u64 mcfwbase = 0ULL;
-+
-+ mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
-+ mcfwbase <<= 32;
-+ mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
-+ iounmap(mcfbaregs);
-+ pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
-+ return mcfwbase;
-+}
-+
-+static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
-+ u64 offset, u64 size,
-+ uint8_t *emagic, uint8_t magic_len,
-+ u32 offset_delta)
-+{
-+ struct console_data *cd;
-+ uint8_t *magic;
-+ uint32_t wrapped;
-+
-+ cd = kmalloc(sizeof(*cd), GFP_KERNEL);
-+ if (cd == NULL)
-+ return -ENOMEM;
-+ fp->private_data = cd;
-+ cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
-+
-+ cd->hdr = (struct log_header *) cd->map_addr;
-+ invalidate(cd->hdr);
-+
-+ magic = cd->hdr->magic_word;
-+ if (memcmp(magic, emagic, magic_len)) {
-+ pr_info("magic didn't match!\n");
-+ pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
-+ emagic[0], emagic[1], emagic[2], emagic[3],
-+ emagic[4], emagic[5], emagic[6], emagic[7]);
-+ pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
-+ magic[0], magic[1], magic[2], magic[3],
-+ magic[4], magic[5], magic[6], magic[7]);
-+ kfree(cd);
-+ iounmap(cd->map_addr);
-+ return -EIO;
-+ }
-+
-+ cd->start_addr = cd->map_addr
-+ + le32_to_cpu(cd->hdr->buf_start) - offset_delta;
-+ cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length);
-+
-+ wrapped = le32_to_cpu(cd->hdr->last_byte)
-+ & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
-+
-+ __adjust_end(cd);
-+ if (wrapped && (cd->end_of_data != cd->end_addr))
-+ cd->cur_ptr = cd->end_of_data+1;
-+ else
-+ cd->cur_ptr = cd->start_addr;
-+
-+ return 0;
-+}
-+
-+static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp)
-+{
-+ uint8_t magic_word[] = { 0, 1, 'C', 'M' };
-+
-+ return fsl_ls2_generic_console_open(node, fp,
-+ MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
-+ magic_word, sizeof(magic_word),
-+ MC_OFFSET_DELTA);
-+}
-+
-+static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp)
-+{
-+ uint8_t magic_word[] = { 'P', 'O', 'I', 'A' };
-+
-+ return fsl_ls2_generic_console_open(node, fp,
-+ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
-+ magic_word, sizeof(magic_word),
-+ AIOP_OFFSET_DELTA);
-+}
-+
-+static int fsl_ls2_console_close(struct inode *node, struct file *fp)
-+{
-+ struct console_data *cd = fp->private_data;
-+
-+ iounmap(cd->map_addr);
-+ kfree(cd);
-+ return 0;
-+}
-+
-+ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count,
-+ loff_t *f_pos)
-+{
-+ struct console_data *cd = fp->private_data;
-+ size_t bytes = 0;
-+ char data;
-+
-+ /* Check if we need to adjust the end of data addr */
-+ adjust_end(cd);
-+
-+ while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) {
-+ if (((u64)cd->cur_ptr) % 64 == 0)
-+ invalidate(cd->cur_ptr);
-+
-+ data = *(cd->cur_ptr);
-+ if (copy_to_user(&buf[bytes], &data, 1))
-+ return -EFAULT;
-+ cd->cur_ptr++;
-+ if (cd->cur_ptr >= cd->end_addr)
-+ cd->cur_ptr = cd->start_addr;
-+ ++bytes;
-+ }
-+ return bytes;
-+}
-+
-+static const struct file_operations fsl_ls2_mc_console_fops = {
-+ .owner = THIS_MODULE,
-+ .open = fsl_ls2_mc_console_open,
-+ .release = fsl_ls2_console_close,
-+ .read = fsl_ls2_console_read,
-+};
-+
-+static struct miscdevice fsl_ls2_mc_console_dev = {
-+ .minor = MISC_DYNAMIC_MINOR,
-+ .name = "fsl_mc_console",
-+ .fops = &fsl_ls2_mc_console_fops
-+};
-+
-+static const struct file_operations fsl_ls2_aiop_console_fops = {
-+ .owner = THIS_MODULE,
-+ .open = fsl_ls2_aiop_console_open,
-+ .release = fsl_ls2_console_close,
-+ .read = fsl_ls2_console_read,
-+};
-+
-+static struct miscdevice fsl_ls2_aiop_console_dev = {
-+ .minor = MISC_DYNAMIC_MINOR,
-+ .name = "fsl_aiop_console",
-+ .fops = &fsl_ls2_aiop_console_fops
-+};
-+
-+static int __init fsl_ls2_console_init(void)
-+{
-+ int err = 0;
-+
-+ pr_info("Freescale LS2 console driver\n");
-+ err = misc_register(&fsl_ls2_mc_console_dev);
-+ if (err) {
-+ pr_err("fsl_mc_console: cannot register device\n");
-+ return err;
-+ }
-+ pr_info("fsl-ls2-console: device %s registered\n",
-+ fsl_ls2_mc_console_dev.name);
-+
-+ err = misc_register(&fsl_ls2_aiop_console_dev);
-+ if (err) {
-+ pr_err("fsl_aiop_console: cannot register device\n");
-+ return err;
-+ }
-+ pr_info("fsl-ls2-console: device %s registered\n",
-+ fsl_ls2_aiop_console_dev.name);
-+
-+ return 0;
-+}
-+
-+static void __exit fsl_ls2_console_exit(void)
-+{
-+ misc_deregister(&fsl_ls2_mc_console_dev);
-+
-+ misc_deregister(&fsl_ls2_aiop_console_dev);
-+}
-+
-+module_init(fsl_ls2_console_init);
-+module_exit(fsl_ls2_console_exit);
-+
-+MODULE_AUTHOR("Roy Pledge <roy.pledge@freescale.com>");
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_DESCRIPTION("Freescale LS2 console driver");
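The console driver above treats the MC/AIOP log as a header plus ring buffer: buf_start and buf_length locate the data, last_byte carries the write offset with bit 31 flagging wraparound, and a reader starts just past the write pointer once the buffer has wrapped. A minimal sketch of that pointer arithmetic on plain offsets, with the struct and helper names invented here for illustration:

#include <stdint.h>
#include <stdio.h>

#define WRAP_FLAG    0x80000000u	/* LOG_HEADER_FLAG_BUFFER_WRAPAROUND */
#define LAST_BYTE(v) ((v) & ~WRAP_FLAG)

struct log_hdr {
	uint32_t buf_start;	/* offset of the data buffer from the header */
	uint32_t buf_length;	/* size of the ring in bytes */
	uint32_t last_byte;	/* write offset; bit 31 set once the ring wrapped */
};

/* Offset (relative to the start of the data buffer) where a reader should
 * begin, mirroring the open() logic above. */
static uint32_t read_start(const struct log_hdr *hdr)
{
	uint32_t end = LAST_BYTE(hdr->last_byte);

	if ((hdr->last_byte & WRAP_FLAG) && end != hdr->buf_length)
		return end + 1;	/* oldest data sits just past the write pointer */
	return 0;		/* not wrapped yet: read from the beginning */
}

int main(void)
{
	struct log_hdr fresh   = { 64, 4096, 100 };
	struct log_hdr wrapped = { 64, 4096, WRAP_FLAG | 100 };

	printf("fresh:   read from offset %u\n", (unsigned)read_start(&fresh));
	printf("wrapped: read from offset %u\n", (unsigned)read_start(&wrapped));
	return 0;
}

The read() path then walks forward from that offset towards last_byte, wrapping at buf_length and invalidating the cache every 64 bytes, as in fsl_ls2_console_read() above.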
diff --git a/target/linux/layerscape/patches-4.14/815-msi-support-layerscape.patch b/target/linux/layerscape/patches-4.14/815-msi-support-layerscape.patch
deleted file mode 100644
index dd2a84e79f..0000000000
--- a/target/linux/layerscape/patches-4.14/815-msi-support-layerscape.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 56d12979923250799d651b75029ff281928635a4 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:53 +0800
-Subject: [PATCH 32/40] msi: support layerscape
-This is an integrated patch adding MSI support for layerscape
-
-Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- .../devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt | 1 +
- drivers/irqchip/irq-ls-scfg-msi.c | 1 +
- 2 files changed, 2 insertions(+)
-
---- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
-+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
-@@ -8,6 +8,7 @@ Required properties:
- "fsl,ls1043a-msi"
- "fsl,ls1046a-msi"
- "fsl,ls1043a-v1.1-msi"
-+ "fsl,ls1012a-msi"
- - msi-controller: indicates that this is a PCIe MSI controller node
- - reg: physical base address of the controller and length of memory mapped.
- - interrupts: an interrupt to the parent interrupt controller.
---- a/drivers/irqchip/irq-ls-scfg-msi.c
-+++ b/drivers/irqchip/irq-ls-scfg-msi.c
-@@ -319,6 +319,7 @@ static const struct of_device_id ls_scfg
- { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
- { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
-
-+ { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
- { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
- { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
- { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
diff --git a/target/linux/layerscape/patches-4.14/816-pcie-support-layerscape.patch b/target/linux/layerscape/patches-4.14/816-pcie-support-layerscape.patch
deleted file mode 100644
index 4362863b7b..0000000000
--- a/target/linux/layerscape/patches-4.14/816-pcie-support-layerscape.patch
+++ /dev/null
@@ -1,5977 +0,0 @@
-From c54a010fe105281259b996d318ed85efc4103fee Mon Sep 17 00:00:00 2001
-From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Mon, 6 May 2019 15:18:05 +0800
-Subject: [PATCH] pcie: support layerscape
-
-This is an integrated patch of pcie for layerscape
-
-Signed-off-by: Bao Xiaowei <xiaowei.bao@nxp.com>
-Signed-off-by: Bhumika Goyal <bhumirks@gmail.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Signed-off-by: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
-Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
-Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
-Signed-off-by: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
-Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
-Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
-Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
-Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-Signed-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>
-Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
-Signed-off-by: Po Liu <po.liu@nxp.com>
-Signed-off-by: Rob Herring <robh@kernel.org>
-Signed-off-by: Rolf Evers-Fischer <rolf.evers.fischer@aptiv.com>
-Signed-off-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
-Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- arch/arm/kernel/bios32.c | 43 ++
- arch/arm64/kernel/pci.c | 43 ++
- drivers/misc/pci_endpoint_test.c | 332 ++++++++++---
- drivers/pci/Kconfig | 1 +
- drivers/pci/dwc/Kconfig | 39 +-
- drivers/pci/dwc/Makefile | 2 +-
- drivers/pci/dwc/pci-dra7xx.c | 9 -
- drivers/pci/dwc/pci-layerscape-ep.c | 146 ++++++
- drivers/pci/dwc/pci-layerscape.c | 12 +
- drivers/pci/dwc/pcie-designware-ep.c | 338 ++++++++++++--
- drivers/pci/dwc/pcie-designware-host.c | 5 +-
- drivers/pci/dwc/pcie-designware-plat.c | 159 ++++++-
- drivers/pci/dwc/pcie-designware.c | 5 +-
- drivers/pci/dwc/pcie-designware.h | 57 ++-
- drivers/pci/endpoint/Kconfig | 1 +
- drivers/pci/endpoint/Makefile | 1 +
- drivers/pci/endpoint/functions/Kconfig | 1 +
- drivers/pci/endpoint/functions/Makefile | 1 +
- drivers/pci/endpoint/functions/pci-epf-test.c | 191 +++++---
- drivers/pci/endpoint/pci-ep-cfs.c | 95 +++-
- drivers/pci/endpoint/pci-epc-core.c | 159 +++++--
- drivers/pci/endpoint/pci-epc-mem.c | 13 +-
- drivers/pci/endpoint/pci-epf-core.c | 116 +++--
- drivers/pci/host/pci-host-common.c | 8 -
- drivers/pci/host/pcie-xilinx-nwl.c | 9 -
- drivers/pci/host/pcie-xilinx.c | 7 -
- drivers/pci/mobiveil/Kconfig | 50 ++
- drivers/pci/mobiveil/Makefile | 7 +
- drivers/pci/mobiveil/pci-layerscape-gen4-ep.c | 178 +++++++
- drivers/pci/mobiveil/pci-layerscape-gen4.c | 292 ++++++++++++
- drivers/pci/mobiveil/pcie-mobiveil-ep.c | 512 +++++++++++++++++++++
- drivers/pci/mobiveil/pcie-mobiveil-host.c | 640 ++++++++++++++++++++++++++
- drivers/pci/mobiveil/pcie-mobiveil-plat.c | 54 +++
- drivers/pci/mobiveil/pcie-mobiveil.c | 334 ++++++++++++++
- drivers/pci/mobiveil/pcie-mobiveil.h | 296 ++++++++++++
- drivers/pci/pcie/portdrv_core.c | 29 ++
- drivers/pci/quirks.c | 15 +
- include/linux/pci-ep-cfs.h | 5 +-
- include/linux/pci-epc.h | 73 +--
- include/linux/pci-epf.h | 12 +-
- include/linux/pci.h | 1 +
- include/uapi/linux/pcitest.h | 3 +
- tools/pci/pcitest.c | 51 +-
- tools/pci/pcitest.sh | 15 +
- 44 files changed, 3917 insertions(+), 443 deletions(-)
- create mode 100644 drivers/pci/dwc/pci-layerscape-ep.c
- create mode 100644 drivers/pci/mobiveil/Kconfig
- create mode 100644 drivers/pci/mobiveil/Makefile
- create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
- create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4.c
- create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-ep.c
- create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-host.c
- create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-plat.c
- create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.c
- create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.h
-
---- a/arch/arm/kernel/bios32.c
-+++ b/arch/arm/kernel/bios32.c
-@@ -12,6 +12,8 @@
- #include <linux/slab.h>
- #include <linux/init.h>
- #include <linux/io.h>
-+#include <linux/of_irq.h>
-+#include <linux/pcieport_if.h>
-
- #include <asm/mach-types.h>
- #include <asm/mach/map.h>
-@@ -65,6 +67,47 @@ void pcibios_report_status(u_int status_
- }
-
- /*
-+ * Check device tree if the service interrupts are there
-+ */
-+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
-+{
-+ int ret, count = 0;
-+ struct device_node *np = NULL;
-+
-+ if (dev->bus->dev.of_node)
-+ np = dev->bus->dev.of_node;
-+
-+ if (np == NULL)
-+ return 0;
-+
-+ if (!IS_ENABLED(CONFIG_OF_IRQ))
-+ return 0;
-+
-+ /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
-+ * request irq for aer
-+ */
-+ if (mask & PCIE_PORT_SERVICE_AER) {
-+ ret = of_irq_get_byname(np, "aer");
-+ if (ret > 0) {
-+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
-+ count++;
-+ }
-+ }
-+
-+ if (mask & PCIE_PORT_SERVICE_PME) {
-+ ret = of_irq_get_byname(np, "pme");
-+ if (ret > 0) {
-+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
-+ count++;
-+ }
-+ }
-+
-+ /* TODO: add more service interrupts if present in the device tree */
-+
-+ return count;
-+}
-+
-+/*
- * We don't use this to fix the device, but initialisation of it.
- * It's not the correct use for this, but it works.
- * Note that the arbiter/ISA bridge appears to be buggy, specifically in
---- a/arch/arm64/kernel/pci.c
-+++ b/arch/arm64/kernel/pci.c
-@@ -17,6 +17,8 @@
- #include <linux/mm.h>
- #include <linux/of_pci.h>
- #include <linux/of_platform.h>
-+#include <linux/of_irq.h>
-+#include <linux/pcieport_if.h>
- #include <linux/pci.h>
- #include <linux/pci-acpi.h>
- #include <linux/pci-ecam.h>
-@@ -36,6 +38,47 @@ int pcibios_alloc_irq(struct pci_dev *de
- #endif
-
- /*
-+ * Check device tree if the service interrupts are there
-+ */
-+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
-+{
-+ int ret, count = 0;
-+ struct device_node *np = NULL;
-+
-+ if (dev->bus->dev.of_node)
-+ np = dev->bus->dev.of_node;
-+
-+ if (np == NULL)
-+ return 0;
-+
-+ if (!IS_ENABLED(CONFIG_OF_IRQ))
-+ return 0;
-+
-+ /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
-+ * request irq for aer
-+ */
-+ if (mask & PCIE_PORT_SERVICE_AER) {
-+ ret = of_irq_get_byname(np, "aer");
-+ if (ret > 0) {
-+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
-+ count++;
-+ }
-+ }
-+
-+ if (mask & PCIE_PORT_SERVICE_PME) {
-+ ret = of_irq_get_byname(np, "pme");
-+ if (ret > 0) {
-+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
-+ count++;
-+ }
-+ }
-+
-+ /* TODO: add more service interrupts if present in the device tree */
-+
-+ return count;
-+}
-+
-+/*
- * raw_pci_read/write - Platform-specific PCI config space access.
- */
- int raw_pci_read(unsigned int domain, unsigned int bus,
---- a/drivers/misc/pci_endpoint_test.c
-+++ b/drivers/misc/pci_endpoint_test.c
-@@ -35,38 +35,45 @@
-
- #include <uapi/linux/pcitest.h>
-
--#define DRV_MODULE_NAME "pci-endpoint-test"
-+#define DRV_MODULE_NAME "pci-endpoint-test"
-
--#define PCI_ENDPOINT_TEST_MAGIC 0x0
-+#define IRQ_TYPE_UNDEFINED -1
-+#define IRQ_TYPE_LEGACY 0
-+#define IRQ_TYPE_MSI 1
-+#define IRQ_TYPE_MSIX 2
-+
-+#define PCI_ENDPOINT_TEST_MAGIC 0x0
-+
-+#define PCI_ENDPOINT_TEST_COMMAND 0x4
-+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
-+#define COMMAND_RAISE_MSI_IRQ BIT(1)
-+#define COMMAND_RAISE_MSIX_IRQ BIT(2)
-+#define COMMAND_READ BIT(3)
-+#define COMMAND_WRITE BIT(4)
-+#define COMMAND_COPY BIT(5)
-+
-+#define PCI_ENDPOINT_TEST_STATUS 0x8
-+#define STATUS_READ_SUCCESS BIT(0)
-+#define STATUS_READ_FAIL BIT(1)
-+#define STATUS_WRITE_SUCCESS BIT(2)
-+#define STATUS_WRITE_FAIL BIT(3)
-+#define STATUS_COPY_SUCCESS BIT(4)
-+#define STATUS_COPY_FAIL BIT(5)
-+#define STATUS_IRQ_RAISED BIT(6)
-+#define STATUS_SRC_ADDR_INVALID BIT(7)
-+#define STATUS_DST_ADDR_INVALID BIT(8)
-
--#define PCI_ENDPOINT_TEST_COMMAND 0x4
--#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
--#define COMMAND_RAISE_MSI_IRQ BIT(1)
--#define MSI_NUMBER_SHIFT 2
--/* 6 bits for MSI number */
--#define COMMAND_READ BIT(8)
--#define COMMAND_WRITE BIT(9)
--#define COMMAND_COPY BIT(10)
--
--#define PCI_ENDPOINT_TEST_STATUS 0x8
--#define STATUS_READ_SUCCESS BIT(0)
--#define STATUS_READ_FAIL BIT(1)
--#define STATUS_WRITE_SUCCESS BIT(2)
--#define STATUS_WRITE_FAIL BIT(3)
--#define STATUS_COPY_SUCCESS BIT(4)
--#define STATUS_COPY_FAIL BIT(5)
--#define STATUS_IRQ_RAISED BIT(6)
--#define STATUS_SRC_ADDR_INVALID BIT(7)
--#define STATUS_DST_ADDR_INVALID BIT(8)
--
--#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
-+#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
- #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
-
- #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
- #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
-
--#define PCI_ENDPOINT_TEST_SIZE 0x1c
--#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
-+#define PCI_ENDPOINT_TEST_SIZE 0x1c
-+#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
-+
-+#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
-+#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
-
- static DEFINE_IDA(pci_endpoint_test_ida);
-
-@@ -77,6 +84,10 @@ static bool no_msi;
- module_param(no_msi, bool, 0444);
- MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
-
-+static int irq_type = IRQ_TYPE_MSI;
-+module_param(irq_type, int, 0444);
-+MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
-+
- enum pci_barno {
- BAR_0,
- BAR_1,
-@@ -103,7 +114,7 @@ struct pci_endpoint_test {
- struct pci_endpoint_test_data {
- enum pci_barno test_reg_bar;
- size_t alignment;
-- bool no_msi;
-+ int irq_type;
- };
-
- static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
-@@ -147,6 +158,100 @@ static irqreturn_t pci_endpoint_test_irq
- return IRQ_HANDLED;
- }
-
-+static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
-+{
-+ struct pci_dev *pdev = test->pdev;
-+
-+ pci_free_irq_vectors(pdev);
-+}
-+
-+static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
-+ int type)
-+{
-+ int irq = -1;
-+ struct pci_dev *pdev = test->pdev;
-+ struct device *dev = &pdev->dev;
-+ bool res = true;
-+
-+ switch (type) {
-+ case IRQ_TYPE_LEGACY:
-+ irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
-+ if (irq < 0)
-+ dev_err(dev, "Failed to get Legacy interrupt\n");
-+ break;
-+ case IRQ_TYPE_MSI:
-+ irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
-+ if (irq < 0)
-+ dev_err(dev, "Failed to get MSI interrupts\n");
-+ break;
-+ case IRQ_TYPE_MSIX:
-+ irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
-+ if (irq < 0)
-+ dev_err(dev, "Failed to get MSI-X interrupts\n");
-+ break;
-+ default:
-+ dev_err(dev, "Invalid IRQ type selected\n");
-+ }
-+
-+ if (irq < 0) {
-+ irq = 0;
-+ res = false;
-+ }
-+ test->num_irqs = irq;
-+
-+ return res;
-+}
-+
-+static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
-+{
-+ int i;
-+ struct pci_dev *pdev = test->pdev;
-+ struct device *dev = &pdev->dev;
-+
-+ for (i = 0; i < test->num_irqs; i++)
-+ devm_free_irq(dev, pci_irq_vector(pdev, i), test);
-+
-+ test->num_irqs = 0;
-+}
-+
-+static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
-+{
-+ int i;
-+ int err;
-+ struct pci_dev *pdev = test->pdev;
-+ struct device *dev = &pdev->dev;
-+
-+ for (i = 0; i < test->num_irqs; i++) {
-+ err = devm_request_irq(dev, pci_irq_vector(pdev, i),
-+ pci_endpoint_test_irqhandler,
-+ IRQF_SHARED, DRV_MODULE_NAME, test);
-+ if (err)
-+ goto fail;
-+ }
-+
-+ return true;
-+
-+fail:
-+ switch (irq_type) {
-+ case IRQ_TYPE_LEGACY:
-+ dev_err(dev, "Failed to request IRQ %d for Legacy\n",
-+ pci_irq_vector(pdev, i));
-+ break;
-+ case IRQ_TYPE_MSI:
-+ dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
-+ pci_irq_vector(pdev, i),
-+ i + 1);
-+ break;
-+ case IRQ_TYPE_MSIX:
-+ dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
-+ pci_irq_vector(pdev, i),
-+ i + 1);
-+ break;
-+ }
-+
-+ return false;
-+}
-+
- static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
- enum pci_barno barno)
- {
-@@ -179,6 +284,9 @@ static bool pci_endpoint_test_legacy_irq
- {
- u32 val;
-
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
-+ IRQ_TYPE_LEGACY);
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- COMMAND_RAISE_LEGACY_IRQ);
- val = wait_for_completion_timeout(&test->irq_raised,
-@@ -190,20 +298,24 @@ static bool pci_endpoint_test_legacy_irq
- }
-
- static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
-- u8 msi_num)
-+ u16 msi_num, bool msix)
- {
- u32 val;
- struct pci_dev *pdev = test->pdev;
-
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
-+ msix == false ? IRQ_TYPE_MSI :
-+ IRQ_TYPE_MSIX);
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
-- msi_num << MSI_NUMBER_SHIFT |
-- COMMAND_RAISE_MSI_IRQ);
-+ msix == false ? COMMAND_RAISE_MSI_IRQ :
-+ COMMAND_RAISE_MSIX_IRQ);
- val = wait_for_completion_timeout(&test->irq_raised,
- msecs_to_jiffies(1000));
- if (!val)
- return false;
-
-- if (test->last_irq - pdev->irq == msi_num - 1)
-+ if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
- return true;
-
- return false;
-@@ -230,10 +342,18 @@ static bool pci_endpoint_test_copy(struc
- if (size > SIZE_MAX - alignment)
- goto err;
-
-+ if (size > SIZE_MAX - alignment)
-+ goto err;
-+
-+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
-+ dev_err(dev, "Invalid IRQ type option\n");
-+ goto err;
-+ }
-+
- orig_src_addr = dma_alloc_coherent(dev, size + alignment,
- &orig_src_phys_addr, GFP_KERNEL);
- if (!orig_src_addr) {
-- dev_err(dev, "failed to allocate source buffer\n");
-+ dev_err(dev, "Failed to allocate source buffer\n");
- ret = false;
- goto err;
- }
-@@ -259,7 +379,7 @@ static bool pci_endpoint_test_copy(struc
- orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
- &orig_dst_phys_addr, GFP_KERNEL);
- if (!orig_dst_addr) {
-- dev_err(dev, "failed to allocate destination address\n");
-+ dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
- goto err_orig_src_addr;
- }
-@@ -281,8 +401,10 @@ static bool pci_endpoint_test_copy(struc
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
- size);
-
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
-- 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
-+ COMMAND_COPY);
-
- wait_for_completion(&test->irq_raised);
-
-@@ -318,10 +440,18 @@ static bool pci_endpoint_test_write(stru
- if (size > SIZE_MAX - alignment)
- goto err;
-
-+ if (size > SIZE_MAX - alignment)
-+ goto err;
-+
-+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
-+ dev_err(dev, "Invalid IRQ type option\n");
-+ goto err;
-+ }
-+
- orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
- GFP_KERNEL);
- if (!orig_addr) {
-- dev_err(dev, "failed to allocate address\n");
-+ dev_err(dev, "Failed to allocate address\n");
- ret = false;
- goto err;
- }
-@@ -348,8 +478,10 @@ static bool pci_endpoint_test_write(stru
-
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
-
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
-- 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
-+ COMMAND_READ);
-
- wait_for_completion(&test->irq_raised);
-
-@@ -379,10 +511,18 @@ static bool pci_endpoint_test_read(struc
- if (size > SIZE_MAX - alignment)
- goto err;
-
-+ if (size > SIZE_MAX - alignment)
-+ goto err;
-+
-+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
-+ dev_err(dev, "Invalid IRQ type option\n");
-+ goto err;
-+ }
-+
- orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
- GFP_KERNEL);
- if (!orig_addr) {
-- dev_err(dev, "failed to allocate destination address\n");
-+ dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
- goto err;
- }
-@@ -403,8 +543,10 @@ static bool pci_endpoint_test_read(struc
-
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
-
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
-+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
- pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
-- 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
-+ COMMAND_WRITE);
-
- wait_for_completion(&test->irq_raised);
-
-@@ -417,6 +559,38 @@ err:
- return ret;
- }
-
-+static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
-+ int req_irq_type)
-+{
-+ struct pci_dev *pdev = test->pdev;
-+ struct device *dev = &pdev->dev;
-+
-+ if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
-+ dev_err(dev, "Invalid IRQ type option\n");
-+ return false;
-+ }
-+
-+ if (irq_type == req_irq_type)
-+ return true;
-+
-+ pci_endpoint_test_release_irq(test);
-+ pci_endpoint_test_free_irq_vectors(test);
-+
-+ if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
-+ goto err;
-+
-+ if (!pci_endpoint_test_request_irq(test))
-+ goto err;
-+
-+ irq_type = req_irq_type;
-+ return true;
-+
-+err:
-+ pci_endpoint_test_free_irq_vectors(test);
-+ irq_type = IRQ_TYPE_UNDEFINED;
-+ return false;
-+}
-+
- static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
- {
-@@ -436,7 +610,8 @@ static long pci_endpoint_test_ioctl(stru
- ret = pci_endpoint_test_legacy_irq(test);
- break;
- case PCITEST_MSI:
-- ret = pci_endpoint_test_msi_irq(test, arg);
-+ case PCITEST_MSIX:
-+ ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
- break;
- case PCITEST_WRITE:
- ret = pci_endpoint_test_write(test, arg);
-@@ -447,6 +622,12 @@ static long pci_endpoint_test_ioctl(stru
- case PCITEST_COPY:
- ret = pci_endpoint_test_copy(test, arg);
- break;
-+ case PCITEST_SET_IRQTYPE:
-+ ret = pci_endpoint_test_set_irq(test, arg);
-+ break;
-+ case PCITEST_GET_IRQTYPE:
-+ ret = irq_type;
-+ break;
- }
-
- ret:
-@@ -462,9 +643,7 @@ static const struct file_operations pci_
- static int pci_endpoint_test_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
- {
-- int i;
- int err;
-- int irq = 0;
- int id;
- char name[24];
- enum pci_barno bar;
-@@ -486,12 +665,15 @@ static int pci_endpoint_test_probe(struc
- test->alignment = 0;
- test->pdev = pdev;
-
-+ if (no_msi)
-+ irq_type = IRQ_TYPE_LEGACY;
-+
- data = (struct pci_endpoint_test_data *)ent->driver_data;
- if (data) {
- test_reg_bar = data->test_reg_bar;
- test->test_reg_bar = test_reg_bar;
- test->alignment = data->alignment;
-- no_msi = data->no_msi;
-+ irq_type = data->irq_type;
- }
-
- init_completion(&test->irq_raised);
-@@ -511,36 +693,21 @@ static int pci_endpoint_test_probe(struc
-
- pci_set_master(pdev);
-
-- if (!no_msi) {
-- irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
-- if (irq < 0)
-- dev_err(dev, "failed to get MSI interrupts\n");
-- test->num_irqs = irq;
-- }
-+ if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
-+ goto err_disable_irq;
-
-- err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
-- IRQF_SHARED, DRV_MODULE_NAME, test);
-- if (err) {
-- dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
-- goto err_disable_msi;
-- }
--
-- for (i = 1; i < irq; i++) {
-- err = devm_request_irq(dev, pdev->irq + i,
-- pci_endpoint_test_irqhandler,
-- IRQF_SHARED, DRV_MODULE_NAME, test);
-- if (err)
-- dev_err(dev, "failed to request IRQ %d for MSI %d\n",
-- pdev->irq + i, i + 1);
-- }
-+ if (!pci_endpoint_test_request_irq(test))
-+ goto err_disable_irq;
-
- for (bar = BAR_0; bar <= BAR_5; bar++) {
-- base = pci_ioremap_bar(pdev, bar);
-- if (!base) {
-- dev_err(dev, "failed to read BAR%d\n", bar);
-- WARN_ON(bar == test_reg_bar);
-+ if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
-+ base = pci_ioremap_bar(pdev, bar);
-+ if (!base) {
-+ dev_err(dev, "Failed to read BAR%d\n", bar);
-+ WARN_ON(bar == test_reg_bar);
-+ }
-+ test->bar[bar] = base;
- }
-- test->bar[bar] = base;
- }
-
- test->base = test->bar[test_reg_bar];
-@@ -556,24 +723,31 @@ static int pci_endpoint_test_probe(struc
- id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- err = id;
-- dev_err(dev, "unable to get id\n");
-+ dev_err(dev, "Unable to get id\n");
- goto err_iounmap;
- }
-
- snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
- misc_device = &test->miscdev;
- misc_device->minor = MISC_DYNAMIC_MINOR;
-- misc_device->name = name;
-+ misc_device->name = kstrdup(name, GFP_KERNEL);
-+ if (!misc_device->name) {
-+ err = -ENOMEM;
-+ goto err_ida_remove;
-+ }
- misc_device->fops = &pci_endpoint_test_fops,
-
- err = misc_register(misc_device);
- if (err) {
-- dev_err(dev, "failed to register device\n");
-- goto err_ida_remove;
-+ dev_err(dev, "Failed to register device\n");
-+ goto err_kfree_name;
- }
-
- return 0;
-
-+err_kfree_name:
-+ kfree(misc_device->name);
-+
- err_ida_remove:
- ida_simple_remove(&pci_endpoint_test_ida, id);
-
-@@ -583,11 +757,13 @@ err_iounmap:
- pci_iounmap(pdev, test->bar[bar]);
- }
-
-- for (i = 0; i < irq; i++)
-- devm_free_irq(dev, pdev->irq + i, test);
-+ pci_endpoint_test_release_irq(test);
-
- err_disable_msi:
- pci_disable_msi(pdev);
-+
-+err_disable_irq:
-+ pci_endpoint_test_free_irq_vectors(test);
- pci_release_regions(pdev);
-
- err_disable_pdev:
-@@ -610,14 +786,15 @@ static void pci_endpoint_test_remove(str
- return;
-
- misc_deregister(&test->miscdev);
-+ kfree(misc_device->name);
- ida_simple_remove(&pci_endpoint_test_ida, id);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
- if (test->bar[bar])
- pci_iounmap(pdev, test->bar[bar]);
- }
-- for (i = 0; i < test->num_irqs; i++)
-- devm_free_irq(&pdev->dev, pdev->irq + i, test);
-- pci_disable_msi(pdev);
-+
-+ pci_endpoint_test_release_irq(test);
-+ pci_endpoint_test_free_irq_vectors(test);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- }
-@@ -625,6 +802,7 @@ static void pci_endpoint_test_remove(str
- static const struct pci_device_id pci_endpoint_test_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
-+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID) },
- { }
- };
- MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
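The pci_endpoint_test rework above drops the MSI-number bitfield from the command register and adds two dedicated registers, PCI_ENDPOINT_TEST_IRQ_TYPE and PCI_ENDPOINT_TEST_IRQ_NUMBER, so each operation programs the interrupt type and vector before the command is written. A minimal sketch of that host-side sequence against a simulated register file (offsets copied from the defines above, helper names invented here; source and destination address registers omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Offsets and bits copied from the test BAR layout above. */
#define TEST_COMMAND    0x04
#define TEST_SIZE       0x1c
#define TEST_IRQ_TYPE   0x24
#define TEST_IRQ_NUMBER 0x28
#define COMMAND_COPY    (1u << 5)
#define IRQ_TYPE_MSIX   2

static uint8_t bar[0x100];	/* stand-in for the ioremapped test BAR */

static void test_writel(uint32_t off, uint32_t val)
{
	memcpy(&bar[off], &val, sizeof(val));	/* the driver uses writel() */
}

/* Program one COPY operation signalled by MSI-X vector 1. */
static void start_copy(uint32_t size)
{
	test_writel(TEST_SIZE, size);
	test_writel(TEST_IRQ_TYPE, IRQ_TYPE_MSIX);
	test_writel(TEST_IRQ_NUMBER, 1);
	test_writel(TEST_COMMAND, COMMAND_COPY);
	/* the driver then waits for the endpoint to raise the interrupt
	 * and to set STATUS_IRQ_RAISED / STATUS_COPY_SUCCESS */
}

int main(void)
{
	uint32_t cmd;

	start_copy(4096);
	memcpy(&cmd, &bar[TEST_COMMAND], sizeof(cmd));
	printf("command register now 0x%02x\n", (unsigned)cmd);
	return 0;
}

Moving the vector number into its own register is what allows MSI-X vectors well beyond the six bits the old command-word layout reserved for the MSI number.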
---- a/drivers/pci/Kconfig
-+++ b/drivers/pci/Kconfig
-@@ -142,6 +142,7 @@ config PCI_HYPERV
-
- source "drivers/pci/hotplug/Kconfig"
- source "drivers/pci/dwc/Kconfig"
-+source "drivers/pci/mobiveil/Kconfig"
- source "drivers/pci/host/Kconfig"
- source "drivers/pci/endpoint/Kconfig"
- source "drivers/pci/switch/Kconfig"
---- a/drivers/pci/dwc/Kconfig
-+++ b/drivers/pci/dwc/Kconfig
-@@ -50,17 +50,36 @@ config PCI_DRA7XX_EP
- endif
-
- config PCIE_DW_PLAT
-- bool "Platform bus based DesignWare PCIe Controller"
-- depends on PCI
-- depends on PCI_MSI_IRQ_DOMAIN
-- select PCIE_DW_HOST
-- ---help---
-- This selects the DesignWare PCIe controller support. Select this if
-- you have a PCIe controller on Platform bus.
-+ bool
-
-- If you have a controller with this interface, say Y or M here.
-+config PCIE_DW_PLAT_HOST
-+ bool "Platform bus based DesignWare PCIe Controller - Host mode"
-+ depends on PCI && PCI_MSI_IRQ_DOMAIN
-+ select PCIE_DW_HOST
-+ select PCIE_DW_PLAT
-+ help
-+ Enables support for the PCIe controller in the Designware IP to
-+ work in host mode. There are two instances of PCIe controller in
-+ Designware IP.
-+ This controller can work either as EP or RC. In order to enable
-+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
-+ order to enable device-specific features PCIE_DW_PLAT_EP must be
-+ selected.
-
-- If unsure, say N.
-+config PCIE_DW_PLAT_EP
-+ bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
-+ depends on PCI && PCI_MSI_IRQ_DOMAIN
-+ depends on PCI_ENDPOINT
-+ select PCIE_DW_EP
-+ select PCIE_DW_PLAT
-+ help
-+ Enables support for the PCIe controller in the Designware IP to
-+ work in endpoint mode. There are two instances of PCIe controller
-+ in Designware IP.
-+ This controller can work either as EP or RC. In order to enable
-+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
-+ order to enable device-specific features PCIE_DW_PLAT_EP must be
-+ selected.
-
- config PCI_EXYNOS
- bool "Samsung Exynos PCIe controller"
---- a/drivers/pci/dwc/Makefile
-+++ b/drivers/pci/dwc/Makefile
-@@ -10,7 +10,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
- obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
- obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
- obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
--obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
-+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o
- obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
- obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
- obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
---- a/drivers/pci/dwc/pci-dra7xx.c
-+++ b/drivers/pci/dwc/pci-dra7xx.c
-@@ -339,15 +339,6 @@ static irqreturn_t dra7xx_pcie_irq_handl
- return IRQ_HANDLED;
- }
-
--static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
--{
-- u32 reg;
--
-- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-- dw_pcie_writel_dbi2(pci, reg, 0x0);
-- dw_pcie_writel_dbi(pci, reg, 0x0);
--}
--
- static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
- {
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
---- /dev/null
-+++ b/drivers/pci/dwc/pci-layerscape-ep.c
-@@ -0,0 +1,146 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * PCIe controller EP driver for Freescale Layerscape SoCs
-+ *
-+ * Copyright (C) 2018 NXP Semiconductor.
-+ *
-+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/of_pci.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_address.h>
-+#include <linux/pci.h>
-+#include <linux/platform_device.h>
-+#include <linux/resource.h>
-+
-+#include "pcie-designware.h"
-+
-+#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address*/
-+
-+struct ls_pcie_ep {
-+ struct dw_pcie *pci;
-+};
-+
-+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
-+
-+static int ls_pcie_establish_link(struct dw_pcie *pci)
-+{
-+ return 0;
-+}
-+
-+static const struct dw_pcie_ops ls_pcie_ep_ops = {
-+ .start_link = ls_pcie_establish_link,
-+};
-+
-+static const struct of_device_id ls_pcie_ep_of_match[] = {
-+ { .compatible = "fsl,ls-pcie-ep",},
-+ { },
-+};
-+
-+static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
-+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ struct pci_epc *epc = ep->epc;
-+ enum pci_barno bar;
-+
-+ for (bar = BAR_0; bar <= BAR_5; bar++)
-+ dw_pcie_ep_reset_bar(pci, bar);
-+
-+ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
-+}
-+
-+static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ enum pci_epc_irq_type type, u16 interrupt_num)
-+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+
-+ switch (type) {
-+ case PCI_EPC_IRQ_LEGACY:
-+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
-+ case PCI_EPC_IRQ_MSI:
-+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
-+ case PCI_EPC_IRQ_MSIX:
-+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
-+ default:
-+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
-+ return -EINVAL;
-+ }
-+}
-+
-+static struct dw_pcie_ep_ops pcie_ep_ops = {
-+ .ep_init = ls_pcie_ep_init,
-+ .raise_irq = ls_pcie_ep_raise_irq,
-+};
-+
-+static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
-+ struct platform_device *pdev)
-+{
-+ struct dw_pcie *pci = pcie->pci;
-+ struct device *dev = pci->dev;
-+ struct dw_pcie_ep *ep;
-+ struct resource *res;
-+ int ret;
-+
-+ ep = &pci->ep;
-+ ep->ops = &pcie_ep_ops;
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
-+ if (!res)
-+ return -EINVAL;
-+
-+ ep->phys_base = res->start;
-+ ep->addr_size = resource_size(res);
-+
-+ ret = dw_pcie_ep_init(ep);
-+ if (ret) {
-+ dev_err(dev, "failed to initialize endpoint\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int __init ls_pcie_ep_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct dw_pcie *pci;
-+ struct ls_pcie_ep *pcie;
-+ struct resource *dbi_base;
-+ int ret;
-+
-+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-+ if (!pcie)
-+ return -ENOMEM;
-+
-+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
-+ if (!pci)
-+ return -ENOMEM;
-+
-+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
-+ if (IS_ERR(pci->dbi_base))
-+ return PTR_ERR(pci->dbi_base);
-+
-+ pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
-+ pci->dev = dev;
-+ pci->ops = &ls_pcie_ep_ops;
-+ pcie->pci = pci;
-+
-+ platform_set_drvdata(pdev, pcie);
-+
-+ ret = ls_add_pcie_ep(pcie, pdev);
-+
-+ return ret;
-+}
-+
-+static struct platform_driver ls_pcie_ep_driver = {
-+ .driver = {
-+ .name = "layerscape-pcie-ep",
-+ .of_match_table = ls_pcie_ep_of_match,
-+ .suppress_bind_attrs = true,
-+ },
-+};
-+builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
---- a/drivers/pci/dwc/pci-layerscape.c
-+++ b/drivers/pci/dwc/pci-layerscape.c
-@@ -33,6 +33,8 @@
-
- /* PEX Internal Configuration Registers */
- #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
-+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
-+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
-
- #define PCIE_IATU_NUM 6
-
-@@ -124,6 +126,14 @@ static int ls_pcie_link_up(struct dw_pci
- return 1;
- }
-
-+/* Forward error response of outbound non-posted requests */
-+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
-+{
-+ struct dw_pcie *pci = pcie->pci;
-+
-+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
-+}
-+
- static int ls_pcie_host_init(struct pcie_port *pp)
- {
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-@@ -135,6 +145,7 @@ static int ls_pcie_host_init(struct pcie
- * dw_pcie_setup_rc() will reconfigure the outbound windows.
- */
- ls_pcie_disable_outbound_atus(pcie);
-+ ls_pcie_fix_error_response(pcie);
-
- dw_pcie_dbi_ro_wr_en(pci);
- ls_pcie_clear_multifunction(pcie);
-@@ -253,6 +264,7 @@ static struct ls_pcie_drvdata ls2088_drv
- };
-
- static const struct of_device_id ls_pcie_of_match[] = {
-+ { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
- { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
- { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
---- a/drivers/pci/dwc/pcie-designware-ep.c
-+++ b/drivers/pci/dwc/pcie-designware-ep.c
-@@ -1,20 +1,9 @@
-+// SPDX-License-Identifier: GPL-2.0
- /**
- * Synopsys DesignWare PCIe Endpoint controller driver
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
- #include <linux/of.h>
-@@ -30,7 +19,8 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep
- pci_epc_linkup(epc);
- }
-
--static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
-+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
-+ int flags)
- {
- u32 reg;
-
-@@ -38,10 +28,52 @@ static void dw_pcie_ep_reset_bar(struct
- dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg, 0x0);
- dw_pcie_writel_dbi(pci, reg, 0x0);
-+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
-+ dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
-+ dw_pcie_writel_dbi(pci, reg + 4, 0x0);
-+ }
- dw_pcie_dbi_ro_wr_dis(pci);
- }
-
--static int dw_pcie_ep_write_header(struct pci_epc *epc,
-+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
-+{
-+ __dw_pcie_ep_reset_bar(pci, bar, 0);
-+}
-+
-+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
-+ u8 cap)
-+{
-+ u8 cap_id, next_cap_ptr;
-+ u16 reg;
-+
-+ reg = dw_pcie_readw_dbi(pci, cap_ptr);
-+ next_cap_ptr = (reg & 0xff00) >> 8;
-+ cap_id = (reg & 0x00ff);
-+
-+ if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
-+ return 0;
-+
-+ if (cap_id == cap)
-+ return cap_ptr;
-+
-+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
-+}
-+
-+static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
-+{
-+ u8 next_cap_ptr;
-+ u16 reg;
-+
-+ reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
-+ next_cap_ptr = (reg & 0x00ff);
-+
-+ if (!next_cap_ptr)
-+ return 0;
-+
-+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
-+}
-+
-+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
- struct pci_epf_header *hdr)
- {
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
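The two helpers added above walk the standard PCI capability linked list in configuration space so the endpoint core can locate the MSI and MSI-X capability offsets at init time. The recursion is equivalent to the following iterative walk (a sketch for illustration only, not part of the patch; it keeps the same termination checks, in the same order):

static u8 example_find_capability(struct dw_pcie *pci, u8 cap)
{
        u8 cap_ptr = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST) & 0x00ff;

        while (cap_ptr) {
                u16 reg = dw_pcie_readw_dbi(pci, cap_ptr);
                u8 next_cap_ptr = (reg & 0xff00) >> 8;
                u8 cap_id = reg & 0x00ff;

                /* Same checks as the recursive version above. */
                if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
                        return 0;
                if (cap_id == cap)
                        return cap_ptr;

                cap_ptr = next_cap_ptr;
        }

        return 0;
}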
-@@ -114,24 +146,29 @@ static int dw_pcie_ep_outbound_atu(struc
- return 0;
- }
-
--static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
-+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar)
- {
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
-
-- dw_pcie_ep_reset_bar(pci, bar);
-+ __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
-
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
- clear_bit(atu_index, ep->ib_window_map);
- }
-
--static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
-- dma_addr_t bar_phys, size_t size, int flags)
-+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar)
- {
- int ret;
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ enum pci_barno bar = epf_bar->barno;
-+ size_t size = epf_bar->size;
-+ int flags = epf_bar->flags;
- enum dw_pcie_as_type as_type;
- u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-
-@@ -140,13 +177,20 @@ static int dw_pcie_ep_set_bar(struct pci
- else
- as_type = DW_PCIE_AS_IO;
-
-- ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
-+ ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
- if (ret)
- return ret;
-
- dw_pcie_dbi_ro_wr_en(pci);
-- dw_pcie_writel_dbi2(pci, reg, size - 1);
-+
-+ dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg, flags);
-+
-+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
-+ dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
-+ dw_pcie_writel_dbi(pci, reg + 4, 0);
-+ }
-+
- dw_pcie_dbi_ro_wr_dis(pci);
-
- return 0;
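With the change above, a BAR whose caller sets PCI_BASE_ADDRESS_MEM_TYPE_64 (as pci-epf-test later in this patch does for sizes above 4 GiB) is programmed as a BAR pair: the writable-bits mask (size - 1) is split with lower_32_bits()/upper_32_bits() across the two dbi2 registers. A worked example, for illustration only:

        /* Illustration only: mask split for an 8 GiB 64-bit BAR. */
        u64 size = 8ULL * SZ_1G;                        /* size - 1 = 0x1ffffffff */
        u32 mask_lo = lower_32_bits(size - 1);          /* 0xffffffff -> BARn   (dbi2) */
        u32 mask_hi = upper_32_bits(size - 1);          /* 0x00000001 -> BARn+1 (dbi2) */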
-@@ -167,7 +211,8 @@ static int dw_pcie_find_index(struct dw_
- return -EINVAL;
- }
-
--static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
-+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t addr)
- {
- int ret;
- u32 atu_index;
-@@ -182,8 +227,9 @@ static void dw_pcie_ep_unmap_addr(struct
- clear_bit(atu_index, ep->ob_window_map);
- }
-
--static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
-- u64 pci_addr, size_t size)
-+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t addr,
-+ u64 pci_addr, size_t size)
- {
- int ret;
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
-@@ -198,45 +244,93 @@ static int dw_pcie_ep_map_addr(struct pc
- return 0;
- }
-
--static int dw_pcie_ep_get_msi(struct pci_epc *epc)
-+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
-+{
-+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ u32 val, reg;
-+
-+ if (!ep->msi_cap)
-+ return -EINVAL;
-+
-+ reg = ep->msi_cap + PCI_MSI_FLAGS;
-+ val = dw_pcie_readw_dbi(pci, reg);
-+ if (!(val & PCI_MSI_FLAGS_ENABLE))
-+ return -EINVAL;
-+
-+ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
-+
-+ return val;
-+}
-+
-+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
-+{
-+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ u32 val, reg;
-+
-+ if (!ep->msi_cap)
-+ return -EINVAL;
-+
-+ reg = ep->msi_cap + PCI_MSI_FLAGS;
-+ val = dw_pcie_readw_dbi(pci, reg);
-+ val &= ~PCI_MSI_FLAGS_QMASK;
-+ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
-+ dw_pcie_dbi_ro_wr_en(pci);
-+ dw_pcie_writew_dbi(pci, reg, val);
-+ dw_pcie_dbi_ro_wr_dis(pci);
-+
-+ return 0;
-+}
-+
-+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
- {
-- int val;
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ u32 val, reg;
-+
-+ if (!ep->msix_cap)
-+ return -EINVAL;
-
-- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
-- if (!(val & MSI_CAP_MSI_EN_MASK))
-+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
-+ val = dw_pcie_readw_dbi(pci, reg);
-+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
- return -EINVAL;
-
-- val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
-+ val &= PCI_MSIX_FLAGS_QSIZE;
-+
- return val;
- }
-
--static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
-+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
- {
-- int val;
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ u32 val, reg;
-
-- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
-- val &= ~MSI_CAP_MMC_MASK;
-- val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
-+ if (!ep->msix_cap)
-+ return -EINVAL;
-+
-+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
-+ val = dw_pcie_readw_dbi(pci, reg);
-+ val &= ~PCI_MSIX_FLAGS_QSIZE;
-+ val |= interrupts;
- dw_pcie_dbi_ro_wr_en(pci);
-- dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
-+ dw_pcie_writew_dbi(pci, reg, val);
- dw_pcie_dbi_ro_wr_dis(pci);
-
- return 0;
- }
-
--static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
-- enum pci_epc_irq_type type, u8 interrupt_num)
-+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
-+ enum pci_epc_irq_type type, u16 interrupt_num)
- {
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
-
- if (!ep->ops->raise_irq)
- return -EINVAL;
-
-- return ep->ops->raise_irq(ep, type, interrupt_num);
-+ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
- }
-
- static void dw_pcie_ep_stop(struct pci_epc *epc)
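The reworked MSI hooks above operate on the standard MSI Message Control fields instead of hard-coded register offsets: get_msi reads Multiple Message Enable (PCI_MSI_FLAGS_QSIZE, bits 6:4) and set_msi writes Multiple Message Capable (PCI_MSI_FLAGS_QMASK, bits 3:1). Both fields hold log2 of the vector count, which is why pci_epc_set_msi() in pci-epc-core.c passes order_base_2(interrupts) down. A small sketch of the encoding, for illustration only (assumes <linux/log2.h> and the pci_regs.h flag definitions):

static u8 example_msi_vectors_to_mmc(unsigned int nvec)         /* nvec = 1..32 */
{
        return order_base_2(nvec);              /* 1 -> 0, 2 -> 1, ..., 32 -> 5 */
}

static unsigned int example_msi_mme_to_vectors(u16 msg_ctrl)
{
        return 1U << ((msg_ctrl & PCI_MSI_FLAGS_QSIZE) >> 4);
}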
-@@ -269,15 +363,130 @@ static const struct pci_epc_ops epc_ops
- .unmap_addr = dw_pcie_ep_unmap_addr,
- .set_msi = dw_pcie_ep_set_msi,
- .get_msi = dw_pcie_ep_get_msi,
-+ .set_msix = dw_pcie_ep_set_msix,
-+ .get_msix = dw_pcie_ep_get_msix,
- .raise_irq = dw_pcie_ep_raise_irq,
- .start = dw_pcie_ep_start,
- .stop = dw_pcie_ep_stop,
- };
-
-+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
-+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ struct device *dev = pci->dev;
-+
-+ dev_err(dev, "EP cannot trigger legacy IRQs\n");
-+
-+ return -EINVAL;
-+}
-+
-+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ u8 interrupt_num)
-+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ struct pci_epc *epc = ep->epc;
-+ u16 msg_ctrl, msg_data;
-+ u32 msg_addr_lower, msg_addr_upper, reg;
-+ u64 msg_addr;
-+ bool has_upper;
-+ int ret;
-+
-+ if (!ep->msi_cap)
-+ return -EINVAL;
-+
-+ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
-+ reg = ep->msi_cap + PCI_MSI_FLAGS;
-+ msg_ctrl = dw_pcie_readw_dbi(pci, reg);
-+ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
-+ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
-+ msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
-+ if (has_upper) {
-+ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
-+ msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
-+ reg = ep->msi_cap + PCI_MSI_DATA_64;
-+ msg_data = dw_pcie_readw_dbi(pci, reg);
-+ } else {
-+ msg_addr_upper = 0;
-+ reg = ep->msi_cap + PCI_MSI_DATA_32;
-+ msg_data = dw_pcie_readw_dbi(pci, reg);
-+ }
-+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
-+ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
-+ epc->mem->page_size);
-+ if (ret)
-+ return ret;
-+
-+ writel(msg_data | (interrupt_num - 1), ep->msi_mem);
-+
-+ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
-+
-+ return 0;
-+}
-+
-+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ u16 interrupt_num)
-+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ struct pci_epc *epc = ep->epc;
-+ u16 tbl_offset, bir;
-+ u32 bar_addr_upper, bar_addr_lower;
-+ u32 msg_addr_upper, msg_addr_lower;
-+ u32 reg, msg_data, vec_ctrl;
-+ u64 tbl_addr, msg_addr, reg_u64;
-+ void __iomem *msix_tbl;
-+ int ret;
-+
-+ reg = ep->msix_cap + PCI_MSIX_TABLE;
-+ tbl_offset = dw_pcie_readl_dbi(pci, reg);
-+ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
-+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
-+
-+ reg = PCI_BASE_ADDRESS_0 + (4 * bir);
-+ bar_addr_upper = 0;
-+ bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
-+ reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
-+ if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
-+ bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
-+
-+ tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
-+ tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
-+ tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
-+
-+ msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
-+ PCI_MSIX_ENTRY_SIZE);
-+ if (!msix_tbl)
-+ return -EINVAL;
-+
-+ msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
-+ msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
-+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
-+ msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
-+ vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
-+
-+ iounmap(msix_tbl);
-+
-+ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
-+ return -EPERM;
-+
-+ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
-+ epc->mem->page_size);
-+ if (ret)
-+ return ret;
-+
-+ writel(msg_data, ep->msi_mem);
-+
-+ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
-+
-+ return 0;
-+}
-+
- void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
- {
- struct pci_epc *epc = ep->epc;
-
-+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
-+ epc->mem->page_size);
-+
- pci_epc_mem_exit(epc);
- }
-
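dw_pcie_ep_raise_msix_irq() above has to read the function's own MSI-X table through the local address space: it finds the table BAR and offset from the MSI-X capability, maps the entry for the requested vector, and then pushes the message address/data from that entry through the same one-page msi_mem doorbell window used for plain MSI. Each table entry is PCI_MSIX_ENTRY_SIZE (16) bytes, so, for example, vector 3 of a table at offset 0x2000 in its BAR sits at 0x2020; in the terms of the function above that is simply:

        /* Illustration only: vector 3, table offset 0x2000 -> entry at 0x2020. */
        u32 entry_off = tbl_offset + (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE;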
-@@ -291,7 +500,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
- struct device_node *np = dev->of_node;
-
- if (!pci->dbi_base || !pci->dbi_base2) {
-- dev_err(dev, "dbi_base/deb_base2 is not populated\n");
-+ dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
- return -EINVAL;
- }
-
-@@ -333,15 +542,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
- return -ENOMEM;
- ep->outbound_addr = addr;
-
-- if (ep->ops->ep_init)
-- ep->ops->ep_init(ep);
--
- epc = devm_pci_epc_create(dev, &epc_ops);
- if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
- return PTR_ERR(epc);
- }
-
-+ ep->epc = epc;
-+ epc_set_drvdata(epc, ep);
-+
-+ if (ep->ops->ep_init)
-+ ep->ops->ep_init(ep);
-+
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
-@@ -353,8 +565,16 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
- return ret;
- }
-
-- ep->epc = epc;
-- epc_set_drvdata(epc, ep);
-+ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
-+ epc->mem->page_size);
-+ if (!ep->msi_mem) {
-+ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
-+ return -ENOMEM;
-+ }
-+ ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
-+
-+ ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
-+
- dw_pcie_setup(pci);
-
- return 0;
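Note the ordering change in dw_pcie_ep_init() above: the EPC device is now created, and ep->epc and the drvdata are set, before the controller's ep_init callback runs, so that callback can already use the EPC (for instance to adjust epc->features, as dw_plat_pcie_ep_init() added later in this patch does). After that, one page of endpoint address space is reserved as the MSI/MSI-X doorbell window and the MSI/MSI-X capability offsets are cached. A sketch of the kind of ep_init callback this ordering enables (illustrative; dw_plat_pcie_ep_init() below is the in-tree example):

static void example_ep_init(struct dw_pcie_ep *ep)
{
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        enum pci_barno bar;

        for (bar = BAR_0; bar <= BAR_5; bar++)
                dw_pcie_ep_reset_bar(pci, bar);

        /* ep->epc is valid here only because it is assigned before ep_init. */
        ep->epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
}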
---- a/drivers/pci/dwc/pcie-designware-host.c
-+++ b/drivers/pci/dwc/pcie-designware-host.c
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * Synopsys DesignWare PCIe host controller driver
- *
-@@ -5,10 +6,6 @@
- * http://www.samsung.com
- *
- * Author: Jingoo Han <jg1.han@samsung.com>
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 as
-- * published by the Free Software Foundation.
- */
-
- #include <linux/irqdomain.h>
---- a/drivers/pci/dwc/pcie-designware-plat.c
-+++ b/drivers/pci/dwc/pcie-designware-plat.c
-@@ -1,13 +1,10 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * PCIe RC driver for Synopsys DesignWare Core
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 as
-- * published by the Free Software Foundation.
- */
- #include <linux/clk.h>
- #include <linux/delay.h>
-@@ -15,19 +12,29 @@
- #include <linux/interrupt.h>
- #include <linux/kernel.h>
- #include <linux/init.h>
-+#include <linux/of_device.h>
- #include <linux/of_gpio.h>
- #include <linux/pci.h>
- #include <linux/platform_device.h>
- #include <linux/resource.h>
- #include <linux/signal.h>
- #include <linux/types.h>
-+#include <linux/regmap.h>
-
- #include "pcie-designware.h"
-
- struct dw_plat_pcie {
-- struct dw_pcie *pci;
-+ struct dw_pcie *pci;
-+ struct regmap *regmap;
-+ enum dw_pcie_device_mode mode;
-+};
-+
-+struct dw_plat_pcie_of_data {
-+ enum dw_pcie_device_mode mode;
- };
-
-+static const struct of_device_id dw_plat_pcie_of_match[];
-+
- static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
- {
- struct pcie_port *pp = arg;
-@@ -52,9 +59,58 @@ static const struct dw_pcie_host_ops dw_
- .host_init = dw_plat_pcie_host_init,
- };
-
--static int dw_plat_add_pcie_port(struct pcie_port *pp,
-+static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
-+{
-+ return 0;
-+}
-+
-+static const struct dw_pcie_ops dw_pcie_ops = {
-+ .start_link = dw_plat_pcie_establish_link,
-+};
-+
-+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
-+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ struct pci_epc *epc = ep->epc;
-+ enum pci_barno bar;
-+
-+ for (bar = BAR_0; bar <= BAR_5; bar++)
-+ dw_pcie_ep_reset_bar(pci, bar);
-+
-+ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
-+ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
-+}
-+
-+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ enum pci_epc_irq_type type,
-+ u16 interrupt_num)
-+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+
-+ switch (type) {
-+ case PCI_EPC_IRQ_LEGACY:
-+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
-+ case PCI_EPC_IRQ_MSI:
-+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
-+ case PCI_EPC_IRQ_MSIX:
-+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
-+ default:
-+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static struct dw_pcie_ep_ops pcie_ep_ops = {
-+ .ep_init = dw_plat_pcie_ep_init,
-+ .raise_irq = dw_plat_pcie_ep_raise_irq,
-+};
-+
-+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
- struct platform_device *pdev)
- {
-+ struct dw_pcie *pci = dw_plat_pcie->pci;
-+ struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
-@@ -82,15 +138,44 @@ static int dw_plat_add_pcie_port(struct
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
-- dev_err(dev, "failed to initialize host\n");
-+ dev_err(dev, "Failed to initialize host\n");
- return ret;
- }
-
- return 0;
- }
-
--static const struct dw_pcie_ops dw_pcie_ops = {
--};
-+static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
-+ struct platform_device *pdev)
-+{
-+ int ret;
-+ struct dw_pcie_ep *ep;
-+ struct resource *res;
-+ struct device *dev = &pdev->dev;
-+ struct dw_pcie *pci = dw_plat_pcie->pci;
-+
-+ ep = &pci->ep;
-+ ep->ops = &pcie_ep_ops;
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
-+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(pci->dbi_base2))
-+ return PTR_ERR(pci->dbi_base2);
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
-+ if (!res)
-+ return -EINVAL;
-+
-+ ep->phys_base = res->start;
-+ ep->addr_size = resource_size(res);
-+
-+ ret = dw_pcie_ep_init(ep);
-+ if (ret) {
-+ dev_err(dev, "Failed to initialize endpoint\n");
-+ return ret;
-+ }
-+ return 0;
-+}
-
- static int dw_plat_pcie_probe(struct platform_device *pdev)
- {
-@@ -99,6 +184,16 @@ static int dw_plat_pcie_probe(struct pla
- struct dw_pcie *pci;
- struct resource *res; /* Resource from DT */
- int ret;
-+ const struct of_device_id *match;
-+ const struct dw_plat_pcie_of_data *data;
-+ enum dw_pcie_device_mode mode;
-+
-+ match = of_match_device(dw_plat_pcie_of_match, dev);
-+ if (!match)
-+ return -EINVAL;
-+
-+ data = (struct dw_plat_pcie_of_data *)match->data;
-+ mode = (enum dw_pcie_device_mode)data->mode;
-
- dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
- if (!dw_plat_pcie)
-@@ -112,23 +207,59 @@ static int dw_plat_pcie_probe(struct pla
- pci->ops = &dw_pcie_ops;
-
- dw_plat_pcie->pci = pci;
-+ dw_plat_pcie->mode = mode;
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-+ if (!res)
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- platform_set_drvdata(pdev, dw_plat_pcie);
-
-- ret = dw_plat_add_pcie_port(&pci->pp, pdev);
-- if (ret < 0)
-- return ret;
-+ switch (dw_plat_pcie->mode) {
-+ case DW_PCIE_RC_TYPE:
-+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
-+ return -ENODEV;
-+
-+ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
-+ if (ret < 0)
-+ return ret;
-+ break;
-+ case DW_PCIE_EP_TYPE:
-+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
-+ return -ENODEV;
-+
-+ ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
-+ if (ret < 0)
-+ return ret;
-+ break;
-+ default:
-+ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
-+ }
-
- return 0;
- }
-
-+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
-+ .mode = DW_PCIE_RC_TYPE,
-+};
-+
-+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
-+ .mode = DW_PCIE_EP_TYPE,
-+};
-+
- static const struct of_device_id dw_plat_pcie_of_match[] = {
-- { .compatible = "snps,dw-pcie", },
-+ {
-+ .compatible = "snps,dw-pcie",
-+ .data = &dw_plat_pcie_rc_of_data,
-+ },
-+ {
-+ .compatible = "snps,dw-pcie-ep",
-+ .data = &dw_plat_pcie_ep_of_data,
-+ },
- {},
- };
-
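The generic DesignWare platform driver can now come up as either a root complex or an endpoint, selected purely by the matched compatible's .data and gated by CONFIG_PCIE_DW_PLAT_HOST / CONFIG_PCIE_DW_PLAT_EP in the probe switch above. A hypothetical additional entry would follow the same pattern (the compatible string below is illustrative, not from the patch):

        {
                .compatible = "acme,example-pcie-ep",   /* hypothetical */
                .data = &dw_plat_pcie_ep_of_data,
        },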
---- a/drivers/pci/dwc/pcie-designware.c
-+++ b/drivers/pci/dwc/pcie-designware.c
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * Synopsys DesignWare PCIe host controller driver
- *
-@@ -5,10 +6,6 @@
- * http://www.samsung.com
- *
- * Author: Jingoo Han <jg1.han@samsung.com>
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 as
-- * published by the Free Software Foundation.
- */
-
- #include <linux/delay.h>
---- a/drivers/pci/dwc/pcie-designware.h
-+++ b/drivers/pci/dwc/pcie-designware.h
-@@ -1,3 +1,4 @@
-+// SPDX-License-Identifier: GPL-2.0
- /*
- * Synopsys DesignWare PCIe host controller driver
- *
-@@ -5,10 +6,6 @@
- * http://www.samsung.com
- *
- * Author: Jingoo Han <jg1.han@samsung.com>
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 as
-- * published by the Free Software Foundation.
- */
-
- #ifndef _PCIE_DESIGNWARE_H
-@@ -97,15 +94,6 @@
- #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
-
--#define MSI_MESSAGE_CONTROL 0x52
--#define MSI_CAP_MMC_SHIFT 1
--#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
--#define MSI_CAP_MME_SHIFT 4
--#define MSI_CAP_MSI_EN_MASK 0x1
--#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
--#define MSI_MESSAGE_ADDR_L32 0x54
--#define MSI_MESSAGE_ADDR_U32 0x58
--
- /*
- * Maximum number of MSI IRQs can be 256 per controller. But keep
- * it 32 as of now. Probably we will never need more than 32. If needed,
-@@ -118,6 +106,10 @@
- #define MAX_IATU_IN 256
- #define MAX_IATU_OUT 256
-
-+/* Maximum number of inbound/outbound iATUs */
-+#define MAX_IATU_IN 256
-+#define MAX_IATU_OUT 256
-+
- struct pcie_port;
- struct dw_pcie;
- struct dw_pcie_ep;
-@@ -185,8 +177,8 @@ enum dw_pcie_as_type {
-
- struct dw_pcie_ep_ops {
- void (*ep_init)(struct dw_pcie_ep *ep);
-- int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
-- u8 interrupt_num);
-+ int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
-+ enum pci_epc_irq_type type, u16 interrupt_num);
- };
-
- struct dw_pcie_ep {
-@@ -201,6 +193,10 @@ struct dw_pcie_ep {
- unsigned long *ob_window_map;
- u32 num_ib_windows;
- u32 num_ob_windows;
-+ void __iomem *msi_mem;
-+ phys_addr_t msi_mem_phys;
-+ u8 msi_cap; /* MSI capability offset */
-+ u8 msix_cap; /* MSI-X capability offset */
- };
-
- struct dw_pcie_ops {
-@@ -339,6 +335,12 @@ static inline int dw_pcie_host_init(stru
- void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
- int dw_pcie_ep_init(struct dw_pcie_ep *ep);
- void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
-+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
-+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ u8 interrupt_num);
-+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ u16 interrupt_num);
-+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
- #else
- static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
- {
-@@ -352,5 +354,26 @@ static inline int dw_pcie_ep_init(struct
- static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
- {
- }
-+
-+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
-+{
-+ return 0;
-+}
-+
-+static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ u8 interrupt_num)
-+{
-+ return 0;
-+}
-+
-+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
-+ u16 interrupt_num)
-+{
-+ return 0;
-+}
-+
-+static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
-+{
-+}
- #endif
- #endif /* _PCIE_DESIGNWARE_H */
---- a/drivers/pci/endpoint/Kconfig
-+++ b/drivers/pci/endpoint/Kconfig
-@@ -1,3 +1,4 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # PCI Endpoint Support
- #
---- a/drivers/pci/endpoint/Makefile
-+++ b/drivers/pci/endpoint/Makefile
-@@ -1,3 +1,4 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # Makefile for PCI Endpoint Support
- #
---- a/drivers/pci/endpoint/functions/Kconfig
-+++ b/drivers/pci/endpoint/functions/Kconfig
-@@ -1,3 +1,4 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # PCI Endpoint Functions
- #
---- a/drivers/pci/endpoint/functions/Makefile
-+++ b/drivers/pci/endpoint/functions/Makefile
-@@ -1,3 +1,4 @@
-+# SPDX-License-Identifier: GPL-2.0
- #
- # Makefile for PCI Endpoint Functions
- #
---- a/drivers/pci/endpoint/functions/pci-epf-test.c
-+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
-@@ -1,20 +1,9 @@
-+// SPDX-License-Identifier: GPL-2.0
- /**
- * Test driver to test endpoint functionality
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
- #include <linux/crc32.h>
-@@ -29,13 +18,16 @@
- #include <linux/pci-epf.h>
- #include <linux/pci_regs.h>
-
-+#define IRQ_TYPE_LEGACY 0
-+#define IRQ_TYPE_MSI 1
-+#define IRQ_TYPE_MSIX 2
-+
- #define COMMAND_RAISE_LEGACY_IRQ BIT(0)
- #define COMMAND_RAISE_MSI_IRQ BIT(1)
--#define MSI_NUMBER_SHIFT 2
--#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
--#define COMMAND_READ BIT(8)
--#define COMMAND_WRITE BIT(9)
--#define COMMAND_COPY BIT(10)
-+#define COMMAND_RAISE_MSIX_IRQ BIT(2)
-+#define COMMAND_READ BIT(3)
-+#define COMMAND_WRITE BIT(4)
-+#define COMMAND_COPY BIT(5)
-
- #define STATUS_READ_SUCCESS BIT(0)
- #define STATUS_READ_FAIL BIT(1)
-@@ -56,6 +48,7 @@ struct pci_epf_test {
- struct pci_epf *epf;
- enum pci_barno test_reg_bar;
- bool linkup_notifier;
-+ bool msix_available;
- struct delayed_work cmd_handler;
- };
-
-@@ -67,6 +60,8 @@ struct pci_epf_test_reg {
- u64 dst_addr;
- u32 size;
- u32 checksum;
-+ u32 irq_type;
-+ u32 irq_number;
- } __packed;
-
- static struct pci_epf_header test_header = {
-@@ -81,7 +76,7 @@ struct pci_epf_test_data {
- bool linkup_notifier;
- };
-
--static int bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
-+static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
-
- static int pci_epf_test_copy(struct pci_epf_test *epf_test)
- {
-@@ -98,43 +93,45 @@ static int pci_epf_test_copy(struct pci_
-
- src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
- if (!src_addr) {
-- dev_err(dev, "failed to allocate source address\n");
-+ dev_err(dev, "Failed to allocate source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
-- ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
-+ ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
-+ reg->size);
- if (ret) {
-- dev_err(dev, "failed to map source address\n");
-+ dev_err(dev, "Failed to map source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_src_addr;
- }
-
- dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
- if (!dst_addr) {
-- dev_err(dev, "failed to allocate destination address\n");
-+ dev_err(dev, "Failed to allocate destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err_src_map_addr;
- }
-
-- ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
-+ ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
-+ reg->size);
- if (ret) {
-- dev_err(dev, "failed to map destination address\n");
-+ dev_err(dev, "Failed to map destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_dst_addr;
- }
-
- memcpy(dst_addr, src_addr, reg->size);
-
-- pci_epc_unmap_addr(epc, dst_phys_addr);
-+ pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
-
- err_dst_addr:
- pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
-
- err_src_map_addr:
-- pci_epc_unmap_addr(epc, src_phys_addr);
-+ pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
-
- err_src_addr:
- pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
-@@ -158,15 +155,16 @@ static int pci_epf_test_read(struct pci_
-
- src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!src_addr) {
-- dev_err(dev, "failed to allocate address\n");
-+ dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
-- ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
-+ ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
-+ reg->size);
- if (ret) {
-- dev_err(dev, "failed to map address\n");
-+ dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_addr;
- }
-@@ -186,7 +184,7 @@ static int pci_epf_test_read(struct pci_
- kfree(buf);
-
- err_map_addr:
-- pci_epc_unmap_addr(epc, phys_addr);
-+ pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
-
- err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
-@@ -209,15 +207,16 @@ static int pci_epf_test_write(struct pci
-
- dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!dst_addr) {
-- dev_err(dev, "failed to allocate address\n");
-+ dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
-- ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
-+ ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
-+ reg->size);
- if (ret) {
-- dev_err(dev, "failed to map address\n");
-+ dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_addr;
- }
-@@ -237,12 +236,12 @@ static int pci_epf_test_write(struct pci
- * wait 1ms inorder for the write to complete. Without this delay L3
- * error in observed in the host system.
- */
-- mdelay(1);
-+ usleep_range(1000, 2000);
-
- kfree(buf);
-
- err_map_addr:
-- pci_epc_unmap_addr(epc, phys_addr);
-+ pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
-
- err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
-@@ -251,31 +250,42 @@ err:
- return ret;
- }
-
--static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
-+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
-+ u16 irq)
- {
-- u8 msi_count;
- struct pci_epf *epf = epf_test->epf;
-+ struct device *dev = &epf->dev;
- struct pci_epc *epc = epf->epc;
- enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
-
- reg->status |= STATUS_IRQ_RAISED;
-- msi_count = pci_epc_get_msi(epc);
-- if (irq > msi_count || msi_count <= 0)
-- pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
-- else
-- pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
-+
-+ switch (irq_type) {
-+ case IRQ_TYPE_LEGACY:
-+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
-+ break;
-+ case IRQ_TYPE_MSI:
-+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
-+ break;
-+ case IRQ_TYPE_MSIX:
-+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
-+ break;
-+ default:
-+ dev_err(dev, "Failed to raise IRQ, unknown type\n");
-+ break;
-+ }
- }
-
- static void pci_epf_test_cmd_handler(struct work_struct *work)
- {
- int ret;
-- u8 irq;
-- u8 msi_count;
-+ int count;
- u32 command;
- struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
- cmd_handler.work);
- struct pci_epf *epf = epf_test->epf;
-+ struct device *dev = &epf->dev;
- struct pci_epc *epc = epf->epc;
- enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
-@@ -287,11 +297,14 @@ static void pci_epf_test_cmd_handler(str
- reg->command = 0;
- reg->status = 0;
-
-- irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
-+ if (reg->irq_type > IRQ_TYPE_MSIX) {
-+ dev_err(dev, "Failed to detect IRQ type\n");
-+ goto reset_handler;
-+ }
-
- if (command & COMMAND_RAISE_LEGACY_IRQ) {
- reg->status = STATUS_IRQ_RAISED;
-- pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
-+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
- goto reset_handler;
- }
-
-@@ -301,7 +314,8 @@ static void pci_epf_test_cmd_handler(str
- reg->status |= STATUS_WRITE_FAIL;
- else
- reg->status |= STATUS_WRITE_SUCCESS;
-- pci_epf_test_raise_irq(epf_test, irq);
-+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
-+ reg->irq_number);
- goto reset_handler;
- }
-
-@@ -311,7 +325,8 @@ static void pci_epf_test_cmd_handler(str
- reg->status |= STATUS_READ_SUCCESS;
- else
- reg->status |= STATUS_READ_FAIL;
-- pci_epf_test_raise_irq(epf_test, irq);
-+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
-+ reg->irq_number);
- goto reset_handler;
- }
-
-@@ -321,16 +336,28 @@ static void pci_epf_test_cmd_handler(str
- reg->status |= STATUS_COPY_SUCCESS;
- else
- reg->status |= STATUS_COPY_FAIL;
-- pci_epf_test_raise_irq(epf_test, irq);
-+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
-+ reg->irq_number);
- goto reset_handler;
- }
-
- if (command & COMMAND_RAISE_MSI_IRQ) {
-- msi_count = pci_epc_get_msi(epc);
-- if (irq > msi_count || msi_count <= 0)
-+ count = pci_epc_get_msi(epc, epf->func_no);
-+ if (reg->irq_number > count || count <= 0)
-+ goto reset_handler;
-+ reg->status = STATUS_IRQ_RAISED;
-+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
-+ reg->irq_number);
-+ goto reset_handler;
-+ }
-+
-+ if (command & COMMAND_RAISE_MSIX_IRQ) {
-+ count = pci_epc_get_msix(epc, epf->func_no);
-+ if (reg->irq_number > count || count <= 0)
- goto reset_handler;
- reg->status = STATUS_IRQ_RAISED;
-- pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
-+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
-+ reg->irq_number);
- goto reset_handler;
- }
-
-@@ -351,21 +378,23 @@ static void pci_epf_test_unbind(struct p
- {
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epc *epc = epf->epc;
-+ struct pci_epf_bar *epf_bar;
- int bar;
-
- cancel_delayed_work(&epf_test->cmd_handler);
- pci_epc_stop(epc);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
-+ epf_bar = &epf->bar[bar];
-+
- if (epf_test->reg[bar]) {
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
-- pci_epc_clear_bar(epc, bar);
-+ pci_epc_clear_bar(epc, epf->func_no, epf_bar);
- }
- }
- }
-
- static int pci_epf_test_set_bar(struct pci_epf *epf)
- {
-- int flags;
- int bar;
- int ret;
- struct pci_epf_bar *epf_bar;
-@@ -374,20 +403,27 @@ static int pci_epf_test_set_bar(struct p
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- enum pci_barno test_reg_bar = epf_test->test_reg_bar;
-
-- flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
-- if (sizeof(dma_addr_t) == 0x8)
-- flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
--
- for (bar = BAR_0; bar <= BAR_5; bar++) {
- epf_bar = &epf->bar[bar];
-- ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
-- epf_bar->size, flags);
-+
-+ epf_bar->flags |= upper_32_bits(epf_bar->size) ?
-+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
-+ PCI_BASE_ADDRESS_MEM_TYPE_32;
-+
-+ ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
- if (ret) {
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
-- dev_err(dev, "failed to set BAR%d\n", bar);
-+ dev_err(dev, "Failed to set BAR%d\n", bar);
- if (bar == test_reg_bar)
- return ret;
- }
-+ /*
-+ * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
-+ * if the specific implementation required a 64-bit BAR,
-+ * even if we only requested a 32-bit BAR.
-+ */
-+ if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
-+ bar++;
- }
-
- return 0;
-@@ -404,7 +440,7 @@ static int pci_epf_test_alloc_space(stru
- base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
- test_reg_bar);
- if (!base) {
-- dev_err(dev, "failed to allocated register space\n");
-+ dev_err(dev, "Failed to allocated register space\n");
- return -ENOMEM;
- }
- epf_test->reg[test_reg_bar] = base;
-@@ -414,7 +450,7 @@ static int pci_epf_test_alloc_space(stru
- continue;
- base = pci_epf_alloc_space(epf, bar_size[bar], bar);
- if (!base)
-- dev_err(dev, "failed to allocate space for BAR%d\n",
-+ dev_err(dev, "Failed to allocate space for BAR%d\n",
- bar);
- epf_test->reg[bar] = base;
- }
-@@ -433,9 +469,18 @@ static int pci_epf_test_bind(struct pci_
- if (WARN_ON_ONCE(!epc))
- return -EINVAL;
-
-- ret = pci_epc_write_header(epc, header);
-+ if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
-+ epf_test->linkup_notifier = false;
-+ else
-+ epf_test->linkup_notifier = true;
-+
-+ epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
-+
-+ epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
-+
-+ ret = pci_epc_write_header(epc, epf->func_no, header);
- if (ret) {
-- dev_err(dev, "configuration header write failed\n");
-+ dev_err(dev, "Configuration header write failed\n");
- return ret;
- }
-
-@@ -447,9 +492,19 @@ static int pci_epf_test_bind(struct pci_
- if (ret)
- return ret;
-
-- ret = pci_epc_set_msi(epc, epf->msi_interrupts);
-- if (ret)
-+ ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
-+ if (ret) {
-+ dev_err(dev, "MSI configuration failed\n");
- return ret;
-+ }
-+
-+ if (epf_test->msix_available) {
-+ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
-+ if (ret) {
-+ dev_err(dev, "MSI-X configuration failed\n");
-+ return ret;
-+ }
-+ }
-
- if (!epf_test->linkup_notifier)
- queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
-@@ -517,7 +572,7 @@ static int __init pci_epf_test_init(void
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
- ret = pci_epf_register_driver(&test_driver);
- if (ret) {
-- pr_err("failed to register pci epf test driver --> %d\n", ret);
-+ pr_err("Failed to register pci epf test driver --> %d\n", ret);
- return ret;
- }
-
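The test function's shared register block now carries the IRQ type and number explicitly instead of packing an MSI number into the command word, so a host-side request is a matter of filling in addresses and sizes, then writing one COMMAND_* bit together with irq_type/irq_number. A hedged sketch of what the host-side companion driver (drivers/misc/pci_endpoint_test.c, not part of this patch) is expected to write, mirroring struct pci_epf_test_reg above; the function and the 'reg' mapping are illustrative only:

static void example_kick_read_test(struct pci_epf_test_reg __iomem *reg,
                                   u64 src, u32 len)
{
        writel(lower_32_bits(src), &reg->src_addr);
        writel(upper_32_bits(src), (u32 __iomem *)&reg->src_addr + 1);
        writel(len, &reg->size);
        writel(IRQ_TYPE_MSI, &reg->irq_type);   /* signal completion via MSI ... */
        writel(1, &reg->irq_number);            /* ... vector 1 */
        writel(COMMAND_READ, &reg->command);    /* EP reads 'len' bytes from 'src' */
}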
---- a/drivers/pci/endpoint/pci-ep-cfs.c
-+++ b/drivers/pci/endpoint/pci-ep-cfs.c
-@@ -1,35 +1,28 @@
-+// SPDX-License-Identifier: GPL-2.0
- /**
- * configfs to configure the PCI endpoint
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
- #include <linux/module.h>
-+#include <linux/idr.h>
- #include <linux/slab.h>
-
- #include <linux/pci-epc.h>
- #include <linux/pci-epf.h>
- #include <linux/pci-ep-cfs.h>
-
-+static DEFINE_IDR(functions_idr);
-+static DEFINE_MUTEX(functions_mutex);
- static struct config_group *functions_group;
- static struct config_group *controllers_group;
-
- struct pci_epf_group {
- struct config_group group;
- struct pci_epf *epf;
-+ int index;
- };
-
- struct pci_epc_group {
-@@ -151,7 +144,7 @@ static struct configfs_item_operations p
- .drop_link = pci_epc_epf_unlink,
- };
-
--static struct config_item_type pci_epc_type = {
-+static const struct config_item_type pci_epc_type = {
- .ct_item_ops = &pci_epc_item_ops,
- .ct_attrs = pci_epc_attrs,
- .ct_owner = THIS_MODULE,
-@@ -293,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_sh
- to_pci_epf_group(item)->epf->msi_interrupts);
- }
-
-+static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
-+ const char *page, size_t len)
-+{
-+ u16 val;
-+ int ret;
-+
-+ ret = kstrtou16(page, 0, &val);
-+ if (ret)
-+ return ret;
-+
-+ to_pci_epf_group(item)->epf->msix_interrupts = val;
-+
-+ return len;
-+}
-+
-+static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
-+ char *page)
-+{
-+ return sprintf(page, "%d\n",
-+ to_pci_epf_group(item)->epf->msix_interrupts);
-+}
-+
- PCI_EPF_HEADER_R(vendorid)
- PCI_EPF_HEADER_W_u16(vendorid)
-
-@@ -334,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id
- CONFIGFS_ATTR(pci_epf_, subsys_id);
- CONFIGFS_ATTR(pci_epf_, interrupt_pin);
- CONFIGFS_ATTR(pci_epf_, msi_interrupts);
-+CONFIGFS_ATTR(pci_epf_, msix_interrupts);
-
- static struct configfs_attribute *pci_epf_attrs[] = {
- &pci_epf_attr_vendorid,
-@@ -347,6 +363,7 @@ static struct configfs_attribute *pci_ep
- &pci_epf_attr_subsys_id,
- &pci_epf_attr_interrupt_pin,
- &pci_epf_attr_msi_interrupts,
-+ &pci_epf_attr_msix_interrupts,
- NULL,
- };
-
-@@ -354,6 +371,9 @@ static void pci_epf_release(struct confi
- {
- struct pci_epf_group *epf_group = to_pci_epf_group(item);
-
-+ mutex_lock(&functions_mutex);
-+ idr_remove(&functions_idr, epf_group->index);
-+ mutex_unlock(&functions_mutex);
- pci_epf_destroy(epf_group->epf);
- kfree(epf_group);
- }
-@@ -362,7 +382,7 @@ static struct configfs_item_operations p
- .release = pci_epf_release,
- };
-
--static struct config_item_type pci_epf_type = {
-+static const struct config_item_type pci_epf_type = {
- .ct_item_ops = &pci_epf_ops,
- .ct_attrs = pci_epf_attrs,
- .ct_owner = THIS_MODULE,
-@@ -373,22 +393,57 @@ static struct config_group *pci_epf_make
- {
- struct pci_epf_group *epf_group;
- struct pci_epf *epf;
-+ char *epf_name;
-+ int index, err;
-
- epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
- if (!epf_group)
- return ERR_PTR(-ENOMEM);
-
-+ mutex_lock(&functions_mutex);
-+ index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
-+ mutex_unlock(&functions_mutex);
-+ if (index < 0) {
-+ err = index;
-+ goto free_group;
-+ }
-+
-+ epf_group->index = index;
-+
- config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
-
-- epf = pci_epf_create(group->cg_item.ci_name);
-+ epf_name = kasprintf(GFP_KERNEL, "%s.%d",
-+ group->cg_item.ci_name, epf_group->index);
-+ if (!epf_name) {
-+ err = -ENOMEM;
-+ goto remove_idr;
-+ }
-+
-+ epf = pci_epf_create(epf_name);
- if (IS_ERR(epf)) {
- pr_err("failed to create endpoint function device\n");
-- return ERR_PTR(-EINVAL);
-+ err = -EINVAL;
-+ goto free_name;
- }
-
- epf_group->epf = epf;
-
-+ kfree(epf_name);
-+
- return &epf_group->group;
-+
-+free_name:
-+ kfree(epf_name);
-+
-+remove_idr:
-+ mutex_lock(&functions_mutex);
-+ idr_remove(&functions_idr, epf_group->index);
-+ mutex_unlock(&functions_mutex);
-+
-+free_group:
-+ kfree(epf_group);
-+
-+ return ERR_PTR(err);
- }
-
- static void pci_epf_drop(struct config_group *group, struct config_item *item)
-@@ -401,7 +456,7 @@ static struct configfs_group_operations
- .drop_item = &pci_epf_drop,
- };
-
--static struct config_item_type pci_epf_group_type = {
-+static const struct config_item_type pci_epf_group_type = {
- .ct_group_ops = &pci_epf_group_ops,
- .ct_owner = THIS_MODULE,
- };
-@@ -429,15 +484,15 @@ void pci_ep_cfs_remove_epf_group(struct
- }
- EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
-
--static struct config_item_type pci_functions_type = {
-+static const struct config_item_type pci_functions_type = {
- .ct_owner = THIS_MODULE,
- };
-
--static struct config_item_type pci_controllers_type = {
-+static const struct config_item_type pci_controllers_type = {
- .ct_owner = THIS_MODULE,
- };
-
--static struct config_item_type pci_ep_type = {
-+static const struct config_item_type pci_ep_type = {
- .ct_owner = THIS_MODULE,
- };
-
---- a/drivers/pci/endpoint/pci-epc-core.c
-+++ b/drivers/pci/endpoint/pci-epc-core.c
-@@ -1,20 +1,9 @@
-+// SPDX-License-Identifier: GPL-2.0
- /**
- * PCI Endpoint *Controller* (EPC) library
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
- #include <linux/device.h>
-@@ -141,25 +130,26 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
- /**
- * pci_epc_raise_irq() - interrupt the host system
- * @epc: the EPC device which has to interrupt the host
-- * @type: specify the type of interrupt; legacy or MSI
-- * @interrupt_num: the MSI interrupt number
-+ * @func_no: the endpoint function number in the EPC device
-+ * @type: specify the type of interrupt; legacy, MSI or MSI-X
-+ * @interrupt_num: the MSI or MSI-X interrupt number
- *
-- * Invoke to raise an MSI or legacy interrupt
-+ * Invoke to raise an legacy, MSI or MSI-X interrupt
- */
--int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
-- u8 interrupt_num)
-+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
-+ enum pci_epc_irq_type type, u16 interrupt_num)
- {
- int ret;
- unsigned long flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (!epc->ops->raise_irq)
- return 0;
-
- spin_lock_irqsave(&epc->lock, flags);
-- ret = epc->ops->raise_irq(epc, type, interrupt_num);
-+ ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
- spin_unlock_irqrestore(&epc->lock, flags);
-
- return ret;
-@@ -169,22 +159,23 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
- /**
- * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
- * @epc: the EPC device to which MSI interrupts was requested
-+ * @func_no: the endpoint function number in the EPC device
- *
- * Invoke to get the number of MSI interrupts allocated by the RC
- */
--int pci_epc_get_msi(struct pci_epc *epc)
-+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
- {
- int interrupt;
- unsigned long flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return 0;
-
- if (!epc->ops->get_msi)
- return 0;
-
- spin_lock_irqsave(&epc->lock, flags);
-- interrupt = epc->ops->get_msi(epc);
-+ interrupt = epc->ops->get_msi(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
-
- if (interrupt < 0)
-@@ -199,17 +190,19 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
- /**
- * pci_epc_set_msi() - set the number of MSI interrupt numbers required
- * @epc: the EPC device on which MSI has to be configured
-+ * @func_no: the endpoint function number in the EPC device
- * @interrupts: number of MSI interrupts required by the EPF
- *
- * Invoke to set the required number of MSI interrupts.
- */
--int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
-+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
- {
- int ret;
- u8 encode_int;
- unsigned long flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
-+ interrupts > 32)
- return -EINVAL;
-
- if (!epc->ops->set_msi)
-@@ -218,7 +211,7 @@ int pci_epc_set_msi(struct pci_epc *epc,
- encode_int = order_base_2(interrupts);
-
- spin_lock_irqsave(&epc->lock, flags);
-- ret = epc->ops->set_msi(epc, encode_int);
-+ ret = epc->ops->set_msi(epc, func_no, encode_int);
- spin_unlock_irqrestore(&epc->lock, flags);
-
- return ret;
-@@ -226,24 +219,83 @@ int pci_epc_set_msi(struct pci_epc *epc,
- EXPORT_SYMBOL_GPL(pci_epc_set_msi);
-
- /**
-+ * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
-+ * @epc: the EPC device to which MSI-X interrupts was requested
-+ * @func_no: the endpoint function number in the EPC device
-+ *
-+ * Invoke to get the number of MSI-X interrupts allocated by the RC
-+ */
-+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
-+{
-+ int interrupt;
-+ unsigned long flags;
-+
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
-+ return 0;
-+
-+ if (!epc->ops->get_msix)
-+ return 0;
-+
-+ spin_lock_irqsave(&epc->lock, flags);
-+ interrupt = epc->ops->get_msix(epc, func_no);
-+ spin_unlock_irqrestore(&epc->lock, flags);
-+
-+ if (interrupt < 0)
-+ return 0;
-+
-+ return interrupt + 1;
-+}
-+EXPORT_SYMBOL_GPL(pci_epc_get_msix);
-+
-+/**
-+ * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
-+ * @epc: the EPC device on which MSI-X has to be configured
-+ * @func_no: the endpoint function number in the EPC device
-+ * @interrupts: number of MSI-X interrupts required by the EPF
-+ *
-+ * Invoke to set the required number of MSI-X interrupts.
-+ */
-+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
-+{
-+ int ret;
-+ unsigned long flags;
-+
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
-+ interrupts < 1 || interrupts > 2048)
-+ return -EINVAL;
-+
-+ if (!epc->ops->set_msix)
-+ return 0;
-+
-+ spin_lock_irqsave(&epc->lock, flags);
-+ ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
-+ spin_unlock_irqrestore(&epc->lock, flags);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(pci_epc_set_msix);
-+
-+/**
- * pci_epc_unmap_addr() - unmap CPU address from PCI address
- * @epc: the EPC device on which address is allocated
-+ * @func_no: the endpoint function number in the EPC device
- * @phys_addr: physical address of the local system
- *
- * Invoke to unmap the CPU address from PCI address.
- */
--void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
-+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t phys_addr)
- {
- unsigned long flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return;
-
- if (!epc->ops->unmap_addr)
- return;
-
- spin_lock_irqsave(&epc->lock, flags);
-- epc->ops->unmap_addr(epc, phys_addr);
-+ epc->ops->unmap_addr(epc, func_no, phys_addr);
- spin_unlock_irqrestore(&epc->lock, flags);
- }
- EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
-@@ -251,26 +303,27 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
- /**
- * pci_epc_map_addr() - map CPU address to PCI address
- * @epc: the EPC device on which address is allocated
-+ * @func_no: the endpoint function number in the EPC device
- * @phys_addr: physical address of the local system
- * @pci_addr: PCI address to which the physical address should be mapped
- * @size: the size of the allocation
- *
- * Invoke to map CPU address with PCI address.
- */
--int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
-- u64 pci_addr, size_t size)
-+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t phys_addr, u64 pci_addr, size_t size)
- {
- int ret;
- unsigned long flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (!epc->ops->map_addr)
- return 0;
-
- spin_lock_irqsave(&epc->lock, flags);
-- ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
-+ ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
- spin_unlock_irqrestore(&epc->lock, flags);
-
- return ret;
-@@ -280,22 +333,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
- /**
- * pci_epc_clear_bar() - reset the BAR
- * @epc: the EPC device for which the BAR has to be cleared
-- * @bar: the BAR number that has to be reset
-+ * @func_no: the endpoint function number in the EPC device
-+ * @epf_bar: the struct epf_bar that contains the BAR information
- *
- * Invoke to reset the BAR of the endpoint device.
- */
--void pci_epc_clear_bar(struct pci_epc *epc, int bar)
-+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar)
- {
- unsigned long flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
-+ (epf_bar->barno == BAR_5 &&
-+ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- return;
-
- if (!epc->ops->clear_bar)
- return;
-
- spin_lock_irqsave(&epc->lock, flags);
-- epc->ops->clear_bar(epc, bar);
-+ epc->ops->clear_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, flags);
- }
- EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
-@@ -303,26 +360,32 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
- /**
- * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
- * @epc: the EPC device on which BAR has to be configured
-- * @bar: the BAR number that has to be configured
-- * @size: the size of the addr space
-- * @flags: specify memory allocation/io allocation/32bit address/64 bit address
-+ * @func_no: the endpoint function number in the EPC device
-+ * @epf_bar: the struct epf_bar that contains the BAR information
- *
- * Invoke to configure the BAR of the endpoint device.
- */
--int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
-- dma_addr_t bar_phys, size_t size, int flags)
-+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar)
- {
- int ret;
- unsigned long irq_flags;
-+ int flags = epf_bar->flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
-+ (epf_bar->barno == BAR_5 &&
-+ flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
-+ (flags & PCI_BASE_ADDRESS_SPACE_IO &&
-+ flags & PCI_BASE_ADDRESS_IO_MASK) ||
-+ (upper_32_bits(epf_bar->size) &&
-+ !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
- return -EINVAL;
-
- if (!epc->ops->set_bar)
- return 0;
-
- spin_lock_irqsave(&epc->lock, irq_flags);
-- ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
-+ ret = epc->ops->set_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, irq_flags);
-
- return ret;
-@@ -332,6 +395,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
- /**
- * pci_epc_write_header() - write standard configuration header
- * @epc: the EPC device to which the configuration header should be written
-+ * @func_no: the endpoint function number in the EPC device
- * @header: standard configuration header fields
- *
- * Invoke to write the configuration header to the endpoint controller. Every
-@@ -339,19 +403,20 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
- * configuration header would be written. The callback function should write
- * the header fields to this dedicated location.
- */
--int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
-+int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_header *header)
- {
- int ret;
- unsigned long flags;
-
-- if (IS_ERR(epc))
-+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (!epc->ops->write_header)
- return 0;
-
- spin_lock_irqsave(&epc->lock, flags);
-- ret = epc->ops->write_header(epc, header);
-+ ret = epc->ops->write_header(epc, func_no, header);
- spin_unlock_irqrestore(&epc->lock, flags);
-
- return ret;
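pci_epc_set_bar() above now rejects requests that cannot be represented: a 64-bit flag on BAR_5 (there is no BAR_6 to hold the upper half), I/O BARs with stray address bits, and any BAR whose size needs more than 32 bits without PCI_BASE_ADDRESS_MEM_TYPE_64 set. An example of a request the new checks refuse (fragment for illustration only):

        /* Illustration only: rejected because BAR_5 has no upper half for 64-bit. */
        struct pci_epf_bar bad = {
                .barno  = BAR_5,
                .size   = SZ_1M,
                .flags  = PCI_BASE_ADDRESS_SPACE_MEMORY |
                          PCI_BASE_ADDRESS_MEM_TYPE_64,
        };

        ret = pci_epc_set_bar(epc, func_no, &bad);      /* -> -EINVAL */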
---- a/drivers/pci/endpoint/pci-epc-mem.c
-+++ b/drivers/pci/endpoint/pci-epc-mem.c
-@@ -1,20 +1,9 @@
-+// SPDX-License-Identifier: GPL-2.0
- /**
- * PCI Endpoint *Controller* Address Space Management
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
- #include <linux/io.h>
---- a/drivers/pci/endpoint/pci-epf-core.c
-+++ b/drivers/pci/endpoint/pci-epf-core.c
-@@ -1,20 +1,9 @@
-+// SPDX-License-Identifier: GPL-2.0
- /**
- * PCI Endpoint *Function* (EPF) library
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
- #include <linux/device.h>
-@@ -26,6 +15,8 @@
- #include <linux/pci-epf.h>
- #include <linux/pci-ep-cfs.h>
-
-+static DEFINE_MUTEX(pci_epf_mutex);
-+
- static struct bus_type pci_epf_bus_type;
- static const struct device_type pci_epf_type;
-
-@@ -109,6 +100,8 @@ void pci_epf_free_space(struct pci_epf *
-
- epf->bar[bar].phys_addr = 0;
- epf->bar[bar].size = 0;
-+ epf->bar[bar].barno = 0;
-+ epf->bar[bar].flags = 0;
- }
- EXPORT_SYMBOL_GPL(pci_epf_free_space);
-
-@@ -137,11 +130,27 @@ void *pci_epf_alloc_space(struct pci_epf
-
- epf->bar[bar].phys_addr = phys_addr;
- epf->bar[bar].size = size;
-+ epf->bar[bar].barno = bar;
-+ epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY;
-
- return space;
- }
- EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
-
-+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
-+{
-+ struct config_group *group, *tmp;
-+
-+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
-+ return;
-+
-+ mutex_lock(&pci_epf_mutex);
-+ list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
-+ pci_ep_cfs_remove_epf_group(group);
-+ list_del(&driver->epf_group);
-+ mutex_unlock(&pci_epf_mutex);
-+}
-+
- /**
- * pci_epf_unregister_driver() - unregister the PCI EPF driver
- * @driver: the PCI EPF driver that has to be unregistered
-@@ -150,11 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
- */
- void pci_epf_unregister_driver(struct pci_epf_driver *driver)
- {
-- pci_ep_cfs_remove_epf_group(driver->group);
-+ pci_epf_remove_cfs(driver);
- driver_unregister(&driver->driver);
- }
- EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
-
-+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
-+{
-+ struct config_group *group;
-+ const struct pci_epf_device_id *id;
-+
-+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
-+ return 0;
-+
-+ INIT_LIST_HEAD(&driver->epf_group);
-+
-+ id = driver->id_table;
-+ while (id->name[0]) {
-+ group = pci_ep_cfs_add_epf_group(id->name);
-+ if (IS_ERR(group)) {
-+ pci_epf_remove_cfs(driver);
-+ return PTR_ERR(group);
-+ }
-+
-+ mutex_lock(&pci_epf_mutex);
-+ list_add_tail(&group->group_entry, &driver->epf_group);
-+ mutex_unlock(&pci_epf_mutex);
-+ id++;
-+ }
-+
-+ return 0;
-+}
-+
- /**
- * __pci_epf_register_driver() - register a new PCI EPF driver
- * @driver: structure representing PCI EPF driver
-@@ -180,7 +216,7 @@ int __pci_epf_register_driver(struct pci
- if (ret)
- return ret;
-
-- driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
-+ pci_epf_add_cfs(driver);
-
- return 0;
- }
-@@ -211,29 +247,17 @@ struct pci_epf *pci_epf_create(const cha
- int ret;
- struct pci_epf *epf;
- struct device *dev;
-- char *func_name;
-- char *buf;
-+ int len;
-
- epf = kzalloc(sizeof(*epf), GFP_KERNEL);
-- if (!epf) {
-- ret = -ENOMEM;
-- goto err_ret;
-- }
-+ if (!epf)
-+ return ERR_PTR(-ENOMEM);
-
-- buf = kstrdup(name, GFP_KERNEL);
-- if (!buf) {
-- ret = -ENOMEM;
-- goto free_epf;
-- }
--
-- func_name = buf;
-- buf = strchrnul(buf, '.');
-- *buf = '\0';
--
-- epf->name = kstrdup(func_name, GFP_KERNEL);
-+ len = strchrnul(name, '.') - name;
-+ epf->name = kstrndup(name, len, GFP_KERNEL);
- if (!epf->name) {
-- ret = -ENOMEM;
-- goto free_func_name;
-+ kfree(epf);
-+ return ERR_PTR(-ENOMEM);
- }
-
- dev = &epf->dev;
-@@ -242,28 +266,18 @@ struct pci_epf *pci_epf_create(const cha
- dev->type = &pci_epf_type;
-
- ret = dev_set_name(dev, "%s", name);
-- if (ret)
-- goto put_dev;
-+ if (ret) {
-+ put_device(dev);
-+ return ERR_PTR(ret);
-+ }
-
- ret = device_add(dev);
-- if (ret)
-- goto put_dev;
-+ if (ret) {
-+ put_device(dev);
-+ return ERR_PTR(ret);
-+ }
-
-- kfree(func_name);
- return epf;
--
--put_dev:
-- put_device(dev);
-- kfree(epf->name);
--
--free_func_name:
-- kfree(func_name);
--
--free_epf:
-- kfree(epf);
--
--err_ret:
-- return ERR_PTR(ret);
- }
- EXPORT_SYMBOL_GPL(pci_epf_create);
-
---- a/drivers/pci/host/pci-host-common.c
-+++ b/drivers/pci/host/pci-host-common.c
-@@ -113,9 +113,7 @@ err_out:
- int pci_host_common_probe(struct platform_device *pdev,
- struct pci_ecam_ops *ops)
- {
-- const char *type;
- struct device *dev = &pdev->dev;
-- struct device_node *np = dev->of_node;
- struct pci_bus *bus, *child;
- struct pci_host_bridge *bridge;
- struct pci_config_window *cfg;
-@@ -126,12 +124,6 @@ int pci_host_common_probe(struct platfor
- if (!bridge)
- return -ENOMEM;
-
-- type = of_get_property(np, "device_type", NULL);
-- if (!type || strcmp(type, "pci")) {
-- dev_err(dev, "invalid \"device_type\" %s\n", type);
-- return -EINVAL;
-- }
--
- of_pci_check_probe_only();
-
- /* Parse and map our Configuration Space windows */
---- a/drivers/pci/host/pcie-xilinx-nwl.c
-+++ b/drivers/pci/host/pcie-xilinx-nwl.c
-@@ -778,16 +778,7 @@ static int nwl_pcie_parse_dt(struct nwl_
- struct platform_device *pdev)
- {
- struct device *dev = pcie->dev;
-- struct device_node *node = dev->of_node;
- struct resource *res;
-- const char *type;
--
-- /* Check for device type */
-- type = of_get_property(node, "device_type", NULL);
-- if (!type || strcmp(type, "pci")) {
-- dev_err(dev, "invalid \"device_type\" %s\n", type);
-- return -EINVAL;
-- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
- pcie->breg_base = devm_ioremap_resource(dev, res);
---- a/drivers/pci/host/pcie-xilinx.c
-+++ b/drivers/pci/host/pcie-xilinx.c
-@@ -584,15 +584,8 @@ static int xilinx_pcie_parse_dt(struct x
- struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
- struct resource regs;
-- const char *type;
- int err;
-
-- type = of_get_property(node, "device_type", NULL);
-- if (!type || strcmp(type, "pci")) {
-- dev_err(dev, "invalid \"device_type\" %s\n", type);
-- return -EINVAL;
-- }
--
- err = of_address_to_resource(node, 0, &regs);
- if (err) {
- dev_err(dev, "missing \"reg\" property\n");
---- /dev/null
-+++ b/drivers/pci/mobiveil/Kconfig
-@@ -0,0 +1,50 @@
-+# SPDX-License-Identifier: GPL-2.0
-+
-+menu "Mobiveil PCIe Core Support"
-+ depends on PCI
-+
-+config PCIE_MOBIVEIL
-+ bool
-+
-+config PCIE_MOBIVEIL_HOST
-+ bool
-+ depends on PCI_MSI_IRQ_DOMAIN
-+ select PCIE_MOBIVEIL
-+
-+config PCIE_MOBIVEIL_EP
-+ bool
-+ depends on PCI_ENDPOINT
-+ select PCIE_MOBIVEIL
-+
-+config PCIE_MOBIVEIL_PLAT
-+ bool "Mobiveil AXI PCIe controller"
-+ depends on ARCH_ZYNQMP || COMPILE_TEST
-+ depends on OF
-+ select PCIE_MOBIVEIL_HOST
-+ help
-+ Say Y here if you want to enable support for the Mobiveil AXI PCIe
-+ Soft IP. It has up to 8 outbound and inbound windows
-+	  for address translation, and it is a PCIe Gen4 IP.
-+
-+config PCI_LAYERSCAPE_GEN4
-+	bool "Freescale Layerscape PCIe Gen4 controller in RC mode"
-+ depends on PCI
-+ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
-+ depends on PCI_MSI_IRQ_DOMAIN
-+ select PCIE_MOBIVEIL_HOST
-+ help
-+ Say Y here if you want PCIe Gen4 controller support on
-+	  Layerscape SoCs. The PCIe controller works in RC mode when
-+	  RCW[HOST_AGT_PEX] is set to 0.
-+
-+config PCI_LAYERSCAPE_GEN4_EP
-+	bool "Freescale Layerscape PCIe Gen4 controller in EP mode"
-+ depends on PCI
-+ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
-+ depends on PCI_ENDPOINT
-+ select PCIE_MOBIVEIL_EP
-+ help
-+ Say Y here if you want PCIe Gen4 controller support on
-+	  Layerscape SoCs. The PCIe controller works in EP mode when
-+	  RCW[HOST_AGT_PEX] is set to 1.
-+endmenu
---- /dev/null
-+++ b/drivers/pci/mobiveil/Makefile
-@@ -0,0 +1,7 @@
-+# SPDX-License-Identifier: GPL-2.0
-+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
-+obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
-+obj-$(CONFIG_PCIE_MOBIVEIL_EP) += pcie-mobiveil-ep.o
-+obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
-+obj-$(CONFIG_PCI_LAYERSCAPE_GEN4) += pci-layerscape-gen4.o
-+obj-$(CONFIG_PCI_LAYERSCAPE_GEN4_EP) += pci-layerscape-gen4-ep.o
---- /dev/null
-+++ b/drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
-@@ -0,0 +1,178 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * PCIe controller EP driver for Freescale Layerscape SoCs
-+ *
-+ * Copyright (C) 2018 NXP Semiconductor.
-+ *
-+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/of_pci.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_address.h>
-+#include <linux/pci.h>
-+#include <linux/platform_device.h>
-+#include <linux/resource.h>
-+
-+#include "pcie-mobiveil.h"
-+
-+struct ls_pcie_g4_ep {
-+ struct mobiveil_pcie *mv_pci;
-+};
-+
-+#define to_ls_pcie_g4_ep(x) dev_get_drvdata((x)->dev)
-+
-+static const struct of_device_id ls_pcie_g4_ep_of_match[] = {
-+ { .compatible = "fsl,lx2160a-pcie-ep",},
-+ { },
-+};
-+
-+static void ls_pcie_g4_get_bar_num(struct mobiveil_pcie_ep *ep)
-+{
-+ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
-+ u32 type, reg;
-+ u8 bar;
-+
-+ ep->bar_num = BAR_5 + 1;
-+
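-+	/*
-+	 * A 64-bit memory BAR occupies two consecutive BAR registers, so
-+	 * each 64-bit BAR found reduces the number of usable BARs by one.
-+	 */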
-+ for (bar = BAR_0; bar <= BAR_5; bar++) {
-+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-+ type = csr_readl(mv_pci, reg) &
-+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
-+ if (type & PCI_BASE_ADDRESS_MEM_TYPE_64)
-+ ep->bar_num--;
-+ }
-+}
-+
-+static void ls_pcie_g4_ep_init(struct mobiveil_pcie_ep *ep)
-+{
-+ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
-+ struct pci_epc *epc = ep->epc;
-+ enum pci_barno bar;
-+ int win_idx, val;
-+
-+ /*
-+	 * Errata: on an inbound posted write transaction that triggers an
-+	 * unsupported request error, the PCIe controller reports an advisory
-+	 * error to the RC instead of an uncorrectable error message.
-+	 * Workaround: set bit 20 (unsupported_request_Error_severity) in the
-+	 * uncorrectable_Error_Severity_Register to 1 so that an unsupported
-+	 * request error is reported as a fatal error.
-+ */
-+ val = csr_readl(mv_pci, CFG_UNCORRECTABLE_ERROR_SEVERITY);
-+ val |= 1 << UNSUPPORTED_REQUEST_ERROR_SHIFT;
-+ csr_writel(mv_pci, val, CFG_UNCORRECTABLE_ERROR_SEVERITY);
-+
-+ ls_pcie_g4_get_bar_num(ep);
-+
-+ for (bar = BAR_0; bar < (ep->bar_num * ep->pf_num); bar++)
-+ mobiveil_pcie_ep_reset_bar(mv_pci, bar);
-+
-+ for (win_idx = 0; win_idx < MAX_IATU_OUT; win_idx++)
-+ mobiveil_pcie_disable_ob_win(mv_pci, win_idx);
-+
-+ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
-+ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
-+}
-+
-+static int ls_pcie_g4_ep_raise_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
-+ enum pci_epc_irq_type type,
-+ u16 interrupt_num)
-+{
-+ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
-+
-+ switch (type) {
-+ case PCI_EPC_IRQ_LEGACY:
-+ return mobiveil_pcie_ep_raise_legacy_irq(ep, func_no);
-+ case PCI_EPC_IRQ_MSI:
-+ return mobiveil_pcie_ep_raise_msi_irq(ep, func_no,
-+ interrupt_num);
-+ case PCI_EPC_IRQ_MSIX:
-+ return mobiveil_pcie_ep_raise_msix_irq(ep, func_no,
-+ interrupt_num);
-+ default:
-+ dev_err(&mv_pci->pdev->dev, "UNKNOWN IRQ type\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static struct mobiveil_pcie_ep_ops pcie_ep_ops = {
-+ .ep_init = ls_pcie_g4_ep_init,
-+ .raise_irq = ls_pcie_g4_ep_raise_irq,
-+};
-+
-+static int __init ls_pcie_gen4_add_pcie_ep(struct ls_pcie_g4_ep *ls_pcie_g4_ep,
-+ struct platform_device *pdev)
-+{
-+ struct mobiveil_pcie *mv_pci = ls_pcie_g4_ep->mv_pci;
-+ struct device *dev = &pdev->dev;
-+ struct mobiveil_pcie_ep *ep;
-+ struct resource *res;
-+ int ret;
-+ struct device_node *np = dev->of_node;
-+
-+ ep = &mv_pci->ep;
-+ ep->ops = &pcie_ep_ops;
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
-+ if (!res)
-+ return -EINVAL;
-+
-+ ep->phys_base = res->start;
-+ ep->addr_size = resource_size(res);
-+
-+ ret = of_property_read_u32(np, "max-functions", &ep->pf_num);
-+ if (ret < 0)
-+ ep->pf_num = 1;
-+
-+ ret = mobiveil_pcie_ep_init(ep);
-+ if (ret) {
-+ dev_err(dev, "failed to initialize endpoint\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int __init ls_pcie_g4_ep_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct mobiveil_pcie *mv_pci;
-+ struct ls_pcie_g4_ep *ls_pcie_g4_ep;
-+ struct resource *res;
-+ int ret;
-+
-+ ls_pcie_g4_ep = devm_kzalloc(dev, sizeof(*ls_pcie_g4_ep), GFP_KERNEL);
-+ if (!ls_pcie_g4_ep)
-+ return -ENOMEM;
-+
-+ mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
-+ if (!mv_pci)
-+ return -ENOMEM;
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-+ mv_pci->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
-+ if (IS_ERR(mv_pci->csr_axi_slave_base))
-+ return PTR_ERR(mv_pci->csr_axi_slave_base);
-+
-+ mv_pci->pdev = pdev;
-+ ls_pcie_g4_ep->mv_pci = mv_pci;
-+
-+ platform_set_drvdata(pdev, ls_pcie_g4_ep);
-+
-+ ret = ls_pcie_gen4_add_pcie_ep(ls_pcie_g4_ep, pdev);
-+
-+ return ret;
-+}
-+
-+static struct platform_driver ls_pcie_g4_ep_driver = {
-+ .driver = {
-+ .name = "layerscape-pcie-gen4-ep",
-+ .of_match_table = ls_pcie_g4_ep_of_match,
-+ .suppress_bind_attrs = true,
-+ },
-+};
-+builtin_platform_driver_probe(ls_pcie_g4_ep_driver, ls_pcie_g4_ep_probe);
---- /dev/null
-+++ b/drivers/pci/mobiveil/pci-layerscape-gen4.c
-@@ -0,0 +1,292 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * PCIe host controller driver for NXP Layerscape SoCs
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/of_pci.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_irq.h>
-+#include <linux/of_address.h>
-+#include <linux/pci.h>
-+#include <linux/platform_device.h>
-+#include <linux/resource.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/regmap.h>
-+
-+#include "pcie-mobiveil.h"
-+
-+/* LUT and PF control registers */
-+#define PCIE_LUT_OFF (0x80000)
-+#define PCIE_LUT_GCR (0x28)
-+#define PCIE_LUT_GCR_RRE (0)
-+
-+#define PCIE_PF_OFF (0xc0000)
-+#define PCIE_PF_INT_STAT (0x18)
-+#define PF_INT_STAT_PABRST (31)
-+
-+#define PCIE_PF_DBG (0x7fc)
-+#define PF_DBG_LTSSM_MASK (0x3f)
-+#define PF_DBG_WE (31)
-+#define PF_DBG_PABR (27)
-+
-+#define LS_PCIE_G4_LTSSM_L0 0x2d /* L0 state */
-+
-+#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
-+
-+struct ls_pcie_g4 {
-+ struct mobiveil_pcie *pci;
-+ struct delayed_work dwork;
-+ int irq;
-+};
-+
-+static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
-+{
-+ return ioread32(pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
-+}
-+
-+static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
-+ u32 off, u32 val)
-+{
-+ iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
-+}
-+
-+static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
-+{
-+ return ioread32(pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
-+}
-+
-+static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
-+ u32 off, u32 val)
-+{
-+ iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
-+}
-+
-+static bool ls_pcie_g4_is_bridge(struct ls_pcie_g4 *pcie)
-+{
-+ struct mobiveil_pcie *mv_pci = pcie->pci;
-+ u32 header_type;
-+
-+ header_type = csr_readb(mv_pci, PCI_HEADER_TYPE);
-+ header_type &= 0x7f;
-+
-+ return header_type == PCI_HEADER_TYPE_BRIDGE;
-+}
-+
-+static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
-+{
-+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
-+ u32 state;
-+
-+ state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
-+ state = state & PF_DBG_LTSSM_MASK;
-+
-+ if (state == LS_PCIE_G4_LTSSM_L0)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+static void ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
-+{
-+ struct mobiveil_pcie *mv_pci = pcie->pci;
-+ u32 val, act_stat;
-+ int to = 100;
-+
-+ /* Poll for pab_csb_reset to set and PAB activity to clear */
-+ do {
-+ usleep_range(10, 15);
-+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
-+ act_stat = csr_readl(mv_pci, PAB_ACTIVITY_STAT);
-+ } while (((val & 1 << PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
-+ if (to < 0) {
-+ dev_err(&mv_pci->pdev->dev, "poll PABRST&PABACT timeout\n");
-+ return;
-+ }
-+
-+ /* clear PEX_RESET bit in PEX_PF0_DBG register */
-+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
-+ val |= 1 << PF_DBG_WE;
-+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
-+
-+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
-+ val |= 1 << PF_DBG_PABR;
-+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
-+
-+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
-+ val &= ~(1 << PF_DBG_WE);
-+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
-+
-+ mobiveil_host_init(mv_pci, true);
-+
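-+	/* Wait up to ~25ms (100 x 200-250us) for the link to retrain to L0 */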
-+ to = 100;
-+ while (!ls_pcie_g4_link_up(mv_pci) && to--)
-+ usleep_range(200, 250);
-+ if (to < 0)
-+		dev_err(&mv_pci->pdev->dev, "PCIe link training timeout\n");
-+}
-+
-+static irqreturn_t ls_pcie_g4_handler(int irq, void *dev_id)
-+{
-+ struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
-+ struct mobiveil_pcie *mv_pci = pcie->pci;
-+ u32 val;
-+
-+ val = csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
-+ if (!val)
-+ return IRQ_NONE;
-+
-+ if (val & PAB_INTP_RESET)
-+ schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
-+
-+ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
-+{
-+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
-+ u32 val;
-+ int ret;
-+
-+ pcie->irq = platform_get_irq_byname(mv_pci->pdev, "intr");
-+ if (pcie->irq < 0) {
-+ dev_err(&mv_pci->pdev->dev, "Can't get 'intr' irq.\n");
-+ return pcie->irq;
-+ }
-+ ret = devm_request_irq(&mv_pci->pdev->dev, pcie->irq,
-+ ls_pcie_g4_handler, IRQF_SHARED,
-+ mv_pci->pdev->name, pcie);
-+ if (ret) {
-+ dev_err(&mv_pci->pdev->dev, "Can't register PCIe IRQ.\n");
-+ return ret;
-+ }
-+
-+ /* Enable interrupts */
-+ val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
-+ PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
-+ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
-+
-+ return 0;
-+}
-+
-+static void ls_pcie_g4_reset(struct work_struct *work)
-+{
-+ struct delayed_work *dwork = container_of(work, struct delayed_work,
-+ work);
-+ struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
-+ struct mobiveil_pcie *mv_pci = pcie->pci;
-+ u16 ctrl;
-+
-+ ctrl = csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
-+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-+ csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
-+ ls_pcie_g4_reinit_hw(pcie);
-+}
-+
-+static int ls_pcie_g4_read_other_conf(struct pci_bus *bus, unsigned int devfn,
-+ int where, int size, u32 *val)
-+{
-+ struct mobiveil_pcie *pci = bus->sysdata;
-+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
-+ int ret;
-+
-+ if (where == PCI_VENDOR_ID)
-+ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
-+ 0 << PCIE_LUT_GCR_RRE);
-+
-+ ret = pci_generic_config_read(bus, devfn, where, size, val);
-+
-+ if (where == PCI_VENDOR_ID)
-+ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
-+ 1 << PCIE_LUT_GCR_RRE);
-+
-+ return ret;
-+}
-+
-+static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
-+ .interrupt_init = ls_pcie_g4_interrupt_init,
-+ .read_other_conf = ls_pcie_g4_read_other_conf,
-+};
-+
-+static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
-+ .link_up = ls_pcie_g4_link_up,
-+};
-+
-+static void workaround_tkt381274(struct ls_pcie_g4 *pcie)
-+{
-+ struct mobiveil_pcie *mv_pci = pcie->pci;
-+ u32 val;
-+
-+ /* Set ACK latency timeout */
-+ val = csr_readl(mv_pci, GPEX_ACK_REPLAY_TO);
-+ val &= ~(ACK_LAT_TO_VAL_MASK << ACK_LAT_TO_VAL_SHIFT);
-+ val |= (4 << ACK_LAT_TO_VAL_SHIFT);
-+ csr_writel(mv_pci, val, GPEX_ACK_REPLAY_TO);
-+}
-+
-+static int __init ls_pcie_g4_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct mobiveil_pcie *mv_pci;
-+ struct ls_pcie_g4 *pcie;
-+ struct device_node *np = dev->of_node;
-+ int ret;
-+
-+ if (!of_parse_phandle(np, "msi-parent", 0)) {
-+ dev_err(dev, "failed to find msi-parent\n");
-+ return -EINVAL;
-+ }
-+
-+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-+ if (!pcie)
-+ return -ENOMEM;
-+
-+ mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
-+ if (!mv_pci)
-+ return -ENOMEM;
-+
-+ mv_pci->pdev = pdev;
-+ mv_pci->ops = &ls_pcie_g4_pab_ops;
-+ mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
-+ pcie->pci = mv_pci;
-+
-+ platform_set_drvdata(pdev, pcie);
-+
-+ INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
-+
-+ ret = mobiveil_pcie_host_probe(mv_pci);
-+ if (ret) {
-+		dev_err(dev, "failed to probe!\n");
-+ return ret;
-+ }
-+
-+ if (!ls_pcie_g4_is_bridge(pcie))
-+ return -ENODEV;
-+
-+ workaround_tkt381274(pcie);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id ls_pcie_g4_of_match[] = {
-+ { .compatible = "fsl,lx2160a-pcie", },
-+ { },
-+};
-+
-+static struct platform_driver ls_pcie_g4_driver = {
-+ .driver = {
-+ .name = "layerscape-pcie-gen4",
-+ .of_match_table = ls_pcie_g4_of_match,
-+ .suppress_bind_attrs = true,
-+ },
-+};
-+
-+builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
---- /dev/null
-+++ b/drivers/pci/mobiveil/pcie-mobiveil-ep.c
-@@ -0,0 +1,512 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/**
-+ * Mobiveil PCIe Endpoint controller driver
-+ *
-+ * Copyright (C) 2018 NXP Semiconductor.
-+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
-+ */
-+
-+#include <linux/of.h>
-+#include <linux/pci-epc.h>
-+#include <linux/pci-epf.h>
-+#include <linux/platform_device.h>
-+#include "pcie-mobiveil.h"
-+
-+void mobiveil_pcie_ep_linkup(struct mobiveil_pcie_ep *ep)
-+{
-+ struct pci_epc *epc = ep->epc;
-+
-+ pci_epc_linkup(epc);
-+}
-+
-+static void __mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
-+ enum pci_barno bar)
-+{
-+ csr_writel(pcie, bar, GPEX_BAR_SELECT);
-+ csr_writel(pcie, 0, GPEX_BAR_SIZE_LDW);
-+ csr_writel(pcie, 0, GPEX_BAR_SIZE_UDW);
-+}
-+
-+void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
-+ enum pci_barno bar)
-+{
-+ __mobiveil_pcie_ep_reset_bar(pcie, bar);
-+}
-+
-+static u8 __mobiveil_pcie_ep_find_next_cap(struct mobiveil_pcie *pcie,
-+ u8 cap_ptr, u8 cap)
-+{
-+ u8 cap_id, next_cap_ptr;
-+ u16 reg;
-+
-+ reg = csr_readw(pcie, cap_ptr);
-+ next_cap_ptr = (reg & 0xff00) >> 8;
-+ cap_id = (reg & 0x00ff);
-+
-+ if (cap_id == cap)
-+ return cap_ptr;
-+
-+ if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
-+ return 0;
-+
-+ return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
-+}
-+
-+static u8 mobiveil_pcie_ep_find_capability(struct mobiveil_pcie *pcie,
-+ u8 cap)
-+{
-+ u8 next_cap_ptr;
-+ u16 reg;
-+
-+ reg = csr_readw(pcie, PCI_CAPABILITY_LIST);
-+ next_cap_ptr = (reg & 0x00ff);
-+
-+ if (!next_cap_ptr)
-+ return 0;
-+
-+ return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
-+}
-+
-+static int mobiveil_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_header *hdr)
-+{
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+
-+ csr_writew(pcie, hdr->vendorid, PCI_VENDOR_ID);
-+ csr_writew(pcie, hdr->deviceid, PCI_DEVICE_ID);
-+ csr_writeb(pcie, hdr->revid, PCI_REVISION_ID);
-+ csr_writeb(pcie, hdr->progif_code, PCI_CLASS_PROG);
-+ csr_writew(pcie, hdr->subclass_code | hdr->baseclass_code << 8,
-+ PCI_CLASS_DEVICE);
-+ csr_writeb(pcie, hdr->cache_line_size, PCI_CACHE_LINE_SIZE);
-+ csr_writew(pcie, hdr->subsys_vendor_id, PCI_SUBSYSTEM_VENDOR_ID);
-+ csr_writew(pcie, hdr->subsys_id, PCI_SUBSYSTEM_ID);
-+ csr_writeb(pcie, hdr->interrupt_pin, PCI_INTERRUPT_PIN);
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_ep_inbound_atu(struct mobiveil_pcie_ep *ep,
-+ u8 func_no, enum pci_barno bar,
-+ dma_addr_t cpu_addr)
-+{
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+
-+ program_ib_windows_ep(pcie, func_no, bar, cpu_addr);
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_ep_outbound_atu(struct mobiveil_pcie_ep *ep,
-+ phys_addr_t phys_addr,
-+ u64 pci_addr, u8 func_no,
-+ size_t size)
-+{
-+ int ret;
-+ u32 free_win;
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+
-+ free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
-+ if (free_win >= ep->num_ob_windows) {
-+ dev_err(&pcie->pdev->dev, "No free outbound window\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = program_ob_windows_ep(pcie, free_win, MEM_WINDOW_TYPE,
-+ phys_addr, pci_addr, func_no, size);
-+ if (ret < 0) {
-+		dev_err(&pcie->pdev->dev, "Failed to program OB window\n");
-+ return ret;
-+ }
-+
-+ set_bit(free_win, ep->ob_window_map);
-+ ep->outbound_addr[free_win] = phys_addr;
-+
-+ return 0;
-+}
-+
-+static void mobiveil_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar)
-+{
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ enum pci_barno bar = epf_bar->barno;
-+
-+ if (bar < ep->bar_num) {
-+ __mobiveil_pcie_ep_reset_bar(pcie,
-+ func_no * ep->bar_num + bar);
-+
-+ mobiveil_pcie_disable_ib_win_ep(pcie, func_no, bar);
-+ }
-+}
-+
-+static int mobiveil_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar)
-+{
-+ int ret;
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ enum pci_barno bar = epf_bar->barno;
-+ size_t size = epf_bar->size;
-+
-+ if (bar < ep->bar_num) {
-+ ret = mobiveil_pcie_ep_inbound_atu(ep, func_no, bar,
-+ epf_bar->phys_addr);
-+ if (ret)
-+ return ret;
-+
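-+		/*
-+		 * Select the per-function BAR and program its size,
-+		 * encoded as the mask ~(size - 1).
-+		 */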
-+ csr_writel(pcie, func_no * ep->bar_num + bar,
-+ GPEX_BAR_SELECT);
-+ csr_writel(pcie, lower_32_bits(~(size - 1)),
-+ GPEX_BAR_SIZE_LDW);
-+ csr_writel(pcie, upper_32_bits(~(size - 1)),
-+ GPEX_BAR_SIZE_UDW);
-+ }
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_find_index(struct mobiveil_pcie_ep *ep,
-+ phys_addr_t addr,
-+ u32 *atu_index)
-+{
-+ u32 index;
-+
-+ for (index = 0; index < ep->num_ob_windows; index++) {
-+ if (ep->outbound_addr[index] != addr)
-+ continue;
-+ *atu_index = index;
-+ return 0;
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+static void mobiveil_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t addr)
-+{
-+ int ret;
-+ u32 atu_index;
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+
-+ ret = mobiveil_pcie_find_index(ep, addr, &atu_index);
-+ if (ret < 0)
-+ return;
-+
-+ mobiveil_pcie_disable_ob_win(pcie, atu_index);
-+ clear_bit(atu_index, ep->ob_window_map);
-+}
-+
-+static int mobiveil_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t addr,
-+ u64 pci_addr, size_t size)
-+{
-+ int ret;
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+
-+ ret = mobiveil_pcie_ep_outbound_atu(ep, addr, pci_addr, func_no, size);
-+ if (ret) {
-+ dev_err(&pcie->pdev->dev, "Failed to enable address\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
-+{
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ u32 val, reg;
-+
-+ if (!ep->msi_cap)
-+ return -EINVAL;
-+
-+ reg = ep->msi_cap + PCI_MSI_FLAGS;
-+ val = csr_readw(pcie, reg);
-+ if (!(val & PCI_MSI_FLAGS_ENABLE))
-+ return -EINVAL;
-+
-+ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
-+
-+ return val;
-+}
-+
-+static int mobiveil_pcie_ep_set_msi(struct pci_epc *epc,
-+ u8 func_no, u8 interrupts)
-+{
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ u32 val, reg;
-+
-+ if (!ep->msi_cap)
-+ return -EINVAL;
-+
-+ reg = ep->msi_cap + PCI_MSI_FLAGS;
-+ val = csr_readw(pcie, reg);
-+ val &= ~PCI_MSI_FLAGS_QMASK;
-+ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
-+ csr_writew(pcie, val, reg);
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
-+{
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ u32 val, reg;
-+
-+ if (!ep->msix_cap)
-+ return -EINVAL;
-+
-+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
-+ val = csr_readw(pcie, reg);
-+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
-+ return -EINVAL;
-+
-+ val &= PCI_MSIX_FLAGS_QSIZE;
-+
-+ return val;
-+}
-+
-+static int mobiveil_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no,
-+ u16 interrupts)
-+{
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ u32 val, reg;
-+
-+ if (!ep->msix_cap)
-+ return -EINVAL;
-+
-+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
-+ val = csr_readw(pcie, reg);
-+ val &= ~PCI_MSIX_FLAGS_QSIZE;
-+ val |= interrupts;
-+ csr_writew(pcie, val, reg);
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
-+ enum pci_epc_irq_type type,
-+ u16 interrupt_num)
-+{
-+ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
-+
-+ if (!ep->ops->raise_irq)
-+ return -EINVAL;
-+
-+ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
-+}
-+
-+static const struct pci_epc_ops epc_ops = {
-+ .write_header = mobiveil_pcie_ep_write_header,
-+ .set_bar = mobiveil_pcie_ep_set_bar,
-+ .clear_bar = mobiveil_pcie_ep_clear_bar,
-+ .map_addr = mobiveil_pcie_ep_map_addr,
-+ .unmap_addr = mobiveil_pcie_ep_unmap_addr,
-+ .set_msi = mobiveil_pcie_ep_set_msi,
-+ .get_msi = mobiveil_pcie_ep_get_msi,
-+ .set_msix = mobiveil_pcie_ep_set_msix,
-+ .get_msix = mobiveil_pcie_ep_get_msix,
-+ .raise_irq = mobiveil_pcie_ep_raise_irq,
-+};
-+
-+int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no)
-+{
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+
-+ dev_err(&pcie->pdev->dev, "EP cannot trigger legacy IRQs\n");
-+
-+ return -EINVAL;
-+}
-+
-+int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
-+ u8 interrupt_num)
-+{
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ struct pci_epc *epc = ep->epc;
-+ u16 msg_ctrl, msg_data;
-+ u32 msg_addr_lower, msg_addr_upper, reg;
-+ u64 msg_addr;
-+ u32 func_num;
-+ bool has_upper;
-+ int ret;
-+
-+ if (!ep->msi_cap)
-+ return -EINVAL;
-+
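-+	/*
-+	 * Select func_no via the PAB_CTRL function-select field so the MSI
-+	 * capability reads below target that function's config space; the
-+	 * field is cleared again once the message address/data are read.
-+	 */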
-+ func_num = csr_readl(pcie, PAB_CTRL);
-+ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
-+ func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
-+ csr_writel(pcie, func_num, PAB_CTRL);
-+
-+ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
-+ reg = ep->msi_cap + PCI_MSI_FLAGS;
-+ msg_ctrl = csr_readw(pcie, reg);
-+ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
-+ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
-+ msg_addr_lower = csr_readl(pcie, reg);
-+ if (has_upper) {
-+ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
-+ msg_addr_upper = csr_readl(pcie, reg);
-+ reg = ep->msi_cap + PCI_MSI_DATA_64;
-+ msg_data = csr_readw(pcie, reg);
-+ } else {
-+ msg_addr_upper = 0;
-+ reg = ep->msi_cap + PCI_MSI_DATA_32;
-+ msg_data = csr_readw(pcie, reg);
-+ }
-+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
-+
-+ func_num = csr_readl(pcie, PAB_CTRL);
-+ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
-+ csr_writel(pcie, func_num, PAB_CTRL);
-+
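-+	/*
-+	 * Map the host's MSI doorbell address through an outbound window,
-+	 * write the message data to trigger the interrupt, then unmap.
-+	 */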
-+ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
-+ msg_addr, epc->mem->page_size);
-+ if (ret)
-+ return ret;
-+
-+ writel(msg_data | (interrupt_num - 1), ep->msi_mem);
-+
-+ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
-+
-+ return 0;
-+}
-+
-+int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
-+ u16 interrupt_num)
-+{
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ struct pci_epc *epc = ep->epc;
-+ u32 msg_addr_upper, msg_addr_lower;
-+ u32 msg_data;
-+ u64 msg_addr;
-+ u32 func_num;
-+ int ret;
-+
-+ func_num = csr_readl(pcie, PAB_CTRL);
-+ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
-+ func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
-+ csr_writel(pcie, func_num, PAB_CTRL);
-+
-+ msg_addr_lower = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
-+ PCI_MSIX_ENTRY_LOWER_ADDR +
-+ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
-+ msg_addr_upper = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
-+ PCI_MSIX_ENTRY_UPPER_ADDR +
-+ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
-+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
-+ msg_data = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
-+ PCI_MSIX_ENTRY_DATA +
-+ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
-+
-+ func_num = csr_readl(pcie, PAB_CTRL);
-+ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
-+ csr_writel(pcie, func_num, PAB_CTRL);
-+
-+ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
-+ msg_addr, epc->mem->page_size);
-+ if (ret)
-+ return ret;
-+
-+ writel(msg_data, ep->msi_mem);
-+
-+ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
-+
-+ return 0;
-+}
-+
-+void mobiveil_pcie_ep_exit(struct mobiveil_pcie_ep *ep)
-+{
-+ struct pci_epc *epc = ep->epc;
-+
-+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
-+ epc->mem->page_size);
-+
-+ pci_epc_mem_exit(epc);
-+}
-+
-+int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep)
-+{
-+ int ret;
-+ void *addr;
-+ struct pci_epc *epc;
-+ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
-+ struct device *dev = &pcie->pdev->dev;
-+ struct device_node *np = dev->of_node;
-+
-+ if (!pcie->csr_axi_slave_base) {
-+ dev_err(dev, "csr_base is not populated\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
-+ if (ret < 0) {
-+ dev_err(dev, "Unable to read *num-ob-windows* property\n");
-+ return ret;
-+ }
-+
-+ if (ep->num_ob_windows > MAX_IATU_OUT) {
-+ dev_err(dev, "Invalid *num-ob-windows*\n");
-+ return -EINVAL;
-+ }
-+ ep->ob_window_map = devm_kcalloc(dev,
-+ BITS_TO_LONGS(ep->num_ob_windows),
-+ sizeof(long),
-+ GFP_KERNEL);
-+ if (!ep->ob_window_map)
-+ return -ENOMEM;
-+
-+ addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
-+ GFP_KERNEL);
-+ if (!addr)
-+ return -ENOMEM;
-+ ep->outbound_addr = addr;
-+
-+ mobiveil_pcie_enable_bridge_pio(pcie);
-+ mobiveil_pcie_enable_engine_apio(pcie);
-+ mobiveil_pcie_enable_engine_ppio(pcie);
-+ mobiveil_pcie_enable_msi_ep(pcie);
-+
-+ epc = devm_pci_epc_create(dev, &epc_ops);
-+ if (IS_ERR(epc)) {
-+ dev_err(dev, "Failed to create epc device\n");
-+ return PTR_ERR(epc);
-+ }
-+
-+ ep->epc = epc;
-+ epc_set_drvdata(epc, ep);
-+
-+ ep->msi_cap = mobiveil_pcie_ep_find_capability(pcie, PCI_CAP_ID_MSI);
-+
-+ ep->msix_cap = mobiveil_pcie_ep_find_capability(pcie,
-+ PCI_CAP_ID_MSIX);
-+
-+ if (ep->ops->ep_init)
-+ ep->ops->ep_init(ep);
-+
-+ epc->max_functions = ep->pf_num;
-+
-+ ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
-+ ep->page_size);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to initialize address space\n");
-+ return ret;
-+ }
-+
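-+	/*
-+	 * Reserve one page of EP address space used to map the host's
-+	 * MSI/MSI-X doorbell address when raising interrupts.
-+	 */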
-+ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
-+ epc->mem->page_size);
-+ if (!ep->msi_mem) {
-+ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/pci/mobiveil/pcie-mobiveil-host.c
-@@ -0,0 +1,640 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * PCIe host controller driver for Mobiveil PCIe Host controller
-+ *
-+ * Copyright (c) 2018 Mobiveil Inc.
-+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/irqchip/chained_irq.h>
-+#include <linux/irqdomain.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/msi.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_pci.h>
-+#include <linux/pci.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+
-+#include "pcie-mobiveil.h"
-+
-+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
-+{
-+ struct mobiveil_pcie *pcie = bus->sysdata;
-+
-+ /* If there is no link, then there is no device */
-+ if (bus->number > pcie->rp.root_bus_nr && !mobiveil_pcie_link_up(pcie))
-+ return false;
-+
-+ /* Only one device down on each root port */
-+ if ((bus->number == pcie->rp.root_bus_nr) && (devfn > 0))
-+ return false;
-+
-+ /*
-+ * Do not read more than one device on the bus directly
-+	 * attached to the RC.
-+ */
-+ if ((bus->primary == pcie->rp.root_bus_nr) && (PCI_SLOT(devfn) > 0))
-+ return false;
-+
-+ return true;
-+}
-+
-+/*
-+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
-+ * root port or endpoint
-+ */
-+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
-+ unsigned int devfn, int where)
-+{
-+ struct mobiveil_pcie *pcie = bus->sysdata;
-+ u32 value;
-+
-+ if (!mobiveil_pcie_valid_device(bus, devfn))
-+ return NULL;
-+
-+ /* RC config access */
-+ if (bus->number == pcie->rp.root_bus_nr)
-+ return pcie->csr_axi_slave_base + where;
-+
-+ /*
-+ * EP config access (in Config/APIO space)
-+ * Program PEX Address base (31..16 bits) with appropriate value
-+ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
-+ * Relies on pci_lock serialization
-+ */
-+ value = bus->number << PAB_BUS_SHIFT |
-+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
-+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
-+
-+ csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
-+
-+ return pcie->rp.config_axi_slave_base + where;
-+}
-+
-+static int mobiveil_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
-+ int where, int size, u32 *val)
-+{
-+ struct mobiveil_pcie *pcie = bus->sysdata;
-+ struct root_port *rp = &pcie->rp;
-+
-+ if (bus->number > rp->root_bus_nr && rp->ops->read_other_conf)
-+ return rp->ops->read_other_conf(bus, devfn, where, size, val);
-+
-+ return pci_generic_config_read(bus, devfn, where, size, val);
-+}
-+
-+static struct pci_ops mobiveil_pcie_ops = {
-+ .map_bus = mobiveil_pcie_map_bus,
-+ .read = mobiveil_pcie_config_read,
-+ .write = pci_generic_config_write,
-+};
-+
-+static void mobiveil_pcie_isr(struct irq_desc *desc)
-+{
-+ struct irq_chip *chip = irq_desc_get_chip(desc);
-+ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
-+ struct device *dev = &pcie->pdev->dev;
-+ struct mobiveil_msi *msi = &pcie->rp.msi;
-+ u32 msi_data, msi_addr_lo, msi_addr_hi;
-+ u32 intr_status, msi_status;
-+ unsigned long shifted_status;
-+ u32 bit, virq, val, mask;
-+
-+ /*
-+ * The core provides a single interrupt for both INTx/MSI messages.
-+ * So we'll read both INTx and MSI status
-+ */
-+
-+ chained_irq_enter(chip, desc);
-+
-+ /* read INTx status */
-+ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
-+ mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
-+ intr_status = val & mask;
-+
-+ /* Handle INTx */
-+ if (intr_status & PAB_INTP_INTX_MASK) {
-+ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
-+ shifted_status &= PAB_INTP_INTX_MASK;
-+ shifted_status >>= PAB_INTX_START;
-+ do {
-+ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
-+ virq = irq_find_mapping(pcie->rp.intx_domain,
-+ bit + 1);
-+ if (virq)
-+ generic_handle_irq(virq);
-+ else
-+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
-+ bit);
-+
-+ /* clear interrupt handled */
-+ csr_writel(pcie, 1 << (PAB_INTX_START + bit),
-+ PAB_INTP_AMBA_MISC_STAT);
-+ }
-+
-+ shifted_status = csr_readl(pcie,
-+ PAB_INTP_AMBA_MISC_STAT);
-+ shifted_status &= PAB_INTP_INTX_MASK;
-+ shifted_status >>= PAB_INTX_START;
-+ } while (shifted_status != 0);
-+ }
-+
-+ /* read extra MSI status register */
-+ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
-+
-+ /* handle MSI interrupts */
-+ while (msi_status & 1) {
-+ msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
-+
-+ /*
-+		 * The MSI_STATUS_OFFSET register is only cleared once we pop
-+		 * not only the MSI data but also the address from the MSI
-+		 * hardware FIFO, hence the following two dummy reads.
-+ */
-+ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
-+ MSI_ADDR_L_OFFSET);
-+ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
-+ MSI_ADDR_H_OFFSET);
-+ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
-+ msi_data, msi_addr_hi, msi_addr_lo);
-+
-+ virq = irq_find_mapping(msi->dev_domain, msi_data);
-+ if (virq)
-+ generic_handle_irq(virq);
-+
-+ msi_status = readl_relaxed(pcie->apb_csr_base +
-+ MSI_STATUS_OFFSET);
-+ }
-+
-+ /* Clear the interrupt status */
-+ csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
-+ chained_irq_exit(chip, desc);
-+}
-+
-+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
-+{
-+ struct device *dev = &pcie->pdev->dev;
-+ struct platform_device *pdev = pcie->pdev;
-+ struct device_node *node = dev->of_node;
-+ struct resource *res;
-+
-+ /* map config resource */
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-+ "config_axi_slave");
-+ pcie->rp.config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
-+ if (IS_ERR(pcie->rp.config_axi_slave_base))
-+ return PTR_ERR(pcie->rp.config_axi_slave_base);
-+ pcie->rp.ob_io_res = res;
-+
-+ /* map csr resource */
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-+ "csr_axi_slave");
-+ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
-+ if (IS_ERR(pcie->csr_axi_slave_base))
-+ return PTR_ERR(pcie->csr_axi_slave_base);
-+ pcie->pcie_reg_base = res->start;
-+
-+ /* read the number of windows requested */
-+ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
-+ pcie->apio_wins = MAX_PIO_WINDOWS;
-+
-+ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
-+ pcie->ppio_wins = MAX_PIO_WINDOWS;
-+
-+ return 0;
-+}
-+
-+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
-+{
-+ phys_addr_t msg_addr = pcie->pcie_reg_base;
-+ struct mobiveil_msi *msi = &pcie->rp.msi;
-+
-+ msi->num_of_vectors = PCI_NUM_MSI;
-+ msi->msi_pages_phys = (phys_addr_t)msg_addr;
-+
-+ writel_relaxed(lower_32_bits(msg_addr),
-+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
-+ writel_relaxed(upper_32_bits(msg_addr),
-+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
-+ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
-+ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
-+}
-+
-+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
-+{
-+ u32 value, pab_ctrl, type;
-+ struct resource_entry *win;
-+ int i;
-+
-+ /* Disable all inbound/outbound windows */
-+ for (i = 0; i < pcie->apio_wins; i++)
-+ mobiveil_pcie_disable_ob_win(pcie, i);
-+ for (i = 0; i < pcie->ppio_wins; i++)
-+ mobiveil_pcie_disable_ib_win(pcie, i);
-+
-+ pcie->ib_wins_configured = 0;
-+ pcie->ob_wins_configured = 0;
-+
-+ if (!reinit) {
-+ /* setup bus numbers */
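-+		/* primary = 0, secondary = 1, subordinate = 0xff */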
-+ value = csr_readl(pcie, PCI_PRIMARY_BUS);
-+ value &= 0xff000000;
-+ value |= 0x00ff0100;
-+ csr_writel(pcie, value, PCI_PRIMARY_BUS);
-+ }
-+
-+ /*
-+ * program Bus Master Enable Bit in Command Register in PAB Config
-+ * Space
-+ */
-+ value = csr_readl(pcie, PCI_COMMAND);
-+ value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
-+ csr_writel(pcie, value, PCI_COMMAND);
-+
-+ /*
-+ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
-+ * register
-+ */
-+ pab_ctrl = csr_readl(pcie, PAB_CTRL);
-+ pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
-+ csr_writel(pcie, pab_ctrl, PAB_CTRL);
-+
-+ /*
-+ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
-+ * PAB_AXI_PIO_CTRL Register
-+ */
-+ value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
-+ value |= APIO_EN_MASK;
-+ csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
-+
-+ /* Enable PCIe PIO master */
-+ value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
-+ value |= 1 << PIO_ENABLE_SHIFT;
-+ csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
-+
-+ /*
-+	 * We'll program one outbound window for config reads and another
-+	 * default inbound window for all the upstream traffic. The rest of
-+	 * the outbound windows will be configured according to the "ranges"
-+	 * property defined in the device tree.
-+ */
-+
-+ /* config outbound translation window */
-+ program_ob_windows(pcie, WIN_NUM_0, pcie->rp.ob_io_res->start, 0,
-+ CFG_WINDOW_TYPE, resource_size(pcie->rp.ob_io_res));
-+
-+ /* memory inbound translation window */
-+ program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
-+
-+ /* Get the I/O and memory ranges from DT */
-+ resource_list_for_each_entry(win, pcie->resources) {
-+ if (resource_type(win->res) == IORESOURCE_MEM) {
-+ type = MEM_WINDOW_TYPE;
-+ } else if (resource_type(win->res) == IORESOURCE_IO) {
-+ type = IO_WINDOW_TYPE;
-+ } else if (resource_type(win->res) == IORESOURCE_BUS) {
-+ pcie->rp.root_bus_nr = win->res->start;
-+ continue;
-+ } else {
-+ continue;
-+ }
-+
-+ /* configure outbound translation window */
-+ program_ob_windows(pcie, pcie->ob_wins_configured,
-+ win->res->start,
-+ win->res->start - win->offset,
-+ type, resource_size(win->res));
-+ }
-+
-+ /* fixup for PCIe class register */
-+ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
-+ value &= 0xff;
-+ value |= (PCI_CLASS_BRIDGE_PCI << 16);
-+ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
-+
-+ return 0;
-+}
-+
-+static void mobiveil_mask_intx_irq(struct irq_data *data)
-+{
-+ struct irq_desc *desc = irq_to_desc(data->irq);
-+ struct mobiveil_pcie *pcie;
-+ unsigned long flags;
-+ u32 mask, shifted_val;
-+
-+ pcie = irq_desc_get_chip_data(desc);
-+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
-+ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
-+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
-+ shifted_val &= ~mask;
-+ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
-+ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
-+}
-+
-+static void mobiveil_unmask_intx_irq(struct irq_data *data)
-+{
-+ struct irq_desc *desc = irq_to_desc(data->irq);
-+ struct mobiveil_pcie *pcie;
-+ unsigned long flags;
-+ u32 shifted_val, mask;
-+
-+ pcie = irq_desc_get_chip_data(desc);
-+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
-+ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
-+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
-+ shifted_val |= mask;
-+ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
-+ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
-+}
-+
-+static struct irq_chip intx_irq_chip = {
-+ .name = "mobiveil_pcie:intx",
-+ .irq_enable = mobiveil_unmask_intx_irq,
-+ .irq_disable = mobiveil_mask_intx_irq,
-+ .irq_mask = mobiveil_mask_intx_irq,
-+ .irq_unmask = mobiveil_unmask_intx_irq,
-+};
-+
-+/* routine to setup the INTx related data */
-+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
-+ irq_hw_number_t hwirq)
-+{
-+ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
-+ irq_set_chip_data(irq, domain->host_data);
-+
-+ return 0;
-+}
-+
-+/* INTx domain operations structure */
-+static const struct irq_domain_ops intx_domain_ops = {
-+ .map = mobiveil_pcie_intx_map,
-+};
-+
-+static struct irq_chip mobiveil_msi_irq_chip = {
-+ .name = "Mobiveil PCIe MSI",
-+ .irq_mask = pci_msi_mask_irq,
-+ .irq_unmask = pci_msi_unmask_irq,
-+};
-+
-+static struct msi_domain_info mobiveil_msi_domain_info = {
-+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-+ MSI_FLAG_PCI_MSIX),
-+ .chip = &mobiveil_msi_irq_chip,
-+};
-+
-+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
-+{
-+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
-+ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
-+
-+ msg->address_lo = lower_32_bits(addr);
-+ msg->address_hi = upper_32_bits(addr);
-+ msg->data = data->hwirq;
-+
-+ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
-+ (int)data->hwirq, msg->address_hi, msg->address_lo);
-+}
-+
-+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
-+ const struct cpumask *mask, bool force)
-+{
-+ return -EINVAL;
-+}
-+
-+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
-+ .name = "Mobiveil MSI",
-+ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
-+ .irq_set_affinity = mobiveil_msi_set_affinity,
-+};
-+
-+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
-+ unsigned int virq,
-+ unsigned int nr_irqs, void *args)
-+{
-+ struct mobiveil_pcie *pcie = domain->host_data;
-+ struct mobiveil_msi *msi = &pcie->rp.msi;
-+ unsigned long bit;
-+
-+ WARN_ON(nr_irqs != 1);
-+ mutex_lock(&msi->lock);
-+
-+ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
-+ if (bit >= msi->num_of_vectors) {
-+ mutex_unlock(&msi->lock);
-+ return -ENOSPC;
-+ }
-+
-+ set_bit(bit, msi->msi_irq_in_use);
-+
-+ mutex_unlock(&msi->lock);
-+
-+ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
-+ domain->host_data, handle_level_irq, NULL, NULL);
-+ return 0;
-+}
-+
-+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
-+ unsigned int virq,
-+ unsigned int nr_irqs)
-+{
-+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
-+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
-+ struct mobiveil_msi *msi = &pcie->rp.msi;
-+
-+ mutex_lock(&msi->lock);
-+
-+ if (!test_bit(d->hwirq, msi->msi_irq_in_use))
-+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
-+ d->hwirq);
-+ else
-+ __clear_bit(d->hwirq, msi->msi_irq_in_use);
-+
-+ mutex_unlock(&msi->lock);
-+}
-+
-+static const struct irq_domain_ops msi_domain_ops = {
-+ .alloc = mobiveil_irq_msi_domain_alloc,
-+ .free = mobiveil_irq_msi_domain_free,
-+};
-+
-+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
-+{
-+ struct device *dev = &pcie->pdev->dev;
-+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
-+ struct mobiveil_msi *msi = &pcie->rp.msi;
-+
-+ mutex_init(&msi->lock);
-+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
-+ &msi_domain_ops, pcie);
-+ if (!msi->dev_domain) {
-+ dev_err(dev, "failed to create IRQ domain\n");
-+ return -ENOMEM;
-+ }
-+
-+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
-+ &mobiveil_msi_domain_info,
-+ msi->dev_domain);
-+ if (!msi->msi_domain) {
-+ dev_err(dev, "failed to create MSI domain\n");
-+ irq_domain_remove(msi->dev_domain);
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
-+{
-+ struct device *dev = &pcie->pdev->dev;
-+ struct device_node *node = dev->of_node;
-+ int ret;
-+
-+ /* setup INTx */
-+ pcie->rp.intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
-+ &intx_domain_ops, pcie);
-+
-+ if (!pcie->rp.intx_domain) {
-+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
-+ return -ENOMEM;
-+ }
-+
-+ raw_spin_lock_init(&pcie->rp.intx_mask_lock);
-+
-+ /* setup MSI */
-+ ret = mobiveil_allocate_msi_domains(pcie);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
-+{
-+ struct device *dev = &pcie->pdev->dev;
-+ struct resource *res;
-+ int ret;
-+
-+ if (pcie->rp.ops->interrupt_init)
-+ return pcie->rp.ops->interrupt_init(pcie);
-+
-+ /* map MSI config resource */
-+ res = platform_get_resource_byname(pcie->pdev, IORESOURCE_MEM,
-+ "apb_csr");
-+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
-+ if (IS_ERR(pcie->apb_csr_base))
-+ return PTR_ERR(pcie->apb_csr_base);
-+
-+ /* setup MSI hardware registers */
-+ mobiveil_pcie_enable_msi(pcie);
-+
-+ pcie->rp.irq = platform_get_irq(pcie->pdev, 0);
-+ if (pcie->rp.irq <= 0) {
-+ dev_err(dev, "failed to map IRQ: %d\n", pcie->rp.irq);
-+ return -ENODEV;
-+ }
-+
-+ /* initialize the IRQ domains */
-+ ret = mobiveil_pcie_init_irq_domain(pcie);
-+ if (ret) {
-+ dev_err(dev, "Failed creating IRQ Domain\n");
-+ return ret;
-+ }
-+
-+ irq_set_chained_handler_and_data(pcie->rp.irq,
-+ mobiveil_pcie_isr, pcie);
-+
-+ /* Enable interrupts */
-+ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
-+ PAB_INTP_AMBA_MISC_ENB);
-+
-+ return 0;
-+}
-+
-+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
-+{
-+ struct pci_bus *bus;
-+ struct pci_bus *child;
-+ struct pci_host_bridge *bridge;
-+ struct device *dev = &pcie->pdev->dev;
-+ struct device_node *np = dev->of_node;
-+ resource_size_t iobase;
-+ int ret;
-+
-+ ret = mobiveil_pcie_parse_dt(pcie);
-+ if (ret) {
-+ dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
-+ return ret;
-+ }
-+
-+ /* allocate the PCIe port */
-+ bridge = devm_pci_alloc_host_bridge(dev, 0);
-+ if (!bridge)
-+ return -ENOMEM;
-+
-+ /* parse the host bridge base addresses from the device tree file */
-+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
-+ &bridge->windows, &iobase);
-+ if (ret) {
-+ dev_err(dev, "Getting bridge resources failed\n");
-+ return ret;
-+ }
-+
-+ pcie->resources = &bridge->windows;
-+
-+ /*
-+ * configure all inbound and outbound windows and prepare the RC for
-+ * config access
-+ */
-+ ret = mobiveil_host_init(pcie, false);
-+ if (ret) {
-+ dev_err(dev, "Failed to initialize host\n");
-+ goto error;
-+ }
-+
-+ ret = mobiveil_pcie_interrupt_init(pcie);
-+ if (ret) {
-+ dev_err(dev, "Interrupt init failed\n");
-+ goto error;
-+ }
-+
-+ ret = devm_request_pci_bus_resources(dev, pcie->resources);
-+ if (ret)
-+ goto error;
-+
-+ /* Initialize bridge */
-+ bridge->dev.parent = dev;
-+ bridge->sysdata = pcie;
-+ bridge->busnr = pcie->rp.root_bus_nr;
-+ bridge->ops = &mobiveil_pcie_ops;
-+ bridge->map_irq = of_irq_parse_and_map_pci;
-+ bridge->swizzle_irq = pci_common_swizzle;
-+
-+ ret = mobiveil_bringup_link(pcie);
-+	if (ret)
-+		dev_info(dev, "link bring-up failed\n");
-+
-+ /* setup the kernel resources for the newly added PCIe root bus */
-+ ret = pci_scan_root_bus_bridge(bridge);
-+ if (ret)
-+ goto error;
-+
-+ bus = bridge->bus;
-+
-+ pci_assign_unassigned_bus_resources(bus);
-+ list_for_each_entry(child, &bus->children, node)
-+ pcie_bus_configure_settings(child);
-+ pci_bus_add_devices(bus);
-+
-+ return 0;
-+error:
-+ pci_free_resource_list(pcie->resources);
-+ return ret;
-+}
---- /dev/null
-+++ b/drivers/pci/mobiveil/pcie-mobiveil-plat.c
-@@ -0,0 +1,54 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * PCIe host controller driver for Mobiveil PCIe Host controller
-+ *
-+ * Copyright (c) 2018 Mobiveil Inc.
-+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of_pci.h>
-+#include <linux/pci.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+
-+#include "pcie-mobiveil.h"
-+
-+static int mobiveil_pcie_probe(struct platform_device *pdev)
-+{
-+ struct mobiveil_pcie *pcie;
-+ struct device *dev = &pdev->dev;
-+
-+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-+ if (!pcie)
-+ return -ENOMEM;
-+
-+ pcie->pdev = pdev;
-+
-+ return mobiveil_pcie_host_probe(pcie);
-+}
-+
-+static const struct of_device_id mobiveil_pcie_of_match[] = {
-+ {.compatible = "mbvl,gpex40-pcie",},
-+ {},
-+};
-+
-+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
-+
-+static struct platform_driver mobiveil_pcie_driver = {
-+ .probe = mobiveil_pcie_probe,
-+ .driver = {
-+ .name = "mobiveil-pcie",
-+ .of_match_table = mobiveil_pcie_of_match,
-+ .suppress_bind_attrs = true,
-+ },
-+};
-+
-+builtin_platform_driver(mobiveil_pcie_driver);
-+
-+MODULE_LICENSE("GPL v2");
-+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
-+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
---- /dev/null
-+++ b/drivers/pci/mobiveil/pcie-mobiveil.c
-@@ -0,0 +1,334 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * PCIe host controller driver for Mobiveil PCIe Host controller
-+ *
-+ * Copyright (c) 2018 Mobiveil Inc.
-+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
-+ */
-+
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include <linux/platform_device.h>
-+
-+#include "pcie-mobiveil.h"
-+
-+/*
-+ * mobiveil_pcie_sel_page - routine to access paged register
-+ *
-+ * Registers whose address is greater than PAGED_ADDR_BNDRY (0xc00) are
-+ * paged. For this scheme to work, the upper 6 bits of the offset are
-+ * written to the pg_sel field of the PAB_CTRL register, and the lower 10
-+ * bits, combined with PAGED_ADDR_BNDRY, are used as the register offset.
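-+ *
-+ * For example, offset 0x1234 selects page 0x1234 >> 10 = 4 and is then
-+ * accessed at in-page address (0x1234 & 0x3ff) | 0xc00 = 0xe34.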
-+ */
-+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
-+{
-+ u32 val;
-+
-+ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
-+ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
-+ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
-+
-+ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
-+}
-+
-+static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
-+{
-+ if (off < PAGED_ADDR_BNDRY) {
-+ /* For directly accessed registers, clear the pg_sel field */
-+ mobiveil_pcie_sel_page(pcie, 0);
-+ return pcie->csr_axi_slave_base + off;
-+ }
-+
-+ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
-+ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
-+}
-+
-+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
-+{
-+ if ((uintptr_t)addr & (size - 1)) {
-+ *val = 0;
-+ return PCIBIOS_BAD_REGISTER_NUMBER;
-+ }
-+
-+ switch (size) {
-+ case 4:
-+ *val = readl(addr);
-+ break;
-+ case 2:
-+ *val = readw(addr);
-+ break;
-+ case 1:
-+ *val = readb(addr);
-+ break;
-+ default:
-+ *val = 0;
-+ return PCIBIOS_BAD_REGISTER_NUMBER;
-+ }
-+
-+ return PCIBIOS_SUCCESSFUL;
-+}
-+
-+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
-+{
-+ if ((uintptr_t)addr & (size - 1))
-+ return PCIBIOS_BAD_REGISTER_NUMBER;
-+
-+ switch (size) {
-+ case 4:
-+ writel(val, addr);
-+ break;
-+ case 2:
-+ writew(val, addr);
-+ break;
-+ case 1:
-+ writeb(val, addr);
-+ break;
-+ default:
-+ return PCIBIOS_BAD_REGISTER_NUMBER;
-+ }
-+
-+ return PCIBIOS_SUCCESSFUL;
-+}
-+
-+u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
-+{
-+ void *addr;
-+ u32 val;
-+ int ret;
-+
-+ addr = mobiveil_pcie_comp_addr(pcie, off);
-+
-+ ret = mobiveil_pcie_read(addr, size, &val);
-+ if (ret)
-+ dev_err(&pcie->pdev->dev, "read CSR address failed\n");
-+
-+ return val;
-+}
-+
-+void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
-+{
-+ void *addr;
-+ int ret;
-+
-+ addr = mobiveil_pcie_comp_addr(pcie, off);
-+
-+ ret = mobiveil_pcie_write(addr, size, val);
-+ if (ret)
-+ dev_err(&pcie->pdev->dev, "write CSR address failed\n");
-+}
-+
-+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
-+{
-+ if (pcie->ops->link_up)
-+ return pcie->ops->link_up(pcie);
-+
-+ return (csr_readl(pcie, LTSSM_STATUS) &
-+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
-+}
-+
-+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
-+ u64 pci_addr, u32 type, u64 size)
-+{
-+ u32 value;
-+ u64 size64 = ~(size - 1);
-+
-+ if (win_num >= pcie->ppio_wins) {
-+ dev_err(&pcie->pdev->dev,
-+ "ERROR: max inbound windows reached !\n");
-+ return;
-+ }
-+
-+ value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
-+ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT |
-+ WIN_SIZE_MASK << WIN_SIZE_SHIFT);
-+ value |= (type << AMAP_CTRL_TYPE_SHIFT) | (1 << AMAP_CTRL_EN_SHIFT) |
-+ (lower_32_bits(size64) & WIN_SIZE_MASK << WIN_SIZE_SHIFT);
-+ csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
-+
-+ csr_writel(pcie, upper_32_bits(size64),
-+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
-+
-+ csr_writel(pcie, lower_32_bits(cpu_addr),
-+ PAB_PEX_AMAP_AXI_WIN(win_num));
-+ csr_writel(pcie, upper_32_bits(cpu_addr),
-+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
-+
-+ csr_writel(pcie, lower_32_bits(pci_addr),
-+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
-+ csr_writel(pcie, upper_32_bits(pci_addr),
-+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
-+
-+ pcie->ib_wins_configured++;
-+}
-+
-+/*
-+ * routine to program the outbound windows
-+ */
-+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
-+ u64 pci_addr, u32 type, u64 size)
-+{
-+
-+ u32 value;
-+ u64 size64 = ~(size - 1);
-+
-+ if (win_num >= pcie->apio_wins) {
-+ dev_err(&pcie->pdev->dev,
-+ "ERROR: max outbound windows reached !\n");
-+ return;
-+ }
-+
-+ /*
-+ * set the Enable bit, the window type and the encoded window size in
-+ * the PAB_AXI_AMAP_CTRL register
-+ */
-+ value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
-+ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT |
-+ WIN_SIZE_MASK << WIN_SIZE_SHIFT);
-+ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
-+ (lower_32_bits(size64) & WIN_SIZE_MASK << WIN_SIZE_SHIFT);
-+ csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
-+
-+ csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
-+
-+ /*
-+ * program AXI window base with appropriate value in
-+ * PAB_AXI_AMAP_AXI_WIN0 register
-+ */
-+ csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
-+ PAB_AXI_AMAP_AXI_WIN(win_num));
-+ csr_writel(pcie, upper_32_bits(cpu_addr),
-+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
-+
-+ csr_writel(pcie, lower_32_bits(pci_addr),
-+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
-+ csr_writel(pcie, upper_32_bits(pci_addr),
-+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
-+
-+ pcie->ob_wins_configured++;
-+}
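The window size encoding used by program_ib_windows() and program_ob_windows() above (size64 = ~(size - 1), with the low bits masked into the CTRL register and the upper 32 bits written to the EXT size register) can be verified with a standalone calculation. The sketch below is illustrative only and is not part of the patch; the 256 MB window size is an arbitrary example.

    /* Standalone sketch: shows how a 256 MB window size is encoded. */
    #include <stdio.h>
    #include <stdint.h>

    #define WIN_SIZE_SHIFT 10
    #define WIN_SIZE_MASK  0x3fffffu

    int main(void)
    {
            uint64_t size = 256ULL * 1024 * 1024;   /* 256 MB window */
            uint64_t size64 = ~(size - 1);
            uint32_t lo = (uint32_t)size64 & (WIN_SIZE_MASK << WIN_SIZE_SHIFT);
            uint32_t hi = (uint32_t)(size64 >> 32);

            /* prints: size64 0xfffffffff0000000, CTRL size bits 0xf0000000, EXT size 0xffffffff */
            printf("size64 0x%llx, CTRL size bits 0x%x, EXT size 0x%x\n",
                   (unsigned long long)size64, lo, hi);
            return 0;
    }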
-+
-+int program_ob_windows_ep(struct mobiveil_pcie *pcie, int win_num, int type,
-+ u64 phys, u64 bus_addr, u8 func, u64 size)
-+{
-+ u32 val;
-+ u32 size_h, size_l;
-+
-+ if (size & (size - 1))
-+ size = 1 << (1 + ilog2(size));
-+
-+ size_h = upper_32_bits(~(size - 1));
-+ size_l = lower_32_bits(~(size - 1));
-+
-+ val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
-+ val &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT |
-+ WIN_SIZE_MASK << WIN_SIZE_SHIFT);
-+ val |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
-+ (size_l & (WIN_SIZE_MASK << WIN_SIZE_SHIFT));
-+ csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num));
-+
-+ csr_writel(pcie, func, PAB_AXI_AMAP_PCI_HDR_PARAM(win_num));
-+ csr_writel(pcie, lower_32_bits(phys), PAB_AXI_AMAP_AXI_WIN(win_num));
-+ csr_writel(pcie, upper_32_bits(phys),
-+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
-+ csr_writel(pcie, lower_32_bits(bus_addr),
-+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
-+ csr_writel(pcie, upper_32_bits(bus_addr),
-+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
-+ csr_writel(pcie, size_h, PAB_EXT_AXI_AMAP_SIZE(win_num));
-+
-+ return 0;
-+}
-+
-+void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
-+ int bar, u64 phys)
-+{
-+ csr_writel(pcie, upper_32_bits(phys),
-+ PAB_EXT_PEX_BAR_AMAP(func_no, bar));
-+ csr_writel(pcie, lower_32_bits(phys) | PEX_BAR_AMAP_EN,
-+ PAB_PEX_BAR_AMAP(func_no, bar));
-+}
-+
-+void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pcie,
-+ u8 func_no, u8 bar)
-+{
-+ u32 val;
-+
-+ val = csr_readl(pcie, PAB_PEX_BAR_AMAP(func_no, bar));
-+ val &= ~(1 << 0);
-+ csr_writel(pcie, val, PAB_PEX_BAR_AMAP(func_no, bar));
-+}
-+
-+int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
-+{
-+ int retries;
-+
-+ /* check if the link is up or not */
-+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
-+ if (mobiveil_pcie_link_up(pcie))
-+ return 0;
-+
-+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
-+ }
-+
-+ dev_info(&pcie->pdev->dev, "link never came up\n");
-+
-+ return -ETIMEDOUT;
-+}
-+
-+void mobiveil_pcie_disable_ib_win(struct mobiveil_pcie *pcie, int win_num)
-+{
-+ u32 val;
-+
-+ val = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
-+ val &= ~(1 << AMAP_CTRL_EN_SHIFT);
-+ csr_writel(pcie, val, PAB_PEX_AMAP_CTRL(win_num));
-+}
-+
-+void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pcie, int win_num)
-+{
-+ u32 val;
-+
-+ val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
-+ val &= ~(1 << WIN_ENABLE_SHIFT);
-+ csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num));
-+}
-+
-+void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pcie)
-+{
-+ u32 val;
-+
-+ val = csr_readl(pcie, PAB_CTRL);
-+ val |= 1 << AMBA_PIO_ENABLE_SHIFT;
-+ val |= 1 << PEX_PIO_ENABLE_SHIFT;
-+ csr_writel(pcie, val, PAB_CTRL);
-+}
-+
-+void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pcie)
-+{
-+ u32 val;
-+
-+ val = csr_readl(pcie, PAB_AXI_PIO_CTRL);
-+ val |= APIO_EN_MASK;
-+ csr_writel(pcie, val, PAB_AXI_PIO_CTRL);
-+}
-+
-+void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pcie)
-+{
-+ u32 val;
-+
-+ val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
-+ val |= 1 << PIO_ENABLE_SHIFT;
-+ csr_writel(pcie, val, PAB_PEX_PIO_CTRL);
-+}
-+
-+void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pcie)
-+{
-+ u32 val;
-+
-+ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
-+ val |= 1 << 0;
-+ csr_writel(pcie, val, PAB_INTP_AMBA_MISC_ENB);
-+}
---- /dev/null
-+++ b/drivers/pci/mobiveil/pcie-mobiveil.h
-@@ -0,0 +1,296 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * PCIe host controller driver for Mobiveil PCIe Host controller
-+ *
-+ * Copyright (c) 2018 Mobiveil Inc.
-+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-+ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
-+ */
-+
-+#ifndef _PCIE_MOBIVEIL_H
-+#define _PCIE_MOBIVEIL_H
-+
-+#include <linux/pci.h>
-+#include <linux/irq.h>
-+#include <linux/msi.h>
-+#include "../pci.h"
-+
-+#include <linux/pci-epc.h>
-+#include <linux/pci-epf.h>
-+
-+#define MAX_IATU_OUT 256
-+/* register offsets and bit positions */
-+
-+/*
-+ * translation tables are grouped into windows, each window registers are
-+ * grouped into blocks of 4 or 16 registers each
-+ */
-+#define PAB_REG_BLOCK_SIZE 16
-+#define PAB_EXT_REG_BLOCK_SIZE 4
-+
-+#define PAB_REG_ADDR(offset, win) \
-+ (offset + (win * PAB_REG_BLOCK_SIZE))
-+#define PAB_EXT_REG_ADDR(offset, win) \
-+ (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
-+
-+#define LTSSM_STATUS 0x0404
-+#define LTSSM_STATUS_L0_MASK 0x3f
-+#define LTSSM_STATUS_L0 0x2d
-+
-+#define PAB_CTRL 0x0808
-+#define AMBA_PIO_ENABLE_SHIFT 0
-+#define PEX_PIO_ENABLE_SHIFT 1
-+#define PAGE_SEL_SHIFT 13
-+#define PAGE_SEL_MASK 0x3f
-+#define PAGE_LO_MASK 0x3ff
-+#define PAGE_SEL_OFFSET_SHIFT 10
-+#define FUNC_SEL_SHIFT 19
-+#define FUNC_SEL_MASK 0x1ff
-+#define MSI_SW_CTRL_EN (1 << 29)
-+
-+#define PAB_ACTIVITY_STAT 0x81c
-+
-+#define PAB_AXI_PIO_CTRL 0x0840
-+#define APIO_EN_MASK 0xf
-+
-+#define PAB_PEX_PIO_CTRL 0x08c0
-+#define PIO_ENABLE_SHIFT 0
-+
-+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
-+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
-+#define PAB_INTP_RESET (0x1 << 1)
-+#define PAB_INTP_MSI (0x1 << 3)
-+#define PAB_INTP_INTA (0x1 << 5)
-+#define PAB_INTP_INTB (0x1 << 6)
-+#define PAB_INTP_INTC (0x1 << 7)
-+#define PAB_INTP_INTD (0x1 << 8)
-+#define PAB_INTP_PCIE_UE (0x1 << 9)
-+#define PAB_INTP_IE_PMREDI (0x1 << 29)
-+#define PAB_INTP_IE_EC (0x1 << 30)
-+#define PAB_INTP_MSI_MASK PAB_INTP_MSI
-+#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\
-+ PAB_INTP_INTC | PAB_INTP_INTD)
-+
-+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
-+#define WIN_ENABLE_SHIFT 0
-+#define WIN_TYPE_SHIFT 1
-+#define WIN_TYPE_MASK 0x3
-+#define WIN_SIZE_SHIFT 10
-+#define WIN_SIZE_MASK 0x3fffff
-+
-+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
-+
-+#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
-+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
-+#define AXI_WINDOW_ALIGN_MASK 3
-+
-+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
-+#define PAB_BUS_SHIFT 24
-+#define PAB_DEVICE_SHIFT 19
-+#define PAB_FUNCTION_SHIFT 16
-+
-+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
-+#define PAB_INTP_AXI_PIO_CLASS 0x474
-+
-+#define GPEX_ACK_REPLAY_TO 0x438
-+#define ACK_LAT_TO_VAL_MASK 0x1fff
-+#define ACK_LAT_TO_VAL_SHIFT 0
-+
-+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
-+#define AMAP_CTRL_EN_SHIFT 0
-+#define AMAP_CTRL_TYPE_SHIFT 1
-+#define AMAP_CTRL_TYPE_MASK 3
-+
-+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
-+#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
-+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
-+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
-+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
-+
-+/* PPIO WINs EP mode */
-+#define PAB_PEX_BAR_AMAP(func, bar) (0x1ba0 + 0x20 * func + 4 * bar)
-+#define PAB_EXT_PEX_BAR_AMAP(func, bar) (0x84a0 + 0x20 * func + 4 * bar)
-+#define PEX_BAR_AMAP_EN (1 << 0)
-+
-+#define PAB_AXI_AMAP_PCI_HDR_PARAM(idx) (0x5ba0 + 0x04 * idx)
-+#define PAB_MSIX_TABLE_PBA_ACCESS 0xD000
-+
-+#define GPEX_BAR_ENABLE 0x4D4
-+#define GPEX_BAR_SIZE_LDW 0x4D8
-+#define GPEX_BAR_SIZE_UDW 0x4DC
-+#define GPEX_BAR_SELECT 0x4E0
-+
-+#define CFG_UNCORRECTABLE_ERROR_SEVERITY 0x10c
-+#define UNSUPPORTED_REQUEST_ERROR_SHIFT 20
-+#define CFG_UNCORRECTABLE_ERROR_MASK 0x108
-+
-+/* starting offset of INTX bits in status register */
-+#define PAB_INTX_START 5
-+
-+/* supported number of MSI interrupts */
-+#define PCI_NUM_MSI 16
-+
-+/* MSI registers */
-+#define MSI_BASE_LO_OFFSET 0x04
-+#define MSI_BASE_HI_OFFSET 0x08
-+#define MSI_SIZE_OFFSET 0x0c
-+#define MSI_ENABLE_OFFSET 0x14
-+#define MSI_STATUS_OFFSET 0x18
-+#define MSI_DATA_OFFSET 0x20
-+#define MSI_ADDR_L_OFFSET 0x24
-+#define MSI_ADDR_H_OFFSET 0x28
-+
-+/* outbound and inbound window definitions */
-+#define WIN_NUM_0 0
-+#define WIN_NUM_1 1
-+#define CFG_WINDOW_TYPE 0
-+#define IO_WINDOW_TYPE 1
-+#define MEM_WINDOW_TYPE 2
-+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
-+#define MAX_PIO_WINDOWS 8
-+
-+/* Parameters for the waiting for link up routine */
-+#define LINK_WAIT_MAX_RETRIES 10
-+#define LINK_WAIT_MIN 90000
-+#define LINK_WAIT_MAX 100000
-+
-+#define PAGED_ADDR_BNDRY 0xc00
-+#define OFFSET_TO_PAGE_ADDR(off) \
-+ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
-+#define OFFSET_TO_PAGE_IDX(off) \
-+ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
-+
-+struct mobiveil_pcie;
-+struct mobiveil_pcie_ep;
-+
-+struct mobiveil_msi { /* MSI information */
-+ struct mutex lock; /* protect bitmap variable */
-+ struct irq_domain *msi_domain;
-+ struct irq_domain *dev_domain;
-+ phys_addr_t msi_pages_phys;
-+ int num_of_vectors;
-+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
-+};
-+
-+struct mobiveil_rp_ops {
-+ int (*interrupt_init)(struct mobiveil_pcie *pcie);
-+ int (*read_other_conf)(struct pci_bus *bus, unsigned int devfn,
-+ int where, int size, u32 *val);
-+};
-+
-+struct root_port {
-+ u8 root_bus_nr;
-+ void __iomem *config_axi_slave_base; /* endpoint config base */
-+ struct resource *ob_io_res;
-+ struct mobiveil_rp_ops *ops;
-+ int irq;
-+ raw_spinlock_t intx_mask_lock;
-+ struct irq_domain *intx_domain;
-+ struct mobiveil_msi msi;
-+};
-+
-+struct mobiveil_pab_ops {
-+ int (*link_up)(struct mobiveil_pcie *pcie);
-+};
-+
-+struct mobiveil_pcie_ep_ops {
-+ void (*ep_init)(struct mobiveil_pcie_ep *ep);
-+ int (*raise_irq)(struct mobiveil_pcie_ep *ep, u8 func_no,
-+ enum pci_epc_irq_type type, u16 interrupt_num);
-+};
-+
-+struct mobiveil_pcie_ep {
-+ struct pci_epc *epc;
-+ struct mobiveil_pcie_ep_ops *ops;
-+ phys_addr_t phys_base;
-+ size_t addr_size;
-+ size_t page_size;
-+ phys_addr_t *outbound_addr;
-+ unsigned long *ob_window_map;
-+ u32 num_ob_windows;
-+ void __iomem *msi_mem;
-+ phys_addr_t msi_mem_phys;
-+ u8 msi_cap; /* MSI capability offset */
-+ u8 msix_cap; /* MSI-X capability offset */
-+ u8 bar_num;
-+ u32 pf_num;
-+};
-+
-+struct mobiveil_pcie {
-+ struct platform_device *pdev;
-+ struct list_head *resources;
-+ void __iomem *csr_axi_slave_base; /* PAB registers base */
-+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
-+ void __iomem *apb_csr_base; /* MSI register base */
-+ u32 apio_wins;
-+ u32 ppio_wins;
-+ u32 ob_wins_configured; /* configured outbound windows */
-+ u32 ib_wins_configured; /* configured inbound windows */
-+ const struct mobiveil_pab_ops *ops;
-+ struct root_port rp;
-+ struct mobiveil_pcie_ep ep;
-+};
-+#define to_mobiveil_pcie_from_ep(endpoint) \
-+ container_of((endpoint), struct mobiveil_pcie, ep)
-+
-+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
-+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit);
-+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
-+int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
-+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
-+ u64 pci_addr, u32 type, u64 size);
-+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
-+ u64 pci_addr, u32 type, u64 size);
-+void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pci, int win_num);
-+void mobiveil_pcie_disable_ib_win(struct mobiveil_pcie *pci, int win_num);
-+u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
-+void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size);
-+
-+static inline u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
-+{
-+ return csr_read(pcie, off, 0x4);
-+}
-+
-+static inline u32 csr_readw(struct mobiveil_pcie *pcie, u32 off)
-+{
-+ return csr_read(pcie, off, 0x2);
-+}
-+
-+static inline u32 csr_readb(struct mobiveil_pcie *pcie, u32 off)
-+{
-+ return csr_read(pcie, off, 0x1);
-+}
-+
-+static inline void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
-+{
-+ csr_write(pcie, val, off, 0x4);
-+}
-+
-+static inline void csr_writew(struct mobiveil_pcie *pcie, u32 val, u32 off)
-+{
-+ csr_write(pcie, val, off, 0x2);
-+}
-+
-+static inline void csr_writeb(struct mobiveil_pcie *pcie, u32 val, u32 off)
-+{
-+ csr_write(pcie, val, off, 0x1);
-+}
-+
-+void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
-+ int bar, u64 phys);
-+int program_ob_windows_ep(struct mobiveil_pcie *pcie, int win_num, int type,
-+ u64 phys, u64 bus_addr, u8 func, u64 size);
-+void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pci,
-+ u8 func_no, u8 bar);
-+int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep);
-+int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no);
-+int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
-+ u8 interrupt_num);
-+int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
-+ u16 interrupt_num);
-+void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pci, enum pci_barno bar);
-+void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pci);
-+void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pci);
-+void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pci);
-+void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pci);
-+#endif /* _PCIE_MOBIVEIL_H */
---- a/drivers/pci/pcie/portdrv_core.c
-+++ b/drivers/pci/pcie/portdrv_core.c
-@@ -45,6 +45,20 @@ static void release_pcie_device(struct d
- }
-
- /**
-+ * pcibios_check_service_irqs - check irqs in the device tree
-+ * @dev: PCI Express port to handle
-+ * @irqs: Array of irqs to populate
-+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
-+ *
-+ * Return value: 0 if no service irqs were found in the device tree, non-zero otherwise
-+ *
-+ */
-+int __weak pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
-+{
-+ return 0;
-+}
-+
-+/**
- * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
- * for given port
- * @dev: PCI Express port to handle
-@@ -185,10 +199,25 @@ out_free_irqs:
- static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
- {
- int ret, i;
-+ int irq = -1;
-
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
- irqs[i] = -1;
-
-+ /* Check whether the platform owns independent IRQ pins for AER/PME etc.
-+ * Some platforms provide separate AER/PME interrupts and describe
-+ * them in the device tree.
-+ */
-+ ret = pcibios_check_service_irqs(dev, irqs, mask);
-+ if (ret) {
-+ if (dev->irq)
-+ irq = dev->irq;
-+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
-+ if (irqs[i] == -1 && i != PCIE_PORT_SERVICE_VC_SHIFT)
-+ irqs[i] = irq;
-+ return 0;
-+ }
-+
- /*
- * If we support PME or hotplug, but we can't use MSI/MSI-X for
- * them, we have to fall back to INTx or other interrupts, e.g., a
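The hunk above adds a weak pcibios_check_service_irqs() hook so that platforms with dedicated AER/PME interrupt lines can feed them into the port driver. A minimal platform-side override is sketched below; it is an assumption, not code from this patch. It presumes the port's device-tree node names its extra interrupts "aer" and "pme", and that the implementation can include the private drivers/pci/pcie/portdrv.h header for the PCIE_PORT_SERVICE_* definitions.

    /* Hypothetical platform override of the weak hook (sketch only). */
    #include <linux/of.h>
    #include <linux/of_irq.h>
    #include <linux/pci.h>
    #include "portdrv.h"   /* private header: PCIE_PORT_SERVICE_* */

    int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
    {
            /* assumed lookup; a real driver may match its host-bridge node */
            struct device_node *np = pci_device_to_OF_node(dev);
            int irq, found = 0;

            if (!np)
                    return 0;

            if (mask & PCIE_PORT_SERVICE_AER) {
                    irq = of_irq_get_byname(np, "aer");
                    if (irq > 0) {
                            irqs[PCIE_PORT_SERVICE_AER_SHIFT] = irq;
                            found = 1;
                    }
            }

            if (mask & PCIE_PORT_SERVICE_PME) {
                    irq = of_irq_get_byname(np, "pme");
                    if (irq > 0) {
                            irqs[PCIE_PORT_SERVICE_PME_SHIFT] = irq;
                            found = 1;
                    }
            }

            /* non-zero tells pcie_init_service_irqs() that DT irqs were used */
            return found;
    }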
---- a/drivers/pci/quirks.c
-+++ b/drivers/pci/quirks.c
-@@ -3394,6 +3394,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_A
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
-
-+/*
-+ * NXP (Freescale Vendor ID) LS1088 chips do not behave correctly after
-+ * a bus reset: the device's link never comes back up, so its config
-+ * space is no longer accessible.
-+ */
-+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x80c0, quirk_no_bus_reset);
-+
- static void quirk_no_pm_reset(struct pci_dev *dev)
- {
- /*
-@@ -4918,3 +4925,11 @@ static void quirk_no_ats(struct pci_dev
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
- #endif /* CONFIG_PCI_ATS */
-+
-+/* Freescale PCIe doesn't support MSI in RC mode */
-+static void quirk_fsl_no_msi(struct pci_dev *pdev)
-+{
-+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
-+ pdev->no_msi = 1;
-+}
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
---- a/include/linux/pci-ep-cfs.h
-+++ b/include/linux/pci-ep-cfs.h
-@@ -1,12 +1,9 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
- /**
- * PCI Endpoint ConfigFS header file
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
- */
-
- #ifndef __LINUX_PCI_EP_CFS_H
---- a/include/linux/pci-epc.h
-+++ b/include/linux/pci-epc.h
-@@ -1,12 +1,9 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
- /**
- * PCI Endpoint *Controller* (EPC) header file
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
- */
-
- #ifndef __LINUX_PCI_EPC_H
-@@ -20,6 +17,7 @@ enum pci_epc_irq_type {
- PCI_EPC_IRQ_UNKNOWN,
- PCI_EPC_IRQ_LEGACY,
- PCI_EPC_IRQ_MSI,
-+ PCI_EPC_IRQ_MSIX,
- };
-
- /**
-@@ -33,24 +31,32 @@ enum pci_epc_irq_type {
- * capability register
- * @get_msi: ops to get the number of MSI interrupts allocated by the RC from
- * the MSI capability register
-- * @raise_irq: ops to raise a legacy or MSI interrupt
-+ * @set_msix: ops to set the requested number of MSI-X interrupts in the
-+ * MSI-X capability register
-+ * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
-+ * from the MSI-X capability register
-+ * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
- * @start: ops to start the PCI link
- * @stop: ops to stop the PCI link
- * @owner: the module owner containing the ops
- */
- struct pci_epc_ops {
-- int (*write_header)(struct pci_epc *pci_epc,
-+ int (*write_header)(struct pci_epc *epc, u8 func_no,
- struct pci_epf_header *hdr);
-- int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
-- dma_addr_t bar_phys, size_t size, int flags);
-- void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
-- int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
-- u64 pci_addr, size_t size);
-- void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
-- int (*set_msi)(struct pci_epc *epc, u8 interrupts);
-- int (*get_msi)(struct pci_epc *epc);
-- int (*raise_irq)(struct pci_epc *pci_epc,
-- enum pci_epc_irq_type type, u8 interrupt_num);
-+ int (*set_bar)(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar);
-+ void (*clear_bar)(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar);
-+ int (*map_addr)(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t addr, u64 pci_addr, size_t size);
-+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t addr);
-+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
-+ int (*get_msi)(struct pci_epc *epc, u8 func_no);
-+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
-+ int (*get_msix)(struct pci_epc *epc, u8 func_no);
-+ int (*raise_irq)(struct pci_epc *epc, u8 func_no,
-+ enum pci_epc_irq_type type, u16 interrupt_num);
- int (*start)(struct pci_epc *epc);
- void (*stop)(struct pci_epc *epc);
- struct module *owner;
-@@ -94,8 +100,17 @@ struct pci_epc {
- struct config_group *group;
- /* spinlock to protect against concurrent access of EP controller */
- spinlock_t lock;
-+ unsigned int features;
- };
-
-+#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
-+#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
-+#define EPC_FEATURE_MSIX_AVAILABLE BIT(4)
-+#define EPC_FEATURE_SET_BAR(features, bar) \
-+ (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
-+#define EPC_FEATURE_GET_BAR(features) \
-+ ((features & EPC_FEATURE_BAR_MASK) >> 1)
-+
- #define to_pci_epc(device) container_of((device), struct pci_epc, dev)
-
- #define pci_epc_create(dev, ops) \
-@@ -127,17 +142,23 @@ void pci_epc_destroy(struct pci_epc *epc
- int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
- void pci_epc_linkup(struct pci_epc *epc);
- void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
--int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
--int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
-- dma_addr_t bar_phys, size_t size, int flags);
--void pci_epc_clear_bar(struct pci_epc *epc, int bar);
--int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
-+int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_header *hdr);
-+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar);
-+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
-+ struct pci_epf_bar *epf_bar);
-+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t phys_addr,
- u64 pci_addr, size_t size);
--void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
--int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
--int pci_epc_get_msi(struct pci_epc *epc);
--int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
-- u8 interrupt_num);
-+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
-+ phys_addr_t phys_addr);
-+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
-+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
-+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
-+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
-+ enum pci_epc_irq_type type, u16 interrupt_num);
- int pci_epc_start(struct pci_epc *epc);
- void pci_epc_stop(struct pci_epc *epc);
- struct pci_epc *pci_epc_get(const char *epc_name);
---- a/include/linux/pci-epf.h
-+++ b/include/linux/pci-epf.h
-@@ -1,12 +1,9 @@
-+/* SPDX-License-Identifier: GPL-2.0+ */
- /**
- * PCI Endpoint *Function* (EPF) header file
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
-- *
-- * This program is free software: you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License version 2 of
-- * the License as published by the Free Software Foundation.
- */
-
- #ifndef __LINUX_PCI_EPF_H
-@@ -75,7 +72,7 @@ struct pci_epf_ops {
- * @driver: PCI EPF driver
- * @ops: set of function pointers for performing EPF operations
- * @owner: the owner of the module that registers the PCI EPF driver
-- * @group: configfs group corresponding to the PCI EPF driver
-+ * @epf_group: list of configfs group corresponding to the PCI EPF driver
- * @id_table: identifies EPF devices for probing
- */
- struct pci_epf_driver {
-@@ -85,7 +82,7 @@ struct pci_epf_driver {
- struct device_driver driver;
- struct pci_epf_ops *ops;
- struct module *owner;
-- struct config_group *group;
-+ struct list_head epf_group;
- const struct pci_epf_device_id *id_table;
- };
-
-@@ -100,6 +97,8 @@ struct pci_epf_driver {
- struct pci_epf_bar {
- dma_addr_t phys_addr;
- size_t size;
-+ enum pci_barno barno;
-+ int flags;
- };
-
- /**
-@@ -120,6 +119,7 @@ struct pci_epf {
- struct pci_epf_header *header;
- struct pci_epf_bar bar[6];
- u8 msi_interrupts;
-+ u16 msix_interrupts;
- u8 func_no;
-
- struct pci_epc *epc;
---- a/include/linux/pci.h
-+++ b/include/linux/pci.h
-@@ -1946,6 +1946,7 @@ void pcibios_release_device(struct pci_d
- void pcibios_penalize_isa_irq(int irq, int active);
- int pcibios_alloc_irq(struct pci_dev *dev);
- void pcibios_free_irq(struct pci_dev *dev);
-+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask);
-
- #ifdef CONFIG_HIBERNATE_CALLBACKS
- extern struct dev_pm_ops pcibios_pm_ops;
---- a/include/uapi/linux/pcitest.h
-+++ b/include/uapi/linux/pcitest.h
-@@ -16,5 +16,8 @@
- #define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
- #define PCITEST_READ _IOW('P', 0x5, unsigned long)
- #define PCITEST_COPY _IOW('P', 0x6, unsigned long)
-+#define PCITEST_MSIX _IOW('P', 0x7, int)
-+#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
-+#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
-
- #endif /* __UAPI_LINUX_PCITEST_H */
---- a/tools/pci/pcitest.c
-+++ b/tools/pci/pcitest.c
-@@ -30,12 +30,17 @@
- #define BILLION 1E9
-
- static char *result[] = { "NOT OKAY", "OKAY" };
-+static char *irq[] = { "LEGACY", "MSI", "MSI-X" };
-
- struct pci_test {
- char *device;
- char barnum;
- bool legacyirq;
- unsigned int msinum;
-+ unsigned int msixnum;
-+ int irqtype;
-+ bool set_irqtype;
-+ bool get_irqtype;
- bool read;
- bool write;
- bool copy;
-@@ -62,6 +67,24 @@ static int run_test(struct pci_test *tes
- fprintf(stdout, "%s\n", result[ret]);
- }
-
-+ if (test->set_irqtype) {
-+ ret = ioctl(fd, PCITEST_SET_IRQTYPE, test->irqtype);
-+ fprintf(stdout, "SET IRQ TYPE TO %s:\t\t", irq[test->irqtype]);
-+ if (ret < 0)
-+ fprintf(stdout, "FAILED\n");
-+ else
-+ fprintf(stdout, "%s\n", result[ret]);
-+ }
-+
-+ if (test->get_irqtype) {
-+ ret = ioctl(fd, PCITEST_GET_IRQTYPE);
-+ fprintf(stdout, "GET IRQ TYPE:\t\t");
-+ if (ret < 0)
-+ fprintf(stdout, "FAILED\n");
-+ else
-+ fprintf(stdout, "%s\n", irq[ret]);
-+ }
-+
- if (test->legacyirq) {
- ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
- fprintf(stdout, "LEGACY IRQ:\t");
-@@ -80,6 +103,15 @@ static int run_test(struct pci_test *tes
- fprintf(stdout, "%s\n", result[ret]);
- }
-
-+ if (test->msixnum > 0 && test->msixnum <= 2048) {
-+ ret = ioctl(fd, PCITEST_MSIX, test->msixnum);
-+ fprintf(stdout, "MSI-X%d:\t\t", test->msixnum);
-+ if (ret < 0)
-+ fprintf(stdout, "TEST FAILED\n");
-+ else
-+ fprintf(stdout, "%s\n", result[ret]);
-+ }
-+
- if (test->write) {
- ret = ioctl(fd, PCITEST_WRITE, test->size);
- fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
-@@ -130,7 +162,7 @@ int main(int argc, char **argv)
- /* set default endpoint device */
- test->device = "/dev/pci-endpoint-test.0";
-
-- while ((c = getopt(argc, argv, "D:b:m:lrwcs:")) != EOF)
-+ while ((c = getopt(argc, argv, "D:b:m:x:i:Ilrwcs:")) != EOF)
- switch (c) {
- case 'D':
- test->device = optarg;
-@@ -148,6 +180,20 @@ int main(int argc, char **argv)
- if (test->msinum < 1 || test->msinum > 32)
- goto usage;
- continue;
-+ case 'x':
-+ test->msixnum = atoi(optarg);
-+ if (test->msixnum < 1 || test->msixnum > 2048)
-+ goto usage;
-+ continue;
-+ case 'i':
-+ test->irqtype = atoi(optarg);
-+ if (test->irqtype < 0 || test->irqtype > 2)
-+ goto usage;
-+ test->set_irqtype = true;
-+ continue;
-+ case 'I':
-+ test->get_irqtype = true;
-+ continue;
- case 'r':
- test->read = true;
- continue;
-@@ -170,6 +216,9 @@ usage:
- "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
- "\t-b <bar num> BAR test (bar number between 0..5)\n"
- "\t-m <msi num> MSI test (msi number between 1..32)\n"
-+ "\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n"
-+ "\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
-+ "\t-I Get current IRQ type configured\n"
- "\t-l Legacy IRQ test\n"
- "\t-r Read buffer test\n"
- "\t-w Write buffer test\n"
---- a/tools/pci/pcitest.sh
-+++ b/tools/pci/pcitest.sh
-@@ -16,7 +16,10 @@ echo
- echo "Interrupt tests"
- echo
-
-+pcitest -i 0
- pcitest -l
-+
-+pcitest -i 1
- msi=1
-
- while [ $msi -lt 33 ]
-@@ -26,9 +29,21 @@ do
- done
- echo
-
-+pcitest -i 2
-+msix=1
-+
-+while [ $msix -lt 2049 ]
-+do
-+ pcitest -x $msix
-+ msix=`expr $msix + 1`
-+done
-+echo
-+
- echo "Read Tests"
- echo
-
-+pcitest -i 1
-+
- pcitest -r -s 1
- pcitest -r -s 1024
- pcitest -r -s 1025
diff --git a/target/linux/layerscape/patches-4.14/817-platform-security-support-layerscape.patch b/target/linux/layerscape/patches-4.14/817-platform-security-support-layerscape.patch
deleted file mode 100644
index 55e7209ce3..0000000000
--- a/target/linux/layerscape/patches-4.14/817-platform-security-support-layerscape.patch
+++ /dev/null
@@ -1,1443 +0,0 @@
-From d2e808b0dcca1b5e850274f770775c355ae36c48 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:27:03 +0800
-Subject: [PATCH 34/40] platform-security: support layerscape
-This is an integrated patch of platform-security for
- layerscape
-
-Signed-off-by: Sahil Malhotra <sahil.malhotra@nxp.com>
-Signed-off-by: Udit Agarwal <udit.agarwal@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- Documentation/security/keys/secure-key.rst | 67 ++
- MAINTAINERS | 12 +
- drivers/tee/optee/Kconfig | 8 +
- drivers/tee/optee/core.c | 2 +-
- include/keys/secure-type.h | 33 +
- security/keys/Kconfig | 11 +
- security/keys/Makefile | 5 +
- security/keys/encrypted-keys/Makefile | 2 +
- security/keys/encrypted-keys/encrypted.c | 13 +-
- security/keys/encrypted-keys/encrypted.h | 13 +
- .../keys/encrypted-keys/masterkey_secure.c | 37 ++
- security/keys/secure_key.c | 339 ++++++++++
- security/keys/securekey_desc.c | 608 ++++++++++++++++++
- security/keys/securekey_desc.h | 141 ++++
- 14 files changed, 1288 insertions(+), 3 deletions(-)
- create mode 100644 Documentation/security/keys/secure-key.rst
- create mode 100644 include/keys/secure-type.h
- create mode 100644 security/keys/encrypted-keys/masterkey_secure.c
- create mode 100644 security/keys/secure_key.c
- create mode 100644 security/keys/securekey_desc.c
- create mode 100644 security/keys/securekey_desc.h
-
---- /dev/null
-+++ b/Documentation/security/keys/secure-key.rst
-@@ -0,0 +1,67 @@
-+==========
-+Secure Key
-+==========
-+
-+Secure key is the new type added to kernel key ring service.
-+Secure key is a symmetric type key of minimum length 32 bytes
-+and with maximum possible length to be 128 bytes. It is produced
-+in kernel using the CAAM crypto engine. Userspace can only see
-+the blob for the corresponding key. All the blobs are displayed
-+or loaded in hex ascii.
-+
-+Secure key can be created on platforms which supports CAAM
-+hardware block. Secure key can also be used as a master key to
-+create the encrypted keys along with the existing key types in
-+kernel.
-+
-+Secure key uses the CAAM hardware to generate the key and blobify its
-+content for userspace. Generated blobs are tied to the hardware
-+secret key stored in CAAM, so a blob cannot be de-blobified on
-+another machine that has a different secret key.
-+
-+Usage::
-+
-+ keyctl add secure <name> "new <keylen>" <ring>
-+ keyctl load secure <name> "load <hex_blob>" <ring>
-+ keyctl print <key_id>
-+
-+"keyctl add secure" option will create the random data of the
-+specified key len using CAAM and store it as a key in kernel.
-+Key contents will be displayed as blobs to the user in hex ascii.
-+User can input key len from 32 bytes to 128 bytes.
-+
-+"keyctl load secure" option will load the blob contents. In kernel,
-+key will be deirved using input blob and CAAM, along with the secret
-+key stored in CAAM.
-+
-+"keyctl print" will return the hex string of the blob corresponding to
-+key_id. Returned blob will be of key_len + 48 bytes. Extra 48 bytes are
-+the header bytes added by the CAAM.
-+
-+Example of secure key usage::
-+
-+1. Create the secure key with name kmk-master of length 32 bytes::
-+
-+ $ keyctl add secure kmk-master "new 32" @u
-+ 46001928
-+
-+ $keyctl show
-+ Session Keyring
-+ 1030783626 --alswrv 0 65534 keyring: _uid_ses.0
-+ 695927745 --alswrv 0 65534 \_ keyring: _uid.0
-+ 46001928 --als-rv 0 0 \_ secure: kmk-master
-+
-+2. Print the blob contents for the kmk-master key::
-+
-+ $ keyctl print 46001928
-+ d9743445b640f3d59c1670dddc0bc9c2
-+ 34fc9aab7dd05c965e6120025012f029b
-+ 07faa4776c4f6ed02899e35a135531e9a
-+ 6e5c2b51132f9d5aef28f68738e658296
-+ 3fe583177cfe50d2542b659a13039
-+
-+ $ keyctl pipe 46001928 > secure_key.blob
-+
-+3. Load the blob in the user key ring::
-+
-+ $ keyctl load secure kmk-master "load 'cat secure_key.blob'" @u
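The blob-size arithmetic documented above (key length plus a 48-byte CAAM header, printed as hex ascii) can be sanity-checked with a trivial standalone calculation. The sketch below is illustrative only; it uses the MIN_KEY_SIZE and BLOB_HEADER_SIZE constants defined in include/keys/secure-type.h later in this patch, and its output matches the 160 hex characters shown in the "keyctl print" example above.

    /* Standalone sketch: blob size for a 32-byte secure key. */
    #include <stdio.h>

    #define MIN_KEY_SIZE     32
    #define BLOB_HEADER_SIZE 48

    int main(void)
    {
            int key_len = MIN_KEY_SIZE;                  /* "new 32" */
            int blob_len = key_len + BLOB_HEADER_SIZE;   /* bytes in the red blob */

            /* prints: blob 80 bytes -> 160 hex characters from "keyctl print" */
            printf("blob %d bytes -> %d hex characters from \"keyctl print\"\n",
                   blob_len, 2 * blob_len);
            return 0;
    }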
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -7646,6 +7646,18 @@ F: include/keys/trusted-type.h
- F: security/keys/trusted.c
- F: security/keys/trusted.h
-
-+KEYS-SECURE
-+M: Udit Agarwal <udit.agarwal@nxp.com>
-+R: Sahil Malhotra <sahil.malhotra@nxp.com>
-+L: linux-security-module@vger.kernel.org
-+L: keyrings@vger.kernel.org
-+S: Supported
-+F: include/keys/secure-type.h
-+F: security/keys/secure_key.c
-+F: security/keys/securekey_desc.c
-+F: security/keys/securekey_desc.h
-+F: security/keys/encrypted-keys/masterkey_secure.c
-+
- KEYS/KEYRINGS:
- M: David Howells <dhowells@redhat.com>
- L: keyrings@vger.kernel.org
---- a/drivers/tee/optee/Kconfig
-+++ b/drivers/tee/optee/Kconfig
-@@ -6,3 +6,11 @@ config OPTEE
- help
- This implements the OP-TEE Trusted Execution Environment (TEE)
- driver.
-+
-+config OPTEE_SHM_NUM_PRIV_PAGES
-+ int "Private Shared Memory Pages"
-+ default 1
-+ depends on OPTEE
-+ help
-+ This sets the number of private shared memory pages to be
-+ used by the OP-TEE TEE driver.
---- a/drivers/tee/optee/core.c
-+++ b/drivers/tee/optee/core.c
-@@ -31,7 +31,7 @@
-
- #define DRIVER_NAME "optee"
-
--#define OPTEE_SHM_NUM_PRIV_PAGES 1
-+#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
-
- /**
- * optee_from_msg_param() - convert from OPTEE_MSG parameters to
---- /dev/null
-+++ b/include/keys/secure-type.h
-@@ -0,0 +1,33 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * Copyright (C) 2018 NXP.
-+ *
-+ */
-+
-+#ifndef _KEYS_SECURE_TYPE_H
-+#define _KEYS_SECURE_TYPE_H
-+
-+#include <linux/key.h>
-+#include <linux/rcupdate.h>
-+
-+/* The minimum key size is 32 bytes and the maximum key size is fixed
-+ * at 128 bytes.
-+ * The blob size is the maximum key size plus the CAAM blob header.
-+ */
-+
-+#define MIN_KEY_SIZE 32
-+#define MAX_KEY_SIZE 128
-+#define BLOB_HEADER_SIZE 48
-+
-+#define MAX_BLOB_SIZE (MAX_KEY_SIZE + BLOB_HEADER_SIZE)
-+
-+struct secure_key_payload {
-+ struct rcu_head rcu;
-+ unsigned int key_len;
-+ unsigned int blob_len;
-+ unsigned char key[MAX_KEY_SIZE + 1];
-+ unsigned char blob[MAX_BLOB_SIZE];
-+};
-+
-+extern struct key_type key_type_secure;
-+#endif
---- a/security/keys/Kconfig
-+++ b/security/keys/Kconfig
-@@ -71,6 +71,17 @@ config TRUSTED_KEYS
-
- If you are unsure as to whether this is required, answer N.
-
-+config SECURE_KEYS
-+ tristate "SECURE_KEYS"
-+ depends on KEYS && CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
-+ help
-+ This option provides support for creating secure-type keys and blobs
-+ in the kernel. Secure keys are random symmetric keys generated
-+ by CAAM, which also creates the blobs for the random key.
-+ Userspace is only able to see the blob.
-+
-+ If you are unsure as to whether this is required, answer N.
-+
- config ENCRYPTED_KEYS
- tristate "ENCRYPTED KEYS"
- depends on KEYS
---- a/security/keys/Makefile
-+++ b/security/keys/Makefile
-@@ -28,4 +28,9 @@ obj-$(CONFIG_KEY_DH_OPERATIONS) += dh.o
- #
- obj-$(CONFIG_BIG_KEYS) += big_key.o
- obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
-+CFLAGS_secure_key.o += -I$(obj)/../../drivers/crypto/caam/
-+CFLAGS_securekey_desc.o += -I$(obj)/../../drivers/crypto/caam/
-+obj-$(CONFIG_SECURE_KEYS) += securekey.o
-+securekey-y := securekey_desc.o \
-+ secure_key.o
- obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
---- a/security/keys/encrypted-keys/Makefile
-+++ b/security/keys/encrypted-keys/Makefile
-@@ -7,5 +7,7 @@ obj-$(CONFIG_ENCRYPTED_KEYS) += encrypte
-
- encrypted-keys-y := encrypted.o ecryptfs_format.o
- masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
-+masterkey-$(CONFIG_SECURE_KEYS) := masterkey_secure.o
- masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
-+masterkey-$(CONFIG_SECURE_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_secure.o
- encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
---- a/security/keys/encrypted-keys/encrypted.c
-+++ b/security/keys/encrypted-keys/encrypted.c
-@@ -39,6 +39,7 @@
- #include "ecryptfs_format.h"
-
- static const char KEY_TRUSTED_PREFIX[] = "trusted:";
-+static const char KEY_SECURE_PREFIX[] = "secure:";
- static const char KEY_USER_PREFIX[] = "user:";
- static const char hash_alg[] = "sha256";
- static const char hmac_alg[] = "hmac(sha256)";
-@@ -49,6 +50,7 @@ static unsigned int ivsize;
- static int blksize;
-
- #define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1)
-+#define KEY_SECURE_PREFIX_LEN (sizeof(KEY_SECURE_PREFIX) - 1)
- #define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1)
- #define KEY_ECRYPTFS_DESC_LEN 16
- #define HASH_SIZE SHA256_DIGEST_SIZE
-@@ -125,7 +127,7 @@ static int valid_ecryptfs_desc(const cha
- /*
- * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key
- *
-- * key-type:= "trusted:" | "user:"
-+ * key-type:= "trusted:" | "user:" | "secure:"
- * desc:= master-key description
- *
- * Verify that 'key-type' is valid and that 'desc' exists. On key update,
-@@ -140,6 +142,8 @@ static int valid_master_desc(const char
-
- if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
- prefix_len = KEY_TRUSTED_PREFIX_LEN;
-+ else if (!strncmp(new_desc, KEY_SECURE_PREFIX, KEY_SECURE_PREFIX_LEN))
-+ prefix_len = KEY_SECURE_PREFIX_LEN;
- else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
- prefix_len = KEY_USER_PREFIX_LEN;
- else
-@@ -358,7 +362,7 @@ static int calc_hmac(u8 *digest, const u
-
- enum derived_key_type { ENC_KEY, AUTH_KEY };
-
--/* Derive authentication/encryption key from trusted key */
-+/* Derive authentication/encryption key from trusted/secure key */
- static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
- const u8 *master_key, size_t master_keylen)
- {
-@@ -429,6 +433,11 @@ static struct key *request_master_key(st
- mkey = request_trusted_key(epayload->master_desc +
- KEY_TRUSTED_PREFIX_LEN,
- master_key, master_keylen);
-+ } else if (!strncmp(epayload->master_desc, KEY_SECURE_PREFIX,
-+ KEY_SECURE_PREFIX_LEN)) {
-+ mkey = request_secure_key(epayload->master_desc +
-+ KEY_SECURE_PREFIX_LEN,
-+ master_key, master_keylen);
- } else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX,
- KEY_USER_PREFIX_LEN)) {
- mkey = request_user_key(epayload->master_desc +
---- a/security/keys/encrypted-keys/encrypted.h
-+++ b/security/keys/encrypted-keys/encrypted.h
-@@ -16,6 +16,19 @@ static inline struct key *request_truste
- }
- #endif
-
-+#if defined(CONFIG_SECURE_KEYS)
-+extern struct key *request_secure_key(const char *secure_desc,
-+ const u8 **master_key,
-+ size_t *master_keylen);
-+#else
-+static inline struct key *request_secure_key(const char *secure_desc,
-+ const u8 **master_key,
-+ size_t *master_keylen)
-+{
-+ return ERR_PTR(-EOPNOTSUPP);
-+}
-+#endif
-+
- #if ENCRYPTED_DEBUG
- static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
- {
---- /dev/null
-+++ b/security/keys/encrypted-keys/masterkey_secure.c
-@@ -0,0 +1,37 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2018 NXP.
-+ *
-+ */
-+
-+#include <linux/uaccess.h>
-+#include <linux/module.h>
-+#include <linux/err.h>
-+#include <keys/secure-type.h>
-+#include <keys/encrypted-type.h>
-+#include "encrypted.h"
-+
-+/*
-+ * request_secure_key - request the secure key
-+ *
-+ * Secure keys and their blobs are derived from CAAM hardware.
-+ * Userspace manages secure key-type data, but key data is not
-+ * visible in plain form. It is presented as blobs.
-+ */
-+struct key *request_secure_key(const char *secure_desc,
-+ const u8 **master_key, size_t *master_keylen)
-+{
-+ struct secure_key_payload *spayload;
-+ struct key *skey;
-+
-+ skey = request_key(&key_type_secure, secure_desc, NULL);
-+ if (IS_ERR(skey))
-+ goto error;
-+
-+ down_read(&skey->sem);
-+ spayload = skey->payload.data[0];
-+ *master_key = spayload->key;
-+ *master_keylen = spayload->key_len;
-+error:
-+ return skey;
-+}
---- /dev/null
-+++ b/security/keys/secure_key.c
-@@ -0,0 +1,339 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/* Copyright (C) 2018 NXP
-+ * Secure key is generated using NXP CAAM hardware block. CAAM generates the
-+ * random number (used as a key) and creates its blob for the user.
-+ */
-+
-+#include <linux/slab.h>
-+#include <linux/parser.h>
-+#include <linux/string.h>
-+#include <linux/key-type.h>
-+#include <linux/rcupdate.h>
-+#include <keys/secure-type.h>
-+#include <linux/completion.h>
-+
-+#include "securekey_desc.h"
-+
-+static const char hmac_alg[] = "hmac(sha1)";
-+static const char hash_alg[] = "sha1";
-+
-+static struct crypto_shash *hashalg;
-+static struct crypto_shash *hmacalg;
-+
-+enum {
-+ error = -1,
-+ new_key,
-+ load_blob,
-+};
-+
-+static const match_table_t key_tokens = {
-+ {new_key, "new"},
-+ {load_blob, "load"},
-+ {error, NULL}
-+};
-+
-+static struct secure_key_payload *secure_payload_alloc(struct key *key)
-+{
-+ struct secure_key_payload *sec_key = NULL;
-+ int ret = 0;
-+
-+ ret = key_payload_reserve(key, sizeof(*sec_key));
-+ if (ret < 0)
-+ goto out;
-+
-+ sec_key = kzalloc(sizeof(*sec_key), GFP_KERNEL);
-+ if (!sec_key)
-+ goto out;
-+
-+out:
-+ return sec_key;
-+}
-+
-+/*
-+ * parse_inputdata - parse the keyctl input data and fill in the
-+ * payload structure for key or its blob.
-+ * param[in]: data pointer to the data to be parsed for creating key.
-+ * param[in]: p pointer to secure key payload structure to fill parsed data
-+ * On success returns the parsed command (new_key or load_blob), otherwise -EINVAL.
-+ */
-+static int parse_inputdata(char *data, struct secure_key_payload *p)
-+{
-+ substring_t args[MAX_OPT_ARGS];
-+ long keylen = 0;
-+ int ret = -EINVAL;
-+ int key_cmd = -EINVAL;
-+ char *c = NULL;
-+
-+ c = strsep(&data, " \t");
-+ if (!c) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ /* Get the keyctl command i.e. new_key or load_blob etc */
-+ key_cmd = match_token(c, key_tokens, args);
-+
-+ switch (key_cmd) {
-+ case new_key:
-+ /* first argument is key size */
-+ c = strsep(&data, " \t");
-+ if (!c) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ ret = kstrtol(c, 10, &keylen);
-+ if (ret < 0 || keylen < MIN_KEY_SIZE ||
-+ keylen > MAX_KEY_SIZE) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ p->key_len = keylen;
-+ ret = new_key;
-+
-+ break;
-+ case load_blob:
-+ /* first argument is blob data for CAAM*/
-+ c = strsep(&data, " \t");
-+ if (!c) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ /* Blob_len = No of characters in blob/2 */
-+ p->blob_len = strlen(c) / 2;
-+ if (p->blob_len > MAX_BLOB_SIZE) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ ret = hex2bin(p->blob, c, p->blob_len);
-+ if (ret < 0) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ ret = load_blob;
-+
-+ break;
-+ case error:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+out:
-+ return ret;
-+}
-+
-+/*
-+ * secure_instantiate - create a new secure type key.
-+ * Supports generating a new key: a random number is fetched from CAAM
-+ * as the key data and the corresponding red blob is formed and stored
-+ * as key_blob.
-+ * Also supports loading a blob, in which case the key is derived from
-+ * that blob by CAAM.
-+ * On success, return 0. Otherwise return errno.
-+ */
-+static int secure_instantiate(struct key *key,
-+ struct key_preparsed_payload *prep)
-+{
-+ struct secure_key_payload *payload = NULL;
-+ size_t datalen = prep->datalen;
-+ char *data = NULL;
-+ int key_cmd = 0;
-+ int ret = 0;
-+ enum sk_req_type sk_op_type;
-+ struct device *dev = NULL;
-+
-+ if (datalen <= 0 || datalen > 32767 || !prep->data) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ data = kmalloc(datalen + 1, GFP_KERNEL);
-+ if (!data) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ memcpy(data, prep->data, datalen);
-+ data[datalen] = '\0';
-+
-+ payload = secure_payload_alloc(key);
-+ if (!payload) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* Allocate caam job ring for operation to be performed from CAAM */
-+ dev = caam_jr_alloc();
-+ if (!dev) {
-+ pr_info("caam_jr_alloc failed\n");
-+ ret = -ENODEV;
-+ goto out;
-+ }
-+
-+ key_cmd = parse_inputdata(data, payload);
-+ if (key_cmd < 0) {
-+ ret = key_cmd;
-+ goto out;
-+ }
-+
-+ switch (key_cmd) {
-+ case load_blob:
-+ /*
-+ * Red blob decryption to be done for load operation
-+ * to derive the key.
-+ */
-+ sk_op_type = sk_red_blob_dec;
-+ ret = key_deblob(payload, sk_op_type, dev);
-+ if (ret != 0) {
-+ pr_info("secure_key: key_blob decap fail (%d)\n", ret);
-+ goto out;
-+ }
-+ break;
-+ case new_key:
-+ /* Get Random number from caam of the specified length */
-+ sk_op_type = sk_get_random;
-+ ret = caam_get_random(payload, sk_op_type, dev);
-+ if (ret != 0) {
-+ pr_info("secure_key: get_random fail (%d)\n", ret);
-+ goto out;
-+ }
-+
-+ /* Generate red blob of key random bytes with CAAM */
-+ sk_op_type = sk_red_blob_enc;
-+ ret = key_blob(payload, sk_op_type, dev);
-+ if (ret != 0) {
-+ pr_info("secure_key: key_blob encap fail (%d)\n", ret);
-+ goto out;
-+ }
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+out:
-+ if (data)
-+ kzfree(data);
-+ if (dev)
-+ caam_jr_free(dev);
-+
-+ if (!ret)
-+ rcu_assign_keypointer(key, payload);
-+ else
-+ kzfree(payload);
-+
-+ return ret;
-+}
-+
-+/*
-+ * secure_read - copy the blob data to userspace in hex.
-+ * param[in]: key pointer to key struct
-+ * param[in]: buffer pointer to user data for creating key
-+ * param[in]: buflen is the length of the buffer
-+ * On success, return to userspace the secure key data size.
-+ */
-+static long secure_read(const struct key *key, char __user *buffer,
-+ size_t buflen)
-+{
-+ const struct secure_key_payload *p = NULL;
-+ char *ascii_buf;
-+ char *bufp;
-+ int i;
-+
-+ p = dereference_key_locked(key);
-+ if (!p)
-+ return -EINVAL;
-+
-+ if (buffer && buflen >= 2 * p->blob_len) {
-+ ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
-+ if (!ascii_buf)
-+ return -ENOMEM;
-+
-+ bufp = ascii_buf;
-+ for (i = 0; i < p->blob_len; i++)
-+ bufp = hex_byte_pack(bufp, p->blob[i]);
-+ if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
-+ kzfree(ascii_buf);
-+ return -EFAULT;
-+ }
-+ kzfree(ascii_buf);
-+ }
-+ return 2 * p->blob_len;
-+}
-+
-+/*
-+ * secure_destroy - clear and free the key's payload
-+ */
-+static void secure_destroy(struct key *key)
-+{
-+ kzfree(key->payload.data[0]);
-+}
-+
-+struct key_type key_type_secure = {
-+ .name = "secure",
-+ .instantiate = secure_instantiate,
-+ .destroy = secure_destroy,
-+ .read = secure_read,
-+};
-+EXPORT_SYMBOL_GPL(key_type_secure);
-+
-+static void secure_shash_release(void)
-+{
-+ if (hashalg)
-+ crypto_free_shash(hashalg);
-+ if (hmacalg)
-+ crypto_free_shash(hmacalg);
-+}
-+
-+static int __init secure_shash_alloc(void)
-+{
-+ int ret;
-+
-+ hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
-+ if (IS_ERR(hmacalg)) {
-+ pr_info("secure_key: could not allocate crypto %s\n",
-+ hmac_alg);
-+ return PTR_ERR(hmacalg);
-+ }
-+
-+ hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
-+ if (IS_ERR(hashalg)) {
-+ pr_info("secure_key: could not allocate crypto %s\n",
-+ hash_alg);
-+ ret = PTR_ERR(hashalg);
-+ goto hashalg_fail;
-+ }
-+
-+ return 0;
-+
-+hashalg_fail:
-+ crypto_free_shash(hmacalg);
-+ return ret;
-+}
-+
-+static int __init init_secure_key(void)
-+{
-+ int ret;
-+
-+ ret = secure_shash_alloc();
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = register_key_type(&key_type_secure);
-+ if (ret < 0)
-+ secure_shash_release();
-+ return ret;
-+}
-+
-+static void __exit cleanup_secure_key(void)
-+{
-+ secure_shash_release();
-+ unregister_key_type(&key_type_secure);
-+}
-+
-+late_initcall(init_secure_key);
-+module_exit(cleanup_secure_key);
-+
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/security/keys/securekey_desc.c
-@@ -0,0 +1,608 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2018 NXP
-+ *
-+ */
-+
-+#include <keys/secure-type.h>
-+#include "securekey_desc.h"
-+
-+/* key modifier for blob encapsulation & decapsulation descriptor */
-+u8 key_modifier[] = "SECURE_KEY";
-+u32 key_modifier_len = 10;
-+
-+void caam_sk_rng_desc(struct sk_req *skreq, struct sk_desc *skdesc)
-+{
-+ struct sk_fetch_rnd_data *fetch_rnd_data = NULL;
-+ struct random_desc *rnd_desc = NULL;
-+ size_t len = 0;
-+ u32 *desc = skreq->hwdesc;
-+
-+ init_job_desc(desc, 0);
-+
-+ fetch_rnd_data = &skreq->req_u.sk_fetch_rnd_data;
-+ rnd_desc = &skdesc->dma_u.random_descp;
-+ len = fetch_rnd_data->key_len;
-+
-+ /* command 0x82500000 */
-+ append_cmd(desc, CMD_OPERATION | OP_TYPE_CLASS1_ALG |
-+ OP_ALG_ALGSEL_RNG);
-+ /* command 0x60340000 | len */
-+ append_cmd(desc, CMD_FIFO_STORE | FIFOST_TYPE_RNGSTORE | len);
-+ append_ptr(desc, rnd_desc->rnd_data);
-+}
-+
-+void caam_sk_redblob_encap_desc(struct sk_req *skreq, struct sk_desc *skdesc)
-+{
-+ struct redblob_encap_desc *red_blob_desc =
-+ &skdesc->dma_u.redblob_encapdesc;
-+ struct sk_red_blob_encap *red_blob_req =
-+ &skreq->req_u.sk_red_blob_encap;
-+ u32 *desc = skreq->hwdesc;
-+
-+ init_job_desc(desc, 0);
-+
-+ /* Load class 2 key with key modifier. */
-+ append_key_as_imm(desc, key_modifier, key_modifier_len,
-+ key_modifier_len, CLASS_2 | KEY_DEST_CLASS_REG);
-+
-+ /* SEQ IN PTR Command. */
-+ append_seq_in_ptr(desc, red_blob_desc->in_data, red_blob_req->data_sz,
-+ 0);
-+
-+ /* SEQ OUT PTR Command. */
-+ append_seq_out_ptr(desc, red_blob_desc->redblob,
-+ red_blob_req->redblob_sz, 0);
-+
-+ /* RedBlob encapsulation PROTOCOL Command. */
-+ append_operation(desc, OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB);
-+}
-+
-+/* void caam_sk_redblob_decap_desc(struct sk_req *skreq, struct sk_desc *skdesc)
-+ * brief CAAM Descriptor creator from redblob to plaindata.
-+ * param[in] skreq Pointer to secure key request structure
-+ * param[in] skdesc Pointer to secure key descriptor structure
-+ */
-+void caam_sk_redblob_decap_desc(struct sk_req *skreq, struct sk_desc *skdesc)
-+{
-+ struct redblob_decap_desc *red_blob_desc =
-+ &skdesc->dma_u.redblob_decapdesc;
-+ struct sk_red_blob_decap *red_blob_req =
-+ &skreq->req_u.sk_red_blob_decap;
-+ u32 *desc = skreq->hwdesc;
-+
-+ init_job_desc(desc, 0);
-+
-+ /* Load class 2 key with key modifier. */
-+ append_key_as_imm(desc, key_modifier, key_modifier_len,
-+ key_modifier_len, CLASS_2 | KEY_DEST_CLASS_REG);
-+
-+ /* SEQ IN PTR Command. */
-+ append_seq_in_ptr(desc, red_blob_desc->redblob,
-+ red_blob_req->redblob_sz, 0);
-+
-+ /* SEQ OUT PTR Command. */
-+ append_seq_out_ptr(desc, red_blob_desc->out_data,
-+ red_blob_req->data_sz, 0);
-+
-+ /* RedBlob decapsulation PROTOCOL Command. */
-+ append_operation(desc, OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB);
-+}
-+
-+/* int caam_sk_get_random_map(struct device *dev, struct sk_req *req,
-+ * struct sk_desc *skdesc)
-+ * brief DMA map the buffer virtual pointers to physical address.
-+ * param[in] dev Pointer to job ring device structure
-+ * param[in] req Pointer to secure key request structure
-+ * param[in] skdesc Pointer to secure key descriptor structure
-+ * return 0 on success, error value otherwise.
-+ */
-+int caam_sk_get_random_map(struct device *dev, struct sk_req *req,
-+ struct sk_desc *skdesc)
-+{
-+ struct sk_fetch_rnd_data *fetch_rnd_data;
-+ struct random_desc *rnd_desc;
-+
-+ fetch_rnd_data = &req->req_u.sk_fetch_rnd_data;
-+ rnd_desc = &skdesc->dma_u.random_descp;
-+
-+ rnd_desc->rnd_data = dma_map_single(dev, fetch_rnd_data->data,
-+ fetch_rnd_data->key_len, DMA_FROM_DEVICE);
-+
-+ if (dma_mapping_error(dev, rnd_desc->rnd_data)) {
-+ dev_err(dev, "Unable to map memory\n");
-+ goto sk_random_map_fail;
-+ }
-+ return 0;
-+
-+sk_random_map_fail:
-+ return -ENOMEM;
-+}
-+
-+/* int caam_sk_redblob_encap_map(struct device *dev, struct sk_req *req,
-+ * struct sk_desc *skdesc)
-+ * brief DMA map the buffer virtual pointers to physical address.
-+ * param[in] dev Pointer to job ring device structure
-+ * param[in] req Pointer to secure key request structure
-+ * param[in] skdesc Pointer to secure key descriptor structure
-+ * return 0 on success, error value otherwise.
-+ */
-+int caam_sk_redblob_encap_map(struct device *dev, struct sk_req *req,
-+ struct sk_desc *skdesc)
-+{
-+ struct sk_red_blob_encap *red_blob_encap;
-+ struct redblob_encap_desc *red_blob_desc;
-+
-+ red_blob_encap = &req->req_u.sk_red_blob_encap;
-+ red_blob_desc = &skdesc->dma_u.redblob_encapdesc;
-+
-+ red_blob_desc->in_data = dma_map_single(dev, red_blob_encap->data,
-+ red_blob_encap->data_sz, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, red_blob_desc->in_data)) {
-+ dev_err(dev, "Unable to map memory\n");
-+ goto sk_data_fail;
-+ }
-+
-+ red_blob_desc->redblob = dma_map_single(dev, red_blob_encap->redblob,
-+ red_blob_encap->redblob_sz, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, red_blob_desc->redblob)) {
-+ dev_err(dev, "Unable to map memory\n");
-+ goto sk_redblob_fail;
-+ }
-+
-+ return 0;
-+
-+sk_redblob_fail:
-+ dma_unmap_single(dev, red_blob_desc->in_data, red_blob_encap->data_sz,
-+ DMA_TO_DEVICE);
-+sk_data_fail:
-+ return -ENOMEM;
-+}
-+
-+/* static int caam_sk_redblob_decap_map(struct device *dev,
-+ * struct sk_req *req,
-+ * struct sk_desc *skdesc)
-+ * brief DMA map the buffer virtual pointers to physical address.
-+ * param[in] dev Pointer to job ring device structure
-+ * param[in] req Pointer to secure key request structure
-+ * param[in] skdesc Pointer to secure key descriptor structure
-+ * return 0 on success, error value otherwise.
-+ */
-+int caam_sk_redblob_decap_map(struct device *dev, struct sk_req *req,
-+ struct sk_desc *skdesc)
-+{
-+ struct sk_red_blob_decap *red_blob_decap;
-+ struct redblob_decap_desc *red_blob_desc;
-+
-+ red_blob_decap = &req->req_u.sk_red_blob_decap;
-+ red_blob_desc = &skdesc->dma_u.redblob_decapdesc;
-+
-+ red_blob_desc->redblob = dma_map_single(dev, red_blob_decap->redblob,
-+ red_blob_decap->redblob_sz, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, red_blob_desc->redblob)) {
-+ dev_err(dev, "Unable to map memory\n");
-+ goto sk_redblob_fail;
-+ }
-+
-+ red_blob_desc->out_data = dma_map_single(dev, red_blob_decap->data,
-+ red_blob_decap->data_sz, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, red_blob_desc->out_data)) {
-+ dev_err(dev, "Unable to map memory\n");
-+ goto sk_data_fail;
-+ }
-+
-+ return 0;
-+
-+sk_data_fail:
-+ dma_unmap_single(dev, red_blob_desc->redblob,
-+ red_blob_decap->redblob_sz, DMA_TO_DEVICE);
-+sk_redblob_fail:
-+ return -ENOMEM;
-+}
-+
-+/* @fn void securekey_unmap(struct device *dev,
-+ * struct sk_desc *skdesc, struct sk_req *req)
-+ * @brief DMA unmap the buffer pointers.
-+ * @param[in] dev Pointer to job ring device structure
-+ * @param[in] skdesc Pointer to secure key descriptor structure
-+ * @param[in] req Pointer to secure key request structure
-+ */
-+void securekey_unmap(struct device *dev,
-+ struct sk_desc *skdesc, struct sk_req *req)
-+{
-+
-+ switch (req->type) {
-+ case sk_get_random:
-+ {
-+ struct sk_fetch_rnd_data *fetch_rnd_data;
-+ struct random_desc *rnd_desc;
-+
-+ fetch_rnd_data = &req->req_u.sk_fetch_rnd_data;
-+ rnd_desc = &skdesc->dma_u.random_descp;
-+
-+ /* Unmap Descriptor buffer pointers. */
-+ dma_unmap_single(dev, rnd_desc->rnd_data,
-+ fetch_rnd_data->key_len,
-+ DMA_FROM_DEVICE);
-+ break;
-+ }
-+ case sk_red_blob_enc:
-+ {
-+ struct sk_red_blob_encap *red_blob_encap;
-+ struct redblob_encap_desc *red_blob_desc;
-+
-+ red_blob_encap = &req->req_u.sk_red_blob_encap;
-+ red_blob_desc = &skdesc->dma_u.redblob_encapdesc;
-+
-+ /* Unmap Descriptor buffer pointers. */
-+ dma_unmap_single(dev, red_blob_desc->in_data,
-+ red_blob_encap->data_sz,
-+ DMA_TO_DEVICE);
-+
-+ dma_unmap_single(dev, red_blob_desc->redblob,
-+ red_blob_encap->redblob_sz,
-+ DMA_FROM_DEVICE);
-+
-+ break;
-+ }
-+ case sk_red_blob_dec:
-+ {
-+ struct sk_red_blob_decap *red_blob_decap;
-+ struct redblob_decap_desc *red_blob_desc;
-+
-+ red_blob_decap = &req->req_u.sk_red_blob_decap;
-+ red_blob_desc = &skdesc->dma_u.redblob_decapdesc;
-+
-+ /* Unmap Descriptor buffer pointers. */
-+ dma_unmap_single(dev, red_blob_desc->redblob,
-+ red_blob_decap->redblob_sz,
-+ DMA_TO_DEVICE);
-+
-+ dma_unmap_single(dev, red_blob_desc->out_data,
-+ red_blob_decap->data_sz,
-+ DMA_FROM_DEVICE);
-+
-+ break;
-+ }
-+ default:
-+ dev_err(dev, "Unable to find request type\n");
-+ break;
-+ }
-+ kfree(skdesc);
-+}
-+
-+/* int caam_securekey_desc_init(struct device *dev, struct sk_req *req)
-+ * brief CAAM Descriptor creator for secure key operations.
-+ * param[in] dev Pointer to job ring device structure
-+ * param[in] req Pointer to secure key request structure
-+ * return 0 on success, error value otherwise.
-+ */
-+int caam_securekey_desc_init(struct device *dev, struct sk_req *req)
-+{
-+ struct sk_desc *skdesc = NULL;
-+ int ret = 0;
-+
-+ switch (req->type) {
-+ case sk_get_random:
-+ {
-+ skdesc = kmalloc(sizeof(*skdesc), GFP_DMA);
-+ if (!skdesc) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ skdesc->req_type = req->type;
-+
-+ if (caam_sk_get_random_map(dev, req, skdesc)) {
-+ dev_err(dev, "caam get_random map fail\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ caam_sk_rng_desc(req, skdesc);
-+ break;
-+ }
-+ case sk_red_blob_enc:
-+ {
-+ skdesc = kmalloc(sizeof(*skdesc), GFP_DMA);
-+ if (!skdesc) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ skdesc->req_type = req->type;
-+
-+ if (caam_sk_redblob_encap_map(dev, req, skdesc)) {
-+ dev_err(dev, "caam redblob_encap map fail\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* Descriptor function to create redblob from data. */
-+ caam_sk_redblob_encap_desc(req, skdesc);
-+ break;
-+ }
-+
-+ case sk_red_blob_dec:
-+ {
-+ skdesc = kmalloc(sizeof(*skdesc), GFP_DMA);
-+ if (!skdesc) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ skdesc->req_type = req->type;
-+
-+ if (caam_sk_redblob_decap_map(dev, req, skdesc)) {
-+ dev_err(dev, "caam redblob_decap map fail\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* Descriptor function to decap data from redblob. */
-+ caam_sk_redblob_decap_desc(req, skdesc);
-+ break;
-+ }
-+ default:
-+ pr_debug("Unknown request type\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ req->desc_pointer = (void *)skdesc;
-+
-+out:
-+ return ret;
-+}
-+
-+/* static void caam_op_done (struct device *dev, u32 *desc, u32 ret,
-+ * void *context)
-+ * brief callback function to be called when descriptor executed.
-+ * param[in] dev Pointer to device structure
-+ * param[in] desc descriptor pointer
-+ * param[in] ret return status of Job submitted
-+ * param[in] context void pointer
-+ */
-+static void caam_op_done(struct device *dev, u32 *desc, u32 ret,
-+ void *context)
-+{
-+ struct sk_req *req = context;
-+
-+ if (ret) {
-+ dev_err(dev, "caam op done err: %x\n", ret);
-+ /* print the error source name. */
-+ caam_jr_strstatus(dev, ret);
-+ }
-+ /* Call securekey_unmap function for unmapping the buffer pointers. */
-+ securekey_unmap(dev, req->desc_pointer, req);
-+
-+ req->ret = ret;
-+ complete(&req->comp);
-+}
-+
-+
-+/* static int sk_job_submit(struct device *jrdev, struct sk_req *req)
-+ * brief Enqueue a Job descriptor to Job ring and wait until SEC returns.
-+ * param[in] jrdev Pointer to job ring device structure
-+ * param[in] req Pointer to secure key request structure
-+ * return 0 on success, error value otherwise.
-+ */
-+static int sk_job_submit(struct device *jrdev, struct sk_req *req)
-+{
-+ int ret;
-+
-+ init_completion(&req->comp);
-+
-+ /* caam_jr_enqueue function for Enqueue a job descriptor */
-+ ret = caam_jr_enqueue(jrdev, req->hwdesc, caam_op_done, req);
-+ if (!ret)
-+ wait_for_completion_interruptible(&req->comp);
-+
-+ ret = req->ret;
-+ return ret;
-+}
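The helper above, together with caam_op_done(), is the standard CAAM job-ring pattern: build a hardware descriptor, enqueue it with caam_jr_enqueue(), and sleep on a completion that the callback signals. A minimal sketch of that contract under the same assumptions (caam_jr_enqueue() and caam_jr_strstatus() are the real driver APIs; sk_wait, sk_done and sk_run are illustrative names, not part of the patch):

    #include <linux/completion.h>
    #include <linux/device.h>
    #include "jr.h"            /* caam_jr_enqueue() */
    #include "error.h"         /* caam_jr_strstatus() */

    struct sk_wait {                       /* illustrative context object */
            struct completion comp;
            u32 status;
    };

    static void sk_done(struct device *jrdev, u32 *desc, u32 status, void *areq)
    {
            struct sk_wait *w = areq;

            if (status)
                    caam_jr_strstatus(jrdev, status);   /* decode SEC status bits */
            w->status = status;
            complete(&w->comp);                         /* wake the submitter */
    }

    static int sk_run(struct device *jrdev, u32 *hwdesc)
    {
            struct sk_wait w;
            int ret;

            init_completion(&w.comp);
            ret = caam_jr_enqueue(jrdev, hwdesc, sk_done, &w);
            if (ret)
                    return ret;             /* job ring rejected the descriptor */
            wait_for_completion_interruptible(&w.comp);
            return w.status ? -EIO : 0;
    }

Once the job is accepted, the callback is the only writer of the status, so the submitter simply blocks until complete() runs.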
-+
-+/* caam_get_random(struct secure_key_payload *p, enum sk_req_type fetch_rnd,
-+ * struct device *dev)
-+ * Create the random number of the specified length using CAAM block
-+ * param[in] p pointer to the secure key payload that receives the random bytes
-+ * param[in] fetch_rnd request type for fetching random data
-+ * param[in] dev Pointer to job ring device structure
-+ * If operation is successful return 0, otherwise error.
-+ */
-+int caam_get_random(struct secure_key_payload *p, enum sk_req_type fetch_rnd,
-+ struct device *dev)
-+{
-+ struct sk_fetch_rnd_data *fetch_rnd_data = NULL;
-+ struct sk_req *req = NULL;
-+ int ret = 0;
-+ void *temp = NULL;
-+
-+ req = kmalloc(sizeof(struct sk_req), GFP_DMA);
-+ if (!req) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ req->type = fetch_rnd;
-+ fetch_rnd_data = &(req->req_u.sk_fetch_rnd_data);
-+
-+ /* initialise with key length */
-+ fetch_rnd_data->key_len = p->key_len;
-+
-+ temp = kmalloc(fetch_rnd_data->key_len, GFP_DMA);
-+ if (!temp) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ fetch_rnd_data->data = temp;
-+
-+ ret = caam_securekey_desc_init(dev, req);
-+
-+ if (ret) {
-+ pr_info("caam_securekey_desc_init failed\n");
-+ goto out;
-+ }
-+
-+ ret = sk_job_submit(dev, req);
-+ if (!ret) {
-+ /*Copy output to key buffer. */
-+ memcpy(p->key, fetch_rnd_data->data, p->key_len);
-+ } else {
-+ ret = -EINVAL;
-+ }
-+
-+out:
-+ if (req)
-+ kfree(req);
-+
-+ if (temp)
-+ kfree(temp);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(caam_get_random);
-+
-+/* key_deblob(struct secure_key_payload *p, enum sk_req_type decap_type
-+ * struct device *dev)
-+ * Deblobify the blob to get the key data and fill in secure key payload struct
-+ * param[in] p pointer to the secure key payload
-+ * param[in] decap_type operation to be done.
-+ * param[in] dev Pointer to job ring device structure
-+ * If operation is successful return 0, otherwise error.
-+ */
-+int key_deblob(struct secure_key_payload *p, enum sk_req_type decap_type,
-+ struct device *dev)
-+{
-+ unsigned int blob_len;
-+ struct sk_red_blob_decap *d_blob;
-+ struct sk_req *req = NULL;
-+ int total_sz = 0, *temp = NULL, ret = 0;
-+
-+ req = kmalloc(sizeof(struct sk_req), GFP_DMA);
-+ if (!req) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ d_blob = &(req->req_u.sk_red_blob_decap);
-+ blob_len = p->blob_len;
-+ req->type = decap_type;
-+
-+ /*
-+ * The red blob size is the blob_len filled in the payload struct;
-+ * data_sz, i.e. the key size, is blob_len minus the blob header size.
-+ */
-+
-+ d_blob->redblob_sz = blob_len;
-+ d_blob->data_sz = blob_len - (SK_BLOB_KEY_SZ + SK_BLOB_MAC_SZ);
-+ total_sz = d_blob->data_sz + d_blob->redblob_sz;
-+
-+ temp = kmalloc(total_sz, GFP_DMA);
-+ if (!temp) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ req->mem_pointer = temp;
-+ d_blob->redblob = temp;
-+ d_blob->data = d_blob->redblob + d_blob->redblob_sz;
-+ memcpy(d_blob->redblob, p->blob, blob_len);
-+
-+ ret = caam_securekey_desc_init(dev, req);
-+
-+ if (ret) {
-+ pr_info("caam_securekey_desc_init: Failed\n");
-+ goto out;
-+ }
-+
-+ ret = sk_job_submit(dev, req);
-+ if (!ret) {
-+ /*Copy output to key buffer. */
-+ p->key_len = d_blob->data_sz;
-+ memcpy(p->key, d_blob->data, p->key_len);
-+ } else {
-+ ret = -EINVAL;
-+ }
-+
-+out:
-+ if (temp)
-+ kfree(temp);
-+ if (req)
-+ kfree(req);
-+ return ret;
-+}
-+EXPORT_SYMBOL(key_deblob);
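The size bookkeeping in key_deblob() is plain arithmetic: a red blob wraps the key in a 32-byte blob key plus a 16-byte MAC (SK_BLOB_KEY_SZ and SK_BLOB_MAC_SZ in securekey_desc.h below), and both buffers share one allocation. A standalone sketch with an illustrative 80-byte blob, not a value taken from real hardware:

    #include <stdio.h>

    #define SK_BLOB_KEY_SZ 32   /* blob key size, as in securekey_desc.h */
    #define SK_BLOB_MAC_SZ 16   /* blob MAC size */

    int main(void)
    {
            unsigned int blob_len = 80;   /* example red blob from the payload */
            unsigned int data_sz  = blob_len - (SK_BLOB_KEY_SZ + SK_BLOB_MAC_SZ);
            unsigned int total_sz = data_sz + blob_len;  /* single kmalloc() in key_deblob() */

            printf("redblob %u bytes -> key %u bytes, scratch %u bytes\n",
                   blob_len, data_sz, total_sz);
            return 0;   /* prints: redblob 80 bytes -> key 32 bytes, scratch 112 bytes */
    }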
-+
-+/* key_blob(struct secure_key_payload *p, enum sk_req_type encap_type,
-+ * struct device *dev)
-+ * Blobify the key data to produce the blob; only the blob, not the
-+ * plain key, is exposed to userspace.
-+ * param[in] p pointer to the secure key payload
-+ * param[in] encap_type operation to be done.
-+ * param[in] dev Pointer to job ring device structure
-+ * If operation is successful return 0, otherwise error.
-+ */
-+int key_blob(struct secure_key_payload *p, enum sk_req_type encap_type,
-+ struct device *dev)
-+{
-+ unsigned int key_len;
-+ struct sk_red_blob_encap *k_blob;
-+ struct sk_req *req = NULL;
-+ int total_sz = 0, *temp = NULL, ret = 0;
-+
-+ req = kmalloc(sizeof(struct sk_req), GFP_DMA);
-+ if (!req) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ key_len = p->key_len;
-+
-+ req->type = encap_type;
-+ k_blob = &(req->req_u.sk_red_blob_encap);
-+
-+ /*
-+ * data_sz is the key length; the corresponding blob_len is
-+ * key_len plus the blob header size.
-+ */
-+
-+ k_blob->data_sz = key_len;
-+ k_blob->redblob_sz = key_len + SK_BLOB_KEY_SZ + SK_BLOB_MAC_SZ;
-+ total_sz = k_blob->data_sz + k_blob->redblob_sz;
-+
-+ temp = kmalloc(total_sz, GFP_DMA);
-+ if (!temp) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ req->mem_pointer = temp;
-+ k_blob->data = temp;
-+
-+ k_blob->redblob = k_blob->data + k_blob->data_sz;
-+ memcpy(k_blob->data, p->key, key_len);
-+
-+ ret = caam_securekey_desc_init(dev, req);
-+
-+ if (ret) {
-+ pr_info("caam_securekey_desc_init failed\n");
-+ goto out;
-+ }
-+
-+ ret = sk_job_submit(dev, req);
-+ if (!ret) {
-+ /*Copy output to key buffer. */
-+ p->blob_len = k_blob->redblob_sz;
-+ memcpy(p->blob, k_blob->redblob, p->blob_len);
-+ } else {
-+ ret = -EINVAL;
-+ }
-+
-+out:
-+ if (temp)
-+ kfree(req->mem_pointer);
-+ if (req)
-+ kfree(req);
-+ return ret;
-+
-+}
-+EXPORT_SYMBOL(key_blob);
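key_blob() works in the opposite direction and lays its single scratch buffer out as the plain key followed by the red blob. A small hedged sketch of that layout for an illustrative 32-byte key:

    #include <stdio.h>

    #define SK_BLOB_KEY_SZ 32
    #define SK_BLOB_MAC_SZ 16

    int main(void)
    {
            unsigned int key_len    = 32;   /* example key length from the payload */
            unsigned int redblob_sz = key_len + SK_BLOB_KEY_SZ + SK_BLOB_MAC_SZ;

            /* scratch layout used by key_blob(): [ key | red blob ] */
            printf("key at offset 0 (%u bytes), redblob at offset %u (%u bytes)\n",
                   key_len, key_len, redblob_sz);
            return 0;   /* a 32-byte key yields an 80-byte red blob */
    }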
---- /dev/null
-+++ b/security/keys/securekey_desc.h
-@@ -0,0 +1,141 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * Copyright 2018 NXP
-+ *
-+ */
-+#ifndef _SECUREKEY_DESC_H_
-+#define _SECUREKEY_DESC_H_
-+
-+#include "compat.h"
-+#include "regs.h"
-+#include "intern.h"
-+#include "desc.h"
-+#include "desc_constr.h"
-+#include "jr.h"
-+#include "error.h"
-+#include "pdb.h"
-+
-+#define SK_BLOB_KEY_SZ 32 /* Blob key size. */
-+#define SK_BLOB_MAC_SZ 16 /* Blob MAC size. */
-+
-+/*
-+ * brief defines different kinds of operations supported by this module.
-+ */
-+enum sk_req_type {
-+ sk_get_random,
-+ sk_red_blob_enc,
-+ sk_red_blob_dec,
-+};
-+
-+
-+/*
-+ * struct random_desc
-+ * param[out] rnd_data output buffer for random data.
-+ */
-+struct random_desc {
-+ dma_addr_t rnd_data;
-+};
-+
-+/* struct redblob_encap_desc
-+ * details Structure containing dma address for redblob encapsulation.
-+ * param[in] in_data input data to redblob encap descriptor.
-+ * param[out] redblob output buffer for redblob.
-+ */
-+struct redblob_encap_desc {
-+ dma_addr_t in_data;
-+ dma_addr_t redblob;
-+};
-+
-+/* struct redblob_decap_desc
-+ * details Structure containing dma address for redblob decapsulation.
-+ * param[in] redblob input buffer to redblob decap descriptor.
-+ * param[out] out_data output data from redblob decap descriptor.
-+ */
-+struct redblob_decap_desc {
-+ dma_addr_t redblob;
-+ dma_addr_t out_data;
-+};
-+
-+/* struct sk_desc
-+ * details Structure for securekey descriptor creation.
-+ * param[in] req_type operation supported.
-+ * param[in] dma_u union of struct for supported operation.
-+ */
-+struct sk_desc {
-+ u32 req_type;
-+ union {
-+ struct redblob_encap_desc redblob_encapdesc;
-+ struct redblob_decap_desc redblob_decapdesc;
-+ struct random_desc random_descp;
-+ } dma_u;
-+};
-+
-+/* struct sk_fetch_rnd_data
-+ * descriptor structure containing the output data pointer and key length.
-+ */
-+struct sk_fetch_rnd_data {
-+ void *data;
-+ size_t key_len;
-+};
-+
-+/* struct sk_red_blob_encap
-+ * details Structure containing buffer pointers for redblob encapsulation.
-+ * param[in] data Input data.
-+ * param[in] data_sz size of Input data.
-+ * param[out] redblob output buffer for redblob.
-+ * param[in] redblob_sz size of redblob.
-+ */
-+struct sk_red_blob_encap {
-+ void *data;
-+ uint32_t data_sz;
-+ void *redblob;
-+ uint32_t redblob_sz;
-+};
-+
-+/* struct sk_red_blob_decap
-+ * details Structure containing buffer pointers for redblob decapsulation.
-+ * param[in] redblob Input redblob.
-+ * param[in] redblob_sz size of redblob.
-+ * param[out] data output buffer for data.
-+ * param[in] data_sz size of output data.
-+ */
-+struct sk_red_blob_decap {
-+ void *redblob;
-+ uint32_t redblob_sz;
-+ void *data;
-+ uint32_t data_sz;
-+};
-+
-+/* struct sk_req
-+ * details Structure for securekey request creation.
-+ * param[in] type operation supported.
-+ * param[in] req_u union of struct for supported operation.
-+ * param[out] ret return status of CAAM operation.
-+ * param[in] mem_pointer memory pointer for allocated kernel memory.
-+ * param[in] desc_pointer Pointer to securekey descriptor creation structure.
-+ * param[in] comp struct completion object.
-+ * param[in] hwdesc contains descriptor instructions.
-+ */
-+struct sk_req {
-+ enum sk_req_type type;
-+ void *arg;
-+ union {
-+ struct sk_red_blob_encap sk_red_blob_encap;
-+ struct sk_red_blob_decap sk_red_blob_decap;
-+ struct sk_fetch_rnd_data sk_fetch_rnd_data;
-+ } req_u;
-+ int ret;
-+ void *mem_pointer;
-+ void *desc_pointer;
-+ struct completion comp;
-+ u32 hwdesc[MAX_CAAM_DESCSIZE];
-+};
-+
-+int caam_get_random(struct secure_key_payload *p, enum sk_req_type fetch_rnd,
-+ struct device *dev);
-+int key_blob(struct secure_key_payload *p, enum sk_req_type encap_type,
-+ struct device *dev);
-+int key_deblob(struct secure_key_payload *p, enum sk_req_type decap_type,
-+ struct device *dev);
-+
-+#endif /*_SECUREKEY_DESC_H_*/
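Taken together, the header exports three entry points - caam_get_random(), key_blob() and key_deblob() - all operating on a struct secure_key_payload (key/key_len/blob/blob_len), which is defined by the securekey headers added elsewhere in this patch. A hedged sketch of how a caller might round-trip a key; the wrapper function is illustrative and assumes a job-ring device obtained elsewhere (for example via caam_jr_alloc()) and a payload whose buffers are large enough:

    #include <linux/device.h>
    #include "securekey_desc.h"   /* caam_get_random(), key_blob(), key_deblob() */

    static int sk_roundtrip(struct device *jrdev, struct secure_key_payload *p)
    {
            int ret;

            p->key_len = 32;                            /* request a 256-bit random key */
            ret = caam_get_random(p, sk_get_random, jrdev);
            if (ret)
                    return ret;

            ret = key_blob(p, sk_red_blob_enc, jrdev);  /* key -> p->blob / p->blob_len */
            if (ret)
                    return ret;

            return key_deblob(p, sk_red_blob_dec, jrdev); /* blob -> p->key / p->key_len */
    }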
diff --git a/target/linux/layerscape/patches-4.14/818-qspi-support-layerscape.patch b/target/linux/layerscape/patches-4.14/818-qspi-support-layerscape.patch
deleted file mode 100644
index 1546e4b6a7..0000000000
--- a/target/linux/layerscape/patches-4.14/818-qspi-support-layerscape.patch
+++ /dev/null
@@ -1,745 +0,0 @@
-From fe21ef44284a3aa6fd80448e4ab2e1e8a55fb926 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:59 +0800
-Subject: [PATCH] qspi: support layerscape
-
-This is an integrated patch of qspi for layerscape
-
-Signed-off-by: Abhimanyu Saini <abhimanyu.saini@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Chuanhua Han <chuanhua.han@nxp.com>
-Signed-off-by: Cyrille Pitchen <cyrille.pitchen@wedev4u.fr>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
-Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
-Signed-off-by: Suresh Gupta <suresh.gupta@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
----
- drivers/mtd/spi-nor/fsl-quadspi.c | 444 +++++++++++++++++++-----------
- drivers/mtd/spi-nor/spi-nor.c | 5 +
- drivers/spi/spi-fsl-dspi.c | 4 +-
- 3 files changed, 291 insertions(+), 162 deletions(-)
-
---- a/drivers/mtd/spi-nor/fsl-quadspi.c
-+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
-@@ -41,6 +41,7 @@
- #define QUADSPI_QUIRK_TKT253890 (1 << 2)
- /* Controller cannot wake up from wait mode, TKT245618 */
- #define QUADSPI_QUIRK_TKT245618 (1 << 3)
-+#define QUADSPI_ADDR_REMAP (1 << 4)
-
- /* The registers */
- #define QUADSPI_MCR 0x00
-@@ -183,7 +184,7 @@
-
- /* Macros for constructing the LUT register. */
- #define LUT0(ins, pad, opr) \
-- (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
-+ (((opr) << OPRND0_SHIFT) | ((pad) << PAD0_SHIFT) | \
- ((LUT_##ins) << INSTR0_SHIFT))
-
- #define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
-@@ -193,27 +194,29 @@
- #define QUADSPI_LUT_NUM 64
-
- /* SEQID -- we can have 16 seqids at most. */
--#define SEQID_READ 0
--#define SEQID_WREN 1
--#define SEQID_WRDI 2
--#define SEQID_RDSR 3
--#define SEQID_SE 4
--#define SEQID_CHIP_ERASE 5
--#define SEQID_PP 6
--#define SEQID_RDID 7
--#define SEQID_WRSR 8
--#define SEQID_RDCR 9
--#define SEQID_EN4B 10
--#define SEQID_BRWR 11
-+/* LUT0 programmed by bootloader, for run-time create entry for LUT seqid 1 */
-+#define SEQID_LUT0_BOOTLOADER 0
-+#define SEQID_LUT1_RUNTIME 1
-+#define SEQID_LUT2_AHBREAD 2
-
- #define QUADSPI_MIN_IOMAP SZ_4M
-
-+enum fsl_qspi_ops {
-+ FSL_QSPI_OPS_READ = 0,
-+ FSL_QSPI_OPS_WRITE,
-+ FSL_QSPI_OPS_ERASE,
-+ FSL_QSPI_OPS_READ_REG,
-+ FSL_QSPI_OPS_WRITE_REG,
-+ FSL_QSPI_OPS_WRITE_BUF_REG,
-+};
-+
- enum fsl_qspi_devtype {
- FSL_QUADSPI_VYBRID,
- FSL_QUADSPI_IMX6SX,
- FSL_QUADSPI_IMX7D,
- FSL_QUADSPI_IMX6UL,
- FSL_QUADSPI_LS1021A,
-+ FSL_QUADSPI_LS2080A,
- };
-
- struct fsl_qspi_devtype_data {
-@@ -267,6 +270,15 @@ static struct fsl_qspi_devtype_data ls10
- .driver_data = 0,
- };
-
-+static const struct fsl_qspi_devtype_data ls2080a_data = {
-+ .devtype = FSL_QUADSPI_LS2080A,
-+ .rxfifo = 128,
-+ .txfifo = 64,
-+ .ahb_buf_size = 1024,
-+ .driver_data = QUADSPI_QUIRK_TKT253890 | QUADSPI_ADDR_REMAP,
-+};
-+
-+
- #define FSL_QSPI_MAX_CHIP 4
- struct fsl_qspi {
- struct spi_nor nor[FSL_QSPI_MAX_CHIP];
-@@ -310,6 +322,22 @@ static inline int needs_wakeup_wait_mode
- }
-
- /*
-+ * The QSPI memory region is split into two parts: a 256MB region located
-+ * in the least significant 4GB of the SoC address space and a 3.75GB region
-+ * located above the least significant 4GB of the SoC address space.
-+ *
-+ * The 4GB QSPI address space map is shown below.
-+ *
-+ * SoC Address QSPI Address
-+ * 0x00_2000_0000-0x00_2FFF_FFFF 0x00_0000_0000-0x00_0FFF_FFFF First 256MB
-+ * 0x04_1000_0000-0x04_FFFF_FFFF 0x00_1000_0000-0x00_FFFF_FFFF Last 3.75GB
-+ */
-+static inline int need_address_remap(struct fsl_qspi *q)
-+{
-+ return q->devtype_data->driver_data & QUADSPI_ADDR_REMAP;
-+}
-+
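On parts flagged with QUADSPI_ADDR_REMAP (the ls2080a_data entry above), the flash address registers take QSPI-relative addresses rather than SoC physical addresses, which is why fsl_qspi_runcmd() and fsl_qspi_set_map_addr() later substitute 0 for q->memmap_phy. A standalone sketch of the effect, using the first window from the table above as an illustrative base and ignoring chip_base_addr:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t memmap_phy = 0x20000000;  /* SoC view of the first 256MB window */
            uint32_t offs       = 0x00100000;  /* offset of the access within the flash */
            int need_remap      = 1;           /* QUADSPI_ADDR_REMAP set for this SoC */

            /* mirrors the SFAR programming: SoC address without remap, QSPI address with it */
            uint32_t base = need_remap ? 0 : memmap_phy;

            printf("SFAR = 0x%08x\n", base + offs);
            return 0;   /* 0x00100000 with remap, 0x20100000 without */
    }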
-+/*
- * R/W functions for big- or little-endian registers:
- * The qSPI controller's endian is independent of the CPU core's endian.
- * So far, although the CPU core is little-endian but the qSPI have two
-@@ -368,137 +396,160 @@ static irqreturn_t fsl_qspi_irq_handler(
- return IRQ_HANDLED;
- }
-
--static void fsl_qspi_init_lut(struct fsl_qspi *q)
-+static inline s8 pad_count(s8 pad_val)
- {
-+ s8 count = -1;
-+
-+ if (!pad_val)
-+ return 0;
-+
-+ while (pad_val) {
-+ pad_val >>= 1;
-+ count++;
-+ }
-+ return count;
-+}
-+
-+/*
-+ * Prepare LUT entry for the input cmd.
-+ * Protocol info is present in the struct spi_nor instance, from which
-+ * fields like cmd, data and addrlen, along with the pad info, are parsed.
-+ */
-+static void fsl_qspi_prepare_lut(struct spi_nor *nor,
-+ enum fsl_qspi_ops ops, u8 cmd)
-+{
-+ struct fsl_qspi *q = nor->priv;
- void __iomem *base = q->iobase;
- int rxfifo = q->devtype_data->rxfifo;
-+ int txfifo = q->devtype_data->txfifo;
- u32 lut_base;
-- int i;
-+ u8 cmd_pad, addr_pad, data_pad, dummy_pad;
-+ enum spi_nor_protocol protocol = 0;
-+ u8 addrlen = 0;
-+ u8 read_dm, opcode;
-+ int stop_lut;
-+
-+ read_dm = opcode = cmd_pad = addr_pad = data_pad = dummy_pad = 0;
-+
-+ switch (ops) {
-+ case FSL_QSPI_OPS_READ_REG:
-+ case FSL_QSPI_OPS_WRITE_REG:
-+ case FSL_QSPI_OPS_WRITE_BUF_REG:
-+ opcode = cmd;
-+ protocol = nor->reg_proto;
-+ break;
-+ case FSL_QSPI_OPS_READ:
-+ opcode = cmd;
-+ read_dm = nor->read_dummy;
-+ protocol = nor->read_proto;
-+ break;
-+ case FSL_QSPI_OPS_WRITE:
-+ opcode = cmd;
-+ protocol = nor->write_proto;
-+ break;
-+ case FSL_QSPI_OPS_ERASE:
-+ opcode = cmd;
-+ break;
-+ default:
-+ dev_err(q->dev, "Unsupported operation 0x%.2x\n", ops);
-+ return;
-+ }
-+
-+ if (protocol) {
-+ cmd_pad = spi_nor_get_protocol_inst_nbits(protocol);
-+ addr_pad = spi_nor_get_protocol_addr_nbits(protocol);
-+ data_pad = spi_nor_get_protocol_data_nbits(protocol);
-+ }
-+
-+ dummy_pad = data_pad;
-
-- struct spi_nor *nor = &q->nor[0];
-- u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
-- u8 read_op = nor->read_opcode;
-- u8 read_dm = nor->read_dummy;
-+ dev_dbg(q->dev, "ops:%x opcode:%x pad[cmd:%d, addr:%d, data:%d]\n",
-+ ops, opcode, cmd_pad, addr_pad, data_pad);
-
- fsl_qspi_unlock_lut(q);
-
-- /* Clear all the LUT table */
-- for (i = 0; i < QUADSPI_LUT_NUM; i++)
-- qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
--
-- /* Read */
-- lut_base = SEQID_READ * 4;
--
-- qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
-- base + QUADSPI_LUT(lut_base));
-- qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
-- LUT1(FSL_READ, PAD4, rxfifo),
-- base + QUADSPI_LUT(lut_base + 1));
--
-- /* Write enable */
-- lut_base = SEQID_WREN * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Page Program */
-- lut_base = SEQID_PP * 4;
--
-- qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
-- LUT1(ADDR, PAD1, addrlen),
-- base + QUADSPI_LUT(lut_base));
-- qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
-- base + QUADSPI_LUT(lut_base + 1));
--
-- /* Read Status */
-- lut_base = SEQID_RDSR * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) |
-- LUT1(FSL_READ, PAD1, 0x1),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Erase a sector */
-- lut_base = SEQID_SE * 4;
--
-- qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
-- LUT1(ADDR, PAD1, addrlen),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Erase the whole chip */
-- lut_base = SEQID_CHIP_ERASE * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
-- base + QUADSPI_LUT(lut_base));
--
-- /* READ ID */
-- lut_base = SEQID_RDID * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) |
-- LUT1(FSL_READ, PAD1, 0x8),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Write Register */
-- lut_base = SEQID_WRSR * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) |
-- LUT1(FSL_WRITE, PAD1, 0x2),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Read Configuration Register */
-- lut_base = SEQID_RDCR * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) |
-- LUT1(FSL_READ, PAD1, 0x1),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Write disable */
-- lut_base = SEQID_WRDI * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Enter 4 Byte Mode (Micron) */
-- lut_base = SEQID_EN4B * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B),
-- base + QUADSPI_LUT(lut_base));
--
-- /* Enter 4 Byte Mode (Spansion) */
-- lut_base = SEQID_BRWR * 4;
-- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
-- base + QUADSPI_LUT(lut_base));
-+ /* Dynamic LUT */
-+ lut_base = SEQID_LUT1_RUNTIME * 4;
-+ if (ops == FSL_QSPI_OPS_READ)
-+ lut_base = SEQID_LUT2_AHBREAD * 4;
-+
-+ /* default, STOP instruction to be programmed in (lut_base + 1) reg */
-+ stop_lut = 1;
-+ switch (ops) {
-+ case FSL_QSPI_OPS_READ_REG:
-+ qspi_writel(q, LUT0(CMD, pad_count(cmd_pad), opcode) |
-+ LUT1(FSL_READ, pad_count(data_pad), rxfifo),
-+ base + QUADSPI_LUT(lut_base));
-+ break;
-+ case FSL_QSPI_OPS_WRITE_REG:
-+ qspi_writel(q, LUT0(CMD, pad_count(cmd_pad), opcode),
-+ base + QUADSPI_LUT(lut_base));
-+ break;
-+ case FSL_QSPI_OPS_WRITE_BUF_REG:
-+ qspi_writel(q, LUT0(CMD, pad_count(cmd_pad), opcode) |
-+ LUT1(FSL_WRITE, pad_count(data_pad), txfifo),
-+ base + QUADSPI_LUT(lut_base));
-+ break;
-+ case FSL_QSPI_OPS_READ:
-+ case FSL_QSPI_OPS_WRITE:
-+ case FSL_QSPI_OPS_ERASE:
-+ /* Common for Read, Write and Erase ops. */
-+
-+ addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
-+
-+ qspi_writel(q, LUT0(CMD, pad_count(cmd_pad), opcode) |
-+ LUT1(ADDR, pad_count(addr_pad), addrlen),
-+ base + QUADSPI_LUT(lut_base));
-+ /*
-+ * For Erase ops - Data and Dummy not required.
-+ * For Write ops - Dummy not required.
-+ */
-
-- fsl_qspi_lock_lut(q);
--}
-+ if (ops == FSL_QSPI_OPS_READ) {
-
--/* Get the SEQID for the command */
--static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
--{
-- switch (cmd) {
-- case SPINOR_OP_READ_1_1_4:
-- case SPINOR_OP_READ_1_1_4_4B:
-- return SEQID_READ;
-- case SPINOR_OP_WREN:
-- return SEQID_WREN;
-- case SPINOR_OP_WRDI:
-- return SEQID_WRDI;
-- case SPINOR_OP_RDSR:
-- return SEQID_RDSR;
-- case SPINOR_OP_SE:
-- return SEQID_SE;
-- case SPINOR_OP_CHIP_ERASE:
-- return SEQID_CHIP_ERASE;
-- case SPINOR_OP_PP:
-- return SEQID_PP;
-- case SPINOR_OP_RDID:
-- return SEQID_RDID;
-- case SPINOR_OP_WRSR:
-- return SEQID_WRSR;
-- case SPINOR_OP_RDCR:
-- return SEQID_RDCR;
-- case SPINOR_OP_EN4B:
-- return SEQID_EN4B;
-- case SPINOR_OP_BRWR:
-- return SEQID_BRWR;
-+ /*
-+ * For the SPINOR_OP_READ and SPINOR_OP_READ_4B commands the
-+ * dummy cycle count is 0.
-+ */
-+ if (read_dm)
-+ qspi_writel(q,
-+ LUT0(DUMMY, pad_count(dummy_pad),
-+ read_dm) |
-+ LUT1(FSL_READ, pad_count(data_pad),
-+ rxfifo),
-+ base + QUADSPI_LUT(lut_base + 1));
-+ else
-+ qspi_writel(q,
-+ LUT0(FSL_READ, pad_count(data_pad),
-+ rxfifo),
-+ base + QUADSPI_LUT(lut_base + 1));
-+
-+ stop_lut = 2;
-+
-+ /* TODO Add condition to check if READ is IP/AHB. */
-+
-+ /* For AHB read, add seqid in BFGENCR register. */
-+ qspi_writel(q,
-+ SEQID_LUT2_AHBREAD <<
-+ QUADSPI_BFGENCR_SEQID_SHIFT,
-+ q->iobase + QUADSPI_BFGENCR);
-+ }
-+
-+ if (ops == FSL_QSPI_OPS_WRITE) {
-+ qspi_writel(q, LUT0(FSL_WRITE, pad_count(data_pad), 0),
-+ base + QUADSPI_LUT(lut_base + 1));
-+ stop_lut = 2;
-+ }
-+ break;
- default:
-- if (cmd == q->nor[0].erase_opcode)
-- return SEQID_SE;
-- dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
-+ dev_err(q->dev, "Unsupported operation 0x%.2x\n", ops);
- break;
- }
-- return -EINVAL;
-+
-+ /* prepare LUT for STOP instruction. */
-+ qspi_writel(q, 0, base + QUADSPI_LUT(lut_base + stop_lut));
-+
-+ fsl_qspi_lock_lut(q);
- }
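pad_count() above converts the number of I/O lines reported by the spi-nor core (1, 2 or 4) into the encoding the LUT PAD fields expect - effectively log2 for the power-of-two widths - and each 32-bit LUT word then packs two instruction/pad/operand triplets via LUT0()/LUT1(). A standalone sketch of just that conversion (the LUT field offsets are left out, since they live in the register header):

    #include <stdio.h>

    /* same arithmetic as pad_count() in the patch */
    static signed char pad_count(signed char pad_val)
    {
            signed char count = -1;

            if (!pad_val)
                    return 0;
            while (pad_val) {
                    pad_val >>= 1;
                    count++;
            }
            return count;
    }

    int main(void)
    {
            printf("1 line -> %d, 2 lines -> %d, 4 lines -> %d\n",
                   pad_count(1), pad_count(2), pad_count(4));
            return 0;   /* prints: 1 line -> 0, 2 lines -> 1, 4 lines -> 2 */
    }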
-
- static int
-@@ -508,6 +559,10 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c
- int seqid;
- u32 reg, reg2;
- int err;
-+ u32 memmap_phyadd = q->memmap_phy;
-+
-+ if (need_address_remap(q))
-+ memmap_phyadd = 0;
-
- init_completion(&q->c);
- dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
-@@ -516,7 +571,7 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c
- /* save the reg */
- reg = qspi_readl(q, base + QUADSPI_MCR);
-
-- qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
-+ qspi_writel(q, memmap_phyadd + q->chip_base_addr + addr,
- base + QUADSPI_SFAR);
- qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
- base + QUADSPI_RBCT);
-@@ -533,7 +588,7 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c
- } while (1);
-
- /* trigger the LUT now */
-- seqid = fsl_qspi_get_seqid(q, cmd);
-+ seqid = SEQID_LUT1_RUNTIME;
- qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
- base + QUADSPI_IPCR);
-
-@@ -609,6 +664,7 @@ static ssize_t fsl_qspi_nor_write(struct
- {
- int ret, i, j;
- u32 tmp;
-+ u8 byts;
-
- dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
- q->chip_base_addr, to, count);
-@@ -618,10 +674,18 @@ static ssize_t fsl_qspi_nor_write(struct
- qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
-
- /* fill the TX data to the FIFO */
-+ byts = count;
- for (j = 0, i = ((count + 3) / 4); j < i; j++) {
-- tmp = fsl_qspi_endian_xchg(q, *txbuf);
-+ if (byts >= 4)
-+ tmp = fsl_qspi_endian_xchg(q, *txbuf);
-+ else {
-+ memcpy(&tmp, txbuf, byts);
-+ tmp = fsl_qspi_endian_xchg(q, tmp);
-+ }
-+
- qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
- txbuf++;
-+ byts -= 4;
- }
-
- /* fill the TXFIFO upto 16 bytes for i.MX7d */
-@@ -642,11 +706,15 @@ static void fsl_qspi_set_map_addr(struct
- {
- int nor_size = q->nor_size;
- void __iomem *base = q->iobase;
-+ u32 memmap_phyadd = q->memmap_phy;
-+
-+ if (need_address_remap(q))
-+ memmap_phyadd = 0;
-
-- qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
-- qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
-- qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
-- qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
-+ qspi_writel(q, nor_size + memmap_phyadd, base + QUADSPI_SFA1AD);
-+ qspi_writel(q, nor_size * 2 + memmap_phyadd, base + QUADSPI_SFA2AD);
-+ qspi_writel(q, nor_size * 3 + memmap_phyadd, base + QUADSPI_SFB1AD);
-+ qspi_writel(q, nor_size * 4 + memmap_phyadd, base + QUADSPI_SFB2AD);
- }
-
- /*
-@@ -662,7 +730,7 @@ static void fsl_qspi_set_map_addr(struct
- * causes the controller to clear the buffer, and use the sequence pointed
- * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
- */
--static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
-+static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
- {
- void __iomem *base = q->iobase;
- int seqid;
-@@ -685,8 +753,8 @@ static void fsl_qspi_init_abh_read(struc
- qspi_writel(q, 0, base + QUADSPI_BUF1IND);
- qspi_writel(q, 0, base + QUADSPI_BUF2IND);
-
-- /* Set the default lut sequence for AHB Read. */
-- seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
-+ /* Set dynamic LUT entry as LUT sequence for AHB Read. */
-+ seqid = SEQID_LUT2_AHBREAD;
- qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
- q->iobase + QUADSPI_BFGENCR);
- }
-@@ -729,7 +797,6 @@ static int fsl_qspi_nor_setup(struct fsl
- void __iomem *base = q->iobase;
- u32 reg;
- int ret;
--
- /* disable and unprepare clock to avoid glitch pass to controller */
- fsl_qspi_clk_disable_unprep(q);
-
-@@ -747,9 +814,6 @@ static int fsl_qspi_nor_setup(struct fsl
- base + QUADSPI_MCR);
- udelay(1);
-
-- /* Init the LUT table. */
-- fsl_qspi_init_lut(q);
--
- /* Disable the module */
- qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
- base + QUADSPI_MCR);
-@@ -770,6 +834,9 @@ static int fsl_qspi_nor_setup(struct fsl
- /* enable the interrupt */
- qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
-
-+ /* Init for AHB read */
-+ fsl_qspi_init_ahb_read(q);
-+
- return 0;
- }
-
-@@ -792,12 +859,6 @@ static int fsl_qspi_nor_setup_last(struc
- if (ret)
- return ret;
-
-- /* Init the LUT table again. */
-- fsl_qspi_init_lut(q);
--
-- /* Init for AHB read */
-- fsl_qspi_init_abh_read(q);
--
- return 0;
- }
-
-@@ -807,6 +868,7 @@ static const struct of_device_id fsl_qsp
- { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
- { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
-+ { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
- { /* sentinel */ }
- };
- MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
-@@ -821,6 +883,7 @@ static int fsl_qspi_read_reg(struct spi_
- int ret;
- struct fsl_qspi *q = nor->priv;
-
-+ fsl_qspi_prepare_lut(nor, FSL_QSPI_OPS_READ_REG, opcode);
- ret = fsl_qspi_runcmd(q, opcode, 0, len);
- if (ret)
- return ret;
-@@ -835,6 +898,8 @@ static int fsl_qspi_write_reg(struct spi
- int ret;
-
- if (!buf) {
-+ /* Prepare LUT for WRITE_REG cmd with input BUF as NULL. */
-+ fsl_qspi_prepare_lut(nor, FSL_QSPI_OPS_WRITE_REG, opcode);
- ret = fsl_qspi_runcmd(q, opcode, 0, 1);
- if (ret)
- return ret;
-@@ -843,6 +908,8 @@ static int fsl_qspi_write_reg(struct spi
- fsl_qspi_invalid(q);
-
- } else if (len > 0) {
-+ /* Prepare LUT for WRITE_REG cmd with input BUF non-NULL. */
-+ fsl_qspi_prepare_lut(nor, FSL_QSPI_OPS_WRITE_BUF_REG, opcode);
- ret = fsl_qspi_nor_write(q, nor, opcode, 0,
- (u32 *)buf, len);
- if (ret > 0)
-@@ -859,8 +926,11 @@ static ssize_t fsl_qspi_write(struct spi
- size_t len, const u_char *buf)
- {
- struct fsl_qspi *q = nor->priv;
-- ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
-- (u32 *)buf, len);
-+ ssize_t ret;
-+
-+ fsl_qspi_prepare_lut(nor, FSL_QSPI_OPS_WRITE, nor->program_opcode);
-+ ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
-+ (u32 *)buf, len);
-
- /* invalid the data in the AHB buffer. */
- fsl_qspi_invalid(q);
-@@ -873,6 +943,8 @@ static ssize_t fsl_qspi_read(struct spi_
- struct fsl_qspi *q = nor->priv;
- u8 cmd = nor->read_opcode;
-
-+ fsl_qspi_prepare_lut(nor, FSL_QSPI_OPS_READ, nor->read_opcode);
-+
- /* if necessary,ioremap buffer before AHB read, */
- if (!q->ahb_addr) {
- q->memmap_offs = q->chip_base_addr + from;
-@@ -907,8 +979,9 @@ static ssize_t fsl_qspi_read(struct spi_
- len);
-
- /* Read out the data directly from the AHB buffer.*/
-- memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
-- len);
-+ memcpy_fromio(buf,
-+ q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
-+ len);
-
- return len;
- }
-@@ -921,6 +994,7 @@ static int fsl_qspi_erase(struct spi_nor
- dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
- nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs);
-
-+ fsl_qspi_prepare_lut(nor, FSL_QSPI_OPS_ERASE, nor->erase_opcode);
- ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
- if (ret)
- return ret;
-@@ -958,17 +1032,14 @@ static void fsl_qspi_unprep(struct spi_n
-
- static int fsl_qspi_probe(struct platform_device *pdev)
- {
-- const struct spi_nor_hwcaps hwcaps = {
-- .mask = SNOR_HWCAPS_READ_1_1_4 |
-- SNOR_HWCAPS_PP,
-- };
-+ struct spi_nor_hwcaps hwcaps;
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct fsl_qspi *q;
- struct resource *res;
- struct spi_nor *nor;
- struct mtd_info *mtd;
-- int ret, i = 0;
-+ int ret, i = 0, value;
-
- q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
- if (!q)
-@@ -1041,6 +1112,10 @@ static int fsl_qspi_probe(struct platfor
-
- /* iterate the subnodes. */
- for_each_available_child_of_node(dev->of_node, np) {
-+ /* Reset hwcaps mask to minimal caps for the slave node. */
-+ hwcaps.mask = SNOR_HWCAPS_READ | SNOR_HWCAPS_PP;
-+ value = 0;
-+
- /* skip the holes */
- if (!q->has_second_chip)
- i *= 2;
-@@ -1070,6 +1145,51 @@ static int fsl_qspi_probe(struct platfor
- /* set the chip address for READID */
- fsl_qspi_set_base_addr(q, nor);
-
-+ /*
-+ * If spi-rx-bus-width and spi-tx-bus-width are not defined, assign
-+ * the default hardware capabilities SNOR_HWCAPS_READ_1_1_4 and
-+ * SNOR_HWCAPS_PP supported by the Quad-SPI controller.
-+ */
-+ if (!of_property_read_u32(np, "spi-rx-bus-width", &value)) {
-+ switch (value) {
-+ case 1:
-+ hwcaps.mask |= SNOR_HWCAPS_READ |
-+ SNOR_HWCAPS_READ_FAST;
-+ break;
-+ case 2:
-+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2 |
-+ SNOR_HWCAPS_READ_1_2_2;
-+ break;
-+ case 4:
-+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4 |
-+ SNOR_HWCAPS_READ_1_4_4;
-+ break;
-+ default:
-+ dev_err(dev,
-+ "spi-rx-bus-width %d not supported\n",
-+ value);
-+ break;
-+ }
-+ } else
-+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
-+
-+ if (!of_property_read_u32(np, "spi-tx-bus-width", &value)) {
-+ switch (value) {
-+ case 1:
-+ hwcaps.mask |= SNOR_HWCAPS_PP;
-+ break;
-+ case 4:
-+ hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4 |
-+ SNOR_HWCAPS_PP_1_4_4;
-+ break;
-+ default:
-+ dev_err(dev,
-+ "spi-tx-bus-width %d not supported\n",
-+ value);
-+ break;
-+ }
-+ }
-+
- ret = spi_nor_scan(nor, NULL, &hwcaps);
- if (ret)
- goto mutex_failed;
-@@ -1098,6 +1218,8 @@ static int fsl_qspi_probe(struct platfor
- if (nor->page_size > q->devtype_data->txfifo)
- nor->page_size = q->devtype_data->txfifo;
-
-+ /* required for memory-mapped AHB read */
-+ fsl_qspi_prepare_lut(nor, FSL_QSPI_OPS_READ, nor->read_opcode);
- i++;
- }
-
-@@ -1106,6 +1228,8 @@ static int fsl_qspi_probe(struct platfor
- if (ret)
- goto last_init_failed;
-
-+
-+
- fsl_qspi_clk_disable_unprep(q);
- return 0;
-
---- a/drivers/mtd/spi-nor/spi-nor.c
-+++ b/drivers/mtd/spi-nor/spi-nor.c
-@@ -1159,6 +1159,11 @@ static const struct flash_info spi_nor_i
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
-+ {
-+ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
-+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-+ },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
---- a/drivers/spi/spi-fsl-dspi.c
-+++ b/drivers/spi/spi-fsl-dspi.c
-@@ -1024,8 +1024,8 @@ static int dspi_probe(struct platform_de
- goto out_clk_put;
- }
-
-- ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
-- pdev->name, dspi);
-+ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
-+ IRQF_SHARED, pdev->name, dspi);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
- goto out_clk_put;
diff --git a/target/linux/layerscape/patches-4.14/819-sdhc-support-layerscape.patch b/target/linux/layerscape/patches-4.14/819-sdhc-support-layerscape.patch
deleted file mode 100644
index 02688c74cd..0000000000
--- a/target/linux/layerscape/patches-4.14/819-sdhc-support-layerscape.patch
+++ /dev/null
@@ -1,572 +0,0 @@
-From 6ca94d2e7dc72b21703e6d9be4e8ec3ad4a26f41 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:59:02 +0800
-Subject: [PATCH] sdhc: support layerscape
-
-This is an integrated patch of sdhc for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Mathew McBride <matt@traverse.com.au>
-Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
----
- drivers/mmc/core/mmc.c | 3 +
- drivers/mmc/host/sdhci-esdhc.h | 25 +++
- drivers/mmc/host/sdhci-of-esdhc.c | 270 ++++++++++++++++++++++++++----
- drivers/mmc/host/sdhci.c | 9 +-
- drivers/mmc/host/sdhci.h | 1 +
- include/linux/mmc/card.h | 1 +
- include/linux/mmc/host.h | 2 +
- 7 files changed, 272 insertions(+), 39 deletions(-)
-
---- a/drivers/mmc/core/mmc.c
-+++ b/drivers/mmc/core/mmc.c
-@@ -1174,6 +1174,9 @@ static int mmc_select_hs400(struct mmc_c
- goto out_err;
-
- /* Switch card to DDR */
-+ if (host->ops->prepare_ddr_to_hs400)
-+ host->ops->prepare_ddr_to_hs400(host);
-+
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BUS_WIDTH,
- EXT_CSD_DDR_BUS_WIDTH_8,
---- a/drivers/mmc/host/sdhci-esdhc.h
-+++ b/drivers/mmc/host/sdhci-esdhc.h
-@@ -59,7 +59,32 @@
-
- /* Tuning Block Control Register */
- #define ESDHC_TBCTL 0x120
-+#define ESDHC_HS400_WNDW_ADJUST 0x00000040
-+#define ESDHC_HS400_MODE 0x00000010
- #define ESDHC_TB_EN 0x00000004
-+#define ESDHC_TBPTR 0x128
-+
-+/* SD Clock Control Register */
-+#define ESDHC_SDCLKCTL 0x144
-+#define ESDHC_LPBK_CLK_SEL 0x80000000
-+#define ESDHC_CMD_CLK_CTL 0x00008000
-+
-+/* SD Timing Control Register */
-+#define ESDHC_SDTIMNGCTL 0x148
-+#define ESDHC_FLW_CTL_BG 0x00008000
-+
-+/* DLL Config 0 Register */
-+#define ESDHC_DLLCFG0 0x160
-+#define ESDHC_DLL_ENABLE 0x80000000
-+#define ESDHC_DLL_FREQ_SEL 0x08000000
-+
-+/* DLL Config 1 Register */
-+#define ESDHC_DLLCFG1 0x164
-+#define ESDHC_DLL_PD_PULSE_STRETCH_SEL 0x80000000
-+
-+/* DLL Status 0 Register */
-+#define ESDHC_DLLSTAT0 0x170
-+#define ESDHC_DLL_STS_SLV_LOCK 0x08000000
-
- /* Control Register for DMA transfer */
- #define ESDHC_DMA_SYSCTL 0x40c
---- a/drivers/mmc/host/sdhci-of-esdhc.c
-+++ b/drivers/mmc/host/sdhci-of-esdhc.c
-@@ -30,11 +30,61 @@
- #define VENDOR_V_22 0x12
- #define VENDOR_V_23 0x13
-
-+#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
-+
-+struct esdhc_clk_fixup {
-+ const unsigned int sd_dflt_max_clk;
-+ const unsigned int max_clk[MMC_TIMING_NUM];
-+};
-+
-+static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
-+ .sd_dflt_max_clk = 25000000,
-+ .max_clk[MMC_TIMING_MMC_HS] = 46500000,
-+ .max_clk[MMC_TIMING_SD_HS] = 46500000,
-+};
-+
-+static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
-+ .sd_dflt_max_clk = 25000000,
-+ .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
-+ .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
-+};
-+
-+static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
-+ .sd_dflt_max_clk = 25000000,
-+ .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
-+ .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
-+};
-+
-+static const struct esdhc_clk_fixup p1010_esdhc_clk = {
-+ .sd_dflt_max_clk = 20000000,
-+ .max_clk[MMC_TIMING_LEGACY] = 20000000,
-+ .max_clk[MMC_TIMING_MMC_HS] = 42000000,
-+ .max_clk[MMC_TIMING_SD_HS] = 40000000,
-+};
-+
-+static const struct of_device_id sdhci_esdhc_of_match[] = {
-+ { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
-+ { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
-+ { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
-+ { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
-+ { .compatible = "fsl,mpc8379-esdhc" },
-+ { .compatible = "fsl,mpc8536-esdhc" },
-+ { .compatible = "fsl,esdhc" },
-+ { }
-+};
-+MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
-+
- struct sdhci_esdhc {
- u8 vendor_ver;
- u8 spec_ver;
- bool quirk_incorrect_hostver;
-+ bool quirk_limited_clk_division;
-+ bool quirk_unreliable_pulse_detection;
-+ bool quirk_fixup_tuning;
-+ bool quirk_incorrect_delay_chain;
- unsigned int peripheral_clock;
-+ const struct esdhc_clk_fixup *clk_fixup;
-+ u32 div_ratio;
- };
-
- /**
-@@ -500,13 +550,20 @@ static void esdhc_clock_enable(struct sd
- }
- }
-
-+static struct soc_device_attribute soc_incorrect_delay_chain[] = {
-+ { .family = "QorIQ LX2160A", .revision = "1.0", },
-+ { },
-+};
-+
- static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
- {
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- int pre_div = 1;
- int div = 1;
-+ int division;
- ktime_t timeout;
-+ long fixup = 0;
- u32 temp;
-
- host->mmc->actual_clock = 0;
-@@ -520,27 +577,14 @@ static void esdhc_of_set_clock(struct sd
- if (esdhc->vendor_ver < VENDOR_V_23)
- pre_div = 2;
-
-- /*
-- * Limit SD clock to 167MHz for ls1046a according to its datasheet
-- */
-- if (clock > 167000000 &&
-- of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc"))
-- clock = 167000000;
-+ if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
-+ esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
-+ fixup = esdhc->clk_fixup->sd_dflt_max_clk;
-+ else if (esdhc->clk_fixup)
-+ fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
-
-- /*
-- * Limit SD clock to 125MHz for ls1012a according to its datasheet
-- */
-- if (clock > 125000000 &&
-- of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc"))
-- clock = 125000000;
--
-- /* Workaround to reduce the clock frequency for p1010 esdhc */
-- if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
-- if (clock > 20000000)
-- clock -= 5000000;
-- if (clock > 40000000)
-- clock -= 5000000;
-- }
-+ if (fixup && clock > fixup)
-+ clock = fixup;
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
-@@ -553,9 +597,30 @@ static void esdhc_of_set_clock(struct sd
- while (host->max_clk / pre_div / div > clock && div < 16)
- div++;
-
-+ if (esdhc->quirk_limited_clk_division &&
-+ clock == MMC_HS200_MAX_DTR &&
-+ (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
-+ host->flags & SDHCI_HS400_TUNING)) {
-+ division = pre_div * div;
-+ if (division <= 4) {
-+ pre_div = 4;
-+ div = 1;
-+ } else if (division <= 8) {
-+ pre_div = 4;
-+ div = 2;
-+ } else if (division <= 12) {
-+ pre_div = 4;
-+ div = 3;
-+ } else {
-+ pr_warn("%s: using unsupported clock division.\n",
-+ mmc_hostname(host->mmc));
-+ }
-+ }
-+
- dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
- host->mmc->actual_clock = host->max_clk / pre_div / div;
-+ esdhc->div_ratio = pre_div * div;
- pre_div >>= 1;
- div--;
-
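The clock path in this hunk first clamps the requested rate against the per-SoC fixup table (sd_dflt_max_clk for an SD card in legacy timing, max_clk[timing] otherwise) and then walks div upward until max_clk / pre_div / div is at or below the target; pre_div * div is kept as div_ratio for the tuning code. A standalone worked example with illustrative numbers, not values from any datasheet:

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_clk = 200000000;  /* illustrative controller base clock */
            unsigned int clock   = 50000000;   /* requested SD clock */
            unsigned int fixup   = 46500000;   /* e.g. a per-timing cap from the table */
            int pre_div = 2, div = 1;          /* pre_div as chosen before the div loop */

            if (fixup && clock > fixup)
                    clock = fixup;             /* same clamp as esdhc_of_set_clock() */

            while (max_clk / pre_div / div > clock && div < 16)
                    div++;

            printf("actual %u Hz, div_ratio %d\n",
                   max_clk / pre_div / div, pre_div * div);
            return 0;   /* actual 33333333 Hz, div_ratio 6 */
    }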
-@@ -565,6 +630,29 @@ static void esdhc_of_set_clock(struct sd
- | (pre_div << ESDHC_PREDIV_SHIFT));
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
-
-+ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
-+ clock == MMC_HS200_MAX_DTR) {
-+ temp = sdhci_readl(host, ESDHC_TBCTL);
-+ sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
-+ temp = sdhci_readl(host, ESDHC_SDCLKCTL);
-+ sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
-+ esdhc_clock_enable(host, true);
-+
-+ temp = sdhci_readl(host, ESDHC_DLLCFG0);
-+ temp |= ESDHC_DLL_ENABLE;
-+ if (host->mmc->actual_clock == MMC_HS200_MAX_DTR ||
-+ esdhc->quirk_incorrect_delay_chain == false)
-+ temp |= ESDHC_DLL_FREQ_SEL;
-+ sdhci_writel(host, temp, ESDHC_DLLCFG0);
-+ temp = sdhci_readl(host, ESDHC_TBCTL);
-+ sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
-+
-+ esdhc_clock_enable(host, false);
-+ temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
-+ temp |= ESDHC_FLUSH_ASYNC_FIFO;
-+ sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
-+ }
-+
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- while (1) {
-@@ -580,6 +668,7 @@ static void esdhc_of_set_clock(struct sd
- udelay(10);
- }
-
-+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= ESDHC_CLOCK_SDCLKEN;
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- }
-@@ -608,6 +697,8 @@ static void esdhc_pltfm_set_bus_width(st
-
- static void esdhc_reset(struct sdhci_host *host, u8 mask)
- {
-+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u32 val;
-
- sdhci_reset(host, mask);
-@@ -619,6 +710,12 @@ static void esdhc_reset(struct sdhci_hos
- val = sdhci_readl(host, ESDHC_TBCTL);
- val &= ~ESDHC_TB_EN;
- sdhci_writel(host, val, ESDHC_TBCTL);
-+
-+ if (esdhc->quirk_unreliable_pulse_detection) {
-+ val = sdhci_readl(host, ESDHC_DLLCFG1);
-+ val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
-+ sdhci_writel(host, val, ESDHC_DLLCFG1);
-+ }
- }
- }
-
-@@ -630,6 +727,7 @@ static void esdhc_reset(struct sdhci_hos
- static const struct of_device_id scfg_device_ids[] = {
- { .compatible = "fsl,t1040-scfg", },
- { .compatible = "fsl,ls1012a-scfg", },
-+ { .compatible = "fsl,ls1043a-scfg", },
- { .compatible = "fsl,ls1046a-scfg", },
- {}
- };
-@@ -692,23 +790,91 @@ static int esdhc_signal_voltage_switch(s
- }
- }
-
--static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
-+static struct soc_device_attribute soc_fixup_tuning[] = {
-+ { .family = "QorIQ T1040", .revision = "1.0", },
-+ { .family = "QorIQ T2080", .revision = "1.0", },
-+ { .family = "QorIQ T1023", .revision = "1.0", },
-+ { .family = "QorIQ LS1021A", .revision = "1.0", },
-+ { .family = "QorIQ LS1080A", .revision = "1.0", },
-+ { .family = "QorIQ LS2080A", .revision = "1.0", },
-+ { .family = "QorIQ LS1012A", .revision = "1.0", },
-+ { .family = "QorIQ LS1043A", .revision = "1.*", },
-+ { .family = "QorIQ LS1046A", .revision = "1.0", },
-+ { },
-+};
-+
-+static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
- {
-- struct sdhci_host *host = mmc_priv(mmc);
- u32 val;
-
-- /* Use tuning block for tuning procedure */
- esdhc_clock_enable(host, false);
-+
- val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- val |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
-
- val = sdhci_readl(host, ESDHC_TBCTL);
-- val |= ESDHC_TB_EN;
-+ if (enable)
-+ val |= ESDHC_TB_EN;
-+ else
-+ val &= ~ESDHC_TB_EN;
- sdhci_writel(host, val, ESDHC_TBCTL);
-+
- esdhc_clock_enable(host, true);
-+}
-+
-+static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
-+{
-+ struct sdhci_host *host = mmc_priv(mmc);
-+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
-+ bool hs400_tuning;
-+ u32 val;
-+ int ret;
-+
-+ if (esdhc->quirk_limited_clk_division &&
-+ host->flags & SDHCI_HS400_TUNING)
-+ esdhc_of_set_clock(host, host->clock);
-+
-+ esdhc_tuning_block_enable(host, true);
-+
-+ hs400_tuning = host->flags & SDHCI_HS400_TUNING;
-+ ret = sdhci_execute_tuning(mmc, opcode);
-+
-+ if (hs400_tuning) {
-+ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
-+ val |= ESDHC_FLW_CTL_BG;
-+ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
-+ }
-
-- return sdhci_execute_tuning(mmc, opcode);
-+ if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
-+
-+ /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
-+ * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
-+ */
-+ val = sdhci_readl(host, ESDHC_TBPTR);
-+ val = (val & ~((0x7f << 8) | 0x7f)) |
-+ (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
-+ sdhci_writel(host, val, ESDHC_TBPTR);
-+
-+ /* program the software tuning mode by setting
-+ * TBCTL[TB_MODE]=2'h3
-+ */
-+ val = sdhci_readl(host, ESDHC_TBCTL);
-+ val |= 0x3;
-+ sdhci_writel(host, val, ESDHC_TBCTL);
-+ sdhci_execute_tuning(mmc, opcode);
-+ }
-+ return ret;
-+}
-+
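The software-tuning fallback above derives the tuning window pointers from the stored divider ratio: TB_WNDW_END_PTR in the low byte of ESDHC_TBPTR gets 3 * div_ratio and TB_WNDW_START_PTR in bits 8..14 gets 5 * div_ratio. A standalone worked example of the register packing with an illustrative div_ratio of 6:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t val       = 0;  /* previous ESDHC_TBPTR contents, illustrative */
            uint32_t div_ratio = 6;  /* esdhc->div_ratio saved while setting the clock */

            val = (val & ~((0x7f << 8) | 0x7f)) |
                  (3 * div_ratio) | ((5 * div_ratio) << 8);

            printf("TBPTR = 0x%08x (end %u, start %u)\n",
                   val, val & 0x7f, (val >> 8) & 0x7f);
            return 0;   /* TBPTR = 0x00001e12 (end 18, start 30) */
    }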
-+static void esdhc_set_uhs_signaling(struct sdhci_host *host,
-+ unsigned int timing)
-+{
-+ if (timing == MMC_TIMING_MMC_HS400)
-+ esdhc_tuning_block_enable(host, true);
-+ else
-+ sdhci_set_uhs_signaling(host, timing);
- }
-
- #ifdef CONFIG_PM_SLEEP
-@@ -757,7 +923,7 @@ static const struct sdhci_ops sdhci_esdh
- .adma_workaround = esdhc_of_adma_workaround,
- .set_bus_width = esdhc_pltfm_set_bus_width,
- .reset = esdhc_reset,
-- .set_uhs_signaling = sdhci_set_uhs_signaling,
-+ .set_uhs_signaling = esdhc_set_uhs_signaling,
- };
-
- static const struct sdhci_ops sdhci_esdhc_le_ops = {
-@@ -774,7 +940,7 @@ static const struct sdhci_ops sdhci_esdh
- .adma_workaround = esdhc_of_adma_workaround,
- .set_bus_width = esdhc_pltfm_set_bus_width,
- .reset = esdhc_reset,
-- .set_uhs_signaling = sdhci_set_uhs_signaling,
-+ .set_uhs_signaling = esdhc_set_uhs_signaling,
- };
-
- static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
-@@ -800,8 +966,20 @@ static struct soc_device_attribute soc_i
- { },
- };
-
-+static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
-+ { .family = "QorIQ LX2160A", .revision = "1.0", },
-+ { .family = "QorIQ LX2160A", .revision = "2.0", },
-+ { },
-+};
-+
-+static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
-+ { .family = "QorIQ LX2160A", .revision = "1.0", },
-+ { },
-+};
-+
- static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
- {
-+ const struct of_device_id *match;
- struct sdhci_pltfm_host *pltfm_host;
- struct sdhci_esdhc *esdhc;
- struct device_node *np;
-@@ -821,6 +999,24 @@ static void esdhc_init(struct platform_d
- else
- esdhc->quirk_incorrect_hostver = false;
-
-+ if (soc_device_match(soc_fixup_sdhc_clkdivs))
-+ esdhc->quirk_limited_clk_division = true;
-+ else
-+ esdhc->quirk_limited_clk_division = false;
-+
-+ if (soc_device_match(soc_unreliable_pulse_detection))
-+ esdhc->quirk_unreliable_pulse_detection = true;
-+ else
-+ esdhc->quirk_unreliable_pulse_detection = false;
-+
-+ if (soc_device_match(soc_incorrect_delay_chain))
-+ esdhc->quirk_incorrect_delay_chain = true;
-+ else
-+ esdhc->quirk_incorrect_delay_chain = false;
-+
-+ match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
-+ if (match)
-+ esdhc->clk_fixup = match->data;
- np = pdev->dev.of_node;
- clk = of_clk_get(np, 0);
- if (!IS_ERR(clk)) {
-@@ -848,6 +1044,12 @@ static void esdhc_init(struct platform_d
- }
- }
-
-+static int esdhc_prepare_ddr_to_hs400(struct mmc_host *mmc)
-+{
-+ esdhc_tuning_block_enable(mmc_priv(mmc), false);
-+ return 0;
-+}
-+
- static int sdhci_esdhc_probe(struct platform_device *pdev)
- {
- struct sdhci_host *host;
-@@ -871,6 +1073,7 @@ static int sdhci_esdhc_probe(struct plat
- host->mmc_host_ops.start_signal_voltage_switch =
- esdhc_signal_voltage_switch;
- host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
-+ host->mmc_host_ops.prepare_ddr_to_hs400 = esdhc_prepare_ddr_to_hs400;
- host->tuning_delay = 1;
-
- esdhc_init(pdev, host);
-@@ -879,6 +1082,11 @@ static int sdhci_esdhc_probe(struct plat
-
- pltfm_host = sdhci_priv(host);
- esdhc = sdhci_pltfm_priv(pltfm_host);
-+ if (soc_device_match(soc_fixup_tuning))
-+ esdhc->quirk_fixup_tuning = true;
-+ else
-+ esdhc->quirk_fixup_tuning = false;
-+
- if (esdhc->vendor_ver == VENDOR_V_22)
- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
-
-@@ -925,14 +1133,6 @@ static int sdhci_esdhc_probe(struct plat
- return ret;
- }
-
--static const struct of_device_id sdhci_esdhc_of_match[] = {
-- { .compatible = "fsl,mpc8379-esdhc" },
-- { .compatible = "fsl,mpc8536-esdhc" },
-- { .compatible = "fsl,esdhc" },
-- { }
--};
--MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
--
- static struct platform_driver sdhci_esdhc_driver = {
- .driver = {
- .name = "sdhci-esdhc",
---- a/drivers/mmc/host/sdhci.c
-+++ b/drivers/mmc/host/sdhci.c
-@@ -2148,7 +2148,7 @@ static void sdhci_send_tuning(struct sdh
-
- }
-
--static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
-+static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
- {
- int i;
-
-@@ -2165,13 +2165,13 @@ static void __sdhci_execute_tuning(struc
- pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
- mmc_hostname(host->mmc));
- sdhci_abort_tuning(host, opcode);
-- return;
-+ return -ETIMEDOUT;
- }
-
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
- if (ctrl & SDHCI_CTRL_TUNED_CLK)
-- return; /* Success! */
-+ return 0; /* Success! */
- break;
- }
-
-@@ -2183,6 +2183,7 @@ static void __sdhci_execute_tuning(struc
- pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
- mmc_hostname(host->mmc));
- sdhci_reset_tuning(host);
-+ return -EAGAIN;
- }
-
- int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
-@@ -2244,7 +2245,7 @@ int sdhci_execute_tuning(struct mmc_host
-
- sdhci_start_tuning(host);
-
-- __sdhci_execute_tuning(host, opcode);
-+ host->tuning_err = __sdhci_execute_tuning(host, opcode);
-
- sdhci_end_tuning(host);
- out:
---- a/drivers/mmc/host/sdhci.h
-+++ b/drivers/mmc/host/sdhci.h
-@@ -545,6 +545,7 @@ struct sdhci_host {
-
- unsigned int tuning_count; /* Timer count for re-tuning */
- unsigned int tuning_mode; /* Re-tuning mode supported by host */
-+ unsigned int tuning_err; /* Error code for re-tuning */
- #define SDHCI_TUNING_MODE_1 0
- #define SDHCI_TUNING_MODE_2 1
- #define SDHCI_TUNING_MODE_3 2
---- a/include/linux/mmc/card.h
-+++ b/include/linux/mmc/card.h
-@@ -156,6 +156,7 @@ struct sd_switch_caps {
- #define UHS_DDR50_MAX_DTR 50000000
- #define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
- #define UHS_SDR12_MAX_DTR 25000000
-+#define DEFAULT_SPEED_MAX_DTR UHS_SDR12_MAX_DTR
- unsigned int sd3_bus_mode;
- #define UHS_SDR12_BUS_SPEED 0
- #define HIGH_SPEED_BUS_SPEED 1
---- a/include/linux/mmc/host.h
-+++ b/include/linux/mmc/host.h
-@@ -145,6 +145,8 @@ struct mmc_host_ops {
-
- /* Prepare HS400 target operating frequency depending host driver */
- int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
-+ int (*prepare_ddr_to_hs400)(struct mmc_host *host);
-+
- /* Prepare enhanced strobe depending host driver */
- void (*hs400_enhanced_strobe)(struct mmc_host *host,
- struct mmc_ios *ios);
diff --git a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch b/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
deleted file mode 100644
index 63f6540be0..0000000000
--- a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
+++ /dev/null
@@ -1,15294 +0,0 @@
-From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 23 Apr 2019 17:41:43 +0800
-Subject: [PATCH] sec: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This is an integrated patch of sec for layerscape
-
-Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
-Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
-Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
-Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
-Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
-Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
-Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
----
- crypto/Kconfig | 20 +
- crypto/Makefile | 1 +
- crypto/chacha20poly1305.c | 2 -
- crypto/tcrypt.c | 27 +-
- crypto/testmgr.c | 244 ++
- crypto/testmgr.h | 219 ++
- crypto/tls.c | 607 ++++
- drivers/crypto/Makefile | 2 +-
- drivers/crypto/caam/Kconfig | 85 +-
- drivers/crypto/caam/Makefile | 26 +-
- drivers/crypto/caam/caamalg.c | 468 +++-
- drivers/crypto/caam/caamalg_desc.c | 903 +++++-
- drivers/crypto/caam/caamalg_desc.h | 52 +-
- drivers/crypto/caam/caamalg_qi.c | 1060 ++++++-
- drivers/crypto/caam/caamalg_qi2.c | 5843 +++++++++++++++++++++++++++++++++++
- drivers/crypto/caam/caamalg_qi2.h | 276 ++
- drivers/crypto/caam/caamhash.c | 192 +-
- drivers/crypto/caam/caamhash_desc.c | 108 +
- drivers/crypto/caam/caamhash_desc.h | 49 +
- drivers/crypto/caam/caampkc.c | 52 +-
- drivers/crypto/caam/caamrng.c | 52 +-
- drivers/crypto/caam/compat.h | 4 +
- drivers/crypto/caam/ctrl.c | 194 +-
- drivers/crypto/caam/desc.h | 89 +-
- drivers/crypto/caam/desc_constr.h | 59 +-
- drivers/crypto/caam/dpseci.c | 865 ++++++
- drivers/crypto/caam/dpseci.h | 433 +++
- drivers/crypto/caam/dpseci_cmd.h | 287 ++
- drivers/crypto/caam/error.c | 81 +-
- drivers/crypto/caam/error.h | 6 +-
- drivers/crypto/caam/intern.h | 102 +-
- drivers/crypto/caam/jr.c | 84 +
- drivers/crypto/caam/jr.h | 2 +
- drivers/crypto/caam/key_gen.c | 30 -
- drivers/crypto/caam/key_gen.h | 30 +
- drivers/crypto/caam/qi.c | 134 +-
- drivers/crypto/caam/qi.h | 2 +-
- drivers/crypto/caam/regs.h | 76 +-
- drivers/crypto/caam/sg_sw_qm.h | 46 +-
- drivers/crypto/talitos.c | 8 +
- include/crypto/chacha20.h | 1 +
- 41 files changed, 12088 insertions(+), 733 deletions(-)
- create mode 100644 crypto/tls.c
- create mode 100644 drivers/crypto/caam/caamalg_qi2.c
- create mode 100644 drivers/crypto/caam/caamalg_qi2.h
- create mode 100644 drivers/crypto/caam/caamhash_desc.c
- create mode 100644 drivers/crypto/caam/caamhash_desc.h
- create mode 100644 drivers/crypto/caam/dpseci.c
- create mode 100644 drivers/crypto/caam/dpseci.h
- create mode 100644 drivers/crypto/caam/dpseci_cmd.h
-
---- a/crypto/Kconfig
-+++ b/crypto/Kconfig
-@@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
- a sequence number xored with a salt. This is the default
- algorithm for CBC.
-
-+config CRYPTO_TLS
-+ tristate "TLS support"
-+ select CRYPTO_AEAD
-+ select CRYPTO_BLKCIPHER
-+ select CRYPTO_MANAGER
-+ select CRYPTO_HASH
-+ select CRYPTO_NULL
-+ select CRYPTO_AUTHENC
-+ help
-+ Support for TLS 1.0 record encryption and decryption
-+
-+ This module adds support for encryption/decryption of TLS 1.0 frames
-+ using blockcipher algorithms. The name of the resulting algorithm is
-+ "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
-+ algorithms are used (e.g. aes-generic, sha1-generic), but hardware
-+ accelerated versions will be used automatically if available.
-+
-+ User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
-+ operations through AF_ALG or cryptodev interfaces
-+
- comment "Block modes"
-
- config CRYPTO_CBC
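The CRYPTO_TLS help text above names the resulting template, "tls10(hmac(<digest>),cbc(<cipher>))", and notes that user space can reach it through AF_ALG or cryptodev. As a rough illustration (not part of the removed patch), the C sketch below binds such an instance via AF_ALG; the key blob layout and the 20-byte auth size follow the hmac(sha1)/cbc(aes) test vectors added to testmgr.h further down, and the program only sets up the transform rather than pushing records.

    /* Sketch only: bind a tls10() AEAD instance from user space via AF_ALG.
     * Assumes CRYPTO_TLS (plus cbc(aes) and hmac(sha1) providers) is available. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "aead",
                    .salg_name   = "tls10(hmac(sha1),cbc(aes))",
            };
            /* Key blob: rtattr header + be32 enckeylen + authkey + enckey,
             * i.e. the same 8 + 20 + 16 layout as the testmgr vectors below. */
            unsigned char key[8 + 20 + 16] = { 0 };
            int tfm, op;

            tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
            if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                    perror("tls10 instance not available");
                    return 1;
            }
            if (setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0 ||
                setsockopt(tfm, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 20) < 0) {
                    perror("setkey/authsize");
                    return 1;
            }
            op = accept(tfm, NULL, 0);
            /* Records are then exchanged with sendmsg() using ALG_SET_OP,
             * ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control messages and read
             * back with read(); omitted here to keep the sketch short. */
            if (op >= 0)
                    close(op);
            close(tfm);
            return 0;
    }

In-kernel coverage for the same algorithm name is added later in this patch as tcrypt case 191 and a testmgr entry, so on a build with tcrypt as a module it can also be exercised with something like modprobe tcrypt mode=191.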
---- a/crypto/Makefile
-+++ b/crypto/Makefile
-@@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
- obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
- obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
- obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
-+obj-$(CONFIG_CRYPTO_TLS) += tls.o
- obj-$(CONFIG_CRYPTO_LZO) += lzo.o
- obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
- obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
---- a/crypto/chacha20poly1305.c
-+++ b/crypto/chacha20poly1305.c
-@@ -22,8 +22,6 @@
-
- #include "internal.h"
-
--#define CHACHAPOLY_IV_SIZE 12
--
- struct chachapoly_instance_ctx {
- struct crypto_skcipher_spawn chacha;
- struct crypto_ahash_spawn poly;
---- a/crypto/tcrypt.c
-+++ b/crypto/tcrypt.c
-@@ -76,7 +76,7 @@ static char *check[] = {
- "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
- "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
- "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
-- NULL
-+ "rsa", NULL
- };
-
- struct tcrypt_result {
-@@ -355,11 +355,13 @@ static void test_aead_speed(const char *
- iv);
- aead_request_set_ad(req, aad_size);
-
-- if (secs)
-+ if (secs) {
- ret = test_aead_jiffies(req, enc, *b_size,
- secs);
-- else
-+ cond_resched();
-+ } else {
- ret = test_aead_cycles(req, enc, *b_size);
-+ }
-
- if (ret) {
- pr_err("%s() failed return code=%d\n", e, ret);
-@@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
-
- ahash_request_set_crypt(req, sg, output, speed[i].plen);
-
-- if (secs)
-+ if (secs) {
- ret = test_ahash_jiffies(req, speed[i].blen,
- speed[i].plen, output, secs);
-- else
-+ cond_resched();
-+ } else {
- ret = test_ahash_cycles(req, speed[i].blen,
- speed[i].plen, output);
-+ }
-
- if (ret) {
- pr_err("hashing failed ret=%d\n", ret);
-@@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
-
- skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
-
-- if (secs)
-+ if (secs) {
- ret = test_acipher_jiffies(req, enc,
- *b_size, secs);
-- else
-+ cond_resched();
-+ } else {
- ret = test_acipher_cycles(req, enc,
- *b_size);
-+ }
-
- if (ret) {
- pr_err("%s() failed flags=%x\n", e,
-@@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
- ret += tcrypt_test("hmac(sha3-512)");
- break;
-
-+ case 115:
-+ ret += tcrypt_test("rsa");
-+ break;
-+
- case 150:
- ret += tcrypt_test("ansi_cprng");
- break;
-@@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
- case 190:
- ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
- break;
-+ case 191:
-+ ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
-+ break;
- case 200:
- test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_16_24_32);
---- a/crypto/testmgr.c
-+++ b/crypto/testmgr.c
-@@ -117,6 +117,13 @@ struct drbg_test_suite {
- unsigned int count;
- };
-
-+struct tls_test_suite {
-+ struct {
-+ struct tls_testvec *vecs;
-+ unsigned int count;
-+ } enc, dec;
-+};
-+
- struct akcipher_test_suite {
- const struct akcipher_testvec *vecs;
- unsigned int count;
-@@ -140,6 +147,7 @@ struct alg_test_desc {
- struct hash_test_suite hash;
- struct cprng_test_suite cprng;
- struct drbg_test_suite drbg;
-+ struct tls_test_suite tls;
- struct akcipher_test_suite akcipher;
- struct kpp_test_suite kpp;
- } suite;
-@@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
- return 0;
- }
-
-+static int __test_tls(struct crypto_aead *tfm, int enc,
-+ struct tls_testvec *template, unsigned int tcount,
-+ const bool diff_dst)
-+{
-+ const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
-+ unsigned int i, k, authsize;
-+ char *q;
-+ struct aead_request *req;
-+ struct scatterlist *sg;
-+ struct scatterlist *sgout;
-+ const char *e, *d;
-+ struct tcrypt_result result;
-+ void *input;
-+ void *output;
-+ void *assoc;
-+ char *iv;
-+ char *key;
-+ char *xbuf[XBUFSIZE];
-+ char *xoutbuf[XBUFSIZE];
-+ char *axbuf[XBUFSIZE];
-+ int ret = -ENOMEM;
-+
-+ if (testmgr_alloc_buf(xbuf))
-+ goto out_noxbuf;
-+
-+ if (diff_dst && testmgr_alloc_buf(xoutbuf))
-+ goto out_nooutbuf;
-+
-+ if (testmgr_alloc_buf(axbuf))
-+ goto out_noaxbuf;
-+
-+ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
-+ if (!iv)
-+ goto out_noiv;
-+
-+ key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
-+ if (!key)
-+ goto out_nokey;
-+
-+ sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
-+ if (!sg)
-+ goto out_nosg;
-+
-+ sgout = sg + 8;
-+
-+ d = diff_dst ? "-ddst" : "";
-+ e = enc ? "encryption" : "decryption";
-+
-+ init_completion(&result.completion);
-+
-+ req = aead_request_alloc(tfm, GFP_KERNEL);
-+ if (!req) {
-+ pr_err("alg: tls%s: Failed to allocate request for %s\n",
-+ d, algo);
-+ goto out;
-+ }
-+
-+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-+ tcrypt_complete, &result);
-+
-+ for (i = 0; i < tcount; i++) {
-+ input = xbuf[0];
-+ assoc = axbuf[0];
-+
-+ ret = -EINVAL;
-+ if (WARN_ON(template[i].ilen > PAGE_SIZE ||
-+ template[i].alen > PAGE_SIZE))
-+ goto out;
-+
-+ memcpy(assoc, template[i].assoc, template[i].alen);
-+ memcpy(input, template[i].input, template[i].ilen);
-+
-+ if (template[i].iv)
-+ memcpy(iv, template[i].iv, MAX_IVLEN);
-+ else
-+ memset(iv, 0, MAX_IVLEN);
-+
-+ crypto_aead_clear_flags(tfm, ~0);
-+
-+ if (template[i].klen > MAX_KEYLEN) {
-+ pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
-+ d, i, algo, template[i].klen, MAX_KEYLEN);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ memcpy(key, template[i].key, template[i].klen);
-+
-+ ret = crypto_aead_setkey(tfm, key, template[i].klen);
-+ if (!ret == template[i].fail) {
-+ pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
-+ d, i, algo, crypto_aead_get_flags(tfm));
-+ goto out;
-+ } else if (ret)
-+ continue;
-+
-+ authsize = 20;
-+ ret = crypto_aead_setauthsize(tfm, authsize);
-+ if (ret) {
-+ pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
-+ d, authsize, i, algo);
-+ goto out;
-+ }
-+
-+ k = !!template[i].alen;
-+ sg_init_table(sg, k + 1);
-+ sg_set_buf(&sg[0], assoc, template[i].alen);
-+ sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
-+ template[i].ilen));
-+ output = input;
-+
-+ if (diff_dst) {
-+ sg_init_table(sgout, k + 1);
-+ sg_set_buf(&sgout[0], assoc, template[i].alen);
-+
-+ output = xoutbuf[0];
-+ sg_set_buf(&sgout[k], output,
-+ (enc ? template[i].rlen : template[i].ilen));
-+ }
-+
-+ aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-+ template[i].ilen, iv);
-+
-+ aead_request_set_ad(req, template[i].alen);
-+
-+ ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
-+
-+ switch (ret) {
-+ case 0:
-+ if (template[i].novrfy) {
-+ /* verification was supposed to fail */
-+ pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
-+ d, e, i, algo);
-+ /* so really, we got a bad message */
-+ ret = -EBADMSG;
-+ goto out;
-+ }
-+ break;
-+ case -EINPROGRESS:
-+ case -EBUSY:
-+ wait_for_completion(&result.completion);
-+ reinit_completion(&result.completion);
-+ ret = result.err;
-+ if (!ret)
-+ break;
-+ case -EBADMSG:
-+ /* verification failure was expected */
-+ if (template[i].novrfy)
-+ continue;
-+ /* fall through */
-+ default:
-+ pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
-+ d, e, i, algo, -ret);
-+ goto out;
-+ }
-+
-+ q = output;
-+ if (memcmp(q, template[i].result, template[i].rlen)) {
-+ pr_err("alg: tls%s: Test %d failed on %s for %s\n",
-+ d, i, e, algo);
-+ hexdump(q, template[i].rlen);
-+ pr_err("should be:\n");
-+ hexdump(template[i].result, template[i].rlen);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ aead_request_free(req);
-+
-+ kfree(sg);
-+out_nosg:
-+ kfree(key);
-+out_nokey:
-+ kfree(iv);
-+out_noiv:
-+ testmgr_free_buf(axbuf);
-+out_noaxbuf:
-+ if (diff_dst)
-+ testmgr_free_buf(xoutbuf);
-+out_nooutbuf:
-+ testmgr_free_buf(xbuf);
-+out_noxbuf:
-+ return ret;
-+}
-+
-+static int test_tls(struct crypto_aead *tfm, int enc,
-+ struct tls_testvec *template, unsigned int tcount)
-+{
-+ int ret;
-+ /* test 'dst == src' case */
-+ ret = __test_tls(tfm, enc, template, tcount, false);
-+ if (ret)
-+ return ret;
-+ /* test 'dst != src' case */
-+ return __test_tls(tfm, enc, template, tcount, true);
-+}
-+
-+static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
-+ u32 type, u32 mask)
-+{
-+ struct crypto_aead *tfm;
-+ int err = 0;
-+
-+ tfm = crypto_alloc_aead(driver, type, mask);
-+ if (IS_ERR(tfm)) {
-+ pr_err("alg: aead: Failed to load transform for %s: %ld\n",
-+ driver, PTR_ERR(tfm));
-+ return PTR_ERR(tfm);
-+ }
-+
-+ if (desc->suite.tls.enc.vecs) {
-+ err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
-+ desc->suite.tls.enc.count);
-+ if (err)
-+ goto out;
-+ }
-+
-+ if (!err && desc->suite.tls.dec.vecs)
-+ err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
-+ desc->suite.tls.dec.count);
-+
-+out:
-+ crypto_free_aead(tfm);
-+ return err;
-+}
-+
- static int test_cipher(struct crypto_cipher *tfm, int enc,
- const struct cipher_testvec *template,
- unsigned int tcount)
-@@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
- .hash = __VECS(tgr192_tv_template)
- }
- }, {
-+ .alg = "tls10(hmac(sha1),cbc(aes))",
-+ .test = alg_test_tls,
-+ .suite = {
-+ .tls = {
-+ .enc = __VECS(tls_enc_tv_template),
-+ .dec = __VECS(tls_dec_tv_template)
-+ }
-+ }
-+ }, {
- .alg = "vmac(aes)",
- .test = alg_test_hash,
- .suite = {
---- a/crypto/testmgr.h
-+++ b/crypto/testmgr.h
-@@ -125,6 +125,20 @@ struct drbg_testvec {
- size_t expectedlen;
- };
-
-+struct tls_testvec {
-+ char *key; /* wrapped keys for encryption and authentication */
-+ char *iv; /* initialization vector */
-+ char *input; /* input data */
-+ char *assoc; /* associated data: seq num, type, version, input len */
-+ char *result; /* result data */
-+ unsigned char fail; /* the test failure is expected */
-+ unsigned char novrfy; /* dec verification failure expected */
-+ unsigned char klen; /* key length */
-+ unsigned short ilen; /* input data length */
-+ unsigned short alen; /* associated data length */
-+ unsigned short rlen; /* result length */
-+};
-+
- struct akcipher_testvec {
- const unsigned char *key;
- const unsigned char *m;
-@@ -153,6 +167,211 @@ struct kpp_testvec {
- static const char zeroed_string[48];
-
- /*
-+ * TLS1.0 synthetic test vectors
-+ */
-+static struct tls_testvec tls_enc_tv_template[] = {
-+ {
-+#ifdef __LITTLE_ENDIAN
-+ .key = "\x08\x00" /* rta length */
-+ "\x01\x00" /* rta type */
-+#else
-+ .key = "\x00\x08" /* rta length */
-+ "\x00\x01" /* rta type */
-+#endif
-+ "\x00\x00\x00\x10" /* enc key length */
-+ "authenticationkey20benckeyis16_bytes",
-+ .klen = 8 + 20 + 16,
-+ .iv = "iv0123456789abcd",
-+ .input = "Single block msg",
-+ .ilen = 16,
-+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
-+ "\x00\x03\x01\x00\x10",
-+ .alen = 13,
-+ .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
-+ "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
-+ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
-+ "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
-+ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
-+ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
-+ .rlen = 16 + 20 + 12,
-+ }, {
-+#ifdef __LITTLE_ENDIAN
-+ .key = "\x08\x00" /* rta length */
-+ "\x01\x00" /* rta type */
-+#else
-+ .key = "\x00\x08" /* rta length */
-+ "\x00\x01" /* rta type */
-+#endif
-+ "\x00\x00\x00\x10" /* enc key length */
-+ "authenticationkey20benckeyis16_bytes",
-+ .klen = 8 + 20 + 16,
-+ .iv = "iv0123456789abcd",
-+ .input = "",
-+ .ilen = 0,
-+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
-+ "\x00\x03\x01\x00\x00",
-+ .alen = 13,
-+ .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
-+ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
-+ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
-+ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
-+ .rlen = 20 + 12,
-+ }, {
-+#ifdef __LITTLE_ENDIAN
-+ .key = "\x08\x00" /* rta length */
-+ "\x01\x00" /* rta type */
-+#else
-+ .key = "\x00\x08" /* rta length */
-+ "\x00\x01" /* rta type */
-+#endif
-+ "\x00\x00\x00\x10" /* enc key length */
-+ "authenticationkey20benckeyis16_bytes",
-+ .klen = 8 + 20 + 16,
-+ .iv = "iv0123456789abcd",
-+ .input = "285 bytes plaintext285 bytes plaintext285 bytes"
-+ " plaintext285 bytes plaintext285 bytes plaintext285"
-+ " bytes plaintext285 bytes plaintext285 bytes"
-+ " plaintext285 bytes plaintext285 bytes plaintext285"
-+ " bytes plaintext285 bytes plaintext285 bytes"
-+ " plaintext285 bytes plaintext285 bytes plaintext285"
-+ " bytes plaintext285 bytes plaintext",
-+ .ilen = 285,
-+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
-+ "\x00\x03\x01\x01\x1d",
-+ .alen = 13,
-+ .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
-+ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
-+ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
-+ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
-+ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
-+ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
-+ "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
-+ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
-+ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
-+ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
-+ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
-+ "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
-+ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
-+ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
-+ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
-+ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
-+ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
-+ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
-+ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
-+ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
-+ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
-+ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
-+ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
-+ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
-+ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
-+ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
-+ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
-+ .rlen = 285 + 20 + 15,
-+ }
-+};
-+
-+static struct tls_testvec tls_dec_tv_template[] = {
-+ {
-+#ifdef __LITTLE_ENDIAN
-+ .key = "\x08\x00" /* rta length */
-+ "\x01\x00" /* rta type */
-+#else
-+ .key = "\x00\x08" /* rta length */
-+ "\x00\x01" /* rta type */
-+#endif
-+ "\x00\x00\x00\x10" /* enc key length */
-+ "authenticationkey20benckeyis16_bytes",
-+ .klen = 8 + 20 + 16,
-+ .iv = "iv0123456789abcd",
-+ .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
-+ "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
-+ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
-+ "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
-+ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
-+ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
-+ .ilen = 16 + 20 + 12,
-+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
-+ "\x00\x03\x01\x00\x30",
-+ .alen = 13,
-+ .result = "Single block msg",
-+ .rlen = 16,
-+ }, {
-+#ifdef __LITTLE_ENDIAN
-+ .key = "\x08\x00" /* rta length */
-+ "\x01\x00" /* rta type */
-+#else
-+ .key = "\x00\x08" /* rta length */
-+ "\x00\x01" /* rta type */
-+#endif
-+ "\x00\x00\x00\x10" /* enc key length */
-+ "authenticationkey20benckeyis16_bytes",
-+ .klen = 8 + 20 + 16,
-+ .iv = "iv0123456789abcd",
-+ .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
-+ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
-+ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
-+ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
-+ .ilen = 20 + 12,
-+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
-+ "\x00\x03\x01\x00\x20",
-+ .alen = 13,
-+ .result = "",
-+ .rlen = 0,
-+ }, {
-+#ifdef __LITTLE_ENDIAN
-+ .key = "\x08\x00" /* rta length */
-+ "\x01\x00" /* rta type */
-+#else
-+ .key = "\x00\x08" /* rta length */
-+ "\x00\x01" /* rta type */
-+#endif
-+ "\x00\x00\x00\x10" /* enc key length */
-+ "authenticationkey20benckeyis16_bytes",
-+ .klen = 8 + 20 + 16,
-+ .iv = "iv0123456789abcd",
-+ .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
-+ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
-+ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
-+ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
-+ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
-+ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
-+ "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
-+ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
-+ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
-+ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
-+ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
-+ "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
-+ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
-+ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
-+ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
-+ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
-+ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
-+ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
-+ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
-+ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
-+ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
-+ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
-+ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
-+ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
-+ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
-+ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
-+ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
-+
-+ .ilen = 285 + 20 + 15,
-+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
-+ "\x00\x03\x01\x01\x40",
-+ .alen = 13,
-+ .result = "285 bytes plaintext285 bytes plaintext285 bytes"
-+ " plaintext285 bytes plaintext285 bytes plaintext285"
-+ " bytes plaintext285 bytes plaintext285 bytes"
-+ " plaintext285 bytes plaintext285 bytes plaintext285"
-+ " bytes plaintext285 bytes plaintext285 bytes"
-+ " plaintext285 bytes plaintext285 bytes plaintext",
-+ .rlen = 285,
-+ }
-+};
-+
-+/*
- * RSA test vectors. Borrowed from openSSL.
- */
- static const struct akcipher_testvec rsa_tv_template[] = {
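The .key fields in the tls10 vectors above use the authenc() key blob convention parsed by crypto_authenc_extractkeys(): a native-endian rtattr header (length 8, type 1, i.e. CRYPTO_AUTHENC_KEYA_PARAM), a big-endian 32-bit encryption key length, then the authentication key followed by the encryption key. That is where klen = 8 + 20 + 16 comes from, and why the header bytes differ under the __LITTLE_ENDIAN ifdefs. A small user-space helper that packs the same layout is sketched below; the function name is made up for illustration.

    #include <arpa/inet.h>   /* htonl() */
    #include <stdint.h>
    #include <string.h>

    /* Pack a tls10/authenc key blob:
     * [u16 rta_len][u16 rta_type][be32 enckeylen][authkey][enckey].
     * rta_len and rta_type are native-endian, enckeylen is big-endian. */
    static size_t tls10_pack_key(uint8_t *out,
                                 const uint8_t *authkey, uint16_t authkeylen,
                                 const uint8_t *enckey, uint16_t enckeylen)
    {
            uint16_t rta_len = 8;               /* header (4) + enckeylen (4) */
            uint16_t rta_type = 1;              /* CRYPTO_AUTHENC_KEYA_PARAM */
            uint32_t be_enckeylen = htonl(enckeylen);

            memcpy(out, &rta_len, 2);
            memcpy(out + 2, &rta_type, 2);
            memcpy(out + 4, &be_enckeylen, 4);
            memcpy(out + 8, authkey, authkeylen);
            memcpy(out + 8 + authkeylen, enckey, enckeylen);
            return 8 + authkeylen + enckeylen;  /* 8 + 20 + 16 for the vectors */
    }

The 13-byte .assoc buffers follow the layout named in the struct comment: an 8-byte sequence number, a record type byte, the 0x03 0x01 version, and a 16-bit big-endian length, which is the field crypto_tls_decrypt() in the tls.c hunk below rewrites before recomputing the ICV.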
---- /dev/null
-+++ b/crypto/tls.c
-@@ -0,0 +1,607 @@
-+/*
-+ * Copyright 2013 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
-+ * any later version.
-+ *
-+ */
-+
-+#include <crypto/internal/aead.h>
-+#include <crypto/internal/hash.h>
-+#include <crypto/internal/skcipher.h>
-+#include <crypto/authenc.h>
-+#include <crypto/null.h>
-+#include <crypto/scatterwalk.h>
-+#include <linux/err.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/rtnetlink.h>
-+
-+struct tls_instance_ctx {
-+ struct crypto_ahash_spawn auth;
-+ struct crypto_skcipher_spawn enc;
-+};
-+
-+struct crypto_tls_ctx {
-+ unsigned int reqoff;
-+ struct crypto_ahash *auth;
-+ struct crypto_skcipher *enc;
-+ struct crypto_skcipher *null;
-+};
-+
-+struct tls_request_ctx {
-+ /*
-+ * cryptlen holds the payload length in the case of encryption or
-+ * payload_len + icv_len + padding_len in case of decryption
-+ */
-+ unsigned int cryptlen;
-+ /* working space for partial results */
-+ struct scatterlist tmp[2];
-+ struct scatterlist cipher[2];
-+ struct scatterlist dst[2];
-+ char tail[];
-+};
-+
-+struct async_op {
-+ struct completion completion;
-+ int err;
-+};
-+
-+static void tls_async_op_done(struct crypto_async_request *req, int err)
-+{
-+ struct async_op *areq = req->data;
-+
-+ if (err == -EINPROGRESS)
-+ return;
-+
-+ areq->err = err;
-+ complete(&areq->completion);
-+}
-+
-+static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
-+ struct crypto_ahash *auth = ctx->auth;
-+ struct crypto_skcipher *enc = ctx->enc;
-+ struct crypto_authenc_keys keys;
-+ int err = -EINVAL;
-+
-+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
-+ goto badkey;
-+
-+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
-+ crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
-+ CRYPTO_TFM_REQ_MASK);
-+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
-+ crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
-+ CRYPTO_TFM_RES_MASK);
-+
-+ if (err)
-+ goto out;
-+
-+ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
-+ crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
-+ CRYPTO_TFM_REQ_MASK);
-+ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
-+ crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
-+ CRYPTO_TFM_RES_MASK);
-+
-+out:
-+ return err;
-+
-+badkey:
-+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ goto out;
-+}
-+
-+/**
-+ * crypto_tls_genicv - Calculate hmac digest for a TLS record
-+ * @hash: (output) buffer to save the digest into
-+ * @src: (input) scatterlist with the assoc and payload data
-+ * @srclen: (input) size of the source buffer (assoclen + cryptlen)
-+ * @req: (input) aead request
-+ **/
-+static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
-+ unsigned int srclen, struct aead_request *req)
-+{
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
-+ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
-+ struct async_op ahash_op;
-+ struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
-+ unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-+ int err = -EBADMSG;
-+
-+ /* Bail out if the request assoc len is 0 */
-+ if (!req->assoclen)
-+ return err;
-+
-+ init_completion(&ahash_op.completion);
-+
-+ /* the hash transform to be executed comes from the original request */
-+ ahash_request_set_tfm(ahreq, ctx->auth);
-+ /* prepare the hash request with input data and result pointer */
-+ ahash_request_set_crypt(ahreq, src, hash, srclen);
-+ /* set the notifier for when the async hash function returns */
-+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-+ tls_async_op_done, &ahash_op);
-+
-+ /* Calculate the digest on the given data. The result is put in hash */
-+ err = crypto_ahash_digest(ahreq);
-+ if (err == -EINPROGRESS) {
-+ err = wait_for_completion_interruptible(&ahash_op.completion);
-+ if (!err)
-+ err = ahash_op.err;
-+ }
-+
-+ return err;
-+}
-+
-+/**
-+ * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
-+ * @hash: (output) buffer to save the digest and padding into
-+ * @phashlen: (output) the size of digest + padding
-+ * @req: (input) aead request
-+ **/
-+static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
-+ struct aead_request *req)
-+{
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ unsigned int hash_size = crypto_aead_authsize(tls);
-+ unsigned int block_size = crypto_aead_blocksize(tls);
-+ unsigned int srclen = req->cryptlen + hash_size;
-+ unsigned int icvlen = req->cryptlen + req->assoclen;
-+ unsigned int padlen;
-+ int err;
-+
-+ err = crypto_tls_genicv(hash, req->src, icvlen, req);
-+ if (err)
-+ goto out;
-+
-+ /* add padding after digest */
-+ padlen = block_size - (srclen % block_size);
-+ memset(hash + hash_size, padlen - 1, padlen);
-+
-+ *phashlen = hash_size + padlen;
-+out:
-+ return err;
-+}
-+
-+static int crypto_tls_copy_data(struct aead_request *req,
-+ struct scatterlist *src,
-+ struct scatterlist *dst,
-+ unsigned int len)
-+{
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
-+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-+
-+ skcipher_request_set_tfm(skreq, ctx->null);
-+ skcipher_request_set_callback(skreq, aead_request_flags(req),
-+ NULL, NULL);
-+ skcipher_request_set_crypt(skreq, src, dst, len, NULL);
-+
-+ return crypto_skcipher_encrypt(skreq);
-+}
-+
-+static int crypto_tls_encrypt(struct aead_request *req)
-+{
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
-+ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
-+ struct skcipher_request *skreq;
-+ struct scatterlist *cipher = treq_ctx->cipher;
-+ struct scatterlist *tmp = treq_ctx->tmp;
-+ struct scatterlist *sg, *src, *dst;
-+ unsigned int cryptlen, phashlen;
-+ u8 *hash = treq_ctx->tail;
-+ int err;
-+
-+ /*
-+ * The hash result is saved at the beginning of the tls request ctx
-+ * and is aligned as required by the hash transform. Enough space was
-+ * allocated in crypto_tls_init_tfm to accommodate the difference. The
-+ * requests themselves start later at treq_ctx->tail + ctx->reqoff so
-+ * the result is not overwritten by the second (cipher) request.
-+ */
-+ hash = (u8 *)ALIGN((unsigned long)hash +
-+ crypto_ahash_alignmask(ctx->auth),
-+ crypto_ahash_alignmask(ctx->auth) + 1);
-+
-+ /*
-+ * STEP 1: create ICV together with necessary padding
-+ */
-+ err = crypto_tls_gen_padicv(hash, &phashlen, req);
-+ if (err)
-+ return err;
-+
-+ /*
-+ * STEP 2: Hash and padding are combined with the payload
-+ * depending on the form it arrives. Scatter tables must have at least
-+ * one page of data before chaining with another table and can't have
-+ * an empty data page. The following code addresses these requirements.
-+ *
-+ * If the payload is empty, only the hash is encrypted, otherwise the
-+ * payload scatterlist is merged with the hash. A special merging case
-+ * is when the payload has only one page of data. In that case the
-+ * payload page is moved to another scatterlist and prepared there for
-+ * encryption.
-+ */
-+ if (req->cryptlen) {
-+ src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
-+
-+ sg_init_table(cipher, 2);
-+ sg_set_buf(cipher + 1, hash, phashlen);
-+
-+ if (sg_is_last(src)) {
-+ sg_set_page(cipher, sg_page(src), req->cryptlen,
-+ src->offset);
-+ src = cipher;
-+ } else {
-+ unsigned int rem_len = req->cryptlen;
-+
-+ for (sg = src; rem_len > sg->length; sg = sg_next(sg))
-+ rem_len -= min(rem_len, sg->length);
-+
-+ sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
-+ sg_chain(sg, 1, cipher);
-+ }
-+ } else {
-+ sg_init_one(cipher, hash, phashlen);
-+ src = cipher;
-+ }
-+
-+ /**
-+ * If src != dst copy the associated data from source to destination.
-+ * In both cases fast-forward past the associated data in the dest.
-+ */
-+ if (req->src != req->dst) {
-+ err = crypto_tls_copy_data(req, req->src, req->dst,
-+ req->assoclen);
-+ if (err)
-+ return err;
-+ }
-+ dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
-+
-+ /*
-+ * STEP 3: encrypt the frame and return the result
-+ */
-+ cryptlen = req->cryptlen + phashlen;
-+
-+ /*
-+ * The hash and the cipher are applied at different times and their
-+ * requests can use the same memory space without interference
-+ */
-+ skreq = (void *)(treq_ctx->tail + ctx->reqoff);
-+ skcipher_request_set_tfm(skreq, ctx->enc);
-+ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
-+ skcipher_request_set_callback(skreq, aead_request_flags(req),
-+ req->base.complete, req->base.data);
-+ /*
-+ * Apply the cipher transform. The result will be in req->dst when the
-+ * asynchronous call terminates
-+ */
-+ err = crypto_skcipher_encrypt(skreq);
-+
-+ return err;
-+}
-+
-+static int crypto_tls_decrypt(struct aead_request *req)
-+{
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
-+ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
-+ unsigned int cryptlen = req->cryptlen;
-+ unsigned int hash_size = crypto_aead_authsize(tls);
-+ unsigned int block_size = crypto_aead_blocksize(tls);
-+ struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
-+ struct scatterlist *tmp = treq_ctx->tmp;
-+ struct scatterlist *src, *dst;
-+
-+ u8 padding[255]; /* padding can be 0-255 bytes */
-+ u8 pad_size;
-+ u16 *len_field;
-+ u8 *ihash, *hash = treq_ctx->tail;
-+
-+ int paderr = 0;
-+ int err = -EINVAL;
-+ int i;
-+ struct async_op ciph_op;
-+
-+ /*
-+ * Rule out bad packets. The input packet length must be at least one
-+ * byte more than the hash_size
-+ */
-+ if (cryptlen <= hash_size || cryptlen % block_size)
-+ goto out;
-+
-+ /*
-+ * Step 1 - Decrypt the source. Fast-forward past the associated data
-+ * to the encrypted data. The result will be overwritten in place so
-+ * that the decrypted data will be adjacent to the associated data. The
-+ * last step (computing the hash) will have its input data already
-+ * prepared and ready to be accessed at req->src.
-+ */
-+ src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
-+ dst = src;
-+
-+ init_completion(&ciph_op.completion);
-+ skcipher_request_set_tfm(skreq, ctx->enc);
-+ skcipher_request_set_callback(skreq, aead_request_flags(req),
-+ tls_async_op_done, &ciph_op);
-+ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
-+ err = crypto_skcipher_decrypt(skreq);
-+ if (err == -EINPROGRESS) {
-+ err = wait_for_completion_interruptible(&ciph_op.completion);
-+ if (!err)
-+ err = ciph_op.err;
-+ }
-+ if (err)
-+ goto out;
-+
-+ /*
-+ * Step 2 - Verify padding
-+ * Retrieve the last byte of the payload; this is the padding size.
-+ */
-+ cryptlen -= 1;
-+ scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
-+
-+ /* RFC recommendation for invalid padding size. */
-+ if (cryptlen < pad_size + hash_size) {
-+ pad_size = 0;
-+ paderr = -EBADMSG;
-+ }
-+ cryptlen -= pad_size;
-+ scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
-+
-+ /* Padding content must be equal with pad_size. We verify it all */
-+ for (i = 0; i < pad_size; i++)
-+ if (padding[i] != pad_size)
-+ paderr = -EBADMSG;
-+
-+ /*
-+ * Step 3 - Verify hash
-+ * Align the digest result as required by the hash transform. Enough
-+ * space was allocated in crypto_tls_init_tfm
-+ */
-+ hash = (u8 *)ALIGN((unsigned long)hash +
-+ crypto_ahash_alignmask(ctx->auth),
-+ crypto_ahash_alignmask(ctx->auth) + 1);
-+ /*
-+ * Two bytes at the end of the associated data make the length field.
-+ * It must be updated with the length of the cleartext message before
-+ * the hash is calculated.
-+ */
-+ len_field = sg_virt(req->src) + req->assoclen - 2;
-+ cryptlen -= hash_size;
-+ *len_field = htons(cryptlen);
-+
-+ /* This is the hash from the decrypted packet. Save it for later */
-+ ihash = hash + hash_size;
-+ scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
-+
-+ /* Now compute and compare our ICV with the one from the packet */
-+ err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
-+ if (!err)
-+ err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
-+
-+ if (req->src != req->dst) {
-+ err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
-+ req->assoclen);
-+ if (err)
-+ goto out;
-+ }
-+
-+ /* return the first found error */
-+ if (paderr)
-+ err = paderr;
-+
-+out:
-+ aead_request_complete(req, err);
-+ return err;
-+}
-+
-+static int crypto_tls_init_tfm(struct crypto_aead *tfm)
-+{
-+ struct aead_instance *inst = aead_alg_instance(tfm);
-+ struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
-+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
-+ struct crypto_ahash *auth;
-+ struct crypto_skcipher *enc;
-+ struct crypto_skcipher *null;
-+ int err;
-+
-+ auth = crypto_spawn_ahash(&ictx->auth);
-+ if (IS_ERR(auth))
-+ return PTR_ERR(auth);
-+
-+ enc = crypto_spawn_skcipher(&ictx->enc);
-+ err = PTR_ERR(enc);
-+ if (IS_ERR(enc))
-+ goto err_free_ahash;
-+
-+ null = crypto_get_default_null_skcipher2();
-+ err = PTR_ERR(null);
-+ if (IS_ERR(null))
-+ goto err_free_skcipher;
-+
-+ ctx->auth = auth;
-+ ctx->enc = enc;
-+ ctx->null = null;
-+
-+ /*
-+ * Allow enough space for two digests. The two digests will be compared
-+ * during the decryption phase. One will come from the decrypted packet
-+ * and the other will be calculated. For encryption, one digest is
-+ * padded (up to a cipher blocksize) and chained with the payload
-+ */
-+ ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
-+ crypto_ahash_alignmask(auth),
-+ crypto_ahash_alignmask(auth) + 1) +
-+ max(crypto_ahash_digestsize(auth),
-+ crypto_skcipher_blocksize(enc));
-+
-+ crypto_aead_set_reqsize(tfm,
-+ sizeof(struct tls_request_ctx) +
-+ ctx->reqoff +
-+ max_t(unsigned int,
-+ crypto_ahash_reqsize(auth) +
-+ sizeof(struct ahash_request),
-+ crypto_skcipher_reqsize(enc) +
-+ sizeof(struct skcipher_request)));
-+
-+ return 0;
-+
-+err_free_skcipher:
-+ crypto_free_skcipher(enc);
-+err_free_ahash:
-+ crypto_free_ahash(auth);
-+ return err;
-+}
-+
-+static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
-+{
-+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
-+
-+ crypto_free_ahash(ctx->auth);
-+ crypto_free_skcipher(ctx->enc);
-+ crypto_put_default_null_skcipher2();
-+}
-+
-+static void crypto_tls_free(struct aead_instance *inst)
-+{
-+ struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
-+
-+ crypto_drop_skcipher(&ctx->enc);
-+ crypto_drop_ahash(&ctx->auth);
-+ kfree(inst);
-+}
-+
-+static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
-+{
-+ struct crypto_attr_type *algt;
-+ struct aead_instance *inst;
-+ struct hash_alg_common *auth;
-+ struct crypto_alg *auth_base;
-+ struct skcipher_alg *enc;
-+ struct tls_instance_ctx *ctx;
-+ const char *enc_name;
-+ int err;
-+
-+ algt = crypto_get_attr_type(tb);
-+ if (IS_ERR(algt))
-+ return PTR_ERR(algt);
-+
-+ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-+ return -EINVAL;
-+
-+ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
-+ CRYPTO_ALG_TYPE_AHASH_MASK |
-+ crypto_requires_sync(algt->type, algt->mask));
-+ if (IS_ERR(auth))
-+ return PTR_ERR(auth);
-+
-+ auth_base = &auth->base;
-+
-+ enc_name = crypto_attr_alg_name(tb[2]);
-+ err = PTR_ERR(enc_name);
-+ if (IS_ERR(enc_name))
-+ goto out_put_auth;
-+
-+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
-+ err = -ENOMEM;
-+ if (!inst)
-+ goto out_put_auth;
-+
-+ ctx = aead_instance_ctx(inst);
-+
-+ err = crypto_init_ahash_spawn(&ctx->auth, auth,
-+ aead_crypto_instance(inst));
-+ if (err)
-+ goto err_free_inst;
-+
-+ crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
-+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
-+ crypto_requires_sync(algt->type,
-+ algt->mask));
-+ if (err)
-+ goto err_drop_auth;
-+
-+ enc = crypto_spawn_skcipher_alg(&ctx->enc);
-+
-+ err = -ENAMETOOLONG;
-+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-+ "tls10(%s,%s)", auth_base->cra_name,
-+ enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
-+ goto err_drop_enc;
-+
-+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-+ "tls10(%s,%s)", auth_base->cra_driver_name,
-+ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
-+ goto err_drop_enc;
-+
-+ inst->alg.base.cra_flags = (auth_base->cra_flags |
-+ enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
-+ inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
-+ auth_base->cra_priority;
-+ inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
-+ inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
-+ enc->base.cra_alignmask;
-+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
-+
-+ inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
-+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
-+ inst->alg.maxauthsize = auth->digestsize;
-+
-+ inst->alg.init = crypto_tls_init_tfm;
-+ inst->alg.exit = crypto_tls_exit_tfm;
-+
-+ inst->alg.setkey = crypto_tls_setkey;
-+ inst->alg.encrypt = crypto_tls_encrypt;
-+ inst->alg.decrypt = crypto_tls_decrypt;
-+
-+ inst->free = crypto_tls_free;
-+
-+ err = aead_register_instance(tmpl, inst);
-+ if (err)
-+ goto err_drop_enc;
-+
-+out:
-+ crypto_mod_put(auth_base);
-+ return err;
-+
-+err_drop_enc:
-+ crypto_drop_skcipher(&ctx->enc);
-+err_drop_auth:
-+ crypto_drop_ahash(&ctx->auth);
-+err_free_inst:
-+ kfree(inst);
-+out_put_auth:
-+ goto out;
-+}
-+
-+static struct crypto_template crypto_tls_tmpl = {
-+ .name = "tls10",
-+ .create = crypto_tls_create,
-+ .module = THIS_MODULE,
-+};
-+
-+static int __init crypto_tls_module_init(void)
-+{
-+ return crypto_register_template(&crypto_tls_tmpl);
-+}
-+
-+static void __exit crypto_tls_module_exit(void)
-+{
-+ crypto_unregister_template(&crypto_tls_tmpl);
-+}
-+
-+module_init(crypto_tls_module_init);
-+module_exit(crypto_tls_module_exit);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("TLS 1.0 record encryption");
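crypto_tls_encrypt() above is straight TLS 1.0 MAC-then-pad-then-encrypt: crypto_tls_gen_padicv() appends the HMAC digest to the payload and then pads the total up to the cipher block size with padlen bytes that each carry the value padlen - 1, the same convention crypto_tls_decrypt() verifies byte by byte. A worked sizing helper, consistent with the first synthetic test vector (16-byte payload, HMAC-SHA1, AES-CBC, rlen = 16 + 20 + 12), is sketched below; it is illustrative only.

    /* Illustrative restatement of the sizing done by crypto_tls_gen_padicv(). */
    static unsigned int tls10_record_len(unsigned int payload_len,
                                         unsigned int hash_size,  /* 20 for SHA-1 */
                                         unsigned int block_size) /* 16 for AES   */
    {
            unsigned int srclen = payload_len + hash_size;
            unsigned int padlen = block_size - (srclen % block_size);

            /* padlen is always 1..block_size, so the result is block aligned:
             * 16 + 20 gives padlen = 12 and a 48-byte encrypted record. */
            return srclen + padlen;
    }

Decryption walks this in reverse: read the trailing padding-length byte, check that many padding bytes, strip hash_size bytes of ICV, patch the length field in the associated data, and compare against a freshly computed digest.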
---- a/drivers/crypto/Makefile
-+++ b/drivers/crypto/Makefile
-@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
- obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
- obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
- obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
--obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
-+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
- obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
- obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
- obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
---- a/drivers/crypto/caam/Kconfig
-+++ b/drivers/crypto/caam/Kconfig
-@@ -1,7 +1,17 @@
-+config CRYPTO_DEV_FSL_CAAM_COMMON
-+ tristate
-+
-+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
-+ tristate
-+
-+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
-+ tristate
-+
- config CRYPTO_DEV_FSL_CAAM
-- tristate "Freescale CAAM-Multicore driver backend"
-+ tristate "Freescale CAAM-Multicore platform driver backend"
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
- select SOC_BUS
-+ select CRYPTO_DEV_FSL_CAAM_COMMON
- help
- Enables the driver module for Freescale's Cryptographic Accelerator
- and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
-@@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
- To compile this driver as a module, choose M here: the module
- will be called caam.
-
-+if CRYPTO_DEV_FSL_CAAM
-+
-+config CRYPTO_DEV_FSL_CAAM_DEBUG
-+ bool "Enable debug output in CAAM driver"
-+ help
-+ Selecting this will enable printing of various debug
-+ information in the CAAM driver.
-+
- config CRYPTO_DEV_FSL_CAAM_JR
- tristate "Freescale CAAM Job Ring driver backend"
-- depends on CRYPTO_DEV_FSL_CAAM
- default y
- help
- Enables the driver module for Job Rings which are part of
-@@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
- To compile this driver as a module, choose M here: the module
- will be called caam_jr.
-
-+if CRYPTO_DEV_FSL_CAAM_JR
-+
- config CRYPTO_DEV_FSL_CAAM_RINGSIZE
- int "Job Ring size"
-- depends on CRYPTO_DEV_FSL_CAAM_JR
- range 2 9
- default "9"
- help
-@@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
-
- config CRYPTO_DEV_FSL_CAAM_INTC
- bool "Job Ring interrupt coalescing"
-- depends on CRYPTO_DEV_FSL_CAAM_JR
- help
- Enable the Job Ring's interrupt coalescing feature.
-
-@@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
- threshold. Range is 1-65535.
-
- config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
-- tristate "Register algorithm implementations with the Crypto API"
-- depends on CRYPTO_DEV_FSL_CAAM_JR
-+ bool "Register algorithm implementations with the Crypto API"
- default y
-+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- select CRYPTO_AEAD
- select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
-@@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- scatterlist crypto API (such as the linux native IPSec
- stack) to the SEC4 via job ring.
-
-- To compile this as a module, choose M here: the module
-- will be called caamalg.
--
- config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
-- tristate "Queue Interface as Crypto API backend"
-- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
-+ bool "Queue Interface as Crypto API backend"
-+ depends on FSL_SDK_DPA && NET
- default y
-+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
- help
-@@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
- assigned to the kernel should also be more than the number of
- job rings.
-
-- To compile this as a module, choose M here: the module
-- will be called caamalg_qi.
--
- config CRYPTO_DEV_FSL_CAAM_AHASH_API
-- tristate "Register hash algorithm implementations with Crypto API"
-- depends on CRYPTO_DEV_FSL_CAAM_JR
-+ bool "Register hash algorithm implementations with Crypto API"
- default y
-+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
- select CRYPTO_HASH
- help
- Selecting this will offload ahash for users of the
- scatterlist crypto API to the SEC4 via job ring.
-
-- To compile this as a module, choose M here: the module
-- will be called caamhash.
--
- config CRYPTO_DEV_FSL_CAAM_PKC_API
-- tristate "Register public key cryptography implementations with Crypto API"
-- depends on CRYPTO_DEV_FSL_CAAM_JR
-+ bool "Register public key cryptography implementations with Crypto API"
- default y
- select CRYPTO_RSA
- help
- Selecting this will allow SEC Public key support for RSA.
- Supported cryptographic primitives: encryption, decryption,
- signature and verification.
-- To compile this as a module, choose M here: the module
-- will be called caam_pkc.
-
- config CRYPTO_DEV_FSL_CAAM_RNG_API
-- tristate "Register caam device for hwrng API"
-- depends on CRYPTO_DEV_FSL_CAAM_JR
-+ bool "Register caam device for hwrng API"
- default y
- select CRYPTO_RNG
- select HW_RANDOM
-@@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
- Selecting this will register the SEC4 hardware rng to
- the hw_random API for supplying the kernel entropy pool.
-
-- To compile this as a module, choose M here: the module
-- will be called caamrng.
-+endif # CRYPTO_DEV_FSL_CAAM_JR
-
--config CRYPTO_DEV_FSL_CAAM_DEBUG
-- bool "Enable debug output in CAAM driver"
-- depends on CRYPTO_DEV_FSL_CAAM
-- help
-- Selecting this will enable printing of various debug
-- information in the CAAM driver.
-+endif # CRYPTO_DEV_FSL_CAAM
-
--config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
-- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
-- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
-+config CRYPTO_DEV_FSL_DPAA2_CAAM
-+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
-+ depends on FSL_MC_DPIO
-+ select CRYPTO_DEV_FSL_CAAM_COMMON
-+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
-+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
-+ select CRYPTO_BLKCIPHER
-+ select CRYPTO_AUTHENC
-+ select CRYPTO_AEAD
-+ select CRYPTO_HASH
-+ ---help---
-+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
-+ It handles DPSECI DPAA2 objects that sit on the Management Complex
-+ (MC) fsl-mc bus.
-+
-+ To compile this as a module, choose M here: the module
-+ will be called dpaa2_caam.
---- a/drivers/crypto/caam/Makefile
-+++ b/drivers/crypto/caam/Makefile
-@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
- ccflags-y := -DDEBUG
- endif
-
-+ccflags-y += -DVERSION=\"\"
-+
-+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
--obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
--obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
--obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
--obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
--obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
--
--caam-objs := ctrl.o
--caam_jr-objs := jr.o key_gen.o error.o
--caam_pkc-y := caampkc.o pkc_desc.o
-+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
-+
-+caam-y := ctrl.o
-+caam_jr-y := jr.o key_gen.o
-+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
-+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
-+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
-+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
-+
-+caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
- ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
- ccflags-y += -DCONFIG_CAAM_QI
-- caam-objs += qi.o
- endif
-+
-+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
-+
-+dpaa2_caam-y := caamalg_qi2.o dpseci.o
---- a/drivers/crypto/caam/caamalg.c
-+++ b/drivers/crypto/caam/caamalg.c
-@@ -71,6 +71,8 @@
- #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
- CAAM_CMD_SZ * 5)
-
-+#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
-+
- #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
- #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
-
-@@ -108,6 +110,7 @@ struct caam_ctx {
- dma_addr_t sh_desc_dec_dma;
- dma_addr_t sh_desc_givenc_dma;
- dma_addr_t key_dma;
-+ enum dma_data_direction dir;
- struct device *jrdev;
- struct alginfo adata;
- struct alginfo cdata;
-@@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
- ctx->adata.keylen_pad;
-@@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-- cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
-+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
-+ ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
-@@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
-- cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
-+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
-+ ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- return 0;
- }
-@@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
- unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- u32 ctx1_iv_off = 0;
- u32 *desc, *nonce = NULL;
- u32 inl_mask;
-@@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
- desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
- ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
-- false);
-+ false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- skip_enc:
- /*
-@@ -266,9 +273,9 @@ skip_enc:
- desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
- ctx->authsize, alg->caam.geniv, is_rfc3686,
-- nonce, ctx1_iv_off, false);
-+ nonce, ctx1_iv_off, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- if (!alg->caam.geniv)
- goto skip_givenc;
-@@ -300,9 +307,9 @@ skip_enc:
- desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
- ctx->authsize, is_rfc3686, nonce,
-- ctx1_iv_off, false);
-+ ctx1_iv_off, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- skip_givenc:
- return 0;
-@@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
- ctx->cdata.keylen;
-@@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
- }
-
- desc = ctx->sh_desc_enc;
-- cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
-+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
-@@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
- }
-
- desc = ctx->sh_desc_dec;
-- cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
-+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- return 0;
- }
-@@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
- ctx->cdata.keylen;
-@@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_enc;
-- cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
-+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
-@@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_dec;
-- cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
-+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- return 0;
- }
-@@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
- u32 *desc;
- int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
- ctx->cdata.keylen;
-@@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_enc;
-- cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
-+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- /*
- * Job Descriptor and Shared Descriptors
-@@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
- }
-
- desc = ctx->sh_desc_dec;
-- cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
-+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ false);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- return 0;
- }
-@@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
- return 0;
- }
-
-+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *jrdev = ctx->jrdev;
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ u32 *desc;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ desc = ctx->sh_desc_enc;
-+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
-+ ctx->authsize, true, false);
-+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), ctx->dir);
-+
-+ desc = ctx->sh_desc_dec;
-+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
-+ ctx->authsize, false, false);
-+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), ctx->dir);
-+
-+ return 0;
-+}
-+
-+static int chachapoly_setauthsize(struct crypto_aead *aead,
-+ unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+
-+ if (authsize != POLY1305_DIGEST_SIZE)
-+ return -EINVAL;
-+
-+ ctx->authsize = authsize;
-+ return chachapoly_set_sh_desc(aead);
-+}
-+
-+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-+
-+ if (keylen != CHACHA20_KEY_SIZE + saltlen) {
-+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+ }
-+
-+ ctx->cdata.key_virt = key;
-+ ctx->cdata.keylen = keylen - saltlen;
-+
-+ return chachapoly_set_sh_desc(aead);
-+}
-+
- static int aead_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int keylen)
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- struct crypto_authenc_keys keys;
- int ret = 0;
-
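chachapoly_setkey() above derives the salt length from the IV size, so the same code serves both the plain rfc7539 template (12-byte nonce used directly as the IV, no salt, 32-byte key) and the IPsec-style rfc7539esp variant (8-byte per-request IV plus a 4-byte salt carried at the end of the key); the matching IV placement is handled further down in init_chachapoly_job(). A minimal restatement of that arithmetic is sketched below, with the two constants assumed from the generic ChaCha20-Poly1305 definitions.

    /* Illustrative only; mirrors the saltlen math in chachapoly_setkey(). */
    #define CHACHA20_KEY_SIZE   32
    #define CHACHAPOLY_IV_SIZE  12  /* full ChaCha20-Poly1305 nonce */

    static unsigned int chachapoly_expected_keylen(unsigned int ivsize)
    {
            unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

            /* rfc7539:    ivsize = 12 -> saltlen = 0 -> setkey length 32
             * rfc7539esp: ivsize =  8 -> saltlen = 4 -> setkey length 36 */
            return CHACHA20_KEY_SIZE + saltlen;
    }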
-@@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- #endif
-
-+ /*
-+ * If DKP is supported, use it in the shared descriptor to generate
-+ * the split key.
-+ */
-+ if (ctrlpriv->era >= 6) {
-+ ctx->adata.keylen = keys.authkeylen;
-+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
-+ goto badkey;
-+
-+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
-+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
-+ keys.enckeylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma,
-+ ctx->adata.keylen_pad +
-+ keys.enckeylen, ctx->dir);
-+ goto skip_split_key;
-+ }
-+
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
- keys.authkeylen, CAAM_MAX_KEY_SIZE -
- keys.enckeylen);
-@@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
- /* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-- keys.enckeylen, DMA_TO_DEVICE);
-+ keys.enckeylen, ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
- #endif
-+
-+skip_split_key:
- ctx->cdata.keylen = keys.enckeylen;
- return aead_set_sh_desc(aead);
- badkey:
-@@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
- #endif
-
- memcpy(ctx->key, key, keylen);
-- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
- ctx->cdata.keylen = keylen;
-
- return gcm_set_sh_desc(aead);
-@@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
- */
- ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-- DMA_TO_DEVICE);
-+ ctx->dir);
- return rfc4106_set_sh_desc(aead);
- }
-
-@@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
- */
- ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-- DMA_TO_DEVICE);
-+ ctx->dir);
- return rfc4543_set_sh_desc(aead);
- }
-
-@@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- /* ablkcipher_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- /* ablkcipher_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- return 0;
- }
-@@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
- desc = ctx->sh_desc_enc;
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- /* xts_ablkcipher_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
-
- return 0;
- }
-@@ -989,9 +1082,6 @@ static void init_aead_job(struct aead_re
- append_seq_out_ptr(desc, dst_dma,
- req->assoclen + req->cryptlen - authsize,
- out_options);
--
-- /* REG3 = assoclen */
-- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
- }
-
- static void init_gcm_job(struct aead_request *req,
-@@ -1006,6 +1096,7 @@ static void init_gcm_job(struct aead_req
- unsigned int last;
-
- init_aead_job(req, edesc, all_contig, encrypt);
-+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
-
- /* BUG This should not be specific to generic GCM. */
- last = 0;
-@@ -1023,6 +1114,40 @@ static void init_gcm_job(struct aead_req
- /* End of blank commands */
- }
-
-+static void init_chachapoly_job(struct aead_request *req,
-+ struct aead_edesc *edesc, bool all_contig,
-+ bool encrypt)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ unsigned int assoclen = req->assoclen;
-+ u32 *desc = edesc->hw_desc;
-+ u32 ctx_iv_off = 4;
-+
-+ init_aead_job(req, edesc, all_contig, encrypt);
-+
-+ if (ivsize != CHACHAPOLY_IV_SIZE) {
-+ /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
-+ ctx_iv_off += 4;
-+
-+ /*
-+ * The associated data already includes the IV, but we need to
-+ * skip it when authenticating or encrypting...
-+ */
-+ assoclen -= ivsize;
-+ }
-+
-+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
-+
-+ /*
-+ * For IPsec, load the IV further into the same register.
-+ * For RFC7539, simply load the 12-byte nonce in a single operation.
-+ */
-+ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT |
-+ ctx_iv_off << LDST_OFFSET_SHIFT);
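-+
-+ /*
-+ * Editorial note, added for clarity (not in the original patch): the
-+ * offsets mirror the shared descriptor's CONTEXT1 layout - the rfc7539
-+ * 96-bit nonce is loaded at byte offset 4, while for the IPsec variant
-+ * the 4-byte salt occupies offset 4 and the 8-byte IV follows at
-+ * offset 8, hence ctx_iv_off of 4 vs 8.
-+ */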
-+}
-+
- static void init_authenc_job(struct aead_request *req,
- struct aead_edesc *edesc,
- bool all_contig, bool encrypt)
-@@ -1032,6 +1157,7 @@ static void init_authenc_job(struct aead
- struct caam_aead_alg, aead);
- unsigned int ivsize = crypto_aead_ivsize(aead);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = alg->caam.rfc3686;
-@@ -1055,6 +1181,15 @@ static void init_authenc_job(struct aead
-
- init_aead_job(req, edesc, all_contig, encrypt);
-
-+ /*
-+ * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
-+ * having DPOVRD as destination.
-+ */
-+ if (ctrlpriv->era < 3)
-+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
-+ else
-+ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
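-+
-+ /*
-+ * Editorial note, added for clarity (not in the original patch): this
-+ * pairs with the cnstr_shdsc_aead_* descriptors below, which read
-+ * assoclen from DPOVRD rather than REG3 when era >= 3 and not running
-+ * via caam/qi.
-+ */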
-+
- if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
- append_load_as_imm(desc, req->iv, ivsize,
- LDST_CLASS_1_CCB |
-@@ -1227,8 +1362,16 @@ static struct aead_edesc *aead_edesc_all
- }
- }
-
-+ /*
-+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-+ * the end of the table by allocating more S/G entries.
-+ */
- sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
-- sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-+ if (mapped_dst_nents > 1)
-+ sec4_sg_len += ALIGN(mapped_dst_nents, 4);
-+ else
-+ sec4_sg_len = ALIGN(sec4_sg_len, 4);
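-+
-+ /*
-+ * Editorial note, added for clarity (not in the original patch):
-+ * ALIGN(x, 4) rounds x up to the next multiple of 4 (e.g.
-+ * ALIGN(5, 4) == 8), so the S/G table always ends on a 4-entry
-+ * boundary and the engine's 4-entry burst reads stay inside the
-+ * allocation.
-+ */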
-+
- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
- /* allocate space for base edesc and hw desc commands, link tables */
-@@ -1309,6 +1452,72 @@ static int gcm_encrypt(struct aead_reque
- return ret;
- }
-
-+static int chachapoly_encrypt(struct aead_request *req)
-+{
-+ struct aead_edesc *edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *jrdev = ctx->jrdev;
-+ bool all_contig;
-+ u32 *desc;
-+ int ret;
-+
-+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
-+ true);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ desc = edesc->hw_desc;
-+
-+ init_chachapoly_job(req, edesc, all_contig, true);
-+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
-+ 1);
-+
-+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
-+ if (!ret) {
-+ ret = -EINPROGRESS;
-+ } else {
-+ aead_unmap(jrdev, edesc, req);
-+ kfree(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static int chachapoly_decrypt(struct aead_request *req)
-+{
-+ struct aead_edesc *edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *jrdev = ctx->jrdev;
-+ bool all_contig;
-+ u32 *desc;
-+ int ret;
-+
-+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
-+ false);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ desc = edesc->hw_desc;
-+
-+ init_chachapoly_job(req, edesc, all_contig, false);
-+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
-+ 1);
-+
-+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
-+ if (!ret) {
-+ ret = -EINPROGRESS;
-+ } else {
-+ aead_unmap(jrdev, edesc, req);
-+ kfree(edesc);
-+ }
-+
-+ return ret;
-+}
-+
- static int ipsec_gcm_encrypt(struct aead_request *req)
- {
- if (req->assoclen < 8)
-@@ -1496,7 +1705,25 @@ static struct ablkcipher_edesc *ablkciph
-
- sec4_sg_ents = 1 + mapped_src_nents;
- dst_sg_idx = sec4_sg_ents;
-- sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-+
-+ /*
-+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-+ * the end of the table by allocating more S/G entries. Logic:
-+ * if (src != dst && output S/G)
-+ * pad output S/G, if needed
-+ * else if (src == dst && S/G)
-+ * overlapping S/Gs; pad one of them
-+ * else if (input S/G) ...
-+ * pad input S/G, if needed
-+ */
-+ if (mapped_dst_nents > 1)
-+ sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
-+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
-+ sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
-+ 1 + ALIGN(mapped_src_nents, 4));
-+ else
-+ sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
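-+
-+ /*
-+ * Editorial note, added for clarity (not in the original patch): in
-+ * the in-place (src == dst) case both candidate sizes are already
-+ * rounded up to a multiple of 4 entries; max() keeps the larger one so
-+ * the single table backing the overlapping input and output views is
-+ * big enough for whichever extends further.
-+ */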
-+
- sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
-
- /*
-@@ -3199,6 +3426,50 @@ static struct caam_aead_alg driver_aeads
- .geniv = true,
- },
- },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc7539(chacha20,poly1305)",
-+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
-+ "caam",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = chachapoly_setkey,
-+ .setauthsize = chachapoly_setauthsize,
-+ .encrypt = chachapoly_encrypt,
-+ .decrypt = chachapoly_decrypt,
-+ .ivsize = CHACHAPOLY_IV_SIZE,
-+ .maxauthsize = POLY1305_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
-+ OP_ALG_AAI_AEAD,
-+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
-+ OP_ALG_AAI_AEAD,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc7539esp(chacha20,poly1305)",
-+ .cra_driver_name = "rfc7539esp-chacha20-"
-+ "poly1305-caam",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = chachapoly_setkey,
-+ .setauthsize = chachapoly_setauthsize,
-+ .encrypt = chachapoly_encrypt,
-+ .decrypt = chachapoly_decrypt,
-+ .ivsize = 8,
-+ .maxauthsize = POLY1305_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
-+ OP_ALG_AAI_AEAD,
-+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
-+ OP_ALG_AAI_AEAD,
-+ },
-+ },
- };
-
- struct caam_crypto_alg {
-@@ -3207,9 +3478,11 @@ struct caam_crypto_alg {
- struct caam_alg_entry caam;
- };
-
--static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
-+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
-+ bool uses_dkp)
- {
- dma_addr_t dma_addr;
-+ struct caam_drv_private *priv;
-
- ctx->jrdev = caam_jr_alloc();
- if (IS_ERR(ctx->jrdev)) {
-@@ -3217,10 +3490,16 @@ static int caam_init_common(struct caam_
- return PTR_ERR(ctx->jrdev);
- }
-
-+ priv = dev_get_drvdata(ctx->jrdev->parent);
-+ if (priv->era >= 6 && uses_dkp)
-+ ctx->dir = DMA_BIDIRECTIONAL;
-+ else
-+ ctx->dir = DMA_TO_DEVICE;
-+
- dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
- offsetof(struct caam_ctx,
- sh_desc_enc_dma),
-- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
-+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(ctx->jrdev, dma_addr)) {
- dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
- caam_jr_free(ctx->jrdev);
-@@ -3248,7 +3527,7 @@ static int caam_cra_init(struct crypto_t
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-
-- return caam_init_common(ctx, &caam_alg->caam);
-+ return caam_init_common(ctx, &caam_alg->caam, false);
- }
-
- static int caam_aead_init(struct crypto_aead *tfm)
-@@ -3258,14 +3537,15 @@ static int caam_aead_init(struct crypto_
- container_of(alg, struct caam_aead_alg, aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
-
-- return caam_init_common(ctx, &caam_alg->caam);
-+ return caam_init_common(ctx, &caam_alg->caam,
-+ alg->setkey == aead_setkey);
- }
-
- static void caam_exit_common(struct caam_ctx *ctx)
- {
- dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
- offsetof(struct caam_ctx, sh_desc_enc_dma),
-- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
-+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- caam_jr_free(ctx->jrdev);
- }
-
-@@ -3279,7 +3559,7 @@ static void caam_aead_exit(struct crypto
- caam_exit_common(crypto_aead_ctx(tfm));
- }
-
--static void __exit caam_algapi_exit(void)
-+void caam_algapi_exit(void)
- {
-
- struct caam_crypto_alg *t_alg, *n;
-@@ -3358,56 +3638,52 @@ static void caam_aead_alg_init(struct ca
- alg->exit = caam_aead_exit;
- }
-
--static int __init caam_algapi_init(void)
-+int caam_algapi_init(struct device *ctrldev)
- {
-- struct device_node *dev_node;
-- struct platform_device *pdev;
-- struct device *ctrldev;
-- struct caam_drv_private *priv;
-+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- int i = 0, err = 0;
-- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
-+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
- unsigned int md_limit = SHA512_DIGEST_SIZE;
- bool registered = false;
-
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-- if (!dev_node) {
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-- if (!dev_node)
-- return -ENODEV;
-- }
--
-- pdev = of_find_device_by_node(dev_node);
-- if (!pdev) {
-- of_node_put(dev_node);
-- return -ENODEV;
-- }
--
-- ctrldev = &pdev->dev;
-- priv = dev_get_drvdata(ctrldev);
-- of_node_put(dev_node);
--
-- /*
-- * If priv is NULL, it's probably because the caam driver wasn't
-- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-- */
-- if (!priv)
-- return -ENODEV;
--
--
- INIT_LIST_HEAD(&alg_list);
-
- /*
- * Register crypto algorithms the device supports.
- * First, detect presence and attributes of DES, AES, and MD blocks.
- */
-- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
-- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
-- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+ if (priv->era < 10) {
-+ u32 cha_vid, cha_inst;
-+
-+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
-+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+
-+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
-+ CHA_ID_LS_DES_SHIFT;
-+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
-+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+ ccha_inst = 0;
-+ ptha_inst = 0;
-+ } else {
-+ u32 aesa, mdha;
-+
-+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
-+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
-+
-+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
-+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
-+
-+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
-+ aes_inst = aesa & CHA_VER_NUM_MASK;
-+ md_inst = mdha & CHA_VER_NUM_MASK;
-+ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
-+ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
-+ }
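-+
-+ /*
-+ * Editorial note, added for clarity (not in the original patch):
-+ * Era 10+ parts report accelerator presence and version through
-+ * per-CHA version registers (vreg) rather than the aggregate CHA
-+ * id/instantiation registers, and that is also where the ChaCha20
-+ * (ccha) and Poly1305 (ptha) instantiation counts come from.
-+ */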
-
- /* If MD is present, limit digest size based on LP256 */
-- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
-+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
- md_limit = SHA256_DIGEST_SIZE;
-
- for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-@@ -3429,10 +3705,10 @@ static int __init caam_algapi_init(void)
- * Check support for AES modes not available
- * on LP devices.
- */
-- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
-- OP_ALG_AAI_XTS)
-- continue;
-+ if (aes_vid == CHA_VER_VID_AES_LP &&
-+ (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_XTS)
-+ continue;
-
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
-@@ -3471,21 +3747,28 @@ static int __init caam_algapi_init(void)
- if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
- continue;
-
-+ /* Skip CHACHA20 algorithms if not supported by device */
-+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
-+ continue;
-+
-+ /* Skip POLY1305 algorithms if not supported by device */
-+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
-+ continue;
-+
- /*
- * Check support for AES algorithms not available
- * on LP devices.
- */
-- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-- if (alg_aai == OP_ALG_AAI_GCM)
-- continue;
-+ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
-+ continue;
-
- /*
- * Skip algorithms requiring message digests
- * if MD or MD size is not supported by device.
- */
-- if (c2_alg_sel &&
-- (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
-- continue;
-+ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
-+ (!md_inst || t_alg->aead.maxauthsize > md_limit))
-+ continue;
-
- caam_aead_alg_init(t_alg);
-
-@@ -3505,10 +3788,3 @@ static int __init caam_algapi_init(void)
-
- return err;
- }
--
--module_init(caam_algapi_init);
--module_exit(caam_algapi_exit);
--
--MODULE_LICENSE("GPL");
--MODULE_DESCRIPTION("FSL CAAM support for crypto API");
--MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
---- a/drivers/crypto/caam/caamalg_desc.c
-+++ b/drivers/crypto/caam/caamalg_desc.c
-@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
- * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
- * (non-protocol) with no (null) encryption.
- * @desc: pointer to buffer used for descriptor construction
-- * @adata: pointer to authentication transform definitions. Note that since a
-- * split key is to be used, the size of the split key itself is
-- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case. Valid algorithm values - one of
-+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
-+ * with OP_ALG_AAI_HMAC_PRECOMP.
- * @icvsize: integrity check value (ICV) size (truncated or full)
-- *
-- * Note: Requires an MDHA split key.
-+ * @era: SEC Era
- */
- void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-- unsigned int icvsize)
-+ unsigned int icvsize, int era)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
-
-@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-- if (adata->key_inline)
-- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-- adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
-- KEY_ENC);
-- else
-- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ if (era < 6) {
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt,
-+ adata->keylen_pad, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ } else {
-+ append_proto_dkp(desc, adata);
-+ }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
-@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
- * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
- * (non-protocol) with no (null) decryption.
- * @desc: pointer to buffer used for descriptor construction
-- * @adata: pointer to authentication transform definitions. Note that since a
-- * split key is to be used, the size of the split key itself is
-- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case. Valid algorithm values - one of
-+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
-+ * with OP_ALG_AAI_HMAC_PRECOMP.
- * @icvsize: integrity check value (ICV) size (truncated or full)
-- *
-- * Note: Requires an MDHA split key.
-+ * @era: SEC Era
- */
- void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-- unsigned int icvsize)
-+ unsigned int icvsize, int era)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
-
-@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-- if (adata->key_inline)
-- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-- adata->keylen, CLASS_2 |
-- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-- else
-- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ if (era < 6) {
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt,
-+ adata->keylen_pad, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ } else {
-+ append_proto_dkp(desc, adata);
-+ }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
-@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
- static void init_sh_desc_key_aead(u32 * const desc,
- struct alginfo * const cdata,
- struct alginfo * const adata,
-- const bool is_rfc3686, u32 *nonce)
-+ const bool is_rfc3686, u32 *nonce, int era)
- {
- u32 *key_jump_cmd;
- unsigned int enckeylen = cdata->keylen;
-@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
-- if (adata->key_inline)
-- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-- adata->keylen, CLASS_2 |
-- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-- else
-- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ if (era < 6) {
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt,
-+ adata->keylen_pad, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ } else {
-+ append_proto_dkp(desc, adata);
-+ }
-
- if (cdata->key_inline)
- append_key_as_imm(desc, cdata->key_virt, enckeylen,
-@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-- * @adata: pointer to authentication transform definitions. Note that since a
-- * split key is to be used, the size of the split key itself is
-- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case. Valid algorithm values - one of
-+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
-+ * with OP_ALG_AAI_HMAC_PRECOMP.
- * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @nonce: pointer to rfc3686 nonce
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- * @is_qi: true when called from caam/qi
-- *
-- * Note: Requires an MDHA split key.
-+ * @era: SEC Era
- */
- void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
-- u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
-+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
-+ int era)
- {
- /* Note: Context registers are saved. */
-- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
-
- /* Class 2 operation */
- append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
-@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
- }
-
- /* Read and write assoclen bytes */
-- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ if (is_qi || era < 3) {
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ } else {
-+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
-+ }
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-- * @adata: pointer to authentication transform definitions. Note that since a
-- * split key is to be used, the size of the split key itself is
-- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case. Valid algorithm values - one of
-+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
-+ * with OP_ALG_AAI_HMAC_PRECOMP.
- * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @nonce: pointer to rfc3686 nonce
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- * @is_qi: true when called from caam/qi
-- *
-- * Note: Requires an MDHA split key.
-+ * @era: SEC Era
- */
- void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool geniv,
- const bool is_rfc3686, u32 *nonce,
-- const u32 ctx1_iv_off, const bool is_qi)
-+ const u32 ctx1_iv_off, const bool is_qi, int era)
- {
- /* Note: Context registers are saved. */
-- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
-
- /* Class 2 operation */
- append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
-@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
- }
-
- /* Read and write assoclen bytes */
-- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-- if (geniv)
-- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
-- else
-- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ if (is_qi || era < 3) {
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ if (geniv)
-+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
-+ ivsize);
-+ else
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
-+ CAAM_CMD_SZ);
-+ } else {
-+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
-+ if (geniv)
-+ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
-+ ivsize);
-+ else
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
-+ CAAM_CMD_SZ);
-+ }
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-@@ -456,30 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-- * @adata: pointer to authentication transform definitions. Note that since a
-- * split key is to be used, the size of the split key itself is
-- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
-- * @ivsize: initialization vector size
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case. Valid algorithm values - one of
-+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
-+ * with OP_ALG_AAI_HMAC_PRECOMP.
-+ * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @nonce: pointer to rfc3686 nonce
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- * @is_qi: true when called from caam/qi
-- *
-- * Note: Requires an MDHA split key.
-+ * @era: SEC Era
- */
- void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off,
-- const bool is_qi)
-+ const bool is_qi, int era)
- {
- u32 geniv, moveiv;
- u32 *wait_cmd;
-
- /* Note: Context registers are saved. */
-- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
-
- if (is_qi) {
- u32 *wait_load_cmd;
-@@ -529,8 +561,13 @@ copy_iv:
- OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
-- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ if (is_qi || era < 3) {
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ } else {
-+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
-+ }
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-@@ -592,14 +629,431 @@ copy_iv:
- EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
-
- /**
-+ * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
-+ * @desc: pointer to buffer used for descriptor construction
-+ * @cdata: pointer to block cipher transform definitions
-+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
-+ * with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
-+ * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
-+ * @assoclen: associated data length
-+ * @ivsize: initialization vector size
-+ * @authsize: authentication data size
-+ * @blocksize: block cipher size
-+ * @era: SEC Era
-+ */
-+void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
-+ struct alginfo *adata, unsigned int assoclen,
-+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize, int era)
-+{
-+ u32 *key_jump_cmd, *zero_payload_jump_cmd;
-+ u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
-+
-+ /*
-+ * Compute the index (in bytes) for the LOAD with destination of
-+ * Class 1 Data Size Register and for the LOAD that generates padding
-+ */
-+ if (adata->key_inline) {
-+ idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
-+ cdata->keylen - 4 * CAAM_CMD_SZ;
-+ idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
-+ cdata->keylen - 2 * CAAM_CMD_SZ;
-+ } else {
-+ idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
-+ 4 * CAAM_CMD_SZ;
-+ idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
-+ 2 * CAAM_CMD_SZ;
-+ }
-+
-+ stidx = 1 << HDR_START_IDX_SHIFT;
-+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
-+
-+ /* skip key loading if they are loaded due to sharing */
-+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_SHRD);
-+
-+ if (era < 6) {
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt,
-+ adata->keylen_pad, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen,
-+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ } else {
-+ append_proto_dkp(desc, adata);
-+ }
-+
-+ if (cdata->key_inline)
-+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
-+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-+ else
-+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
-+ KEY_DEST_CLASS_REG);
-+
-+ set_jump_tgt_here(desc, key_jump_cmd);
-+
-+ /* class 2 operation */
-+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_ENCRYPT);
-+ /* class 1 operation */
-+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_ENCRYPT);
-+
-+ /* payloadlen = input data length - (assoclen + ivlen) */
-+ append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
-+
-+ /* math1 = payloadlen + icvlen */
-+ append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
-+
-+ /* padlen = block_size - math1 % block_size */
-+ append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
-+ append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
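-+
-+ /*
-+ * Editorial note, added for clarity (not in the original patch): the
-+ * AND/SUB pair is a power-of-two modulo, valid since blocksize is 16
-+ * for AES-CBC. E.g. payloadlen + icvlen = 37: REG3 = 37 & 15 = 5,
-+ * padlen = REG2 = 16 - 5 = 11, giving a 48-byte cryptlen; an exact
-+ * multiple of 16 yields a full 16-byte pad, since TLS 1.0 always adds
-+ * at least one padding byte.
-+ */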
-+
-+ /* cryptlen = payloadlen + icvlen + padlen */
-+ append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
-+
-+ /*
-+ * update immediate data with the padding length value
-+ * for the LOAD in the class 1 data size register.
-+ */
-+ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
-+ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
-+ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
-+
-+ /* overwrite PL field for the padding info FIFO entry */
-+ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
-+ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
-+ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
-+
-+ /* store encrypted payload, icv and padding */
-+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
-+
-+ /* if payload length is zero, jump to zero-payload commands */
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
-+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
-+ JUMP_COND_MATH_Z);
-+
-+ /* load iv in context1 */
-+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
-+ LDST_CLASS_1_CCB | ivsize);
-+
-+ /* read assoc for authentication */
-+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
-+ FIFOLD_TYPE_MSG);
-+ /* insnoop payload */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
-+ FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
-+
-+ /* jump the zero-payload commands */
-+ append_jump(desc, JUMP_TEST_ALL | 3);
-+
-+ /* zero-payload commands */
-+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
-+
-+ /* load iv in context1 */
-+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
-+ LDST_CLASS_1_CCB | ivsize);
-+
-+ /* assoc data is the only data for authentication */
-+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
-+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
-+
-+ /* send icv to encryption */
-+ append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
-+ authsize);
-+
-+ /* update class 1 data size register with padding length */
-+ append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
-+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-+
-+ /* generate padding and send it to encryption */
-+ genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
-+ NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
-+ append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
-+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
-+ desc_bytes(desc), 1);
-+#endif
-+}
-+EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
-+
-+/**
-+ * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
-+ * @desc: pointer to buffer used for descriptor construction
-+ * @cdata: pointer to block cipher transform definitions
-+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
-+ * with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
-+ * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
-+ * @assoclen: associated data length
-+ * @ivsize: initialization vector size
-+ * @authsize: authentication data size
-+ * @blocksize: block cipher size
-+ * @era: SEC Era
-+ */
-+void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
-+ struct alginfo *adata, unsigned int assoclen,
-+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize, int era)
-+{
-+ u32 stidx, jumpback;
-+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
-+ /*
-+ * Pointer Size bool determines the size of address pointers.
-+ * false - Pointers fit in one 32-bit word.
-+ * true - Pointers fit in two 32-bit words.
-+ */
-+ static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
-+
-+ stidx = 1 << HDR_START_IDX_SHIFT;
-+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
-+
-+ /* skip key loading if they are loaded due to sharing */
-+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_SHRD);
-+
-+ if (era < 6)
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ else
-+ append_proto_dkp(desc, adata);
-+
-+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
-+ KEY_DEST_CLASS_REG);
-+
-+ set_jump_tgt_here(desc, key_jump_cmd);
-+
-+ /* class 2 operation */
-+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-+ /* class 1 operation */
-+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_DECRYPT);
-+
-+ /* VSIL = input data length - 2 * block_size */
-+ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
-+ blocksize);
-+
-+ /*
-+ * payloadlen + icvlen + padlen = input data length - (assoclen +
-+ * ivsize)
-+ */
-+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
-+
-+ /* skip data to the last but one cipher block */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
-+
-+ /* load iv for the last cipher block */
-+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
-+ LDST_CLASS_1_CCB | ivsize);
-+
-+ /* read last cipher block */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
-+ FIFOLD_TYPE_LAST1 | blocksize);
-+
-+ /* move decrypted block into math0 and math1 */
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
-+ blocksize);
-+
-+ /* reset AES CHA */
-+ append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
-+ LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
-+
-+ /* rewind input sequence */
-+ append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
-+
-+ /* key1 is in decryption form */
-+ append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
-+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
-+
-+ /* load iv in context1 */
-+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
-+ LDST_SRCDST_WORD_CLASS_CTX | ivsize);
-+
-+ /* read sequence number */
-+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
-+ /* load Type, Version and Len fields in math0 */
-+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
-+ LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
-+
-+ /* compute (padlen - 1) */
-+ append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
-+
-+ /* math2 = icvlen + (padlen - 1) + 1 */
-+ append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
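-+
-+ /*
-+ * Editorial note, added for clarity (not in the original patch): the
-+ * byte extracted from math1 by the AND above is the TLS padding_length
-+ * field of the decrypted final block, i.e. padlen - 1 (the length byte
-+ * itself counts towards padlen here), so REG2 ends up holding
-+ * icvlen + padlen, the tail to strip from the record.
-+ */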
-+
-+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
-+
-+ /* VSOL = payloadlen + icvlen + padlen */
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
-+
-+ if (caam_little_end)
-+ append_moveb(desc, MOVE_WAITCOMP |
-+ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
-+
-+ /* update Len field */
-+ append_math_sub(desc, REG0, REG0, REG2, 8);
-+
-+ /* store decrypted payload, icv and padding */
-+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
-+
-+ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
-+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
-+
-+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
-+ JUMP_COND_MATH_Z);
-+
-+ /* send Type, Version and Len(pre ICV) fields to authentication */
-+ append_move(desc, MOVE_WAITCOMP |
-+ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
-+ (3 << MOVE_OFFSET_SHIFT) | 5);
-+
-+ /* outsnooping payload */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
-+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
-+ FIFOLDST_VLF);
-+ skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
-+
-+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
-+ /* send Type, Version and Len(pre ICV) fields to authentication */
-+ append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
-+ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
-+ (3 << MOVE_OFFSET_SHIFT) | 5);
-+
-+ set_jump_tgt_here(desc, skip_zero_jump_cmd);
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
-+
-+ /* load icvlen and padlen */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
-+ FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
-+
-+ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
-+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
-+
-+ /*
-+ * Start a new input sequence using the SEQ OUT PTR command options,
-+ * pointer and length used when the current output sequence was defined.
-+ */
-+ if (ps) {
-+ /*
-+ * Move the lower 32 bits of Shared Descriptor address, the
-+ * SEQ OUT PTR command, Output Pointer (2 words) and
-+ * Output Length into math registers.
-+ */
-+ if (caam_little_end)
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 |
-+ (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
-+ else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 |
-+ (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
-+
-+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
-+ append_math_and_imm_u32(desc, REG0, REG0, IMM,
-+ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
-+ /* Append a JUMP command after the copied fields */
-+ jumpback = CMD_JUMP | (char)-9;
-+ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
-+ LDST_SRCDST_WORD_DECO_MATH2 |
-+ (4 << LDST_OFFSET_SHIFT));
-+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
-+ /* Move the updated fields back to the Job Descriptor */
-+ if (caam_little_end)
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF |
-+ (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
-+ else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF |
-+ (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
-+
-+ /*
-+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
-+ * and then jump back to the next command from the
-+ * Shared Descriptor.
-+ */
-+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
-+ } else {
-+ /*
-+ * Move the SEQ OUT PTR command, Output Pointer (1 word) and
-+ * Output Length into math registers.
-+ */
-+ if (caam_little_end)
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 |
-+ (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
-+ else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 |
-+ (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
-+
-+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
-+ append_math_and_imm_u64(desc, REG0, REG0, IMM,
-+ ~(((u64)(CMD_SEQ_IN_PTR ^
-+ CMD_SEQ_OUT_PTR)) << 32));
-+ /* Append a JUMP command after the copied fields */
-+ jumpback = CMD_JUMP | (char)-7;
-+ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
-+ LDST_SRCDST_WORD_DECO_MATH1 |
-+ (4 << LDST_OFFSET_SHIFT));
-+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
-+ /* Move the updated fields back to the Job Descriptor */
-+ if (caam_little_end)
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF |
-+ (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
-+ else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF |
-+ (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
-+
-+ /*
-+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
-+ * and then jump back to the next command from the
-+ * Shared Descriptor.
-+ */
-+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
-+ }
-+
-+ /* skip payload */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
-+ /* check icv */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
-+ FIFOLD_TYPE_LAST2 | authsize);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
-+ desc_bytes(desc), 1);
-+#endif
-+}
-+EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
-+
-+/**
- * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
-+ * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
-+ * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize)
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi)
- {
- u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
- *zero_assoc_jump_cmd2;
-@@ -621,11 +1075,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
-+ if (is_qi) {
-+ u32 *wait_load_cmd;
-+
-+ /* REG3 = assoclen */
-+ append_seq_load(desc, 4, LDST_CLASS_DECO |
-+ LDST_SRCDST_WORD_DECO_MATH3 |
-+ (4 << LDST_OFFSET_SHIFT));
-+
-+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_CALM | JUMP_COND_NCP |
-+ JUMP_COND_NOP | JUMP_COND_NIP |
-+ JUMP_COND_NIFP);
-+ set_jump_tgt_here(desc, wait_load_cmd);
-+
-+ append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
-+ ivsize);
-+ } else {
-+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
-+ CAAM_CMD_SZ);
-+ }
-+
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
-- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
-+ if (is_qi)
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-+
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
-@@ -657,8 +1135,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
-- /* jump the zero-payload commands */
-- append_jump(desc, JUMP_TEST_ALL | 2);
-+ /* jump to ICV writing */
-+ if (is_qi)
-+ append_jump(desc, JUMP_TEST_ALL | 4);
-+ else
-+ append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-@@ -666,10 +1147,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-+ if (is_qi)
-+ /* jump to ICV writing */
-+ append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
-+ if (is_qi)
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
-+ FIFOLD_TYPE_LAST1);
-+
- /* write ICV */
- append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-@@ -686,10 +1175,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
-+ * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
-+ * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize)
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi)
- {
- u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
-
-@@ -710,6 +1202,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-+ if (is_qi) {
-+ u32 *wait_load_cmd;
-+
-+ /* REG3 = assoclen */
-+ append_seq_load(desc, 4, LDST_CLASS_DECO |
-+ LDST_SRCDST_WORD_DECO_MATH3 |
-+ (4 << LDST_OFFSET_SHIFT));
-+
-+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_CALM | JUMP_COND_NCP |
-+ JUMP_COND_NOP | JUMP_COND_NIP |
-+ JUMP_COND_NIFP);
-+ set_jump_tgt_here(desc, wait_load_cmd);
-+
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-+ }
-+
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
-@@ -762,10 +1272,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
-+ * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
-+ * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize)
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi)
- {
- u32 *key_jump_cmd;
-
-@@ -786,7 +1299,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
-- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
-+ if (is_qi) {
-+ u32 *wait_load_cmd;
-+
-+ /* REG3 = assoclen */
-+ append_seq_load(desc, 4, LDST_CLASS_DECO |
-+ LDST_SRCDST_WORD_DECO_MATH3 |
-+ (4 << LDST_OFFSET_SHIFT));
-+
-+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_CALM | JUMP_COND_NCP |
-+ JUMP_COND_NOP | JUMP_COND_NIP |
-+ JUMP_COND_NIFP);
-+ set_jump_tgt_here(desc, wait_load_cmd);
-+
-+ /* Read salt and IV */
-+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
-+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV);
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-+ }
-+
-+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
-@@ -794,7 +1329,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
-- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-@@ -833,10 +1368,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
-+ * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
-+ * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize)
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi)
- {
- u32 *key_jump_cmd;
-
-@@ -858,7 +1396,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
-+ if (is_qi) {
-+ u32 *wait_load_cmd;
-+
-+ /* REG3 = assoclen */
-+ append_seq_load(desc, 4, LDST_CLASS_DECO |
-+ LDST_SRCDST_WORD_DECO_MATH3 |
-+ (4 << LDST_OFFSET_SHIFT));
-+
-+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_CALM | JUMP_COND_NCP |
-+ JUMP_COND_NOP | JUMP_COND_NIP |
-+ JUMP_COND_NIFP);
-+ set_jump_tgt_here(desc, wait_load_cmd);
-+
-+ /* Read salt and IV */
-+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
-+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV);
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-+ }
-+
-+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
-@@ -866,7 +1426,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
-- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-@@ -905,10 +1465,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
-+ * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
-+ * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize)
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
-
-@@ -929,6 +1492,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
-+ if (is_qi) {
-+ /* assoclen is not needed, skip it */
-+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
-+
-+ /* Read salt and IV */
-+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
-+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV);
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-+ }
-+
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-@@ -940,7 +1515,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
-- (0x8 << MOVE_LEN_SHIFT));
-+ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-@@ -975,10 +1550,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
-+ * @ivsize: initialization vector size
- * @icvsize: integrity check value (ICV) size (truncated or full)
-+ * @is_qi: true when called from caam/qi
- */
- void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize)
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi)
- {
- u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
-
-@@ -999,6 +1577,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-+ if (is_qi) {
-+ /* assoclen is not needed, skip it */
-+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
-+
-+ /* Read salt and IV */
-+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
-+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV);
-+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
-+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-+ }
-+
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-@@ -1010,7 +1600,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
-- (0x8 << MOVE_LEN_SHIFT));
-+ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-@@ -1044,6 +1634,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
- }
- EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
-
-+/**
-+ * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
-+ * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
-+ * descriptor (non-protocol).
-+ * @desc: pointer to buffer used for descriptor construction
-+ * @cdata: pointer to block cipher transform definitions
-+ * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
-+ * OP_ALG_AAI_AEAD.
-+ * @adata: pointer to authentication transform definitions
-+ * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
-+ * OP_ALG_AAI_AEAD.
-+ * @ivsize: initialization vector size
-+ * @icvsize: integrity check value (ICV) size (truncated or full)
-+ * @encap: true if encapsulation, false if decapsulation
-+ * @is_qi: true when called from caam/qi
-+ */
-+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
-+ struct alginfo *adata, unsigned int ivsize,
-+ unsigned int icvsize, const bool encap,
-+ const bool is_qi)
-+{
-+ u32 *key_jump_cmd, *wait_cmd;
-+ u32 nfifo;
-+ const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
-+
-+ /* Note: Context registers are saved. */
-+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-+
-+ /* skip key loading if they are loaded due to sharing */
-+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_SHRD);
-+
-+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
-+ CLASS_1 | KEY_DEST_CLASS_REG);
-+
-+ /* For IPsec load the salt from keymat in the context register */
-+ if (is_ipsec)
-+ append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
-+ LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
-+ 4 << LDST_OFFSET_SHIFT);
-+
-+ set_jump_tgt_here(desc, key_jump_cmd);
-+
-+ /* Class 2 and 1 operations: Poly & ChaCha */
-+ if (encap) {
-+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_ENCRYPT);
-+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_ENCRYPT);
-+ } else {
-+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
-+ OP_ALG_DECRYPT);
-+ }
-+
-+ if (is_qi) {
-+ u32 *wait_load_cmd;
-+ u32 ctx1_iv_off = is_ipsec ? 8 : 4;
-+
-+ /* REG3 = assoclen */
-+ append_seq_load(desc, 4, LDST_CLASS_DECO |
-+ LDST_SRCDST_WORD_DECO_MATH3 |
-+ 4 << LDST_OFFSET_SHIFT);
-+
-+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_CALM | JUMP_COND_NCP |
-+ JUMP_COND_NOP | JUMP_COND_NIP |
-+ JUMP_COND_NIFP);
-+ set_jump_tgt_here(desc, wait_load_cmd);
-+
-+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT |
-+ ctx1_iv_off << LDST_OFFSET_SHIFT);
-+ }
-+
-+ /*
-+ * MAGIC with NFIFO
-+ * Read associated data from the input and send them to class1 and
-+ * class2 alignment blocks. From class1 send data to output fifo and
-+ * then write it to memory since we don't need to encrypt AD.
-+ */
-+ nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
-+ NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
-+ append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
-+ LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
-+
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
-+ FIFOLD_CLASS_CLASS1 | LDST_VLF);
-+ append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
-+ MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
-+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
-+
-+ /* IPsec - copy IV at the output */
-+ if (is_ipsec)
-+ append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
-+ 0x2 << 25);
-+
-+ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
-+ JUMP_COND_NOP | JUMP_TEST_ALL);
-+ set_jump_tgt_here(desc, wait_cmd);
-+
-+ if (encap) {
-+ /* Read and write cryptlen bytes */
-+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
-+ CAAM_CMD_SZ);
-+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
-+
-+ /* Write ICV */
-+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
-+ } else {
-+ /* Read and write cryptlen bytes */
-+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
-+ CAAM_CMD_SZ);
-+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
-+
-+ /* Load ICV for verification */
-+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
-+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-+ }
-+
-+ print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
-+ 1);
-+}
-+EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
-+
- /*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
-@@ -1062,7 +1784,8 @@ static inline void ablkcipher_append_src
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
-- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
-+ * - OP_ALG_ALGSEL_CHACHA20
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
-@@ -1084,7 +1807,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
-- u8 *nonce = cdata->key_virt + cdata->keylen;
-+ const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
-@@ -1127,7 +1850,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
-- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
-+ * - OP_ALG_ALGSEL_CHACHA20
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
-@@ -1149,7 +1873,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
-- u8 *nonce = cdata->key_virt + cdata->keylen;
-+ const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
-@@ -1218,7 +1942,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
-- u8 *nonce = cdata->key_virt + cdata->keylen;
-+ const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
---- a/drivers/crypto/caam/caamalg_desc.h
-+++ b/drivers/crypto/caam/caamalg_desc.h
-@@ -17,6 +17,9 @@
- #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
- #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
-
-+#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
-+#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
-+
- /* Note: Nonce is counted in cdata.keylen */
- #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
-@@ -27,14 +30,20 @@
- #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
- #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
- #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-+#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
-+#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
-
- #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
- #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
- #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-+#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
-+#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
-
- #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
- #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
- #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-+#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
-+#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
-
- #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
- #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
-@@ -43,46 +52,67 @@
- 15 * CAAM_CMD_SZ)
-
- void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-- unsigned int icvsize);
-+ unsigned int icvsize, int era);
-
- void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-- unsigned int icvsize);
-+ unsigned int icvsize, int era);
-
- void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off,
-- const bool is_qi);
-+ const bool is_qi, int era);
-
- void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool geniv,
- const bool is_rfc3686, u32 *nonce,
-- const u32 ctx1_iv_off, const bool is_qi);
-+ const u32 ctx1_iv_off, const bool is_qi, int era);
-
- void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
- struct alginfo *adata, unsigned int ivsize,
- unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off,
-- const bool is_qi);
-+ const bool is_qi, int era);
-+
-+void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
-+ struct alginfo *adata, unsigned int assoclen,
-+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize, int era);
-+
-+void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
-+ struct alginfo *adata, unsigned int assoclen,
-+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize, int era);
-
- void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize);
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi);
-
- void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize);
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi);
-
- void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize);
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi);
-
- void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize);
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi);
-
- void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize);
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi);
-
- void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
-- unsigned int icvsize);
-+ unsigned int ivsize, unsigned int icvsize,
-+ const bool is_qi);
-+
-+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
-+ struct alginfo *adata, unsigned int ivsize,
-+ unsigned int icvsize, const bool encap,
-+ const bool is_qi);
-
- void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
---- a/drivers/crypto/caam/caamalg_qi.c
-+++ b/drivers/crypto/caam/caamalg_qi.c
-@@ -7,7 +7,7 @@
- */
-
- #include "compat.h"
--
-+#include "ctrl.h"
- #include "regs.h"
- #include "intern.h"
- #include "desc_constr.h"
-@@ -53,6 +53,7 @@ struct caam_ctx {
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
- u8 key[CAAM_MAX_KEY_SIZE];
- dma_addr_t key_dma;
-+ enum dma_data_direction dir;
- struct alginfo adata;
- struct alginfo cdata;
- unsigned int authsize;
-@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = alg->caam.rfc3686;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
-
- if (!ctx->cdata.keylen || !ctx->authsize)
- return 0;
-@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
-
- cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
- ivsize, ctx->authsize, is_rfc3686, nonce,
-- ctx1_iv_off, true);
-+ ctx1_iv_off, true, ctrlpriv->era);
-
- skip_enc:
- /* aead_decrypt shared descriptor */
-@@ -149,7 +151,8 @@ skip_enc:
-
- cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
- ivsize, ctx->authsize, alg->caam.geniv,
-- is_rfc3686, nonce, ctx1_iv_off, true);
-+ is_rfc3686, nonce, ctx1_iv_off, true,
-+ ctrlpriv->era);
-
- if (!alg->caam.geniv)
- goto skip_givenc;
-@@ -176,7 +179,7 @@ skip_enc:
-
- cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
- ivsize, ctx->authsize, is_rfc3686, nonce,
-- ctx1_iv_off, true);
-+ ctx1_iv_off, true, ctrlpriv->era);
-
- skip_givenc:
- return 0;
-@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
- {
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- struct crypto_authenc_keys keys;
- int ret = 0;
-
-@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- #endif
-
-+ /*
-+ * If DKP is supported, use it in the shared descriptor to generate
-+ * the split key.
-+ */
-+ if (ctrlpriv->era >= 6) {
-+ ctx->adata.keylen = keys.authkeylen;
-+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
-+ goto badkey;
-+
-+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
-+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
-+ keys.enckeylen);
-+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
-+ ctx->adata.keylen_pad +
-+ keys.enckeylen, ctx->dir);
-+ goto skip_split_key;
-+ }
-+
- ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
- keys.authkeylen, CAAM_MAX_KEY_SIZE -
- keys.enckeylen);
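
Note on the DKP branch in the hunk above: on CAAM Era 6 or later the padded split (HMAC) key is derived by the Derived Key Protocol inside the shared descriptor itself, so setkey only has to lay the authentication key and the encryption key out back to back in ctx->key and sync it for the device; earlier Eras still fall through to gen_split_key() on a job ring. A minimal, standalone sketch of that key-buffer layout check, with an invented struct and sizes purely for illustration:

#include <stdbool.h>
#include <string.h>

/* Illustrative stand-in for the real caam_ctx; 128 is an arbitrary size. */
struct fake_ctx {
	unsigned char key[128];
	unsigned int authkeylen_pad;	/* padded split-key length */
	unsigned int enckeylen;
};

/* Lay out [auth key | pad | enc key] the way the DKP path expects; the
 * caller is assumed to have computed authkeylen_pad from the hash type. */
static bool dkp_layout_key(struct fake_ctx *c,
			   const unsigned char *authkey, unsigned int authkeylen,
			   const unsigned char *enckey, unsigned int enckeylen)
{
	if (c->authkeylen_pad + enckeylen > sizeof(c->key))
		return false;		/* would overflow the key buffer */

	memcpy(c->key, authkey, authkeylen);
	memcpy(c->key + c->authkeylen_pad, enckey, enckeylen);
	c->enckeylen = enckeylen;
	return true;
}
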
-@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
- /* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-- keys.enckeylen, DMA_TO_DEVICE);
-+ keys.enckeylen, ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
- #endif
-
-+skip_split_key:
- ctx->cdata.keylen = keys.enckeylen;
-
- ret = aead_set_sh_desc(aead);
-@@ -258,6 +284,468 @@ badkey:
- return -EINVAL;
- }
-
-+static int tls_set_sh_desc(struct crypto_aead *tls)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ unsigned int ivsize = crypto_aead_ivsize(tls);
-+ unsigned int blocksize = crypto_aead_blocksize(tls);
-+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
-+ unsigned int data_len[2];
-+ u32 inl_mask;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ /*
-+ * TLS 1.0 encrypt shared descriptor
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ data_len[0] = ctx->adata.keylen_pad;
-+ data_len[1] = ctx->cdata.keylen;
-+
-+ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
-+ &inl_mask, ARRAY_SIZE(data_len)) < 0)
-+ return -EINVAL;
-+
-+ if (inl_mask & 1)
-+ ctx->adata.key_virt = ctx->key;
-+ else
-+ ctx->adata.key_dma = ctx->key_dma;
-+
-+ if (inl_mask & 2)
-+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-+ else
-+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-+
-+ ctx->adata.key_inline = !!(inl_mask & 1);
-+ ctx->cdata.key_inline = !!(inl_mask & 2);
-+
-+ cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize,
-+ ctrlpriv->era);
-+
-+ /*
-+ * TLS 1.0 decrypt shared descriptor
-+ * Keys do not fit inline, regardless of algorithms used
-+ */
-+ ctx->adata.key_inline = false;
-+ ctx->adata.key_dma = ctx->key_dma;
-+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-+
-+ cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize,
-+ ctrlpriv->era);
-+
-+ return 0;
-+}
-+
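
For readers unfamiliar with the inl_mask convention used in tls_set_sh_desc() above: desc_inline_query() reports, per entry of data_len[], whether a key still fits inside the 64-word shared descriptor. Bit 0 covers the authentication key and bit 1 the cipher key; a set bit means the key bytes are copied into the descriptor (key_virt), a clear bit means only its DMA address is referenced (key_dma). A hedged, standalone sketch of how such a mask is typically consumed (names here are illustrative, not the driver's):

/* Illustrative inline-vs-referenced key selection driven by a bitmask. */
struct key_ref {
	const void *virt;		/* used when inlined in the descriptor */
	unsigned long long dma;		/* used when referenced by address */
	int key_inline;
};

static void pick_key_placement(struct key_ref *k, unsigned int inl_mask,
			       unsigned int bit, const void *virt,
			       unsigned long long dma)
{
	k->key_inline = !!(inl_mask & (1u << bit));
	if (k->key_inline)
		k->virt = virt;
	else
		k->dma = dma;
}
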
-+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+
-+ ctx->authsize = authsize;
-+ tls_set_sh_desc(tls);
-+
-+ return 0;
-+}
-+
-+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ struct device *jrdev = ctx->jrdev;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
-+ struct crypto_authenc_keys keys;
-+ int ret = 0;
-+
-+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
-+ goto badkey;
-+
-+#ifdef DEBUG
-+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
-+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
-+ keys.authkeylen);
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ /*
-+ * If DKP is supported, use it in the shared descriptor to generate
-+ * the split key.
-+ */
-+ if (ctrlpriv->era >= 6) {
-+ ctx->adata.keylen = keys.authkeylen;
-+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
-+ goto badkey;
-+
-+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
-+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
-+ keys.enckeylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma,
-+ ctx->adata.keylen_pad +
-+ keys.enckeylen, ctx->dir);
-+ goto skip_split_key;
-+ }
-+
-+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
-+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
-+ keys.enckeylen);
-+ if (ret)
-+ goto badkey;
-+
-+ /* postpend encryption key to auth split key */
-+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, ctx->dir);
-+
-+#ifdef DEBUG
-+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
-+ ctx->adata.keylen, ctx->adata.keylen_pad);
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-+ ctx->adata.keylen_pad + keys.enckeylen, 1);
-+#endif
-+
-+skip_split_key:
-+ ctx->cdata.keylen = keys.enckeylen;
-+
-+ ret = tls_set_sh_desc(tls);
-+ if (ret)
-+ goto badkey;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ return ret;
-+badkey:
-+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+static int gcm_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
-+ ctx->cdata.keylen;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ /*
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ ctx->cdata.key_virt = ctx->key;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-+ ctx->authsize, true);
-+
-+ /*
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ ctx->cdata.key_virt = ctx->key;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-+ ctx->authsize, true);
-+
-+ return 0;
-+}
-+
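
The rem_bytes test in gcm_set_sh_desc() above is plain arithmetic over the 64-word (256-byte) descriptor buffer: whatever the job-descriptor I/O section and the key do not consume must still be large enough for the shared descriptor, otherwise the key is referenced by DMA address instead of being inlined. A small sketch of the same fit check, with the overhead constant assumed rather than taken from the headers:

#include <stdbool.h>

#define FAKE_DESC_BYTES_MAX	(64 * 4)	/* 64 words of 4 bytes */
#define FAKE_JOB_IO_LEN		(11 * 4)	/* assumed job I/O overhead */

/* Inline the key only if the shared descriptor still fits afterwards. */
static bool key_fits_inline(unsigned int keylen, unsigned int shdesc_len)
{
	int rem = FAKE_DESC_BYTES_MAX - FAKE_JOB_IO_LEN - (int)keylen;

	return rem >= (int)shdesc_len;
}
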
-+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-+
-+ ctx->authsize = authsize;
-+ gcm_set_sh_desc(authenc);
-+
-+ return 0;
-+}
-+
-+static int gcm_setkey(struct crypto_aead *aead,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *jrdev = ctx->jrdev;
-+ int ret;
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ memcpy(ctx->key, key, keylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
-+ ctx->cdata.keylen = keylen;
-+
-+ ret = gcm_set_sh_desc(aead);
-+ if (ret)
-+ return ret;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
-+ ctx->cdata.keylen;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ ctx->cdata.key_virt = ctx->key;
-+
-+ /*
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-+ ctx->authsize, true);
-+
-+ /*
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-+ ctx->authsize, true);
-+
-+ return 0;
-+}
-+
-+static int rfc4106_setauthsize(struct crypto_aead *authenc,
-+ unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-+
-+ ctx->authsize = authsize;
-+ rfc4106_set_sh_desc(authenc);
-+
-+ return 0;
-+}
-+
-+static int rfc4106_setkey(struct crypto_aead *aead,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *jrdev = ctx->jrdev;
-+ int ret;
-+
-+ if (keylen < 4)
-+ return -EINVAL;
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ memcpy(ctx->key, key, keylen);
-+ /*
-+ * The last four bytes of the key material are used as the salt value
-+ * in the nonce. Update the AES key length.
-+ */
-+ ctx->cdata.keylen = keylen - 4;
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+ ctx->dir);
-+
-+ ret = rfc4106_set_sh_desc(aead);
-+ if (ret)
-+ return ret;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
-+ ctx->cdata.keylen;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ ctx->cdata.key_virt = ctx->key;
-+
-+ /*
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-+ ctx->authsize, true);
-+
-+ /*
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-+ ctx->authsize, true);
-+
-+ return 0;
-+}
-+
-+static int rfc4543_setauthsize(struct crypto_aead *authenc,
-+ unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-+
-+ ctx->authsize = authsize;
-+ rfc4543_set_sh_desc(authenc);
-+
-+ return 0;
-+}
-+
-+static int rfc4543_setkey(struct crypto_aead *aead,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *jrdev = ctx->jrdev;
-+ int ret;
-+
-+ if (keylen < 4)
-+ return -EINVAL;
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ memcpy(ctx->key, key, keylen);
-+ /*
-+ * The last four bytes of the key material are used as the salt value
-+ * in the nonce. Update the AES key length.
-+ */
-+ ctx->cdata.keylen = keylen - 4;
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+ ctx->dir);
-+
-+ ret = rfc4543_set_sh_desc(aead);
-+ if (ret)
-+ return ret;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
- static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
- {
-@@ -414,6 +902,29 @@ struct aead_edesc {
- };
-
- /*
-+ * tls_edesc - s/w-extended tls descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct tls_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct scatterlist tmp[2];
-+ struct scatterlist *dst;
-+ struct caam_drv_req drv_req;
-+ struct qm_sg_entry sgt[0];
-+};
-+
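
A note on the tls_edesc layout documented above: sgt[0] is a flexible array member, so the hardware S/G table and the IV live in the same qi_cache buffer immediately after the fixed fields, which is why allocations of this kind are sized as offsetof(...) plus the table plus ivsize. A minimal sketch of that sizing idiom, with invented member names:

#include <stddef.h>

struct entry { unsigned long long addr; unsigned int len, flags; };

struct edesc_like {
	int src_nents;
	int dst_nents;
	struct entry sgt[];	/* S/G table, then the IV, in the same buffer */
};

/* Total bytes to allocate for the descriptor, its table and the IV. */
static size_t edesc_alloc_size(unsigned int n_entries, unsigned int ivsize)
{
	return offsetof(struct edesc_like, sgt) +
	       n_entries * sizeof(struct entry) + ivsize;
}
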
-+/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
-@@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
- }
-
-+static void tls_unmap(struct device *dev,
-+ struct tls_edesc *edesc,
-+ struct aead_request *req)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ int ivsize = crypto_aead_ivsize(aead);
-+
-+ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
-+ edesc->dst_nents, edesc->iv_dma, ivsize,
-+ edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
-+ edesc->qm_sg_bytes);
-+}
-+
- static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-@@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
- qidev = caam_ctx->qidev;
-
- if (unlikely(status)) {
-+ u32 ssrc = status & JRSTA_SSRC_MASK;
-+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-+
- caam_jr_strstatus(qidev, status);
-- ecode = -EIO;
-+ /*
-+ * verify hw auth check passed else return -EBADMSG
-+ */
-+ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
-+ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
-+ ecode = -EBADMSG;
-+ else
-+ ecode = -EIO;
- }
-
- edesc = container_of(drv_req, typeof(*edesc), drv_req);
-@@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all
- /*
- * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
- * Input is not contiguous.
-+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-+ * the end of the table by allocating more S/G entries. Logic:
-+ * if (src != dst && output S/G)
-+ * pad output S/G, if needed
-+ * else if (src == dst && S/G)
-+ * overlapping S/Gs; pad one of them
-+ * else if (input S/G) ...
-+ * pad input S/G, if needed
- */
-- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
-- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
-+ if (mapped_dst_nents > 1)
-+ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
-+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
-+ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
-+ 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
-+ else
-+ qm_sg_ents = ALIGN(qm_sg_ents, 4);
-+
- sg_table = &edesc->sgt[0];
- qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
- if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
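
To make the padding rule in the hunk above concrete: because the engine prefetches S/G entries four at a time, any table it walks is rounded up to a multiple of four entries, so a 5-entry destination list reserves ALIGN(5, 4) = 8 slots and a 7-entry combined table also becomes 8. A standalone sketch of that rounding (re-derived here rather than taken from the kernel's ALIGN macro):

/* Round an S/G entry count up to a multiple of 4, matching the
 * "HW reads 4 S/G entries at a time" rule described above. */
static unsigned int sg_ents_align4(unsigned int nents)
{
	return (nents + 3u) & ~3u;	/* sg_ents_align4(5) == 8 */
}
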
-@@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ
- return aead_crypt(req, false);
- }
-
-+static int ipsec_gcm_encrypt(struct aead_request *req)
-+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-+
-+ return aead_crypt(req, true);
-+}
-+
-+static int ipsec_gcm_decrypt(struct aead_request *req)
-+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-+
-+ return aead_crypt(req, false);
-+}
-+
-+static void tls_done(struct caam_drv_req *drv_req, u32 status)
-+{
-+ struct device *qidev;
-+ struct tls_edesc *edesc;
-+ struct aead_request *aead_req = drv_req->app_ctx;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
-+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
-+ int ecode = 0;
-+
-+ qidev = caam_ctx->qidev;
-+
-+ if (unlikely(status)) {
-+ caam_jr_strstatus(qidev, status);
-+ ecode = -EIO;
-+ }
-+
-+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+ tls_unmap(qidev, edesc, aead_req);
-+
-+ aead_request_complete(aead_req, ecode);
-+ qi_cache_free(edesc);
-+}
-+
-+/*
-+ * allocate and map the tls extended descriptor
-+ */
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int blocksize = crypto_aead_blocksize(aead);
-+ unsigned int padsize, authsize;
-+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
-+ typeof(*alg), aead);
-+ struct device *qidev = ctx->qidev;
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct tls_edesc *edesc;
-+ dma_addr_t qm_sg_dma, iv_dma = 0;
-+ int ivsize = 0;
-+ u8 *iv;
-+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
-+ int in_len, out_len;
-+ struct qm_sg_entry *sg_table, *fd_sgt;
-+ struct caam_drv_ctx *drv_ctx;
-+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
-+ struct scatterlist *dst;
-+
-+ if (encrypt) {
-+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+ blocksize);
-+ authsize = ctx->authsize + padsize;
-+ } else {
-+ authsize = ctx->authsize;
-+ }
-+
-+ drv_ctx = get_drv_ctx(ctx, op_type);
-+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct tls_edesc *)drv_ctx;
-+
-+ /* allocate space for base edesc, link tables and IV */
-+ edesc = qi_cache_alloc(GFP_DMA | flags);
-+ if (unlikely(!edesc)) {
-+ dev_err(qidev, "could not allocate extended descriptor\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ if (likely(req->src == req->dst)) {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
-+ }
-+
-+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(qidev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ dst = req->dst;
-+ } else {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen);
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
-+ }
-+
-+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ if (unlikely(dst_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(dst_nents);
-+ }
-+
-+ if (src_nents) {
-+ mapped_src_nents = dma_map_sg(qidev, req->src,
-+ src_nents, DMA_TO_DEVICE);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(qidev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ } else {
-+ mapped_src_nents = 0;
-+ }
-+
-+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(!mapped_dst_nents)) {
-+ dev_err(qidev, "unable to map destination\n");
-+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+
-+ /*
-+ * Create S/G table: IV, src, dst.
-+ * Input is not contiguous.
-+ */
-+ qm_sg_ents = 1 + mapped_src_nents +
-+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-+ sg_table = &edesc->sgt[0];
-+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-+
-+ ivsize = crypto_aead_ivsize(aead);
-+ iv = (u8 *)(sg_table + qm_sg_ents);
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
-+ 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ edesc->dst_nents = dst_nents;
-+ edesc->dst = dst;
-+ edesc->iv_dma = iv_dma;
-+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = tls_done;
-+ edesc->drv_req.drv_ctx = drv_ctx;
-+
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ qm_sg_index = 1;
-+
-+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
-+ qm_sg_index += mapped_src_nents;
-+
-+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
-+ qm_sg_index, 0);
-+
-+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, qm_sg_dma)) {
-+ dev_err(qidev, "unable to map S/G table\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
-+ ivsize, op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ edesc->qm_sg_dma = qm_sg_dma;
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ out_len = req->cryptlen + (encrypt ? authsize : 0);
-+ in_len = ivsize + req->assoclen + req->cryptlen;
-+
-+ fd_sgt = &edesc->drv_req.fd_sgt[0];
-+
-+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
-+
-+ if (req->dst == req->src)
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (sg_nents_for_len(req->src, req->assoclen) +
-+ 1) * sizeof(*sg_table), out_len, 0);
-+ else if (mapped_dst_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
-+ else
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
-+ qm_sg_index, out_len, 0);
-+
-+ return edesc;
-+}
-+
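
One detail of tls_edesc_alloc() worth spelling out: on the encrypt path the output grows by the HMAC plus TLS 1.0 CBC padding, and the padsize formula at the top of the function always adds between 1 and blocksize bytes. With AES (16-byte blocks), SHA-1 (20-byte MAC) and a 100-byte record, the pad is 16 - ((100 + 20) % 16) = 8, for 128 bytes of ciphertext payload. A hedged arithmetic sketch of that calculation:

/* Illustrative TLS 1.0 CBC pad size, mirroring the encrypt branch above;
 * tls_cbc_padsize(100, 20, 16) == 8, so 100 + 20 + 8 = 128 output bytes. */
static unsigned int tls_cbc_padsize(unsigned int cryptlen,
				    unsigned int macsize,
				    unsigned int blocksize)
{
	return blocksize - ((cryptlen + macsize) % blocksize);
}
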
-+static int tls_crypt(struct aead_request *req, bool encrypt)
-+{
-+ struct tls_edesc *edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ int ret;
-+
-+ if (unlikely(caam_congested))
-+ return -EAGAIN;
-+
-+ edesc = tls_edesc_alloc(req, encrypt);
-+ if (IS_ERR_OR_NULL(edesc))
-+ return PTR_ERR(edesc);
-+
-+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
-+ if (!ret) {
-+ ret = -EINPROGRESS;
-+ } else {
-+ tls_unmap(ctx->qidev, edesc, req);
-+ qi_cache_free(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static int tls_encrypt(struct aead_request *req)
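
tls_crypt() above follows the usual asynchronous submission convention of this driver: a zero return from the enqueue helper means the request was accepted and is reported to the crypto API as -EINPROGRESS, anything else means unmap, free and propagate the error. A tiny sketch of that convention, with a generic cleanup callback standing in for tls_unmap()/qi_cache_free():

#include <linux/errno.h>

/* 0 from enqueue -> in progress; otherwise clean up and return the error. */
static int submit_or_cleanup(int enqueue_ret, void (*cleanup)(void *), void *p)
{
	if (!enqueue_ret)
		return -EINPROGRESS;

	cleanup(p);
	return enqueue_ret;
}
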
-+{
-+ return tls_crypt(req, true);
-+}
-+
-+static int tls_decrypt(struct aead_request *req)
-+{
-+ return tls_crypt(req, false);
-+}
-+
- static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
- {
- struct ablkcipher_edesc *edesc;
-@@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
- qm_sg_ents = 1 + mapped_src_nents;
- dst_sg_idx = qm_sg_ents;
-
-- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-+ /*
-+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-+ * the end of the table by allocating more S/G entries. Logic:
-+ * if (src != dst && output S/G)
-+ * pad output S/G, if needed
-+ * else if (src == dst && S/G)
-+ * overlapping S/Gs; pad one of them
-+ * else if (input S/G) ...
-+ * pad input S/G, if needed
-+ */
-+ if (mapped_dst_nents > 1)
-+ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
-+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
-+ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
-+ 1 + ALIGN(mapped_src_nents, 4));
-+ else
-+ qm_sg_ents = ALIGN(qm_sg_ents, 4);
-+
- qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
-@@ -1308,6 +2128,61 @@ static struct caam_alg_template driver_a
- };
-
- static struct caam_aead_alg driver_aeads[] = {
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc4106(gcm(aes))",
-+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = rfc4106_setkey,
-+ .setauthsize = rfc4106_setauthsize,
-+ .encrypt = ipsec_gcm_encrypt,
-+ .decrypt = ipsec_gcm_decrypt,
-+ .ivsize = 8,
-+ .maxauthsize = AES_BLOCK_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc4543(gcm(aes))",
-+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = rfc4543_setkey,
-+ .setauthsize = rfc4543_setauthsize,
-+ .encrypt = ipsec_gcm_encrypt,
-+ .decrypt = ipsec_gcm_decrypt,
-+ .ivsize = 8,
-+ .maxauthsize = AES_BLOCK_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
-+ },
-+ },
-+ /* Galois Counter Mode */
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "gcm(aes)",
-+ .cra_driver_name = "gcm-aes-caam-qi",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = gcm_setkey,
-+ .setauthsize = gcm_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = 12,
-+ .maxauthsize = AES_BLOCK_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
-+ }
-+ },
- /* single-pass ipsec_esp descriptor */
- {
- .aead = {
-@@ -2118,6 +2993,26 @@ static struct caam_aead_alg driver_aeads
- .geniv = true,
- }
- },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "tls10(hmac(sha1),cbc(aes))",
-+ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = tls_setkey,
-+ .setauthsize = tls_setauthsize,
-+ .encrypt = tls_encrypt,
-+ .decrypt = tls_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ }
- };
-
- struct caam_crypto_alg {
-@@ -2126,9 +3021,21 @@ struct caam_crypto_alg {
- struct caam_alg_entry caam;
- };
-
--static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
-+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
-+ bool uses_dkp)
- {
- struct caam_drv_private *priv;
-+ struct device *dev;
-+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
-+ static const u8 digest_size[] = {
-+ MD5_DIGEST_SIZE,
-+ SHA1_DIGEST_SIZE,
-+ SHA224_DIGEST_SIZE,
-+ SHA256_DIGEST_SIZE,
-+ SHA384_DIGEST_SIZE,
-+ SHA512_DIGEST_SIZE
-+ };
-+ u8 op_id;
-
- /*
- * distribute tfms across job rings to ensure in-order
-@@ -2140,10 +3047,19 @@ static int caam_init_common(struct caam_
- return PTR_ERR(ctx->jrdev);
- }
-
-- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
-- DMA_TO_DEVICE);
-- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
-- dev_err(ctx->jrdev, "unable to map key\n");
-+ priv = dev_get_drvdata(ctx->jrdev->parent);
-+ if (priv->era >= 6 && uses_dkp) {
-+ ctx->dir = DMA_BIDIRECTIONAL;
-+ dev = ctx->jrdev->parent;
-+ } else {
-+ ctx->dir = DMA_TO_DEVICE;
-+ dev = ctx->jrdev;
-+ }
-+
-+ ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
-+ ctx->dir);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key\n");
- caam_jr_free(ctx->jrdev);
- return -ENOMEM;
- }
-@@ -2152,8 +3068,23 @@ static int caam_init_common(struct caam_
- ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
-
-- priv = dev_get_drvdata(ctx->jrdev->parent);
-- ctx->qidev = priv->qidev;
-+ if (ctx->adata.algtype) {
-+ op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
-+ >> OP_ALG_ALGSEL_SHIFT;
-+ if (op_id < ARRAY_SIZE(digest_size)) {
-+ ctx->authsize = digest_size[op_id];
-+ } else {
-+ dev_err(ctx->jrdev,
-+ "incorrect op_id %d; must be less than %zu\n",
-+ op_id, ARRAY_SIZE(digest_size));
-+ caam_jr_free(ctx->jrdev);
-+ return -EINVAL;
-+ }
-+ } else {
-+ ctx->authsize = 0;
-+ }
-+
-+ ctx->qidev = ctx->jrdev->parent;
-
- spin_lock_init(&ctx->lock);
- ctx->drv_ctx[ENCRYPT] = NULL;
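
Regarding the digest_size[] lookup added to caam_init_common() above: the class-2 algorithm selector is shifted down to a small index (MD5 through SHA-512) and bounds-checked before it is used to seed the default authsize. A sketch of the same bounded table lookup, with the index assumed to be pre-extracted from the algorithm selector:

/* Digest sizes indexed MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512;
 * returns -1 for an out-of-range selector, as the bounds check above does. */
static int digest_bytes_for(unsigned int algsel_index)
{
	static const unsigned char digest_size[] = { 16, 20, 28, 32, 48, 64 };

	if (algsel_index >= sizeof(digest_size))
		return -1;
	return digest_size[algsel_index];
}
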
-@@ -2170,7 +3101,7 @@ static int caam_cra_init(struct crypto_t
- crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-
-- return caam_init_common(ctx, &caam_alg->caam);
-+ return caam_init_common(ctx, &caam_alg->caam, false);
- }
-
- static int caam_aead_init(struct crypto_aead *tfm)
-@@ -2180,17 +3111,25 @@ static int caam_aead_init(struct crypto_
- aead);
- struct caam_ctx *ctx = crypto_aead_ctx(tfm);
-
-- return caam_init_common(ctx, &caam_alg->caam);
-+ return caam_init_common(ctx, &caam_alg->caam,
-+ (alg->setkey == aead_setkey) ||
-+ (alg->setkey == tls_setkey));
- }
-
- static void caam_exit_common(struct caam_ctx *ctx)
- {
-+ struct device *dev;
-+
- caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
-
-- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
-- DMA_TO_DEVICE);
-+ if (ctx->dir == DMA_BIDIRECTIONAL)
-+ dev = ctx->jrdev->parent;
-+ else
-+ dev = ctx->jrdev;
-+
-+ dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
-
- caam_jr_free(ctx->jrdev);
- }
-@@ -2206,7 +3145,7 @@ static void caam_aead_exit(struct crypto
- }
-
- static struct list_head alg_list;
--static void __exit caam_qi_algapi_exit(void)
-+void caam_qi_algapi_exit(void)
- {
- struct caam_crypto_alg *t_alg, *n;
- int i;
-@@ -2282,53 +3221,48 @@ static void caam_aead_alg_init(struct ca
- alg->exit = caam_aead_exit;
- }
-
--static int __init caam_qi_algapi_init(void)
-+int caam_qi_algapi_init(struct device *ctrldev)
- {
-- struct device_node *dev_node;
-- struct platform_device *pdev;
-- struct device *ctrldev;
-- struct caam_drv_private *priv;
-+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- int i = 0, err = 0;
-- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
-+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
- unsigned int md_limit = SHA512_DIGEST_SIZE;
- bool registered = false;
-
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-- if (!dev_node) {
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-- if (!dev_node)
-- return -ENODEV;
-- }
--
-- pdev = of_find_device_by_node(dev_node);
-- of_node_put(dev_node);
-- if (!pdev)
-- return -ENODEV;
--
-- ctrldev = &pdev->dev;
-- priv = dev_get_drvdata(ctrldev);
--
-- /*
-- * If priv is NULL, it's probably because the caam driver wasn't
-- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-- */
-- if (!priv || !priv->qi_present)
-- return -ENODEV;
--
- INIT_LIST_HEAD(&alg_list);
-
- /*
- * Register crypto algorithms the device supports.
- * First, detect presence and attributes of DES, AES, and MD blocks.
- */
-- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
-- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
-- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+ if (priv->era < 10) {
-+ u32 cha_vid, cha_inst;
-+
-+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
-+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+
-+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
-+ CHA_ID_LS_DES_SHIFT;
-+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
-+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+ } else {
-+ u32 aesa, mdha;
-+
-+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
-+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
-+
-+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
-+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
-+
-+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
-+ aes_inst = aesa & CHA_VER_NUM_MASK;
-+ md_inst = mdha & CHA_VER_NUM_MASK;
-+ }
-
- /* If MD is present, limit digest size based on LP256 */
-- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
-+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
- md_limit = SHA256_DIGEST_SIZE;
-
- for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-@@ -2349,14 +3283,14 @@ static int __init caam_qi_algapi_init(vo
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
-- dev_warn(priv->qidev, "%s alg allocation failed\n",
-+ dev_warn(ctrldev, "%s alg allocation failed\n",
- alg->driver_name);
- continue;
- }
-
- err = crypto_register_alg(&t_alg->crypto_alg);
- if (err) {
-- dev_warn(priv->qidev, "%s alg registration failed\n",
-+ dev_warn(ctrldev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
- continue;
-@@ -2388,8 +3322,7 @@ static int __init caam_qi_algapi_init(vo
- * Check support for AES algorithms not available
- * on LP devices.
- */
-- if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
-- (alg_aai == OP_ALG_AAI_GCM))
-+ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
- continue;
-
- /*
-@@ -2414,14 +3347,7 @@ static int __init caam_qi_algapi_init(vo
- }
-
- if (registered)
-- dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
-+ dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
-
- return err;
- }
--
--module_init(caam_qi_algapi_init);
--module_exit(caam_qi_algapi_exit);
--
--MODULE_LICENSE("GPL");
--MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
--MODULE_AUTHOR("Freescale Semiconductor");
---- /dev/null
-+++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -0,0 +1,5843 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017-2018 NXP
-+ */
-+
-+#include <linux/fsl/mc.h>
-+#include "compat.h"
-+#include "regs.h"
-+#include "caamalg_qi2.h"
-+#include "dpseci_cmd.h"
-+#include "desc_constr.h"
-+#include "error.h"
-+#include "sg_sw_sec4.h"
-+#include "sg_sw_qm2.h"
-+#include "key_gen.h"
-+#include "caamalg_desc.h"
-+#include "caamhash_desc.h"
-+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
-+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
-+
-+#define CAAM_CRA_PRIORITY 2000
-+
-+/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
-+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
-+ SHA512_DIGEST_SIZE * 2)
-+
-+/*
-+ * This is a cache of buffers, from which the users of CAAM QI driver
-+ * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
-+ * NOTE: A more elegant solution would be to have some headroom in the frames
-+ * being processed. This can be added by the dpaa2-eth driver. This would
-+ * pose a problem for userspace application processing which cannot
-+ * know of this limitation. So for now, this will work.
-+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
-+ */
-+static struct kmem_cache *qi_cache;
-+
-+struct caam_alg_entry {
-+ struct device *dev;
-+ int class1_alg_type;
-+ int class2_alg_type;
-+ bool rfc3686;
-+ bool geniv;
-+};
-+
-+struct caam_aead_alg {
-+ struct aead_alg aead;
-+ struct caam_alg_entry caam;
-+ bool registered;
-+};
-+
-+struct caam_skcipher_alg {
-+ struct skcipher_alg skcipher;
-+ struct caam_alg_entry caam;
-+ bool registered;
-+};
-+
-+/**
-+ * caam_ctx - per-session context
-+ * @flc: Flow Contexts array
-+ * @key: virtual address of the key(s): [authentication key], encryption key
-+ * @flc_dma: I/O virtual addresses of the Flow Contexts
-+ * @key_dma: I/O virtual address of the key
-+ * @dir: DMA direction for mapping key and Flow Contexts
-+ * @dev: dpseci device
-+ * @adata: authentication algorithm details
-+ * @cdata: encryption algorithm details
-+ * @authsize: authentication tag (a.k.a. ICV / MAC) size
-+ */
-+struct caam_ctx {
-+ struct caam_flc flc[NUM_OP];
-+ u8 key[CAAM_MAX_KEY_SIZE];
-+ dma_addr_t flc_dma[NUM_OP];
-+ dma_addr_t key_dma;
-+ enum dma_data_direction dir;
-+ struct device *dev;
-+ struct alginfo adata;
-+ struct alginfo cdata;
-+ unsigned int authsize;
-+};
-+
-+void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
-+ dma_addr_t iova_addr)
-+{
-+ phys_addr_t phys_addr;
-+
-+ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
-+ iova_addr;
-+
-+ return phys_to_virt(phys_addr);
-+}
-+
-+/*
-+ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
-+ *
-+ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
-+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
-+ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
-+ * hosting 16 SG entries.
-+ *
-+ * @flags - flags that would be used for the equivalent kmalloc(..) call
-+ *
-+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
-+ */
-+static inline void *qi_cache_zalloc(gfp_t flags)
-+{
-+ return kmem_cache_zalloc(qi_cache, flags);
-+}
-+
-+/*
-+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
-+ *
-+ * @obj - buffer previously allocated by qi_cache_zalloc
-+ *
-+ * No checking is done; the call is passed straight through to
-+ * kmem_cache_free(...)
-+ */
-+static inline void qi_cache_free(void *obj)
-+{
-+ kmem_cache_free(qi_cache, obj);
-+}
-+
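
The two helpers above are thin wrappers around a slab cache so that hot-path descriptor allocations avoid generic kmalloc(). As an illustration of the underlying pattern, using the standard kmem_cache API that the wrappers call (the cache name and the 512-byte object size here are placeholders, not the driver's values):

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	/* name, object size, alignment, flags, constructor */
	example_cache = kmem_cache_create("example_cache", 512, 0, 0, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void *example_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(example_cache, flags);
}

static void example_free(void *obj)
{
	kmem_cache_free(example_cache, obj);
}
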
-+static struct caam_request *to_caam_req(struct crypto_async_request *areq)
-+{
-+ switch (crypto_tfm_alg_type(areq->tfm)) {
-+ case CRYPTO_ALG_TYPE_SKCIPHER:
-+ return skcipher_request_ctx(skcipher_request_cast(areq));
-+ case CRYPTO_ALG_TYPE_AEAD:
-+ return aead_request_ctx(container_of(areq, struct aead_request,
-+ base));
-+ case CRYPTO_ALG_TYPE_AHASH:
-+ return ahash_request_ctx(ahash_request_cast(areq));
-+ default:
-+ return ERR_PTR(-EINVAL);
-+ }
-+}
-+
-+static void caam_unmap(struct device *dev, struct scatterlist *src,
-+ struct scatterlist *dst, int src_nents,
-+ int dst_nents, dma_addr_t iv_dma, int ivsize,
-+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
-+{
-+ if (dst != src) {
-+ if (src_nents)
-+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
-+ } else {
-+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
-+ }
-+
-+ if (iv_dma)
-+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-+
-+ if (qm_sg_bytes)
-+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
-+}
-+
-+static int aead_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
-+ typeof(*alg), aead);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ struct device *dev = ctx->dev;
-+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
-+ struct caam_flc *flc;
-+ u32 *desc;
-+ u32 ctx1_iv_off = 0;
-+ u32 *nonce = NULL;
-+ unsigned int data_len[2];
-+ u32 inl_mask;
-+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_CTR_MOD128);
-+ const bool is_rfc3686 = alg->caam.rfc3686;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ /*
-+ * AES-CTR needs to load IV in CONTEXT1 reg
-+ * at an offset of 128bits (16bytes)
-+ * CONTEXT1[255:128] = IV
-+ */
-+ if (ctr_mode)
-+ ctx1_iv_off = 16;
-+
-+ /*
-+ * RFC3686 specific:
-+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-+ */
-+ if (is_rfc3686) {
-+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
-+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
-+ }
-+
-+ data_len[0] = ctx->adata.keylen_pad;
-+ data_len[1] = ctx->cdata.keylen;
-+
-+ /* aead_encrypt shared descriptor */
-+ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
-+ DESC_QI_AEAD_ENC_LEN) +
-+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
-+ DESC_JOB_IO_LEN, data_len, &inl_mask,
-+ ARRAY_SIZE(data_len)) < 0)
-+ return -EINVAL;
-+
-+ if (inl_mask & 1)
-+ ctx->adata.key_virt = ctx->key;
-+ else
-+ ctx->adata.key_dma = ctx->key_dma;
-+
-+ if (inl_mask & 2)
-+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-+ else
-+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-+
-+ ctx->adata.key_inline = !!(inl_mask & 1);
-+ ctx->cdata.key_inline = !!(inl_mask & 2);
-+
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+
-+ if (alg->caam.geniv)
-+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
-+ ivsize, ctx->authsize, is_rfc3686,
-+ nonce, ctx1_iv_off, true,
-+ priv->sec_attr.era);
-+ else
-+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
-+ ivsize, ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, true, priv->sec_attr.era);
-+
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ /* aead_decrypt shared descriptor */
-+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
-+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
-+ DESC_JOB_IO_LEN, data_len, &inl_mask,
-+ ARRAY_SIZE(data_len)) < 0)
-+ return -EINVAL;
-+
-+ if (inl_mask & 1)
-+ ctx->adata.key_virt = ctx->key;
-+ else
-+ ctx->adata.key_dma = ctx->key_dma;
-+
-+ if (inl_mask & 2)
-+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-+ else
-+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-+
-+ ctx->adata.key_inline = !!(inl_mask & 1);
-+ ctx->cdata.key_inline = !!(inl_mask & 2);
-+
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
-+ ivsize, ctx->authsize, alg->caam.geniv,
-+ is_rfc3686, nonce, ctx1_iv_off, true,
-+ priv->sec_attr.era);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
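
To make the CONTEXT1 layout used by aead_set_sh_desc() above explicit: plain AES-CTR loads the IV at byte offset 16 of the context register, while RFC3686 additionally keeps its 4-byte nonce in front of the IV, so ctx1_iv_off becomes 16 + CTR_RFC3686_NONCE_SIZE = 20 and the 32-bit counter sits after the 8-byte IV. A small sketch of that offset arithmetic, with the sizes hard-coded for illustration:

/* Illustrative CONTEXT1 byte offsets for CTR and RFC3686 (nonce 4, IV 8). */
enum {
	CTX1_CTR_IV_OFF      = 16,	/* CONTEXT1[255:128] holds the IV */
	RFC3686_NONCE_BYTES  = 4,
	RFC3686_IV_BYTES     = 8,
	CTX1_RFC3686_IV_OFF  = CTX1_CTR_IV_OFF + RFC3686_NONCE_BYTES,	/* 20 */
	CTX1_RFC3686_CTR_OFF = CTX1_RFC3686_IV_OFF + RFC3686_IV_BYTES	/* 28 */
};
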
-+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-+
-+ ctx->authsize = authsize;
-+ aead_set_sh_desc(authenc);
-+
-+ return 0;
-+}
-+
-+struct split_key_sh_result {
-+ struct completion completion;
-+ int err;
-+ struct device *dev;
-+};
-+
-+static void split_key_sh_done(void *cbk_ctx, u32 err)
-+{
-+ struct split_key_sh_result *res = cbk_ctx;
-+
-+#ifdef DEBUG
-+ dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-+#endif
-+
-+ if (err)
-+ caam_qi2_strstatus(res->dev, err);
-+
-+ res->err = err;
-+ complete(&res->completion);
-+}
-+
-+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *dev = ctx->dev;
-+ struct crypto_authenc_keys keys;
-+
-+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
-+ goto badkey;
-+
-+#ifdef DEBUG
-+ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
-+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
-+ keys.authkeylen);
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ ctx->adata.keylen = keys.authkeylen;
-+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
-+ goto badkey;
-+
-+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
-+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, ctx->dir);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-+ ctx->adata.keylen_pad + keys.enckeylen, 1);
-+#endif
-+
-+ ctx->cdata.keylen = keys.enckeylen;
-+
-+ return aead_set_sh_desc(aead);
-+badkey:
-+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-+ bool encrypt)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_request *req_ctx = aead_request_ctx(req);
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
-+ typeof(*alg), aead);
-+ struct device *dev = ctx->dev;
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct aead_edesc *edesc;
-+ dma_addr_t qm_sg_dma, iv_dma = 0;
-+ int ivsize = 0;
-+ unsigned int authsize = ctx->authsize;
-+ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
-+ int in_len, out_len;
-+ struct dpaa2_sg_entry *sg_table;
-+
-+ /* allocate space for base edesc, link tables and IV */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (unlikely(!edesc)) {
-+ dev_err(dev, "could not allocate extended descriptor\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ if (unlikely(req->dst != req->src)) {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen);
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
-+ }
-+
-+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize :
-+ (-authsize)));
-+ if (unlikely(dst_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : (-authsize)));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(dst_nents);
-+ }
-+
-+ if (src_nents) {
-+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(dev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ } else {
-+ mapped_src_nents = 0;
-+ }
-+
-+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(!mapped_dst_nents)) {
-+ dev_err(dev, "unable to map destination\n");
-+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ } else {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
-+ }
-+
-+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(dev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+
-+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
-+ ivsize = crypto_aead_ivsize(aead);
-+
-+ /*
-+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
-+ * Input is not contiguous.
-+ */
-+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
-+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-+ sg_table = &edesc->sgt[0];
-+ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
-+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
-+ CAAM_QI_MEMCACHE_SIZE)) {
-+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
-+ qm_sg_nents, ivsize);
-+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-+ 0, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ if (ivsize) {
-+ u8 *iv = (u8 *)(sg_table + qm_sg_nents);
-+
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+
-+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, iv_dma)) {
-+ dev_err(dev, "unable to map IV\n");
-+ caam_unmap(dev, req->src, req->dst, src_nents,
-+ dst_nents, 0, 0, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ edesc->dst_nents = dst_nents;
-+ edesc->iv_dma = iv_dma;
-+
-+ if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-+ OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
-+ /*
-+ * The associated data already includes the IV, but it must be
-+ * skipped when authenticating or encrypting.
-+ */
-+ edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
-+ else
-+ edesc->assoclen = cpu_to_caam32(req->assoclen);
-+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
-+ dev_err(dev, "unable to map assoclen\n");
-+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
-+ qm_sg_index++;
-+ if (ivsize) {
-+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
-+ qm_sg_index++;
-+ }
-+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
-+ qm_sg_index += mapped_src_nents;
-+
-+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+ qm_sg_index, 0);
-+
-+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, qm_sg_dma)) {
-+ dev_err(dev, "unable to map S/G table\n");
-+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ edesc->qm_sg_dma = qm_sg_dma;
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ out_len = req->assoclen + req->cryptlen +
-+ (encrypt ? ctx->authsize : (-ctx->authsize));
-+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
-+ dpaa2_fl_set_len(in_fle, in_len);
-+
-+ if (req->dst == req->src) {
-+ if (mapped_src_nents == 1) {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
-+ } else {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
-+ (1 + !!ivsize) * sizeof(*sg_table));
-+ }
-+ } else if (mapped_dst_nents == 1) {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
-+ } else {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
-+ sizeof(*sg_table));
-+ }
-+
-+ dpaa2_fl_set_len(out_fle, out_len);
-+
-+ return edesc;
-+}
-+
-+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ struct device *dev = ctx->dev;
-+ struct caam_flc *flc;
-+ u32 *desc;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
-+ ctx->authsize, true, true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
-+ ctx->authsize, false, true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
-+static int chachapoly_setauthsize(struct crypto_aead *aead,
-+ unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+
-+ if (authsize != POLY1305_DIGEST_SIZE)
-+ return -EINVAL;
-+
-+ ctx->authsize = authsize;
-+ return chachapoly_set_sh_desc(aead);
-+}
-+
-+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-+
-+ if (keylen != CHACHA20_KEY_SIZE + saltlen) {
-+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+ }
-+
-+ ctx->cdata.key_virt = key;
-+ ctx->cdata.keylen = keylen - saltlen;
-+
-+ return chachapoly_set_sh_desc(aead);
-+}
-+
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
-+ bool encrypt)
-+{
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ unsigned int blocksize = crypto_aead_blocksize(tls);
-+ unsigned int padsize, authsize;
-+ struct caam_request *req_ctx = aead_request_ctx(req);
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
-+ typeof(*alg), aead);
-+ struct device *dev = ctx->dev;
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct tls_edesc *edesc;
-+ dma_addr_t qm_sg_dma, iv_dma = 0;
-+ int ivsize = 0;
-+ u8 *iv;
-+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
-+ int in_len, out_len;
-+ struct dpaa2_sg_entry *sg_table;
-+ struct scatterlist *dst;
-+
-+ if (encrypt) {
-+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+ blocksize);
-+ authsize = ctx->authsize + padsize;
-+ } else {
-+ authsize = ctx->authsize;
-+ }
-+
-+ /* allocate space for base edesc, link tables and IV */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (unlikely(!edesc)) {
-+ dev_err(dev, "could not allocate extended descriptor\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ if (likely(req->src == req->dst)) {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
-+ }
-+
-+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(dev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ dst = req->dst;
-+ } else {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen);
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
-+ }
-+
-+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ if (unlikely(dst_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(dst_nents);
-+ }
-+
-+ if (src_nents) {
-+ mapped_src_nents = dma_map_sg(dev, req->src,
-+ src_nents, DMA_TO_DEVICE);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(dev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ } else {
-+ mapped_src_nents = 0;
-+ }
-+
-+ mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(!mapped_dst_nents)) {
-+ dev_err(dev, "unable to map destination\n");
-+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+
-+ /*
-+ * Create S/G table: IV, src, dst.
-+ * Input is not contiguous.
-+ */
-+ qm_sg_ents = 1 + mapped_src_nents +
-+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-+ sg_table = &edesc->sgt[0];
-+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-+
-+ ivsize = crypto_aead_ivsize(tls);
-+ iv = (u8 *)(sg_table + qm_sg_ents);
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, iv_dma)) {
-+ dev_err(dev, "unable to map IV\n");
-+ caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
-+ 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ edesc->dst_nents = dst_nents;
-+ edesc->dst = dst;
-+ edesc->iv_dma = iv_dma;
-+
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ qm_sg_index = 1;
-+
-+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
-+ qm_sg_index += mapped_src_nents;
-+
-+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
-+ qm_sg_index, 0);
-+
-+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, qm_sg_dma)) {
-+ dev_err(dev, "unable to map S/G table\n");
-+ caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
-+ ivsize, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ edesc->qm_sg_dma = qm_sg_dma;
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ out_len = req->cryptlen + (encrypt ? authsize : 0);
-+ in_len = ivsize + req->assoclen + req->cryptlen;
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
-+ dpaa2_fl_set_len(in_fle, in_len);
-+
-+ if (req->dst == req->src) {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
-+ (sg_nents_for_len(req->src, req->assoclen) +
-+ 1) * sizeof(*sg_table));
-+ } else if (mapped_dst_nents == 1) {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
-+ } else {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
-+ sizeof(*sg_table));
-+ }
-+
-+ dpaa2_fl_set_len(out_fle, out_len);
-+
-+ return edesc;
-+}
-+
-+static int tls_set_sh_desc(struct crypto_aead *tls)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ unsigned int ivsize = crypto_aead_ivsize(tls);
-+ unsigned int blocksize = crypto_aead_blocksize(tls);
-+ struct device *dev = ctx->dev;
-+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
-+ struct caam_flc *flc;
-+ u32 *desc;
-+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
-+ unsigned int data_len[2];
-+ u32 inl_mask;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ /*
-+ * TLS 1.0 encrypt shared descriptor
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ data_len[0] = ctx->adata.keylen_pad;
-+ data_len[1] = ctx->cdata.keylen;
-+
-+ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
-+ &inl_mask, ARRAY_SIZE(data_len)) < 0)
-+ return -EINVAL;
-+
-+ if (inl_mask & 1)
-+ ctx->adata.key_virt = ctx->key;
-+ else
-+ ctx->adata.key_dma = ctx->key_dma;
-+
-+ if (inl_mask & 2)
-+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-+ else
-+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-+
-+ ctx->adata.key_inline = !!(inl_mask & 1);
-+ ctx->cdata.key_inline = !!(inl_mask & 2);
-+
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize,
-+ priv->sec_attr.era);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc));
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ /*
-+ * TLS 1.0 decrypt shared descriptor
-+ * Keys do not fit inline, regardless of algorithms used
-+ */
-+ ctx->adata.key_inline = false;
-+ ctx->adata.key_dma = ctx->key_dma;
-+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-+
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
-+ ctx->authsize, blocksize, priv->sec_attr.era);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
-+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ struct device *dev = ctx->dev;
-+ struct crypto_authenc_keys keys;
-+
-+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
-+ goto badkey;
-+
-+#ifdef DEBUG
-+ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
-+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
-+ keys.authkeylen);
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ ctx->adata.keylen = keys.authkeylen;
-+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
-+ goto badkey;
-+
-+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
-+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, ctx->dir);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-+ ctx->adata.keylen_pad + keys.enckeylen, 1);
-+#endif
-+
-+ ctx->cdata.keylen = keys.enckeylen;
-+
-+ return tls_set_sh_desc(tls);
-+badkey:
-+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+
-+ ctx->authsize = authsize;
-+ tls_set_sh_desc(tls);
-+
-+ return 0;
-+}
-+
-+static int gcm_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *dev = ctx->dev;
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ struct caam_flc *flc;
-+ u32 *desc;
-+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
-+ ctx->cdata.keylen;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ /*
-+ * AES GCM encrypt shared descriptor
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ ctx->cdata.key_virt = ctx->key;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ /*
-+ * Job Descriptor and Shared Descriptors
-+ * must all fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ ctx->cdata.key_virt = ctx->key;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
-+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-+
-+ ctx->authsize = authsize;
-+ gcm_set_sh_desc(authenc);
-+
-+ return 0;
-+}
-+
-+static int gcm_setkey(struct crypto_aead *aead,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *dev = ctx->dev;
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ memcpy(ctx->key, key, keylen);
-+ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
-+ ctx->cdata.keylen = keylen;
-+
-+ return gcm_set_sh_desc(aead);
-+}
-+
-+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *dev = ctx->dev;
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ struct caam_flc *flc;
-+ u32 *desc;
-+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
-+ ctx->cdata.keylen;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ ctx->cdata.key_virt = ctx->key;
-+
-+ /*
-+ * RFC4106 encrypt shared descriptor
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ /*
-+ * Job Descriptor and Shared Descriptors
-+ * must all fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
-+static int rfc4106_setauthsize(struct crypto_aead *authenc,
-+ unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-+
-+ ctx->authsize = authsize;
-+ rfc4106_set_sh_desc(authenc);
-+
-+ return 0;
-+}
-+
-+static int rfc4106_setkey(struct crypto_aead *aead,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *dev = ctx->dev;
-+
-+ if (keylen < 4)
-+ return -EINVAL;
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ memcpy(ctx->key, key, keylen);
-+ /*
-+ * The last four bytes of the key material are used as the salt value
-+ * in the nonce. Update the AES key length.
-+ */
-+ ctx->cdata.keylen = keylen - 4;
-+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
-+ ctx->dir);
-+
-+ return rfc4106_set_sh_desc(aead);
-+}
-+
-+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *dev = ctx->dev;
-+ unsigned int ivsize = crypto_aead_ivsize(aead);
-+ struct caam_flc *flc;
-+ u32 *desc;
-+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
-+ ctx->cdata.keylen;
-+
-+ if (!ctx->cdata.keylen || !ctx->authsize)
-+ return 0;
-+
-+ ctx->cdata.key_virt = ctx->key;
-+
-+ /*
-+ * RFC4543 encrypt shared descriptor
-+ * Job Descriptor and Shared Descriptor
-+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ /*
-+ * Job Descriptor and Shared Descriptors
-+ * must all fit into the 64-word Descriptor h/w Buffer
-+ */
-+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
-+ ctx->cdata.key_inline = true;
-+ } else {
-+ ctx->cdata.key_inline = false;
-+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
-+ true);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
-+static int rfc4543_setauthsize(struct crypto_aead *authenc,
-+ unsigned int authsize)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-+
-+ ctx->authsize = authsize;
-+ rfc4543_set_sh_desc(authenc);
-+
-+ return 0;
-+}
-+
-+static int rfc4543_setkey(struct crypto_aead *aead,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct device *dev = ctx->dev;
-+
-+ if (keylen < 4)
-+ return -EINVAL;
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ memcpy(ctx->key, key, keylen);
-+ /*
-+ * The last four bytes of the key material are used as the salt value
-+ * in the nonce. Update the AES key length.
-+ */
-+ ctx->cdata.keylen = keylen - 4;
-+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
-+ ctx->dir);
-+
-+ return rfc4543_set_sh_desc(aead);
-+}
-+
-+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-+ struct caam_skcipher_alg *alg =
-+ container_of(crypto_skcipher_alg(skcipher),
-+ struct caam_skcipher_alg, skcipher);
-+ struct device *dev = ctx->dev;
-+ struct caam_flc *flc;
-+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
-+ u32 *desc;
-+ u32 ctx1_iv_off = 0;
-+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_CTR_MOD128) &&
-+ ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
-+ OP_ALG_ALGSEL_CHACHA20);
-+ const bool is_rfc3686 = alg->caam.rfc3686;
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+ /*
-+ * AES-CTR needs to load IV in CONTEXT1 reg
-+ * at an offset of 128bits (16bytes)
-+ * CONTEXT1[255:128] = IV
-+ */
-+ if (ctr_mode)
-+ ctx1_iv_off = 16;
-+
-+ /*
-+ * RFC3686 specific:
-+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-+ * | *key = {KEY, NONCE}
-+ */
-+ if (is_rfc3686) {
-+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-+ keylen -= CTR_RFC3686_NONCE_SIZE;
-+ }
-+
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = key;
-+ ctx->cdata.key_inline = true;
-+
-+ /* skcipher_encrypt shared descriptor */
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ /* skcipher_decrypt shared descriptor */
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
-+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-+ struct device *dev = ctx->dev;
-+ struct caam_flc *flc;
-+ u32 *desc;
-+
-+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-+ dev_err(dev, "key size mismatch\n");
-+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+ }
-+
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = key;
-+ ctx->cdata.key_inline = true;
-+
-+ /* xts_skcipher_encrypt shared descriptor */
-+ flc = &ctx->flc[ENCRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ /* xts_skcipher_decrypt shared descriptor */
-+ flc = &ctx->flc[DECRYPT];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
-+ sizeof(flc->flc) + desc_bytes(desc),
-+ ctx->dir);
-+
-+ return 0;
-+}
-+
-+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
-+{
-+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-+ struct caam_request *req_ctx = skcipher_request_ctx(req);
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-+ struct device *dev = ctx->dev;
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct skcipher_edesc *edesc;
-+ dma_addr_t iv_dma;
-+ u8 *iv;
-+ int ivsize = crypto_skcipher_ivsize(skcipher);
-+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
-+ struct dpaa2_sg_entry *sg_table;
-+
-+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-+ req->cryptlen);
-+ return ERR_PTR(src_nents);
-+ }
-+
-+ if (unlikely(req->dst != req->src)) {
-+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
-+ if (unlikely(dst_nents < 0)) {
-+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->cryptlen);
-+ return ERR_PTR(dst_nents);
-+ }
-+
-+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(dev, "unable to map source\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(!mapped_dst_nents)) {
-+ dev_err(dev, "unable to map destination\n");
-+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ } else {
-+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(dev, "unable to map source\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+
-+ qm_sg_ents = 1 + mapped_src_nents;
-+ dst_sg_idx = qm_sg_ents;
-+
-+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-+ qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
-+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
-+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
-+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
-+ qm_sg_ents, ivsize);
-+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-+ 0, 0, 0);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ /* allocate space for base edesc, link tables and IV */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (unlikely(!edesc)) {
-+ dev_err(dev, "could not allocate extended descriptor\n");
-+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-+ 0, 0, 0);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ /* Make sure IV is located in a DMAable area */
-+ sg_table = &edesc->sgt[0];
-+ iv = (u8 *)(sg_table + qm_sg_ents);
-+ memcpy(iv, req->iv, ivsize);
-+
-+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, iv_dma)) {
-+ dev_err(dev, "unable to map IV\n");
-+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-+ 0, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ edesc->dst_nents = dst_nents;
-+ edesc->iv_dma = iv_dma;
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
-+
-+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+ dst_sg_idx, 0);
-+
-+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
-+ dev_err(dev, "unable to map S/G table\n");
-+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
-+ dpaa2_fl_set_len(out_fle, req->cryptlen);
-+
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+
-+ if (req->src == req->dst) {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
-+ sizeof(*sg_table));
-+ } else if (mapped_dst_nents > 1) {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
-+ sizeof(*sg_table));
-+ } else {
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
-+ }
-+
-+ return edesc;
-+}
-+
-+static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
-+ struct aead_request *req)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ int ivsize = crypto_aead_ivsize(aead);
-+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+}
-+
-+static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
-+ struct aead_request *req)
-+{
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ int ivsize = crypto_aead_ivsize(tls);
-+
-+ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
-+ edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
-+ edesc->qm_sg_bytes);
-+}
-+
-+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
-+ struct skcipher_request *req)
-+{
-+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-+ int ivsize = crypto_skcipher_ivsize(skcipher);
-+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+}
-+
-+static void aead_encrypt_done(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct aead_request *req = container_of(areq, struct aead_request,
-+ base);
-+ struct caam_request *req_ctx = to_caam_req(areq);
-+ struct aead_edesc *edesc = req_ctx->edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ int ecode = 0;
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+ aead_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ aead_request_complete(req, ecode);
-+}
-+
-+static void aead_decrypt_done(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct aead_request *req = container_of(areq, struct aead_request,
-+ base);
-+ struct caam_request *req_ctx = to_caam_req(areq);
-+ struct aead_edesc *edesc = req_ctx->edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ int ecode = 0;
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ /*
-+ * Verify that the hardware ICV (authentication) check passed;
-+ * otherwise return -EBADMSG.
-+ */
-+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
-+ JRSTA_CCBERR_ERRID_ICVCHK)
-+ ecode = -EBADMSG;
-+ else
-+ ecode = -EIO;
-+ }
-+
-+ aead_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ aead_request_complete(req, ecode);
-+}
-+
-+static int aead_encrypt(struct aead_request *req)
-+{
-+ struct aead_edesc *edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct caam_request *caam_req = aead_request_ctx(req);
-+ int ret;
-+
-+ /* allocate extended descriptor */
-+ edesc = aead_edesc_alloc(req, true);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ caam_req->flc = &ctx->flc[ENCRYPT];
-+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
-+ caam_req->cbk = aead_encrypt_done;
-+ caam_req->ctx = &req->base;
-+ caam_req->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
-+ aead_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static int aead_decrypt(struct aead_request *req)
-+{
-+ struct aead_edesc *edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct caam_request *caam_req = aead_request_ctx(req);
-+ int ret;
-+
-+ /* allocate extended descriptor */
-+ edesc = aead_edesc_alloc(req, false);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ caam_req->flc = &ctx->flc[DECRYPT];
-+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
-+ caam_req->cbk = aead_decrypt_done;
-+ caam_req->ctx = &req->base;
-+ caam_req->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
-+ aead_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static void tls_encrypt_done(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct aead_request *req = container_of(areq, struct aead_request,
-+ base);
-+ struct caam_request *req_ctx = to_caam_req(areq);
-+ struct tls_edesc *edesc = req_ctx->edesc;
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ int ecode = 0;
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+ tls_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ aead_request_complete(req, ecode);
-+}
-+
-+static void tls_decrypt_done(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct aead_request *req = container_of(areq, struct aead_request,
-+ base);
-+ struct caam_request *req_ctx = to_caam_req(areq);
-+ struct tls_edesc *edesc = req_ctx->edesc;
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ int ecode = 0;
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ /*
-+ * Verify that the hardware ICV (authentication) check passed;
-+ * otherwise return -EBADMSG.
-+ */
-+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
-+ JRSTA_CCBERR_ERRID_ICVCHK)
-+ ecode = -EBADMSG;
-+ else
-+ ecode = -EIO;
-+ }
-+
-+ tls_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ aead_request_complete(req, ecode);
-+}
-+
-+static int tls_encrypt(struct aead_request *req)
-+{
-+ struct tls_edesc *edesc;
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ struct caam_request *caam_req = aead_request_ctx(req);
-+ int ret;
-+
-+ /* allocate extended descriptor */
-+ edesc = tls_edesc_alloc(req, true);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ caam_req->flc = &ctx->flc[ENCRYPT];
-+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
-+ caam_req->cbk = tls_encrypt_done;
-+ caam_req->ctx = &req->base;
-+ caam_req->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
-+ tls_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static int tls_decrypt(struct aead_request *req)
-+{
-+ struct tls_edesc *edesc;
-+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ struct caam_request *caam_req = aead_request_ctx(req);
-+ int ret;
-+
-+ /* allocate extended descriptor */
-+ edesc = tls_edesc_alloc(req, false);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ caam_req->flc = &ctx->flc[DECRYPT];
-+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
-+ caam_req->cbk = tls_decrypt_done;
-+ caam_req->ctx = &req->base;
-+ caam_req->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
-+ tls_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ipsec_gcm_encrypt(struct aead_request *req)
-+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-+
-+ return aead_encrypt(req);
-+}
-+
-+static int ipsec_gcm_decrypt(struct aead_request *req)
-+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-+
-+ return aead_decrypt(req);
-+}
-+
-+static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct skcipher_request *req = skcipher_request_cast(areq);
-+ struct caam_request *req_ctx = to_caam_req(areq);
-+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-+ struct skcipher_edesc *edesc = req_ctx->edesc;
-+ int ecode = 0;
-+ int ivsize = crypto_skcipher_ivsize(skcipher);
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-+ edesc->src_nents > 1 ? 100 : ivsize, 1);
-+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-+#endif
-+
-+ skcipher_unmap(ctx->dev, edesc, req);
-+
-+ /*
-+ * The crypto API expects us to set the IV (req->iv) to the last
-+ * ciphertext block. This is used e.g. by the CTS mode.
-+ */
-+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
-+ ivsize, 0);
-+
-+ qi_cache_free(edesc);
-+ skcipher_request_complete(req, ecode);
-+}
-+
-+static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct skcipher_request *req = skcipher_request_cast(areq);
-+ struct caam_request *req_ctx = to_caam_req(areq);
-+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-+ struct skcipher_edesc *edesc = req_ctx->edesc;
-+ int ecode = 0;
-+#ifdef DEBUG
-+ int ivsize = crypto_skcipher_ivsize(skcipher);
-+
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-+ edesc->src_nents > 1 ? 100 : ivsize, 1);
-+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-+#endif
-+
-+ skcipher_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ skcipher_request_complete(req, ecode);
-+}
-+
-+static int skcipher_encrypt(struct skcipher_request *req)
-+{
-+ struct skcipher_edesc *edesc;
-+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-+ struct caam_request *caam_req = skcipher_request_ctx(req);
-+ int ret;
-+
-+ /* allocate extended descriptor */
-+ edesc = skcipher_edesc_alloc(req);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ caam_req->flc = &ctx->flc[ENCRYPT];
-+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
-+ caam_req->cbk = skcipher_encrypt_done;
-+ caam_req->ctx = &req->base;
-+ caam_req->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
-+ skcipher_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static int skcipher_decrypt(struct skcipher_request *req)
-+{
-+ struct skcipher_edesc *edesc;
-+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-+ struct caam_request *caam_req = skcipher_request_ctx(req);
-+ int ivsize = crypto_skcipher_ivsize(skcipher);
-+ int ret;
-+
-+ /* allocate extended descriptor */
-+ edesc = skcipher_edesc_alloc(req);
-+ if (IS_ERR(edesc))
-+ return PTR_ERR(edesc);
-+
-+ /*
-+ * The crypto API expects us to set the IV (req->iv) to the last
-+ * ciphertext block.
-+ */
-+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
-+ ivsize, 0);
-+
-+ caam_req->flc = &ctx->flc[DECRYPT];
-+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
-+ caam_req->cbk = skcipher_decrypt_done;
-+ caam_req->ctx = &req->base;
-+ caam_req->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
-+ skcipher_unmap(ctx->dev, edesc, req);
-+ qi_cache_free(edesc);
-+ }
-+
-+ return ret;
-+}
-+
-+static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
-+ bool uses_dkp)
-+{
-+ dma_addr_t dma_addr;
-+ int i;
-+
-+ /* copy descriptor header template value */
-+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
-+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
-+
-+ ctx->dev = caam->dev;
-+ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-+
-+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
-+ offsetof(struct caam_ctx, flc_dma),
-+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
-+ if (dma_mapping_error(ctx->dev, dma_addr)) {
-+ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < NUM_OP; i++)
-+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
-+ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
-+
-+ return 0;
-+}
-+
-+static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
-+{
-+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-+ struct caam_skcipher_alg *caam_alg =
-+ container_of(alg, typeof(*caam_alg), skcipher);
-+
-+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
-+ return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
-+}
-+
-+static int caam_cra_init_aead(struct crypto_aead *tfm)
-+{
-+ struct aead_alg *alg = crypto_aead_alg(tfm);
-+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
-+ aead);
-+
-+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
-+ return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
-+ (alg->setkey == aead_setkey) ||
-+ (alg->setkey == tls_setkey));
-+}
-+
-+static void caam_exit_common(struct caam_ctx *ctx)
-+{
-+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
-+ offsetof(struct caam_ctx, flc_dma), ctx->dir,
-+ DMA_ATTR_SKIP_CPU_SYNC);
-+}
-+
-+static void caam_cra_exit(struct crypto_skcipher *tfm)
-+{
-+ caam_exit_common(crypto_skcipher_ctx(tfm));
-+}
-+
-+static void caam_cra_exit_aead(struct crypto_aead *tfm)
-+{
-+ caam_exit_common(crypto_aead_ctx(tfm));
-+}
-+
-+static struct caam_skcipher_alg driver_algs[] = {
-+ {
-+ .skcipher = {
-+ .base = {
-+ .cra_name = "cbc(aes)",
-+ .cra_driver_name = "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = skcipher_setkey,
-+ .encrypt = skcipher_encrypt,
-+ .decrypt = skcipher_decrypt,
-+ .min_keysize = AES_MIN_KEY_SIZE,
-+ .max_keysize = AES_MAX_KEY_SIZE,
-+ .ivsize = AES_BLOCK_SIZE,
-+ },
-+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ },
-+ {
-+ .skcipher = {
-+ .base = {
-+ .cra_name = "cbc(des3_ede)",
-+ .cra_driver_name = "cbc-3des-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = skcipher_setkey,
-+ .encrypt = skcipher_encrypt,
-+ .decrypt = skcipher_decrypt,
-+ .min_keysize = DES3_EDE_KEY_SIZE,
-+ .max_keysize = DES3_EDE_KEY_SIZE,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ },
-+ {
-+ .skcipher = {
-+ .base = {
-+ .cra_name = "cbc(des)",
-+ .cra_driver_name = "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = skcipher_setkey,
-+ .encrypt = skcipher_encrypt,
-+ .decrypt = skcipher_decrypt,
-+ .min_keysize = DES_KEY_SIZE,
-+ .max_keysize = DES_KEY_SIZE,
-+ .ivsize = DES_BLOCK_SIZE,
-+ },
-+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ },
-+ {
-+ .skcipher = {
-+ .base = {
-+ .cra_name = "ctr(aes)",
-+ .cra_driver_name = "ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = skcipher_setkey,
-+ .encrypt = skcipher_encrypt,
-+ .decrypt = skcipher_decrypt,
-+ .min_keysize = AES_MIN_KEY_SIZE,
-+ .max_keysize = AES_MAX_KEY_SIZE,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .chunksize = AES_BLOCK_SIZE,
-+ },
-+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ },
-+ {
-+ .skcipher = {
-+ .base = {
-+ .cra_name = "rfc3686(ctr(aes))",
-+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = skcipher_setkey,
-+ .encrypt = skcipher_encrypt,
-+ .decrypt = skcipher_decrypt,
-+ .min_keysize = AES_MIN_KEY_SIZE +
-+ CTR_RFC3686_NONCE_SIZE,
-+ .max_keysize = AES_MAX_KEY_SIZE +
-+ CTR_RFC3686_NONCE_SIZE,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .chunksize = AES_BLOCK_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .rfc3686 = true,
-+ },
-+ },
-+ {
-+ .skcipher = {
-+ .base = {
-+ .cra_name = "xts(aes)",
-+ .cra_driver_name = "xts-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = xts_skcipher_setkey,
-+ .encrypt = skcipher_encrypt,
-+ .decrypt = skcipher_decrypt,
-+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
-+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
-+ .ivsize = AES_BLOCK_SIZE,
-+ },
-+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
-+ },
-+ {
-+ .skcipher = {
-+ .base = {
-+ .cra_name = "chacha20",
-+ .cra_driver_name = "chacha20-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = skcipher_setkey,
-+ .encrypt = skcipher_encrypt,
-+ .decrypt = skcipher_decrypt,
-+ .min_keysize = CHACHA20_KEY_SIZE,
-+ .max_keysize = CHACHA20_KEY_SIZE,
-+ .ivsize = CHACHA20_IV_SIZE,
-+ },
-+ .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
-+ },
-+};
-+
-+static struct caam_aead_alg driver_aeads[] = {
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc4106(gcm(aes))",
-+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = rfc4106_setkey,
-+ .setauthsize = rfc4106_setauthsize,
-+ .encrypt = ipsec_gcm_encrypt,
-+ .decrypt = ipsec_gcm_decrypt,
-+ .ivsize = 8,
-+ .maxauthsize = AES_BLOCK_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc4543(gcm(aes))",
-+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = rfc4543_setkey,
-+ .setauthsize = rfc4543_setauthsize,
-+ .encrypt = ipsec_gcm_encrypt,
-+ .decrypt = ipsec_gcm_decrypt,
-+ .ivsize = 8,
-+ .maxauthsize = AES_BLOCK_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
-+ },
-+ },
-+ /* Galois Counter Mode */
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "gcm(aes)",
-+ .cra_driver_name = "gcm-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = gcm_setkey,
-+ .setauthsize = gcm_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = 12,
-+ .maxauthsize = AES_BLOCK_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
-+ }
-+ },
-+ /* single-pass ipsec_esp descriptor */
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(md5),cbc(aes))",
-+ .cra_driver_name = "authenc-hmac-md5-"
-+ "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(md5),"
-+ "cbc(aes)))",
-+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
-+ "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
-+ .cra_driver_name = "authenc-hmac-sha1-"
-+ "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha1),"
-+ "cbc(aes)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha1-cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
-+ .cra_driver_name = "authenc-hmac-sha224-"
-+ "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha224),"
-+ "cbc(aes)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha224-cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
-+ .cra_driver_name = "authenc-hmac-sha256-"
-+ "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha256),"
-+ "cbc(aes)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha256-cbc-aes-"
-+ "caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
-+ .cra_driver_name = "authenc-hmac-sha384-"
-+ "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha384),"
-+ "cbc(aes)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha384-cbc-aes-"
-+ "caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
-+ .cra_driver_name = "authenc-hmac-sha512-"
-+ "cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha512),"
-+ "cbc(aes)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha512-cbc-aes-"
-+ "caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
-+ .cra_driver_name = "authenc-hmac-md5-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(md5),"
-+ "cbc(des3_ede)))",
-+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha1),"
-+ "cbc(des3_ede))",
-+ .cra_driver_name = "authenc-hmac-sha1-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha1),"
-+ "cbc(des3_ede)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha1-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha224),"
-+ "cbc(des3_ede))",
-+ .cra_driver_name = "authenc-hmac-sha224-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha224),"
-+ "cbc(des3_ede)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha224-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha256),"
-+ "cbc(des3_ede))",
-+ .cra_driver_name = "authenc-hmac-sha256-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha256),"
-+ "cbc(des3_ede)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha256-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha384),"
-+ "cbc(des3_ede))",
-+ .cra_driver_name = "authenc-hmac-sha384-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha384),"
-+ "cbc(des3_ede)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha384-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha512),"
-+ "cbc(des3_ede))",
-+ .cra_driver_name = "authenc-hmac-sha512-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha512),"
-+ "cbc(des3_ede)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha512-"
-+ "cbc-des3_ede-caam-qi2",
-+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES3_EDE_BLOCK_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(md5),cbc(des))",
-+ .cra_driver_name = "authenc-hmac-md5-"
-+ "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(md5),"
-+ "cbc(des)))",
-+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
-+ "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha1),cbc(des))",
-+ .cra_driver_name = "authenc-hmac-sha1-"
-+ "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha1),"
-+ "cbc(des)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha1-cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha224),cbc(des))",
-+ .cra_driver_name = "authenc-hmac-sha224-"
-+ "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha224),"
-+ "cbc(des)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha224-cbc-des-"
-+ "caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha256),cbc(des))",
-+ .cra_driver_name = "authenc-hmac-sha256-"
-+ "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha256),"
-+ "cbc(des)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+						   "hmac-sha256-cbc-des-"
-+ "caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha384),cbc(des))",
-+ .cra_driver_name = "authenc-hmac-sha384-"
-+ "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha384),"
-+ "cbc(des)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha384-cbc-des-"
-+ "caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha512),cbc(des))",
-+ .cra_driver_name = "authenc-hmac-sha512-"
-+ "cbc-des-caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "echainiv(authenc(hmac(sha512),"
-+ "cbc(des)))",
-+ .cra_driver_name = "echainiv-authenc-"
-+ "hmac-sha512-cbc-des-"
-+ "caam-qi2",
-+ .cra_blocksize = DES_BLOCK_SIZE,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = DES_BLOCK_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .geniv = true,
-+ }
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(md5),"
-+ "rfc3686(ctr(aes)))",
-+ .cra_driver_name = "authenc-hmac-md5-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc("
-+ "hmac(md5),rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha1),"
-+ "rfc3686(ctr(aes)))",
-+ .cra_driver_name = "authenc-hmac-sha1-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc("
-+ "hmac(sha1),rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha224),"
-+ "rfc3686(ctr(aes)))",
-+ .cra_driver_name = "authenc-hmac-sha224-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc("
-+ "hmac(sha224),rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA224_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha256),"
-+ "rfc3686(ctr(aes)))",
-+ .cra_driver_name = "authenc-hmac-sha256-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc(hmac(sha256),"
-+ "rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA256_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha384),"
-+ "rfc3686(ctr(aes)))",
-+ .cra_driver_name = "authenc-hmac-sha384-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc(hmac(sha384),"
-+ "rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc7539(chacha20,poly1305)",
-+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
-+ "caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = chachapoly_setkey,
-+ .setauthsize = chachapoly_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CHACHAPOLY_IV_SIZE,
-+ .maxauthsize = POLY1305_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
-+ OP_ALG_AAI_AEAD,
-+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
-+ OP_ALG_AAI_AEAD,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "rfc7539esp(chacha20,poly1305)",
-+ .cra_driver_name = "rfc7539esp-chacha20-"
-+ "poly1305-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = chachapoly_setkey,
-+ .setauthsize = chachapoly_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = 8,
-+ .maxauthsize = POLY1305_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
-+ OP_ALG_AAI_AEAD,
-+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
-+ OP_ALG_AAI_AEAD,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha512),"
-+ "rfc3686(ctr(aes)))",
-+ .cra_driver_name = "authenc-hmac-sha512-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc(hmac(sha512),"
-+ "rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
-+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
-+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "tls10(hmac(sha1),cbc(aes))",
-+ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
-+ },
-+ .setkey = tls_setkey,
-+ .setauthsize = tls_setauthsize,
-+ .encrypt = tls_encrypt,
-+ .decrypt = tls_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ },
-+ },
-+};
-+
-+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
-+{
-+ struct skcipher_alg *alg = &t_alg->skcipher;
-+
-+ alg->base.cra_module = THIS_MODULE;
-+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
-+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
-+
-+ alg->init = caam_cra_init_skcipher;
-+ alg->exit = caam_cra_exit;
-+}
-+
-+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
-+{
-+ struct aead_alg *alg = &t_alg->aead;
-+
-+ alg->base.cra_module = THIS_MODULE;
-+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
-+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
-+
-+ alg->init = caam_cra_init_aead;
-+ alg->exit = caam_cra_exit_aead;
-+}
-+
-+/* max hash key is max split key size */
-+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
-+
-+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
-+#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-+
-+#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
-+ CAAM_MAX_HASH_KEY_SIZE)
-+#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
-+
-+/* caam context sizes for hashes: running digest + 8 */
-+#define HASH_MSG_LEN 8
-+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
-+
-+enum hash_optype {
-+ UPDATE = 0,
-+ UPDATE_FIRST,
-+ FINALIZE,
-+ DIGEST,
-+ HASH_NUM_OP
-+};
-+
-+/**
-+ * caam_hash_ctx - ahash per-session context
-+ * @flc: Flow Contexts array
-+ * @flc_dma: I/O virtual addresses of the Flow Contexts
-+ * @key: virtual address of the authentication key
-+ * @dev: dpseci device
-+ * @ctx_len: size of Context Register
-+ * @adata: hashing algorithm details
-+ */
-+struct caam_hash_ctx {
-+ struct caam_flc flc[HASH_NUM_OP];
-+ dma_addr_t flc_dma[HASH_NUM_OP];
-+ u8 key[CAAM_MAX_HASH_KEY_SIZE];
-+ struct device *dev;
-+ int ctx_len;
-+ struct alginfo adata;
-+};
-+
-+/* ahash state */
-+struct caam_hash_state {
-+ struct caam_request caam_req;
-+ dma_addr_t buf_dma;
-+ dma_addr_t ctx_dma;
-+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
-+ int buflen_0;
-+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
-+ int buflen_1;
-+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
-+ int (*update)(struct ahash_request *req);
-+ int (*final)(struct ahash_request *req);
-+ int (*finup)(struct ahash_request *req);
-+ int current_buf;
-+};
-+
-+struct caam_export_state {
-+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
-+ u8 caam_ctx[MAX_CTX_LEN];
-+ int buflen;
-+ int (*update)(struct ahash_request *req);
-+ int (*final)(struct ahash_request *req);
-+ int (*finup)(struct ahash_request *req);
-+};
-+
-+static inline void switch_buf(struct caam_hash_state *state)
-+{
-+ state->current_buf ^= 1;
-+}
-+
-+static inline u8 *current_buf(struct caam_hash_state *state)
-+{
-+ return state->current_buf ? state->buf_1 : state->buf_0;
-+}
-+
-+static inline u8 *alt_buf(struct caam_hash_state *state)
-+{
-+ return state->current_buf ? state->buf_0 : state->buf_1;
-+}
-+
-+static inline int *current_buflen(struct caam_hash_state *state)
-+{
-+ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-+}
-+
-+static inline int *alt_buflen(struct caam_hash_state *state)
-+{
-+ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-+}
-+
-+/* Map current buffer in state (if length > 0) and put it in link table */
-+static inline int buf_map_to_qm_sg(struct device *dev,
-+ struct dpaa2_sg_entry *qm_sg,
-+ struct caam_hash_state *state)
-+{
-+ int buflen = *current_buflen(state);
-+
-+ if (!buflen)
-+ return 0;
-+
-+ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, state->buf_dma)) {
-+ dev_err(dev, "unable to map buf\n");
-+ state->buf_dma = 0;
-+ return -ENOMEM;
-+ }
-+
-+ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
-+
-+ return 0;
-+}
-+
-+/* Map state->caam_ctx, and add it to link table */
-+static inline int ctx_map_to_qm_sg(struct device *dev,
-+ struct caam_hash_state *state, int ctx_len,
-+ struct dpaa2_sg_entry *qm_sg, u32 flag)
-+{
-+ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
-+ if (dma_mapping_error(dev, state->ctx_dma)) {
-+ dev_err(dev, "unable to map ctx\n");
-+ state->ctx_dma = 0;
-+ return -ENOMEM;
-+ }
-+
-+ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
-+
-+ return 0;
-+}
-+
-+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
-+{
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
-+ struct caam_flc *flc;
-+ u32 *desc;
-+
-+ ctx->adata.key_virt = ctx->key;
-+ ctx->adata.key_inline = true;
-+
-+ /* ahash_update shared descriptor */
-+ flc = &ctx->flc[UPDATE];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
-+ ctx->ctx_len, true, priv->sec_attr.era);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
-+ desc_bytes(desc), DMA_BIDIRECTIONAL);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR,
-+ "ahash update shdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+ /* ahash_update_first shared descriptor */
-+ flc = &ctx->flc[UPDATE_FIRST];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
-+ ctx->ctx_len, false, priv->sec_attr.era);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
-+ desc_bytes(desc), DMA_BIDIRECTIONAL);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR,
-+ "ahash update first shdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+ /* ahash_final shared descriptor */
-+ flc = &ctx->flc[FINALIZE];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
-+ ctx->ctx_len, true, priv->sec_attr.era);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
-+ desc_bytes(desc), DMA_BIDIRECTIONAL);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR,
-+ "ahash final shdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+ /* ahash_digest shared descriptor */
-+ flc = &ctx->flc[DIGEST];
-+ desc = flc->sh_desc;
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
-+ ctx->ctx_len, false, priv->sec_attr.era);
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
-+ desc_bytes(desc), DMA_BIDIRECTIONAL);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR,
-+ "ahash digest shdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+ return 0;
-+}
-+
-+/* Digest the key if it is longer than the hash block size */
-+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
-+ u32 *keylen, u8 *key_out, u32 digestsize)
-+{
-+ struct caam_request *req_ctx;
-+ u32 *desc;
-+ struct split_key_sh_result result;
-+ dma_addr_t src_dma, dst_dma;
-+ struct caam_flc *flc;
-+ dma_addr_t flc_dma;
-+ int ret = -ENOMEM;
-+ struct dpaa2_fl_entry *in_fle, *out_fle;
-+
-+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
-+ if (!req_ctx)
-+ return -ENOMEM;
-+
-+ in_fle = &req_ctx->fd_flt[1];
-+ out_fle = &req_ctx->fd_flt[0];
-+
-+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
-+ if (!flc)
-+ goto err_flc;
-+
-+ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, src_dma)) {
-+ dev_err(ctx->dev, "unable to map key input memory\n");
-+ goto err_src_dma;
-+ }
-+ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, dst_dma)) {
-+ dev_err(ctx->dev, "unable to map key output memory\n");
-+ goto err_dst_dma;
-+ }
-+
-+ desc = flc->sh_desc;
-+
-+ init_sh_desc(desc, 0);
-+
-+ /* descriptor to perform unkeyed hash on key_in */
-+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
-+ OP_ALG_AS_INITFINAL);
-+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
-+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
-+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
-+
-+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
-+ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, flc_dma)) {
-+ dev_err(ctx->dev, "unable to map shared descriptor\n");
-+ goto err_flc_dma;
-+ }
-+
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(in_fle, src_dma);
-+ dpaa2_fl_set_len(in_fle, *keylen);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, dst_dma);
-+ dpaa2_fl_set_len(out_fle, digestsize);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
-+ print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+ result.err = 0;
-+ init_completion(&result.completion);
-+ result.dev = ctx->dev;
-+
-+ req_ctx->flc = flc;
-+ req_ctx->flc_dma = flc_dma;
-+ req_ctx->cbk = split_key_sh_done;
-+ req_ctx->ctx = &result;
-+
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret == -EINPROGRESS) {
-+ /* in progress */
-+ wait_for_completion(&result.completion);
-+ ret = result.err;
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR,
-+ "digested key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
-+ 1);
-+#endif
-+ }
-+
-+ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_TO_DEVICE);
-+err_flc_dma:
-+ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
-+err_dst_dma:
-+ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
-+err_src_dma:
-+ kfree(flc);
-+err_flc:
-+ kfree(req_ctx);
-+
-+ *keylen = digestsize;
-+
-+ return ret;
-+}
-+
-+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
-+ unsigned int keylen)
-+{
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
-+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
-+ int ret;
-+ u8 *hashed_key = NULL;
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
-+#endif
-+
-+ if (keylen > blocksize) {
-+ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
-+ GFP_KERNEL | GFP_DMA);
-+ if (!hashed_key)
-+ return -ENOMEM;
-+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
-+ digestsize);
-+ if (ret)
-+ goto bad_free_key;
-+ key = hashed_key;
-+ }
-+
-+ ctx->adata.keylen = keylen;
-+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
-+ goto bad_free_key;
-+
-+ memcpy(ctx->key, key, keylen);
-+
-+ kfree(hashed_key);
-+ return ahash_set_sh_desc(ahash);
-+bad_free_key:
-+ kfree(hashed_key);
-+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
-+ struct ahash_request *req, int dst_len)
-+{
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+
-+ if (edesc->src_nents)
-+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
-+ if (edesc->dst_dma)
-+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
-+
-+ if (edesc->qm_sg_bytes)
-+ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
-+ DMA_TO_DEVICE);
-+
-+ if (state->buf_dma) {
-+ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
-+ DMA_TO_DEVICE);
-+ state->buf_dma = 0;
-+ }
-+}
-+
-+static inline void ahash_unmap_ctx(struct device *dev,
-+ struct ahash_edesc *edesc,
-+ struct ahash_request *req, int dst_len,
-+ u32 flag)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+
-+ if (state->ctx_dma) {
-+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
-+ state->ctx_dma = 0;
-+ }
-+ ahash_unmap(dev, edesc, req, dst_len);
-+}
-+
-+static void ahash_done(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct ahash_request *req = ahash_request_cast(areq);
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct ahash_edesc *edesc = state->caam_req.edesc;
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ int ecode = 0;
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+ ahash_unmap(ctx->dev, edesc, req, digestsize);
-+ qi_cache_free(edesc);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-+ ctx->ctx_len, 1);
-+ if (req->result)
-+ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-+ digestsize, 1);
-+#endif
-+
-+ req->base.complete(&req->base, ecode);
-+}
-+
-+static void ahash_done_bi(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct ahash_request *req = ahash_request_cast(areq);
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct ahash_edesc *edesc = state->caam_req.edesc;
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ int ecode = 0;
-+#ifdef DEBUG
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
-+ switch_buf(state);
-+ qi_cache_free(edesc);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-+ ctx->ctx_len, 1);
-+ if (req->result)
-+ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-+ digestsize, 1);
-+#endif
-+
-+ req->base.complete(&req->base, ecode);
-+}
-+
-+static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct ahash_request *req = ahash_request_cast(areq);
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct ahash_edesc *edesc = state->caam_req.edesc;
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ int ecode = 0;
-+
-+#ifdef DEBUG
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
-+ qi_cache_free(edesc);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-+ ctx->ctx_len, 1);
-+ if (req->result)
-+ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-+ digestsize, 1);
-+#endif
-+
-+ req->base.complete(&req->base, ecode);
-+}
-+
-+static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
-+{
-+ struct crypto_async_request *areq = cbk_ctx;
-+ struct ahash_request *req = ahash_request_cast(areq);
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct ahash_edesc *edesc = state->caam_req.edesc;
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ int ecode = 0;
-+#ifdef DEBUG
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+
-+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-+#endif
-+
-+ if (unlikely(status)) {
-+ caam_qi2_strstatus(ctx->dev, status);
-+ ecode = -EIO;
-+ }
-+
-+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
-+ switch_buf(state);
-+ qi_cache_free(edesc);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-+ ctx->ctx_len, 1);
-+ if (req->result)
-+ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-+ digestsize, 1);
-+#endif
-+
-+ req->base.complete(&req->base, ecode);
-+}
-+
-+static int ahash_update_ctx(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ u8 *buf = current_buf(state);
-+ int *buflen = current_buflen(state);
-+ u8 *next_buf = alt_buf(state);
-+ int *next_buflen = alt_buflen(state), last_buflen;
-+ int in_len = *buflen + req->nbytes, to_hash;
-+ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
-+ struct ahash_edesc *edesc;
-+ int ret = 0;
-+
-+ last_buflen = *next_buflen;
-+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
-+ to_hash = in_len - *next_buflen;
-+
-+ if (to_hash) {
-+ struct dpaa2_sg_entry *sg_table;
-+
-+ src_nents = sg_nents_for_len(req->src,
-+ req->nbytes - (*next_buflen));
-+ if (src_nents < 0) {
-+ dev_err(ctx->dev, "Invalid number of src SG.\n");
-+ return src_nents;
-+ }
-+
-+ if (src_nents) {
-+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (!mapped_nents) {
-+ dev_err(ctx->dev, "unable to DMA map source\n");
-+ return -ENOMEM;
-+ }
-+ } else {
-+ mapped_nents = 0;
-+ }
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc) {
-+ dma_unmap_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
-+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
-+ sizeof(*sg_table);
-+ sg_table = &edesc->sgt[0];
-+
-+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-+ DMA_BIDIRECTIONAL);
-+ if (ret)
-+ goto unmap_ctx;
-+
-+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
-+ if (ret)
-+ goto unmap_ctx;
-+
-+ if (mapped_nents) {
-+ sg_to_qm_sg_last(req->src, mapped_nents,
-+ sg_table + qm_sg_src_index, 0);
-+ if (*next_buflen)
-+ scatterwalk_map_and_copy(next_buf, req->src,
-+ to_hash - *buflen,
-+ *next_buflen, 0);
-+ } else {
-+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
-+ true);
-+ }
-+
-+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
-+ qm_sg_bytes, DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
-+ dev_err(ctx->dev, "unable to map S/G table\n");
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
-+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
-+
-+ req_ctx->flc = &ctx->flc[UPDATE];
-+ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
-+ req_ctx->cbk = ahash_done_bi;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY &&
-+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ goto unmap_ctx;
-+ } else if (*next_buflen) {
-+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
-+ req->nbytes, 0);
-+ *buflen = *next_buflen;
-+ *next_buflen = last_buflen;
-+ }
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-+ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
-+ *next_buflen, 1);
-+#endif
-+
-+ return ret;
-+unmap_ctx:
-+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
-+ qi_cache_free(edesc);
-+ return ret;
-+}
-+
-+static int ahash_final_ctx(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int buflen = *current_buflen(state);
-+ int qm_sg_bytes, qm_sg_src_index;
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ struct ahash_edesc *edesc;
-+ struct dpaa2_sg_entry *sg_table;
-+ int ret;
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc)
-+ return -ENOMEM;
-+
-+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
-+ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
-+ sg_table = &edesc->sgt[0];
-+
-+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-+ DMA_TO_DEVICE);
-+ if (ret)
-+ goto unmap_ctx;
-+
-+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
-+ if (ret)
-+ goto unmap_ctx;
-+
-+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
-+
-+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
-+ dev_err(ctx->dev, "unable to map S/G table\n");
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-+ dev_err(ctx->dev, "unable to map dst\n");
-+ edesc->dst_dma = 0;
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
-+ dpaa2_fl_set_len(out_fle, digestsize);
-+
-+ req_ctx->flc = &ctx->flc[FINALIZE];
-+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
-+ req_ctx->cbk = ahash_done_ctx_src;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret == -EINPROGRESS ||
-+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ return ret;
-+
-+unmap_ctx:
-+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
-+ qi_cache_free(edesc);
-+ return ret;
-+}
-+
-+static int ahash_finup_ctx(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int buflen = *current_buflen(state);
-+ int qm_sg_bytes, qm_sg_src_index;
-+ int src_nents, mapped_nents;
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ struct ahash_edesc *edesc;
-+ struct dpaa2_sg_entry *sg_table;
-+ int ret;
-+
-+ src_nents = sg_nents_for_len(req->src, req->nbytes);
-+ if (src_nents < 0) {
-+ dev_err(ctx->dev, "Invalid number of src SG.\n");
-+ return src_nents;
-+ }
-+
-+ if (src_nents) {
-+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (!mapped_nents) {
-+ dev_err(ctx->dev, "unable to DMA map source\n");
-+ return -ENOMEM;
-+ }
-+ } else {
-+ mapped_nents = 0;
-+ }
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc) {
-+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
-+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
-+ sg_table = &edesc->sgt[0];
-+
-+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-+ DMA_TO_DEVICE);
-+ if (ret)
-+ goto unmap_ctx;
-+
-+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
-+ if (ret)
-+ goto unmap_ctx;
-+
-+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
-+
-+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
-+ dev_err(ctx->dev, "unable to map S/G table\n");
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-+ dev_err(ctx->dev, "unable to map dst\n");
-+ edesc->dst_dma = 0;
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
-+ dpaa2_fl_set_len(out_fle, digestsize);
-+
-+ req_ctx->flc = &ctx->flc[FINALIZE];
-+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
-+ req_ctx->cbk = ahash_done_ctx_src;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret == -EINPROGRESS ||
-+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ return ret;
-+
-+unmap_ctx:
-+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
-+ qi_cache_free(edesc);
-+ return ret;
-+}
-+
-+static int ahash_digest(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ int src_nents, mapped_nents;
-+ struct ahash_edesc *edesc;
-+ int ret = -ENOMEM;
-+
-+ state->buf_dma = 0;
-+
-+ src_nents = sg_nents_for_len(req->src, req->nbytes);
-+ if (src_nents < 0) {
-+ dev_err(ctx->dev, "Invalid number of src SG.\n");
-+ return src_nents;
-+ }
-+
-+ if (src_nents) {
-+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (!mapped_nents) {
-+ dev_err(ctx->dev, "unable to map source for DMA\n");
-+ return ret;
-+ }
-+ } else {
-+ mapped_nents = 0;
-+ }
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc) {
-+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
-+ return ret;
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+
-+ if (mapped_nents > 1) {
-+ int qm_sg_bytes;
-+ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
-+
-+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
-+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
-+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
-+ qm_sg_bytes, DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
-+ dev_err(ctx->dev, "unable to map S/G table\n");
-+ goto unmap;
-+ }
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+ } else {
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
-+ }
-+
-+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-+ dev_err(ctx->dev, "unable to map dst\n");
-+ edesc->dst_dma = 0;
-+ goto unmap;
-+ }
-+
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_len(in_fle, req->nbytes);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
-+ dpaa2_fl_set_len(out_fle, digestsize);
-+
-+ req_ctx->flc = &ctx->flc[DIGEST];
-+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
-+ req_ctx->cbk = ahash_done;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret == -EINPROGRESS ||
-+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ return ret;
-+
-+unmap:
-+ ahash_unmap(ctx->dev, edesc, req, digestsize);
-+ qi_cache_free(edesc);
-+ return ret;
-+}
-+
-+static int ahash_final_no_ctx(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ u8 *buf = current_buf(state);
-+ int buflen = *current_buflen(state);
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ struct ahash_edesc *edesc;
-+ int ret = -ENOMEM;
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc)
-+ return ret;
-+
-+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
-+ dev_err(ctx->dev, "unable to map src\n");
-+ goto unmap;
-+ }
-+
-+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-+ dev_err(ctx->dev, "unable to map dst\n");
-+ edesc->dst_dma = 0;
-+ goto unmap;
-+ }
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
-+ dpaa2_fl_set_len(in_fle, buflen);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
-+ dpaa2_fl_set_len(out_fle, digestsize);
-+
-+ req_ctx->flc = &ctx->flc[DIGEST];
-+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
-+ req_ctx->cbk = ahash_done;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret == -EINPROGRESS ||
-+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ return ret;
-+
-+unmap:
-+ ahash_unmap(ctx->dev, edesc, req, digestsize);
-+ qi_cache_free(edesc);
-+ return ret;
-+}
-+
-+static int ahash_update_no_ctx(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ u8 *buf = current_buf(state);
-+ int *buflen = current_buflen(state);
-+ u8 *next_buf = alt_buf(state);
-+ int *next_buflen = alt_buflen(state);
-+ int in_len = *buflen + req->nbytes, to_hash;
-+ int qm_sg_bytes, src_nents, mapped_nents;
-+ struct ahash_edesc *edesc;
-+ int ret = 0;
-+
-+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
-+ to_hash = in_len - *next_buflen;
-+
-+ if (to_hash) {
-+ struct dpaa2_sg_entry *sg_table;
-+
-+ src_nents = sg_nents_for_len(req->src,
-+ req->nbytes - *next_buflen);
-+ if (src_nents < 0) {
-+ dev_err(ctx->dev, "Invalid number of src SG.\n");
-+ return src_nents;
-+ }
-+
-+ if (src_nents) {
-+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (!mapped_nents) {
-+ dev_err(ctx->dev, "unable to DMA map source\n");
-+ return -ENOMEM;
-+ }
-+ } else {
-+ mapped_nents = 0;
-+ }
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc) {
-+ dma_unmap_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
-+ sg_table = &edesc->sgt[0];
-+
-+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
-+ if (ret)
-+ goto unmap_ctx;
-+
-+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
-+
-+ if (*next_buflen)
-+ scatterwalk_map_and_copy(next_buf, req->src,
-+ to_hash - *buflen,
-+ *next_buflen, 0);
-+
-+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
-+ qm_sg_bytes, DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
-+ dev_err(ctx->dev, "unable to map S/G table\n");
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
-+ ctx->ctx_len, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
-+ dev_err(ctx->dev, "unable to map ctx\n");
-+ state->ctx_dma = 0;
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+ dpaa2_fl_set_len(in_fle, to_hash);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
-+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
-+
-+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
-+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
-+ req_ctx->cbk = ahash_done_ctx_dst;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY &&
-+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ goto unmap_ctx;
-+
-+ state->update = ahash_update_ctx;
-+ state->finup = ahash_finup_ctx;
-+ state->final = ahash_final_ctx;
-+ } else if (*next_buflen) {
-+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
-+ req->nbytes, 0);
-+ *buflen = *next_buflen;
-+ *next_buflen = 0;
-+ }
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-+ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
-+ *next_buflen, 1);
-+#endif
-+
-+ return ret;
-+unmap_ctx:
-+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
-+ qi_cache_free(edesc);
-+ return ret;
-+}
-+
-+static int ahash_finup_no_ctx(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int buflen = *current_buflen(state);
-+ int qm_sg_bytes, src_nents, mapped_nents;
-+ int digestsize = crypto_ahash_digestsize(ahash);
-+ struct ahash_edesc *edesc;
-+ struct dpaa2_sg_entry *sg_table;
-+ int ret;
-+
-+ src_nents = sg_nents_for_len(req->src, req->nbytes);
-+ if (src_nents < 0) {
-+ dev_err(ctx->dev, "Invalid number of src SG.\n");
-+ return src_nents;
-+ }
-+
-+ if (src_nents) {
-+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (!mapped_nents) {
-+ dev_err(ctx->dev, "unable to DMA map source\n");
-+ return -ENOMEM;
-+ }
-+ } else {
-+ mapped_nents = 0;
-+ }
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc) {
-+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
-+ sg_table = &edesc->sgt[0];
-+
-+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
-+ if (ret)
-+ goto unmap;
-+
-+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
-+
-+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
-+ dev_err(ctx->dev, "unable to map S/G table\n");
-+ ret = -ENOMEM;
-+ goto unmap;
-+ }
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+
-+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-+ dev_err(ctx->dev, "unable to map dst\n");
-+ edesc->dst_dma = 0;
-+ ret = -ENOMEM;
-+ goto unmap;
-+ }
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
-+ dpaa2_fl_set_len(out_fle, digestsize);
-+
-+ req_ctx->flc = &ctx->flc[DIGEST];
-+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
-+ req_ctx->cbk = ahash_done;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ goto unmap;
-+
-+ return ret;
-+unmap:
-+ ahash_unmap(ctx->dev, edesc, req, digestsize);
-+ qi_cache_free(edesc);
-+ return -ENOMEM;
-+}
-+
-+static int ahash_update_first(struct ahash_request *req)
-+{
-+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_request *req_ctx = &state->caam_req;
-+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
-+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ u8 *next_buf = alt_buf(state);
-+ int *next_buflen = alt_buflen(state);
-+ int to_hash;
-+ int src_nents, mapped_nents;
-+ struct ahash_edesc *edesc;
-+ int ret = 0;
-+
-+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
-+ 1);
-+ to_hash = req->nbytes - *next_buflen;
-+
-+ if (to_hash) {
-+ struct dpaa2_sg_entry *sg_table;
-+
-+ src_nents = sg_nents_for_len(req->src,
-+ req->nbytes - (*next_buflen));
-+ if (src_nents < 0) {
-+ dev_err(ctx->dev, "Invalid number of src SG.\n");
-+ return src_nents;
-+ }
-+
-+ if (src_nents) {
-+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ if (!mapped_nents) {
-+ dev_err(ctx->dev, "unable to map source for DMA\n");
-+ return -ENOMEM;
-+ }
-+ } else {
-+ mapped_nents = 0;
-+ }
-+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_zalloc(GFP_DMA | flags);
-+ if (!edesc) {
-+ dma_unmap_sg(ctx->dev, req->src, src_nents,
-+ DMA_TO_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ edesc->src_nents = src_nents;
-+ sg_table = &edesc->sgt[0];
-+
-+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_len(in_fle, to_hash);
-+
-+ if (mapped_nents > 1) {
-+ int qm_sg_bytes;
-+
-+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
-+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
-+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
-+ qm_sg_bytes,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
-+ dev_err(ctx->dev, "unable to map S/G table\n");
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+ edesc->qm_sg_bytes = qm_sg_bytes;
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
-+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
-+ } else {
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
-+ }
-+
-+ if (*next_buflen)
-+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
-+ *next_buflen, 0);
-+
-+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
-+ ctx->ctx_len, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
-+ dev_err(ctx->dev, "unable to map ctx\n");
-+ state->ctx_dma = 0;
-+ ret = -ENOMEM;
-+ goto unmap_ctx;
-+ }
-+
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
-+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
-+
-+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
-+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
-+ req_ctx->cbk = ahash_done_ctx_dst;
-+ req_ctx->ctx = &req->base;
-+ req_ctx->edesc = edesc;
-+
-+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
-+ if (ret != -EINPROGRESS &&
-+ !(ret == -EBUSY && req->base.flags &
-+ CRYPTO_TFM_REQ_MAY_BACKLOG))
-+ goto unmap_ctx;
-+
-+ state->update = ahash_update_ctx;
-+ state->finup = ahash_finup_ctx;
-+ state->final = ahash_final_ctx;
-+ } else if (*next_buflen) {
-+ state->update = ahash_update_no_ctx;
-+ state->finup = ahash_finup_no_ctx;
-+ state->final = ahash_final_no_ctx;
-+ scatterwalk_map_and_copy(next_buf, req->src, 0,
-+ req->nbytes, 0);
-+ switch_buf(state);
-+ }
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
-+#endif
-+
-+ return ret;
-+unmap_ctx:
-+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
-+ qi_cache_free(edesc);
-+ return ret;
-+}
-+
-+static int ahash_finup_first(struct ahash_request *req)
-+{
-+ return ahash_digest(req);
-+}
-+
-+static int ahash_init(struct ahash_request *req)
-+{
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+
-+ state->update = ahash_update_first;
-+ state->finup = ahash_finup_first;
-+ state->final = ahash_final_no_ctx;
-+
-+ state->ctx_dma = 0;
-+ state->current_buf = 0;
-+ state->buf_dma = 0;
-+ state->buflen_0 = 0;
-+ state->buflen_1 = 0;
-+
-+ return 0;
-+}
-+
-+static int ahash_update(struct ahash_request *req)
-+{
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+
-+ return state->update(req);
-+}
-+
-+static int ahash_finup(struct ahash_request *req)
-+{
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+
-+ return state->finup(req);
-+}
-+
-+static int ahash_final(struct ahash_request *req)
-+{
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+
-+ return state->final(req);
-+}
-+
-+static int ahash_export(struct ahash_request *req, void *out)
-+{
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ struct caam_export_state *export = out;
-+ int len;
-+ u8 *buf;
-+
-+ if (state->current_buf) {
-+ buf = state->buf_1;
-+ len = state->buflen_1;
-+ } else {
-+ buf = state->buf_0;
-+ len = state->buflen_0;
-+ }
-+
-+ memcpy(export->buf, buf, len);
-+ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
-+ export->buflen = len;
-+ export->update = state->update;
-+ export->final = state->final;
-+ export->finup = state->finup;
-+
-+ return 0;
-+}
-+
-+static int ahash_import(struct ahash_request *req, const void *in)
-+{
-+ struct caam_hash_state *state = ahash_request_ctx(req);
-+ const struct caam_export_state *export = in;
-+
-+ memset(state, 0, sizeof(*state));
-+ memcpy(state->buf_0, export->buf, export->buflen);
-+ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
-+ state->buflen_0 = export->buflen;
-+ state->update = export->update;
-+ state->final = export->final;
-+ state->finup = export->finup;
-+
-+ return 0;
-+}
-+
-+struct caam_hash_template {
-+ char name[CRYPTO_MAX_ALG_NAME];
-+ char driver_name[CRYPTO_MAX_ALG_NAME];
-+ char hmac_name[CRYPTO_MAX_ALG_NAME];
-+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
-+ unsigned int blocksize;
-+ struct ahash_alg template_ahash;
-+ u32 alg_type;
-+};
-+
-+/* ahash descriptors */
-+static struct caam_hash_template driver_hash[] = {
-+ {
-+ .name = "sha1",
-+ .driver_name = "sha1-caam-qi2",
-+ .hmac_name = "hmac(sha1)",
-+ .hmac_driver_name = "hmac-sha1-caam-qi2",
-+ .blocksize = SHA1_BLOCK_SIZE,
-+ .template_ahash = {
-+ .init = ahash_init,
-+ .update = ahash_update,
-+ .final = ahash_final,
-+ .finup = ahash_finup,
-+ .digest = ahash_digest,
-+ .export = ahash_export,
-+ .import = ahash_import,
-+ .setkey = ahash_setkey,
-+ .halg = {
-+ .digestsize = SHA1_DIGEST_SIZE,
-+ .statesize = sizeof(struct caam_export_state),
-+ },
-+ },
-+ .alg_type = OP_ALG_ALGSEL_SHA1,
-+ }, {
-+ .name = "sha224",
-+ .driver_name = "sha224-caam-qi2",
-+ .hmac_name = "hmac(sha224)",
-+ .hmac_driver_name = "hmac-sha224-caam-qi2",
-+ .blocksize = SHA224_BLOCK_SIZE,
-+ .template_ahash = {
-+ .init = ahash_init,
-+ .update = ahash_update,
-+ .final = ahash_final,
-+ .finup = ahash_finup,
-+ .digest = ahash_digest,
-+ .export = ahash_export,
-+ .import = ahash_import,
-+ .setkey = ahash_setkey,
-+ .halg = {
-+ .digestsize = SHA224_DIGEST_SIZE,
-+ .statesize = sizeof(struct caam_export_state),
-+ },
-+ },
-+ .alg_type = OP_ALG_ALGSEL_SHA224,
-+ }, {
-+ .name = "sha256",
-+ .driver_name = "sha256-caam-qi2",
-+ .hmac_name = "hmac(sha256)",
-+ .hmac_driver_name = "hmac-sha256-caam-qi2",
-+ .blocksize = SHA256_BLOCK_SIZE,
-+ .template_ahash = {
-+ .init = ahash_init,
-+ .update = ahash_update,
-+ .final = ahash_final,
-+ .finup = ahash_finup,
-+ .digest = ahash_digest,
-+ .export = ahash_export,
-+ .import = ahash_import,
-+ .setkey = ahash_setkey,
-+ .halg = {
-+ .digestsize = SHA256_DIGEST_SIZE,
-+ .statesize = sizeof(struct caam_export_state),
-+ },
-+ },
-+ .alg_type = OP_ALG_ALGSEL_SHA256,
-+ }, {
-+ .name = "sha384",
-+ .driver_name = "sha384-caam-qi2",
-+ .hmac_name = "hmac(sha384)",
-+ .hmac_driver_name = "hmac-sha384-caam-qi2",
-+ .blocksize = SHA384_BLOCK_SIZE,
-+ .template_ahash = {
-+ .init = ahash_init,
-+ .update = ahash_update,
-+ .final = ahash_final,
-+ .finup = ahash_finup,
-+ .digest = ahash_digest,
-+ .export = ahash_export,
-+ .import = ahash_import,
-+ .setkey = ahash_setkey,
-+ .halg = {
-+ .digestsize = SHA384_DIGEST_SIZE,
-+ .statesize = sizeof(struct caam_export_state),
-+ },
-+ },
-+ .alg_type = OP_ALG_ALGSEL_SHA384,
-+ }, {
-+ .name = "sha512",
-+ .driver_name = "sha512-caam-qi2",
-+ .hmac_name = "hmac(sha512)",
-+ .hmac_driver_name = "hmac-sha512-caam-qi2",
-+ .blocksize = SHA512_BLOCK_SIZE,
-+ .template_ahash = {
-+ .init = ahash_init,
-+ .update = ahash_update,
-+ .final = ahash_final,
-+ .finup = ahash_finup,
-+ .digest = ahash_digest,
-+ .export = ahash_export,
-+ .import = ahash_import,
-+ .setkey = ahash_setkey,
-+ .halg = {
-+ .digestsize = SHA512_DIGEST_SIZE,
-+ .statesize = sizeof(struct caam_export_state),
-+ },
-+ },
-+ .alg_type = OP_ALG_ALGSEL_SHA512,
-+ }, {
-+ .name = "md5",
-+ .driver_name = "md5-caam-qi2",
-+ .hmac_name = "hmac(md5)",
-+ .hmac_driver_name = "hmac-md5-caam-qi2",
-+ .blocksize = MD5_BLOCK_WORDS * 4,
-+ .template_ahash = {
-+ .init = ahash_init,
-+ .update = ahash_update,
-+ .final = ahash_final,
-+ .finup = ahash_finup,
-+ .digest = ahash_digest,
-+ .export = ahash_export,
-+ .import = ahash_import,
-+ .setkey = ahash_setkey,
-+ .halg = {
-+ .digestsize = MD5_DIGEST_SIZE,
-+ .statesize = sizeof(struct caam_export_state),
-+ },
-+ },
-+ .alg_type = OP_ALG_ALGSEL_MD5,
-+ }
-+};
-+
-+struct caam_hash_alg {
-+ struct list_head entry;
-+ struct device *dev;
-+ int alg_type;
-+ struct ahash_alg ahash_alg;
-+};
-+
-+static int caam_hash_cra_init(struct crypto_tfm *tfm)
-+{
-+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
-+ struct crypto_alg *base = tfm->__crt_alg;
-+ struct hash_alg_common *halg =
-+ container_of(base, struct hash_alg_common, base);
-+ struct ahash_alg *alg =
-+ container_of(halg, struct ahash_alg, halg);
-+ struct caam_hash_alg *caam_hash =
-+ container_of(alg, struct caam_hash_alg, ahash_alg);
-+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
-+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
-+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
-+ HASH_MSG_LEN + 32,
-+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
-+ HASH_MSG_LEN + 64,
-+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
-+ dma_addr_t dma_addr;
-+ int i;
-+
-+ ctx->dev = caam_hash->dev;
-+
-+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
-+ DMA_BIDIRECTIONAL,
-+ DMA_ATTR_SKIP_CPU_SYNC);
-+ if (dma_mapping_error(ctx->dev, dma_addr)) {
-+ dev_err(ctx->dev, "unable to map shared descriptors\n");
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < HASH_NUM_OP; i++)
-+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
-+
-+ /* copy descriptor header template value */
-+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-+
-+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_SUBMASK) >>
-+ OP_ALG_ALGSEL_SHIFT];
-+
-+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-+ sizeof(struct caam_hash_state));
-+
-+ return ahash_set_sh_desc(ahash);
-+}
-+
-+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
-+{
-+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
-+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
-+}
-+
-+static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
-+ struct caam_hash_template *template, bool keyed)
-+{
-+ struct caam_hash_alg *t_alg;
-+ struct ahash_alg *halg;
-+ struct crypto_alg *alg;
-+
-+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
-+ if (!t_alg)
-+ return ERR_PTR(-ENOMEM);
-+
-+ t_alg->ahash_alg = template->template_ahash;
-+ halg = &t_alg->ahash_alg;
-+ alg = &halg->halg.base;
-+
-+ if (keyed) {
-+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
-+ template->hmac_name);
-+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-+ template->hmac_driver_name);
-+ } else {
-+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
-+ template->name);
-+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-+ template->driver_name);
-+ t_alg->ahash_alg.setkey = NULL;
-+ }
-+ alg->cra_module = THIS_MODULE;
-+ alg->cra_init = caam_hash_cra_init;
-+ alg->cra_exit = caam_hash_cra_exit;
-+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
-+ alg->cra_priority = CAAM_CRA_PRIORITY;
-+ alg->cra_blocksize = template->blocksize;
-+ alg->cra_alignmask = 0;
-+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
-+ alg->cra_type = &crypto_ahash_type;
-+
-+ t_alg->alg_type = template->alg_type;
-+ t_alg->dev = dev;
-+
-+ return t_alg;
-+}
-+
-+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
-+{
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+
-+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
-+ napi_schedule_irqoff(&ppriv->napi);
-+}
-+
-+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
-+{
-+ struct device *dev = priv->dev;
-+ struct dpaa2_io_notification_ctx *nctx;
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int err, i = 0, cpu;
-+
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ ppriv->priv = priv;
-+ nctx = &ppriv->nctx;
-+ nctx->is_cdan = 0;
-+ nctx->id = ppriv->rsp_fqid;
-+ nctx->desired_cpu = cpu;
-+ nctx->cb = dpaa2_caam_fqdan_cb;
-+
-+ /* Register notification callbacks */
-+ ppriv->dpio = dpaa2_io_service_select(cpu);
-+ err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
-+ if (unlikely(err)) {
-+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
-+ nctx->cb = NULL;
-+ /*
-+ * If no affine DPIO for this core, there's probably
-+ * none available for next cores either. Signal we want
-+ * to retry later, in case the DPIO devices weren't
-+ * probed yet.
-+ */
-+ err = -EPROBE_DEFER;
-+ goto err;
-+ }
-+
-+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
-+ dev);
-+ if (unlikely(!ppriv->store)) {
-+ dev_err(dev, "dpaa2_io_store_create() failed\n");
-+ err = -ENOMEM;
-+ goto err;
-+ }
-+
-+ if (++i == priv->num_pairs)
-+ break;
-+ }
-+
-+ return 0;
-+
-+err:
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ if (!ppriv->nctx.cb)
-+ break;
-+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
-+ }
-+
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ if (!ppriv->store)
-+ break;
-+ dpaa2_io_store_destroy(ppriv->store);
-+ }
-+
-+ return err;
-+}
-+
-+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
-+{
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ struct device *dev = priv->dev;
-+ int i = 0, cpu;
-+
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
-+ dpaa2_io_store_destroy(ppriv->store);
-+
-+ if (++i == priv->num_pairs)
-+ return;
-+ }
-+}
-+
-+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
-+{
-+ struct dpseci_rx_queue_cfg rx_queue_cfg;
-+ struct device *dev = priv->dev;
-+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int err = 0, i = 0, cpu;
-+
-+ /* Configure Rx queues */
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+
-+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
-+ DPSECI_QUEUE_OPT_USER_CTX;
-+ rx_queue_cfg.order_preservation_en = 0;
-+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
-+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
-+ /*
-+ * Rx priority (WQ) doesn't really matter, since we use
-+ * pull mode, i.e. volatile dequeues from specific FQs
-+ */
-+ rx_queue_cfg.dest_cfg.priority = 0;
-+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
-+
-+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
-+ &rx_queue_cfg);
-+ if (err) {
-+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ if (++i == priv->num_pairs)
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
-+{
-+ struct device *dev = priv->dev;
-+
-+ if (!priv->cscn_mem)
-+ return;
-+
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ kfree(priv->cscn_mem);
-+}
-+
-+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
-+{
-+ struct device *dev = priv->dev;
-+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
-+
-+ dpaa2_dpseci_congestion_free(priv);
-+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
-+}
-+
-+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
-+ const struct dpaa2_fd *fd)
-+{
-+ struct caam_request *req;
-+ u32 fd_err;
-+
-+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
-+ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
-+ return;
-+ }
-+
-+ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
-+ if (unlikely(fd_err))
-+ dev_err(priv->dev, "FD error: %08x\n", fd_err);
-+
-+ /*
-+ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
-+ * in FD[ERR] or FD[FRC].
-+ */
-+ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
-+ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
-+ DMA_BIDIRECTIONAL);
-+ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
-+}
-+
-+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
-+{
-+ int err;
-+
-+ /* Retry while portal is busy */
-+ do {
-+ err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
-+ ppriv->store);
-+ } while (err == -EBUSY);
-+
-+ if (unlikely(err))
-+ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
-+
-+ return err;
-+}
-+
-+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
-+{
-+ struct dpaa2_dq *dq;
-+ int cleaned = 0, is_last;
-+
-+ do {
-+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
-+ if (unlikely(!dq)) {
-+ if (unlikely(!is_last)) {
-+ dev_dbg(ppriv->priv->dev,
-+ "FQ %d returned no valid frames\n",
-+ ppriv->rsp_fqid);
-+ /*
-+ * MUST retry until we get some sort of
-+ * valid response token (be it "empty dequeue"
-+ * or a valid frame).
-+ */
-+ continue;
-+ }
-+ break;
-+ }
-+
-+ /* Process FD */
-+ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
-+ cleaned++;
-+ } while (!is_last);
-+
-+ return cleaned;
-+}
-+
-+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
-+{
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ struct dpaa2_caam_priv *priv;
-+ int err, cleaned = 0, store_cleaned;
-+
-+ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
-+ priv = ppriv->priv;
-+
-+ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
-+ return 0;
-+
-+ do {
-+ store_cleaned = dpaa2_caam_store_consume(ppriv);
-+ cleaned += store_cleaned;
-+
-+ if (store_cleaned == 0 ||
-+ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
-+ break;
-+
-+ /* Try to dequeue some more */
-+ err = dpaa2_caam_pull_fq(ppriv);
-+ if (unlikely(err))
-+ break;
-+ } while (1);
-+
-+ if (cleaned < budget) {
-+ napi_complete_done(napi, cleaned);
-+ err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
-+ if (unlikely(err))
-+ dev_err(priv->dev, "Notification rearm failed: %d\n",
-+ err);
-+ }
-+
-+ return cleaned;
-+}
-+
-+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
-+ u16 token)
-+{
-+ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
-+ struct device *dev = priv->dev;
-+ int err;
-+
-+ /*
-+ * Congestion group feature supported starting with DPSECI API v5.1
-+ * and only when object has been created with this capability.
-+ */
-+ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
-+ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
-+ return 0;
-+
-+ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
-+ GFP_KERNEL | GFP_DMA);
-+ if (!priv->cscn_mem)
-+ return -ENOMEM;
-+
-+ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
-+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
-+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, priv->cscn_dma)) {
-+ dev_err(dev, "Error mapping CSCN memory area\n");
-+ err = -ENOMEM;
-+ goto err_dma_map;
-+ }
-+
-+ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
-+ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
-+ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
-+ cong_notif_cfg.message_ctx = (u64)priv;
-+ cong_notif_cfg.message_iova = priv->cscn_dma;
-+ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
-+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
-+ DPSECI_CGN_MODE_COHERENT_WRITE;
-+
-+ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
-+ &cong_notif_cfg);
-+ if (err) {
-+ dev_err(dev, "dpseci_set_congestion_notification failed\n");
-+ goto err_set_cong;
-+ }
-+
-+ return 0;
-+
-+err_set_cong:
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+err_dma_map:
-+ kfree(priv->cscn_mem);
-+
-+ return err;
-+}
-+
-+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
-+{
-+ struct device *dev = &ls_dev->dev;
-+ struct dpaa2_caam_priv *priv;
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int err, cpu;
-+ u8 i;
-+
-+ priv = dev_get_drvdata(dev);
-+
-+ priv->dev = dev;
-+ priv->dpsec_id = ls_dev->obj_desc.id;
-+
-+ /* Get a handle for the DPSECI this interface is associated with */
-+ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpsec_open() failed: %d\n", err);
-+ goto err_open;
-+ }
-+
-+ dev_info(dev, "Opened dpseci object successfully\n");
-+
-+ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
-+ &priv->minor_ver);
-+ if (err) {
-+ dev_err(dev, "dpseci_get_api_version() failed\n");
-+ goto err_get_vers;
-+ }
-+
-+ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
-+ &priv->dpseci_attr);
-+ if (err) {
-+ dev_err(dev, "dpseci_get_attributes() failed\n");
-+ goto err_get_vers;
-+ }
-+
-+ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
-+ &priv->sec_attr);
-+ if (err) {
-+ dev_err(dev, "dpseci_get_sec_attr() failed\n");
-+ goto err_get_vers;
-+ }
-+
-+ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "setup_congestion() failed\n");
-+ goto err_get_vers;
-+ }
-+
-+ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
-+ priv->dpseci_attr.num_tx_queues);
-+ if (priv->num_pairs > num_online_cpus()) {
-+ dev_warn(dev, "%d queues won't be used\n",
-+ priv->num_pairs - num_online_cpus());
-+ priv->num_pairs = num_online_cpus();
-+ }
-+
-+ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
-+ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
-+ &priv->rx_queue_attr[i]);
-+ if (err) {
-+ dev_err(dev, "dpseci_get_rx_queue() failed\n");
-+ goto err_get_rx_queue;
-+ }
-+ }
-+
-+ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
-+ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
-+ &priv->tx_queue_attr[i]);
-+ if (err) {
-+ dev_err(dev, "dpseci_get_tx_queue() failed\n");
-+ goto err_get_rx_queue;
-+ }
-+ }
-+
-+ i = 0;
-+ for_each_online_cpu(cpu) {
-+ u8 j;
-+
-+ j = i % priv->num_pairs;
-+
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
-+
-+ /*
-+ * Allow all cores to enqueue, while only some of them
-+ * will take part in dequeuing.
-+ */
-+ if (++i > priv->num_pairs)
-+ continue;
-+
-+ ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
-+ ppriv->prio = j;
-+
-+ dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", j,
-+ priv->rx_queue_attr[j].fqid,
-+ priv->tx_queue_attr[j].fqid);
-+
-+ ppriv->net_dev.dev = *dev;
-+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
-+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
-+ DPAA2_CAAM_NAPI_WEIGHT);
-+ }
-+
-+ return 0;
-+
-+err_get_rx_queue:
-+ dpaa2_dpseci_congestion_free(priv);
-+err_get_vers:
-+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
-+err_open:
-+ return err;
-+}
-+
-+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
-+{
-+ struct device *dev = priv->dev;
-+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int err, i;
-+
-+ for (i = 0; i < priv->num_pairs; i++) {
-+ ppriv = per_cpu_ptr(priv->ppriv, i);
-+ napi_enable(&ppriv->napi);
-+ }
-+
-+ err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpseci_enable() failed\n");
-+ return err;
-+ }
-+
-+ dev_info(dev, "DPSECI version %d.%d\n",
-+ priv->major_ver,
-+ priv->minor_ver);
-+
-+ return 0;
-+}
-+
-+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
-+{
-+ struct device *dev = priv->dev;
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
-+ int i, err = 0, enabled;
-+
-+ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpseci_disable() failed\n");
-+ return err;
-+ }
-+
-+ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
-+ if (err) {
-+ dev_err(dev, "dpseci_is_enabled() failed\n");
-+ return err;
-+ }
-+
-+ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
-+
-+ for (i = 0; i < priv->num_pairs; i++) {
-+ ppriv = per_cpu_ptr(priv->ppriv, i);
-+ napi_disable(&ppriv->napi);
-+ netif_napi_del(&ppriv->napi);
-+ }
-+
-+ return 0;
-+}
-+
-+static struct list_head hash_list;
-+
-+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
-+{
-+ struct device *dev;
-+ struct dpaa2_caam_priv *priv;
-+ int i, err = 0;
-+ bool registered = false;
-+
-+ /*
-+ * There is no way to get CAAM endianness - there is no direct register
-+ * space access and MC f/w does not provide this attribute.
-+ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
-+ * property.
-+ */
-+ caam_little_end = true;
-+
-+ caam_imx = false;
-+
-+ dev = &dpseci_dev->dev;
-+
-+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-+ if (!priv)
-+ return -ENOMEM;
-+
-+ dev_set_drvdata(dev, priv);
-+
-+ priv->domain = iommu_get_domain_for_dev(dev);
-+
-+ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
-+ 0, SLAB_CACHE_DMA, NULL);
-+ if (!qi_cache) {
-+ dev_err(dev, "Can't allocate SEC cache\n");
-+ err = -ENOMEM;
-+ goto err_qicache;
-+ }
-+
-+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
-+ if (err) {
-+ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
-+ goto err_dma_mask;
-+ }
-+
-+ /* Obtain a MC portal */
-+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
-+ if (err) {
-+ if (err == -ENXIO)
-+ err = -EPROBE_DEFER;
-+ else
-+ dev_err(dev, "MC portal allocation failed\n");
-+
-+ goto err_dma_mask;
-+ }
-+
-+ priv->ppriv = alloc_percpu(*priv->ppriv);
-+ if (!priv->ppriv) {
-+ dev_err(dev, "alloc_percpu() failed\n");
-+ err = -ENOMEM;
-+ goto err_alloc_ppriv;
-+ }
-+
-+ /* DPSECI initialization */
-+ err = dpaa2_dpseci_setup(dpseci_dev);
-+ if (err) {
-+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
-+ goto err_dpseci_setup;
-+ }
-+
-+ /* DPIO */
-+ err = dpaa2_dpseci_dpio_setup(priv);
-+ if (err) {
-+ if (err != -EPROBE_DEFER)
-+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
-+ goto err_dpio_setup;
-+ }
-+
-+ /* DPSECI binding to DPIO */
-+ err = dpaa2_dpseci_bind(priv);
-+ if (err) {
-+ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
-+ goto err_bind;
-+ }
-+
-+ /* DPSECI enable */
-+ err = dpaa2_dpseci_enable(priv);
-+ if (err) {
-+ dev_err(dev, "dpaa2_dpseci_enable() failed");
-+ goto err_bind;
-+ }
-+
-+ /* register crypto algorithms the device supports */
-+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-+ struct caam_skcipher_alg *t_alg = driver_algs + i;
-+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
-+
-+ /* Skip DES algorithms if not supported by device */
-+ if (!priv->sec_attr.des_acc_num &&
-+ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
-+ (alg_sel == OP_ALG_ALGSEL_DES)))
-+ continue;
-+
-+ /* Skip AES algorithms if not supported by device */
-+ if (!priv->sec_attr.aes_acc_num &&
-+ (alg_sel == OP_ALG_ALGSEL_AES))
-+ continue;
-+
-+ /* Skip CHACHA20 algorithms if not supported by device */
-+ if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
-+ !priv->sec_attr.ccha_acc_num)
-+ continue;
-+
-+ t_alg->caam.dev = dev;
-+ caam_skcipher_alg_init(t_alg);
-+
-+ err = crypto_register_skcipher(&t_alg->skcipher);
-+ if (err) {
-+ dev_warn(dev, "%s alg registration failed: %d\n",
-+ t_alg->skcipher.base.cra_driver_name, err);
-+ continue;
-+ }
-+
-+ t_alg->registered = true;
-+ registered = true;
-+ }
-+
-+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
-+ struct caam_aead_alg *t_alg = driver_aeads + i;
-+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
-+ OP_ALG_ALGSEL_MASK;
-+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
-+ OP_ALG_ALGSEL_MASK;
-+
-+ /* Skip DES algorithms if not supported by device */
-+ if (!priv->sec_attr.des_acc_num &&
-+ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
-+ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
-+ continue;
-+
-+ /* Skip AES algorithms if not supported by device */
-+ if (!priv->sec_attr.aes_acc_num &&
-+ (c1_alg_sel == OP_ALG_ALGSEL_AES))
-+ continue;
-+
-+ /* Skip CHACHA20 algorithms if not supported by device */
-+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
-+ !priv->sec_attr.ccha_acc_num)
-+ continue;
-+
-+ /* Skip POLY1305 algorithms if not supported by device */
-+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
-+ !priv->sec_attr.ptha_acc_num)
-+ continue;
-+
-+ /*
-+ * Skip algorithms requiring message digests
-+ * if MD not supported by device.
-+ */
-+ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
-+ !priv->sec_attr.md_acc_num)
-+ continue;
-+
-+ t_alg->caam.dev = dev;
-+ caam_aead_alg_init(t_alg);
-+
-+ err = crypto_register_aead(&t_alg->aead);
-+ if (err) {
-+ dev_warn(dev, "%s alg registration failed: %d\n",
-+ t_alg->aead.base.cra_driver_name, err);
-+ continue;
-+ }
-+
-+ t_alg->registered = true;
-+ registered = true;
-+ }
-+ if (registered)
-+ dev_info(dev, "algorithms registered in /proc/crypto\n");
-+
-+ /* register hash algorithms the device supports */
-+ INIT_LIST_HEAD(&hash_list);
-+
-+ /*
-+ * Skip registration of any hashing algorithms if MD block
-+ * is not present.
-+ */
-+ if (!priv->sec_attr.md_acc_num)
-+ return 0;
-+
-+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
-+ struct caam_hash_alg *t_alg;
-+ struct caam_hash_template *alg = driver_hash + i;
-+
-+ /* register hmac version */
-+ t_alg = caam_hash_alloc(dev, alg, true);
-+ if (IS_ERR(t_alg)) {
-+ err = PTR_ERR(t_alg);
-+ dev_warn(dev, "%s hash alg allocation failed: %d\n",
-+ alg->driver_name, err);
-+ continue;
-+ }
-+
-+ err = crypto_register_ahash(&t_alg->ahash_alg);
-+ if (err) {
-+ dev_warn(dev, "%s alg registration failed: %d\n",
-+ t_alg->ahash_alg.halg.base.cra_driver_name,
-+ err);
-+ kfree(t_alg);
-+ } else {
-+ list_add_tail(&t_alg->entry, &hash_list);
-+ }
-+
-+ /* register unkeyed version */
-+ t_alg = caam_hash_alloc(dev, alg, false);
-+ if (IS_ERR(t_alg)) {
-+ err = PTR_ERR(t_alg);
-+ dev_warn(dev, "%s alg allocation failed: %d\n",
-+ alg->driver_name, err);
-+ continue;
-+ }
-+
-+ err = crypto_register_ahash(&t_alg->ahash_alg);
-+ if (err) {
-+ dev_warn(dev, "%s alg registration failed: %d\n",
-+ t_alg->ahash_alg.halg.base.cra_driver_name,
-+ err);
-+ kfree(t_alg);
-+ } else {
-+ list_add_tail(&t_alg->entry, &hash_list);
-+ }
-+ }
-+ if (!list_empty(&hash_list))
-+ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
-+
-+ return err;
-+
-+err_bind:
-+ dpaa2_dpseci_dpio_free(priv);
-+err_dpio_setup:
-+ dpaa2_dpseci_free(priv);
-+err_dpseci_setup:
-+ free_percpu(priv->ppriv);
-+err_alloc_ppriv:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_dma_mask:
-+ kmem_cache_destroy(qi_cache);
-+err_qicache:
-+ dev_set_drvdata(dev, NULL);
-+
-+ return err;
-+}
-+
-+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
-+{
-+ struct device *dev;
-+ struct dpaa2_caam_priv *priv;
-+ int i;
-+
-+ dev = &ls_dev->dev;
-+ priv = dev_get_drvdata(dev);
-+
-+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
-+ struct caam_aead_alg *t_alg = driver_aeads + i;
-+
-+ if (t_alg->registered)
-+ crypto_unregister_aead(&t_alg->aead);
-+ }
-+
-+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-+ struct caam_skcipher_alg *t_alg = driver_algs + i;
-+
-+ if (t_alg->registered)
-+ crypto_unregister_skcipher(&t_alg->skcipher);
-+ }
-+
-+ if (hash_list.next) {
-+ struct caam_hash_alg *t_hash_alg, *p;
-+
-+ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
-+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
-+ list_del(&t_hash_alg->entry);
-+ kfree(t_hash_alg);
-+ }
-+ }
-+
-+ dpaa2_dpseci_disable(priv);
-+ dpaa2_dpseci_dpio_free(priv);
-+ dpaa2_dpseci_free(priv);
-+ free_percpu(priv->ppriv);
-+ fsl_mc_portal_free(priv->mc_io);
-+ dev_set_drvdata(dev, NULL);
-+ kmem_cache_destroy(qi_cache);
-+
-+ return 0;
-+}
-+
-+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
-+{
-+ struct dpaa2_fd fd;
-+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int err = 0, i;
-+
-+ if (IS_ERR(req))
-+ return PTR_ERR(req);
-+
-+ if (priv->cscn_mem) {
-+ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
-+ DPAA2_CSCN_SIZE,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
-+ dev_dbg_ratelimited(dev, "Dropping request\n");
-+ return -EBUSY;
-+ }
-+ }
-+
-+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
-+
-+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
-+ DMA_BIDIRECTIONAL);
-+ if (dma_mapping_error(dev, req->fd_flt_dma)) {
-+ dev_err(dev, "DMA mapping error for QI enqueue request\n");
-+ goto err_out;
-+ }
-+
-+ memset(&fd, 0, sizeof(fd));
-+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
-+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
-+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
-+ dpaa2_fd_set_flc(&fd, req->flc_dma);
-+
-+ ppriv = this_cpu_ptr(priv->ppriv);
-+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
-+ err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
-+ &fd);
-+ if (err != -EBUSY)
-+ break;
-+
-+ cpu_relax();
-+ }
-+
-+ if (unlikely(err)) {
-+ dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
-+ goto err_out;
-+ }
-+
-+ return -EINPROGRESS;
-+
-+err_out:
-+ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
-+ DMA_BIDIRECTIONAL);
-+ return -EIO;
-+}
-+EXPORT_SYMBOL(dpaa2_caam_enqueue);
-+
-+const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpseci",
-+ },
-+ { .vendor = 0x0 }
-+};
-+
-+static struct fsl_mc_driver dpaa2_caam_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = dpaa2_caam_probe,
-+ .remove = dpaa2_caam_remove,
-+ .match_id_table = dpaa2_caam_match_id_table
-+};
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_AUTHOR("Freescale Semiconductor, Inc");
-+MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
-+
-+module_fsl_mc_driver(dpaa2_caam_driver);
---- /dev/null
-+++ b/drivers/crypto/caam/caamalg_qi2.h
-@@ -0,0 +1,276 @@
-+/*
-+ * Copyright 2015-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the names of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef _CAAMALG_QI2_H_
-+#define _CAAMALG_QI2_H_
-+
-+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
-+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
-+#include <linux/threads.h>
-+#include "dpseci.h"
-+#include "desc_constr.h"
-+
-+#define DPAA2_CAAM_STORE_SIZE 16
-+/* NAPI weight *must* be a multiple of the store size. */
-+#define DPAA2_CAAM_NAPI_WEIGHT 64
-+
-+/* The congestion entrance threshold was chosen so that on LS2088
-+ * we support the maximum throughput for the available memory
-+ */
-+#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
-+#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
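
Spelled out, the two thresholds above configure the congestion group (in DPSECI_CONGESTION_UNIT_BYTES, as programmed in dpaa2_dpseci_congestion_setup()) to enter the congested state at 128 MiB of queued backlog and to leave it again at 128 MiB x 9/10, roughly 115 MiB; while the CSCN area reports congestion, dpaa2_caam_enqueue() drops new requests with -EBUSY.
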
-+
-+/**
-+ * dpaa2_caam_priv - driver private data
-+ * @dpsec_id: DPSECI object unique ID
-+ * @major_ver: DPSECI major version
-+ * @minor_ver: DPSECI minor version
-+ * @dpseci_attr: DPSECI attributes
-+ * @sec_attr: SEC engine attributes
-+ * @rx_queue_attr: array of Rx queue attributes
-+ * @tx_queue_attr: array of Tx queue attributes
-+ * @cscn_mem: pointer to memory region containing the
-+ * dpaa2_cscn struct; its size is larger than
-+ * sizeof(struct dpaa2_cscn) to accommodate alignment
-+ * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
-+ * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
-+ * @cscn_dma: dma address used by the QMAN to write CSCN messages
-+ * @dev: device associated with the DPSECI object
-+ * @mc_io: pointer to MC portal's I/O object
-+ * @domain: IOMMU domain
-+ * @ppriv: per CPU pointers to private data
-+ */
-+struct dpaa2_caam_priv {
-+ int dpsec_id;
-+
-+ u16 major_ver;
-+ u16 minor_ver;
-+
-+ struct dpseci_attr dpseci_attr;
-+ struct dpseci_sec_attr sec_attr;
-+ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
-+ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
-+ int num_pairs;
-+
-+ /* congestion */
-+ void *cscn_mem;
-+ void *cscn_mem_aligned;
-+ dma_addr_t cscn_dma;
-+
-+ struct device *dev;
-+ struct fsl_mc_io *mc_io;
-+ struct iommu_domain *domain;
-+
-+ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
-+};
-+
-+/**
-+ * dpaa2_caam_priv_per_cpu - per CPU private data
-+ * @napi: napi structure
-+ * @net_dev: netdev used by napi
-+ * @req_fqid: (virtual) request (Tx / enqueue) FQID
-+ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
-+ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
-+ * @nctx: notification context of response FQ
-+ * @store: where dequeued frames are stored
-+ * @priv: backpointer to dpaa2_caam_priv
-+ * @dpio: portal used for data path operations
-+ */
-+struct dpaa2_caam_priv_per_cpu {
-+ struct napi_struct napi;
-+ struct net_device net_dev;
-+ int req_fqid;
-+ int rsp_fqid;
-+ int prio;
-+ struct dpaa2_io_notification_ctx nctx;
-+ struct dpaa2_io_store *store;
-+ struct dpaa2_caam_priv *priv;
-+ struct dpaa2_io *dpio;
-+};
-+
-+/*
-+ * The CAAM QI hardware constructs a job descriptor which points
-+ * to the shared descriptor (as pointed to by context_a of the FQ
-+ * given to CAAM). When the job descriptor is executed by deco, the
-+ * whole job descriptor together with the shared descriptor gets
-+ * loaded into the deco buffer, which is 64 words long (each 32-bit).
-+ *
-+ * The job descriptor constructed by QI hardware has layout:
-+ *
-+ * HEADER (1 word)
-+ * Shdesc ptr (1 or 2 words)
-+ * SEQ_OUT_PTR (1 word)
-+ * Out ptr (1 or 2 words)
-+ * Out length (1 word)
-+ * SEQ_IN_PTR (1 word)
-+ * In ptr (1 or 2 words)
-+ * In length (1 word)
-+ *
-+ * The shdesc ptr is used to fetch the shared descriptor contents
-+ * into the deco buffer.
-+ *
-+ * Apart from the shdesc contents, the total number of words that
-+ * get loaded into the deco buffer is 8 or 11. The remaining words
-+ * in the deco buffer can be used for storing the shared descriptor.
-+ */
-+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
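
To make the arithmetic above concrete, here is a small worked example. The constants are not visible in this hunk, so their values are assumptions consistent with the comment above: a descriptor command word (CAAM_CMD_SZ) is 4 bytes, the deco buffer (CAAM_DESC_BYTES_MAX) holds 64 such words, and the QI-built job descriptor (DESC_JOB_IO_LEN) occupies 11 words when DMA addresses are 64-bit (the '11' case mentioned in the comment).

/* Editor's sketch, not part of the patch: how MAX_SDLEN works out
 * under the assumed constant values described above. */
#include <stdio.h>

int main(void)
{
	const int caam_cmd_sz = 4;		 /* bytes per descriptor command word */
	const int caam_desc_bytes_max = 64 * 4;	 /* 64-word deco buffer = 256 bytes */
	const int desc_job_io_len = 11 * 4;	 /* QI job descriptor, 64-bit pointers */

	/* (256 - 44) / 4 = 53 command words remain for the shared descriptor */
	printf("MAX_SDLEN = %d\n",
	       (caam_desc_bytes_max - desc_job_io_len) / caam_cmd_sz);
	return 0;
}
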
-+
-+/* Length of a single buffer in the QI driver memory cache */
-+#define CAAM_QI_MEMCACHE_SIZE 512
-+
-+/*
-+ * aead_edesc - s/w-extended aead descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @assoclen: associated data length, in CAAM endianness
-+ * @assoclen_dma: bus physical mapped address of req->assoclen
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct aead_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ unsigned int assoclen;
-+ dma_addr_t assoclen_dma;
-+ struct dpaa2_sg_entry sgt[0];
-+};
-+
-+/*
-+ * tls_edesc - s/w-extended tls descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
-+ * @dst: pointer to output scatterlist, useful for unmapping
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct tls_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct scatterlist tmp[2];
-+ struct scatterlist *dst;
-+ struct dpaa2_sg_entry sgt[0];
-+};
-+
-+/*
-+ * skcipher_edesc - s/w-extended skcipher descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped qm_sg space
-+ * @qm_sg_dma: I/O virtual address of h/w link table
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct skcipher_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct dpaa2_sg_entry sgt[0];
-+};
-+
-+/*
-+ * ahash_edesc - s/w-extended ahash descriptor
-+ * @dst_dma: I/O virtual address of req->result
-+ * @qm_sg_dma: I/O virtual address of h/w link table
-+ * @src_nents: number of segments in input scatterlist
-+ * @qm_sg_bytes: length of dma mapped qm_sg space
-+ * @sgt: pointer to h/w link table
-+ */
-+struct ahash_edesc {
-+ dma_addr_t dst_dma;
-+ dma_addr_t qm_sg_dma;
-+ int src_nents;
-+ int qm_sg_bytes;
-+ struct dpaa2_sg_entry sgt[0];
-+};
-+
-+/**
-+ * caam_flc - Flow Context (FLC)
-+ * @flc: Flow Context options
-+ * @sh_desc: Shared Descriptor
-+ */
-+struct caam_flc {
-+ u32 flc[16];
-+ u32 sh_desc[MAX_SDLEN];
-+} ____cacheline_aligned;
-+
-+enum optype {
-+ ENCRYPT = 0,
-+ DECRYPT,
-+ NUM_OP
-+};
-+
-+/**
-+ * caam_request - the request structure the driver application should fill
-+ * while submitting a job to the driver.
-+ * @fd_flt: Frame list table defining input and output
-+ * fd_flt[0] - FLE pointing to output buffer
-+ * fd_flt[1] - FLE pointing to input buffer
-+ * @fd_flt_dma: DMA address for the frame list table
-+ * @flc: Flow Context
-+ * @flc_dma: I/O virtual address of Flow Context
-+ * @cbk: Callback function to invoke when job is completed
-+ * @ctx: arbitrary context attached to the request by the application
-+ * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
-+ */
-+struct caam_request {
-+ struct dpaa2_fl_entry fd_flt[2];
-+ dma_addr_t fd_flt_dma;
-+ struct caam_flc *flc;
-+ dma_addr_t flc_dma;
-+ void (*cbk)(void *ctx, u32 err);
-+ void *ctx;
-+ void *edesc;
-+};
-+
-+/**
-+ * dpaa2_caam_enqueue() - enqueue a crypto request
-+ * @dev: device associated with the DPSECI object
-+ * @req: pointer to caam_request
-+ */
-+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
-+
-+#endif /* _CAAMALG_QI2_H_ */
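
As a usage illustration of the interface above: every submission path in caamalg_qi2.c (for instance ahash_digest() earlier in this patch) fills the two frame-list entries of a caam_request, attaches a flow context and a completion callback, and then treats -EINPROGRESS (or -EBUSY when the request may be backlogged) as "request in flight". The sketch below condenses that pattern; my_done() and my_submit() are hypothetical names, the flow context and the DMA-mapped buffers are assumed to have been prepared by the caller, and error unmapping is left out for brevity.

/* Editor's sketch of the enqueue pattern used throughout this driver;
 * not part of the patch. */
#include <linux/crypto.h>
#include <linux/string.h>
#include "caamalg_qi2.h"

/* Completion callback: dpaa2_caam_process_fd() invokes it with the FRC
 * of the response frame once the job has been dequeued. */
static void my_done(void *ctx, u32 status)
{
	struct crypto_async_request *areq = ctx;

	/* a zero FRC means success; anything else encodes a CAAM error */
	areq->complete(areq, status ? -EIO : 0);
}

static int my_submit(struct device *dev, struct caam_request *req_ctx,
		     struct crypto_async_request *areq,
		     struct caam_flc *flc, dma_addr_t flc_dma,
		     dma_addr_t src_dma, unsigned int src_len,
		     dma_addr_t dst_dma, unsigned int dst_len)
{
	int ret;

	/* fd_flt[1] describes the input buffer, fd_flt[0] the output */
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(&req_ctx->fd_flt[1], true);
	dpaa2_fl_set_format(&req_ctx->fd_flt[1], dpaa2_fl_single);
	dpaa2_fl_set_addr(&req_ctx->fd_flt[1], src_dma);
	dpaa2_fl_set_len(&req_ctx->fd_flt[1], src_len);
	dpaa2_fl_set_format(&req_ctx->fd_flt[0], dpaa2_fl_single);
	dpaa2_fl_set_addr(&req_ctx->fd_flt[0], dst_dma);
	dpaa2_fl_set_len(&req_ctx->fd_flt[0], dst_len);

	req_ctx->flc = flc;
	req_ctx->flc_dma = flc_dma;
	req_ctx->cbk = my_done;
	req_ctx->ctx = areq;

	ret = dpaa2_caam_enqueue(dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && areq->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;	/* the job is now owned by the hardware */

	return ret;		/* enqueue failed; caller unmaps and frees */
}
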
---- a/drivers/crypto/caam/caamhash.c
-+++ b/drivers/crypto/caam/caamhash.c
-@@ -2,6 +2,7 @@
- * caam - Freescale FSL CAAM support for ahash functions of crypto API
- *
- * Copyright 2011 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
- *
- * Based on caamalg.c crypto API driver.
- *
-@@ -62,6 +63,7 @@
- #include "error.h"
- #include "sg_sw_sec4.h"
- #include "key_gen.h"
-+#include "caamhash_desc.h"
-
- #define CAAM_CRA_PRIORITY 3000
-
-@@ -71,14 +73,6 @@
- #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
- #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-
--/* length of descriptors text */
--#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
--#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
--#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
--#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
--#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
--#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
--
- #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
- CAAM_MAX_HASH_KEY_SIZE)
- #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
-@@ -107,6 +101,7 @@ struct caam_hash_ctx {
- dma_addr_t sh_desc_update_first_dma;
- dma_addr_t sh_desc_fin_dma;
- dma_addr_t sh_desc_digest_dma;
-+ enum dma_data_direction dir;
- struct device *jrdev;
- u8 key[CAAM_MAX_HASH_KEY_SIZE];
- int ctx_len;
-@@ -218,7 +213,7 @@ static inline int buf_map_to_sec4_sg(str
- }
-
- /* Map state->caam_ctx, and add it to link table */
--static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
-+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
- struct caam_hash_state *state, int ctx_len,
- struct sec4_sg_entry *sec4_sg, u32 flag)
- {
-@@ -234,68 +229,22 @@ static inline int ctx_map_to_sec4_sg(u32
- return 0;
- }
-
--/*
-- * For ahash update, final and finup (import_ctx = true)
-- * import context, read and write to seqout
-- * For ahash firsts and digest (import_ctx = false)
-- * read and write to seqout
-- */
--static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
-- struct caam_hash_ctx *ctx, bool import_ctx)
--{
-- u32 op = ctx->adata.algtype;
-- u32 *skip_key_load;
--
-- init_sh_desc(desc, HDR_SHARE_SERIAL);
--
-- /* Append key if it has been set; ahash update excluded */
-- if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
-- /* Skip key loading if already shared */
-- skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-- JUMP_COND_SHRD);
--
-- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
-- ctx->adata.keylen, CLASS_2 |
-- KEY_DEST_MDHA_SPLIT | KEY_ENC);
--
-- set_jump_tgt_here(desc, skip_key_load);
--
-- op |= OP_ALG_AAI_HMAC_PRECOMP;
-- }
--
-- /* If needed, import context from software */
-- if (import_ctx)
-- append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
-- LDST_SRCDST_BYTE_CONTEXT);
--
-- /* Class 2 operation */
-- append_operation(desc, op | state | OP_ALG_ENCRYPT);
--
-- /*
-- * Load from buf and/or src and write to req->result or state->context
-- * Calculate remaining bytes to read
-- */
-- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-- /* Read remaining bytes */
-- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
-- FIFOLD_TYPE_MSG | KEY_VLF);
-- /* Store class2 context bytes */
-- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-- LDST_SRCDST_BYTE_CONTEXT);
--}
--
- static int ahash_set_sh_desc(struct crypto_ahash *ahash)
- {
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- int digestsize = crypto_ahash_digestsize(ahash);
- struct device *jrdev = ctx->jrdev;
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
- u32 *desc;
-
-+ ctx->adata.key_virt = ctx->key;
-+
- /* ahash_update shared descriptor */
- desc = ctx->sh_desc_update;
-- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
-+ ctx->ctx_len, true, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update shdesc@"__stringify(__LINE__)": ",
-@@ -304,9 +253,10 @@ static int ahash_set_sh_desc(struct cryp
-
- /* ahash_update_first shared descriptor */
- desc = ctx->sh_desc_update_first;
-- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
-+ ctx->ctx_len, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update first shdesc@"__stringify(__LINE__)": ",
-@@ -315,9 +265,10 @@ static int ahash_set_sh_desc(struct cryp
-
- /* ahash_final shared descriptor */
- desc = ctx->sh_desc_fin;
-- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
-+ ctx->ctx_len, true, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
-@@ -326,9 +277,10 @@ static int ahash_set_sh_desc(struct cryp
-
- /* ahash_digest shared descriptor */
- desc = ctx->sh_desc_digest;
-- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
-+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
-+ ctx->ctx_len, false, ctrlpriv->era);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
-- desc_bytes(desc), DMA_TO_DEVICE);
-+ desc_bytes(desc), ctx->dir);
- #ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash digest shdesc@"__stringify(__LINE__)": ",
-@@ -421,6 +373,7 @@ static int ahash_setkey(struct crypto_ah
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
- int digestsize = crypto_ahash_digestsize(ahash);
-+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
- int ret;
- u8 *hashed_key = NULL;
-
-@@ -441,16 +394,26 @@ static int ahash_setkey(struct crypto_ah
- key = hashed_key;
- }
-
-- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
-- CAAM_MAX_HASH_KEY_SIZE);
-- if (ret)
-- goto bad_free_key;
-+ /*
-+ * If DKP is supported, use it in the shared descriptor to generate
-+ * the split key.
-+ */
-+ if (ctrlpriv->era >= 6) {
-+ ctx->adata.key_inline = true;
-+ ctx->adata.keylen = keylen;
-+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-
--#ifdef DEBUG
-- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
-- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-- ctx->adata.keylen_pad, 1);
--#endif
-+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
-+ goto bad_free_key;
-+
-+ memcpy(ctx->key, key, keylen);
-+ } else {
-+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
-+ keylen, CAAM_MAX_HASH_KEY_SIZE);
-+ if (ret)
-+ goto bad_free_key;
-+ }
-
- kfree(hashed_key);
- return ahash_set_sh_desc(ahash);
-@@ -773,7 +736,7 @@ static int ahash_update_ctx(struct ahash
- edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
-
-- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
-+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_BIDIRECTIONAL);
- if (ret)
- goto unmap_ctx;
-@@ -871,9 +834,8 @@ static int ahash_final_ctx(struct ahash_
- desc = edesc->hw_desc;
-
- edesc->sec4_sg_bytes = sec4_sg_bytes;
-- edesc->src_nents = 0;
-
-- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
-+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
- if (ret)
- goto unmap_ctx;
-@@ -967,7 +929,7 @@ static int ahash_finup_ctx(struct ahash_
-
- edesc->src_nents = src_nents;
-
-- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
-+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
- if (ret)
- goto unmap_ctx;
-@@ -1126,7 +1088,6 @@ static int ahash_final_no_ctx(struct aha
- dev_err(jrdev, "unable to map dst\n");
- goto unmap;
- }
-- edesc->src_nents = 0;
-
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-@@ -1208,7 +1169,6 @@ static int ahash_update_no_ctx(struct ah
-
- edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
-- edesc->dst_dma = 0;
-
- ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
- if (ret)
-@@ -1420,7 +1380,6 @@ static int ahash_update_first(struct aha
- }
-
- edesc->src_nents = src_nents;
-- edesc->dst_dma = 0;
-
- ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
- to_hash);
-@@ -1722,6 +1681,7 @@ static int caam_hash_cra_init(struct cry
- HASH_MSG_LEN + 64,
- HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- dma_addr_t dma_addr;
-+ struct caam_drv_private *priv;
-
- /*
- * Get a Job ring from Job Ring driver to ensure in-order
-@@ -1733,10 +1693,13 @@ static int caam_hash_cra_init(struct cry
- return PTR_ERR(ctx->jrdev);
- }
-
-+ priv = dev_get_drvdata(ctx->jrdev->parent);
-+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-+
- dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
-- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
-+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(ctx->jrdev, dma_addr)) {
- dev_err(ctx->jrdev, "unable to map shared descriptors\n");
- caam_jr_free(ctx->jrdev);
-@@ -1771,11 +1734,11 @@ static void caam_hash_cra_exit(struct cr
- dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
-- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
-+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- caam_jr_free(ctx->jrdev);
- }
-
--static void __exit caam_algapi_hash_exit(void)
-+void caam_algapi_hash_exit(void)
- {
- struct caam_hash_alg *t_alg, *n;
-
-@@ -1834,56 +1797,38 @@ caam_hash_alloc(struct caam_hash_templat
- return t_alg;
- }
-
--static int __init caam_algapi_hash_init(void)
-+int caam_algapi_hash_init(struct device *ctrldev)
- {
-- struct device_node *dev_node;
-- struct platform_device *pdev;
-- struct device *ctrldev;
- int i = 0, err = 0;
-- struct caam_drv_private *priv;
-+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- unsigned int md_limit = SHA512_DIGEST_SIZE;
-- u32 cha_inst, cha_vid;
--
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-- if (!dev_node) {
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-- if (!dev_node)
-- return -ENODEV;
-- }
--
-- pdev = of_find_device_by_node(dev_node);
-- if (!pdev) {
-- of_node_put(dev_node);
-- return -ENODEV;
-- }
--
-- ctrldev = &pdev->dev;
-- priv = dev_get_drvdata(ctrldev);
-- of_node_put(dev_node);
--
-- /*
-- * If priv is NULL, it's probably because the caam driver wasn't
-- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-- */
-- if (!priv)
-- return -ENODEV;
-+ u32 md_inst, md_vid;
-
- /*
- * Register crypto algorithms the device supports. First, identify
- * presence and attributes of MD block.
- */
-- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-+ if (priv->era < 10) {
-+ md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
-+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+ md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
-+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-+ } else {
-+ u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
-+
-+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
-+ md_inst = mdha & CHA_VER_NUM_MASK;
-+ }
-
- /*
- * Skip registration of any hashing algorithms if MD block
- * is not present.
- */
-- if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
-+ if (!md_inst)
- return -ENODEV;
-
- /* Limit digest size based on LP256 */
-- if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
-+ if (md_vid == CHA_VER_VID_MD_LP256)
- md_limit = SHA256_DIGEST_SIZE;
-
- INIT_LIST_HEAD(&hash_list);
-@@ -1934,10 +1879,3 @@ static int __init caam_algapi_hash_init(
-
- return err;
- }
--
--module_init(caam_algapi_hash_init);
--module_exit(caam_algapi_hash_exit);
--
--MODULE_LICENSE("GPL");
--MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
--MODULE_AUTHOR("Freescale Semiconductor - NMG");
---- /dev/null
-+++ b/drivers/crypto/caam/caamhash_desc.c
-@@ -0,0 +1,108 @@
-+/*
-+ * Shared descriptors for ahash algorithms
-+ *
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the names of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "compat.h"
-+#include "desc_constr.h"
-+#include "caamhash_desc.h"
-+
-+/**
-+ * cnstr_shdsc_ahash - ahash shared descriptor
-+ * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions.
-+ * A split key is required for SEC Era < 6; the size of the split key
-+ * is specified in this case.
-+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
-+ * SHA256, SHA384, SHA512}.
-+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
-+ * @digestsize: algorithm's digest size
-+ * @ctx_len: size of Context Register
-+ * @import_ctx: true if previous Context Register needs to be restored
-+ * must be true for ahash update and final
-+ * must be false for ahash first and digest
-+ * @era: SEC Era
-+ */
-+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
-+ int digestsize, int ctx_len, bool import_ctx, int era)
-+{
-+ u32 op = adata->algtype;
-+
-+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-+
-+ /* Append key if it has been set; ahash update excluded */
-+ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
-+ u32 *skip_key_load;
-+
-+ /* Skip key loading if already shared */
-+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_SHRD);
-+
-+ if (era < 6)
-+ append_key_as_imm(desc, adata->key_virt,
-+ adata->keylen_pad,
-+ adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ else
-+ append_proto_dkp(desc, adata);
-+
-+ set_jump_tgt_here(desc, skip_key_load);
-+
-+ op |= OP_ALG_AAI_HMAC_PRECOMP;
-+ }
-+
-+ /* If needed, import context from software */
-+ if (import_ctx)
-+ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
-+
-+ /* Class 2 operation */
-+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
-+
-+ /*
-+ * Load from buf and/or src and write to req->result or state->context
-+ * Calculate remaining bytes to read
-+ */
-+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+ /* Read remaining bytes */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
-+ FIFOLD_TYPE_MSG | KEY_VLF);
-+ /* Store class2 context bytes */
-+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
-+}
-+EXPORT_SYMBOL(cnstr_shdsc_ahash);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
-+MODULE_AUTHOR("NXP Semiconductors");
---- /dev/null
-+++ b/drivers/crypto/caam/caamhash_desc.h
-@@ -0,0 +1,49 @@
-+/*
-+ * Shared descriptors for ahash algorithms
-+ *
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the names of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef _CAAMHASH_DESC_H_
-+#define _CAAMHASH_DESC_H_
-+
-+/* length of descriptors text */
-+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
-+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
-+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-+
-+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
-+ int digestsize, int ctx_len, bool import_ctx, int era);
-+
-+#endif /* _CAAMHASH_DESC_H_ */
---- a/drivers/crypto/caam/caampkc.c
-+++ b/drivers/crypto/caam/caampkc.c
-@@ -2,6 +2,7 @@
- * caam - Freescale FSL CAAM support for Public Key Cryptography
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
- *
- * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
- * all the desired key parameters, input and output pointers.
-@@ -1017,46 +1018,22 @@ static struct akcipher_alg caam_rsa = {
- };
-
- /* Public Key Cryptography module initialization handler */
--static int __init caam_pkc_init(void)
-+int caam_pkc_init(struct device *ctrldev)
- {
-- struct device_node *dev_node;
-- struct platform_device *pdev;
-- struct device *ctrldev;
-- struct caam_drv_private *priv;
-- u32 cha_inst, pk_inst;
-+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
-+ u32 pk_inst;
- int err;
-
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-- if (!dev_node) {
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-- if (!dev_node)
-- return -ENODEV;
-- }
--
-- pdev = of_find_device_by_node(dev_node);
-- if (!pdev) {
-- of_node_put(dev_node);
-- return -ENODEV;
-- }
--
-- ctrldev = &pdev->dev;
-- priv = dev_get_drvdata(ctrldev);
-- of_node_put(dev_node);
--
-- /*
-- * If priv is NULL, it's probably because the caam driver wasn't
-- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-- */
-- if (!priv)
-- return -ENODEV;
--
- /* Determine public key hardware accelerator presence. */
-- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-- pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
-+ if (priv->era < 10)
-+ pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
-+ CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
-+ else
-+ pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
-
- /* Do not register algorithms if PKHA is not present. */
- if (!pk_inst)
-- return -ENODEV;
-+ return 0;
-
- err = crypto_register_akcipher(&caam_rsa);
- if (err)
-@@ -1068,14 +1045,7 @@ static int __init caam_pkc_init(void)
- return err;
- }
-
--static void __exit caam_pkc_exit(void)
-+void caam_pkc_exit(void)
- {
- crypto_unregister_akcipher(&caam_rsa);
- }
--
--module_init(caam_pkc_init);
--module_exit(caam_pkc_exit);
--
--MODULE_LICENSE("Dual BSD/GPL");
--MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
--MODULE_AUTHOR("Freescale Semiconductor");
---- a/drivers/crypto/caam/caamrng.c
-+++ b/drivers/crypto/caam/caamrng.c
-@@ -2,6 +2,7 @@
- * caam - Freescale FSL CAAM support for hw_random
- *
- * Copyright 2011 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
- *
- * Based on caamalg.c crypto API driver.
- *
-@@ -294,49 +295,29 @@ static struct hwrng caam_rng = {
- .read = caam_read,
- };
-
--static void __exit caam_rng_exit(void)
-+void caam_rng_exit(void)
- {
- caam_jr_free(rng_ctx->jrdev);
- hwrng_unregister(&caam_rng);
- kfree(rng_ctx);
- }
-
--static int __init caam_rng_init(void)
-+int caam_rng_init(struct device *ctrldev)
- {
- struct device *dev;
-- struct device_node *dev_node;
-- struct platform_device *pdev;
-- struct device *ctrldev;
-- struct caam_drv_private *priv;
-+ u32 rng_inst;
-+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- int err;
-
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-- if (!dev_node) {
-- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-- if (!dev_node)
-- return -ENODEV;
-- }
--
-- pdev = of_find_device_by_node(dev_node);
-- if (!pdev) {
-- of_node_put(dev_node);
-- return -ENODEV;
-- }
--
-- ctrldev = &pdev->dev;
-- priv = dev_get_drvdata(ctrldev);
-- of_node_put(dev_node);
--
-- /*
-- * If priv is NULL, it's probably because the caam driver wasn't
-- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-- */
-- if (!priv)
-- return -ENODEV;
--
- /* Check for an instantiated RNG before registration */
-- if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
-- return -ENODEV;
-+ if (priv->era < 10)
-+ rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
-+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
-+ else
-+ rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
-+
-+ if (!rng_inst)
-+ return 0;
-
- dev = caam_jr_alloc();
- if (IS_ERR(dev)) {
-@@ -364,10 +345,3 @@ free_caam_alloc:
- caam_jr_free(dev);
- return err;
- }
--
--module_init(caam_rng_init);
--module_exit(caam_rng_exit);
--
--MODULE_LICENSE("GPL");
--MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
--MODULE_AUTHOR("Freescale Semiconductor - NMG");
---- a/drivers/crypto/caam/compat.h
-+++ b/drivers/crypto/caam/compat.h
-@@ -17,6 +17,7 @@
- #include <linux/of_platform.h>
- #include <linux/dma-mapping.h>
- #include <linux/io.h>
-+#include <linux/iommu.h>
- #include <linux/spinlock.h>
- #include <linux/rtnetlink.h>
- #include <linux/in.h>
-@@ -34,10 +35,13 @@
- #include <crypto/des.h>
- #include <crypto/sha.h>
- #include <crypto/md5.h>
-+#include <crypto/chacha20.h>
-+#include <crypto/poly1305.h>
- #include <crypto/internal/aead.h>
- #include <crypto/authenc.h>
- #include <crypto/akcipher.h>
- #include <crypto/scatterwalk.h>
-+#include <crypto/skcipher.h>
- #include <crypto/internal/skcipher.h>
- #include <crypto/internal/hash.h>
- #include <crypto/internal/rsa.h>
---- a/drivers/crypto/caam/ctrl.c
-+++ b/drivers/crypto/caam/ctrl.c
-@@ -2,6 +2,7 @@
- * Controller-level driver, kernel property detection, initialization
- *
- * Copyright 2008-2012 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
- */
-
- #include <linux/device.h>
-@@ -16,17 +17,15 @@
- #include "desc_constr.h"
- #include "ctrl.h"
-
--bool caam_little_end;
--EXPORT_SYMBOL(caam_little_end);
- bool caam_dpaa2;
- EXPORT_SYMBOL(caam_dpaa2);
--bool caam_imx;
--EXPORT_SYMBOL(caam_imx);
-
- #ifdef CONFIG_CAAM_QI
- #include "qi.h"
- #endif
-
-+static struct platform_device *caam_dma_dev;
-+
- /*
- * i.MX targets tend to have clock control subsystems that can
- * enable/disable clocking to our device.
-@@ -105,7 +104,7 @@ static inline int run_descriptor_deco0(s
- struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
- struct caam_deco __iomem *deco = ctrlpriv->deco;
- unsigned int timeout = 100000;
-- u32 deco_dbg_reg, flags;
-+ u32 deco_dbg_reg, deco_state, flags;
- int i;
-
-
-@@ -148,13 +147,22 @@ static inline int run_descriptor_deco0(s
- timeout = 10000000;
- do {
- deco_dbg_reg = rd_reg32(&deco->desc_dbg);
-+
-+ if (ctrlpriv->era < 10)
-+ deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
-+ DESC_DBG_DECO_STAT_SHIFT;
-+ else
-+ deco_state = (rd_reg32(&deco->dbg_exec) &
-+ DESC_DER_DECO_STAT_MASK) >>
-+ DESC_DER_DECO_STAT_SHIFT;
-+
- /*
- * If an error occurred in the descriptor, then
- * the DECO status field will be set to 0x0D
- */
-- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
-- DESC_DBG_DECO_STAT_HOST_ERR)
-+ if (deco_state == DECO_STAT_HOST_ERR)
- break;
-+
- cpu_relax();
- } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
-
-@@ -316,15 +324,15 @@ static int caam_remove(struct platform_d
- of_platform_depopulate(ctrldev);
-
- #ifdef CONFIG_CAAM_QI
-- if (ctrlpriv->qidev)
-- caam_qi_shutdown(ctrlpriv->qidev);
-+ if (ctrlpriv->qi_init)
-+ caam_qi_shutdown(ctrldev);
- #endif
-
- /*
- * De-initialize RNG state handles initialized by this driver.
-- * In case of DPAA 2.x, RNG is managed by MC firmware.
-+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
- */
-- if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
-+ if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
- deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
-
- /* Shut down debug views */
-@@ -332,6 +340,9 @@ static int caam_remove(struct platform_d
- debugfs_remove_recursive(ctrlpriv->dfs_root);
- #endif
-
-+ if (caam_dma_dev)
-+ platform_device_unregister(caam_dma_dev);
-+
- /* Unmap controller region */
- iounmap(ctrl);
-
-@@ -433,6 +444,10 @@ static int caam_probe(struct platform_de
- {.family = "Freescale i.MX"},
- {},
- };
-+ static struct platform_device_info caam_dma_pdev_info = {
-+ .name = "caam-dma",
-+ .id = PLATFORM_DEVID_NONE
-+ };
- struct device *dev;
- struct device_node *nprop, *np;
- struct caam_ctrl __iomem *ctrl;
-@@ -442,7 +457,7 @@ static int caam_probe(struct platform_de
- struct caam_perfmon *perfmon;
- #endif
- u32 scfgr, comp_params;
-- u32 cha_vid_ls;
-+ u8 rng_vid;
- int pg_size;
- int BLOCK_OFFSET = 0;
-
-@@ -454,15 +469,54 @@ static int caam_probe(struct platform_de
- dev_set_drvdata(dev, ctrlpriv);
- nprop = pdev->dev.of_node;
-
-+ /* Get configuration properties from device tree */
-+ /* First, get register page */
-+ ctrl = of_iomap(nprop, 0);
-+ if (!ctrl) {
-+ dev_err(dev, "caam: of_iomap() failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
-+ (CSTA_PLEND | CSTA_ALT_PLEND));
- caam_imx = (bool)soc_device_match(imx_soc);
-
-+ comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
-+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
-+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
-+
-+#ifdef CONFIG_CAAM_QI
-+ /* If (DPAA 1.x) QI present, check whether dependencies are available */
-+ if (ctrlpriv->qi_present && !caam_dpaa2) {
-+ ret = qman_is_probed();
-+ if (!ret) {
-+ ret = -EPROBE_DEFER;
-+ goto iounmap_ctrl;
-+ } else if (ret < 0) {
-+ dev_err(dev, "failing probe due to qman probe error\n");
-+ ret = -ENODEV;
-+ goto iounmap_ctrl;
-+ }
-+
-+ ret = qman_portals_probed();
-+ if (!ret) {
-+ ret = -EPROBE_DEFER;
-+ goto iounmap_ctrl;
-+ } else if (ret < 0) {
-+ dev_err(dev, "failing probe due to qman portals probe error\n");
-+ ret = -ENODEV;
-+ goto iounmap_ctrl;
-+ }
-+ }
-+#endif
-+
- /* Enable clocking */
- clk = caam_drv_identify_clk(&pdev->dev, "ipg");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM ipg clk: %d\n", ret);
-- return ret;
-+ goto iounmap_ctrl;
- }
- ctrlpriv->caam_ipg = clk;
-
-@@ -471,7 +525,7 @@ static int caam_probe(struct platform_de
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM mem clk: %d\n", ret);
-- return ret;
-+ goto iounmap_ctrl;
- }
- ctrlpriv->caam_mem = clk;
-
-@@ -480,7 +534,7 @@ static int caam_probe(struct platform_de
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM aclk clk: %d\n", ret);
-- return ret;
-+ goto iounmap_ctrl;
- }
- ctrlpriv->caam_aclk = clk;
-
-@@ -490,7 +544,7 @@ static int caam_probe(struct platform_de
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
-- return ret;
-+ goto iounmap_ctrl;
- }
- ctrlpriv->caam_emi_slow = clk;
- }
-@@ -498,7 +552,7 @@ static int caam_probe(struct platform_de
- ret = clk_prepare_enable(ctrlpriv->caam_ipg);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
-- return ret;
-+ goto iounmap_ctrl;
- }
-
- ret = clk_prepare_enable(ctrlpriv->caam_mem);
-@@ -523,25 +577,10 @@ static int caam_probe(struct platform_de
- }
- }
-
-- /* Get configuration properties from device tree */
-- /* First, get register page */
-- ctrl = of_iomap(nprop, 0);
-- if (ctrl == NULL) {
-- dev_err(dev, "caam: of_iomap() failed\n");
-- ret = -ENOMEM;
-- goto disable_caam_emi_slow;
-- }
--
-- caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
-- (CSTA_PLEND | CSTA_ALT_PLEND));
--
-- /* Finding the page size for using the CTPR_MS register */
-- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
-- pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
--
- /* Allocating the BLOCK_OFFSET based on the supported page size on
- * the platform
- */
-+ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
- if (pg_size == 0)
- BLOCK_OFFSET = PG_SIZE_4K;
- else
-@@ -563,11 +602,14 @@ static int caam_probe(struct platform_de
- /*
- * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
- * long pointers in master configuration register.
-- * In case of DPAA 2.x, Management Complex firmware performs
-+ * In case of SoCs with Management Complex, MC f/w performs
- * the configuration.
- */
-- caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
-- if (!caam_dpaa2)
-+ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
-+ ctrlpriv->mc_en = !!np;
-+ of_node_put(np);
-+
-+ if (!ctrlpriv->mc_en)
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
- MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
-@@ -612,14 +654,11 @@ static int caam_probe(struct platform_de
- }
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
-- goto iounmap_ctrl;
-+ goto disable_caam_emi_slow;
- }
-
-- ret = of_platform_populate(nprop, caam_match, NULL, dev);
-- if (ret) {
-- dev_err(dev, "JR platform devices creation error\n");
-- goto iounmap_ctrl;
-- }
-+ ctrlpriv->era = caam_get_era();
-+ ctrlpriv->domain = iommu_get_domain_for_dev(dev);
-
- #ifdef CONFIG_DEBUG_FS
- /*
-@@ -633,21 +672,7 @@ static int caam_probe(struct platform_de
- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
- #endif
-
-- ring = 0;
-- for_each_available_child_of_node(nprop, np)
-- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
-- ((__force uint8_t *)ctrl +
-- (ring + JR_BLOCK_NUMBER) *
-- BLOCK_OFFSET
-- );
-- ctrlpriv->total_jobrs++;
-- ring++;
-- }
--
- /* Check to see if (DPAA 1.x) QI present. If so, enable */
-- ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
- if (ctrlpriv->qi_present && !caam_dpaa2) {
- ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
- ((__force uint8_t *)ctrl +
-@@ -664,6 +689,25 @@ static int caam_probe(struct platform_de
- #endif
- }
-
-+ ret = of_platform_populate(nprop, caam_match, NULL, dev);
-+ if (ret) {
-+ dev_err(dev, "JR platform devices creation error\n");
-+ goto shutdown_qi;
-+ }
-+
-+ ring = 0;
-+ for_each_available_child_of_node(nprop, np)
-+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
-+ ((__force uint8_t *)ctrl +
-+ (ring + JR_BLOCK_NUMBER) *
-+ BLOCK_OFFSET
-+ );
-+ ctrlpriv->total_jobrs++;
-+ ring++;
-+ }
-+
- /* If no QI and no rings specified, quit and go home */
- if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
- dev_err(dev, "no queues configured, terminating\n");
-@@ -671,15 +715,29 @@ static int caam_probe(struct platform_de
- goto caam_remove;
- }
-
-- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
-+ caam_dma_pdev_info.parent = dev;
-+ caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
-+ caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
-+ if (IS_ERR(caam_dma_dev)) {
-+ dev_err(dev, "Unable to create and register caam-dma dev\n");
-+ caam_dma_dev = 0;
-+ } else {
-+ set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
-+ }
-+
-+ if (ctrlpriv->era < 10)
-+ rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
-+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
-+ else
-+ rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
-+ CHA_VER_VID_SHIFT;
-
- /*
- * If SEC has RNG version >= 4 and RNG state handle has not been
- * already instantiated, do RNG instantiation
-- * In case of DPAA 2.x, RNG is managed by MC firmware.
-+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
- */
-- if (!caam_dpaa2 &&
-- (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
-+ if (!ctrlpriv->mc_en && rng_vid >= 4) {
- ctrlpriv->rng4_sh_init =
- rd_reg32(&ctrl->r4tst[0].rdsta);
- /*
-@@ -746,10 +804,9 @@ static int caam_probe(struct platform_de
-
- /* Report "alive" for developer to see */
- dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
-- caam_get_era());
-- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
-- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
-- caam_dpaa2 ? "yes" : "no");
-+ ctrlpriv->era);
-+ dev_info(dev, "job rings = %d, qi = %d\n",
-+ ctrlpriv->total_jobrs, ctrlpriv->qi_present);
-
- #ifdef CONFIG_DEBUG_FS
- debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
-@@ -816,8 +873,11 @@ caam_remove:
- caam_remove(pdev);
- return ret;
-
--iounmap_ctrl:
-- iounmap(ctrl);
-+shutdown_qi:
-+#ifdef CONFIG_CAAM_QI
-+ if (ctrlpriv->qi_init)
-+ caam_qi_shutdown(dev);
-+#endif
- disable_caam_emi_slow:
- if (ctrlpriv->caam_emi_slow)
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-@@ -827,6 +887,8 @@ disable_caam_mem:
- clk_disable_unprepare(ctrlpriv->caam_mem);
- disable_caam_ipg:
- clk_disable_unprepare(ctrlpriv->caam_ipg);
-+iounmap_ctrl:
-+ iounmap(ctrl);
- return ret;
- }
-
---- a/drivers/crypto/caam/desc.h
-+++ b/drivers/crypto/caam/desc.h
-@@ -4,6 +4,7 @@
- * Definitions to support CAAM descriptor instruction generation
- *
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
- */
-
- #ifndef DESC_H
-@@ -42,6 +43,7 @@
- #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
- #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
- #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
-+#define CMD_MOVEB (0x07 << CMD_SHIFT)
- #define CMD_STORE (0x0a << CMD_SHIFT)
- #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
- #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
-@@ -242,6 +244,7 @@
- #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
- #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
- #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
-+#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
- #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
-
- /* Offset in source/destination */
-@@ -284,6 +287,12 @@
- #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
- #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
-
-+/* Special Length definitions when dst=sm, nfifo-{sm,m} */
-+#define LDLEN_MATH0 0
-+#define LDLEN_MATH1 1
-+#define LDLEN_MATH2 2
-+#define LDLEN_MATH3 3
-+
- /*
- * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
- * Command Constructs
-@@ -355,6 +364,7 @@
- #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
- #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
- #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
-+#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
-
- /* Other types. Need to OR in last/flush bits as desired */
- #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
-@@ -408,6 +418,7 @@
- #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
- #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
- #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
-+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
- #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
-
- /*
-@@ -444,6 +455,18 @@
- #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
- #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
- #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
-+#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
-
- /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
- #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
-@@ -1093,6 +1116,22 @@
- /* MacSec protinfos */
- #define OP_PCL_MACSEC 0x0001
-
-+/* Derived Key Protocol (DKP) Protinfo */
-+#define OP_PCL_DKP_SRC_SHIFT 14
-+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
-+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
-+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
-+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
-+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
-+#define OP_PCL_DKP_DST_SHIFT 12
-+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
-+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
-+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
-+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
-+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
-+#define OP_PCL_DKP_KEY_SHIFT 0
-+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
-+
- /* PKI unidirectional protocol protinfo bits */
- #define OP_PCL_PKPROT_TEST 0x0008
- #define OP_PCL_PKPROT_DECRYPT 0x0004
-@@ -1105,6 +1144,12 @@
- #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
- #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
-
-+/* version register fields */
-+#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
-+#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
-+#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
-+#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
-+
- #define OP_ALG_ALGSEL_SHIFT 16
- #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
- #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
-@@ -1124,6 +1169,8 @@
- #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
- #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
- #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
-+#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
-+#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
-
- #define OP_ALG_AAI_SHIFT 4
- #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
-@@ -1171,6 +1218,11 @@
- #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
- #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
-
-+/* Chacha20 AAI set */
-+#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
-+#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
-+#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
-+
- /* hmac/smac AAI set */
- #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
- #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
-@@ -1359,6 +1411,7 @@
- #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
- #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
- #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
-+#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
-
- #define MOVE_DEST_SHIFT 16
- #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
-@@ -1385,6 +1438,10 @@
-
- #define MOVELEN_MRSEL_SHIFT 0
- #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
-+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
-+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
-+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
-+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
-
- /*
- * MATH Command Constructs
-@@ -1440,10 +1497,11 @@
- #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
- #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
- #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
--#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
-+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
- #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
- #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
- #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
-+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
-
- /* Destination selectors */
- #define MATH_DEST_SHIFT 8
-@@ -1452,6 +1510,7 @@
- #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
- #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
- #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
-+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
- #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
- #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
- #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
-@@ -1560,6 +1619,7 @@
- #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
- #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
- #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
-+#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
- #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
- #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
-
-@@ -1624,4 +1684,31 @@
- /* Frame Descriptor Command for Replacement Job Descriptor */
- #define FD_CMD_REPLACE_JOB_DESC 0x20000000
-
-+/* CHA Control Register bits */
-+#define CCTRL_RESET_CHA_ALL 0x1
-+#define CCTRL_RESET_CHA_AESA 0x2
-+#define CCTRL_RESET_CHA_DESA 0x4
-+#define CCTRL_RESET_CHA_AFHA 0x8
-+#define CCTRL_RESET_CHA_KFHA 0x10
-+#define CCTRL_RESET_CHA_SF8A 0x20
-+#define CCTRL_RESET_CHA_PKHA 0x40
-+#define CCTRL_RESET_CHA_MDHA 0x80
-+#define CCTRL_RESET_CHA_CRCA 0x100
-+#define CCTRL_RESET_CHA_RNG 0x200
-+#define CCTRL_RESET_CHA_SF9A 0x400
-+#define CCTRL_RESET_CHA_ZUCE 0x800
-+#define CCTRL_RESET_CHA_ZUCA 0x1000
-+#define CCTRL_UNLOAD_PK_A0 0x10000
-+#define CCTRL_UNLOAD_PK_A1 0x20000
-+#define CCTRL_UNLOAD_PK_A2 0x40000
-+#define CCTRL_UNLOAD_PK_A3 0x80000
-+#define CCTRL_UNLOAD_PK_B0 0x100000
-+#define CCTRL_UNLOAD_PK_B1 0x200000
-+#define CCTRL_UNLOAD_PK_B2 0x400000
-+#define CCTRL_UNLOAD_PK_B3 0x800000
-+#define CCTRL_UNLOAD_PK_N 0x1000000
-+#define CCTRL_UNLOAD_PK_A 0x4000000
-+#define CCTRL_UNLOAD_PK_B 0x8000000
-+#define CCTRL_UNLOAD_SBOX 0x10000000
-+
- #endif /* DESC_H */
---- a/drivers/crypto/caam/desc_constr.h
-+++ b/drivers/crypto/caam/desc_constr.h
-@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
- append_ptr(desc, ptr);
- }
-
--static inline void append_data(u32 * const desc, void *data, int len)
-+static inline void append_data(u32 * const desc, const void *data, int len)
- {
- u32 *offset = desc_end(desc);
-
-@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
- append_cmd(desc, len);
- }
-
--static inline void append_cmd_data(u32 * const desc, void *data, int len,
-+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
- u32 command)
- {
- append_cmd(desc, command | IMMEDIATE | len);
-@@ -189,6 +189,8 @@ static inline u32 *append_##cmd(u32 * co
- }
- APPEND_CMD_RET(jump, JUMP)
- APPEND_CMD_RET(move, MOVE)
-+APPEND_CMD_RET(moveb, MOVEB)
-+APPEND_CMD_RET(move_len, MOVE_LEN)
-
- static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
- {
-@@ -271,7 +273,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
- APPEND_SEQ_PTR_INTLEN(out, OUT)
-
- #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
--static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
- unsigned int len, u32 options) \
- { \
- PRINT_POS; \
-@@ -312,7 +314,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
- * from length of immediate data provided, e.g., split keys
- */
- #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
--static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
- unsigned int data_len, \
- unsigned int len, u32 options) \
- { \
-@@ -327,7 +329,11 @@ static inline void append_##cmd##_imm_##
- u32 options) \
- { \
- PRINT_POS; \
-- append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
-+ if (options & LDST_LEN_MASK) \
-+ append_cmd(desc, CMD_##op | IMMEDIATE | options); \
-+ else \
-+ append_cmd(desc, CMD_##op | IMMEDIATE | options | \
-+ sizeof(type)); \
- append_cmd(desc, immediate); \
- }
- APPEND_CMD_RAW_IMM(load, LOAD, u32);
-@@ -452,7 +458,7 @@ struct alginfo {
- unsigned int keylen_pad;
- union {
- dma_addr_t key_dma;
-- void *key_virt;
-+ const void *key_virt;
- };
- bool key_inline;
- };
-@@ -496,4 +502,45 @@ static inline int desc_inline_query(unsi
- return (rem_bytes >= 0) ? 0 : -1;
- }
-
-+/**
-+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
-+ * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions.
-+ * keylen should be the length of the initial key, while keylen_pad
-+ * the length of the derived (split) key.
-+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
-+ * SHA256, SHA384, SHA512}.
-+ */
-+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
-+{
-+ u32 protid;
-+
-+ /*
-+ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
-+ * to OP_PCLID_DKP_{MD5, SHA*}
-+ */
-+ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
-+ (0x20 << OP_ALG_ALGSEL_SHIFT);
-+
-+ if (adata->key_inline) {
-+ int words;
-+
-+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
-+ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
-+ adata->keylen);
-+ append_data(desc, adata->key_virt, adata->keylen);
-+
-+ /* Reserve space in descriptor buffer for the derived key */
-+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
-+ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
-+ if (words)
-+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
-+ } else {
-+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
-+ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
-+ adata->keylen);
-+ append_ptr(desc, adata->key_dma);
-+ }
-+}
-+
- #endif /* DESC_CONSTR_H */
---- /dev/null
-+++ b/drivers/crypto/caam/dpseci.c
-@@ -0,0 +1,865 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the names of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/fsl/mc.h>
-+#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
-+#include "dpseci.h"
-+#include "dpseci_cmd.h"
-+
-+/**
-+ * dpseci_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpseci_id: DPSECI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an already created
-+ * object; an object may have been declared in the DPL or by calling the
-+ * dpseci_create() function.
-+ * This function returns a unique authentication token, associated with the
-+ * specific object ID and the specific MC portal; this token must be used in all
-+ * subsequent commands for this specific object.
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
-+ u16 *token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_open *cmd_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
-+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * After this function is called, no further operations are allowed on the
-+ * object without opening a new control session.
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_create() - Create the DPSECI object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @obj_id: returned object id
-+ *
-+ * Create the DPSECI object, allocate required resources and perform required
-+ * initialization.
-+ *
-+ * The object can be created either by declaring it in the DPL file, or by
-+ * calling this function.
-+ *
-+ * The function accepts an authentication token of a parent container that this
-+ * object should be assigned to. The token can be '0' so the object will be
-+ * assigned to the default container.
-+ * The newly created object can be opened with the returned object id and using
-+ * the container's associated tokens and MC portals.
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
-+ const struct dpseci_cfg *cfg, u32 *obj_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_create *cmd_params;
-+ int i, err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
-+ cmd_flags,
-+ dprc_token);
-+ cmd_params = (struct dpseci_cmd_create *)cmd.params;
-+ for (i = 0; i < 8; i++)
-+ cmd_params->priorities[i] = cfg->priorities[i];
-+ for (i = 0; i < 8; i++)
-+ cmd_params->priorities2[i] = cfg->priorities[8 + i];
-+ cmd_params->num_tx_queues = cfg->num_tx_queues;
-+ cmd_params->num_rx_queues = cfg->num_rx_queues;
-+ cmd_params->options = cpu_to_le32(cfg->options);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ *obj_id = mc_cmd_read_object_id(&cmd);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @dprc_token: Parent container token; '0' for default container
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @object_id: The object id; it must be a valid id within the container that
-+ * created this object
-+ *
-+ * The function accepts the authentication token of the parent container that
-+ * created the object (not the one that currently owns the object). The object
-+ * is searched within parent using the provided 'object_id'.
-+ * All tokens to the object must be closed before calling destroy.
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
-+ u32 object_id)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_destroy *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
-+ cmd_flags,
-+ dprc_token);
-+ cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
-+ cmd_params->object_id = cpu_to_le32(object_id);
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ int *en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_rsp_is_enabled *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
-+ *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned Interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u8 *en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_irq_enable *cmd_params;
-+ struct dpseci_rsp_get_irq_enable *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
-+ *en = rsp_params->enable_state;
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no causes will cause
-+ * an interrupt.
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u8 en)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_irq_enable *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->enable_state = en;
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently.
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 *mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_irq_mask *cmd_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ *mask = le32_to_cpu(cmd_params->mask);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently.
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 mask)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_irq_mask *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 *status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_irq_status *cmd_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ *status = le32_to_cpu(cmd_params->status);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 status)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_irq_status *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(status);
-+ cmd_params->irq_index = irq_index;
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_get_attributes() - Retrieve DPSECI attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ struct dpseci_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_rsp_get_attributes *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
-+ attr->id = le32_to_cpu(rsp_params->id);
-+ attr->num_tx_queues = rsp_params->num_tx_queues;
-+ attr->num_rx_queues = rsp_params->num_rx_queues;
-+ attr->options = le32_to_cpu(rsp_params->options);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_set_rx_queue() - Set Rx queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @queue: Select the queue relative to number of priorities configured at
-+ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
-+ * Rx queues identically.
-+ * @cfg: Rx queue configuration
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_queue *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
-+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
-+ cmd_params->priority = cfg->dest_cfg.priority;
-+ cmd_params->queue = queue;
-+ dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
-+ cfg->dest_cfg.dest_type);
-+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
-+ cmd_params->options = cpu_to_le32(cfg->options);
-+ dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
-+ cfg->order_preservation_en);
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @queue: Select the queue relative to number of priorities configured at
-+ * DPSECI creation
-+ * @attr: Returned Rx queue attributes
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 queue, struct dpseci_rx_queue_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_queue *cmd_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
-+ cmd_params->queue = queue;
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
-+ attr->dest_cfg.priority = cmd_params->priority;
-+ attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
-+ DEST_TYPE);
-+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
-+ attr->fqid = le32_to_cpu(cmd_params->fqid);
-+ attr->order_preservation_en =
-+ dpseci_get_field(cmd_params->order_preservation_en,
-+ ORDER_PRESERVATION);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @queue: Select the queue relative to number of priorities configured at
-+ * DPSECI creation
-+ * @attr: Returned Tx queue attributes
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 queue, struct dpseci_tx_queue_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_queue *cmd_params;
-+ struct dpseci_rsp_get_tx_queue *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
-+ cmd_params->queue = queue;
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
-+ attr->fqid = le32_to_cpu(rsp_params->fqid);
-+ attr->priority = rsp_params->priority;
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @attr: Returned SEC attributes
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ struct dpseci_sec_attr *attr)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_rsp_get_sec_attr *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
-+ cmd_flags,
-+ token);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
-+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
-+ attr->major_rev = rsp_params->major_rev;
-+ attr->minor_rev = rsp_params->minor_rev;
-+ attr->era = rsp_params->era;
-+ attr->deco_num = rsp_params->deco_num;
-+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
-+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
-+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
-+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
-+ attr->crc_acc_num = rsp_params->crc_acc_num;
-+ attr->pk_acc_num = rsp_params->pk_acc_num;
-+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
-+ attr->rng_acc_num = rsp_params->rng_acc_num;
-+ attr->md_acc_num = rsp_params->md_acc_num;
-+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
-+ attr->des_acc_num = rsp_params->des_acc_num;
-+ attr->aes_acc_num = rsp_params->aes_acc_num;
-+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
-+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @counters: Returned SEC counters
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ struct dpseci_sec_counters *counters)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_rsp_get_sec_counters *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
-+ cmd_flags,
-+ token);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
-+ counters->dequeued_requests =
-+ le64_to_cpu(rsp_params->dequeued_requests);
-+ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
-+ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
-+ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
-+ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
-+ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
-+ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of data path sec API
-+ * @minor_ver: Minor version of data path sec API
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
-+ u16 *major_ver, u16 *minor_ver)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_rsp_get_api_version *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
-+ cmd_flags, 0);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
-+ *major_ver = le16_to_cpu(rsp_params->major);
-+ *minor_ver = le16_to_cpu(rsp_params->minor);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_set_opr() - Set Order Restoration configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @index: The queue index
-+ * @options: Configuration mode options; can be OPR_OPT_CREATE or
-+ * OPR_OPT_RETIRE
-+ * @cfg: Configuration options for the OPR
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
-+ u8 options, struct opr_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_opr *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(
-+ DPSECI_CMDID_SET_OPR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_opr *)cmd.params;
-+ cmd_params->index = index;
-+ cmd_params->options = options;
-+ cmd_params->oloe = cfg->oloe;
-+ cmd_params->oeane = cfg->oeane;
-+ cmd_params->olws = cfg->olws;
-+ cmd_params->oa = cfg->oa;
-+ cmd_params->oprrws = cfg->oprrws;
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_get_opr() - Retrieve Order Restoration config and query
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @index: The queue index
-+ * @cfg: Returned OPR configuration
-+ * @qry: Returned OPR query
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
-+ struct opr_cfg *cfg, struct opr_qry *qry)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_opr *cmd_params;
-+ struct dpseci_rsp_get_opr *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_opr *)cmd.params;
-+ cmd_params->index = index;
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
-+ qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
-+ qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
-+ cfg->oloe = rsp_params->oloe;
-+ cfg->oeane = rsp_params->oeane;
-+ cfg->olws = rsp_params->olws;
-+ cfg->oa = rsp_params->oa;
-+ cfg->oprrws = rsp_params->oprrws;
-+ qry->nesn = le16_to_cpu(rsp_params->nesn);
-+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
-+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
-+ qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
-+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
-+ qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
-+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
-+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
-+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
-+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
-+
-+ return 0;
-+}
-+
-+/**
-+ * dpseci_set_congestion_notification() - Set congestion group
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
-+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_congestion_notification *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(
-+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
-+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
-+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
-+ cmd_params->priority = cfg->dest_cfg.priority;
-+ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
-+ cfg->dest_cfg.dest_type);
-+ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
-+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
-+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
-+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
-+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpseci_get_congestion_notification() - Get congestion group notification
-+ * configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on success, error code otherwise
-+ */
-+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
-+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
-+{
-+ struct fsl_mc_command cmd = { 0 };
-+ struct dpseci_cmd_congestion_notification *rsp_params;
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(
-+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
-+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
-+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
-+ cfg->dest_cfg.priority = rsp_params->priority;
-+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
-+ CGN_DEST_TYPE);
-+ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
-+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
-+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
-+
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/crypto/caam/dpseci.h
-@@ -0,0 +1,433 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the names of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _DPSECI_H_
-+#define _DPSECI_H_
-+
-+/*
-+ * Data Path SEC Interface API
-+ * Contains initialization APIs and runtime control APIs for DPSECI
-+ */
-+
-+struct fsl_mc_io;
-+struct opr_cfg;
-+struct opr_qry;
-+
-+/**
-+ * General DPSECI macros
-+ */
-+
-+/**
-+ * Maximum number of Tx/Rx queues per DPSECI object
-+ */
-+#define DPSECI_MAX_QUEUE_NUM 16
-+
-+/**
-+ * All queues considered; see dpseci_set_rx_queue()
-+ */
-+#define DPSECI_ALL_QUEUES (u8)(-1)
-+
-+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
-+ u16 *token);
-+
-+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-+
-+/**
-+ * Enable the Congestion Group support
-+ */
-+#define DPSECI_OPT_HAS_CG 0x000020
-+
-+/**
-+ * Enable the Order Restoration support
-+ */
-+#define DPSECI_OPT_HAS_OPR 0x000040
-+
-+/**
-+ * Order Point Records are shared for the entire DPSECI
-+ */
-+#define DPSECI_OPT_OPR_SHARED 0x000080
-+
-+/**
-+ * struct dpseci_cfg - Structure representing DPSECI configuration
-+ * @options: Any combination of the following options:
-+ * DPSECI_OPT_HAS_CG
-+ * DPSECI_OPT_HAS_OPR
-+ * DPSECI_OPT_OPR_SHARED
-+ * @num_tx_queues: num of queues towards the SEC
-+ * @num_rx_queues: num of queues back from the SEC
-+ * @priorities: Priorities for the SEC hardware processing;
-+ * each place in the array is the priority of the tx queue
-+ * towards the SEC;
-+ * valid priorities are configured with values 1-8;
-+ */
-+struct dpseci_cfg {
-+ u32 options;
-+ u8 num_tx_queues;
-+ u8 num_rx_queues;
-+ u8 priorities[DPSECI_MAX_QUEUE_NUM];
-+};
-+
-+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
-+ const struct dpseci_cfg *cfg, u32 *obj_id);
-+
-+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
-+ u32 object_id);
-+
-+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-+
-+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-+
-+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ int *en);
-+
-+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-+
-+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u8 *en);
-+
-+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u8 en);
-+
-+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 *mask);
-+
-+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 mask);
-+
-+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 *status);
-+
-+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 irq_index, u32 status);
-+
-+/**
-+ * struct dpseci_attr - Structure representing DPSECI attributes
-+ * @id: DPSECI object ID
-+ * @num_tx_queues: number of queues towards the SEC
-+ * @num_rx_queues: number of queues back from the SEC
-+ * @options: any combination of the following options:
-+ * DPSECI_OPT_HAS_CG
-+ * DPSECI_OPT_HAS_OPR
-+ * DPSECI_OPT_OPR_SHARED
-+ */
-+struct dpseci_attr {
-+ int id;
-+ u8 num_tx_queues;
-+ u8 num_rx_queues;
-+ u32 options;
-+};
-+
-+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ struct dpseci_attr *attr);
-+
-+/**
-+ * enum dpseci_dest - DPSECI destination types
-+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
-+ * and does not generate FQDAN notifications; user is expected to dequeue
-+ * from the queue based on polling or other user-defined method
-+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to dequeue from
-+ * the queue only after notification is received
-+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified DPCON object;
-+ * user is expected to dequeue from the DPCON channel
-+ */
-+enum dpseci_dest {
-+ DPSECI_DEST_NONE = 0,
-+ DPSECI_DEST_DPIO,
-+ DPSECI_DEST_DPCON
-+};
-+
-+/**
-+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that channel;
-+ * not relevant for 'DPSECI_DEST_NONE' option
-+ */
-+struct dpseci_dest_cfg {
-+ enum dpseci_dest dest_type;
-+ int dest_id;
-+ u8 priority;
-+};
-+
-+/**
-+ * DPSECI queue modification options
-+ */
-+
-+/**
-+ * Select to modify the user's context associated with the queue
-+ */
-+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
-+
-+/**
-+ * Select to modify the queue's destination
-+ */
-+#define DPSECI_QUEUE_OPT_DEST 0x00000002
-+
-+/**
-+ * Select to modify the queue's order preservation
-+ */
-+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
-+
-+/**
-+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
-+ * @options: Flags representing the suggested modifications to the queue;
-+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
-+ * @order_preservation_en: order preservation configuration for the rx queue
-+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
-+ * in 'options'
-+ * @dest_cfg: Queue destination parameters; valid only if
-+ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
-+ */
-+struct dpseci_rx_queue_cfg {
-+ u32 options;
-+ int order_preservation_en;
-+ u64 user_ctx;
-+ struct dpseci_dest_cfg dest_cfg;
-+};
-+
-+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
-+
-+/**
-+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame
-+ * @order_preservation_en: Status of the order preservation configuration on the
-+ * queue
-+ * @dest_cfg: Queue destination configuration
-+ * @fqid: Virtual FQID value to be used for dequeue operations
-+ */
-+struct dpseci_rx_queue_attr {
-+ u64 user_ctx;
-+ int order_preservation_en;
-+ struct dpseci_dest_cfg dest_cfg;
-+ u32 fqid;
-+};
-+
-+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 queue, struct dpseci_rx_queue_attr *attr);
-+
-+/**
-+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
-+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
-+ * @priority: SEC hardware processing priority for the queue
-+ */
-+struct dpseci_tx_queue_attr {
-+ u32 fqid;
-+ u8 priority;
-+};
-+
-+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ u8 queue, struct dpseci_tx_queue_attr *attr);
-+
-+/**
-+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
-+ * hardware accelerator
-+ * @ip_id: ID for SEC
-+ * @major_rev: Major revision number for SEC
-+ * @minor_rev: Minor revision number for SEC
-+ * @era: SEC Era
-+ * @deco_num: The number of copies of the DECO that are implemented in this
-+ * version of SEC
-+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
-+ * version of SEC
-+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
-+ * version of SEC
-+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
-+ * implemented in this version of SEC
-+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
-+ * implemented in this version of SEC
-+ * @crc_acc_num: The number of copies of the CRC module that are implemented in
-+ * this version of SEC
-+ * @pk_acc_num: The number of copies of the Public Key module that are
-+ * implemented in this version of SEC
-+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
-+ * implemented in this version of SEC
-+ * @rng_acc_num: The number of copies of the Random Number Generator that are
-+ * implemented in this version of SEC
-+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
-+ * implemented in this version of SEC
-+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
-+ * in this version of SEC
-+ * @des_acc_num: The number of copies of the DES module that are implemented in
-+ * this version of SEC
-+ * @aes_acc_num: The number of copies of the AES module that are implemented in
-+ * this version of SEC
-+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
-+ * implemented in this version of SEC.
-+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
-+ * implemented in this version of SEC.
-+ **/
-+struct dpseci_sec_attr {
-+ u16 ip_id;
-+ u8 major_rev;
-+ u8 minor_rev;
-+ u8 era;
-+ u8 deco_num;
-+ u8 zuc_auth_acc_num;
-+ u8 zuc_enc_acc_num;
-+ u8 snow_f8_acc_num;
-+ u8 snow_f9_acc_num;
-+ u8 crc_acc_num;
-+ u8 pk_acc_num;
-+ u8 kasumi_acc_num;
-+ u8 rng_acc_num;
-+ u8 md_acc_num;
-+ u8 arc4_acc_num;
-+ u8 des_acc_num;
-+ u8 aes_acc_num;
-+ u8 ccha_acc_num;
-+ u8 ptha_acc_num;
-+};
-+
-+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ struct dpseci_sec_attr *attr);
-+
-+/**
-+ * struct dpseci_sec_counters - Structure representing global SEC counters
-+ * (not per-DPSECI counters)
-+ * @dequeued_requests: Number of Requests Dequeued
-+ * @ob_enc_requests: Number of Outbound Encrypt Requests
-+ * @ib_dec_requests: Number of Inbound Decrypt Requests
-+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
-+ * @ob_prot_bytes: Number of Outbound Bytes Protected
-+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
-+ * @ib_valid_bytes: Number of Inbound Bytes Validated
-+ */
-+struct dpseci_sec_counters {
-+ u64 dequeued_requests;
-+ u64 ob_enc_requests;
-+ u64 ib_dec_requests;
-+ u64 ob_enc_bytes;
-+ u64 ob_prot_bytes;
-+ u64 ib_dec_bytes;
-+ u64 ib_valid_bytes;
-+};
-+
-+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
-+ struct dpseci_sec_counters *counters);
-+
-+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
-+ u16 *major_ver, u16 *minor_ver);
-+
-+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
-+ u8 options, struct opr_cfg *cfg);
-+
-+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
-+ struct opr_cfg *cfg, struct opr_qry *qry);
-+
-+/**
-+ * enum dpseci_congestion_unit - DPSECI congestion units
-+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
-+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
-+ */
-+enum dpseci_congestion_unit {
-+ DPSECI_CONGESTION_UNIT_BYTES = 0,
-+ DPSECI_CONGESTION_UNIT_FRAMES
-+};
-+
-+/**
-+ * CSCN message is written to message_iova once entering a
-+ * congestion state (see 'threshold_entry')
-+ */
-+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
-+
-+/**
-+ * CSCN message is written to message_iova once exiting a
-+ * congestion state (see 'threshold_exit')
-+ */
-+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
-+
-+/**
-+ * CSCN write will attempt to allocate into a cache (coherent write);
-+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
-+ */
-+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
-+
-+/**
-+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once entering a congestion state
-+ * (see 'threshold_entry')
-+ */
-+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
-+
-+/**
-+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once exiting a congestion state
-+ * (see 'threshold_exit')
-+ */
-+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
-+
-+/**
-+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
-+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
-+ * (if enabled)
-+ */
-+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
-+
-+/**
-+ * struct dpseci_congestion_notification_cfg - congestion notification
-+ * configuration
-+ * @units: units type
-+ * @threshold_entry: above this threshold we enter a congestion state.
-+ * set it to '0' to disable it
-+ * @threshold_exit: below this threshold we exit the congestion state.
-+ * @message_ctx: The context that will be part of the CSCN message
-+ * @message_iova: I/O virtual address (must be in DMA-able memory),
-+ * must be 16B aligned;
-+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
-+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
-+ * values
-+ */
-+struct dpseci_congestion_notification_cfg {
-+ enum dpseci_congestion_unit units;
-+ u32 threshold_entry;
-+ u32 threshold_exit;
-+ u64 message_ctx;
-+ u64 message_iova;
-+ struct dpseci_dest_cfg dest_cfg;
-+ u16 notification_mode;
-+};
-+
-+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
-+ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
-+
-+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
-+ u16 token, struct dpseci_congestion_notification_cfg *cfg);
-+
-+#endif /* _DPSECI_H_ */
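For orientation, a minimal usage sketch of the dpseci_*() API declared above (illustration only, not part of the patch; it assumes an fsl-mc probe supplying 'mc_io' and 'dpseci_id', leaves cmd_flags at 0, and trims most error handling):

static int example_dpseci_setup(struct fsl_mc_io *mc_io, int dpseci_id,
				u16 *token)
{
	struct dpseci_attr attr;
	struct dpseci_rx_queue_cfg rx_cfg = { 0 };
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, token);
	if (err)
		return err;

	err = dpseci_get_attributes(mc_io, 0, *token, &attr);
	if (err)
		goto err_close;

	/* Park all Rx queues; each dequeued FD carries back user_ctx */
	rx_cfg.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX;
	rx_cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
	rx_cfg.user_ctx = 0x1234;
	err = dpseci_set_rx_queue(mc_io, 0, *token, DPSECI_ALL_QUEUES, &rx_cfg);
	if (err)
		goto err_close;

	err = dpseci_enable(mc_io, 0, *token);
	if (err)
		goto err_close;

	return 0;

err_close:
	dpseci_close(mc_io, 0, *token);
	return err;
}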
---- /dev/null
-+++ b/drivers/crypto/caam/dpseci_cmd.h
-@@ -0,0 +1,287 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the names of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef _DPSECI_CMD_H_
-+#define _DPSECI_CMD_H_
-+
-+/* DPSECI Version */
-+#define DPSECI_VER_MAJOR 5
-+#define DPSECI_VER_MINOR 3
-+
-+#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
-+#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
-+
-+/* Command versioning */
-+#define DPSECI_CMD_BASE_VERSION 1
-+#define DPSECI_CMD_BASE_VERSION_V2 2
-+#define DPSECI_CMD_BASE_VERSION_V3 3
-+#define DPSECI_CMD_ID_OFFSET 4
-+
-+#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
-+ DPSECI_CMD_BASE_VERSION)
-+
-+#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
-+ DPSECI_CMD_BASE_VERSION_V2)
-+
-+#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
-+ DPSECI_CMD_BASE_VERSION_V3)
-+
-+/* Command IDs */
-+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
-+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
-+#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
-+#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
-+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
-+
-+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
-+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
-+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
-+#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
-+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
-+
-+#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012)
-+#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013)
-+#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014)
-+#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015)
-+#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016)
-+#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017)
-+
-+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
-+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
-+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
-+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
-+#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
-+#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
-+#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
-+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
-+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
-+
-+/* Macros for accessing command fields smaller than 1 byte */
-+#define DPSECI_MASK(field) \
-+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
-+ DPSECI_##field##_SHIFT)
-+
-+#define dpseci_set_field(var, field, val) \
-+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
-+
-+#define dpseci_get_field(var, field) \
-+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
-+
-+struct dpseci_cmd_open {
-+ __le32 dpseci_id;
-+};
-+
-+struct dpseci_cmd_create {
-+ u8 priorities[8];
-+ u8 num_tx_queues;
-+ u8 num_rx_queues;
-+ u8 pad0[6];
-+ __le32 options;
-+ __le32 pad1;
-+ u8 priorities2[8];
-+};
-+
-+struct dpseci_cmd_destroy {
-+ __le32 object_id;
-+};
-+
-+#define DPSECI_ENABLE_SHIFT 0
-+#define DPSECI_ENABLE_SIZE 1
-+
-+struct dpseci_rsp_is_enabled {
-+ u8 is_enabled;
-+};
-+
-+struct dpseci_cmd_irq_enable {
-+ u8 enable_state;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
-+
-+struct dpseci_rsp_get_irq_enable {
-+ u8 enable_state;
-+};
-+
-+struct dpseci_cmd_irq_mask {
-+ __le32 mask;
-+ u8 irq_index;
-+};
-+
-+struct dpseci_cmd_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
-+
-+struct dpseci_rsp_get_attributes {
-+ __le32 id;
-+ __le32 pad0;
-+ u8 num_tx_queues;
-+ u8 num_rx_queues;
-+ u8 pad1[6];
-+ __le32 options;
-+};
-+
-+#define DPSECI_DEST_TYPE_SHIFT 0
-+#define DPSECI_DEST_TYPE_SIZE 4
-+
-+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
-+#define DPSECI_ORDER_PRESERVATION_SIZE 1
-+
-+struct dpseci_cmd_queue {
-+ __le32 dest_id;
-+ u8 priority;
-+ u8 queue;
-+ u8 dest_type;
-+ u8 pad;
-+ __le64 user_ctx;
-+ union {
-+ __le32 options;
-+ __le32 fqid;
-+ };
-+ u8 order_preservation_en;
-+};
-+
-+struct dpseci_rsp_get_tx_queue {
-+ __le32 pad;
-+ __le32 fqid;
-+ u8 priority;
-+};
-+
-+struct dpseci_rsp_get_sec_attr {
-+ __le16 ip_id;
-+ u8 major_rev;
-+ u8 minor_rev;
-+ u8 era;
-+ u8 pad0[3];
-+ u8 deco_num;
-+ u8 zuc_auth_acc_num;
-+ u8 zuc_enc_acc_num;
-+ u8 pad1;
-+ u8 snow_f8_acc_num;
-+ u8 snow_f9_acc_num;
-+ u8 crc_acc_num;
-+ u8 pad2;
-+ u8 pk_acc_num;
-+ u8 kasumi_acc_num;
-+ u8 rng_acc_num;
-+ u8 pad3;
-+ u8 md_acc_num;
-+ u8 arc4_acc_num;
-+ u8 des_acc_num;
-+ u8 aes_acc_num;
-+ u8 ccha_acc_num;
-+ u8 ptha_acc_num;
-+};
-+
-+struct dpseci_rsp_get_sec_counters {
-+ __le64 dequeued_requests;
-+ __le64 ob_enc_requests;
-+ __le64 ib_dec_requests;
-+ __le64 ob_enc_bytes;
-+ __le64 ob_prot_bytes;
-+ __le64 ib_dec_bytes;
-+ __le64 ib_valid_bytes;
-+};
-+
-+struct dpseci_rsp_get_api_version {
-+ __le16 major;
-+ __le16 minor;
-+};
-+
-+struct dpseci_cmd_opr {
-+ __le16 pad;
-+ u8 index;
-+ u8 options;
-+ u8 pad1[7];
-+ u8 oloe;
-+ u8 oeane;
-+ u8 olws;
-+ u8 oa;
-+ u8 oprrws;
-+};
-+
-+#define DPSECI_OPR_RIP_SHIFT 0
-+#define DPSECI_OPR_RIP_SIZE 1
-+#define DPSECI_OPR_ENABLE_SHIFT 1
-+#define DPSECI_OPR_ENABLE_SIZE 1
-+#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0
-+#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
-+#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0
-+#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
-+
-+struct dpseci_rsp_get_opr {
-+ __le64 pad;
-+ u8 flags;
-+ u8 pad0[2];
-+ u8 oloe;
-+ u8 oeane;
-+ u8 olws;
-+ u8 oa;
-+ u8 oprrws;
-+ __le16 nesn;
-+ __le16 pad1;
-+ __le16 ndsn;
-+ __le16 pad2;
-+ __le16 ea_tseq;
-+ u8 tseq_nlis;
-+ u8 pad3;
-+ __le16 ea_hseq;
-+ u8 hseq_nlis;
-+ u8 pad4;
-+ __le16 ea_hptr;
-+ __le16 pad5;
-+ __le16 ea_tptr;
-+ __le16 pad6;
-+ __le16 opr_vid;
-+ __le16 pad7;
-+ __le16 opr_id;
-+};
-+
-+#define DPSECI_CGN_DEST_TYPE_SHIFT 0
-+#define DPSECI_CGN_DEST_TYPE_SIZE 4
-+#define DPSECI_CGN_UNITS_SHIFT 4
-+#define DPSECI_CGN_UNITS_SIZE 2
-+
-+struct dpseci_cmd_congestion_notification {
-+ __le32 dest_id;
-+ __le16 notification_mode;
-+ u8 priority;
-+ u8 options;
-+ __le64 message_iova;
-+ __le64 message_ctx;
-+ __le32 threshold_entry;
-+ __le32 threshold_exit;
-+};
-+
-+#endif /* _DPSECI_CMD_H_ */
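A worked example of the sub-byte field helpers defined above (illustration only, not part of the patch): with DPSECI_DEST_TYPE_SHIFT = 0 and DPSECI_DEST_TYPE_SIZE = 4, DPSECI_MASK(DEST_TYPE) expands to GENMASK(3, 0), so the accessors pack and unpack the destination type in the low nibble:

static void example_dest_type_field(void)
{
	u8 dest_type_byte = 0;

	/* Packs DPSECI_DEST_DPIO (1) into bits 3:0 of dest_type_byte */
	dpseci_set_field(dest_type_byte, DEST_TYPE, DPSECI_DEST_DPIO);

	/* Reads the same nibble back out */
	WARN_ON(dpseci_get_field(dest_type_byte, DEST_TYPE) != DPSECI_DEST_DPIO);
}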
---- a/drivers/crypto/caam/error.c
-+++ b/drivers/crypto/caam/error.c
-@@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, con
- #endif /* DEBUG */
- EXPORT_SYMBOL(caam_dump_sg);
-
-+bool caam_little_end;
-+EXPORT_SYMBOL(caam_little_end);
-+
-+bool caam_imx;
-+EXPORT_SYMBOL(caam_imx);
-+
- static const struct {
- u8 value;
- const char *error_text;
-@@ -108,6 +114,54 @@ static const struct {
- { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
- };
-
-+static const struct {
-+ u8 value;
-+ const char *error_text;
-+} qi_error_list[] = {
-+ { 0x1F, "Job terminated by FQ or ICID flush" },
-+ { 0x20, "FD format error"},
-+ { 0x21, "FD command format error"},
-+ { 0x23, "FL format error"},
-+ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
-+ { 0x30, "Max. buffer size too small"},
-+ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
-+ { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
-+ { 0x33, "Size over/underflow (allocate mode)"},
-+ { 0x34, "Size over/underflow (reuse mode)"},
-+ { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
-+ { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
-+ { 0x41, "SBC frame format not supported (allocate mode)"},
-+ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
-+ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
-+ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
-+ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
-+ { 0x46, "Annotation length exceeds offset (reuse mode)"},
-+ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
-+ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
-+ { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
-+ { 0x51, "Unsupported IF reuse mode"},
-+ { 0x52, "Unsupported FL use mode"},
-+ { 0x53, "Unsupported RJD use mode"},
-+ { 0x54, "Unsupported inline descriptor use mode"},
-+ { 0xC0, "Table buffer pool 0 depletion"},
-+ { 0xC1, "Table buffer pool 1 depletion"},
-+ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
-+ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
-+ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
-+ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
-+ { 0xD0, "FLC read error"},
-+ { 0xD1, "FL read error"},
-+ { 0xD2, "FL write error"},
-+ { 0xD3, "OF SGT write error"},
-+ { 0xD4, "PTA read error"},
-+ { 0xD5, "PTA write error"},
-+ { 0xD6, "OF SGT F-bit write error"},
-+ { 0xD7, "ASA write error"},
-+ { 0xE1, "FLC[ICR]=0 ICID error"},
-+ { 0xE2, "FLC[ICR]=1 ICID error"},
-+ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
-+};
-+
- static const char * const cha_id_list[] = {
- "",
- "AES",
-@@ -236,6 +290,27 @@ static void report_deco_status(struct de
- status, error, idx_str, idx, err_str, err_err_code);
- }
-
-+static void report_qi_status(struct device *qidev, const u32 status,
-+ const char *error)
-+{
-+ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
-+ const char *err_str = "unidentified error value 0x";
-+ char err_err_code[3] = { 0 };
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
-+ if (qi_error_list[i].value == err_id)
-+ break;
-+
-+ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
-+ err_str = qi_error_list[i].error_text;
-+ else
-+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
-+
-+ dev_err(qidev, "%08x: %s: %s%s\n",
-+ status, error, err_str, err_err_code);
-+}
-+
- static void report_jr_status(struct device *jrdev, const u32 status,
- const char *error)
- {
-@@ -250,7 +325,7 @@ static void report_cond_code_status(stru
- status, error, __func__);
- }
-
--void caam_jr_strstatus(struct device *jrdev, u32 status)
-+void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
- {
- static const struct stat_src {
- void (*report_ssed)(struct device *jrdev, const u32 status,
-@@ -262,7 +337,7 @@ void caam_jr_strstatus(struct device *jr
- { report_ccb_status, "CCB" },
- { report_jump_status, "Jump" },
- { report_deco_status, "DECO" },
-- { NULL, "Queue Manager Interface" },
-+ { report_qi_status, "Queue Manager Interface" },
- { report_jr_status, "Job Ring" },
- { report_cond_code_status, "Condition Code" },
- { NULL, NULL },
-@@ -288,4 +363,4 @@ void caam_jr_strstatus(struct device *jr
- else
- dev_err(jrdev, "%d: unknown error source\n", ssrc);
- }
--EXPORT_SYMBOL(caam_jr_strstatus);
-+EXPORT_SYMBOL(caam_strstatus);
---- a/drivers/crypto/caam/error.h
-+++ b/drivers/crypto/caam/error.h
-@@ -8,7 +8,11 @@
- #ifndef CAAM_ERROR_H
- #define CAAM_ERROR_H
- #define CAAM_ERROR_STR_MAX 302
--void caam_jr_strstatus(struct device *jrdev, u32 status);
-+
-+void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
-+
-+#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
-+#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
-
- void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
- int rowsize, int groupsize, struct scatterlist *sg,
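A hedged sketch of how the wrappers added above would typically be used (illustration only; it assumes the callback was registered via caam_jr_enqueue() with a struct completion passed as its context):

static void example_done_cb(struct device *jrdev, u32 *desc, u32 status,
			    void *context)
{
	if (status)
		caam_jr_strstatus(jrdev, status); /* i.e. caam_strstatus(..., false) */

	complete(context);
}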
---- a/drivers/crypto/caam/intern.h
-+++ b/drivers/crypto/caam/intern.h
-@@ -65,10 +65,6 @@ struct caam_drv_private_jr {
- * Driver-private storage for a single CAAM block instance
- */
- struct caam_drv_private {
--#ifdef CONFIG_CAAM_QI
-- struct device *qidev;
--#endif
--
- /* Physical-presence section */
- struct caam_ctrl __iomem *ctrl; /* controller region */
- struct caam_deco __iomem *deco; /* DECO/CCB views */
-@@ -76,14 +72,21 @@ struct caam_drv_private {
- struct caam_queue_if __iomem *qi; /* QI control region */
- struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
-
-+ struct iommu_domain *domain;
-+
- /*
- * Detected geometry block. Filled in from device tree if powerpc,
- * or from register-based version detection code
- */
- u8 total_jobrs; /* Total Job Rings in device */
- u8 qi_present; /* Nonzero if QI present in device */
-+#ifdef CONFIG_CAAM_QI
-+ u8 qi_init; /* Nonzero if QI has been initialized */
-+#endif
-+ u8 mc_en; /* Nonzero if MC f/w is active */
- int secvio_irq; /* Security violation interrupt number */
- int virt_en; /* Virtualization enabled in CAAM */
-+ int era; /* CAAM Era (internal HW revision) */
-
- #define RNG4_MAX_HANDLES 2
- /* RNG4 block */
-@@ -108,8 +111,95 @@ struct caam_drv_private {
- #endif
- };
-
--void caam_jr_algapi_init(struct device *dev);
--void caam_jr_algapi_remove(struct device *dev);
-+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
-+
-+int caam_algapi_init(struct device *dev);
-+void caam_algapi_exit(void);
-+
-+#else
-+
-+static inline int caam_algapi_init(struct device *dev)
-+{
-+ return 0;
-+}
-+
-+static inline void caam_algapi_exit(void)
-+{
-+}
-+
-+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
-+
-+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
-+
-+int caam_algapi_hash_init(struct device *dev);
-+void caam_algapi_hash_exit(void);
-+
-+#else
-+
-+static inline int caam_algapi_hash_init(struct device *dev)
-+{
-+ return 0;
-+}
-+
-+static inline void caam_algapi_hash_exit(void)
-+{
-+}
-+
-+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
-+
-+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
-+
-+int caam_pkc_init(struct device *dev);
-+void caam_pkc_exit(void);
-+
-+#else
-+
-+static inline int caam_pkc_init(struct device *dev)
-+{
-+ return 0;
-+}
-+
-+static inline void caam_pkc_exit(void)
-+{
-+}
-+
-+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
-+
-+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
-+
-+int caam_rng_init(struct device *dev);
-+void caam_rng_exit(void);
-+
-+#else
-+
-+static inline int caam_rng_init(struct device *dev)
-+{
-+ return 0;
-+}
-+
-+static inline void caam_rng_exit(void)
-+{
-+}
-+
-+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
-+
-+#ifdef CONFIG_CAAM_QI
-+
-+int caam_qi_algapi_init(struct device *dev);
-+void caam_qi_algapi_exit(void);
-+
-+#else
-+
-+static inline int caam_qi_algapi_init(struct device *dev)
-+{
-+ return 0;
-+}
-+
-+static inline void caam_qi_algapi_exit(void)
-+{
-+}
-+
-+#endif /* CONFIG_CAAM_QI */
-
- #ifdef CONFIG_DEBUG_FS
- static int caam_debugfs_u64_get(void *data, u64 *val)
---- a/drivers/crypto/caam/jr.c
-+++ b/drivers/crypto/caam/jr.c
-@@ -23,6 +23,52 @@ struct jr_driver_data {
-
- static struct jr_driver_data driver_data;
-
-+static int jr_driver_probed;
-+
-+int caam_jr_driver_probed(void)
-+{
-+ return jr_driver_probed;
-+}
-+EXPORT_SYMBOL(caam_jr_driver_probed);
-+
-+static DEFINE_MUTEX(algs_lock);
-+static unsigned int active_devs;
-+
-+static void register_algs(struct device *dev)
-+{
-+ mutex_lock(&algs_lock);
-+
-+ if (++active_devs != 1)
-+ goto algs_unlock;
-+
-+ caam_algapi_init(dev);
-+ caam_algapi_hash_init(dev);
-+ caam_pkc_init(dev);
-+ caam_rng_init(dev);
-+ caam_qi_algapi_init(dev);
-+
-+algs_unlock:
-+ mutex_unlock(&algs_lock);
-+}
-+
-+static void unregister_algs(void)
-+{
-+ mutex_lock(&algs_lock);
-+
-+ if (--active_devs != 0)
-+ goto algs_unlock;
-+
-+ caam_qi_algapi_exit();
-+
-+ caam_rng_exit();
-+ caam_pkc_exit();
-+ caam_algapi_hash_exit();
-+ caam_algapi_exit();
-+
-+algs_unlock:
-+ mutex_unlock(&algs_lock);
-+}
-+
- static int caam_reset_hw_jr(struct device *dev)
- {
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-@@ -108,6 +154,9 @@ static int caam_jr_remove(struct platfor
- return -EBUSY;
- }
-
-+ /* Unregister JR-based RNG & crypto algorithms */
-+ unregister_algs();
-+
- /* Remove the node from Physical JobR list maintained by driver */
- spin_lock(&driver_data.jr_alloc_lock);
- list_del(&jrpriv->list_node);
-@@ -119,6 +168,8 @@ static int caam_jr_remove(struct platfor
- dev_err(jrdev, "Failed to shut down job ring\n");
- irq_dispose_mapping(jrpriv->irq);
-
-+ jr_driver_probed--;
-+
- return ret;
- }
-
-@@ -282,6 +333,36 @@ struct device *caam_jr_alloc(void)
- EXPORT_SYMBOL(caam_jr_alloc);
-
- /**
-+ * caam_jridx_alloc() - Alloc a specific job ring based on its index.
-+ *
-+ * returns: pointer to the job ring device with the requested index if
-+ * successful, or an error pointer if that ring is not available.
-+ **/
-+struct device *caam_jridx_alloc(int idx)
-+{
-+ struct caam_drv_private_jr *jrpriv;
-+ struct device *dev = ERR_PTR(-ENODEV);
-+
-+ spin_lock(&driver_data.jr_alloc_lock);
-+
-+ if (list_empty(&driver_data.jr_list))
-+ goto end;
-+
-+ list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
-+ if (jrpriv->ridx == idx) {
-+ atomic_inc(&jrpriv->tfm_count);
-+ dev = jrpriv->dev;
-+ break;
-+ }
-+ }
-+
-+end:
-+ spin_unlock(&driver_data.jr_alloc_lock);
-+ return dev;
-+}
-+EXPORT_SYMBOL(caam_jridx_alloc);
-+
-+/**
- * caam_jr_free() - Free the Job Ring
- * @rdev - points to the dev that identifies the Job ring to
- * be released.
-@@ -539,6 +620,9 @@ static int caam_jr_probe(struct platform
-
- atomic_set(&jrpriv->tfm_count, 0);
-
-+ register_algs(jrdev->parent);
-+ jr_driver_probed++;
-+
- return 0;
- }
-
---- a/drivers/crypto/caam/jr.h
-+++ b/drivers/crypto/caam/jr.h
-@@ -9,7 +9,9 @@
- #define JR_H
-
- /* Prototypes for backend-level services exposed to APIs */
-+int caam_jr_driver_probed(void);
- struct device *caam_jr_alloc(void);
-+struct device *caam_jridx_alloc(int idx);
- void caam_jr_free(struct device *rdev);
- int caam_jr_enqueue(struct device *dev, u32 *desc,
- void (*cbk)(struct device *dev, u32 *desc, u32 status,
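A minimal sketch of the index-based allocator exported above (illustration only; the function name is hypothetical):

static struct device *example_grab_jr0(void)
{
	/* Request ring index 0 specifically, not the least-loaded ring */
	struct device *jrdev = caam_jridx_alloc(0);

	if (IS_ERR(jrdev))
		return NULL;

	/* ... caam_jr_enqueue(jrdev, desc, cbk, areq) as with caam_jr_alloc() ... */

	return jrdev;	/* balance with caam_jr_free(jrdev) when done */
}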
---- a/drivers/crypto/caam/key_gen.c
-+++ b/drivers/crypto/caam/key_gen.c
-@@ -11,36 +11,6 @@
- #include "desc_constr.h"
- #include "key_gen.h"
-
--/**
-- * split_key_len - Compute MDHA split key length for a given algorithm
-- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
-- * SHA224, SHA384, SHA512.
-- *
-- * Return: MDHA split key length
-- */
--static inline u32 split_key_len(u32 hash)
--{
-- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
-- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
-- u32 idx;
--
-- idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
--
-- return (u32)(mdpadlen[idx] * 2);
--}
--
--/**
-- * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
-- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
-- * SHA224, SHA384, SHA512.
-- *
-- * Return: MDHA split key pad length
-- */
--static inline u32 split_key_pad_len(u32 hash)
--{
-- return ALIGN(split_key_len(hash), 16);
--}
--
- void split_key_done(struct device *dev, u32 *desc, u32 err,
- void *context)
- {
---- a/drivers/crypto/caam/key_gen.h
-+++ b/drivers/crypto/caam/key_gen.h
-@@ -6,6 +6,36 @@
- *
- */
-
-+/**
-+ * split_key_len - Compute MDHA split key length for a given algorithm
-+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
-+ * SHA224, SHA384, SHA512.
-+ *
-+ * Return: MDHA split key length
-+ */
-+static inline u32 split_key_len(u32 hash)
-+{
-+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
-+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
-+ u32 idx;
-+
-+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
-+
-+ return (u32)(mdpadlen[idx] * 2);
-+}
-+
-+/**
-+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
-+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
-+ * SHA224, SHA384, SHA512.
-+ *
-+ * Return: MDHA split key pad length
-+ */
-+static inline u32 split_key_pad_len(u32 hash)
-+{
-+ return ALIGN(split_key_len(hash), 16);
-+}
-+
- struct split_key_result {
- struct completion completion;
- int err;
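A worked example of the helpers moved into key_gen.h above (illustration only; OP_ALG_ALGSEL_SHA1 is assumed to come from desc.h): SHA-1 sits at index 1 of mdpadlen, so split_key_len() yields 20 * 2 = 40 bytes and split_key_pad_len() rounds that up to the next 16-byte multiple, 48:

static void example_split_key_sizes(void)
{
	u32 keylen = split_key_len(OP_ALG_ALGSEL_SHA1);		/* 40 */
	u32 padlen = split_key_pad_len(OP_ALG_ALGSEL_SHA1);	/* 48 */

	WARN_ON(keylen != 40 || padlen != 48);
}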
---- a/drivers/crypto/caam/qi.c
-+++ b/drivers/crypto/caam/qi.c
-@@ -9,7 +9,7 @@
-
- #include <linux/cpumask.h>
- #include <linux/kthread.h>
--#include <soc/fsl/qman.h>
-+#include <linux/fsl_qman.h>
-
- #include "regs.h"
- #include "qi.h"
-@@ -58,11 +58,9 @@ static DEFINE_PER_CPU(int, last_cpu);
- /*
- * caam_qi_priv - CAAM QI backend private params
- * @cgr: QMan congestion group
-- * @qi_pdev: platform device for QI backend
- */
- struct caam_qi_priv {
- struct qman_cgr cgr;
-- struct platform_device *qi_pdev;
- };
-
- static struct caam_qi_priv qipriv ____cacheline_aligned;
-@@ -102,26 +100,34 @@ static int mod_init_cpu;
- */
- static struct kmem_cache *qi_cache;
-
-+static void *caam_iova_to_virt(struct iommu_domain *domain,
-+ dma_addr_t iova_addr)
-+{
-+ phys_addr_t phys_addr;
-+
-+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
-+
-+ return phys_to_virt(phys_addr);
-+}
-+
- int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
- {
- struct qm_fd fd;
-- dma_addr_t addr;
- int ret;
- int num_retries = 0;
-
-- qm_fd_clear_fd(&fd);
-- qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
--
-- addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
-+ fd.cmd = 0;
-+ fd.format = qm_fd_compound;
-+ fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
-+ fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
- DMA_BIDIRECTIONAL);
-- if (dma_mapping_error(qidev, addr)) {
-+ if (dma_mapping_error(qidev, fd.addr)) {
- dev_err(qidev, "DMA mapping error for QI enqueue request\n");
- return -EIO;
- }
-- qm_fd_addr_set64(&fd, addr);
-
- do {
-- ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
-+ ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
- if (likely(!ret))
- return 0;
-
-@@ -137,20 +143,21 @@ int caam_qi_enqueue(struct device *qidev
- EXPORT_SYMBOL(caam_qi_enqueue);
-
- static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
-- const union qm_mr_entry *msg)
-+ const struct qm_mr_entry *msg)
- {
- const struct qm_fd *fd;
- struct caam_drv_req *drv_req;
- struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
-+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
-
- fd = &msg->ern.fd;
-
-- if (qm_fd_get_format(fd) != qm_fd_compound) {
-+ if (fd->format != qm_fd_compound) {
- dev_err(qidev, "Non-compound FD from CAAM\n");
- return;
- }
-
-- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
-+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
- if (!drv_req) {
- dev_err(qidev,
- "Can't find original request for CAAM response\n");
-@@ -180,20 +187,22 @@ static struct qman_fq *create_caam_req_f
- req_fq->cb.fqs = NULL;
-
- ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
-- QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
-+ QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
-+ req_fq);
- if (ret) {
- dev_err(qidev, "Failed to create session req FQ\n");
- goto create_req_fq_fail;
- }
-
-- memset(&opts, 0, sizeof(opts));
-- opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
-- QM_INITFQ_WE_CONTEXTB |
-- QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
-- opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
-- qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
-- opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
-- qm_fqd_context_a_set64(&opts.fqd, hwdesc);
-+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
-+ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
-+ QM_INITFQ_WE_CGID;
-+ opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
-+ opts.fqd.dest.channel = qm_channel_caam;
-+ opts.fqd.dest.wq = 2;
-+ opts.fqd.context_b = qman_fq_fqid(rsp_fq);
-+ opts.fqd.context_a.hi = upper_32_bits(hwdesc);
-+ opts.fqd.context_a.lo = lower_32_bits(hwdesc);
- opts.fqd.cgid = qipriv.cgr.cgrid;
-
- ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
-@@ -207,7 +216,7 @@ static struct qman_fq *create_caam_req_f
- return req_fq;
-
- init_req_fq_fail:
-- qman_destroy_fq(req_fq);
-+ qman_destroy_fq(req_fq, 0);
- create_req_fq_fail:
- kfree(req_fq);
- return ERR_PTR(ret);
-@@ -275,7 +284,7 @@ empty_fq:
- if (ret)
- dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
-
-- qman_destroy_fq(fq);
-+ qman_destroy_fq(fq, 0);
- kfree(fq);
-
- return ret;
-@@ -292,7 +301,7 @@ static int empty_caam_fq(struct qman_fq
- if (ret)
- return ret;
-
-- if (!qm_mcr_np_get(&np, frm_cnt))
-+ if (!np.frm_cnt)
- break;
-
- msleep(20);
-@@ -495,7 +504,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
- int caam_qi_shutdown(struct device *qidev)
- {
- int i, ret;
-- struct caam_qi_priv *priv = dev_get_drvdata(qidev);
-+ struct caam_qi_priv *priv = &qipriv;
- const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
-
-@@ -528,7 +537,6 @@ int caam_qi_shutdown(struct device *qide
- /* Now that we're done with the CGRs, restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
-
-- platform_device_unregister(priv->qi_pdev);
- return ret;
- }
-
-@@ -572,22 +580,28 @@ static enum qman_cb_dqrr_result caam_rsp
- struct caam_drv_req *drv_req;
- const struct qm_fd *fd;
- struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
-- u32 status;
-+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
-
- if (caam_qi_napi_schedule(p, caam_napi))
- return qman_cb_dqrr_stop;
-
- fd = &dqrr->fd;
-- status = be32_to_cpu(fd->status);
-- if (unlikely(status))
-- dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
-+ if (unlikely(fd->status)) {
-+ u32 ssrc = fd->status & JRSTA_SSRC_MASK;
-+ u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
-
-- if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
-+ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
-+ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
-+ dev_err(qidev, "Error: %#x in CAAM response FD\n",
-+ fd->status);
-+ }
-+
-+ if (unlikely(fd->format != qm_fd_compound)) {
- dev_err(qidev, "Non-compound FD from CAAM\n");
- return qman_cb_dqrr_consume;
- }
-
-- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
-+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
- if (unlikely(!drv_req)) {
- dev_err(qidev,
- "Can't find original request for caam response\n");
-@@ -597,7 +611,7 @@ static enum qman_cb_dqrr_result caam_rsp
- dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
- sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
-
-- drv_req->cbk(drv_req, status);
-+ drv_req->cbk(drv_req, fd->status);
- return qman_cb_dqrr_consume;
- }
-
-@@ -621,17 +635,18 @@ static int alloc_rsp_fq_cpu(struct devic
- return -ENODEV;
- }
-
-- memset(&opts, 0, sizeof(opts));
-- opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
-- QM_INITFQ_WE_CONTEXTB |
-- QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
-- opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
-- QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
-- qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
-+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
-+ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
-+ QM_INITFQ_WE_CGID;
-+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
-+ QM_FQCTRL_CGE;
-+ opts.fqd.dest.channel = qman_affine_channel(cpu);
-+ opts.fqd.dest.wq = 3;
- opts.fqd.cgid = qipriv.cgr.cgrid;
- opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
- QM_STASHING_EXCL_DATA;
-- qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
-+ opts.fqd.context_a.stashing.data_cl = 1;
-+ opts.fqd.context_a.stashing.context_cl = 1;
-
- ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
- if (ret) {
-@@ -650,9 +665,8 @@ static int init_cgr(struct device *qidev
- {
- int ret;
- struct qm_mcc_initcgr opts;
-- const u64 cpus = *(u64 *)qman_affine_cpus();
-- const int num_cpus = hweight64(cpus);
-- const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
-+ const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
-+ MAX_RSP_FQ_BACKLOG_PER_CPU;
-
- ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
- if (ret) {
-@@ -662,8 +676,7 @@ static int init_cgr(struct device *qidev
-
- qipriv.cgr.cb = cgr_cb;
- memset(&opts, 0, sizeof(opts));
-- opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
-- QM_CGR_WE_MODE);
-+ opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
- opts.cgr.cscn_en = QM_CGR_EN;
- opts.cgr.mode = QMAN_CGR_MODE_FRAME;
- qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
-@@ -708,15 +721,10 @@ static void free_rsp_fqs(void)
- int caam_qi_init(struct platform_device *caam_pdev)
- {
- int err, i;
-- struct platform_device *qi_pdev;
- struct device *ctrldev = &caam_pdev->dev, *qidev;
- struct caam_drv_private *ctrlpriv;
- const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
-- static struct platform_device_info qi_pdev_info = {
-- .name = "caam_qi",
-- .id = PLATFORM_DEVID_NONE
-- };
-
- /*
- * QMAN requires CGRs to be removed from same CPU+portal from where it
-@@ -728,24 +736,13 @@ int caam_qi_init(struct platform_device
- mod_init_cpu = cpumask_first(cpus);
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
-- qi_pdev_info.parent = ctrldev;
-- qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
-- qi_pdev = platform_device_register_full(&qi_pdev_info);
-- if (IS_ERR(qi_pdev))
-- return PTR_ERR(qi_pdev);
-- set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
--
- ctrlpriv = dev_get_drvdata(ctrldev);
-- qidev = &qi_pdev->dev;
--
-- qipriv.qi_pdev = qi_pdev;
-- dev_set_drvdata(qidev, &qipriv);
-+ qidev = ctrldev;
-
- /* Initialize the congestion detection */
- err = init_cgr(qidev);
- if (err) {
- dev_err(qidev, "CGR initialization failed: %d\n", err);
-- platform_device_unregister(qi_pdev);
- return err;
- }
-
-@@ -754,7 +751,6 @@ int caam_qi_init(struct platform_device
- if (err) {
- dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
- free_rsp_fqs();
-- platform_device_unregister(qi_pdev);
- return err;
- }
-
-@@ -777,15 +773,11 @@ int caam_qi_init(struct platform_device
- napi_enable(irqtask);
- }
-
-- /* Hook up QI device to parent controlling caam device */
-- ctrlpriv->qidev = qidev;
--
- qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
- SLAB_CACHE_DMA, NULL);
- if (!qi_cache) {
- dev_err(qidev, "Can't allocate CAAM cache\n");
- free_rsp_fqs();
-- platform_device_unregister(qi_pdev);
- return -ENOMEM;
- }
-
-@@ -795,6 +787,8 @@ int caam_qi_init(struct platform_device
- debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
- &times_congested, &caam_fops_u64_ro);
- #endif
-+
-+ ctrlpriv->qi_init = 1;
- dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
- return 0;
- }
---- a/drivers/crypto/caam/qi.h
-+++ b/drivers/crypto/caam/qi.h
-@@ -9,7 +9,7 @@
- #ifndef __QI_H__
- #define __QI_H__
-
--#include <soc/fsl/qman.h>
-+#include <linux/fsl_qman.h>
- #include "compat.h"
- #include "desc.h"
- #include "desc_constr.h"
---- a/drivers/crypto/caam/regs.h
-+++ b/drivers/crypto/caam/regs.h
-@@ -3,6 +3,7 @@
- * CAAM hardware register-level view
- *
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
-+ * Copyright 2018 NXP
- */
-
- #ifndef REGS_H
-@@ -211,6 +212,47 @@ struct jr_outentry {
- u32 jrstatus; /* Status for completed descriptor */
- } __packed;
-
-+/* Version registers (Era 10+) e80-eff */
-+struct version_regs {
-+ u32 crca; /* CRCA_VERSION */
-+ u32 afha; /* AFHA_VERSION */
-+ u32 kfha; /* KFHA_VERSION */
-+ u32 pkha; /* PKHA_VERSION */
-+ u32 aesa; /* AESA_VERSION */
-+ u32 mdha; /* MDHA_VERSION */
-+ u32 desa; /* DESA_VERSION */
-+ u32 snw8a; /* SNW8A_VERSION */
-+ u32 snw9a; /* SNW9A_VERSION */
-+ u32 zuce; /* ZUCE_VERSION */
-+ u32 zuca; /* ZUCA_VERSION */
-+ u32 ccha; /* CCHA_VERSION */
-+ u32 ptha; /* PTHA_VERSION */
-+ u32 rng; /* RNG_VERSION */
-+ u32 trng; /* TRNG_VERSION */
-+ u32 aaha; /* AAHA_VERSION */
-+ u32 rsvd[10];
-+ u32 sr; /* SR_VERSION */
-+ u32 dma; /* DMA_VERSION */
-+ u32 ai; /* AI_VERSION */
-+ u32 qi; /* QI_VERSION */
-+ u32 jr; /* JR_VERSION */
-+ u32 deco; /* DECO_VERSION */
-+};
-+
-+/* Version registers bitfields */
-+
-+/* Number of CHAs instantiated */
-+#define CHA_VER_NUM_MASK 0xffull
-+/* CHA Miscellaneous Information */
-+#define CHA_VER_MISC_SHIFT 8
-+#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
-+/* CHA Revision Number */
-+#define CHA_VER_REV_SHIFT 16
-+#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
-+/* CHA Version ID */
-+#define CHA_VER_VID_SHIFT 24
-+#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
-+
- /*
- * caam_perfmon - Performance Monitor/Secure Memory Status/
- * CAAM Global Status/Component Version IDs
-@@ -223,15 +265,13 @@ struct jr_outentry {
- #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
-
- /*
-- * CHA version IDs / instantiation bitfields
-+ * CHA version IDs / instantiation bitfields (< Era 10)
- * Defined for use with the cha_id fields in perfmon, but the same shift/mask
- * selectors can be used to pull out the number of instantiated blocks within
- * cha_num fields in perfmon because the locations are the same.
- */
- #define CHA_ID_LS_AES_SHIFT 0
- #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
--#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
--#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
-
- #define CHA_ID_LS_DES_SHIFT 4
- #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
-@@ -241,9 +281,6 @@ struct jr_outentry {
-
- #define CHA_ID_LS_MD_SHIFT 12
- #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
--#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
--#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
--#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
-
- #define CHA_ID_LS_RNG_SHIFT 16
- #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
-@@ -269,6 +306,13 @@ struct jr_outentry {
- #define CHA_ID_MS_JR_SHIFT 28
- #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
-
-+/* Specific CHA version IDs */
-+#define CHA_VER_VID_AES_LP 0x3ull
-+#define CHA_VER_VID_AES_HP 0x4ull
-+#define CHA_VER_VID_MD_LP256 0x0ull
-+#define CHA_VER_VID_MD_LP512 0x1ull
-+#define CHA_VER_VID_MD_HP 0x2ull
-+
- struct sec_vid {
- u16 ip_id;
- u8 maj_rev;
-@@ -473,8 +517,10 @@ struct caam_ctrl {
- struct rng4tst r4tst[2];
- };
-
-- u32 rsvd9[448];
-+ u32 rsvd9[416];
-
-+ /* Version registers - introduced with era 10 e80-eff */
-+ struct version_regs vreg;
- /* Performance Monitor f00-fff */
- struct caam_perfmon perfmon;
- };
-@@ -564,8 +610,10 @@ struct caam_job_ring {
- u32 rsvd11;
- u32 jrcommand; /* JRCRx - JobR command */
-
-- u32 rsvd12[932];
-+ u32 rsvd12[900];
-
-+ /* Version registers - introduced with era 10 e80-eff */
-+ struct version_regs vreg;
- /* Performance Monitor f00-fff */
- struct caam_perfmon perfmon;
- };
-@@ -627,6 +675,8 @@ struct caam_job_ring {
- #define JRSTA_DECOERR_INVSIGN 0x86
- #define JRSTA_DECOERR_DSASIGN 0x87
-
-+#define JRSTA_QIERR_ERROR_MASK 0x00ff
-+
- #define JRSTA_CCBERR_JUMP 0x08000000
- #define JRSTA_CCBERR_INDEX_MASK 0xff00
- #define JRSTA_CCBERR_INDEX_SHIFT 8
-@@ -870,13 +920,19 @@ struct caam_deco {
- u32 rsvd29[48];
- u32 descbuf[64]; /* DxDESB - Descriptor buffer */
- u32 rscvd30[193];
--#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
- #define DESC_DBG_DECO_STAT_VALID 0x80000000
- #define DESC_DBG_DECO_STAT_MASK 0x00F00000
-+#define DESC_DBG_DECO_STAT_SHIFT 20
- u32 desc_dbg; /* DxDDR - DECO Debug Register */
-- u32 rsvd31[126];
-+ u32 rsvd31[13];
-+#define DESC_DER_DECO_STAT_MASK 0x000F0000
-+#define DESC_DER_DECO_STAT_SHIFT 16
-+ u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
-+ u32 rsvd32[112];
- };
-
-+#define DECO_STAT_HOST_ERR 0xD
-+
- #define DECO_JQCR_WHL 0x20000000
- #define DECO_JQCR_FOUR 0x10000000
-
---- a/drivers/crypto/caam/sg_sw_qm.h
-+++ b/drivers/crypto/caam/sg_sw_qm.h
-@@ -34,46 +34,61 @@
- #ifndef __SG_SW_QM_H
- #define __SG_SW_QM_H
-
--#include <soc/fsl/qman.h>
-+#include <linux/fsl_qman.h>
- #include "regs.h"
-
-+static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
-+{
-+ dma_addr_t addr = qm_sg_ptr->opaque;
-+
-+ qm_sg_ptr->opaque = cpu_to_caam64(addr);
-+ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
-+}
-+
- static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
-- u16 offset)
-+ u32 len, u16 offset)
- {
-- qm_sg_entry_set64(qm_sg_ptr, dma);
-+ qm_sg_ptr->addr = dma;
-+ qm_sg_ptr->length = len;
- qm_sg_ptr->__reserved2 = 0;
- qm_sg_ptr->bpid = 0;
-- qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
-+ qm_sg_ptr->__reserved3 = 0;
-+ qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
-+
-+ cpu_to_hw_sg(qm_sg_ptr);
- }
-
- static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len, u16 offset)
- {
-- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-- qm_sg_entry_set_len(qm_sg_ptr, len);
-+ qm_sg_ptr->extension = 0;
-+ qm_sg_ptr->final = 0;
-+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len, u16 offset)
- {
-- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-- qm_sg_entry_set_f(qm_sg_ptr, len);
-+ qm_sg_ptr->extension = 0;
-+ qm_sg_ptr->final = 1;
-+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len, u16 offset)
- {
-- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-- qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
-+ qm_sg_ptr->extension = 1;
-+ qm_sg_ptr->final = 0;
-+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
- dma_addr_t dma, u32 len,
- u16 offset)
- {
-- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-- qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
-- (len & QM_SG_LEN_MASK));
-+ qm_sg_ptr->extension = 1;
-+ qm_sg_ptr->final = 1;
-+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
- }
-
- /*
-@@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
- struct qm_sg_entry *qm_sg_ptr, u16 offset)
- {
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
-- qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
-+
-+ qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
-+ qm_sg_ptr->final = 1;
-+ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
- }
-
- #endif /* __SG_SW_QM_H */
---- a/drivers/crypto/talitos.c
-+++ b/drivers/crypto/talitos.c
-@@ -1250,6 +1250,14 @@ static int ipsec_esp(struct talitos_edes
- ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
- sg_count, areq->assoclen, tbl_off, elen);
-
-+ /*
-+ * In case of SEC 2.x+, cipher in len must include only the ciphertext,
-+ * while extent is used for ICV len.
-+ */
-+ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
-+ (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
-+ desc->ptr[4].len = cpu_to_be16(cryptlen);
-+
- if (ret > 1) {
- tbl_off += ret;
- sync_needed = true;
---- a/include/crypto/chacha20.h
-+++ b/include/crypto/chacha20.h
-@@ -13,6 +13,7 @@
- #define CHACHA20_IV_SIZE 16
- #define CHACHA20_KEY_SIZE 32
- #define CHACHA20_BLOCK_SIZE 64
-+#define CHACHAPOLY_IV_SIZE 12
-
- struct chacha20_ctx {
- u32 key[8];
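For orientation while reading the removed regs.h hunk above: the new CHA_VER_* masks split each Era-10 version register into instantiation-count, miscellaneous, revision and version-ID fields. A minimal user-space sketch of that decomposition follows; the register value and the program itself are illustrative only and are not part of the removed patch.

/*
 * Sketch only: decompose one 32-bit CHA version register (e.g. AESA_VERSION)
 * using the CHA_VER_* masks from the deleted regs.h hunk. The sample value
 * below is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define CHA_VER_NUM_MASK   0xffull
#define CHA_VER_MISC_SHIFT 8
#define CHA_VER_MISC_MASK  (0xffull << CHA_VER_MISC_SHIFT)
#define CHA_VER_REV_SHIFT  16
#define CHA_VER_REV_MASK   (0xffull << CHA_VER_REV_SHIFT)
#define CHA_VER_VID_SHIFT  24
#define CHA_VER_VID_MASK   (0xffull << CHA_VER_VID_SHIFT)

int main(void)
{
    uint32_t aesa = 0x04010002;  /* hypothetical AESA_VERSION value */

    printf("instances: %llu\n", aesa & CHA_VER_NUM_MASK);
    printf("misc:      %llu\n", (aesa & CHA_VER_MISC_MASK) >> CHA_VER_MISC_SHIFT);
    printf("revision:  %llu\n", (aesa & CHA_VER_REV_MASK) >> CHA_VER_REV_SHIFT);
    printf("version:   %llu\n", (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT);
    return 0;
}

With this sample value the version-ID field reads 0x4, which the removed defines would classify as CHA_VER_VID_AES_HP.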
diff --git a/target/linux/layerscape/patches-4.14/821-smmu-support-layerscape.patch b/target/linux/layerscape/patches-4.14/821-smmu-support-layerscape.patch
deleted file mode 100644
index 931e09a9fa..0000000000
--- a/target/linux/layerscape/patches-4.14/821-smmu-support-layerscape.patch
+++ /dev/null
@@ -1,506 +0,0 @@
-From 05375244c8e74f90239db92646a771905fdfc0db Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:28:22 +0800
-Subject: [PATCH 38/40] smmu: support layerscape
-This is an integrated patch of smmu for layerscape
-
-Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
-Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- .../devicetree/bindings/misc/fsl,qoriq-mc.txt | 39 ++++++
- drivers/iommu/arm-smmu.c | 7 +
- drivers/iommu/iommu.c | 21 +++
- drivers/iommu/of_iommu.c | 126 +++++++++++++++++-
- drivers/of/irq.c | 6 +-
- drivers/of/of_pci.c | 101 --------------
- include/linux/iommu.h | 2 +
- include/linux/of_iommu.h | 10 ++
- include/linux/of_pci.h | 10 --
- 9 files changed, 205 insertions(+), 117 deletions(-)
-
---- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
-+++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
-@@ -9,6 +9,25 @@ blocks that can be used to create functi
- such as network interfaces, crypto accelerator instances, L2 switches,
- etc.
-
-+For an overview of the DPAA2 architecture and fsl-mc bus see:
-+drivers/staging/fsl-mc/README.txt
-+
-+As described in the above overview, all DPAA2 objects in a DPRC share the
-+same hardware "isolation context" and a 10-bit value called an ICID
-+(isolation context id) is expressed by the hardware to identify
-+the requester.
-+
-+The generic 'iommus' property is insufficient to describe the relationship
-+between ICIDs and IOMMUs, so an iommu-map property is used to define
-+the set of possible ICIDs under a root DPRC and how they map to
-+an IOMMU.
-+
-+For generic IOMMU bindings, see
-+Documentation/devicetree/bindings/iommu/iommu.txt.
-+
-+For arm-smmu binding, see:
-+Documentation/devicetree/bindings/iommu/arm,smmu.txt.
-+
- Required properties:
-
- - compatible
-@@ -88,14 +107,34 @@ Sub-nodes:
- Value type: <phandle>
- Definition: Specifies the phandle to the PHY device node associated
- with the this dpmac.
-+Optional properties:
-+
-+- iommu-map: Maps an ICID to an IOMMU and associated iommu-specifier
-+ data.
-+
-+ The property is an arbitrary number of tuples of
-+ (icid-base,iommu,iommu-base,length).
-+
-+ Any ICID i in the interval [icid-base, icid-base + length) is
-+ associated with the listed IOMMU, with the iommu-specifier
-+ (i - icid-base + iommu-base).
-
- Example:
-
-+ smmu: iommu@5000000 {
-+ compatible = "arm,mmu-500";
-+ #iommu-cells = <2>;
-+ stream-match-mask = <0x7C00>;
-+ ...
-+ };
-+
- fsl_mc: fsl-mc@80c000000 {
- compatible = "fsl,qoriq-mc";
- reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
- <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
- msi-parent = <&its>;
-+ /* define map for ICIDs 23-64 */
-+ iommu-map = <23 &smmu 23 41>;
- #address-cells = <3>;
- #size-cells = <1>;
-
---- a/drivers/iommu/arm-smmu.c
-+++ b/drivers/iommu/arm-smmu.c
-@@ -52,6 +52,7 @@
- #include <linux/spinlock.h>
-
- #include <linux/amba/bus.h>
-+#include <linux/fsl/mc.h>
-
- #include "io-pgtable.h"
- #include "arm-smmu-regs.h"
-@@ -1474,6 +1475,8 @@ static struct iommu_group *arm_smmu_devi
-
- if (dev_is_pci(dev))
- group = pci_device_group(dev);
-+ else if (dev_is_fsl_mc(dev))
-+ group = fsl_mc_device_group(dev);
- else
- group = generic_device_group(dev);
-
-@@ -2052,6 +2055,10 @@ static void arm_smmu_bus_init(void)
- bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
- }
- #endif
-+#ifdef CONFIG_FSL_MC_BUS
-+ if (!iommu_present(&fsl_mc_bus_type))
-+ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
-+#endif
- }
-
- static int arm_smmu_device_probe(struct platform_device *pdev)
---- a/drivers/iommu/iommu.c
-+++ b/drivers/iommu/iommu.c
-@@ -33,6 +33,7 @@
- #include <linux/bitops.h>
- #include <linux/property.h>
- #include <trace/events/iommu.h>
-+#include <linux/fsl/mc.h>
-
- static struct kset *iommu_group_kset;
- static DEFINE_IDA(iommu_group_ida);
-@@ -990,6 +991,26 @@ struct iommu_group *pci_device_group(str
- return iommu_group_alloc();
- }
-
-+/* Get the IOMMU group for device on fsl-mc bus */
-+struct iommu_group *fsl_mc_device_group(struct device *dev)
-+{
-+ struct device *cont_dev = fsl_mc_cont_dev(dev);
-+ struct iommu_group *group;
-+
-+ /* Container device is responsible for creating the iommu group */
-+ if (fsl_mc_is_cont_dev(dev)) {
-+ group = iommu_group_alloc();
-+ if (IS_ERR(group))
-+ return NULL;
-+ } else {
-+ get_device(cont_dev);
-+ group = iommu_group_get(cont_dev);
-+ put_device(cont_dev);
-+ }
-+
-+ return group;
-+}
-+
- /**
- * iommu_group_get_for_dev - Find or create the IOMMU group for a device
- * @dev: target device
---- a/drivers/iommu/of_iommu.c
-+++ b/drivers/iommu/of_iommu.c
-@@ -24,6 +24,7 @@
- #include <linux/of_iommu.h>
- #include <linux/of_pci.h>
- #include <linux/slab.h>
-+#include <linux/fsl/mc.h>
-
- #define NO_IOMMU 1
-
-@@ -143,15 +144,115 @@ struct of_pci_iommu_alias_info {
- struct device_node *np;
- };
-
-+/**
-+ * of_map_rid - Translate a requester ID through a downstream mapping.
-+ * @np: root complex device node.
-+ * @rid: Requester ID to map.
-+ * @map_name: property name of the map to use.
-+ * @map_mask_name: optional property name of the mask to use.
-+ * @target: optional pointer to a target device node.
-+ * @id_out: optional pointer to receive the translated ID.
-+ *
-+ * Given PCI/MC requester ID, look up the appropriate implementation-defined
-+ * platform ID and/or the target device which receives transactions on that
-+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
-+ * @id_out may be NULL if only the other is required. If @target points to
-+ * a non-NULL device node pointer, only entries targeting that node will be
-+ * matched; if it points to a NULL value, it will receive the device node of
-+ * the first matching target phandle, with a reference held.
-+ *
-+ * Return: 0 on success or a standard error code on failure.
-+ */
-+int of_map_rid(struct device_node *np, u32 rid,
-+ const char *map_name, const char *map_mask_name,
-+ struct device_node **target, u32 *id_out)
-+{
-+ u32 map_mask, masked_rid;
-+ int map_len;
-+ const __be32 *map = NULL;
-+
-+ if (!np || !map_name || (!target && !id_out))
-+ return -EINVAL;
-+
-+ map = of_get_property(np, map_name, &map_len);
-+ if (!map) {
-+ if (target)
-+ return -ENODEV;
-+ /* Otherwise, no map implies no translation */
-+ *id_out = rid;
-+ return 0;
-+ }
-+
-+ if (!map_len || map_len % (4 * sizeof(*map))) {
-+ pr_err("%pOF: Error: Bad %s length: %d\n", np,
-+ map_name, map_len);
-+ return -EINVAL;
-+ }
-+
-+ /* The default is to select all bits. */
-+ map_mask = 0xffffffff;
-+
-+ /*
-+ * Can be overridden by "{iommu,msi}-map-mask" property.
-+ * If of_property_read_u32() fails, the default is used.
-+ */
-+ if (map_mask_name)
-+ of_property_read_u32(np, map_mask_name, &map_mask);
-+
-+ masked_rid = map_mask & rid;
-+ for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
-+ struct device_node *phandle_node;
-+ u32 rid_base = be32_to_cpup(map + 0);
-+ u32 phandle = be32_to_cpup(map + 1);
-+ u32 out_base = be32_to_cpup(map + 2);
-+ u32 rid_len = be32_to_cpup(map + 3);
-+
-+ if (rid_base & ~map_mask) {
-+ pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
-+ np, map_name, map_name,
-+ map_mask, rid_base);
-+ return -EFAULT;
-+ }
-+
-+ if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
-+ continue;
-+
-+ phandle_node = of_find_node_by_phandle(phandle);
-+ if (!phandle_node)
-+ return -ENODEV;
-+
-+ if (target) {
-+ if (*target)
-+ of_node_put(phandle_node);
-+ else
-+ *target = phandle_node;
-+
-+ if (*target != phandle_node)
-+ continue;
-+ }
-+
-+ if (id_out)
-+ *id_out = masked_rid - rid_base + out_base;
-+
-+ pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
-+ np, map_name, map_mask, rid_base, out_base,
-+ rid_len, rid, *id_out);
-+ return 0;
-+ }
-+
-+ pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
-+ np, map_name, rid, target && *target ? *target : NULL);
-+ return -EFAULT;
-+}
- static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
- {
- struct of_pci_iommu_alias_info *info = data;
- struct of_phandle_args iommu_spec = { .args_count = 1 };
- int err;
-
-- err = of_pci_map_rid(info->np, alias, "iommu-map",
-- "iommu-map-mask", &iommu_spec.np,
-- iommu_spec.args);
-+ err = of_map_rid(info->np, alias, "iommu-map",
-+ "iommu-map-mask", &iommu_spec.np,
-+ iommu_spec.args);
- if (err)
- return err == -ENODEV ? NO_IOMMU : err;
-
-@@ -160,6 +261,23 @@ static int of_pci_iommu_init(struct pci_
- return err;
- }
-
-+static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
-+ struct device_node *master_np)
-+{
-+ struct of_phandle_args iommu_spec = { .args_count = 1 };
-+ int err;
-+
-+ err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
-+ "iommu-map-mask", &iommu_spec.np,
-+ iommu_spec.args);
-+ if (err)
-+ return err == -ENODEV ? NO_IOMMU : err;
-+
-+ err = of_iommu_xlate(&mc_dev->dev, &iommu_spec);
-+ of_node_put(iommu_spec.np);
-+ return err;
-+}
-+
- const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
- {
-@@ -191,6 +309,8 @@ const struct iommu_ops *of_iommu_configu
-
- err = pci_for_each_dma_alias(to_pci_dev(dev),
- of_pci_iommu_init, &info);
-+ } else if (dev_is_fsl_mc(dev)) {
-+ err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np);
- } else {
- struct of_phandle_args iommu_spec;
- int idx = 0;
---- a/drivers/of/irq.c
-+++ b/drivers/of/irq.c
-@@ -26,7 +26,7 @@
- #include <linux/module.h>
- #include <linux/of.h>
- #include <linux/of_irq.h>
--#include <linux/of_pci.h>
-+#include <linux/of_iommu.h>
- #include <linux/string.h>
- #include <linux/slab.h>
-
-@@ -593,8 +593,8 @@ static u32 __of_msi_map_rid(struct devic
- * "msi-map" property.
- */
- for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
-- if (!of_pci_map_rid(parent_dev->of_node, rid_in, "msi-map",
-- "msi-map-mask", np, &rid_out))
-+ if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map",
-+ "msi-map-mask", np, &rid_out))
- break;
- return rid_out;
- }
---- a/drivers/of/of_pci.c
-+++ b/drivers/of/of_pci.c
-@@ -281,104 +281,3 @@ parse_failed:
- }
- EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
- #endif /* CONFIG_OF_ADDRESS */
--
--/**
-- * of_pci_map_rid - Translate a requester ID through a downstream mapping.
-- * @np: root complex device node.
-- * @rid: PCI requester ID to map.
-- * @map_name: property name of the map to use.
-- * @map_mask_name: optional property name of the mask to use.
-- * @target: optional pointer to a target device node.
-- * @id_out: optional pointer to receive the translated ID.
-- *
-- * Given a PCI requester ID, look up the appropriate implementation-defined
-- * platform ID and/or the target device which receives transactions on that
-- * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
-- * @id_out may be NULL if only the other is required. If @target points to
-- * a non-NULL device node pointer, only entries targeting that node will be
-- * matched; if it points to a NULL value, it will receive the device node of
-- * the first matching target phandle, with a reference held.
-- *
-- * Return: 0 on success or a standard error code on failure.
-- */
--int of_pci_map_rid(struct device_node *np, u32 rid,
-- const char *map_name, const char *map_mask_name,
-- struct device_node **target, u32 *id_out)
--{
-- u32 map_mask, masked_rid;
-- int map_len;
-- const __be32 *map = NULL;
--
-- if (!np || !map_name || (!target && !id_out))
-- return -EINVAL;
--
-- map = of_get_property(np, map_name, &map_len);
-- if (!map) {
-- if (target)
-- return -ENODEV;
-- /* Otherwise, no map implies no translation */
-- *id_out = rid;
-- return 0;
-- }
--
-- if (!map_len || map_len % (4 * sizeof(*map))) {
-- pr_err("%pOF: Error: Bad %s length: %d\n", np,
-- map_name, map_len);
-- return -EINVAL;
-- }
--
-- /* The default is to select all bits. */
-- map_mask = 0xffffffff;
--
-- /*
-- * Can be overridden by "{iommu,msi}-map-mask" property.
-- * If of_property_read_u32() fails, the default is used.
-- */
-- if (map_mask_name)
-- of_property_read_u32(np, map_mask_name, &map_mask);
--
-- masked_rid = map_mask & rid;
-- for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
-- struct device_node *phandle_node;
-- u32 rid_base = be32_to_cpup(map + 0);
-- u32 phandle = be32_to_cpup(map + 1);
-- u32 out_base = be32_to_cpup(map + 2);
-- u32 rid_len = be32_to_cpup(map + 3);
--
-- if (rid_base & ~map_mask) {
-- pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
-- np, map_name, map_name,
-- map_mask, rid_base);
-- return -EFAULT;
-- }
--
-- if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
-- continue;
--
-- phandle_node = of_find_node_by_phandle(phandle);
-- if (!phandle_node)
-- return -ENODEV;
--
-- if (target) {
-- if (*target)
-- of_node_put(phandle_node);
-- else
-- *target = phandle_node;
--
-- if (*target != phandle_node)
-- continue;
-- }
--
-- if (id_out)
-- *id_out = masked_rid - rid_base + out_base;
--
-- pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
-- np, map_name, map_mask, rid_base, out_base,
-- rid_len, rid, *id_out);
-- return 0;
-- }
--
-- pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
-- np, map_name, rid, target && *target ? *target : NULL);
-- return -EFAULT;
--}
---- a/include/linux/iommu.h
-+++ b/include/linux/iommu.h
-@@ -389,6 +389,8 @@ static inline size_t iommu_map_sg(struct
- extern struct iommu_group *pci_device_group(struct device *dev);
- /* Generic device grouping function */
- extern struct iommu_group *generic_device_group(struct device *dev);
-+/* FSL-MC device grouping function */
-+struct iommu_group *fsl_mc_device_group(struct device *dev);
-
- /**
- * struct iommu_fwspec - per-device IOMMU instance data
---- a/include/linux/of_iommu.h
-+++ b/include/linux/of_iommu.h
-@@ -15,6 +15,9 @@ extern int of_get_dma_window(struct devi
- extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np);
-
-+int of_map_rid(struct device_node *np, u32 rid,
-+ const char *map_name, const char *map_mask_name,
-+ struct device_node **target, u32 *id_out);
- #else
-
- static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
-@@ -30,6 +33,13 @@ static inline const struct iommu_ops *of
- return NULL;
- }
-
-+static inline int of_map_rid(struct device_node *np, u32 rid,
-+ const char *map_name, const char *map_mask_name,
-+ struct device_node **target, u32 *id_out)
-+{
-+ return -EINVAL;
-+}
-+
- #endif /* CONFIG_OF_IOMMU */
-
- extern struct of_device_id __iommu_of_table;
---- a/include/linux/of_pci.h
-+++ b/include/linux/of_pci.h
-@@ -19,9 +19,6 @@ int of_pci_parse_bus_range(struct device
- int of_get_pci_domain_nr(struct device_node *node);
- int of_pci_get_max_link_speed(struct device_node *node);
- void of_pci_check_probe_only(void);
--int of_pci_map_rid(struct device_node *np, u32 rid,
-- const char *map_name, const char *map_mask_name,
-- struct device_node **target, u32 *id_out);
- #else
- static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
- {
-@@ -57,13 +54,6 @@ of_get_pci_domain_nr(struct device_node
- return -1;
- }
-
--static inline int of_pci_map_rid(struct device_node *np, u32 rid,
-- const char *map_name, const char *map_mask_name,
-- struct device_node **target, u32 *id_out)
--{
-- return -EINVAL;
--}
--
- static inline int
- of_pci_get_max_link_speed(struct device_node *node)
- {
diff --git a/target/linux/layerscape/patches-4.14/822-uart-support-layerscape.patch b/target/linux/layerscape/patches-4.14/822-uart-support-layerscape.patch
deleted file mode 100644
index f8d893ceeb..0000000000
--- a/target/linux/layerscape/patches-4.14/822-uart-support-layerscape.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 558ca1294aa2bf7f29d55361d2f18c6dc534e1d6 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:28:33 +0800
-Subject: [PATCH 39/40] uart: support layerscape
-This is an integrated patch of uart for layerscape
-
-Signed-off-by: Sriram Dash <Sriram.dash@nxp.com>
-Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- drivers/tty/serial/fsl_lpuart.c | 15 +++++++++------
- 1 file changed, 9 insertions(+), 6 deletions(-)
-
---- a/drivers/tty/serial/fsl_lpuart.c
-+++ b/drivers/tty/serial/fsl_lpuart.c
-@@ -236,6 +236,8 @@
- /* IMX lpuart has four extra unused regs located at the beginning */
- #define IMX_REG_OFF 0x10
-
-+static DECLARE_BITMAP(linemap, UART_NR);
-+
- struct lpuart_port {
- struct uart_port port;
- struct clk *clk;
-@@ -2153,13 +2155,13 @@ static int lpuart_probe(struct platform_
-
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
-- dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
-- return ret;
-- }
-- if (ret >= ARRAY_SIZE(lpuart_ports)) {
-- dev_err(&pdev->dev, "serial%d out of range\n", ret);
-- return -EINVAL;
-+ ret = find_first_zero_bit(linemap, UART_NR);
-+ if (ret >= UART_NR) {
-+ dev_err(&pdev->dev, "port line is full, add device failed\n");
-+ return ret;
-+ }
- }
-+ set_bit(ret, linemap);
- sport->port.line = ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
-@@ -2250,6 +2252,7 @@ static int lpuart_remove(struct platform
- struct lpuart_port *sport = platform_get_drvdata(pdev);
-
- uart_remove_one_port(&lpuart_reg, &sport->port);
-+ clear_bit(sport->port.line, linemap);
-
- clk_disable_unprepare(sport->clk);
-
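The removed fsl_lpuart hunk above falls back to a bitmap when the device tree provides no "serial" alias: a free port line is claimed with find_first_zero_bit()/set_bit() at probe time and released with clear_bit() at remove time. A minimal sketch of that allocation pattern; UART_NR and the helpers below are placeholders, not the kernel bitmap API.

/*
 * Sketch only: bitmap-based line allocation in the style of the removed
 * lpuart change. One bit per port line; allocate the first free line,
 * release it when the port goes away.
 */
#include <stdio.h>

#define UART_NR 8

static unsigned long linemap;   /* one bit per port line */

static int alloc_line(void)
{
    int i;

    for (i = 0; i < UART_NR; i++) {
        if (!(linemap & (1UL << i))) {  /* find_first_zero_bit() */
            linemap |= 1UL << i;        /* set_bit() */
            return i;
        }
    }
    return -1;  /* "port line is full, add device failed" */
}

static void free_line(int line)
{
    linemap &= ~(1UL << line);          /* clear_bit() on remove */
}

int main(void)
{
    int a = alloc_line(), b = alloc_line();

    printf("allocated lines %d and %d\n", a, b);
    free_line(a);
    printf("line %d is reused next: %d\n", a, alloc_line());
    return 0;
}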
diff --git a/target/linux/layerscape/patches-4.14/823-pm-support-layerscape.patch b/target/linux/layerscape/patches-4.14/823-pm-support-layerscape.patch
deleted file mode 100644
index 99863c47a0..0000000000
--- a/target/linux/layerscape/patches-4.14/823-pm-support-layerscape.patch
+++ /dev/null
@@ -1,631 +0,0 @@
-From 62ac0c4fda3b40a8994f2abfdc52784ced80c83b Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:51 +0800
-Subject: [PATCH] pm: support layerscape
-
-This is an integrated patch of pm for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
-Signed-off-by: Hongbo Zhang <hongbo.zhang@freescale.com>
-Signed-off-by: Li Yang <leoyang.li@nxp.com>
-Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
-Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
-Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
-Signed-off-by: Zhao Chenhui <chenhui.zhao@nxp.com>
----
- drivers/firmware/psci.c | 16 ++-
- drivers/soc/fsl/rcpm.c | 156 ++++++++++++++++++++
- drivers/soc/fsl/sleep_fsm.c | 279 ++++++++++++++++++++++++++++++++++++
- drivers/soc/fsl/sleep_fsm.h | 130 +++++++++++++++++
- 4 files changed, 579 insertions(+), 2 deletions(-)
- create mode 100644 drivers/soc/fsl/rcpm.c
- create mode 100644 drivers/soc/fsl/sleep_fsm.c
- create mode 100644 drivers/soc/fsl/sleep_fsm.h
-
---- a/drivers/firmware/psci.c
-+++ b/drivers/firmware/psci.c
-@@ -437,8 +437,18 @@ CPUIDLE_METHOD_OF_DECLARE(psci, "psci",
-
- static int psci_system_suspend(unsigned long unused)
- {
-- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
-- __pa_symbol(cpu_resume), 0, 0);
-+ u32 state;
-+ u32 ver = psci_get_version();
-+
-+ if (PSCI_VERSION_MAJOR(ver) >= 1) {
-+ return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
-+ virt_to_phys(cpu_resume), 0, 0);
-+ } else {
-+ state = ( 2 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) |
-+ (1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT);
-+
-+ return psci_cpu_suspend(state, virt_to_phys(cpu_resume));
-+ }
- }
-
- static int psci_system_suspend_enter(suspend_state_t state)
-@@ -562,6 +572,8 @@ static void __init psci_0_2_set_function
- arm_pm_restart = psci_sys_reset;
-
- pm_power_off = psci_sys_poweroff;
-+
-+ suspend_set_ops(&psci_suspend_ops);
- }
-
- /*
---- /dev/null
-+++ b/drivers/soc/fsl/rcpm.c
-@@ -0,0 +1,156 @@
-+/*
-+ * Run Control and Power Management (RCPM) driver
-+ *
-+ * Copyright 2016 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ */
-+#define pr_fmt(fmt) "RCPM: %s: " fmt, __func__
-+
-+#include <linux/kernel.h>
-+#include <linux/io.h>
-+#include <linux/of_platform.h>
-+#include <linux/of_address.h>
-+#include <linux/suspend.h>
-+
-+/* RCPM register offset */
-+#define RCPM_IPPDEXPCR0 0x140
-+
-+#define RCPM_WAKEUP_CELL_SIZE 2
-+
-+struct rcpm_config {
-+ int ipp_num;
-+ int ippdexpcr_offset;
-+ u32 ippdexpcr[2];
-+ void *rcpm_reg_base;
-+};
-+
-+static struct rcpm_config *rcpm;
-+
-+static inline void rcpm_reg_write(u32 offset, u32 value)
-+{
-+ iowrite32be(value, rcpm->rcpm_reg_base + offset);
-+}
-+
-+static inline u32 rcpm_reg_read(u32 offset)
-+{
-+ return ioread32be(rcpm->rcpm_reg_base + offset);
-+}
-+
-+static void rcpm_wakeup_fixup(struct device *dev, void *data)
-+{
-+ struct device_node *node = dev ? dev->of_node : NULL;
-+ u32 value[RCPM_WAKEUP_CELL_SIZE];
-+ int ret, i;
-+
-+ if (!dev || !node || !device_may_wakeup(dev))
-+ return;
-+
-+ /*
-+ * Get the values in the "rcpm-wakeup" property.
-+ * Three values are:
-+ * The first is a pointer to the RCPM node.
-+ * The second is the value of the ippdexpcr0 register.
-+ * The third is the value of the ippdexpcr1 register.
-+ */
-+ ret = of_property_read_u32_array(node, "fsl,rcpm-wakeup",
-+ value, RCPM_WAKEUP_CELL_SIZE);
-+ if (ret)
-+ return;
-+
-+ pr_debug("wakeup source: the device %s\n", node->full_name);
-+
-+ for (i = 0; i < rcpm->ipp_num; i++)
-+ rcpm->ippdexpcr[i] |= value[i + 1];
-+}
-+
-+static int rcpm_suspend_prepare(void)
-+{
-+ int i;
-+ u32 val;
-+
-+ BUG_ON(!rcpm);
-+
-+ for (i = 0; i < rcpm->ipp_num; i++)
-+ rcpm->ippdexpcr[i] = 0;
-+
-+ dpm_for_each_dev(NULL, rcpm_wakeup_fixup);
-+
-+ for (i = 0; i < rcpm->ipp_num; i++) {
-+ if (rcpm->ippdexpcr[i]) {
-+ val = rcpm_reg_read(rcpm->ippdexpcr_offset + 4 * i);
-+ rcpm_reg_write(rcpm->ippdexpcr_offset + 4 * i,
-+ val | rcpm->ippdexpcr[i]);
-+ pr_debug("ippdexpcr%d = 0x%x\n", i, rcpm->ippdexpcr[i]);
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int rcpm_suspend_notifier_call(struct notifier_block *bl,
-+ unsigned long state,
-+ void *unused)
-+{
-+ switch (state) {
-+ case PM_SUSPEND_PREPARE:
-+ rcpm_suspend_prepare();
-+ break;
-+ }
-+
-+ return NOTIFY_DONE;
-+}
-+
-+static struct rcpm_config rcpm_default_config = {
-+ .ipp_num = 1,
-+ .ippdexpcr_offset = RCPM_IPPDEXPCR0,
-+};
-+
-+static const struct of_device_id rcpm_matches[] = {
-+ {
-+ .compatible = "fsl,qoriq-rcpm-2.1",
-+ .data = &rcpm_default_config,
-+ },
-+ {}
-+};
-+
-+static struct notifier_block rcpm_suspend_notifier = {
-+ .notifier_call = rcpm_suspend_notifier_call,
-+};
-+
-+static int __init layerscape_rcpm_init(void)
-+{
-+ const struct of_device_id *match;
-+ struct device_node *np;
-+
-+ np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
-+ if (!np)
-+ return -EINVAL;
-+
-+ if (match->data)
-+ rcpm = (struct rcpm_config *)match->data;
-+ else
-+ return -EINVAL;
-+
-+ rcpm->rcpm_reg_base = of_iomap(np, 0);
-+ of_node_put(np);
-+ if (!rcpm->rcpm_reg_base)
-+ return -ENOMEM;
-+
-+ register_pm_notifier(&rcpm_suspend_notifier);
-+
-+ pr_info("The RCPM driver initialized.\n");
-+
-+ return 0;
-+}
-+
-+subsys_initcall(layerscape_rcpm_init);
---- /dev/null
-+++ b/drivers/soc/fsl/sleep_fsm.c
-@@ -0,0 +1,279 @@
-+/*
-+ * deep sleep FSM (finite-state machine) configuration
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Author: Hongbo Zhang <hongbo.zhang@freescale.com>
-+ * Chenhui Zhao <chenhui.zhao@freescale.com>
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/io.h>
-+#include <linux/types.h>
-+
-+#include "sleep_fsm.h"
-+/*
-+ * These values are from chip's reference manual. For example,
-+ * the values for T1040 can be found in "8.4.3.8 Programming
-+ * supporting deep sleep mode" of Chapter 8 "Run Control and
-+ * Power Management (RCPM)".
-+ * The default value can be applied to T104x, LS1021.
-+ */
-+struct fsm_reg_vals epu_default_val[] = {
-+ /* EPGCR (Event Processor Global Control Register) */
-+ {EPGCR, 0},
-+ /* EPECR (Event Processor Event Control Registers) */
-+ {EPECR0 + EPECR_STRIDE * 0, 0},
-+ {EPECR0 + EPECR_STRIDE * 1, 0},
-+ {EPECR0 + EPECR_STRIDE * 2, 0xF0004004},
-+ {EPECR0 + EPECR_STRIDE * 3, 0x80000084},
-+ {EPECR0 + EPECR_STRIDE * 4, 0x20000084},
-+ {EPECR0 + EPECR_STRIDE * 5, 0x08000004},
-+ {EPECR0 + EPECR_STRIDE * 6, 0x80000084},
-+ {EPECR0 + EPECR_STRIDE * 7, 0x80000084},
-+ {EPECR0 + EPECR_STRIDE * 8, 0x60000084},
-+ {EPECR0 + EPECR_STRIDE * 9, 0x08000084},
-+ {EPECR0 + EPECR_STRIDE * 10, 0x42000084},
-+ {EPECR0 + EPECR_STRIDE * 11, 0x90000084},
-+ {EPECR0 + EPECR_STRIDE * 12, 0x80000084},
-+ {EPECR0 + EPECR_STRIDE * 13, 0x08000084},
-+ {EPECR0 + EPECR_STRIDE * 14, 0x02000084},
-+ {EPECR0 + EPECR_STRIDE * 15, 0x00000004},
-+ /*
-+ * EPEVTCR (Event Processor EVT Pin Control Registers)
-+ * SCU8 triger EVT2, and SCU11 triger EVT9
-+ */
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 0, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 1, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 2, 0x80000001},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 3, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 4, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 5, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 6, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 7, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 8, 0},
-+ {EPEVTCR0 + EPEVTCR_STRIDE * 9, 0xB0000001},
-+ /* EPCMPR (Event Processor Counter Compare Registers) */
-+ {EPCMPR0 + EPCMPR_STRIDE * 0, 0},
-+ {EPCMPR0 + EPCMPR_STRIDE * 1, 0},
-+ {EPCMPR0 + EPCMPR_STRIDE * 2, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 3, 0},
-+ {EPCMPR0 + EPCMPR_STRIDE * 4, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 5, 0x00000020},
-+ {EPCMPR0 + EPCMPR_STRIDE * 6, 0},
-+ {EPCMPR0 + EPCMPR_STRIDE * 7, 0},
-+ {EPCMPR0 + EPCMPR_STRIDE * 8, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 9, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 10, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 11, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 12, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 13, 0},
-+ {EPCMPR0 + EPCMPR_STRIDE * 14, 0x000000FF},
-+ {EPCMPR0 + EPCMPR_STRIDE * 15, 0x000000FF},
-+ /* EPCCR (Event Processor Counter Control Registers) */
-+ {EPCCR0 + EPCCR_STRIDE * 0, 0},
-+ {EPCCR0 + EPCCR_STRIDE * 1, 0},
-+ {EPCCR0 + EPCCR_STRIDE * 2, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 3, 0},
-+ {EPCCR0 + EPCCR_STRIDE * 4, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 5, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 6, 0},
-+ {EPCCR0 + EPCCR_STRIDE * 7, 0},
-+ {EPCCR0 + EPCCR_STRIDE * 8, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 9, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 10, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 11, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 12, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 13, 0},
-+ {EPCCR0 + EPCCR_STRIDE * 14, 0x92840000},
-+ {EPCCR0 + EPCCR_STRIDE * 15, 0x92840000},
-+ /* EPSMCR (Event Processor SCU Mux Control Registers) */
-+ {EPSMCR0 + EPSMCR_STRIDE * 0, 0},
-+ {EPSMCR0 + EPSMCR_STRIDE * 1, 0},
-+ {EPSMCR0 + EPSMCR_STRIDE * 2, 0x6C700000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 3, 0x2F000000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 4, 0x002F0000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 5, 0x00002E00},
-+ {EPSMCR0 + EPSMCR_STRIDE * 6, 0x7C000000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 7, 0x30000000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 8, 0x64300000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 9, 0x00003000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 10, 0x65000030},
-+ {EPSMCR0 + EPSMCR_STRIDE * 11, 0x31740000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 12, 0x7F000000},
-+ {EPSMCR0 + EPSMCR_STRIDE * 13, 0x00003100},
-+ {EPSMCR0 + EPSMCR_STRIDE * 14, 0x00000031},
-+ {EPSMCR0 + EPSMCR_STRIDE * 15, 0x76000000},
-+ /* EPACR (Event Processor Action Control Registers) */
-+ {EPACR0 + EPACR_STRIDE * 0, 0},
-+ {EPACR0 + EPACR_STRIDE * 1, 0},
-+ {EPACR0 + EPACR_STRIDE * 2, 0},
-+ {EPACR0 + EPACR_STRIDE * 3, 0x00000080},
-+ {EPACR0 + EPACR_STRIDE * 4, 0},
-+ {EPACR0 + EPACR_STRIDE * 5, 0x00000040},
-+ {EPACR0 + EPACR_STRIDE * 6, 0},
-+ {EPACR0 + EPACR_STRIDE * 7, 0},
-+ {EPACR0 + EPACR_STRIDE * 8, 0},
-+ {EPACR0 + EPACR_STRIDE * 9, 0x0000001C},
-+ {EPACR0 + EPACR_STRIDE * 10, 0x00000020},
-+ {EPACR0 + EPACR_STRIDE * 11, 0},
-+ {EPACR0 + EPACR_STRIDE * 12, 0x00000003},
-+ {EPACR0 + EPACR_STRIDE * 13, 0x06000000},
-+ {EPACR0 + EPACR_STRIDE * 14, 0x04000000},
-+ {EPACR0 + EPACR_STRIDE * 15, 0x02000000},
-+ /* EPIMCR (Event Processor Input Mux Control Registers) */
-+ {EPIMCR0 + EPIMCR_STRIDE * 0, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 1, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 2, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 3, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 4, 0x44000000},
-+ {EPIMCR0 + EPIMCR_STRIDE * 5, 0x40000000},
-+ {EPIMCR0 + EPIMCR_STRIDE * 6, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 7, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 8, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 9, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 10, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 11, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 12, 0x44000000},
-+ {EPIMCR0 + EPIMCR_STRIDE * 13, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 14, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 15, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 16, 0x6A000000},
-+ {EPIMCR0 + EPIMCR_STRIDE * 17, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 18, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 19, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 20, 0x48000000},
-+ {EPIMCR0 + EPIMCR_STRIDE * 21, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 22, 0x6C000000},
-+ {EPIMCR0 + EPIMCR_STRIDE * 23, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 24, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 25, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 26, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 27, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 28, 0x76000000},
-+ {EPIMCR0 + EPIMCR_STRIDE * 29, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 30, 0},
-+ {EPIMCR0 + EPIMCR_STRIDE * 31, 0x76000000},
-+ /* EPXTRIGCR (Event Processor Crosstrigger Control Register) */
-+ {EPXTRIGCR, 0x0000FFDF},
-+ /* end */
-+ {FSM_END_FLAG, 0},
-+};
-+
-+struct fsm_reg_vals npc_default_val[] = {
-+ /* NPC triggered Memory-Mapped Access Registers */
-+ {NCR, 0x80000000},
-+ {MCCR1, 0},
-+ {MCSR1, 0},
-+ {MMAR1LO, 0},
-+ {MMAR1HI, 0},
-+ {MMDR1, 0},
-+ {MCSR2, 0},
-+ {MMAR2LO, 0},
-+ {MMAR2HI, 0},
-+ {MMDR2, 0},
-+ {MCSR3, 0x80000000},
-+ {MMAR3LO, 0x000E2130},
-+ {MMAR3HI, 0x00030000},
-+ {MMDR3, 0x00020000},
-+ /* end */
-+ {FSM_END_FLAG, 0},
-+};
-+
-+/**
-+ * fsl_fsm_setup - Configure EPU's FSM registers
-+ * @base: the base address of registers
-+ * @val: Pointer to address-value pairs for FSM registers
-+ */
-+void fsl_fsm_setup(void __iomem *base, struct fsm_reg_vals *val)
-+{
-+ struct fsm_reg_vals *data = val;
-+
-+ WARN_ON(!base || !data);
-+ while (data->offset != FSM_END_FLAG) {
-+ iowrite32be(data->value, base + data->offset);
-+ data++;
-+ }
-+}
-+
-+void fsl_epu_setup_default(void __iomem *epu_base)
-+{
-+ fsl_fsm_setup(epu_base, epu_default_val);
-+}
-+
-+void fsl_npc_setup_default(void __iomem *npc_base)
-+{
-+ fsl_fsm_setup(npc_base, npc_default_val);
-+}
-+
-+void fsl_epu_clean_default(void __iomem *epu_base)
-+{
-+ u32 offset;
-+
-+ /* follow the exact sequence to clear the registers */
-+ /* Clear EPACRn */
-+ for (offset = EPACR0; offset <= EPACR15; offset += EPACR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+
-+ /* Clear EPEVTCRn */
-+ for (offset = EPEVTCR0; offset <= EPEVTCR9; offset += EPEVTCR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+
-+ /* Clear EPGCR */
-+ iowrite32be(0, epu_base + EPGCR);
-+
-+ /* Clear EPSMCRn */
-+ for (offset = EPSMCR0; offset <= EPSMCR15; offset += EPSMCR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+
-+ /* Clear EPCCRn */
-+ for (offset = EPCCR0; offset <= EPCCR31; offset += EPCCR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+
-+ /* Clear EPCMPRn */
-+ for (offset = EPCMPR0; offset <= EPCMPR31; offset += EPCMPR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+
-+ /* Clear EPCTRn */
-+ for (offset = EPCTR0; offset <= EPCTR31; offset += EPCTR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+
-+ /* Clear EPIMCRn */
-+ for (offset = EPIMCR0; offset <= EPIMCR31; offset += EPIMCR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+
-+ /* Clear EPXTRIGCRn */
-+ iowrite32be(0, epu_base + EPXTRIGCR);
-+
-+ /* Clear EPECRn */
-+ for (offset = EPECR0; offset <= EPECR15; offset += EPECR_STRIDE)
-+ iowrite32be(0, epu_base + offset);
-+}
---- /dev/null
-+++ b/drivers/soc/fsl/sleep_fsm.h
-@@ -0,0 +1,130 @@
-+/*
-+ * deep sleep FSM (finite-state machine) configuration
-+ *
-+ * Copyright 2018 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef _FSL_SLEEP_FSM_H
-+#define _FSL_SLEEP_FSM_H
-+
-+#define FSL_STRIDE_4B 4
-+#define FSL_STRIDE_8B 8
-+
-+/* End flag */
-+#define FSM_END_FLAG 0xFFFFFFFFUL
-+
-+/* Block offsets */
-+#define RCPM_BLOCK_OFFSET 0x00022000
-+#define EPU_BLOCK_OFFSET 0x00000000
-+#define NPC_BLOCK_OFFSET 0x00001000
-+
-+/* EPGCR (Event Processor Global Control Register) */
-+#define EPGCR 0x000
-+
-+/* EPEVTCR0-9 (Event Processor EVT Pin Control Registers) */
-+#define EPEVTCR0 0x050
-+#define EPEVTCR9 0x074
-+#define EPEVTCR_STRIDE FSL_STRIDE_4B
-+
-+/* EPXTRIGCR (Event Processor Crosstrigger Control Register) */
-+#define EPXTRIGCR 0x090
-+
-+/* EPIMCR0-31 (Event Processor Input Mux Control Registers) */
-+#define EPIMCR0 0x100
-+#define EPIMCR31 0x17C
-+#define EPIMCR_STRIDE FSL_STRIDE_4B
-+
-+/* EPSMCR0-15 (Event Processor SCU Mux Control Registers) */
-+#define EPSMCR0 0x200
-+#define EPSMCR15 0x278
-+#define EPSMCR_STRIDE FSL_STRIDE_8B
-+
-+/* EPECR0-15 (Event Processor Event Control Registers) */
-+#define EPECR0 0x300
-+#define EPECR15 0x33C
-+#define EPECR_STRIDE FSL_STRIDE_4B
-+
-+/* EPACR0-15 (Event Processor Action Control Registers) */
-+#define EPACR0 0x400
-+#define EPACR15 0x43C
-+#define EPACR_STRIDE FSL_STRIDE_4B
-+
-+/* EPCCRi0-15 (Event Processor Counter Control Registers) */
-+#define EPCCR0 0x800
-+#define EPCCR15 0x83C
-+#define EPCCR31 0x87C
-+#define EPCCR_STRIDE FSL_STRIDE_4B
-+
-+/* EPCMPR0-15 (Event Processor Counter Compare Registers) */
-+#define EPCMPR0 0x900
-+#define EPCMPR15 0x93C
-+#define EPCMPR31 0x97C
-+#define EPCMPR_STRIDE FSL_STRIDE_4B
-+
-+/* EPCTR0-31 (Event Processor Counter Register) */
-+#define EPCTR0 0xA00
-+#define EPCTR31 0xA7C
-+#define EPCTR_STRIDE FSL_STRIDE_4B
-+
-+/* NPC triggered Memory-Mapped Access Registers */
-+#define NCR 0x000
-+#define MCCR1 0x0CC
-+#define MCSR1 0x0D0
-+#define MMAR1LO 0x0D4
-+#define MMAR1HI 0x0D8
-+#define MMDR1 0x0DC
-+#define MCSR2 0x0E0
-+#define MMAR2LO 0x0E4
-+#define MMAR2HI 0x0E8
-+#define MMDR2 0x0EC
-+#define MCSR3 0x0F0
-+#define MMAR3LO 0x0F4
-+#define MMAR3HI 0x0F8
-+#define MMDR3 0x0FC
-+
-+/* RCPM Core State Action Control Register 0 */
-+#define CSTTACR0 0xB00
-+
-+/* RCPM Core Group 1 Configuration Register 0 */
-+#define CG1CR0 0x31C
-+
-+struct fsm_reg_vals {
-+ u32 offset;
-+ u32 value;
-+};
-+
-+void fsl_fsm_setup(void __iomem *base, struct fsm_reg_vals *val);
-+void fsl_epu_setup_default(void __iomem *epu_base);
-+void fsl_npc_setup_default(void __iomem *npc_base);
-+void fsl_epu_clean_default(void __iomem *epu_base);
-+
-+#endif /* _FSL_SLEEP_FSM_H */
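The removed rcpm.c above gathers wakeup sources before suspend: every device that may wake the system contributes IPPDEXPCR bits through its "fsl,rcpm-wakeup" cells, the driver ORs those contributions together per register, then ORs the result into the RCPM IPPDEXPCRn registers. A minimal sketch of that accumulation; the device names and bit values below are invented for illustration and are not taken from any device tree.

/*
 * Sketch only: accumulate per-device wakeup bits the way the removed
 * rcpm_suspend_prepare()/rcpm_wakeup_fixup() pair does, then report what
 * would be OR'ed into each IPPDEXPCR register.
 */
#include <stdint.h>
#include <stdio.h>

#define IPP_NUM 2

struct wakeup_dev {
    const char *name;
    int may_wakeup;                 /* device_may_wakeup() equivalent */
    uint32_t cells[IPP_NUM];        /* hypothetical ippdexpcr0/1 bits */
};

int main(void)
{
    struct wakeup_dev devs[] = {
        { "duart",    1, { 0x00000002, 0 } },
        { "ftm0",     1, { 0x00020000, 0 } },
        { "ethernet", 0, { 0x80000000, 0 } },   /* not wakeup-enabled: skipped */
    };
    uint32_t ippdexpcr[IPP_NUM] = { 0, 0 };
    unsigned int d, i;

    for (d = 0; d < sizeof(devs) / sizeof(devs[0]); d++) {
        if (!devs[d].may_wakeup)
            continue;
        for (i = 0; i < IPP_NUM; i++)
            ippdexpcr[i] |= devs[d].cells[i];
    }

    for (i = 0; i < IPP_NUM; i++)
        printf("IPPDEXPCR%u |= 0x%08x\n", i, ippdexpcr[i]);
    return 0;
}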
diff --git a/target/linux/layerscape/patches-4.14/824-ptp-support-layerscape.patch b/target/linux/layerscape/patches-4.14/824-ptp-support-layerscape.patch
deleted file mode 100644
index 5664e18094..0000000000
--- a/target/linux/layerscape/patches-4.14/824-ptp-support-layerscape.patch
+++ /dev/null
@@ -1,1399 +0,0 @@
-From bba7af6efb0aad1d52ee5e7d80f9e2ab59d85e20 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:58:52 +0800
-Subject: [PATCH] ptp: support layerscape
-
-This is an integrated patch of ptp for layerscape
-
-Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/net/ethernet/freescale/Makefile | 1 -
- drivers/net/ethernet/freescale/gianfar_ptp.c | 572 ------------------
- drivers/ptp/Makefile | 1 +
- drivers/ptp/ptp_chardev.c | 4 +-
- drivers/ptp/ptp_qoriq.c | 589 +++++++++++++++++++
- include/linux/fsl/ptp_qoriq.h | 169 ++++++
- 6 files changed, 761 insertions(+), 575 deletions(-)
- delete mode 100644 drivers/net/ethernet/freescale/gianfar_ptp.c
- create mode 100644 drivers/ptp/ptp_qoriq.c
- create mode 100644 include/linux/fsl/ptp_qoriq.h
-
---- a/drivers/net/ethernet/freescale/Makefile
-+++ b/drivers/net/ethernet/freescale/Makefile
-@@ -14,7 +14,6 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
- obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
- obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
- obj-$(CONFIG_GIANFAR) += gianfar_driver.o
--obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
- gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o
- obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
---- a/drivers/net/ethernet/freescale/gianfar_ptp.c
-+++ /dev/null
-@@ -1,572 +0,0 @@
--/*
-- * PTP 1588 clock using the eTSEC
-- *
-- * Copyright (C) 2010 OMICRON electronics GmbH
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, write to the Free Software
-- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-- */
--
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
--#include <linux/device.h>
--#include <linux/hrtimer.h>
--#include <linux/interrupt.h>
--#include <linux/kernel.h>
--#include <linux/module.h>
--#include <linux/of.h>
--#include <linux/of_platform.h>
--#include <linux/timex.h>
--#include <linux/io.h>
--
--#include <linux/ptp_clock_kernel.h>
--
--#include "gianfar.h"
--
--/*
-- * gianfar ptp registers
-- * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
-- */
--struct gianfar_ptp_registers {
-- u32 tmr_ctrl; /* Timer control register */
-- u32 tmr_tevent; /* Timestamp event register */
-- u32 tmr_temask; /* Timer event mask register */
-- u32 tmr_pevent; /* Timestamp event register */
-- u32 tmr_pemask; /* Timer event mask register */
-- u32 tmr_stat; /* Timestamp status register */
-- u32 tmr_cnt_h; /* Timer counter high register */
-- u32 tmr_cnt_l; /* Timer counter low register */
-- u32 tmr_add; /* Timer drift compensation addend register */
-- u32 tmr_acc; /* Timer accumulator register */
-- u32 tmr_prsc; /* Timer prescale */
-- u8 res1[4];
-- u32 tmroff_h; /* Timer offset high */
-- u32 tmroff_l; /* Timer offset low */
-- u8 res2[8];
-- u32 tmr_alarm1_h; /* Timer alarm 1 high register */
-- u32 tmr_alarm1_l; /* Timer alarm 1 high register */
-- u32 tmr_alarm2_h; /* Timer alarm 2 high register */
-- u32 tmr_alarm2_l; /* Timer alarm 2 high register */
-- u8 res3[48];
-- u32 tmr_fiper1; /* Timer fixed period interval */
-- u32 tmr_fiper2; /* Timer fixed period interval */
-- u32 tmr_fiper3; /* Timer fixed period interval */
-- u8 res4[20];
-- u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
-- u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
-- u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
-- u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
--};
--
--/* Bit definitions for the TMR_CTRL register */
--#define ALM1P (1<<31) /* Alarm1 output polarity */
--#define ALM2P (1<<30) /* Alarm2 output polarity */
--#define FIPERST (1<<28) /* FIPER start indication */
--#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
--#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
--#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
--#define TCLK_PERIOD_MASK (0x3ff)
--#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
--#define FRD (1<<14) /* FIPER Realignment Disable */
--#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
--#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
--#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
--#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
--#define COPH (1<<7) /* Generated clock output phase. */
--#define CIPH (1<<6) /* External oscillator input clock phase */
--#define TMSR (1<<5) /* Timer soft reset. */
--#define BYP (1<<3) /* Bypass drift compensated clock */
--#define TE (1<<2) /* 1588 timer enable. */
--#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
--#define CKSEL_MASK (0x3)
--
--/* Bit definitions for the TMR_TEVENT register */
--#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
--#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
--#define ALM2 (1<<17) /* Current time = alarm time register 2 */
--#define ALM1 (1<<16) /* Current time = alarm time register 1 */
--#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
--#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
--#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
--
--/* Bit definitions for the TMR_TEMASK register */
--#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
--#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
--#define ALM2EN (1<<17) /* Timer ALM2 event enable */
--#define ALM1EN (1<<16) /* Timer ALM1 event enable */
--#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
--#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
--
--/* Bit definitions for the TMR_PEVENT register */
--#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
--#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
--#define RXP (1<<0) /* PTP frame has been received */
--
--/* Bit definitions for the TMR_PEMASK register */
--#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
--#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
--#define RXPEN (1<<0) /* Receive PTP packet event enable */
--
--/* Bit definitions for the TMR_STAT register */
--#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
--#define STAT_VEC_MASK (0x3f)
--
--/* Bit definitions for the TMR_PRSC register */
--#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
--#define PRSC_OCK_MASK (0xffff)
--
--
--#define DRIVER "gianfar_ptp"
--#define DEFAULT_CKSEL 1
--#define N_EXT_TS 2
--#define REG_SIZE sizeof(struct gianfar_ptp_registers)
--
--struct etsects {
-- struct gianfar_ptp_registers __iomem *regs;
-- spinlock_t lock; /* protects regs */
-- struct ptp_clock *clock;
-- struct ptp_clock_info caps;
-- struct resource *rsrc;
-- int irq;
-- u64 alarm_interval; /* for periodic alarm */
-- u64 alarm_value;
-- u32 tclk_period; /* nanoseconds */
-- u32 tmr_prsc;
-- u32 tmr_add;
-- u32 cksel;
-- u32 tmr_fiper1;
-- u32 tmr_fiper2;
--};
--
--/*
-- * Register access functions
-- */
--
--/* Caller must hold etsects->lock. */
--static u64 tmr_cnt_read(struct etsects *etsects)
--{
-- u64 ns;
-- u32 lo, hi;
--
-- lo = gfar_read(&etsects->regs->tmr_cnt_l);
-- hi = gfar_read(&etsects->regs->tmr_cnt_h);
-- ns = ((u64) hi) << 32;
-- ns |= lo;
-- return ns;
--}
--
--/* Caller must hold etsects->lock. */
--static void tmr_cnt_write(struct etsects *etsects, u64 ns)
--{
-- u32 hi = ns >> 32;
-- u32 lo = ns & 0xffffffff;
--
-- gfar_write(&etsects->regs->tmr_cnt_l, lo);
-- gfar_write(&etsects->regs->tmr_cnt_h, hi);
--}
--
--/* Caller must hold etsects->lock. */
--static void set_alarm(struct etsects *etsects)
--{
-- u64 ns;
-- u32 lo, hi;
--
-- ns = tmr_cnt_read(etsects) + 1500000000ULL;
-- ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
-- ns -= etsects->tclk_period;
-- hi = ns >> 32;
-- lo = ns & 0xffffffff;
-- gfar_write(&etsects->regs->tmr_alarm1_l, lo);
-- gfar_write(&etsects->regs->tmr_alarm1_h, hi);
--}
--
--/* Caller must hold etsects->lock. */
--static void set_fipers(struct etsects *etsects)
--{
-- set_alarm(etsects);
-- gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
-- gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
--}
--
--/*
-- * Interrupt service routine
-- */
--
--static irqreturn_t isr(int irq, void *priv)
--{
-- struct etsects *etsects = priv;
-- struct ptp_clock_event event;
-- u64 ns;
-- u32 ack = 0, lo, hi, mask, val;
--
-- val = gfar_read(&etsects->regs->tmr_tevent);
--
-- if (val & ETS1) {
-- ack |= ETS1;
-- hi = gfar_read(&etsects->regs->tmr_etts1_h);
-- lo = gfar_read(&etsects->regs->tmr_etts1_l);
-- event.type = PTP_CLOCK_EXTTS;
-- event.index = 0;
-- event.timestamp = ((u64) hi) << 32;
-- event.timestamp |= lo;
-- ptp_clock_event(etsects->clock, &event);
-- }
--
-- if (val & ETS2) {
-- ack |= ETS2;
-- hi = gfar_read(&etsects->regs->tmr_etts2_h);
-- lo = gfar_read(&etsects->regs->tmr_etts2_l);
-- event.type = PTP_CLOCK_EXTTS;
-- event.index = 1;
-- event.timestamp = ((u64) hi) << 32;
-- event.timestamp |= lo;
-- ptp_clock_event(etsects->clock, &event);
-- }
--
-- if (val & ALM2) {
-- ack |= ALM2;
-- if (etsects->alarm_value) {
-- event.type = PTP_CLOCK_ALARM;
-- event.index = 0;
-- event.timestamp = etsects->alarm_value;
-- ptp_clock_event(etsects->clock, &event);
-- }
-- if (etsects->alarm_interval) {
-- ns = etsects->alarm_value + etsects->alarm_interval;
-- hi = ns >> 32;
-- lo = ns & 0xffffffff;
-- spin_lock(&etsects->lock);
-- gfar_write(&etsects->regs->tmr_alarm2_l, lo);
-- gfar_write(&etsects->regs->tmr_alarm2_h, hi);
-- spin_unlock(&etsects->lock);
-- etsects->alarm_value = ns;
-- } else {
-- gfar_write(&etsects->regs->tmr_tevent, ALM2);
-- spin_lock(&etsects->lock);
-- mask = gfar_read(&etsects->regs->tmr_temask);
-- mask &= ~ALM2EN;
-- gfar_write(&etsects->regs->tmr_temask, mask);
-- spin_unlock(&etsects->lock);
-- etsects->alarm_value = 0;
-- etsects->alarm_interval = 0;
-- }
-- }
--
-- if (val & PP1) {
-- ack |= PP1;
-- event.type = PTP_CLOCK_PPS;
-- ptp_clock_event(etsects->clock, &event);
-- }
--
-- if (ack) {
-- gfar_write(&etsects->regs->tmr_tevent, ack);
-- return IRQ_HANDLED;
-- } else
-- return IRQ_NONE;
--}
--
--/*
-- * PTP clock operations
-- */
--
--static int ptp_gianfar_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
--{
-- u64 adj, diff;
-- u32 tmr_add;
-- int neg_adj = 0;
-- struct etsects *etsects = container_of(ptp, struct etsects, caps);
--
-- if (scaled_ppm < 0) {
-- neg_adj = 1;
-- scaled_ppm = -scaled_ppm;
-- }
-- tmr_add = etsects->tmr_add;
-- adj = tmr_add;
--
-- /* calculate diff as adj*(scaled_ppm/65536)/1000000
-- * and round() to the nearest integer
-- */
-- adj *= scaled_ppm;
-- diff = div_u64(adj, 8000000);
-- diff = (diff >> 13) + ((diff >> 12) & 1);
--
-- tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
--
-- gfar_write(&etsects->regs->tmr_add, tmr_add);
--
-- return 0;
--}
--
--static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
--{
-- s64 now;
-- unsigned long flags;
-- struct etsects *etsects = container_of(ptp, struct etsects, caps);
--
-- spin_lock_irqsave(&etsects->lock, flags);
--
-- now = tmr_cnt_read(etsects);
-- now += delta;
-- tmr_cnt_write(etsects, now);
-- set_fipers(etsects);
--
-- spin_unlock_irqrestore(&etsects->lock, flags);
--
-- return 0;
--}
--
--static int ptp_gianfar_gettime(struct ptp_clock_info *ptp,
-- struct timespec64 *ts)
--{
-- u64 ns;
-- unsigned long flags;
-- struct etsects *etsects = container_of(ptp, struct etsects, caps);
--
-- spin_lock_irqsave(&etsects->lock, flags);
--
-- ns = tmr_cnt_read(etsects);
--
-- spin_unlock_irqrestore(&etsects->lock, flags);
--
-- *ts = ns_to_timespec64(ns);
--
-- return 0;
--}
--
--static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
-- const struct timespec64 *ts)
--{
-- u64 ns;
-- unsigned long flags;
-- struct etsects *etsects = container_of(ptp, struct etsects, caps);
--
-- ns = timespec64_to_ns(ts);
--
-- spin_lock_irqsave(&etsects->lock, flags);
--
-- tmr_cnt_write(etsects, ns);
-- set_fipers(etsects);
--
-- spin_unlock_irqrestore(&etsects->lock, flags);
--
-- return 0;
--}
--
--static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
-- struct ptp_clock_request *rq, int on)
--{
-- struct etsects *etsects = container_of(ptp, struct etsects, caps);
-- unsigned long flags;
-- u32 bit, mask;
--
-- switch (rq->type) {
-- case PTP_CLK_REQ_EXTTS:
-- switch (rq->extts.index) {
-- case 0:
-- bit = ETS1EN;
-- break;
-- case 1:
-- bit = ETS2EN;
-- break;
-- default:
-- return -EINVAL;
-- }
-- spin_lock_irqsave(&etsects->lock, flags);
-- mask = gfar_read(&etsects->regs->tmr_temask);
-- if (on)
-- mask |= bit;
-- else
-- mask &= ~bit;
-- gfar_write(&etsects->regs->tmr_temask, mask);
-- spin_unlock_irqrestore(&etsects->lock, flags);
-- return 0;
--
-- case PTP_CLK_REQ_PPS:
-- spin_lock_irqsave(&etsects->lock, flags);
-- mask = gfar_read(&etsects->regs->tmr_temask);
-- if (on)
-- mask |= PP1EN;
-- else
-- mask &= ~PP1EN;
-- gfar_write(&etsects->regs->tmr_temask, mask);
-- spin_unlock_irqrestore(&etsects->lock, flags);
-- return 0;
--
-- default:
-- break;
-- }
--
-- return -EOPNOTSUPP;
--}
--
--static const struct ptp_clock_info ptp_gianfar_caps = {
-- .owner = THIS_MODULE,
-- .name = "gianfar clock",
-- .max_adj = 512000,
-- .n_alarm = 0,
-- .n_ext_ts = N_EXT_TS,
-- .n_per_out = 0,
-- .n_pins = 0,
-- .pps = 1,
-- .adjfine = ptp_gianfar_adjfine,
-- .adjtime = ptp_gianfar_adjtime,
-- .gettime64 = ptp_gianfar_gettime,
-- .settime64 = ptp_gianfar_settime,
-- .enable = ptp_gianfar_enable,
--};
--
--static int gianfar_ptp_probe(struct platform_device *dev)
--{
-- struct device_node *node = dev->dev.of_node;
-- struct etsects *etsects;
-- struct timespec64 now;
-- int err = -ENOMEM;
-- u32 tmr_ctrl;
-- unsigned long flags;
--
-- etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
-- if (!etsects)
-- goto no_memory;
--
-- err = -ENODEV;
--
-- etsects->caps = ptp_gianfar_caps;
--
-- if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel))
-- etsects->cksel = DEFAULT_CKSEL;
--
-- if (of_property_read_u32(node,
-- "fsl,tclk-period", &etsects->tclk_period) ||
-- of_property_read_u32(node,
-- "fsl,tmr-prsc", &etsects->tmr_prsc) ||
-- of_property_read_u32(node,
-- "fsl,tmr-add", &etsects->tmr_add) ||
-- of_property_read_u32(node,
-- "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
-- of_property_read_u32(node,
-- "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
-- of_property_read_u32(node,
-- "fsl,max-adj", &etsects->caps.max_adj)) {
-- pr_err("device tree node missing required elements\n");
-- goto no_node;
-- }
--
-- etsects->irq = platform_get_irq(dev, 0);
--
-- if (etsects->irq < 0) {
-- pr_err("irq not in device tree\n");
-- goto no_node;
-- }
-- if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
-- pr_err("request_irq failed\n");
-- goto no_node;
-- }
--
-- etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
-- if (!etsects->rsrc) {
-- pr_err("no resource\n");
-- goto no_resource;
-- }
-- if (request_resource(&iomem_resource, etsects->rsrc)) {
-- pr_err("resource busy\n");
-- goto no_resource;
-- }
--
-- spin_lock_init(&etsects->lock);
--
-- etsects->regs = ioremap(etsects->rsrc->start,
-- resource_size(etsects->rsrc));
-- if (!etsects->regs) {
-- pr_err("ioremap ptp registers failed\n");
-- goto no_ioremap;
-- }
-- getnstimeofday64(&now);
-- ptp_gianfar_settime(&etsects->caps, &now);
--
-- tmr_ctrl =
-- (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
-- (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
--
-- spin_lock_irqsave(&etsects->lock, flags);
--
-- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
-- gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
-- gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
-- gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
-- gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
-- set_alarm(etsects);
-- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
--
-- spin_unlock_irqrestore(&etsects->lock, flags);
--
-- etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
-- if (IS_ERR(etsects->clock)) {
-- err = PTR_ERR(etsects->clock);
-- goto no_clock;
-- }
-- gfar_phc_index = ptp_clock_index(etsects->clock);
--
-- platform_set_drvdata(dev, etsects);
--
-- return 0;
--
--no_clock:
-- iounmap(etsects->regs);
--no_ioremap:
-- release_resource(etsects->rsrc);
--no_resource:
-- free_irq(etsects->irq, etsects);
--no_node:
-- kfree(etsects);
--no_memory:
-- return err;
--}
--
--static int gianfar_ptp_remove(struct platform_device *dev)
--{
-- struct etsects *etsects = platform_get_drvdata(dev);
--
-- gfar_write(&etsects->regs->tmr_temask, 0);
-- gfar_write(&etsects->regs->tmr_ctrl, 0);
--
-- gfar_phc_index = -1;
-- ptp_clock_unregister(etsects->clock);
-- iounmap(etsects->regs);
-- release_resource(etsects->rsrc);
-- free_irq(etsects->irq, etsects);
-- kfree(etsects);
--
-- return 0;
--}
--
--static const struct of_device_id match_table[] = {
-- { .compatible = "fsl,etsec-ptp" },
-- {},
--};
--MODULE_DEVICE_TABLE(of, match_table);
--
--static struct platform_driver gianfar_ptp_driver = {
-- .driver = {
-- .name = "gianfar_ptp",
-- .of_match_table = match_table,
-- },
-- .probe = gianfar_ptp_probe,
-- .remove = gianfar_ptp_remove,
--};
--
--module_platform_driver(gianfar_ptp_driver);
--
--MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
--MODULE_DESCRIPTION("PTP clock using the eTSEC");
--MODULE_LICENSE("GPL");
---- a/drivers/ptp/Makefile
-+++ b/drivers/ptp/Makefile
-@@ -9,3 +9,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_
- obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
- obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
- obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
-+obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o
---- a/drivers/ptp/ptp_chardev.c
-+++ b/drivers/ptp/ptp_chardev.c
-@@ -224,7 +224,7 @@ long ptp_ioctl(struct posix_clock *pc, u
- }
- pct = &sysoff->ts[0];
- for (i = 0; i < sysoff->n_samples; i++) {
-- getnstimeofday64(&ts);
-+ ktime_get_real_ts64(&ts);
- pct->sec = ts.tv_sec;
- pct->nsec = ts.tv_nsec;
- pct++;
-@@ -235,7 +235,7 @@ long ptp_ioctl(struct posix_clock *pc, u
- pct->nsec = ts.tv_nsec;
- pct++;
- }
-- getnstimeofday64(&ts);
-+ ktime_get_real_ts64(&ts);
- pct->sec = ts.tv_sec;
- pct->nsec = ts.tv_nsec;
- if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
---- /dev/null
-+++ b/drivers/ptp/ptp_qoriq.c
-@@ -0,0 +1,589 @@
-+/*
-+ * PTP 1588 clock for Freescale QorIQ 1588 timer
-+ *
-+ * Copyright (C) 2010 OMICRON electronics GmbH
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-+ */
-+
-+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-+
-+#include <linux/device.h>
-+#include <linux/hrtimer.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/of_platform.h>
-+#include <linux/timex.h>
-+#include <linux/slab.h>
-+#include <linux/clk.h>
-+
-+#include <linux/fsl/ptp_qoriq.h>
-+
-+/*
-+ * Register access functions
-+ */
-+
-+/* Caller must hold qoriq_ptp->lock. */
-+static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
-+{
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+ u64 ns;
-+ u32 lo, hi;
-+
-+ lo = qoriq_read(&regs->ctrl_regs->tmr_cnt_l);
-+ hi = qoriq_read(&regs->ctrl_regs->tmr_cnt_h);
-+ ns = ((u64) hi) << 32;
-+ ns |= lo;
-+ return ns;
-+}
-+
-+/* Caller must hold qoriq_ptp->lock. */
-+static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns)
-+{
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+ u32 hi = ns >> 32;
-+ u32 lo = ns & 0xffffffff;
-+
-+ qoriq_write(&regs->ctrl_regs->tmr_cnt_l, lo);
-+ qoriq_write(&regs->ctrl_regs->tmr_cnt_h, hi);
-+}
-+
-+/* Caller must hold qoriq_ptp->lock. */
-+static void set_alarm(struct qoriq_ptp *qoriq_ptp)
-+{
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+ u64 ns;
-+ u32 lo, hi;
-+
-+ ns = tmr_cnt_read(qoriq_ptp) + 1500000000ULL;
-+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
-+ ns -= qoriq_ptp->tclk_period;
-+ hi = ns >> 32;
-+ lo = ns & 0xffffffff;
-+ qoriq_write(&regs->alarm_regs->tmr_alarm1_l, lo);
-+ qoriq_write(&regs->alarm_regs->tmr_alarm1_h, hi);
-+}
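The arithmetic in set_alarm() above adds 1.5 s to the current counter, rounds down to a whole second and backs off by one timer tick, so the ALARM1 time lands just before a second boundary and the FIPER values written next stay aligned to it. A minimal standalone sketch of the same calculation (the counter value and the 2 ns tick of a 500 MHz nominal clock are assumed examples, not values from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t tclk_period = 2;                    /* ns per tick, assumed 500 MHz clock */
        uint64_t now = 1234567890123ULL;             /* assumed current counter value, ns */
        uint64_t ns;

        ns = now + 1500000000ULL;                    /* at least one second ahead */
        ns = (ns / 1000000000ULL) * 1000000000ULL;   /* round down to a whole second */
        ns -= tclk_period;                           /* one tick before the boundary */

        printf("alarm1 = %llu ns\n", (unsigned long long)ns);  /* 1235999999998 */
        return 0;
    }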
-+
-+/* Caller must hold qoriq_ptp->lock. */
-+static void set_fipers(struct qoriq_ptp *qoriq_ptp)
-+{
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+
-+ set_alarm(qoriq_ptp);
-+ qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
-+ qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
-+}
-+
-+/*
-+ * Interrupt service routine
-+ */
-+
-+static irqreturn_t isr(int irq, void *priv)
-+{
-+ struct qoriq_ptp *qoriq_ptp = priv;
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+ struct ptp_clock_event event;
-+ u64 ns;
-+ u32 ack = 0, lo, hi, mask, val;
-+
-+ val = qoriq_read(&regs->ctrl_regs->tmr_tevent);
-+
-+ if (val & ETS1) {
-+ ack |= ETS1;
-+ hi = qoriq_read(&regs->etts_regs->tmr_etts1_h);
-+ lo = qoriq_read(&regs->etts_regs->tmr_etts1_l);
-+ event.type = PTP_CLOCK_EXTTS;
-+ event.index = 0;
-+ event.timestamp = ((u64) hi) << 32;
-+ event.timestamp |= lo;
-+ ptp_clock_event(qoriq_ptp->clock, &event);
-+ }
-+
-+ if (val & ETS2) {
-+ ack |= ETS2;
-+ hi = qoriq_read(&regs->etts_regs->tmr_etts2_h);
-+ lo = qoriq_read(&regs->etts_regs->tmr_etts2_l);
-+ event.type = PTP_CLOCK_EXTTS;
-+ event.index = 1;
-+ event.timestamp = ((u64) hi) << 32;
-+ event.timestamp |= lo;
-+ ptp_clock_event(qoriq_ptp->clock, &event);
-+ }
-+
-+ if (val & ALM2) {
-+ ack |= ALM2;
-+ if (qoriq_ptp->alarm_value) {
-+ event.type = PTP_CLOCK_ALARM;
-+ event.index = 0;
-+ event.timestamp = qoriq_ptp->alarm_value;
-+ ptp_clock_event(qoriq_ptp->clock, &event);
-+ }
-+ if (qoriq_ptp->alarm_interval) {
-+ ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval;
-+ hi = ns >> 32;
-+ lo = ns & 0xffffffff;
-+ spin_lock(&qoriq_ptp->lock);
-+ qoriq_write(&regs->alarm_regs->tmr_alarm2_l, lo);
-+ qoriq_write(&regs->alarm_regs->tmr_alarm2_h, hi);
-+ spin_unlock(&qoriq_ptp->lock);
-+ qoriq_ptp->alarm_value = ns;
-+ } else {
-+ qoriq_write(&regs->ctrl_regs->tmr_tevent, ALM2);
-+ spin_lock(&qoriq_ptp->lock);
-+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
-+ mask &= ~ALM2EN;
-+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
-+ spin_unlock(&qoriq_ptp->lock);
-+ qoriq_ptp->alarm_value = 0;
-+ qoriq_ptp->alarm_interval = 0;
-+ }
-+ }
-+
-+ if (val & PP1) {
-+ ack |= PP1;
-+ event.type = PTP_CLOCK_PPS;
-+ ptp_clock_event(qoriq_ptp->clock, &event);
-+ }
-+
-+ if (ack) {
-+ qoriq_write(&regs->ctrl_regs->tmr_tevent, ack);
-+ return IRQ_HANDLED;
-+ } else
-+ return IRQ_NONE;
-+}
-+
-+/*
-+ * PTP clock operations
-+ */
-+
-+static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
-+{
-+ u64 adj, diff;
-+ u32 tmr_add;
-+ int neg_adj = 0;
-+ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+
-+ if (scaled_ppm < 0) {
-+ neg_adj = 1;
-+ scaled_ppm = -scaled_ppm;
-+ }
-+ tmr_add = qoriq_ptp->tmr_add;
-+ adj = tmr_add;
-+
-+ /* calculate diff as adj*(scaled_ppm/65536)/1000000
-+ * and round() to the nearest integer
-+ */
-+ adj *= scaled_ppm;
-+ diff = div_u64(adj, 8000000);
-+ diff = (diff >> 13) + ((diff >> 12) & 1);
-+
-+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-+
-+ qoriq_write(&regs->ctrl_regs->tmr_add, tmr_add);
-+
-+ return 0;
-+}
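The two divisions in ptp_qoriq_adjfine() above implement the adj * (scaled_ppm / 65536) / 1000000 formula from its comment: dividing by 8000000 and then shifting right by 13 divides by 65536 * 1000000 in total, and bit 12 of the intermediate value supplies the rounding. A standalone sketch with assumed inputs (neither value is taken from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t tmr_add = 3067833783ULL;   /* assumed TMR_ADD value */
        uint64_t scaled_ppm = 65536;        /* +1 ppm in scaled_ppm units */

        uint64_t adj = tmr_add * scaled_ppm;
        uint64_t diff = adj / 8000000;                 /* div_u64(adj, 8000000) */
        diff = (diff >> 13) + ((diff >> 12) & 1);      /* /8192, rounded to nearest */

        printf("diff = %llu\n", (unsigned long long)diff);  /* 3068, i.e. ~tmr_add / 10^6 */
        return 0;
    }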
-+
-+static int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
-+{
-+ s64 now;
-+ unsigned long flags;
-+ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
-+
-+ spin_lock_irqsave(&qoriq_ptp->lock, flags);
-+
-+ now = tmr_cnt_read(qoriq_ptp);
-+ now += delta;
-+ tmr_cnt_write(qoriq_ptp, now);
-+ set_fipers(qoriq_ptp);
-+
-+ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
-+
-+ return 0;
-+}
-+
-+static int ptp_qoriq_gettime(struct ptp_clock_info *ptp,
-+ struct timespec64 *ts)
-+{
-+ u64 ns;
-+ unsigned long flags;
-+ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
-+
-+ spin_lock_irqsave(&qoriq_ptp->lock, flags);
-+
-+ ns = tmr_cnt_read(qoriq_ptp);
-+
-+ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
-+
-+ *ts = ns_to_timespec64(ns);
-+
-+ return 0;
-+}
-+
-+static int ptp_qoriq_settime(struct ptp_clock_info *ptp,
-+ const struct timespec64 *ts)
-+{
-+ u64 ns;
-+ unsigned long flags;
-+ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
-+
-+ ns = timespec64_to_ns(ts);
-+
-+ spin_lock_irqsave(&qoriq_ptp->lock, flags);
-+
-+ tmr_cnt_write(qoriq_ptp, ns);
-+ set_fipers(qoriq_ptp);
-+
-+ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
-+
-+ return 0;
-+}
-+
-+static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
-+ struct ptp_clock_request *rq, int on)
-+{
-+ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+ unsigned long flags;
-+ u32 bit, mask;
-+
-+ switch (rq->type) {
-+ case PTP_CLK_REQ_EXTTS:
-+ switch (rq->extts.index) {
-+ case 0:
-+ bit = ETS1EN;
-+ break;
-+ case 1:
-+ bit = ETS2EN;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ spin_lock_irqsave(&qoriq_ptp->lock, flags);
-+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
-+ if (on)
-+ mask |= bit;
-+ else
-+ mask &= ~bit;
-+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
-+ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
-+ return 0;
-+
-+ case PTP_CLK_REQ_PPS:
-+ spin_lock_irqsave(&qoriq_ptp->lock, flags);
-+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
-+ if (on)
-+ mask |= PP1EN;
-+ else
-+ mask &= ~PP1EN;
-+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
-+ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
-+ return 0;
-+
-+ default:
-+ break;
-+ }
-+
-+ return -EOPNOTSUPP;
-+}
-+
-+static const struct ptp_clock_info ptp_qoriq_caps = {
-+ .owner = THIS_MODULE,
-+ .name = "qoriq ptp clock",
-+ .max_adj = 512000,
-+ .n_alarm = 0,
-+ .n_ext_ts = N_EXT_TS,
-+ .n_per_out = 0,
-+ .n_pins = 0,
-+ .pps = 1,
-+ .adjfine = ptp_qoriq_adjfine,
-+ .adjtime = ptp_qoriq_adjtime,
-+ .gettime64 = ptp_qoriq_gettime,
-+ .settime64 = ptp_qoriq_settime,
-+ .enable = ptp_qoriq_enable,
-+};
-+
-+/**
-+ * qoriq_ptp_nominal_freq - calculate nominal frequency according to
-+ * reference clock frequency
-+ *
-+ * @clk_src: reference clock frequency
-+ *
-+ * The nominal frequency is the desired clock frequency.
-+ * It should be less than the reference clock frequency.
-+ * It should be a factor of 1000MHz.
-+ *
-+ * Return the nominal frequency
-+ */
-+static u32 qoriq_ptp_nominal_freq(u32 clk_src)
-+{
-+ u32 remainder = 0;
-+
-+ clk_src /= 1000000;
-+ remainder = clk_src % 100;
-+ if (remainder) {
-+ clk_src -= remainder;
-+ clk_src += 100;
-+ }
-+
-+ do {
-+ clk_src -= 100;
-+
-+ } while (1000 % clk_src);
-+
-+ return clk_src * 1000000;
-+}
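As the kernel-doc above says, the loop settles on the largest frequency below the reference clock that is a multiple of 100 MHz and divides 1000 MHz evenly. A standalone sketch of the same search, with an assumed 700 MHz reference clock as input:

    #include <stdio.h>

    static unsigned int nominal_freq(unsigned int clk_src)
    {
        unsigned int remainder;

        clk_src /= 1000000;                 /* work in MHz */
        remainder = clk_src % 100;
        if (remainder)
            clk_src += 100 - remainder;     /* round up to a multiple of 100 MHz */

        do {
            clk_src -= 100;                 /* step down until it divides 1000 MHz */
        } while (1000 % clk_src);

        return clk_src * 1000000;
    }

    int main(void)
    {
        printf("%u\n", nominal_freq(700000000));   /* prints 500000000 */
        return 0;
    }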
-+
-+/**
-+ * qoriq_ptp_auto_config - calculate a set of default configurations
-+ *
-+ * @qoriq_ptp: pointer to qoriq_ptp
-+ * @node: pointer to device_node
-+ *
-+ * If below dts properties are not provided, this function will be
-+ * called to calculate a set of default configurations for them.
-+ * "fsl,tclk-period"
-+ * "fsl,tmr-prsc"
-+ * "fsl,tmr-add"
-+ * "fsl,tmr-fiper1"
-+ * "fsl,tmr-fiper2"
-+ * "fsl,max-adj"
-+ *
-+ * Return 0 on success
-+ */
-+static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
-+ struct device_node *node)
-+{
-+ struct clk *clk;
-+ u64 freq_comp;
-+ u64 max_adj;
-+ u32 nominal_freq;
-+ u32 remainder = 0;
-+ u32 clk_src = 0;
-+
-+ qoriq_ptp->cksel = DEFAULT_CKSEL;
-+
-+ clk = of_clk_get(node, 0);
-+ if (!IS_ERR(clk)) {
-+ clk_src = clk_get_rate(clk);
-+ clk_put(clk);
-+ }
-+
-+ if (clk_src <= 100000000UL) {
-+ pr_err("error reference clock value, or lower than 100MHz\n");
-+ return -EINVAL;
-+ }
-+
-+ nominal_freq = qoriq_ptp_nominal_freq(clk_src);
-+ if (!nominal_freq)
-+ return -EINVAL;
-+
-+ qoriq_ptp->tclk_period = 1000000000UL / nominal_freq;
-+ qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC;
-+
-+ /* Calculate initial frequency compensation value for TMR_ADD register.
-+ * freq_comp = ceil(2^32 / freq_ratio)
-+ * freq_ratio = reference_clock_freq / nominal_freq
-+ */
-+ freq_comp = ((u64)1 << 32) * nominal_freq;
-+ freq_comp = div_u64_rem(freq_comp, clk_src, &remainder);
-+ if (remainder)
-+ freq_comp++;
-+
-+ qoriq_ptp->tmr_add = freq_comp;
-+ qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period;
-+ qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period;
-+
-+ /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
-+ * freq_ratio = reference_clock_freq / nominal_freq
-+ */
-+ max_adj = 1000000000ULL * (clk_src - nominal_freq);
-+ max_adj = div_u64(max_adj, nominal_freq) - 1;
-+ qoriq_ptp->caps.max_adj = max_adj;
-+
-+ return 0;
-+}
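Running the formulas from qoriq_ptp_auto_config() above with concrete numbers: for an assumed 700 MHz reference clock the nominal frequency comes out to 500 MHz, giving a 2 ns tick, TMR_ADD = ceil(2^32 * 500/700) and max_adj = 10^9 * (700/500 - 1) - 1. A standalone sketch of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t clk_src = 700000000;   /* assumed reference clock, Hz */
        uint64_t nominal = 500000000;   /* nominal frequency derived from it, Hz */

        uint64_t tclk_period = 1000000000ULL / nominal;                              /* 2 ns */
        uint64_t tmr_add = (((uint64_t)1 << 32) * nominal + clk_src - 1) / clk_src;  /* ceil */
        uint64_t max_adj = 1000000000ULL * (clk_src - nominal) / nominal - 1;

        /* prints: tclk_period=2 tmr_add=3067833783 max_adj=399999999 */
        printf("tclk_period=%llu tmr_add=%llu max_adj=%llu\n",
               (unsigned long long)tclk_period,
               (unsigned long long)tmr_add,
               (unsigned long long)max_adj);
        return 0;
    }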
-+
-+static int qoriq_ptp_probe(struct platform_device *dev)
-+{
-+ struct device_node *node = dev->dev.of_node;
-+ struct qoriq_ptp *qoriq_ptp;
-+ struct qoriq_ptp_registers *regs;
-+ struct timespec64 now;
-+ int err = -ENOMEM;
-+ u32 tmr_ctrl;
-+ unsigned long flags;
-+ void __iomem *base;
-+
-+ qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL);
-+ if (!qoriq_ptp)
-+ goto no_memory;
-+
-+ err = -EINVAL;
-+
-+ qoriq_ptp->caps = ptp_qoriq_caps;
-+
-+ if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel))
-+ qoriq_ptp->cksel = DEFAULT_CKSEL;
-+
-+ if (of_property_read_u32(node,
-+ "fsl,tclk-period", &qoriq_ptp->tclk_period) ||
-+ of_property_read_u32(node,
-+ "fsl,tmr-prsc", &qoriq_ptp->tmr_prsc) ||
-+ of_property_read_u32(node,
-+ "fsl,tmr-add", &qoriq_ptp->tmr_add) ||
-+ of_property_read_u32(node,
-+ "fsl,tmr-fiper1", &qoriq_ptp->tmr_fiper1) ||
-+ of_property_read_u32(node,
-+ "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) ||
-+ of_property_read_u32(node,
-+ "fsl,max-adj", &qoriq_ptp->caps.max_adj)) {
-+ pr_warn("device tree node missing required elements, try automatic configuration\n");
-+
-+ if (qoriq_ptp_auto_config(qoriq_ptp, node))
-+ goto no_config;
-+ }
-+
-+ err = -ENODEV;
-+
-+ qoriq_ptp->irq = platform_get_irq(dev, 0);
-+
-+ if (qoriq_ptp->irq < 0) {
-+ pr_err("irq not in device tree\n");
-+ goto no_node;
-+ }
-+ if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) {
-+ pr_err("request_irq failed\n");
-+ goto no_node;
-+ }
-+
-+ qoriq_ptp->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
-+ if (!qoriq_ptp->rsrc) {
-+ pr_err("no resource\n");
-+ goto no_resource;
-+ }
-+ if (request_resource(&iomem_resource, qoriq_ptp->rsrc)) {
-+ pr_err("resource busy\n");
-+ goto no_resource;
-+ }
-+
-+ spin_lock_init(&qoriq_ptp->lock);
-+
-+ base = ioremap(qoriq_ptp->rsrc->start,
-+ resource_size(qoriq_ptp->rsrc));
-+ if (!base) {
-+ pr_err("ioremap ptp registers failed\n");
-+ goto no_ioremap;
-+ }
-+
-+ qoriq_ptp->base = base;
-+
-+ if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) {
-+ qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET;
-+ qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET;
-+ qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET;
-+ qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET;
-+ } else {
-+ qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
-+ qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
-+ qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET;
-+ qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET;
-+ }
-+
-+ ktime_get_real_ts64(&now);
-+ ptp_qoriq_settime(&qoriq_ptp->caps, &now);
-+
-+ tmr_ctrl =
-+ (qoriq_ptp->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
-+ (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT;
-+
-+ spin_lock_irqsave(&qoriq_ptp->lock, flags);
-+
-+ regs = &qoriq_ptp->regs;
-+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
-+ qoriq_write(&regs->ctrl_regs->tmr_add, qoriq_ptp->tmr_add);
-+ qoriq_write(&regs->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc);
-+ qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
-+ qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
-+ set_alarm(qoriq_ptp);
-+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
-+
-+ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
-+
-+ qoriq_ptp->clock = ptp_clock_register(&qoriq_ptp->caps, &dev->dev);
-+ if (IS_ERR(qoriq_ptp->clock)) {
-+ err = PTR_ERR(qoriq_ptp->clock);
-+ goto no_clock;
-+ }
-+ qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock);
-+
-+ platform_set_drvdata(dev, qoriq_ptp);
-+
-+ return 0;
-+
-+no_clock:
-+ iounmap(qoriq_ptp->base);
-+no_ioremap:
-+ release_resource(qoriq_ptp->rsrc);
-+no_resource:
-+ free_irq(qoriq_ptp->irq, qoriq_ptp);
-+no_config:
-+no_node:
-+ kfree(qoriq_ptp);
-+no_memory:
-+ return err;
-+}
-+
-+static int qoriq_ptp_remove(struct platform_device *dev)
-+{
-+ struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev);
-+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-+
-+ qoriq_write(&regs->ctrl_regs->tmr_temask, 0);
-+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, 0);
-+
-+ ptp_clock_unregister(qoriq_ptp->clock);
-+ iounmap(qoriq_ptp->base);
-+ release_resource(qoriq_ptp->rsrc);
-+ free_irq(qoriq_ptp->irq, qoriq_ptp);
-+ kfree(qoriq_ptp);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id match_table[] = {
-+ { .compatible = "fsl,etsec-ptp" },
-+ { .compatible = "fsl,fman-ptp-timer" },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(of, match_table);
-+
-+static struct platform_driver qoriq_ptp_driver = {
-+ .driver = {
-+ .name = "ptp_qoriq",
-+ .of_match_table = match_table,
-+ },
-+ .probe = qoriq_ptp_probe,
-+ .remove = qoriq_ptp_remove,
-+};
-+
-+module_platform_driver(qoriq_ptp_driver);
-+
-+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
-+MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/include/linux/fsl/ptp_qoriq.h
-@@ -0,0 +1,169 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2010 OMICRON electronics GmbH
-+ * Copyright 2018 NXP
-+ */
-+#ifndef __PTP_QORIQ_H__
-+#define __PTP_QORIQ_H__
-+
-+#include <linux/io.h>
-+#include <linux/ptp_clock_kernel.h>
-+
-+/*
-+ * qoriq ptp registers
-+ */
-+struct ctrl_regs {
-+ u32 tmr_ctrl; /* Timer control register */
-+ u32 tmr_tevent; /* Timestamp event register */
-+ u32 tmr_temask; /* Timer event mask register */
-+ u32 tmr_pevent; /* Timestamp event register */
-+ u32 tmr_pemask; /* Timer event mask register */
-+ u32 tmr_stat; /* Timestamp status register */
-+ u32 tmr_cnt_h; /* Timer counter high register */
-+ u32 tmr_cnt_l; /* Timer counter low register */
-+ u32 tmr_add; /* Timer drift compensation addend register */
-+ u32 tmr_acc; /* Timer accumulator register */
-+ u32 tmr_prsc; /* Timer prescale */
-+ u8 res1[4];
-+ u32 tmroff_h; /* Timer offset high */
-+ u32 tmroff_l; /* Timer offset low */
-+};
-+
-+struct alarm_regs {
-+ u32 tmr_alarm1_h; /* Timer alarm 1 high register */
-+ u32 tmr_alarm1_l; /* Timer alarm 1 low register */
-+ u32 tmr_alarm2_h; /* Timer alarm 2 high register */
-+ u32 tmr_alarm2_l; /* Timer alarm 2 low register */
-+};
-+
-+struct fiper_regs {
-+ u32 tmr_fiper1; /* Timer fixed period interval */
-+ u32 tmr_fiper2; /* Timer fixed period interval */
-+ u32 tmr_fiper3; /* Timer fixed period interval */
-+};
-+
-+struct etts_regs {
-+ u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
-+ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
-+ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
-+ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
-+};
-+
-+struct qoriq_ptp_registers {
-+ struct ctrl_regs __iomem *ctrl_regs;
-+ struct alarm_regs __iomem *alarm_regs;
-+ struct fiper_regs __iomem *fiper_regs;
-+ struct etts_regs __iomem *etts_regs;
-+};
-+
-+/* Offset definitions for the four register groups */
-+#define CTRL_REGS_OFFSET 0x0
-+#define ALARM_REGS_OFFSET 0x40
-+#define FIPER_REGS_OFFSET 0x80
-+#define ETTS_REGS_OFFSET 0xa0
-+
-+#define FMAN_CTRL_REGS_OFFSET 0x80
-+#define FMAN_ALARM_REGS_OFFSET 0xb8
-+#define FMAN_FIPER_REGS_OFFSET 0xd0
-+#define FMAN_ETTS_REGS_OFFSET 0xe0
-+
-+
-+/* Bit definitions for the TMR_CTRL register */
-+#define ALM1P (1<<31) /* Alarm1 output polarity */
-+#define ALM2P (1<<30) /* Alarm2 output polarity */
-+#define FIPERST (1<<28) /* FIPER start indication */
-+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
-+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
-+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
-+#define TCLK_PERIOD_MASK (0x3ff)
-+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
-+#define FRD (1<<14) /* FIPER Realignment Disable */
-+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
-+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
-+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
-+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
-+#define COPH (1<<7) /* Generated clock output phase. */
-+#define CIPH (1<<6) /* External oscillator input clock phase */
-+#define TMSR (1<<5) /* Timer soft reset. */
-+#define BYP (1<<3) /* Bypass drift compensated clock */
-+#define TE (1<<2) /* 1588 timer enable. */
-+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
-+#define CKSEL_MASK (0x3)
-+
-+/* Bit definitions for the TMR_TEVENT register */
-+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
-+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
-+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
-+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
-+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
-+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
-+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
-+
-+/* Bit definitions for the TMR_TEMASK register */
-+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
-+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
-+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
-+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
-+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
-+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
-+
-+/* Bit definitions for the TMR_PEVENT register */
-+#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
-+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
-+#define RXP (1<<0) /* PTP frame has been received */
-+
-+/* Bit definitions for the TMR_PEMASK register */
-+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
-+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
-+#define RXPEN (1<<0) /* Receive PTP packet event enable */
-+
-+/* Bit definitions for the TMR_STAT register */
-+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
-+#define STAT_VEC_MASK (0x3f)
-+
-+/* Bit definitions for the TMR_PRSC register */
-+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
-+#define PRSC_OCK_MASK (0xffff)
-+
-+
-+#define DRIVER "ptp_qoriq"
-+#define N_EXT_TS 2
-+
-+#define DEFAULT_CKSEL 1
-+#define DEFAULT_TMR_PRSC 2
-+#define DEFAULT_FIPER1_PERIOD 1000000000
-+#define DEFAULT_FIPER2_PERIOD 100000
-+
-+struct qoriq_ptp {
-+ void __iomem *base;
-+ struct qoriq_ptp_registers regs;
-+ spinlock_t lock; /* protects regs */
-+ struct ptp_clock *clock;
-+ struct ptp_clock_info caps;
-+ struct resource *rsrc;
-+ int irq;
-+ int phc_index;
-+ u64 alarm_interval; /* for periodic alarm */
-+ u64 alarm_value;
-+ u32 tclk_period; /* nanoseconds */
-+ u32 tmr_prsc;
-+ u32 tmr_add;
-+ u32 cksel;
-+ u32 tmr_fiper1;
-+ u32 tmr_fiper2;
-+};
-+
-+static inline u32 qoriq_read(unsigned __iomem *addr)
-+{
-+ u32 val;
-+
-+ val = ioread32be(addr);
-+ return val;
-+}
-+
-+static inline void qoriq_write(unsigned __iomem *addr, u32 val)
-+{
-+ iowrite32be(val, addr);
-+}
-+
-+#endif
diff --git a/target/linux/layerscape/patches-4.14/825-tmu-support-layerscape.patch b/target/linux/layerscape/patches-4.14/825-tmu-support-layerscape.patch
deleted file mode 100644
index 5312444f10..0000000000
--- a/target/linux/layerscape/patches-4.14/825-tmu-support-layerscape.patch
+++ /dev/null
@@ -1,188 +0,0 @@
-From 2ddaec76dbe9b6e911e2a1442248ab103909cce3 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Wed, 17 Apr 2019 18:59:06 +0800
-Subject: [PATCH] tmu: support layerscape
-
-This is an integrated patch of tmu for layerscape
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
-Signed-off-by: Eduardo Valentin <edubezval@gmail.com>
-Signed-off-by: Fabio Estevam <fabio.estevam@nxp.com>
-Signed-off-by: Yuantian Tang <andy.tang@nxp.com>
----
- drivers/thermal/qoriq_thermal.c | 102 ++++++++++++++------------------
- 1 file changed, 46 insertions(+), 56 deletions(-)
-
---- a/drivers/thermal/qoriq_thermal.c
-+++ b/drivers/thermal/qoriq_thermal.c
-@@ -69,14 +69,21 @@ struct qoriq_tmu_regs {
- u32 ttr3cr; /* Temperature Range 3 Control Register */
- };
-
-+struct qoriq_tmu_data;
-+
- /*
- * Thermal zone data
- */
-+struct qoriq_sensor {
-+ struct thermal_zone_device *tzd;
-+ struct qoriq_tmu_data *qdata;
-+ int id;
-+};
-+
- struct qoriq_tmu_data {
-- struct thermal_zone_device *tz;
- struct qoriq_tmu_regs __iomem *regs;
-- int sensor_id;
- bool little_endian;
-+ struct qoriq_sensor *sensor[SITES_MAX];
- };
-
- static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
-@@ -97,48 +104,51 @@ static u32 tmu_read(struct qoriq_tmu_dat
-
- static int tmu_get_temp(void *p, int *temp)
- {
-+ struct qoriq_sensor *qsensor = p;
-+ struct qoriq_tmu_data *qdata = qsensor->qdata;
- u32 val;
-- struct qoriq_tmu_data *data = p;
-
-- val = tmu_read(data, &data->regs->site[data->sensor_id].tritsr);
-+ val = tmu_read(qdata, &qdata->regs->site[qsensor->id].tritsr);
- *temp = (val & 0xff) * 1000;
-
- return 0;
- }
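tmu_get_temp() above masks the low byte of the TRITSR reading (the temperature in degrees Celsius) and scales it to the millidegrees the thermal core expects. A tiny sketch with an assumed register value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x2d;            /* assumed TRITSR reading: 45 degrees C */
        int temp = (val & 0xff) * 1000;     /* millidegrees Celsius */

        printf("%d\n", temp);               /* 45000 */
        return 0;
    }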
-
--static int qoriq_tmu_get_sensor_id(void)
-+static const struct thermal_zone_of_device_ops tmu_tz_ops = {
-+ .get_temp = tmu_get_temp,
-+};
-+
-+static int qoriq_tmu_register_tmu_zone(struct platform_device *pdev)
- {
-- int ret, id;
-- struct of_phandle_args sensor_specs;
-- struct device_node *np, *sensor_np;
-+ struct qoriq_tmu_data *qdata = platform_get_drvdata(pdev);
-+ int id, sites = 0;
-
-- np = of_find_node_by_name(NULL, "thermal-zones");
-- if (!np)
-- return -ENODEV;
-+ for (id = 0; id < SITES_MAX; id++) {
-+ qdata->sensor[id] = devm_kzalloc(&pdev->dev,
-+ sizeof(struct qoriq_sensor), GFP_KERNEL);
-+ if (!qdata->sensor[id])
-+ return -ENOMEM;
-+
-+ qdata->sensor[id]->id = id;
-+ qdata->sensor[id]->qdata = qdata;
-+
-+ qdata->sensor[id]->tzd = devm_thermal_zone_of_sensor_register(
-+ &pdev->dev, id, qdata->sensor[id], &tmu_tz_ops);
-+ if (IS_ERR(qdata->sensor[id]->tzd)) {
-+ if (PTR_ERR(qdata->sensor[id]->tzd) == -ENODEV)
-+ continue;
-+ else
-+ return PTR_ERR(qdata->sensor[id]->tzd);
-
-- sensor_np = of_get_next_child(np, NULL);
-- ret = of_parse_phandle_with_args(sensor_np, "thermal-sensors",
-- "#thermal-sensor-cells",
-- 0, &sensor_specs);
-- if (ret) {
-- of_node_put(np);
-- of_node_put(sensor_np);
-- return ret;
-- }
--
-- if (sensor_specs.args_count >= 1) {
-- id = sensor_specs.args[0];
-- WARN(sensor_specs.args_count > 1,
-- "%s: too many cells in sensor specifier %d\n",
-- sensor_specs.np->name, sensor_specs.args_count);
-- } else {
-- id = 0;
-- }
-+ }
-
-- of_node_put(np);
-- of_node_put(sensor_np);
-+ sites |= 0x1 << (15 - id);
-+ }
-+ /* Enable monitoring */
-+ if (sites != 0)
-+ tmu_write(qdata, sites | TMR_ME | TMR_ALPF, &qdata->regs->tmr);
-
-- return id;
-+ return 0;
- }
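The sites mask built in qoriq_tmu_register_tmu_zone() above sets bit (15 - id) for every sensor that registered, and the result is OR'ed with TMR_ME | TMR_ALPF when monitoring is enabled. A minimal sketch of the mask, assuming sensors 0 and 1 registered successfully:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sites = 0;
        int id;

        for (id = 0; id < 2; id++)          /* assume sites 0 and 1 registered */
            sites |= 0x1 << (15 - id);

        printf("sites = 0x%04x\n", sites);  /* 0xc000 */
        return 0;
    }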
-
- static int qoriq_tmu_calibration(struct platform_device *pdev)
-@@ -188,16 +198,11 @@ static void qoriq_tmu_init_device(struct
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
- }
-
--static const struct thermal_zone_of_device_ops tmu_tz_ops = {
-- .get_temp = tmu_get_temp,
--};
--
- static int qoriq_tmu_probe(struct platform_device *pdev)
- {
- int ret;
- struct qoriq_tmu_data *data;
- struct device_node *np = pdev->dev.of_node;
-- u32 site = 0;
-
- if (!np) {
- dev_err(&pdev->dev, "Device OF-Node is NULL");
-@@ -213,13 +218,6 @@ static int qoriq_tmu_probe(struct platfo
-
- data->little_endian = of_property_read_bool(np, "little-endian");
-
-- data->sensor_id = qoriq_tmu_get_sensor_id();
-- if (data->sensor_id < 0) {
-- dev_err(&pdev->dev, "Failed to get sensor id\n");
-- ret = -ENODEV;
-- goto err_iomap;
-- }
--
- data->regs = of_iomap(np, 0);
- if (!data->regs) {
- dev_err(&pdev->dev, "Failed to get memory region\n");
-@@ -233,19 +231,13 @@ static int qoriq_tmu_probe(struct platfo
- if (ret < 0)
- goto err_tmu;
-
-- data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
-- data, &tmu_tz_ops);
-- if (IS_ERR(data->tz)) {
-- ret = PTR_ERR(data->tz);
-- dev_err(&pdev->dev,
-- "Failed to register thermal zone device %d\n", ret);
-- goto err_tmu;
-+ ret = qoriq_tmu_register_tmu_zone(pdev);
-+ if (ret < 0) {
-+ dev_err(&pdev->dev, "Failed to register sensors\n");
-+ ret = -ENODEV;
-+ goto err_iomap;
- }
-
-- /* Enable monitoring */
-- site |= 0x1 << (15 - data->sensor_id);
-- tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
--
- return 0;
-
- err_tmu:
-@@ -261,8 +253,6 @@ static int qoriq_tmu_remove(struct platf
- {
- struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
-
-- thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
--
- /* Disable monitoring */
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
-